/*-
 * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Neither the name of Matthew Macy nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_acpi.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/kobj.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/limits.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/mp_ring.h>
#include <net/debugnet.h>
#include <net/pfil.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_lro.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/led/led.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>

#include <net/iflib.h>
#include <net/iflib_private.h>

#include "ifdi_if.h"

#ifdef PCI_IOV
#include <dev/pci/pci_iov.h>
#endif

#include <sys/bitstring.h>
/*
 * enable accounting of every mbuf as it comes into and goes out of
 * iflib's software descriptor references
 */
#define MEMORY_LOGGING 0
/*
 * Enable mbuf vectors for compressing long mbuf chains
 */

/*
 * NB:
 * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
 *   we prefetch needs to be determined by the time spent in m_free vis a vis
 *   the cost of a prefetch. This will of course vary based on the workload:
 *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
 *        is quite expensive, thus suggesting very little prefetch.
 *      - small packet forwarding which is just returning a single mbuf to
 *        UMA will typically be very fast vis a vis the cost of a memory
 *        access.
 */

/*
 * File organization:
 *  - private structures
 *  - iflib private utility functions
 *  - ifnet functions
 *  - vlan registry and other exported functions
 *  - iflib public core functions
 */
MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");

#define	IFLIB_RXEOF_MORE	(1U << 0)
#define	IFLIB_RXEOF_EMPTY	(2U << 0)
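
/*
 * The IFLIB_RXEOF_* flags above are bit flags returned by the RX
 * end-of-frame processing path: MORE indicates the budget was exhausted
 * with work still pending, EMPTY that no further descriptors were
 * available.  (The consuming code lies outside this section; this note
 * records the apparent intent.)
 */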

struct iflib_txq;
typedef struct iflib_txq *iflib_txq_t;
struct iflib_rxq;
typedef struct iflib_rxq *iflib_rxq_t;
struct iflib_fl;
typedef struct iflib_fl *iflib_fl_t;

struct iflib_ctx;

static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
static void iflib_timer(void *arg);
static void iflib_tqg_detach(if_ctx_t ctx);

typedef struct iflib_filter_info {
	driver_filter_t *ifi_filter;
	void *ifi_filter_arg;
	struct grouptask *ifi_task;
	void *ifi_ctx;
} *iflib_filter_info_t;

struct iflib_ctx {
	KOBJ_FIELDS;
	/*
	 * Pointer to hardware driver's softc
	 */
	void *ifc_softc;
	device_t ifc_dev;
	if_t ifc_ifp;

	cpuset_t ifc_cpus;
	if_shared_ctx_t ifc_sctx;
	struct if_softc_ctx ifc_softc_ctx;

	struct sx ifc_ctx_sx;
	struct mtx ifc_state_mtx;

	iflib_txq_t ifc_txqs;
	iflib_rxq_t ifc_rxqs;
	uint32_t ifc_if_flags;
	uint32_t ifc_flags;
	uint32_t ifc_max_fl_buf_size;
	uint32_t ifc_rx_mbuf_sz;

	int ifc_link_state;
	int ifc_watchdog_events;
	struct cdev *ifc_led_dev;
	struct resource *ifc_msix_mem;

	struct if_irq ifc_legacy_irq;
	struct grouptask ifc_admin_task;
	struct grouptask ifc_vflr_task;
	struct iflib_filter_info ifc_filter_info;
	struct ifmedia	ifc_media;
	struct ifmedia	*ifc_mediap;

	struct sysctl_oid *ifc_sysctl_node;
	uint16_t ifc_sysctl_ntxqs;
	uint16_t ifc_sysctl_nrxqs;
	uint16_t ifc_sysctl_qs_eq_override;
	uint16_t ifc_sysctl_rx_budget;
	uint16_t ifc_sysctl_tx_abdicate;
	uint16_t ifc_sysctl_core_offset;
#define	CORE_OFFSET_UNSPECIFIED	0xffff
	uint8_t  ifc_sysctl_separate_txrx;

	qidx_t ifc_sysctl_ntxds[8];
	qidx_t ifc_sysctl_nrxds[8];
	struct if_txrx ifc_txrx;
#define isc_txd_encap		ifc_txrx.ift_txd_encap
#define isc_txd_flush		ifc_txrx.ift_txd_flush
#define isc_txd_credits_update	ifc_txrx.ift_txd_credits_update
#define isc_rxd_available	ifc_txrx.ift_rxd_available
#define isc_rxd_pkt_get		ifc_txrx.ift_rxd_pkt_get
#define isc_rxd_refill		ifc_txrx.ift_rxd_refill
#define isc_rxd_flush		ifc_txrx.ift_rxd_flush
#define isc_legacy_intr		ifc_txrx.ift_legacy_intr
	eventhandler_tag ifc_vlan_attach_event;
	eventhandler_tag ifc_vlan_detach_event;
	struct ether_addr ifc_mac;
};

void *
iflib_get_softc(if_ctx_t ctx)
{

	return (ctx->ifc_softc);
}

device_t
iflib_get_dev(if_ctx_t ctx)
{

	return (ctx->ifc_dev);
}

if_t
iflib_get_ifp(if_ctx_t ctx)
{

	return (ctx->ifc_ifp);
}

struct ifmedia *
iflib_get_media(if_ctx_t ctx)
{

	return (ctx->ifc_mediap);
}

uint32_t
iflib_get_flags(if_ctx_t ctx)
{

	return (ctx->ifc_flags);
}

void
iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
{

	bcopy(mac, ctx->ifc_mac.octet, ETHER_ADDR_LEN);
}

if_softc_ctx_t
iflib_get_softc_ctx(if_ctx_t ctx)
{

	return (&ctx->ifc_softc_ctx);
}

if_shared_ctx_t
iflib_get_sctx(if_ctx_t ctx)
{

	return (ctx->ifc_sctx);
}

#define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
#define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
/* round a pointer up to the next cache line boundary */
#define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)CACHE_LINE_SIZE-1)))

#define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
#define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)

typedef struct iflib_sw_rx_desc_array {
	bus_dmamap_t	*ifsd_map;	/* bus_dma maps for packet */
	struct mbuf	**ifsd_m;	/* pkthdr mbufs */
	caddr_t		*ifsd_cl;	/* direct cluster pointer for rx */
	bus_addr_t	*ifsd_ba;	/* bus addr of cluster for rx */
} iflib_rxsd_array_t;

typedef struct iflib_sw_tx_desc_array {
	bus_dmamap_t	*ifsd_map;	/* bus_dma maps for packet */
	bus_dmamap_t	*ifsd_tso_map;	/* bus_dma maps for TSO packet */
	struct mbuf	**ifsd_m;	/* pkthdr mbufs */
} if_txsd_vec_t;

/* magic number that should be high enough for any hardware */
#define IFLIB_MAX_TX_SEGS		128
#define IFLIB_RX_COPY_THRESH		128
#define IFLIB_MAX_RX_REFRESH		32
/* The minimum descriptors per second before we start coalescing */
#define IFLIB_MIN_DESC_SEC		16384
#define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
#define IFLIB_QUEUE_IDLE		0
#define IFLIB_QUEUE_HUNG		1
#define IFLIB_QUEUE_WORKING		2
/* maximum number of txqs that can share an rx interrupt */
#define IFLIB_MAX_TX_SHARED_INTR	4

/* this should really scale with ring size - this is a fairly arbitrary value */
#define TX_BATCH_SIZE			32

#define IFLIB_RESTART_BUDGET		8

#define CSUM_OFFLOAD		(CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
				 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
				 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)

struct iflib_txq {
	qidx_t		ift_in_use;
	qidx_t		ift_cidx;
	qidx_t		ift_cidx_processed;
	qidx_t		ift_pidx;
	uint8_t		ift_gen;
	uint8_t		ift_br_offset;
	uint16_t	ift_npending;
	uint16_t	ift_db_pending;
	uint16_t	ift_rs_pending;
	/* implicit pad */
	uint8_t		ift_txd_size[8];
	uint64_t	ift_processed;
	uint64_t	ift_cleaned;
	uint64_t	ift_cleaned_prev;
#if MEMORY_LOGGING
	uint64_t	ift_enqueued;
	uint64_t	ift_dequeued;
#endif
	uint64_t	ift_no_tx_dma_setup;
	uint64_t	ift_no_desc_avail;
	uint64_t	ift_mbuf_defrag_failed;
	uint64_t	ift_mbuf_defrag;
	uint64_t	ift_map_failed;
	uint64_t	ift_txd_encap_efbig;
	uint64_t	ift_pullups;
	uint64_t	ift_last_timer_tick;

	struct mtx	ift_mtx;
	struct mtx	ift_db_mtx;

	/* constant values */
	if_ctx_t	ift_ctx;
	struct ifmp_ring	*ift_br;
	struct grouptask	ift_task;
	qidx_t		ift_size;
	uint16_t	ift_id;
	struct callout	ift_timer;
#ifdef DEV_NETMAP
	struct callout	ift_netmap_timer;
#endif /* DEV_NETMAP */

	if_txsd_vec_t	ift_sds;
	uint8_t		ift_qstatus;
	uint8_t		ift_closed;
	uint8_t		ift_update_freq;
	struct iflib_filter_info ift_filter_info;
	bus_dma_tag_t	ift_buf_tag;
	bus_dma_tag_t	ift_tso_buf_tag;
	iflib_dma_info_t	ift_ifdi;
#define	MTX_NAME_LEN	32
	char		ift_mtx_name[MTX_NAME_LEN];
	bus_dma_segment_t	ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ift_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

struct iflib_fl {
	qidx_t		ifl_cidx;
	qidx_t		ifl_pidx;
	qidx_t		ifl_credits;
	uint8_t		ifl_gen;
	uint8_t		ifl_rxd_size;
#if MEMORY_LOGGING
	uint64_t	ifl_m_enqueued;
	uint64_t	ifl_m_dequeued;
	uint64_t	ifl_cl_enqueued;
	uint64_t	ifl_cl_dequeued;
#endif
	/* implicit pad */
	bitstr_t	*ifl_rx_bitmap;
	qidx_t		ifl_fragidx;
	/* constant */
	qidx_t		ifl_size;
	uint16_t	ifl_buf_size;
	uint16_t	ifl_cltype;
	uma_zone_t	ifl_zone;
	iflib_rxsd_array_t	ifl_sds;
	iflib_rxq_t	ifl_rxq;
	uint8_t		ifl_id;
	bus_dma_tag_t	ifl_buf_tag;
	iflib_dma_info_t	ifl_ifdi;
	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
	qidx_t		ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
} __aligned(CACHE_LINE_SIZE);

static inline qidx_t
get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
{
	qidx_t used;

	if (pidx > cidx)
		used = pidx - cidx;
	else if (pidx < cidx)
		used = size - cidx + pidx;
	else if (gen == 0 && pidx == cidx)
		used = 0;
	else if (gen == 1 && pidx == cidx)
		used = size;
	else
		panic("bad state");

	return (used);
}

#define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))

#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
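
/*
 * Worked example of the occupancy math above, assuming a ring of
 * size 1024:
 *
 *	pidx = 10,   cidx = 4,    any gen -> in use = 10 - 4 = 6
 *	pidx = 2,    cidx = 1020, any gen -> in use = 1024 - 1020 + 2 = 6
 *	pidx == cidx, gen == 0            -> in use = 0    (ring empty)
 *	pidx == cidx, gen == 1            -> in use = 1024 (ring full)
 *
 * The generation bit exists solely to disambiguate the otherwise
 * identical empty and full cases; TXQ_AVAIL() and IDXDIFF() build on
 * the same wrap-around arithmetic.
 */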

struct iflib_rxq {
	if_ctx_t	ifr_ctx;
	iflib_fl_t	ifr_fl;
	uint64_t	ifr_rx_irq;
	struct pfil_head	*pfil;
	/*
	 * If there is a separate completion queue (IFLIB_HAS_RXCQ), this is
	 * the completion queue consumer index.  Otherwise it's unused.
	 */
	qidx_t		ifr_cq_cidx;
	uint16_t	ifr_id;
	uint8_t		ifr_nfl;
	uint8_t		ifr_ntxqirq;
	uint8_t		ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
	uint8_t		ifr_fl_offset;
	struct lro_ctrl		ifr_lc;
	struct grouptask	ifr_task;
	struct callout		ifr_watchdog;
	struct iflib_filter_info ifr_filter_info;
	iflib_dma_info_t	ifr_ifdi;

	/* dynamically allocate if any drivers need a value substantially larger than this */
	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ifr_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

typedef struct if_rxsd {
	caddr_t *ifsd_cl;
	iflib_fl_t ifsd_fl;
} *if_rxsd_t;

/* multiple of word size */
#ifdef __LP64__
#define PKT_INFO_SIZE	6
#define RXD_INFO_SIZE	5
#define PKT_TYPE uint64_t
#else
#define PKT_INFO_SIZE	11
#define RXD_INFO_SIZE	8
#define PKT_TYPE uint32_t
#endif
#define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
#define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)

typedef struct if_pkt_info_pad {
	PKT_TYPE pkt_val[PKT_INFO_SIZE];
} *if_pkt_info_pad_t;
typedef struct if_rxd_info_pad {
	PKT_TYPE rxd_val[RXD_INFO_SIZE];
} *if_rxd_info_pad_t;

CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));

static inline void
pkt_info_zero(if_pkt_info_t pi)
{
	if_pkt_info_pad_t pi_pad;

	pi_pad = (if_pkt_info_pad_t)pi;
	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
#ifndef __LP64__
	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
#endif
}

static device_method_t iflib_pseudo_methods[] = {
	DEVMETHOD(device_attach, noop_attach),
	DEVMETHOD(device_detach, iflib_pseudo_detach),
	DEVMETHOD_END
};

driver_t iflib_pseudodriver = {
	"iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx),
};

static inline void
rxd_info_zero(if_rxd_info_t ri)
{
	if_rxd_info_pad_t ri_pad;
	int i;

	ri_pad = (if_rxd_info_pad_t)ri;
	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
		ri_pad->rxd_val[i] = 0;
		ri_pad->rxd_val[i+1] = 0;
		ri_pad->rxd_val[i+2] = 0;
		ri_pad->rxd_val[i+3] = 0;
	}
#ifdef __LP64__
	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
#endif
}
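
/*
 * pkt_info_zero() and rxd_info_zero() above rely on the *_pad overlay
 * types: the CTASSERTs guarantee that the pad structs are exactly the
 * same size as struct if_pkt_info / struct if_rxd_info, so zeroing
 * PKT_INFO_SIZE (or RXD_INFO_SIZE) machine words is equivalent to a
 * bzero() of the real struct while unrolling into a handful of word
 * stores on these hot paths.
 */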

/*
 * Only allow a single packet to take up at most 1/nth of the tx ring
 */
#define MAX_SINGLE_PACKET_FRACTION 12
#define IF_BAD_DMA (bus_addr_t)-1

#define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))

#define CTX_LOCK_INIT(_sc)  sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
#define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
#define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
#define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)

#define STATE_LOCK_INIT(_sc, _name)  mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
#define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
#define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
#define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)

#define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
#define CALLOUT_UNLOCK(txq)	mtx_unlock(&txq->ift_mtx)

void
iflib_set_detach(if_ctx_t ctx)
{
	STATE_LOCK(ctx);
	ctx->ifc_flags |= IFC_IN_DETACH;
	STATE_UNLOCK(ctx);
}

/* Our boot-time initialization hook */
static int	iflib_module_event_handler(module_t, int, void *);

static moduledata_t iflib_moduledata = {
	"iflib",
	iflib_module_event_handler,
	NULL
};

DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
MODULE_VERSION(iflib, 1);

MODULE_DEPEND(iflib, pci, 1, 1, 1);
MODULE_DEPEND(iflib, ether, 1, 1, 1);

TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
TASKQGROUP_DEFINE(if_config_tqg, 1, 1);

#ifndef IFLIB_DEBUG_COUNTERS
#ifdef INVARIANTS
#define IFLIB_DEBUG_COUNTERS 1
#else
#define IFLIB_DEBUG_COUNTERS 0
#endif /* INVARIANTS */
#endif

static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "iflib driver parameters");

/*
 * XXX need to ensure that this can't accidentally cause the head to be moved backwards
 */
static int iflib_min_tx_latency = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
    &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
static int iflib_no_tx_batch = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
    &iflib_no_tx_batch, 0, "disable transmit batching at the possible expense of throughput");
static int iflib_timer_default = 1000;
SYSCTL_INT(_net_iflib, OID_AUTO, timer_default, CTLFLAG_RW,
    &iflib_timer_default, 0, "number of ticks between iflib_timer calls");
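
/*
 * The knobs above are ordinary run-time sysctls; for example (values
 * are arbitrary, shown only to illustrate the OID paths):
 *
 *	sysctl net.iflib.min_tx_latency=1
 *	sysctl net.iflib.timer_default=500
 */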

#if IFLIB_DEBUG_COUNTERS

static int iflib_tx_seen;
static int iflib_tx_sent;
static int iflib_tx_encap;
static int iflib_rx_allocs;
static int iflib_fl_refills;
static int iflib_fl_refills_large;
static int iflib_tx_frees;

SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
    &iflib_tx_seen, 0, "# TX mbufs seen");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
    &iflib_tx_sent, 0, "# TX mbufs sent");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
    &iflib_tx_encap, 0, "# TX mbufs encapped");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
    &iflib_tx_frees, 0, "# TX frees");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
    &iflib_rx_allocs, 0, "# RX allocations");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
    &iflib_fl_refills, 0, "# refills");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
    &iflib_fl_refills_large, 0, "# large refills");

static int iflib_txq_drain_flushing;
static int iflib_txq_drain_oactive;
static int iflib_txq_drain_notready;

SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
    &iflib_txq_drain_flushing, 0, "# drain flushes");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
    &iflib_txq_drain_oactive, 0, "# drain oactives");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
    &iflib_txq_drain_notready, 0, "# drain notready");

static int iflib_encap_load_mbuf_fail;
static int iflib_encap_pad_mbuf_fail;
static int iflib_encap_txq_avail_fail;
static int iflib_encap_txd_encap_fail;

SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
    &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
    &iflib_encap_txd_encap_fail, 0, "# driver encap failures");

static int iflib_task_fn_rxs;
static int iflib_rx_intr_enables;
static int iflib_fast_intrs;
static int iflib_rx_unavail;
static int iflib_rx_ctx_inactive;
static int iflib_rx_if_input;
static int iflib_rxd_flush;

static int iflib_verbose_debug;

SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
    &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
    &iflib_rx_intr_enables, 0, "# RX intr enables");
SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
    &iflib_fast_intrs, 0, "# fast_intr calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
    &iflib_rx_unavail, 0, "# times rxeof called with no available data");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
    &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
    &iflib_rx_if_input, 0, "# times rxeof called if_input");
SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
    &iflib_rxd_flush, 0, "# times rxd_flush called");
SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
    &iflib_verbose_debug, 0, "enable verbose debugging");

#define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
static void
iflib_debug_reset(void)
{
	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
	    iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
	    iflib_txq_drain_flushing = iflib_txq_drain_oactive =
	    iflib_txq_drain_notready =
	    iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
	    iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
	    iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
	    iflib_rx_unavail =
	    iflib_rx_ctx_inactive = iflib_rx_if_input =
	    iflib_rxd_flush = 0;
}

#else
#define DBG_COUNTER_INC(name)
static void iflib_debug_reset(void) {}
#endif

#define IFLIB_DEBUG 0

static void iflib_tx_structures_free(if_ctx_t ctx);
static void iflib_rx_structures_free(if_ctx_t ctx);
static int iflib_queues_alloc(if_ctx_t ctx);
static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
static int iflib_qset_structures_setup(if_ctx_t ctx);
static int iflib_msix_init(if_ctx_t ctx);
static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str);
static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
#ifdef ALTQ
static void iflib_altq_if_start(if_t ifp);
static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m);
#endif
static int iflib_register(if_ctx_t);
static void iflib_deregister(if_ctx_t);
static void iflib_unregister_vlan_handlers(if_ctx_t ctx);
static uint16_t iflib_get_mbuf_size_for(unsigned int size);
static void iflib_init_locked(if_ctx_t ctx);
static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
static void iflib_add_device_sysctl_post(if_ctx_t ctx);
static void iflib_ifmp_purge(iflib_txq_t txq);
static void _iflib_pre_assert(if_softc_ctx_t scctx);
static void iflib_if_init_locked(if_ctx_t ctx);
static void iflib_free_intr_mem(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf * iflib_fixup_rx(struct mbuf *m);
#endif

static SLIST_HEAD(cpu_offset_list, cpu_offset) cpu_offsets =
    SLIST_HEAD_INITIALIZER(cpu_offsets);
struct cpu_offset {
	SLIST_ENTRY(cpu_offset) entries;
	cpuset_t	set;
	unsigned int	refcount;
	uint16_t	offset;
};
static struct mtx cpu_offset_mtx;
MTX_SYSINIT(iflib_cpu_offset, &cpu_offset_mtx, "iflib_cpu_offset lock",
    MTX_DEF);

DEBUGNET_DEFINE(iflib);

static int
iflib_num_rx_descs(if_ctx_t ctx)
{
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	uint16_t first_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;

	return (scctx->isc_nrxd[first_rxq]);
}

static int
iflib_num_tx_descs(if_ctx_t ctx)
{
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	uint16_t first_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;

	return (scctx->isc_ntxd[first_txq]);
}
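
/*
 * In both helpers above, the descriptor count is taken from slot 1 of
 * isc_nrxd/isc_ntxd rather than slot 0 when the driver advertises a
 * separate completion queue (IFLIB_HAS_RXCQ / IFLIB_HAS_TXCQ): in that
 * layout, index 0 describes the completion ring, so the first "real"
 * free list / TX ring starts at index 1.  (This is a reading of the
 * code above rather than a contract stated in this file.)
 */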

#ifdef DEV_NETMAP
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

MODULE_DEPEND(iflib, netmap, 1, 1, 1);

static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, bool init);
static void iflib_netmap_timer(void *arg);

/*
 * device-specific sysctl variables:
 *
 * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
 *	During regular operations the CRC is stripped, but on some
 *	hardware reception of frames not multiple of 64 is slower,
 *	so using crcstrip=0 helps in benchmarks.
 *
 * iflib_rx_miss, iflib_rx_miss_bufs:
 *	count packets that might be missed due to lost interrupts.
 */
SYSCTL_DECL(_dev_netmap);
/*
 * The xl driver by default strips CRCs and we do not override it.
 */

int iflib_crcstrip = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
    CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on RX frames");

int iflib_rx_miss, iflib_rx_miss_bufs;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
    CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed RX intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
    CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed RX intr bufs");
816 */ 817 if (onoff) { 818 nm_set_native_flags(na); 819 } else { 820 nm_clear_native_flags(na); 821 } 822 823 iflib_init_locked(ctx); 824 IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ? 825 status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1; 826 if (status) 827 nm_clear_native_flags(na); 828 CTX_UNLOCK(ctx); 829 return (status); 830 } 831 832 static int 833 netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, bool init) 834 { 835 struct netmap_adapter *na = kring->na; 836 u_int const lim = kring->nkr_num_slots - 1; 837 struct netmap_ring *ring = kring->ring; 838 bus_dmamap_t *map; 839 struct if_rxd_update iru; 840 if_ctx_t ctx = rxq->ifr_ctx; 841 iflib_fl_t fl = &rxq->ifr_fl[0]; 842 u_int nic_i_first, nic_i; 843 u_int nm_i; 844 int i, n; 845 #if IFLIB_DEBUG_COUNTERS 846 int rf_count = 0; 847 #endif 848 849 /* 850 * This function is used both at initialization and in rxsync. 851 * At initialization we need to prepare (with isc_rxd_refill()) 852 * all the netmap buffers currently owned by the kernel, in 853 * such a way to keep fl->ifl_pidx and kring->nr_hwcur in sync 854 * (except for kring->nkr_hwofs). These may be less than 855 * kring->nkr_num_slots if netmap_reset() was called while 856 * an application using the kring that still owned some 857 * buffers. 858 * At rxsync time, both indexes point to the next buffer to be 859 * refilled. 860 * In any case we publish (with isc_rxd_flush()) up to 861 * (fl->ifl_pidx - 1) % N (included), to avoid the NIC tail/prod 862 * pointer to overrun the head/cons pointer, although this is 863 * not necessary for some NICs (e.g. vmx). 864 */ 865 if (__predict_false(init)) { 866 n = kring->nkr_num_slots - nm_kr_rxspace(kring); 867 } else { 868 n = kring->rhead - kring->nr_hwcur; 869 if (n == 0) 870 return (0); /* Nothing to do. */ 871 if (n < 0) 872 n += kring->nkr_num_slots; 873 } 874 875 iru_init(&iru, rxq, 0 /* flid */); 876 map = fl->ifl_sds.ifsd_map; 877 nic_i = fl->ifl_pidx; 878 nm_i = netmap_idx_n2k(kring, nic_i); 879 if (__predict_false(init)) { 880 /* 881 * On init/reset, nic_i must be 0, and we must 882 * start to refill from hwtail (see netmap_reset()). 883 */ 884 MPASS(nic_i == 0); 885 MPASS(nm_i == kring->nr_hwtail); 886 } else 887 MPASS(nm_i == kring->nr_hwcur); 888 DBG_COUNTER_INC(fl_refills); 889 while (n > 0) { 890 #if IFLIB_DEBUG_COUNTERS 891 if (++rf_count == 9) 892 DBG_COUNTER_INC(fl_refills_large); 893 #endif 894 nic_i_first = nic_i; 895 for (i = 0; n > 0 && i < IFLIB_MAX_RX_REFRESH; n--, i++) { 896 struct netmap_slot *slot = &ring->slot[nm_i]; 897 void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[i]); 898 899 MPASS(i < IFLIB_MAX_RX_REFRESH); 900 901 if (addr == NETMAP_BUF_BASE(na)) /* bad buf */ 902 return netmap_ring_reinit(kring); 903 904 fl->ifl_rxd_idxs[i] = nic_i; 905 906 if (__predict_false(init)) { 907 netmap_load_map(na, fl->ifl_buf_tag, 908 map[nic_i], addr); 909 } else if (slot->flags & NS_BUF_CHANGED) { 910 /* buffer has changed, reload map */ 911 netmap_reload_map(na, fl->ifl_buf_tag, 912 map[nic_i], addr); 913 } 914 bus_dmamap_sync(fl->ifl_buf_tag, map[nic_i], 915 BUS_DMASYNC_PREREAD); 916 slot->flags &= ~NS_BUF_CHANGED; 917 918 nm_i = nm_next(nm_i, lim); 919 nic_i = nm_next(nic_i, lim); 920 } 921 922 iru.iru_pidx = nic_i_first; 923 iru.iru_count = i; 924 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); 925 } 926 fl->ifl_pidx = nic_i; 927 /* 928 * At the end of the loop we must have refilled everything 929 * we could possibly refill. 

#define NETMAP_TX_TIMER_US	90

/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * All information is in the kring.
 * Userspace wants to send packets up to the one before kring->rhead,
 * kernel knows kring->nr_hwcur is the first unsent packet.
 *
 * Here we push packets out (as many as possible), and possibly
 * reclaim buffers from previously completed transmissions.
 *
 * The caller (netmap) guarantees that there is only one instance
 * running at any time. Any interference with other driver
 * methods should be handled by the individual drivers.
 */
static int
iflib_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	if_t ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap kring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct if_pkt_info pi;

	/*
	 * interrupts on every tx packet are expensive so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;
	/* device-specific */
	if_ctx_t ctx = ifp->if_softc;
	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];

	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap kring,
	 * nic_i is the corresponding index in the NIC ring.
	 *
	 * If we have packets to send (nm_i != head)
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() call is especially expensive,
	 * even when (as in this case) the tag is 0, so only do it
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		pkt_info_zero(&pi);
		pi.ipi_segs = txq->ift_segs;
		pi.ipi_qsidx = kring->ring_id;
		nic_i = netmap_idx_k2n(kring, nm_i);

		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);

		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int flags = (slot->flags & NS_REPORT ||
			    nic_i == 0 || nic_i == report_frequency) ?
			    IPI_TX_INTR : 0;

			/* device-specific */
			pi.ipi_len = len;
			pi.ipi_segs[0].ds_addr = paddr;
			pi.ipi_segs[0].ds_len = len;
			pi.ipi_nsegs = 1;
			pi.ipi_ndescs = 0;
			pi.ipi_pidx = nic_i;
			pi.ipi_flags = flags;

			/* Fill the slot in the NIC ring. */
			ctx->isc_txd_encap(ctx->ifc_softc, &pi);
			DBG_COUNTER_INC(tx_encap);

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);

			NM_CHECK_ADDR_LEN(na, addr, len);

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, txq->ift_buf_tag,
				    txq->ift_sds.ifsd_map[nic_i], addr);
			}
			/* make sure changes to the buffer are synced */
			bus_dmamap_sync(txq->ift_buf_tag,
			    txq->ift_sds.ifsd_map[nic_i],
			    BUS_DMASYNC_PREWRITE);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = nm_i;

		/* synchronize the NIC ring */
		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 *
	 * If there are unclaimed buffers, attempt to reclaim them.
	 * If we don't manage to reclaim them all, and TX IRQs are not in use,
	 * trigger a per-tx-queue timer to try again later.
	 */
	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
		if (iflib_tx_credits_update(ctx, txq)) {
			/* some tx completed, increment avail */
			nic_i = txq->ift_cidx_processed;
			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
		}
	}

	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
		if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
			callout_reset_sbt_on(&txq->ift_netmap_timer,
			    NETMAP_TX_TIMER_US * SBT_1US, SBT_1US,
			    iflib_netmap_timer, txq,
			    txq->ift_netmap_timer.c_cpu, 0);
		}
	return (0);
}
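
/*
 * Example of the interrupt-moderation policy above: with a 1024-slot
 * ring, report_frequency is 512, so IPI_TX_INTR is requested for the
 * descriptor at slot 0, the one at slot 512, and any slot whose netmap
 * flags carry NS_REPORT; all other packets are queued without asking
 * for a completion interrupt.
 */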

/*
 * Reconcile kernel and user view of the receive ring.
 * Same as for the txsync, this routine must be efficient.
 * The caller guarantees a single invocation, but races against
 * the rest of the driver should be handled here.
 *
 * On call, kring->rhead is the first packet that userspace wants
 * to keep, and kring->rcur is the wakeup point.
 * The kernel has previously reported packets up to kring->rtail.
 *
 * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
 * of whether or not we received an interrupt.
 */
static int
iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	if_t ifp = na->ifp;
	uint32_t nm_i;	/* index into the netmap ring */
	uint32_t nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	if_ctx_t ctx = ifp->if_softc;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
	iflib_fl_t fl = &rxq->ifr_fl[0];
	struct if_rxd_info ri;
	qidx_t *cidxp;

	/*
	 * netmap only uses free list 0, to avoid out of order consumption
	 * of receive buffers
	 */

	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: import newly received packets.
	 *
	 * nm_i is the index of the next free slot in the netmap ring,
	 * nic_i is the index of the next received packet in the NIC ring
	 * (or in the free list 0 if IFLIB_HAS_RXCQ is set), and they may
	 * differ in case if_init() has been called while
	 * in netmap mode. For the receive ring we have
	 *
	 *	nic_i = fl->ifl_cidx;
	 *	nm_i = kring->nr_hwtail (previous)
	 * and
	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 *
	 * fl->ifl_cidx is set to 0 on a ring reinit
	 */
	if (netmap_no_pendintr || force_update) {
		uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim);
		bool have_rxcq = sctx->isc_flags & IFLIB_HAS_RXCQ;
		int crclen = iflib_crcstrip ? 0 : 4;
		int error, avail;

		/*
		 * For the free list consumer index, we use the same
		 * logic as in iflib_rxeof().
		 */
		if (have_rxcq)
			cidxp = &rxq->ifr_cq_cidx;
		else
			cidxp = &fl->ifl_cidx;
		avail = ctx->isc_rxd_available(ctx->ifc_softc,
		    rxq->ifr_id, *cidxp, USHRT_MAX);

		nic_i = fl->ifl_cidx;
		nm_i = netmap_idx_n2k(kring, nic_i);
		MPASS(nm_i == kring->nr_hwtail);
		for (n = 0; avail > 0 && nm_i != hwtail_lim; n++, avail--) {
			rxd_info_zero(&ri);
			ri.iri_frags = rxq->ifr_frags;
			ri.iri_qsidx = kring->ring_id;
			ri.iri_ifp = ctx->ifc_ifp;
			ri.iri_cidx = *cidxp;

			error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
			ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
			ring->slot[nm_i].flags = 0;
			if (have_rxcq) {
				*cidxp = ri.iri_cidx;
				while (*cidxp >= scctx->isc_nrxd[0])
					*cidxp -= scctx->isc_nrxd[0];
			}
			bus_dmamap_sync(fl->ifl_buf_tag,
			    fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
			nm_i = nm_next(nm_i, lim);
			fl->ifl_cidx = nic_i = nm_next(nic_i, lim);
		}
		if (n) { /* update the state variables */
			if (netmap_no_pendintr && !force_update) {
				/* diagnostics */
				iflib_rx_miss++;
				iflib_rx_miss_bufs += n;
			}
			kring->nr_hwtail = nm_i;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}
	/*
	 * Second part: skip past packets that userspace has released
	 * (kring->nr_hwcur to head excluded),
	 * and make the buffers available for reception.
	 * As usual nm_i is the index in the netmap ring,
	 * nic_i is the index in the NIC ring, and
	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 */
	netmap_fl_refill(rxq, kring, false);

	return (0);
}

static void
iflib_netmap_intr(struct netmap_adapter *na, int onoff)
{
	if_ctx_t ctx = na->ifp->if_softc;

	CTX_LOCK(ctx);
	if (onoff) {
		IFDI_INTR_ENABLE(ctx);
	} else {
		IFDI_INTR_DISABLE(ctx);
	}
	CTX_UNLOCK(ctx);
}

static int
iflib_netmap_attach(if_ctx_t ctx)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = ctx->ifc_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);

	na.num_tx_desc = iflib_num_tx_descs(ctx);
	na.num_rx_desc = iflib_num_rx_descs(ctx);
	na.nm_txsync = iflib_netmap_txsync;
	na.nm_rxsync = iflib_netmap_rxsync;
	na.nm_register = iflib_netmap_register;
	na.nm_intr = iflib_netmap_intr;
	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
	return (netmap_attach(&na));
}

static int
iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_slot *slot;

	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
	if (slot == NULL)
		return (0);
	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
		 * netmap slot index, si
		 */
		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
		netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i],
		    NMB(na, slot + si));
	}
	return (1);
}

static int
iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_kring *kring;
	struct netmap_slot *slot;

	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
	if (slot == NULL)
		return (0);
	kring = na->rx_rings[rxq->ifr_id];
	netmap_fl_refill(rxq, kring, true);
	return (1);
}

static void
iflib_netmap_timer(void *arg)
{
	iflib_txq_t txq = arg;
	if_ctx_t ctx = txq->ift_ctx;

	/*
	 * Wake up the netmap application, to give it a chance to
	 * call txsync and reclaim more completed TX buffers.
	 */
	netmap_tx_irq(ctx->ifc_ifp, txq->ift_id);
}

#define iflib_netmap_detach(ifp) netmap_detach(ifp)

#else
#define iflib_netmap_txq_init(ctx, txq) (0)
#define iflib_netmap_rxq_init(ctx, rxq) (0)
#define iflib_netmap_detach(ifp)
#define netmap_enable_all_rings(ifp)
#define netmap_disable_all_rings(ifp)

#define iflib_netmap_attach(ctx) (0)
#define netmap_rx_irq(ifp, qid, budget) (0)
#endif

#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}

static __inline void
prefetch2cachelines(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
#if (CACHE_LINE_SIZE < 128)
	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
#endif
}
#else
#define prefetch(x)
#define prefetch2cachelines(x)
#endif

static void
iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
{
	iflib_fl_t fl;

	fl = &rxq->ifr_fl[flid];
	iru->iru_paddrs = fl->ifl_bus_addrs;
	iru->iru_idxs = fl->ifl_rxd_idxs;
	iru->iru_qsidx = rxq->ifr_id;
	iru->iru_buf_size = fl->ifl_buf_size;
	iru->iru_flidx = fl->ifl_id;
}

static void
_iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	if (err)
		return;
	*(bus_addr_t *) arg = segs[0].ds_addr;
}

int
iflib_dma_alloc_align(if_ctx_t ctx, int size, int align, iflib_dma_info_t dma, int mapflags)
{
	int err;
	device_t dev = ctx->ifc_dev;

	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    align, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &dma->idi_tag);
	if (err) {
		device_printf(dev,
		    "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto fail_0;
	}

	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
	if (err) {
		device_printf(dev,
		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
		    __func__, (uintmax_t)size, err);
		goto fail_1;
	}

	dma->idi_paddr = IF_BAD_DMA;
	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
	if (err || dma->idi_paddr == IF_BAD_DMA) {
		device_printf(dev,
		    "%s: bus_dmamap_load failed: %d\n",
		    __func__, err);
		goto fail_2;
	}

	dma->idi_size = size;
	return (0);

fail_2:
	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
fail_1:
	bus_dma_tag_destroy(dma->idi_tag);
fail_0:
	dma->idi_tag = NULL;

	return (err);
}

int
iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;

	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));

	return (iflib_dma_alloc_align(ctx, size, sctx->isc_q_align, dma, mapflags));
}
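
/*
 * Sketch of typical driver usage of the allocators above (hypothetical
 * names, not code from this file): allocate one coherent, zeroed slab
 * for a descriptor ring at attach time and release it on detach.
 *
 *	struct my_softc *sc = iflib_get_softc(ctx);
 *
 *	if (iflib_dma_alloc(ctx, ndesc * sizeof(struct my_desc),
 *	    &sc->ring_dma, 0) != 0)
 *		return (ENOMEM);
 *	sc->ring = (struct my_desc *)sc->ring_dma.idi_vaddr;
 *	...
 *	iflib_dma_free(&sc->ring_dma);
 *
 * struct my_softc, struct my_desc, ring_dma, and ndesc are placeholders.
 */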

int
iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
{
	int i, err = 0;
	iflib_dma_info_t *dmaiter;

	dmaiter = dmalist;
	for (i = 0; i < count; i++, dmaiter++) {
		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
			break;
	}
	if (err)
		iflib_dma_free_multi(dmalist, i);
	return (err);
}

void
iflib_dma_free(iflib_dma_info_t dma)
{
	if (dma->idi_tag == NULL)
		return;
	if (dma->idi_paddr != IF_BAD_DMA) {
		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
		dma->idi_paddr = IF_BAD_DMA;
	}
	if (dma->idi_vaddr != NULL) {
		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
		dma->idi_vaddr = NULL;
	}
	bus_dma_tag_destroy(dma->idi_tag);
	dma->idi_tag = NULL;
}

void
iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
{
	int i;
	iflib_dma_info_t *dmaiter = dmalist;

	for (i = 0; i < count; i++, dmaiter++)
		iflib_dma_free(*dmaiter);
}

static int
iflib_fast_intr(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	int result;

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL) {
		result = info->ifi_filter(info->ifi_filter_arg);
		if ((result & FILTER_SCHEDULE_THREAD) == 0)
			return (result);
	}

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}

static int
iflib_fast_intr_rxtx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	if_ctx_t ctx;
	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
	iflib_txq_t txq;
	void *sc;
	int i, cidx, result;
	qidx_t txqid;
	bool intr_enable, intr_legacy;

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL) {
		result = info->ifi_filter(info->ifi_filter_arg);
		if ((result & FILTER_SCHEDULE_THREAD) == 0)
			return (result);
	}

	ctx = rxq->ifr_ctx;
	sc = ctx->ifc_softc;
	intr_enable = false;
	intr_legacy = !!(ctx->ifc_flags & IFC_LEGACY);
	MPASS(rxq->ifr_ntxqirq);
	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
		txqid = rxq->ifr_txqid[i];
		txq = &ctx->ifc_txqs[txqid];
		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD);
		if (!ctx->isc_txd_credits_update(sc, txqid, false)) {
			if (intr_legacy)
				intr_enable = true;
			else
				IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
			continue;
		}
		GROUPTASK_ENQUEUE(&txq->ift_task);
	}
	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
		cidx = rxq->ifr_cq_cidx;
	else
		cidx = rxq->ifr_fl[0].ifl_cidx;
	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
		GROUPTASK_ENQUEUE(gtask);
	else {
		if (intr_legacy)
			intr_enable = true;
		else
			IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
		DBG_COUNTER_INC(rx_intr_enables);
	}
	if (intr_enable)
		IFDI_INTR_ENABLE(ctx);
	return (FILTER_HANDLED);
}

static int
iflib_fast_intr_ctx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	int result;

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL) {
		result = info->ifi_filter(info->ifi_filter_arg);
		if ((result & FILTER_SCHEDULE_THREAD) == 0)
			return (result);
	}

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}
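
/*
 * The three filters above split the fast-interrupt work: iflib_fast_intr()
 * and iflib_fast_intr_ctx() simply run the optional driver filter and then
 * defer to the associated grouptask, while iflib_fast_intr_rxtx() also
 * walks the TX queues sharing the RX vector, re-enabling the per-queue
 * (or legacy) interrupt whenever a queue has no work.  In all three, a
 * driver filter that does not return FILTER_SCHEDULE_THREAD consumes the
 * interrupt outright.
 */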

static int
_iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    const char *name)
{
	struct resource *res;
	void *tag = NULL;
	device_t dev = ctx->ifc_dev;
	int flags, i, rc;

	flags = RF_ACTIVE;
	if (ctx->ifc_flags & IFC_LEGACY)
		flags |= RF_SHAREABLE;
	MPASS(rid < 512);
	i = rid;
	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, flags);
	if (res == NULL) {
		device_printf(dev,
		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
		return (ENOMEM);
	}
	irq->ii_res = res;
	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
	    filter, handler, arg, &tag);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, name ? name : "unknown", rc);
		return (rc);
	} else if (name)
		bus_describe_intr(dev, res, tag, "%s", name);

	irq->ii_tag = tag;
	return (0);
}

/*********************************************************************
 *
 *  Allocate DMA resources for TX buffers as well as memory for the TX
 *  mbuf map.  TX DMA maps (non-TSO/TSO) and TX mbuf map are kept in an
 *  iflib_sw_tx_desc_array structure, storing all the information that
 *  is needed to transmit a packet on the wire.  This is called only
 *  once at attach, setup is done every reset.
 *
 **********************************************************************/
static int
iflib_txsd_alloc(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	bus_size_t tsomaxsize;
	int err, nsegments, ntsosegments;
	bool tso;

	nsegments = scctx->isc_tx_nsegments;
	ntsosegments = scctx->isc_tx_tso_segments_max;
	tsomaxsize = scctx->isc_tx_tso_size_max;
	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU)
		tsomaxsize += sizeof(struct ether_vlan_header);
	MPASS(scctx->isc_ntxd[0] > 0);
	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
	MPASS(nsegments > 0);
	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) {
		MPASS(ntsosegments > 0);
		MPASS(sctx->isc_tso_maxsize >= tsomaxsize);
	}

	/*
	 * Set up DMA tags for TX buffers.
	 */
	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    sctx->isc_tx_maxsize,	/* maxsize */
	    nsegments,			/* nsegments */
	    sctx->isc_tx_maxsegsize,	/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &txq->ift_buf_tag))) {
		device_printf(dev, "Unable to allocate TX DMA tag: %d\n", err);
		device_printf(dev, "maxsize: %ju nsegments: %d maxsegsize: %ju\n",
		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
		goto fail;
	}
	tso = (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) != 0;
	if (tso && (err = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    tsomaxsize,			/* maxsize */
	    ntsosegments,		/* nsegments */
	    sctx->isc_tso_maxsegsize,	/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &txq->ift_tso_buf_tag))) {
		device_printf(dev, "Unable to allocate TSO TX DMA tag: %d\n",
		    err);
		goto fail;
	}

	/* Allocate memory for the TX mbuf map. */
	if (!(txq->ift_sds.ifsd_m =
	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate TX mbuf map memory\n");
		err = ENOMEM;
		goto fail;
	}

	/*
	 * Create the DMA maps for TX buffers.
	 */
	if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc(
	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
		device_printf(dev,
		    "Unable to allocate TX buffer DMA map memory\n");
		err = ENOMEM;
		goto fail;
	}
	if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc(
	    sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset],
	    M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) {
		device_printf(dev,
		    "Unable to allocate TSO TX buffer map memory\n");
		err = ENOMEM;
		goto fail;
	}
	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
		err = bus_dmamap_create(txq->ift_buf_tag, 0,
		    &txq->ift_sds.ifsd_map[i]);
		if (err != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
		if (!tso)
			continue;
		err = bus_dmamap_create(txq->ift_tso_buf_tag, 0,
		    &txq->ift_sds.ifsd_tso_map[i]);
		if (err != 0) {
			device_printf(dev, "Unable to create TSO TX DMA map\n");
			goto fail;
		}
	}
	return (0);
fail:
	/* We free all, it handles case where we are in the middle */
	iflib_tx_structures_free(ctx);
	return (err);
}

static void
iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
{
	bus_dmamap_t map;

	if (txq->ift_sds.ifsd_map != NULL) {
		map = txq->ift_sds.ifsd_map[i];
		bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->ift_buf_tag, map);
		bus_dmamap_destroy(txq->ift_buf_tag, map);
		txq->ift_sds.ifsd_map[i] = NULL;
	}

	if (txq->ift_sds.ifsd_tso_map != NULL) {
		map = txq->ift_sds.ifsd_tso_map[i];
		bus_dmamap_sync(txq->ift_tso_buf_tag, map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->ift_tso_buf_tag, map);
		bus_dmamap_destroy(txq->ift_tso_buf_tag, map);
		txq->ift_sds.ifsd_tso_map[i] = NULL;
	}
}

static void
iflib_txq_destroy(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;

	for (int i = 0; i < txq->ift_size; i++)
		iflib_txsd_destroy(ctx, txq, i);

	if (txq->ift_br != NULL) {
		ifmp_ring_free(txq->ift_br);
		txq->ift_br = NULL;
	}

	mtx_destroy(&txq->ift_mtx);

	if (txq->ift_sds.ifsd_map != NULL) {
		free(txq->ift_sds.ifsd_map, M_IFLIB);
		txq->ift_sds.ifsd_map = NULL;
	}
	if (txq->ift_sds.ifsd_tso_map != NULL) {
		free(txq->ift_sds.ifsd_tso_map, M_IFLIB);
		txq->ift_sds.ifsd_tso_map = NULL;
	}
	if (txq->ift_sds.ifsd_m != NULL) {
		free(txq->ift_sds.ifsd_m, M_IFLIB);
		txq->ift_sds.ifsd_m = NULL;
	}
	if (txq->ift_buf_tag != NULL) {
		bus_dma_tag_destroy(txq->ift_buf_tag);
		txq->ift_buf_tag = NULL;
	}
	if (txq->ift_tso_buf_tag != NULL) {
		bus_dma_tag_destroy(txq->ift_tso_buf_tag);
		txq->ift_tso_buf_tag = NULL;
	}
	if (txq->ift_ifdi != NULL) {
		free(txq->ift_ifdi, M_IFLIB);
	}
}

static void
iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
{
	struct mbuf **mp;

	mp = &txq->ift_sds.ifsd_m[i];
	if (*mp == NULL)
		return;

	if (txq->ift_sds.ifsd_map != NULL) {
		bus_dmamap_sync(txq->ift_buf_tag,
		    txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]);
	}
	if (txq->ift_sds.ifsd_tso_map != NULL) {
		bus_dmamap_sync(txq->ift_tso_buf_tag,
		    txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->ift_tso_buf_tag,
		    txq->ift_sds.ifsd_tso_map[i]);
	}
	m_freem(*mp);
	DBG_COUNTER_INC(tx_frees);
	*mp = NULL;
}

static int
iflib_txq_setup(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	iflib_dma_info_t di;
	int i;

	/* Set number of descriptors available */
	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	/* XXX make configurable */
	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;

	/* Reset indices */
	txq->ift_cidx_processed = 0;
	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];

	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
		bzero((void *)di->idi_vaddr, di->idi_size);

	IFDI_TXQ_SETUP(ctx, txq->ift_id);
	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
		bus_dmamap_sync(di->idi_tag, di->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}
1847 * 1848 **********************************************************************/ 1849 static int 1850 iflib_rxsd_alloc(iflib_rxq_t rxq) 1851 { 1852 if_ctx_t ctx = rxq->ifr_ctx; 1853 if_shared_ctx_t sctx = ctx->ifc_sctx; 1854 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 1855 device_t dev = ctx->ifc_dev; 1856 iflib_fl_t fl; 1857 int err; 1858 1859 MPASS(scctx->isc_nrxd[0] > 0); 1860 MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0); 1861 1862 fl = rxq->ifr_fl; 1863 for (int i = 0; i < rxq->ifr_nfl; i++, fl++) { 1864 fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */ 1865 /* Set up DMA tag for RX buffers. */ 1866 err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1867 1, 0, /* alignment, bounds */ 1868 BUS_SPACE_MAXADDR, /* lowaddr */ 1869 BUS_SPACE_MAXADDR, /* highaddr */ 1870 NULL, NULL, /* filter, filterarg */ 1871 sctx->isc_rx_maxsize, /* maxsize */ 1872 sctx->isc_rx_nsegments, /* nsegments */ 1873 sctx->isc_rx_maxsegsize, /* maxsegsize */ 1874 0, /* flags */ 1875 NULL, /* lockfunc */ 1876 NULL, /* lockarg */ 1877 &fl->ifl_buf_tag); 1878 if (err) { 1879 device_printf(dev, 1880 "Unable to allocate RX DMA tag: %d\n", err); 1881 goto fail; 1882 } 1883 1884 /* Allocate memory for the RX mbuf map. */ 1885 if (!(fl->ifl_sds.ifsd_m = 1886 (struct mbuf **) malloc(sizeof(struct mbuf *) * 1887 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { 1888 device_printf(dev, 1889 "Unable to allocate RX mbuf map memory\n"); 1890 err = ENOMEM; 1891 goto fail; 1892 } 1893 1894 /* Allocate memory for the direct RX cluster pointer map. */ 1895 if (!(fl->ifl_sds.ifsd_cl = 1896 (caddr_t *) malloc(sizeof(caddr_t) * 1897 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { 1898 device_printf(dev, 1899 "Unable to allocate RX cluster map memory\n"); 1900 err = ENOMEM; 1901 goto fail; 1902 } 1903 1904 /* Allocate memory for the RX cluster bus address map. */ 1905 if (!(fl->ifl_sds.ifsd_ba = 1906 (bus_addr_t *) malloc(sizeof(bus_addr_t) * 1907 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { 1908 device_printf(dev, 1909 "Unable to allocate RX bus address map memory\n"); 1910 err = ENOMEM; 1911 goto fail; 1912 } 1913 1914 /* 1915 * Create the DMA maps for RX buffers. 
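* (One map is created per receive descriptor; iflib_fl_refill() loads them lazily as clusters are actually allocated.)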
1916 */ 1917 if (!(fl->ifl_sds.ifsd_map = 1918 (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { 1919 device_printf(dev, 1920 "Unable to allocate RX buffer DMA map memory\n"); 1921 err = ENOMEM; 1922 goto fail; 1923 } 1924 for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) { 1925 err = bus_dmamap_create(fl->ifl_buf_tag, 0, 1926 &fl->ifl_sds.ifsd_map[i]); 1927 if (err != 0) { 1928 device_printf(dev, "Unable to create RX buffer DMA map\n"); 1929 goto fail; 1930 } 1931 } 1932 } 1933 return (0); 1934 1935 fail: 1936 iflib_rx_structures_free(ctx); 1937 return (err); 1938 } 1939 1940 /* 1941 * Internal service routines 1942 */ 1943 1944 struct rxq_refill_cb_arg { 1945 int error; 1946 bus_dma_segment_t seg; 1947 int nseg; 1948 }; 1949 1950 static void 1951 _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1952 { 1953 struct rxq_refill_cb_arg *cb_arg = arg; 1954 1955 cb_arg->error = error; 1956 cb_arg->seg = segs[0]; 1957 cb_arg->nseg = nseg; 1958 } 1959 1960 /** 1961 * iflib_fl_refill - refill an rxq free-buffer list 1962 * @ctx: the iflib context 1963 * @fl: the free list to refill 1964 * @count: the number of new buffers to allocate 1965 * 1966 * (Re)populate an rxq free-buffer list with up to @count new packet buffers. 1967 * The caller must assure that @count does not exceed the queue's capacity 1968 * minus one (since we always leave a descriptor unavailable). 1969 */ 1970 static uint8_t 1971 iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count) 1972 { 1973 struct if_rxd_update iru; 1974 struct rxq_refill_cb_arg cb_arg; 1975 struct mbuf *m; 1976 caddr_t cl, *sd_cl; 1977 struct mbuf **sd_m; 1978 bus_dmamap_t *sd_map; 1979 bus_addr_t bus_addr, *sd_ba; 1980 int err, frag_idx, i, idx, n, pidx; 1981 qidx_t credits; 1982 1983 MPASS(count <= fl->ifl_size - fl->ifl_credits - 1); 1984 1985 sd_m = fl->ifl_sds.ifsd_m; 1986 sd_map = fl->ifl_sds.ifsd_map; 1987 sd_cl = fl->ifl_sds.ifsd_cl; 1988 sd_ba = fl->ifl_sds.ifsd_ba; 1989 pidx = fl->ifl_pidx; 1990 idx = pidx; 1991 frag_idx = fl->ifl_fragidx; 1992 credits = fl->ifl_credits; 1993 1994 i = 0; 1995 n = count; 1996 MPASS(n > 0); 1997 MPASS(credits + n <= fl->ifl_size); 1998 1999 if (pidx < fl->ifl_cidx) 2000 MPASS(pidx + n <= fl->ifl_cidx); 2001 if (pidx == fl->ifl_cidx && (credits < fl->ifl_size)) 2002 MPASS(fl->ifl_gen == 0); 2003 if (pidx > fl->ifl_cidx) 2004 MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx); 2005 2006 DBG_COUNTER_INC(fl_refills); 2007 if (n > 8) 2008 DBG_COUNTER_INC(fl_refills_large); 2009 iru_init(&iru, fl->ifl_rxq, fl->ifl_id); 2010 while (n-- > 0) { 2011 /* 2012 * We allocate an uninitialized mbuf + cluster, mbuf is 2013 * initialized after rx. 
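* (m_gethdr(M_NOWAIT, MT_NOINIT) hands back an uninitialized mbuf; the RX path only m_init()s it once a packet actually lands in the buffer.)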
2014 * 2015 * If the cluster is still set then we know a minimum-sized 2016 * packet was received 2017 */ 2018 bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, 2019 &frag_idx); 2020 if (frag_idx < 0) 2021 bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx); 2022 MPASS(frag_idx >= 0); 2023 if ((cl = sd_cl[frag_idx]) == NULL) { 2024 cl = uma_zalloc(fl->ifl_zone, M_NOWAIT); 2025 if (__predict_false(cl == NULL)) 2026 break; 2027 2028 cb_arg.error = 0; 2029 MPASS(sd_map != NULL); 2030 err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx], 2031 cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 2032 BUS_DMA_NOWAIT); 2033 if (__predict_false(err != 0 || cb_arg.error)) { 2034 uma_zfree(fl->ifl_zone, cl); 2035 break; 2036 } 2037 2038 sd_ba[frag_idx] = bus_addr = cb_arg.seg.ds_addr; 2039 sd_cl[frag_idx] = cl; 2040 #if MEMORY_LOGGING 2041 fl->ifl_cl_enqueued++; 2042 #endif 2043 } else { 2044 bus_addr = sd_ba[frag_idx]; 2045 } 2046 bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx], 2047 BUS_DMASYNC_PREREAD); 2048 2049 if (sd_m[frag_idx] == NULL) { 2050 m = m_gethdr(M_NOWAIT, MT_NOINIT); 2051 if (__predict_false(m == NULL)) 2052 break; 2053 sd_m[frag_idx] = m; 2054 } 2055 bit_set(fl->ifl_rx_bitmap, frag_idx); 2056 #if MEMORY_LOGGING 2057 fl->ifl_m_enqueued++; 2058 #endif 2059 2060 DBG_COUNTER_INC(rx_allocs); 2061 fl->ifl_rxd_idxs[i] = frag_idx; 2062 fl->ifl_bus_addrs[i] = bus_addr; 2063 credits++; 2064 i++; 2065 MPASS(credits <= fl->ifl_size); 2066 if (++idx == fl->ifl_size) { 2067 #ifdef INVARIANTS 2068 fl->ifl_gen = 1; 2069 #endif 2070 idx = 0; 2071 } 2072 if (n == 0 || i == IFLIB_MAX_RX_REFRESH) { 2073 iru.iru_pidx = pidx; 2074 iru.iru_count = i; 2075 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); 2076 fl->ifl_pidx = idx; 2077 fl->ifl_credits = credits; 2078 pidx = idx; 2079 i = 0; 2080 } 2081 } 2082 2083 if (n < count - 1) { 2084 if (i != 0) { 2085 iru.iru_pidx = pidx; 2086 iru.iru_count = i; 2087 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); 2088 fl->ifl_pidx = idx; 2089 fl->ifl_credits = credits; 2090 } 2091 DBG_COUNTER_INC(rxd_flush); 2092 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 2093 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2094 ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, 2095 fl->ifl_id, fl->ifl_pidx); 2096 if (__predict_true(bit_test(fl->ifl_rx_bitmap, frag_idx))) { 2097 fl->ifl_fragidx = frag_idx + 1; 2098 if (fl->ifl_fragidx == fl->ifl_size) 2099 fl->ifl_fragidx = 0; 2100 } else { 2101 fl->ifl_fragidx = frag_idx; 2102 } 2103 } 2104 2105 return (n == -1 ? 0 : IFLIB_RXEOF_EMPTY); 2106 } 2107 2108 static inline uint8_t 2109 iflib_fl_refill_all(if_ctx_t ctx, iflib_fl_t fl) 2110 { 2111 /* 2112 * We leave one descriptor unused to keep pidx from catching up with 2113 * cidx; pidx == cidx would otherwise be ambiguous and confuses most 2114 * NICs. For instance, Intel NICs have (per receive ring) RDH and RDT 2115 * registers, where RDH points to the next receive descriptor to be 2116 * used by the NIC, and RDT points to the next receive descriptor to 2117 * be published by the driver to the NIC (RDT - 1 is thus the last 2118 * valid one). The condition RDH == RDT means no descriptors are 2119 * available to the NIC, and thus it would be ambiguous if it also 2120 * meant that all the descriptors are available to the NIC.
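* As a concrete example: in a 4-descriptor ring with RDH == RDT == 2, the NIC could not tell an empty ring from a completely full one. Holding one descriptor back means RDH == RDT can only ever mean "empty".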
2121 */ 2122 int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1; 2123 #ifdef INVARIANTS 2124 int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1; 2125 #endif 2126 2127 MPASS(fl->ifl_credits <= fl->ifl_size); 2128 MPASS(reclaimable == delta); 2129 2130 if (reclaimable > 0) 2131 return (iflib_fl_refill(ctx, fl, reclaimable)); 2132 return (0); 2133 } 2134 2135 uint8_t 2136 iflib_in_detach(if_ctx_t ctx) 2137 { 2138 bool in_detach; 2139 2140 STATE_LOCK(ctx); 2141 in_detach = !!(ctx->ifc_flags & IFC_IN_DETACH); 2142 STATE_UNLOCK(ctx); 2143 return (in_detach); 2144 } 2145 2146 static void 2147 iflib_fl_bufs_free(iflib_fl_t fl) 2148 { 2149 iflib_dma_info_t idi = fl->ifl_ifdi; 2150 bus_dmamap_t sd_map; 2151 uint32_t i; 2152 2153 for (i = 0; i < fl->ifl_size; i++) { 2154 struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i]; 2155 caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i]; 2156 2157 if (*sd_cl != NULL) { 2158 sd_map = fl->ifl_sds.ifsd_map[i]; 2159 bus_dmamap_sync(fl->ifl_buf_tag, sd_map, 2160 BUS_DMASYNC_POSTREAD); 2161 bus_dmamap_unload(fl->ifl_buf_tag, sd_map); 2162 uma_zfree(fl->ifl_zone, *sd_cl); 2163 *sd_cl = NULL; 2164 if (*sd_m != NULL) { 2165 m_init(*sd_m, M_NOWAIT, MT_DATA, 0); 2166 uma_zfree(zone_mbuf, *sd_m); 2167 *sd_m = NULL; 2168 } 2169 } else { 2170 MPASS(*sd_m == NULL); 2171 } 2172 #if MEMORY_LOGGING 2173 fl->ifl_m_dequeued++; 2174 fl->ifl_cl_dequeued++; 2175 #endif 2176 } 2177 #ifdef INVARIANTS 2178 for (i = 0; i < fl->ifl_size; i++) { 2179 MPASS(fl->ifl_sds.ifsd_cl[i] == NULL); 2180 MPASS(fl->ifl_sds.ifsd_m[i] == NULL); 2181 } 2182 #endif 2183 /* 2184 * Reset free list values 2185 */ 2186 fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0; 2187 bzero(idi->idi_vaddr, idi->idi_size); 2188 } 2189 2190 /********************************************************************* 2191 * 2192 * Initialize a free list and its buffers. 2193 * 2194 **********************************************************************/ 2195 static int 2196 iflib_fl_setup(iflib_fl_t fl) 2197 { 2198 iflib_rxq_t rxq = fl->ifl_rxq; 2199 if_ctx_t ctx = rxq->ifr_ctx; 2200 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2201 int qidx; 2202 2203 bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1); 2204 /* 2205 ** Free current RX buffer structs and their mbufs 2206 */ 2207 iflib_fl_bufs_free(fl); 2208 /* Now replenish the mbufs */ 2209 MPASS(fl->ifl_credits == 0); 2210 qidx = rxq->ifr_fl_offset + fl->ifl_id; 2211 if (scctx->isc_rxd_buf_size[qidx] != 0) 2212 fl->ifl_buf_size = scctx->isc_rxd_buf_size[qidx]; 2213 else 2214 fl->ifl_buf_size = ctx->ifc_rx_mbuf_sz; 2215 /* 2216 * ifl_buf_size may be a driver-supplied value, so pull it up 2217 * to the selected mbuf size. 2218 */ 2219 fl->ifl_buf_size = iflib_get_mbuf_size_for(fl->ifl_buf_size); 2220 if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size) 2221 ctx->ifc_max_fl_buf_size = fl->ifl_buf_size; 2222 fl->ifl_cltype = m_gettype(fl->ifl_buf_size); 2223 fl->ifl_zone = m_getzone(fl->ifl_buf_size); 2224 2225 /* 2226 * Avoid pre-allocating zillions of clusters to an idle card 2227 * potentially speeding up attach. In any case make sure 2228 * to leave a descriptor unavailable. See the comment in 2229 * iflib_fl_refill_all(). 
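* (Hence the min(128, ...) below: at most 128 buffers are posted up front, and the RX path tops the free list up on demand.)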
2230 */ 2231 MPASS(fl->ifl_size > 0); 2232 (void)iflib_fl_refill(ctx, fl, min(128, fl->ifl_size - 1)); 2233 if (min(128, fl->ifl_size - 1) != fl->ifl_credits) 2234 return (ENOBUFS); 2235 /* 2236 * handle failure 2237 */ 2238 MPASS(rxq != NULL); 2239 MPASS(fl->ifl_ifdi != NULL); 2240 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 2241 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2242 return (0); 2243 } 2244 2245 /********************************************************************* 2246 * 2247 * Free receive ring data structures 2248 * 2249 **********************************************************************/ 2250 static void 2251 iflib_rx_sds_free(iflib_rxq_t rxq) 2252 { 2253 iflib_fl_t fl; 2254 int i, j; 2255 2256 if (rxq->ifr_fl != NULL) { 2257 for (i = 0; i < rxq->ifr_nfl; i++) { 2258 fl = &rxq->ifr_fl[i]; 2259 if (fl->ifl_buf_tag != NULL) { 2260 if (fl->ifl_sds.ifsd_map != NULL) { 2261 for (j = 0; j < fl->ifl_size; j++) { 2262 bus_dmamap_sync( 2263 fl->ifl_buf_tag, 2264 fl->ifl_sds.ifsd_map[j], 2265 BUS_DMASYNC_POSTREAD); 2266 bus_dmamap_unload( 2267 fl->ifl_buf_tag, 2268 fl->ifl_sds.ifsd_map[j]); 2269 bus_dmamap_destroy( 2270 fl->ifl_buf_tag, 2271 fl->ifl_sds.ifsd_map[j]); 2272 } 2273 } 2274 bus_dma_tag_destroy(fl->ifl_buf_tag); 2275 fl->ifl_buf_tag = NULL; 2276 } 2277 free(fl->ifl_sds.ifsd_m, M_IFLIB); 2278 free(fl->ifl_sds.ifsd_cl, M_IFLIB); 2279 free(fl->ifl_sds.ifsd_ba, M_IFLIB); 2280 free(fl->ifl_sds.ifsd_map, M_IFLIB); 2281 free(fl->ifl_rx_bitmap, M_IFLIB); 2282 fl->ifl_sds.ifsd_m = NULL; 2283 fl->ifl_sds.ifsd_cl = NULL; 2284 fl->ifl_sds.ifsd_ba = NULL; 2285 fl->ifl_sds.ifsd_map = NULL; 2286 fl->ifl_rx_bitmap = NULL; 2287 } 2288 free(rxq->ifr_fl, M_IFLIB); 2289 rxq->ifr_fl = NULL; 2290 free(rxq->ifr_ifdi, M_IFLIB); 2291 rxq->ifr_ifdi = NULL; 2292 rxq->ifr_cq_cidx = 0; 2293 } 2294 } 2295 2296 /* 2297 * Timer routine 2298 */ 2299 static void 2300 iflib_timer(void *arg) 2301 { 2302 iflib_txq_t txq = arg; 2303 if_ctx_t ctx = txq->ift_ctx; 2304 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 2305 uint64_t this_tick = ticks; 2306 2307 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) 2308 return; 2309 2310 /* 2311 ** Check on the state of the TX queue(s); this 2312 ** can be done without the lock because it's RO 2313 ** and the HUNG state will be static if set.
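** (Comparing ift_cleaned against ift_cleaned_prev below distinguishes a queue that has made no progress at all from one that is merely slow.)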
2314 */ 2315 if (this_tick - txq->ift_last_timer_tick >= iflib_timer_default) { 2316 txq->ift_last_timer_tick = this_tick; 2317 IFDI_TIMER(ctx, txq->ift_id); 2318 if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) && 2319 ((txq->ift_cleaned_prev == txq->ift_cleaned) || 2320 (sctx->isc_pause_frames == 0))) 2321 goto hung; 2322 2323 if (txq->ift_qstatus != IFLIB_QUEUE_IDLE && 2324 ifmp_ring_is_stalled(txq->ift_br)) { 2325 KASSERT(ctx->ifc_link_state == LINK_STATE_UP, 2326 ("queue can't be marked as hung if interface is down")); 2327 txq->ift_qstatus = IFLIB_QUEUE_HUNG; 2328 } 2329 txq->ift_cleaned_prev = txq->ift_cleaned; 2330 } 2331 /* handle any laggards */ 2332 if (txq->ift_db_pending) 2333 GROUPTASK_ENQUEUE(&txq->ift_task); 2334 2335 sctx->isc_pause_frames = 0; 2336 if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) 2337 callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer, 2338 txq, txq->ift_timer.c_cpu); 2339 return; 2340 2341 hung: 2342 device_printf(ctx->ifc_dev, 2343 "Watchdog timeout (TX: %d desc avail: %d pidx: %d) -- resetting\n", 2344 txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx); 2345 STATE_LOCK(ctx); 2346 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 2347 ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET); 2348 iflib_admin_intr_deferred(ctx); 2349 STATE_UNLOCK(ctx); 2350 } 2351 2352 static uint16_t 2353 iflib_get_mbuf_size_for(unsigned int size) 2354 { 2355 2356 if (size <= MCLBYTES) 2357 return (MCLBYTES); 2358 else 2359 return (MJUMPAGESIZE); 2360 } 2361 2362 static void 2363 iflib_calc_rx_mbuf_sz(if_ctx_t ctx) 2364 { 2365 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 2366 2367 /* 2368 * XXX don't set the max_frame_size to larger 2369 * than the hardware can handle 2370 */ 2371 ctx->ifc_rx_mbuf_sz = 2372 iflib_get_mbuf_size_for(sctx->isc_max_frame_size); 2373 } 2374 2375 uint32_t 2376 iflib_get_rx_mbuf_sz(if_ctx_t ctx) 2377 { 2378 2379 return (ctx->ifc_rx_mbuf_sz); 2380 } 2381 2382 static void 2383 iflib_init_locked(if_ctx_t ctx) 2384 { 2385 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 2386 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2387 if_t ifp = ctx->ifc_ifp; 2388 iflib_fl_t fl; 2389 iflib_txq_t txq; 2390 iflib_rxq_t rxq; 2391 int i, j, tx_ip_csum_flags, tx_ip6_csum_flags; 2392 2393 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 2394 IFDI_INTR_DISABLE(ctx); 2395 2396 /* 2397 * See iflib_stop(). Useful in case iflib_init_locked() is 2398 * called without first calling iflib_stop(). 
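* (The netmap_disable_all_rings() call below is paired with the netmap_enable_all_rings() call at the end of this function.)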
2399 */ 2400 netmap_disable_all_rings(ifp); 2401 2402 tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP); 2403 tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP); 2404 /* Set hardware offload abilities */ 2405 if_clearhwassist(ifp); 2406 if (if_getcapenable(ifp) & IFCAP_TXCSUM) 2407 if_sethwassistbits(ifp, tx_ip_csum_flags, 0); 2408 if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) 2409 if_sethwassistbits(ifp, tx_ip6_csum_flags, 0); 2410 if (if_getcapenable(ifp) & IFCAP_TSO4) 2411 if_sethwassistbits(ifp, CSUM_IP_TSO, 0); 2412 if (if_getcapenable(ifp) & IFCAP_TSO6) 2413 if_sethwassistbits(ifp, CSUM_IP6_TSO, 0); 2414 2415 for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) { 2416 CALLOUT_LOCK(txq); 2417 callout_stop(&txq->ift_timer); 2418 #ifdef DEV_NETMAP 2419 callout_stop(&txq->ift_netmap_timer); 2420 #endif /* DEV_NETMAP */ 2421 CALLOUT_UNLOCK(txq); 2422 iflib_netmap_txq_init(ctx, txq); 2423 } 2424 2425 /* 2426 * Calculate a suitable Rx mbuf size prior to calling IFDI_INIT, so 2427 * that drivers can use the value when setting up the hardware receive 2428 * buffers. 2429 */ 2430 iflib_calc_rx_mbuf_sz(ctx); 2431 2432 #ifdef INVARIANTS 2433 i = if_getdrvflags(ifp); 2434 #endif 2435 IFDI_INIT(ctx); 2436 MPASS(if_getdrvflags(ifp) == i); 2437 for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) { 2438 if (iflib_netmap_rxq_init(ctx, rxq) > 0) { 2439 /* This rxq is in netmap mode. Skip normal init. */ 2440 continue; 2441 } 2442 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { 2443 if (iflib_fl_setup(fl)) { 2444 device_printf(ctx->ifc_dev, 2445 "setting up free list %d failed - " 2446 "check cluster settings\n", j); 2447 goto done; 2448 } 2449 } 2450 } 2451 done: 2452 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 2453 IFDI_INTR_ENABLE(ctx); 2454 txq = ctx->ifc_txqs; 2455 for (i = 0; i < sctx->isc_ntxqsets; i++, txq++) 2456 callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer, txq, 2457 txq->ift_timer.c_cpu); 2458 2459 /* Re-enable txsync/rxsync. */ 2460 netmap_enable_all_rings(ifp); 2461 } 2462 2463 static int 2464 iflib_media_change(if_t ifp) 2465 { 2466 if_ctx_t ctx = if_getsoftc(ifp); 2467 int err; 2468 2469 CTX_LOCK(ctx); 2470 if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0) 2471 iflib_init_locked(ctx); 2472 CTX_UNLOCK(ctx); 2473 return (err); 2474 } 2475 2476 static void 2477 iflib_media_status(if_t ifp, struct ifmediareq *ifmr) 2478 { 2479 if_ctx_t ctx = if_getsoftc(ifp); 2480 2481 CTX_LOCK(ctx); 2482 IFDI_UPDATE_ADMIN_STATUS(ctx); 2483 IFDI_MEDIA_STATUS(ctx, ifmr); 2484 CTX_UNLOCK(ctx); 2485 } 2486 2487 void 2488 iflib_stop(if_ctx_t ctx) 2489 { 2490 iflib_txq_t txq = ctx->ifc_txqs; 2491 iflib_rxq_t rxq = ctx->ifc_rxqs; 2492 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2493 if_shared_ctx_t sctx = ctx->ifc_sctx; 2494 iflib_dma_info_t di; 2495 iflib_fl_t fl; 2496 int i, j; 2497 2498 /* Tell the stack that the interface is no longer active */ 2499 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 2500 2501 IFDI_INTR_DISABLE(ctx); 2502 DELAY(1000); 2503 IFDI_STOP(ctx); 2504 DELAY(1000); 2505 2506 /* 2507 * Stop any pending txsync/rxsync and prevent new ones 2508 * from starting. Processes blocked in poll() will get 2509 * POLLERR. 2510 */ 2511 netmap_disable_all_rings(ctx->ifc_ifp); 2512 2513 iflib_debug_reset(); 2514 /* Wait for current tx queue users to exit to disarm watchdog timer.
*/ 2515 for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) { 2516 /* make sure all transmitters have completed before proceeding XXX */ 2517 2518 CALLOUT_LOCK(txq); 2519 callout_stop(&txq->ift_timer); 2520 #ifdef DEV_NETMAP 2521 callout_stop(&txq->ift_netmap_timer); 2522 #endif /* DEV_NETMAP */ 2523 CALLOUT_UNLOCK(txq); 2524 2525 /* clean any enqueued buffers */ 2526 iflib_ifmp_purge(txq); 2527 /* Free any existing tx buffers. */ 2528 for (j = 0; j < txq->ift_size; j++) { 2529 iflib_txsd_free(ctx, txq, j); 2530 } 2531 txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0; 2532 txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0; 2533 txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0; 2534 txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0; 2535 txq->ift_pullups = 0; 2536 ifmp_ring_reset_stats(txq->ift_br); 2537 for (j = 0, di = txq->ift_ifdi; j < sctx->isc_ntxqs; j++, di++) 2538 bzero((void *)di->idi_vaddr, di->idi_size); 2539 } 2540 for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) { 2541 /* make sure all transmitters have completed before proceeding XXX */ 2542 2543 rxq->ifr_cq_cidx = 0; 2544 for (j = 0, di = rxq->ifr_ifdi; j < sctx->isc_nrxqs; j++, di++) 2545 bzero((void *)di->idi_vaddr, di->idi_size); 2546 /* also resets the free lists pidx/cidx */ 2547 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) 2548 iflib_fl_bufs_free(fl); 2549 } 2550 } 2551 2552 static inline caddr_t 2553 calc_next_rxd(iflib_fl_t fl, int cidx) 2554 { 2555 qidx_t size; 2556 int nrxd; 2557 caddr_t start, end, cur, next; 2558 2559 nrxd = fl->ifl_size; 2560 size = fl->ifl_rxd_size; 2561 start = fl->ifl_ifdi->idi_vaddr; 2562 2563 if (__predict_false(size == 0)) 2564 return (start); 2565 cur = start + size*cidx; 2566 end = start + size*nrxd; 2567 next = CACHE_PTR_NEXT(cur); 2568 return (next < end ? 
next : start); 2569 } 2570 2571 static inline void 2572 prefetch_pkts(iflib_fl_t fl, int cidx) 2573 { 2574 int nextptr; 2575 int nrxd = fl->ifl_size; 2576 caddr_t next_rxd; 2577 2578 nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1); 2579 prefetch(&fl->ifl_sds.ifsd_m[nextptr]); 2580 prefetch(&fl->ifl_sds.ifsd_cl[nextptr]); 2581 next_rxd = calc_next_rxd(fl, cidx); 2582 prefetch(next_rxd); 2583 prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]); 2584 prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]); 2585 prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]); 2586 prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]); 2587 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]); 2588 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]); 2589 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]); 2590 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]); 2591 } 2592 2593 static struct mbuf * 2594 rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, bool unload, if_rxsd_t sd, 2595 int *pf_rv, if_rxd_info_t ri) 2596 { 2597 bus_dmamap_t map; 2598 iflib_fl_t fl; 2599 caddr_t payload; 2600 struct mbuf *m; 2601 int flid, cidx, len, next; 2602 2603 map = NULL; 2604 flid = irf->irf_flid; 2605 cidx = irf->irf_idx; 2606 fl = &rxq->ifr_fl[flid]; 2607 sd->ifsd_fl = fl; 2608 m = fl->ifl_sds.ifsd_m[cidx]; 2609 sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx]; 2610 fl->ifl_credits--; 2611 #if MEMORY_LOGGING 2612 fl->ifl_m_dequeued++; 2613 #endif 2614 if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH) 2615 prefetch_pkts(fl, cidx); 2616 next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1); 2617 prefetch(&fl->ifl_sds.ifsd_map[next]); 2618 map = fl->ifl_sds.ifsd_map[cidx]; 2619 2620 bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD); 2621 2622 if (rxq->pfil != NULL && PFIL_HOOKED_IN(rxq->pfil) && pf_rv != NULL && 2623 irf->irf_len != 0) { 2624 payload = *sd->ifsd_cl; 2625 payload += ri->iri_pad; 2626 len = ri->iri_len - ri->iri_pad; 2627 *pf_rv = pfil_run_hooks(rxq->pfil, payload, ri->iri_ifp, 2628 len | PFIL_MEMPTR | PFIL_IN, NULL); 2629 switch (*pf_rv) { 2630 case PFIL_DROPPED: 2631 case PFIL_CONSUMED: 2632 /* 2633 * The filter ate it. Everything is recycled. 2634 */ 2635 m = NULL; 2636 unload = 0; 2637 break; 2638 case PFIL_REALLOCED: 2639 /* 2640 * The filter copied it. Everything is recycled. 
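* (pfil_mem2mbuf() retrieves the mbuf the filter allocated for its copy; iflib's own cluster stays put and will be handed out again by refill.)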
2641 */ 2642 m = pfil_mem2mbuf(payload); 2643 unload = 0; 2644 break; 2645 case PFIL_PASS: 2646 /* 2647 * Filter said it was OK, so receive like 2648 * normal 2649 */ 2650 fl->ifl_sds.ifsd_m[cidx] = NULL; 2651 break; 2652 default: 2653 MPASS(0); 2654 } 2655 } else { 2656 fl->ifl_sds.ifsd_m[cidx] = NULL; 2657 *pf_rv = PFIL_PASS; 2658 } 2659 2660 if (unload && irf->irf_len != 0) 2661 bus_dmamap_unload(fl->ifl_buf_tag, map); 2662 fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1); 2663 if (__predict_false(fl->ifl_cidx == 0)) 2664 fl->ifl_gen = 0; 2665 bit_clear(fl->ifl_rx_bitmap, cidx); 2666 return (m); 2667 } 2668 2669 static struct mbuf * 2670 assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd, int *pf_rv) 2671 { 2672 struct mbuf *m, *mh, *mt; 2673 caddr_t cl; 2674 int *pf_rv_ptr, flags, i, padlen; 2675 bool consumed; 2676 2677 i = 0; 2678 mh = NULL; 2679 consumed = false; 2680 *pf_rv = PFIL_PASS; 2681 pf_rv_ptr = pf_rv; 2682 do { 2683 m = rxd_frag_to_sd(rxq, &ri->iri_frags[i], !consumed, sd, 2684 pf_rv_ptr, ri); 2685 2686 MPASS(*sd->ifsd_cl != NULL); 2687 2688 /* 2689 * Exclude zero-length frags & frags from 2690 * packets the filter has consumed or dropped 2691 */ 2692 if (ri->iri_frags[i].irf_len == 0 || consumed || 2693 *pf_rv == PFIL_CONSUMED || *pf_rv == PFIL_DROPPED) { 2694 if (mh == NULL) { 2695 /* everything saved here */ 2696 consumed = true; 2697 pf_rv_ptr = NULL; 2698 continue; 2699 } 2700 /* XXX we can save the cluster here, but not the mbuf */ 2701 m_init(m, M_NOWAIT, MT_DATA, 0); 2702 m_free(m); 2703 continue; 2704 } 2705 if (mh == NULL) { 2706 flags = M_PKTHDR|M_EXT; 2707 mh = mt = m; 2708 padlen = ri->iri_pad; 2709 } else { 2710 flags = M_EXT; 2711 mt->m_next = m; 2712 mt = m; 2713 /* assuming padding is only on the first fragment */ 2714 padlen = 0; 2715 } 2716 cl = *sd->ifsd_cl; 2717 *sd->ifsd_cl = NULL; 2718 2719 /* Can these two be made one ? */ 2720 m_init(m, M_NOWAIT, MT_DATA, flags); 2721 m_cljset(m, cl, sd->ifsd_fl->ifl_cltype); 2722 /* 2723 * These must follow m_init and m_cljset 2724 */ 2725 m->m_data += padlen; 2726 ri->iri_len -= padlen; 2727 m->m_len = ri->iri_frags[i].irf_len; 2728 } while (++i < ri->iri_nfrags); 2729 2730 return (mh); 2731 } 2732 2733 /* 2734 * Process one software descriptor 2735 */ 2736 static struct mbuf * 2737 iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri) 2738 { 2739 struct if_rxsd sd; 2740 struct mbuf *m; 2741 int pf_rv; 2742 2743 /* should I merge this back in now that the two paths are basically duplicated? 
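* The single-fragment path below copies small packets (up to MIN(IFLIB_RX_COPY_THRESH, MHLEN) bytes) into the mbuf itself so the RX cluster never has to leave the free list.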
*/ 2744 if (ri->iri_nfrags == 1 && 2745 ri->iri_frags[0].irf_len != 0 && 2746 ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) { 2747 m = rxd_frag_to_sd(rxq, &ri->iri_frags[0], false, &sd, 2748 &pf_rv, ri); 2749 if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED) 2750 return (m); 2751 if (pf_rv == PFIL_PASS) { 2752 m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR); 2753 #ifndef __NO_STRICT_ALIGNMENT 2754 if (!IP_ALIGNED(m)) 2755 m->m_data += 2; 2756 #endif 2757 memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len); 2758 m->m_len = ri->iri_frags[0].irf_len; 2759 } 2760 } else { 2761 m = assemble_segments(rxq, ri, &sd, &pf_rv); 2762 if (m == NULL) 2763 return (NULL); 2764 if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED) 2765 return (m); 2766 } 2767 m->m_pkthdr.len = ri->iri_len; 2768 m->m_pkthdr.rcvif = ri->iri_ifp; 2769 m->m_flags |= ri->iri_flags; 2770 m->m_pkthdr.ether_vtag = ri->iri_vtag; 2771 m->m_pkthdr.flowid = ri->iri_flowid; 2772 M_HASHTYPE_SET(m, ri->iri_rsstype); 2773 m->m_pkthdr.csum_flags = ri->iri_csum_flags; 2774 m->m_pkthdr.csum_data = ri->iri_csum_data; 2775 return (m); 2776 } 2777 2778 #if defined(INET6) || defined(INET) 2779 static void 2780 iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6) 2781 { 2782 CURVNET_SET(lc->ifp->if_vnet); 2783 #if defined(INET6) 2784 *v6 = V_ip6_forwarding; 2785 #endif 2786 #if defined(INET) 2787 *v4 = V_ipforwarding; 2788 #endif 2789 CURVNET_RESTORE(); 2790 } 2791 2792 /* 2793 * Returns true if it's possible this packet could be LROed. 2794 * if it returns false, it is guaranteed that tcp_lro_rx() 2795 * would not return zero. 2796 */ 2797 static bool 2798 iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding) 2799 { 2800 struct ether_header *eh; 2801 2802 eh = mtod(m, struct ether_header *); 2803 switch (eh->ether_type) { 2804 #if defined(INET6) 2805 case htons(ETHERTYPE_IPV6): 2806 return (!v6_forwarding); 2807 #endif 2808 #if defined (INET) 2809 case htons(ETHERTYPE_IP): 2810 return (!v4_forwarding); 2811 #endif 2812 } 2813 2814 return false; 2815 } 2816 #else 2817 static void 2818 iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused) 2819 { 2820 } 2821 #endif 2822 2823 static void 2824 _task_fn_rx_watchdog(void *context) 2825 { 2826 iflib_rxq_t rxq = context; 2827 2828 GROUPTASK_ENQUEUE(&rxq->ifr_task); 2829 } 2830 2831 static uint8_t 2832 iflib_rxeof(iflib_rxq_t rxq, qidx_t budget) 2833 { 2834 if_t ifp; 2835 if_ctx_t ctx = rxq->ifr_ctx; 2836 if_shared_ctx_t sctx = ctx->ifc_sctx; 2837 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2838 int avail, i; 2839 qidx_t *cidxp; 2840 struct if_rxd_info ri; 2841 int err, budget_left, rx_bytes, rx_pkts; 2842 iflib_fl_t fl; 2843 int lro_enabled; 2844 bool v4_forwarding, v6_forwarding, lro_possible; 2845 uint8_t retval = 0; 2846 2847 /* 2848 * XXX early demux data packets so that if_input processing only handles 2849 * acks in interrupt context 2850 */ 2851 struct mbuf *m, *mh, *mt, *mf; 2852 2853 NET_EPOCH_ASSERT(); 2854 2855 lro_possible = v4_forwarding = v6_forwarding = false; 2856 ifp = ctx->ifc_ifp; 2857 mh = mt = NULL; 2858 MPASS(budget > 0); 2859 rx_pkts = rx_bytes = 0; 2860 if (sctx->isc_flags & IFLIB_HAS_RXCQ) 2861 cidxp = &rxq->ifr_cq_cidx; 2862 else 2863 cidxp = &rxq->ifr_fl[0].ifl_cidx; 2864 if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) { 2865 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) 2866 retval |= iflib_fl_refill_all(ctx, fl); 2867 DBG_COUNTER_INC(rx_unavail); 2868 return (retval); 
2869 } 2870 2871 /* pfil needs the vnet to be set */ 2872 CURVNET_SET_QUIET(ifp->if_vnet); 2873 for (budget_left = budget; budget_left > 0 && avail > 0;) { 2874 if (__predict_false(!CTX_ACTIVE(ctx))) { 2875 DBG_COUNTER_INC(rx_ctx_inactive); 2876 break; 2877 } 2878 /* 2879 * Reset client set fields to their default values 2880 */ 2881 rxd_info_zero(&ri); 2882 ri.iri_qsidx = rxq->ifr_id; 2883 ri.iri_cidx = *cidxp; 2884 ri.iri_ifp = ifp; 2885 ri.iri_frags = rxq->ifr_frags; 2886 err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); 2887 2888 if (err) 2889 goto err; 2890 rx_pkts += 1; 2891 rx_bytes += ri.iri_len; 2892 if (sctx->isc_flags & IFLIB_HAS_RXCQ) { 2893 *cidxp = ri.iri_cidx; 2894 /* Update our consumer index */ 2895 /* XXX NB: shurd - check if this is still safe */ 2896 while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) 2897 rxq->ifr_cq_cidx -= scctx->isc_nrxd[0]; 2898 /* was this only a completion queue message? */ 2899 if (__predict_false(ri.iri_nfrags == 0)) 2900 continue; 2901 } 2902 MPASS(ri.iri_nfrags != 0); 2903 MPASS(ri.iri_len != 0); 2904 2905 /* will advance the cidx on the corresponding free lists */ 2906 m = iflib_rxd_pkt_get(rxq, &ri); 2907 avail--; 2908 budget_left--; 2909 if (avail == 0 && budget_left) 2910 avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left); 2911 2912 if (__predict_false(m == NULL)) 2913 continue; 2914 2915 /* imm_pkt: -- cxgb */ 2916 if (mh == NULL) 2917 mh = mt = m; 2918 else { 2919 mt->m_nextpkt = m; 2920 mt = m; 2921 } 2922 } 2923 CURVNET_RESTORE(); 2924 /* make sure that we can refill faster than drain */ 2925 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) 2926 retval |= iflib_fl_refill_all(ctx, fl); 2927 2928 lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO); 2929 if (lro_enabled) 2930 iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding); 2931 mt = mf = NULL; 2932 while (mh != NULL) { 2933 m = mh; 2934 mh = mh->m_nextpkt; 2935 m->m_nextpkt = NULL; 2936 #ifndef __NO_STRICT_ALIGNMENT 2937 if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL) 2938 continue; 2939 #endif 2940 rx_bytes += m->m_pkthdr.len; 2941 rx_pkts++; 2942 #if defined(INET6) || defined(INET) 2943 if (lro_enabled) { 2944 if (!lro_possible) { 2945 lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding); 2946 if (lro_possible && mf != NULL) { 2947 ifp->if_input(ifp, mf); 2948 DBG_COUNTER_INC(rx_if_input); 2949 mt = mf = NULL; 2950 } 2951 } 2952 if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) == 2953 (CSUM_L4_CALC|CSUM_L4_VALID)) { 2954 if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0) 2955 continue; 2956 } 2957 } 2958 #endif 2959 if (lro_possible) { 2960 ifp->if_input(ifp, m); 2961 DBG_COUNTER_INC(rx_if_input); 2962 continue; 2963 } 2964 2965 if (mf == NULL) 2966 mf = m; 2967 if (mt != NULL) 2968 mt->m_nextpkt = m; 2969 mt = m; 2970 } 2971 if (mf != NULL) { 2972 ifp->if_input(ifp, mf); 2973 DBG_COUNTER_INC(rx_if_input); 2974 } 2975 2976 if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes); 2977 if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts); 2978 2979 /* 2980 * Flush any outstanding LRO work 2981 */ 2982 #if defined(INET6) || defined(INET) 2983 tcp_lro_flush_all(&rxq->ifr_lc); 2984 #endif 2985 if (avail != 0 || iflib_rxd_avail(ctx, rxq, *cidxp, 1) != 0) 2986 retval |= IFLIB_RXEOF_MORE; 2987 return (retval); 2988 err: 2989 STATE_LOCK(ctx); 2990 ctx->ifc_flags |= IFC_DO_RESET; 2991 iflib_admin_intr_deferred(ctx); 2992 STATE_UNLOCK(ctx); 2993 return (0); 2994 } 2995 2996 #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / 
(txq)->ift_update_freq)-1) /* * Scale doorbell deferral with ring occupancy: the fuller the ring, the * longer the hardware can safely be left unnotified. With ift_size == 1024 * and ift_update_freq == 16, for example, TXD_NOTIFY_COUNT is 63: a ring * more than half full defers up to 63 descriptors, while a mostly empty * ring gets the doorbell immediately. */ 2997 static inline qidx_t 2998 txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use) 2999 { 3000 qidx_t notify_count = TXD_NOTIFY_COUNT(txq); 3001 qidx_t minthresh = txq->ift_size / 8; 3002 if (in_use > 4*minthresh) 3003 return (notify_count); 3004 if (in_use > 2*minthresh) 3005 return (notify_count >> 1); 3006 if (in_use > minthresh) 3007 return (notify_count >> 3); 3008 return (0); 3009 } 3010 3011 static inline qidx_t 3012 txq_max_rs_deferred(iflib_txq_t txq) 3013 { 3014 qidx_t notify_count = TXD_NOTIFY_COUNT(txq); 3015 qidx_t minthresh = txq->ift_size / 8; 3016 if (txq->ift_in_use > 4*minthresh) 3017 return (notify_count); 3018 if (txq->ift_in_use > 2*minthresh) 3019 return (notify_count >> 1); 3020 if (txq->ift_in_use > minthresh) 3021 return (notify_count >> 2); 3022 return (2); 3023 } 3024 3025 #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags) 3026 #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG) 3027 3028 #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use)) 3029 #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq) 3030 #define TXQ_MAX_DB_CONSUMED(size) (size >> 4) 3031 3032 /* forward compatibility for cxgb */ 3033 #define FIRST_QSET(ctx) 0 3034 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets) 3035 #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets) 3036 #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx)) 3037 #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments)) 3038 3039 /* XXX we should be setting this to something other than zero */ 3040 #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh) 3041 #define MAX_TX_DESC(ctx) MAX((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \ 3042 (ctx)->ifc_softc_ctx.isc_tx_nsegments) 3043 3044 static inline bool 3045 iflib_txd_db_check(iflib_txq_t txq, int ring) 3046 { 3047 if_ctx_t ctx = txq->ift_ctx; 3048 qidx_t dbval, max; 3049 3050 max = TXQ_MAX_DB_DEFERRED(txq, txq->ift_in_use); 3051 3052 /* force || threshold exceeded || at the edge of the ring */ 3053 if (ring || (txq->ift_db_pending >= max) || (TXQ_AVAIL(txq) <= MAX_TX_DESC(ctx) + 2)) { 3054 3055 /* 3056 * 'npending' is used if the card's doorbell is in terms of the number of descriptors 3057 * pending flush (BRCM). 'pidx' is used in cases where the card's doorbell uses the 3058 * producer index explicitly (INTC). 3059 */ 3060 dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx; 3061 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, 3062 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3063 ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval); 3064 3065 /* 3066 * Absent bugs, there are zero packets pending, so reset the pending counts to zero.
3067 */ 3068 txq->ift_db_pending = txq->ift_npending = 0; 3069 return (true); 3070 } 3071 return (false); 3072 } 3073 3074 #ifdef PKT_DEBUG 3075 static void 3076 print_pkt(if_pkt_info_t pi) 3077 { 3078 printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n", 3079 pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx); 3080 printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n", 3081 pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag); 3082 printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n", 3083 pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto); 3084 } 3085 #endif 3086 3087 #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO) 3088 #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO)) 3089 #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO) 3090 #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO)) 3091 3092 static int 3093 iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp) 3094 { 3095 if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx; 3096 struct ether_vlan_header *eh; 3097 struct mbuf *m; 3098 3099 m = *mp; 3100 if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) && 3101 M_WRITABLE(m) == 0) { 3102 if ((m = m_dup(m, M_NOWAIT)) == NULL) { 3103 return (ENOMEM); 3104 } else { 3105 m_freem(*mp); 3106 DBG_COUNTER_INC(tx_frees); 3107 *mp = m; 3108 } 3109 } 3110 3111 /* 3112 * Determine where frame payload starts. 3113 * Jump over vlan headers if already present, 3114 * helpful for QinQ too. 3115 */ 3116 if (__predict_false(m->m_len < sizeof(*eh))) { 3117 txq->ift_pullups++; 3118 if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL)) 3119 return (ENOMEM); 3120 } 3121 eh = mtod(m, struct ether_vlan_header *); 3122 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 3123 pi->ipi_etype = ntohs(eh->evl_proto); 3124 pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3125 } else { 3126 pi->ipi_etype = ntohs(eh->evl_encap_proto); 3127 pi->ipi_ehdrlen = ETHER_HDR_LEN; 3128 } 3129 3130 switch (pi->ipi_etype) { 3131 #ifdef INET 3132 case ETHERTYPE_IP: 3133 { 3134 struct mbuf *n; 3135 struct ip *ip = NULL; 3136 struct tcphdr *th = NULL; 3137 int minthlen; 3138 3139 minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th)); 3140 if (__predict_false(m->m_len < minthlen)) { 3141 /* 3142 * if this code bloat is causing too much of a hit 3143 * move it to a separate function and mark it noinline 3144 */ 3145 if (m->m_len == pi->ipi_ehdrlen) { 3146 n = m->m_next; 3147 MPASS(n); 3148 if (n->m_len >= sizeof(*ip)) { 3149 ip = (struct ip *)n->m_data; 3150 if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 3151 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 3152 } else { 3153 txq->ift_pullups++; 3154 if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) 3155 return (ENOMEM); 3156 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 3157 } 3158 } else { 3159 txq->ift_pullups++; 3160 if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) 3161 return (ENOMEM); 3162 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 3163 if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 3164 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 3165 } 3166 } else { 3167 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 3168 if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 3169 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 3170 } 3171 pi->ipi_ip_hlen = ip->ip_hl << 2; 3172 pi->ipi_ipproto = 
ip->ip_p; 3173 pi->ipi_flags |= IPI_TX_IPV4; 3174 3175 /* TCP checksum offload may require TCP header length */ 3176 if (IS_TX_OFFLOAD4(pi)) { 3177 if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) { 3178 if (__predict_false(th == NULL)) { 3179 txq->ift_pullups++; 3180 if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL)) 3181 return (ENOMEM); 3182 th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen); 3183 } 3184 pi->ipi_tcp_hflags = th->th_flags; 3185 pi->ipi_tcp_hlen = th->th_off << 2; 3186 pi->ipi_tcp_seq = th->th_seq; 3187 } 3188 if (IS_TSO4(pi)) { 3189 if (__predict_false(ip->ip_p != IPPROTO_TCP)) 3190 return (ENXIO); 3191 /* 3192 * TSO always requires hardware checksum offload. 3193 */ 3194 pi->ipi_csum_flags |= (CSUM_IP_TCP | CSUM_IP); 3195 th->th_sum = in_pseudo(ip->ip_src.s_addr, 3196 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 3197 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; 3198 if (sctx->isc_flags & IFLIB_TSO_INIT_IP) { 3199 ip->ip_sum = 0; 3200 ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz); 3201 } 3202 } 3203 } 3204 if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP)) 3205 ip->ip_sum = 0; 3206 3207 break; 3208 } 3209 #endif 3210 #ifdef INET6 3211 case ETHERTYPE_IPV6: 3212 { 3213 struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen); 3214 struct tcphdr *th; 3215 pi->ipi_ip_hlen = sizeof(struct ip6_hdr); 3216 3217 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) { 3218 txq->ift_pullups++; 3219 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL)) 3220 return (ENOMEM); 3221 } 3222 th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen); 3223 3224 /* XXX-BZ this will go badly in case of ext hdrs. */ 3225 pi->ipi_ipproto = ip6->ip6_nxt; 3226 pi->ipi_flags |= IPI_TX_IPV6; 3227 3228 /* TCP checksum offload may require TCP header length */ 3229 if (IS_TX_OFFLOAD6(pi)) { 3230 if (pi->ipi_ipproto == IPPROTO_TCP) { 3231 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) { 3232 txq->ift_pullups++; 3233 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL)) 3234 return (ENOMEM); 3235 } 3236 pi->ipi_tcp_hflags = th->th_flags; 3237 pi->ipi_tcp_hlen = th->th_off << 2; 3238 pi->ipi_tcp_seq = th->th_seq; 3239 } 3240 if (IS_TSO6(pi)) { 3241 if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP)) 3242 return (ENXIO); 3243 /* 3244 * TSO always requires hardware checksum offload. 
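* (As with the IPv4 path above, the TCP pseudo-header checksum is seeded below so the hardware only has to fold in the payload.)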
3245 */ 3246 pi->ipi_csum_flags |= CSUM_IP6_TCP; 3247 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); 3248 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; 3249 } 3250 } 3251 break; 3252 } 3253 #endif 3254 default: 3255 pi->ipi_csum_flags &= ~CSUM_OFFLOAD; 3256 pi->ipi_ip_hlen = 0; 3257 break; 3258 } 3259 *mp = m; 3260 3261 return (0); 3262 } 3263 3264 /* 3265 * If dodgy hardware rejects the scatter gather chain we've handed it 3266 * we'll need to remove the mbuf chain from ifsg_m[] before we can add the 3267 * m_defrag'd mbufs 3268 */ 3269 static __noinline struct mbuf * 3270 iflib_remove_mbuf(iflib_txq_t txq) 3271 { 3272 int ntxd, pidx; 3273 struct mbuf *m, **ifsd_m; 3274 3275 ifsd_m = txq->ift_sds.ifsd_m; 3276 ntxd = txq->ift_size; 3277 pidx = txq->ift_pidx & (ntxd - 1); 3279 m = ifsd_m[pidx]; 3280 ifsd_m[pidx] = NULL; 3281 bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]); 3282 if (txq->ift_sds.ifsd_tso_map != NULL) 3283 bus_dmamap_unload(txq->ift_tso_buf_tag, 3284 txq->ift_sds.ifsd_tso_map[pidx]); 3285 #if MEMORY_LOGGING 3286 txq->ift_dequeued++; 3287 #endif 3288 return (m); 3289 } 3290 3291 static inline caddr_t 3292 calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid) 3293 { 3294 qidx_t size; 3295 int ntxd; 3296 caddr_t start, end, cur, next; 3297 3298 ntxd = txq->ift_size; 3299 size = txq->ift_txd_size[qid]; 3300 start = txq->ift_ifdi[qid].idi_vaddr; 3301 3302 if (__predict_false(size == 0)) 3303 return (start); 3304 cur = start + size*cidx; 3305 end = start + size*ntxd; 3306 next = CACHE_PTR_NEXT(cur); 3307 return (next < end ? next : start); 3308 } 3309 3310 /* 3311 * Pad an mbuf to ensure a minimum ethernet frame size. 3312 * min_frame_size is the frame size (less CRC) to pad the mbuf to 3313 */ 3314 static __noinline int 3315 iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size) 3316 { 3317 /* 3318 * 18 is enough bytes to pad an ARP packet to 46 bytes, and 3319 * an ARP message is the smallest common payload I can think of 3320 */ 3321 static char pad[18]; /* just zeros */ 3322 int n; 3323 struct mbuf *new_head; 3324 3325 if (!M_WRITABLE(*m_head)) { 3326 new_head = m_dup(*m_head, M_NOWAIT); 3327 if (new_head == NULL) { 3328 m_freem(*m_head); 3329 device_printf(dev, "cannot pad short frame, m_dup() failed\n"); 3330 DBG_COUNTER_INC(encap_pad_mbuf_fail); 3331 DBG_COUNTER_INC(tx_frees); 3332 return ENOMEM; 3333 } 3334 m_freem(*m_head); 3335 *m_head = new_head; 3336 } 3337 3338 for (n = min_frame_size - (*m_head)->m_pkthdr.len; 3339 n > 0; n -= sizeof(pad)) 3340 if (!m_append(*m_head, min(n, sizeof(pad)), pad)) 3341 break; 3342 3343 if (n > 0) { 3344 m_freem(*m_head); 3345 device_printf(dev, "cannot pad short frame\n"); 3346 DBG_COUNTER_INC(encap_pad_mbuf_fail); 3347 DBG_COUNTER_INC(tx_frees); 3348 return (ENOBUFS); 3349 } 3350 3351 return 0; 3352 } 3353 3354 static int 3355 iflib_encap(iflib_txq_t txq, struct mbuf **m_headp) 3356 { 3357 if_ctx_t ctx; 3358 if_shared_ctx_t sctx; 3359 if_softc_ctx_t scctx; 3360 bus_dma_tag_t buf_tag; 3361 bus_dma_segment_t *segs; 3362 struct mbuf *m_head, **ifsd_m; 3363 void *next_txd; 3364 bus_dmamap_t map; 3365 struct if_pkt_info pi; 3366 int remap = 0; 3367 int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd; 3368 3369 ctx = txq->ift_ctx; 3370 sctx = ctx->ifc_sctx; 3371 scctx = &ctx->ifc_softc_ctx; 3372 segs = txq->ift_segs; 3373 ntxd = txq->ift_size; 3374 m_head = *m_headp; 3375 map = NULL; 3376 3377 /* 3378 * If we're doing TSO the next descriptor to clean may be quite
far ahead 3379 */ 3380 cidx = txq->ift_cidx; 3381 pidx = txq->ift_pidx; 3382 if (ctx->ifc_flags & IFC_PREFETCH) { 3383 next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1); 3384 if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) { 3385 next_txd = calc_next_txd(txq, cidx, 0); 3386 prefetch(next_txd); 3387 } 3388 3389 /* prefetch the next cache line of mbuf pointers and flags */ 3390 prefetch(&txq->ift_sds.ifsd_m[next]); 3391 prefetch(&txq->ift_sds.ifsd_map[next]); 3392 next = (cidx + CACHE_LINE_SIZE) & (ntxd-1); 3393 } 3394 map = txq->ift_sds.ifsd_map[pidx]; 3395 ifsd_m = txq->ift_sds.ifsd_m; 3396 3397 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3398 buf_tag = txq->ift_tso_buf_tag; 3399 max_segs = scctx->isc_tx_tso_segments_max; 3400 map = txq->ift_sds.ifsd_tso_map[pidx]; 3401 MPASS(buf_tag != NULL); 3402 MPASS(max_segs > 0); 3403 } else { 3404 buf_tag = txq->ift_buf_tag; 3405 max_segs = scctx->isc_tx_nsegments; 3406 map = txq->ift_sds.ifsd_map[pidx]; 3407 } 3408 if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) && 3409 __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) { 3410 err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size); 3411 if (err) { 3412 DBG_COUNTER_INC(encap_txd_encap_fail); 3413 return err; 3414 } 3415 } 3416 m_head = *m_headp; 3417 3418 pkt_info_zero(&pi); 3419 pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST)); 3420 pi.ipi_pidx = pidx; 3421 pi.ipi_qsidx = txq->ift_id; 3422 pi.ipi_len = m_head->m_pkthdr.len; 3423 pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags; 3424 pi.ipi_vtag = M_HAS_VLANTAG(m_head) ? m_head->m_pkthdr.ether_vtag : 0; 3425 3426 /* deliberate bitwise OR to make one condition */ 3427 if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) { 3428 if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) { 3429 DBG_COUNTER_INC(encap_txd_encap_fail); 3430 return (err); 3431 } 3432 m_head = *m_headp; 3433 } 3434 3435 retry: 3436 err = bus_dmamap_load_mbuf_sg(buf_tag, map, m_head, segs, &nsegs, 3437 BUS_DMA_NOWAIT); 3438 defrag: 3439 if (__predict_false(err)) { 3440 switch (err) { 3441 case EFBIG: 3442 /* try collapse once and defrag once */ 3443 if (remap == 0) { 3444 m_head = m_collapse(*m_headp, M_NOWAIT, max_segs); 3445 /* try defrag if collapsing fails */ 3446 if (m_head == NULL) 3447 remap++; 3448 } 3449 if (remap == 1) { 3450 txq->ift_mbuf_defrag++; 3451 m_head = m_defrag(*m_headp, M_NOWAIT); 3452 } 3453 /* 3454 * remap should never be >1 unless bus_dmamap_load_mbuf_sg 3455 * failed to map an mbuf that was run through m_defrag 3456 */ 3457 MPASS(remap <= 1); 3458 if (__predict_false(m_head == NULL || remap > 1)) 3459 goto defrag_failed; 3460 remap++; 3461 *m_headp = m_head; 3462 goto retry; 3463 break; 3464 case ENOMEM: 3465 txq->ift_no_tx_dma_setup++; 3466 break; 3467 default: 3468 txq->ift_no_tx_dma_setup++; 3469 m_freem(*m_headp); 3470 DBG_COUNTER_INC(tx_frees); 3471 *m_headp = NULL; 3472 break; 3473 } 3474 txq->ift_map_failed++; 3475 DBG_COUNTER_INC(encap_load_mbuf_fail); 3476 DBG_COUNTER_INC(encap_txd_encap_fail); 3477 return (err); 3478 } 3479 ifsd_m[pidx] = m_head; 3480 /* 3481 * XXX assumes a 1 to 1 relationship between segments and 3482 * descriptors - this does not hold true on all drivers, e.g. 
3483 * cxgb 3484 */ 3485 if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) { 3486 txq->ift_no_desc_avail++; 3487 bus_dmamap_unload(buf_tag, map); 3488 DBG_COUNTER_INC(encap_txq_avail_fail); 3489 DBG_COUNTER_INC(encap_txd_encap_fail); 3490 if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0) 3491 GROUPTASK_ENQUEUE(&txq->ift_task); 3492 return (ENOBUFS); 3493 } 3494 /* 3495 * On Intel cards we can greatly reduce the number of TX interrupts 3496 * we see by only setting report status on every Nth descriptor. 3497 * However, this also means that the driver will need to keep track 3498 * of the descriptors that RS was set on to check them for the DD bit. 3499 */ 3500 txq->ift_rs_pending += nsegs + 1; 3501 if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) || 3502 iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) { 3503 pi.ipi_flags |= IPI_TX_INTR; 3504 txq->ift_rs_pending = 0; 3505 } 3506 3507 pi.ipi_segs = segs; 3508 pi.ipi_nsegs = nsegs; 3509 3510 MPASS(pidx >= 0 && pidx < txq->ift_size); 3511 #ifdef PKT_DEBUG 3512 print_pkt(&pi); 3513 #endif 3514 if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) { 3515 bus_dmamap_sync(buf_tag, map, BUS_DMASYNC_PREWRITE); 3516 DBG_COUNTER_INC(tx_encap); 3517 MPASS(pi.ipi_new_pidx < txq->ift_size); 3518 3519 ndesc = pi.ipi_new_pidx - pi.ipi_pidx; 3520 if (pi.ipi_new_pidx < pi.ipi_pidx) { 3521 ndesc += txq->ift_size; 3522 txq->ift_gen = 1; 3523 } 3524 /* 3525 * drivers can need as many as 3526 * two sentinels 3527 */ 3528 MPASS(ndesc <= pi.ipi_nsegs + 2); 3529 MPASS(pi.ipi_new_pidx != pidx); 3530 MPASS(ndesc > 0); 3531 txq->ift_in_use += ndesc; 3532 txq->ift_db_pending += ndesc; 3533 3534 /* 3535 * We update the last software descriptor again here because there may 3536 * be a sentinel and/or there may be more mbufs than segments 3537 */ 3538 txq->ift_pidx = pi.ipi_new_pidx; 3539 txq->ift_npending += pi.ipi_ndescs; 3540 } else { 3541 *m_headp = m_head = iflib_remove_mbuf(txq); 3542 if (err == EFBIG) { 3543 txq->ift_txd_encap_efbig++; 3544 if (remap < 2) { 3545 remap = 1; 3546 goto defrag; 3547 } 3548 } 3549 goto defrag_failed; 3550 } 3551 /* 3552 * err can't possibly be non-zero here, so we don't need to test it 3553 * to see if we need to DBG_COUNTER_INC(encap_txd_encap_fail).
3554 */ 3555 return (err); 3556 3557 defrag_failed: 3558 txq->ift_mbuf_defrag_failed++; 3559 txq->ift_map_failed++; 3560 m_freem(*m_headp); 3561 DBG_COUNTER_INC(tx_frees); 3562 *m_headp = NULL; 3563 DBG_COUNTER_INC(encap_txd_encap_fail); 3564 return (ENOMEM); 3565 } 3566 3567 static void 3568 iflib_tx_desc_free(iflib_txq_t txq, int n) 3569 { 3570 uint32_t qsize, cidx, mask, gen; 3571 struct mbuf *m, **ifsd_m; 3572 bool do_prefetch; 3573 3574 cidx = txq->ift_cidx; 3575 gen = txq->ift_gen; 3576 qsize = txq->ift_size; 3577 mask = qsize-1; 3578 ifsd_m = txq->ift_sds.ifsd_m; 3579 do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH); 3580 3581 while (n-- > 0) { 3582 if (do_prefetch) { 3583 prefetch(ifsd_m[(cidx + 3) & mask]); 3584 prefetch(ifsd_m[(cidx + 4) & mask]); 3585 } 3586 if ((m = ifsd_m[cidx]) != NULL) { 3587 prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]); 3588 if (m->m_pkthdr.csum_flags & CSUM_TSO) { 3589 bus_dmamap_sync(txq->ift_tso_buf_tag, 3590 txq->ift_sds.ifsd_tso_map[cidx], 3591 BUS_DMASYNC_POSTWRITE); 3592 bus_dmamap_unload(txq->ift_tso_buf_tag, 3593 txq->ift_sds.ifsd_tso_map[cidx]); 3594 } else { 3595 bus_dmamap_sync(txq->ift_buf_tag, 3596 txq->ift_sds.ifsd_map[cidx], 3597 BUS_DMASYNC_POSTWRITE); 3598 bus_dmamap_unload(txq->ift_buf_tag, 3599 txq->ift_sds.ifsd_map[cidx]); 3600 } 3601 /* XXX we don't support any drivers that batch packets yet */ 3602 MPASS(m->m_nextpkt == NULL); 3603 m_freem(m); 3604 ifsd_m[cidx] = NULL; 3605 #if MEMORY_LOGGING 3606 txq->ift_dequeued++; 3607 #endif 3608 DBG_COUNTER_INC(tx_frees); 3609 } 3610 if (__predict_false(++cidx == qsize)) { 3611 cidx = 0; 3612 gen = 0; 3613 } 3614 } 3615 txq->ift_cidx = cidx; 3616 txq->ift_gen = gen; 3617 } 3618 3619 static __inline int 3620 iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh) 3621 { 3622 int reclaim; 3623 if_ctx_t ctx = txq->ift_ctx; 3624 3625 KASSERT(thresh >= 0, ("invalid threshold to reclaim")); 3626 MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size); 3627 3628 /* 3629 * Need a rate-limiting check so that this isn't called every time 3630 */ 3631 iflib_tx_credits_update(ctx, txq); 3632 reclaim = DESC_RECLAIMABLE(txq); 3633 3634 if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) { 3635 #ifdef INVARIANTS 3636 if (iflib_verbose_debug) { 3637 printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__, 3638 txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments, 3639 reclaim, thresh); 3640 } 3641 #endif 3642 return (0); 3643 } 3644 iflib_tx_desc_free(txq, reclaim); 3645 txq->ift_cleaned += reclaim; 3646 txq->ift_in_use -= reclaim; 3647 3648 return (reclaim); 3649 } 3650 3651 static struct mbuf ** 3652 _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining) 3653 { 3654 int next, size; 3655 struct mbuf **items; 3656 3657 size = r->size; 3658 next = (cidx + CACHE_PTR_INCREMENT) & (size-1); 3659 items = __DEVOLATILE(struct mbuf **, &r->items[0]); 3660 3661 prefetch(items[(cidx + offset) & (size-1)]); 3662 if (remaining > 1) { 3663 prefetch2cachelines(&items[next]); 3664 prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]); 3665 prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]); 3666 prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]); 3667 } 3668 return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)])); 3669 } 3670 3671 static void 3672 iflib_txq_check_drain(iflib_txq_t txq, int budget) 3673 { 3674 3675 ifmp_ring_check_drainage(txq->ift_br, budget); 3676 } 3677 
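/* * Illustrative sketch (added commentary, not part of the original iflib code): * how the mp_ring callbacks below get wired up. The real attach path lives in * iflib_queues_alloc(); the malloc flag and ring size here are assumptions for * the sake of the example: * * err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain, * iflib_txq_can_drain, M_IFLIB, M_WAITOK); * * Producers then simply enqueue, and the ring serializes calls into * iflib_txq_drain() on whichever thread wins the consumer role: * * ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate); */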
3678 static uint32_t 3679 iflib_txq_can_drain(struct ifmp_ring *r) 3680 { 3681 iflib_txq_t txq = r->cookie; 3682 if_ctx_t ctx = txq->ift_ctx; 3683 3684 if (TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) 3685 return (1); 3686 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, 3687 BUS_DMASYNC_POSTREAD); 3688 return (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, 3689 false)); 3690 } 3691 3692 static uint32_t 3693 iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) 3694 { 3695 iflib_txq_t txq = r->cookie; 3696 if_ctx_t ctx = txq->ift_ctx; 3697 if_t ifp = ctx->ifc_ifp; 3698 struct mbuf *m, **mp; 3699 int avail, bytes_sent, skipped, count, err, i; 3700 int mcast_sent, pkt_sent, reclaimed; 3701 bool do_prefetch, rang, ring; 3702 3703 if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) || 3704 !LINK_ACTIVE(ctx))) { 3705 DBG_COUNTER_INC(txq_drain_notready); 3706 return (0); 3707 } 3708 reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); 3709 rang = iflib_txd_db_check(txq, reclaimed && txq->ift_db_pending); 3710 avail = IDXDIFF(pidx, cidx, r->size); 3711 3712 if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) { 3713 /* 3714 * The driver is unloading so we need to free all pending packets. 3715 */ 3716 DBG_COUNTER_INC(txq_drain_flushing); 3717 for (i = 0; i < avail; i++) { 3718 if (__predict_true(r->items[(cidx + i) & (r->size-1)] != (void *)txq)) 3719 m_freem(r->items[(cidx + i) & (r->size-1)]); 3720 r->items[(cidx + i) & (r->size-1)] = NULL; 3721 } 3722 return (avail); 3723 } 3724 3725 if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) { 3726 txq->ift_qstatus = IFLIB_QUEUE_IDLE; 3727 CALLOUT_LOCK(txq); 3728 callout_stop(&txq->ift_timer); 3729 CALLOUT_UNLOCK(txq); 3730 DBG_COUNTER_INC(txq_drain_oactive); 3731 return (0); 3732 } 3733 3734 /* 3735 * If we've reclaimed any packets this queue cannot be hung. 3736 */ 3737 if (reclaimed) 3738 txq->ift_qstatus = IFLIB_QUEUE_IDLE; 3739 skipped = mcast_sent = bytes_sent = pkt_sent = 0; 3740 count = MIN(avail, TX_BATCH_SIZE); 3741 #ifdef INVARIANTS 3742 if (iflib_verbose_debug) 3743 printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__, 3744 avail, ctx->ifc_flags, TXQ_AVAIL(txq)); 3745 #endif 3746 do_prefetch = (ctx->ifc_flags & IFC_PREFETCH); 3747 err = 0; 3748 for (i = 0; i < count && TXQ_AVAIL(txq) >= MAX_TX_DESC(ctx) + 2; i++) { 3749 int rem = do_prefetch ? count - i : 0; 3750 3751 mp = _ring_peek_one(r, cidx, i, rem); 3752 MPASS(mp != NULL && *mp != NULL); 3753 3754 /* 3755 * Completion interrupts will use the address of the txq 3756 * as a sentinel to enqueue _something_ in order to acquire 3757 * the lock on the mp_ring (there's no direct lock call). 3758 * We obviously have to check for these sentinel cases 3759 * and skip them. 3760 */ 3761 if (__predict_false(*mp == (struct mbuf *)txq)) { 3762 skipped++; 3763 continue; 3764 } 3765 err = iflib_encap(txq, mp); 3766 if (__predict_false(err)) { 3767 /* no room - bail out */ 3768 if (err == ENOBUFS) 3769 break; 3770 skipped++; 3771 /* we can't send this packet - skip it */ 3772 continue; 3773 } 3774 pkt_sent++; 3775 m = *mp; 3776 DBG_COUNTER_INC(tx_sent); 3777 bytes_sent += m->m_pkthdr.len; 3778 mcast_sent += !!(m->m_flags & M_MCAST); 3779 3780 if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) 3781 break; 3782 ETHER_BPF_MTAP(ifp, m); 3783 rang = iflib_txd_db_check(txq, false); 3784 } 3785 3786 /* deliberate use of bitwise or to avoid gratuitous short-circuit */ 3787 ring = rang ?
false : (iflib_min_tx_latency | err); 3788 iflib_txd_db_check(txq, ring); 3789 if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent); 3790 if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent); 3791 if (mcast_sent) 3792 if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent); 3793 #ifdef INVARIANTS 3794 if (iflib_verbose_debug) 3795 printf("consumed=%d\n", skipped + pkt_sent); 3796 #endif 3797 return (skipped + pkt_sent); 3798 } 3799 3800 static uint32_t 3801 iflib_txq_drain_always(struct ifmp_ring *r) 3802 { 3803 return (1); 3804 } 3805 3806 static uint32_t 3807 iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) 3808 { 3809 int i, avail; 3810 struct mbuf **mp; 3811 iflib_txq_t txq; 3812 3813 txq = r->cookie; 3814 3815 txq->ift_qstatus = IFLIB_QUEUE_IDLE; 3816 CALLOUT_LOCK(txq); 3817 callout_stop(&txq->ift_timer); 3818 CALLOUT_UNLOCK(txq); 3819 3820 avail = IDXDIFF(pidx, cidx, r->size); 3821 for (i = 0; i < avail; i++) { 3822 mp = _ring_peek_one(r, cidx, i, avail - i); 3823 if (__predict_false(*mp == (struct mbuf *)txq)) 3824 continue; 3825 m_freem(*mp); 3826 DBG_COUNTER_INC(tx_frees); 3827 } 3828 MPASS(ifmp_ring_is_stalled(r) == 0); 3829 return (avail); 3830 } 3831 3832 static void 3833 iflib_ifmp_purge(iflib_txq_t txq) 3834 { 3835 struct ifmp_ring *r; 3836 3837 r = txq->ift_br; 3838 r->drain = iflib_txq_drain_free; 3839 r->can_drain = iflib_txq_drain_always; 3840 3841 ifmp_ring_check_drainage(r, r->size); 3842 3843 r->drain = iflib_txq_drain; 3844 r->can_drain = iflib_txq_can_drain; 3845 } 3846 3847 static void 3848 _task_fn_tx(void *context) 3849 { 3850 iflib_txq_t txq = context; 3851 if_ctx_t ctx = txq->ift_ctx; 3852 if_t ifp = ctx->ifc_ifp; 3853 int abdicate = ctx->ifc_sysctl_tx_abdicate; 3854 3855 #ifdef IFLIB_DIAGNOSTICS 3856 txq->ift_cpu_exec_count[curcpu]++; 3857 #endif 3858 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) 3859 return; 3860 #ifdef DEV_NETMAP 3861 if ((if_getcapenable(ifp) & IFCAP_NETMAP) && 3862 netmap_tx_irq(ifp, txq->ift_id)) 3863 goto skip_ifmp; 3864 #endif 3865 #ifdef ALTQ 3866 if (ALTQ_IS_ENABLED(&ifp->if_snd)) 3867 iflib_altq_if_start(ifp); 3868 #endif 3869 if (txq->ift_db_pending) 3870 ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate); 3871 else if (!abdicate) 3872 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); 3873 /* 3874 * When abdicating, we always need to check drainage, not just when we don't enqueue 3875 */ 3876 if (abdicate) 3877 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); 3878 #ifdef DEV_NETMAP 3879 skip_ifmp: 3880 #endif 3881 if (ctx->ifc_flags & IFC_LEGACY) 3882 IFDI_INTR_ENABLE(ctx); 3883 else 3884 IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); 3885 } 3886 3887 static void 3888 _task_fn_rx(void *context) 3889 { 3890 iflib_rxq_t rxq = context; 3891 if_ctx_t ctx = rxq->ifr_ctx; 3892 uint8_t more; 3893 uint16_t budget; 3894 #ifdef DEV_NETMAP 3895 u_int work = 0; 3896 int nmirq; 3897 #endif 3898 3899 #ifdef IFLIB_DIAGNOSTICS 3900 rxq->ifr_cpu_exec_count[curcpu]++; 3901 #endif 3902 DBG_COUNTER_INC(task_fn_rxs); 3903 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) 3904 return; 3905 #ifdef DEV_NETMAP 3906 nmirq = netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work); 3907 if (nmirq != NM_IRQ_PASS) { 3908 more = (nmirq == NM_IRQ_RESCHED) ? 
IFLIB_RXEOF_MORE : 0; 3909 goto skip_rxeof; 3910 } 3911 #endif 3912 budget = ctx->ifc_sysctl_rx_budget; 3913 if (budget == 0) 3914 budget = 16; /* XXX */ 3915 more = iflib_rxeof(rxq, budget); 3916 #ifdef DEV_NETMAP 3917 skip_rxeof: 3918 #endif 3919 if ((more & IFLIB_RXEOF_MORE) == 0) { 3920 if (ctx->ifc_flags & IFC_LEGACY) 3921 IFDI_INTR_ENABLE(ctx); 3922 else 3923 IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); 3924 DBG_COUNTER_INC(rx_intr_enables); 3925 } 3926 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) 3927 return; 3928 3929 if (more & IFLIB_RXEOF_MORE) 3930 GROUPTASK_ENQUEUE(&rxq->ifr_task); 3931 else if (more & IFLIB_RXEOF_EMPTY) 3932 callout_reset_curcpu(&rxq->ifr_watchdog, 1, &_task_fn_rx_watchdog, rxq); 3933 } 3934 3935 static void 3936 _task_fn_admin(void *context) 3937 { 3938 if_ctx_t ctx = context; 3939 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 3940 iflib_txq_t txq; 3941 int i; 3942 bool oactive, running, do_reset, do_watchdog, in_detach; 3943 3944 STATE_LOCK(ctx); 3945 running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING); 3946 oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE); 3947 do_reset = (ctx->ifc_flags & IFC_DO_RESET); 3948 do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG); 3949 in_detach = (ctx->ifc_flags & IFC_IN_DETACH); 3950 ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG); 3951 STATE_UNLOCK(ctx); 3952 3953 if ((!running && !oactive) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN)) 3954 return; 3955 if (in_detach) 3956 return; 3957 3958 CTX_LOCK(ctx); 3959 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) { 3960 CALLOUT_LOCK(txq); 3961 callout_stop(&txq->ift_timer); 3962 CALLOUT_UNLOCK(txq); 3963 } 3964 if (do_watchdog) { 3965 ctx->ifc_watchdog_events++; 3966 IFDI_WATCHDOG_RESET(ctx); 3967 } 3968 IFDI_UPDATE_ADMIN_STATUS(ctx); 3969 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) { 3970 callout_reset_on(&txq->ift_timer, iflib_timer_default, iflib_timer, txq, 3971 txq->ift_timer.c_cpu); 3972 } 3973 IFDI_LINK_INTR_ENABLE(ctx); 3974 if (do_reset) 3975 iflib_if_init_locked(ctx); 3976 CTX_UNLOCK(ctx); 3977 3978 if (LINK_ACTIVE(ctx) == 0) 3979 return; 3980 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) 3981 iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); 3982 } 3983 3984 static void 3985 _task_fn_iov(void *context) 3986 { 3987 if_ctx_t ctx = context; 3988 3989 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) && 3990 !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN)) 3991 return; 3992 3993 CTX_LOCK(ctx); 3994 IFDI_VFLR_HANDLE(ctx); 3995 CTX_UNLOCK(ctx); 3996 } 3997 3998 static int 3999 iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS) 4000 { 4001 int err; 4002 if_int_delay_info_t info; 4003 if_ctx_t ctx; 4004 4005 info = (if_int_delay_info_t)arg1; 4006 ctx = info->iidi_ctx; 4007 info->iidi_req = req; 4008 info->iidi_oidp = oidp; 4009 CTX_LOCK(ctx); 4010 err = IFDI_SYSCTL_INT_DELAY(ctx, info); 4011 CTX_UNLOCK(ctx); 4012 return (err); 4013 } 4014 4015 /********************************************************************* 4016 * 4017 * IFNET FUNCTIONS 4018 * 4019 **********************************************************************/ 4020 4021 static void 4022 iflib_if_init_locked(if_ctx_t ctx) 4023 { 4024 iflib_stop(ctx); 4025 iflib_init_locked(ctx); 4026 } 4027 4028 static void 4029 iflib_if_init(void *arg) 4030 { 4031 if_ctx_t ctx = arg; 4032 4033 CTX_LOCK(ctx); 4034 iflib_if_init_locked(ctx); 4035 CTX_UNLOCK(ctx); 4036 } 4037 4038 static int 4039 
iflib_if_transmit(if_t ifp, struct mbuf *m) 4040 { 4041 if_ctx_t ctx = if_getsoftc(ifp); 4042 4043 iflib_txq_t txq; 4044 int err, qidx; 4045 int abdicate = ctx->ifc_sysctl_tx_abdicate; 4046 4047 if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) { 4048 DBG_COUNTER_INC(tx_frees); 4049 m_freem(m); 4050 return (ENETDOWN); 4051 } 4052 4053 MPASS(m->m_nextpkt == NULL); 4054 /* ALTQ-enabled interfaces always use queue 0. */ 4055 qidx = 0; 4056 if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m) && !ALTQ_IS_ENABLED(&ifp->if_snd)) 4057 qidx = QIDX(ctx, m); 4058 /* 4059 * XXX calculate buf_ring based on flowid (divvy up bits?) 4060 */ 4061 txq = &ctx->ifc_txqs[qidx]; 4062 4063 #ifdef DRIVER_BACKPRESSURE 4064 if (txq->ift_closed) { 4065 while (m != NULL) { 4066 next = m->m_nextpkt; 4067 m->m_nextpkt = NULL; 4068 m_freem(m); 4069 DBG_COUNTER_INC(tx_frees); 4070 m = next; 4071 } 4072 return (ENOBUFS); 4073 } 4074 #endif 4075 #ifdef notyet 4076 qidx = count = 0; 4077 mp = marr; 4078 next = m; 4079 do { 4080 count++; 4081 next = next->m_nextpkt; 4082 } while (next != NULL); 4083 4084 if (count > nitems(marr)) 4085 if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) { 4086 /* XXX check nextpkt */ 4087 m_freem(m); 4088 /* XXX simplify for now */ 4089 DBG_COUNTER_INC(tx_frees); 4090 return (ENOBUFS); 4091 } 4092 for (next = m, i = 0; next != NULL; i++) { 4093 mp[i] = next; 4094 next = next->m_nextpkt; 4095 mp[i]->m_nextpkt = NULL; 4096 } 4097 #endif 4098 DBG_COUNTER_INC(tx_seen); 4099 err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate); 4100 4101 if (abdicate) 4102 GROUPTASK_ENQUEUE(&txq->ift_task); 4103 if (err) { 4104 if (!abdicate) 4105 GROUPTASK_ENQUEUE(&txq->ift_task); 4106 /* support forthcoming later */ 4107 #ifdef DRIVER_BACKPRESSURE 4108 txq->ift_closed = TRUE; 4109 #endif 4110 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); 4111 m_freem(m); 4112 DBG_COUNTER_INC(tx_frees); 4113 } 4114 4115 return (err); 4116 } 4117 4118 #ifdef ALTQ 4119 /* 4120 * The overall approach to integrating iflib with ALTQ is to continue to use 4121 * the iflib mp_ring machinery between the ALTQ queue(s) and the hardware 4122 * ring. Technically, when using ALTQ, queueing to an intermediate mp_ring 4123 * is redundant/unnecessary, but doing so minimizes the amount of 4124 * ALTQ-specific code required in iflib. It is assumed that the overhead of 4125 * redundantly queueing to an intermediate mp_ring is swamped by the 4126 * performance limitations inherent in using ALTQ. 4127 * 4128 * When ALTQ support is compiled in, all iflib drivers will use a transmit 4129 * routine, iflib_altq_if_transmit(), that checks if ALTQ is enabled for the 4130 * given interface. If ALTQ is enabled for an interface, then all 4131 * transmitted packets for that interface will be submitted to the ALTQ 4132 * subsystem via IFQ_ENQUEUE(). We don't use the legacy if_transmit() 4133 * implementation because it uses IFQ_HANDOFF(), which will duplicatively 4134 * update stats that the iflib machinery handles, and which is sensitive to 4135 * the disused IFF_DRV_OACTIVE flag. Additionally, iflib_altq_if_start() 4136 * will be installed as the start routine for use by ALTQ facilities that 4137 * need to trigger queue drains on a scheduled basis.
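 *
 * To illustrate, with ALTQ enabled on an interface the transmit path
 * becomes:
 *
 *	if_transmit() -> iflib_altq_if_transmit() -> IFQ_ENQUEUE() ->
 *	iflib_altq_if_start() -> IFQ_DEQUEUE_NOLOCK() -> iflib_if_transmit()
 *
 * i.e. packets take a detour through the ALTQ queue before landing in
 * the per-queue mp_ring exactly as in the non-ALTQ case.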
4138 * 4139 */ 4140 static void 4141 iflib_altq_if_start(if_t ifp) 4142 { 4143 struct ifaltq *ifq = &ifp->if_snd; 4144 struct mbuf *m; 4145 4146 IFQ_LOCK(ifq); 4147 IFQ_DEQUEUE_NOLOCK(ifq, m); 4148 while (m != NULL) { 4149 iflib_if_transmit(ifp, m); 4150 IFQ_DEQUEUE_NOLOCK(ifq, m); 4151 } 4152 IFQ_UNLOCK(ifq); 4153 } 4154 4155 static int 4156 iflib_altq_if_transmit(if_t ifp, struct mbuf *m) 4157 { 4158 int err; 4159 4160 if (ALTQ_IS_ENABLED(&ifp->if_snd)) { 4161 IFQ_ENQUEUE(&ifp->if_snd, m, err); 4162 if (err == 0) 4163 iflib_altq_if_start(ifp); 4164 } else 4165 err = iflib_if_transmit(ifp, m); 4166 4167 return (err); 4168 } 4169 #endif /* ALTQ */ 4170 4171 static void 4172 iflib_if_qflush(if_t ifp) 4173 { 4174 if_ctx_t ctx = if_getsoftc(ifp); 4175 iflib_txq_t txq = ctx->ifc_txqs; 4176 int i; 4177 4178 STATE_LOCK(ctx); 4179 ctx->ifc_flags |= IFC_QFLUSH; 4180 STATE_UNLOCK(ctx); 4181 for (i = 0; i < NTXQSETS(ctx); i++, txq++) 4182 while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br))) 4183 iflib_txq_check_drain(txq, 0); 4184 STATE_LOCK(ctx); 4185 ctx->ifc_flags &= ~IFC_QFLUSH; 4186 STATE_UNLOCK(ctx); 4187 4188 /* 4189 * When ALTQ is enabled, this will also take care of purging the 4190 * ALTQ queue(s). 4191 */ 4192 if_qflush(ifp); 4193 } 4194 4195 #define IFCAP_FLAGS (IFCAP_HWCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \ 4196 IFCAP_TSO | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \ 4197 IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | \ 4198 IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM | IFCAP_NOMAP) 4199 4200 static int 4201 iflib_if_ioctl(if_t ifp, u_long command, caddr_t data) 4202 { 4203 if_ctx_t ctx = if_getsoftc(ifp); 4204 struct ifreq *ifr = (struct ifreq *)data; 4205 #if defined(INET) || defined(INET6) 4206 struct ifaddr *ifa = (struct ifaddr *)data; 4207 #endif 4208 bool avoid_reset = false; 4209 int err = 0, reinit = 0, bits; 4210 4211 switch (command) { 4212 case SIOCSIFADDR: 4213 #ifdef INET 4214 if (ifa->ifa_addr->sa_family == AF_INET) 4215 avoid_reset = true; 4216 #endif 4217 #ifdef INET6 4218 if (ifa->ifa_addr->sa_family == AF_INET6) 4219 avoid_reset = true; 4220 #endif 4221 /* 4222 ** Calling init results in link renegotiation, 4223 ** so we avoid doing it when possible. 
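** (A full reinit is only scheduled here when the interface is marked
** up but not yet running; see the IFF_DRV_RUNNING test below.)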
4224 */ 4225 if (avoid_reset) { 4226 if_setflagbits(ifp, IFF_UP,0); 4227 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) 4228 reinit = 1; 4229 #ifdef INET 4230 if (!(if_getflags(ifp) & IFF_NOARP)) 4231 arp_ifinit(ifp, ifa); 4232 #endif 4233 } else 4234 err = ether_ioctl(ifp, command, data); 4235 break; 4236 case SIOCSIFMTU: 4237 CTX_LOCK(ctx); 4238 if (ifr->ifr_mtu == if_getmtu(ifp)) { 4239 CTX_UNLOCK(ctx); 4240 break; 4241 } 4242 bits = if_getdrvflags(ifp); 4243 /* stop the driver and free any clusters before proceeding */ 4244 iflib_stop(ctx); 4245 4246 if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) { 4247 STATE_LOCK(ctx); 4248 if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size) 4249 ctx->ifc_flags |= IFC_MULTISEG; 4250 else 4251 ctx->ifc_flags &= ~IFC_MULTISEG; 4252 STATE_UNLOCK(ctx); 4253 err = if_setmtu(ifp, ifr->ifr_mtu); 4254 } 4255 iflib_init_locked(ctx); 4256 STATE_LOCK(ctx); 4257 if_setdrvflags(ifp, bits); 4258 STATE_UNLOCK(ctx); 4259 CTX_UNLOCK(ctx); 4260 break; 4261 case SIOCSIFFLAGS: 4262 CTX_LOCK(ctx); 4263 if (if_getflags(ifp) & IFF_UP) { 4264 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4265 if ((if_getflags(ifp) ^ ctx->ifc_if_flags) & 4266 (IFF_PROMISC | IFF_ALLMULTI)) { 4267 CTX_UNLOCK(ctx); 4268 err = IFDI_PROMISC_SET(ctx, if_getflags(ifp)); 4269 CTX_LOCK(ctx); 4270 } 4271 } else 4272 reinit = 1; 4273 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4274 iflib_stop(ctx); 4275 } 4276 ctx->ifc_if_flags = if_getflags(ifp); 4277 CTX_UNLOCK(ctx); 4278 break; 4279 case SIOCADDMULTI: 4280 case SIOCDELMULTI: 4281 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4282 CTX_LOCK(ctx); 4283 IFDI_INTR_DISABLE(ctx); 4284 IFDI_MULTI_SET(ctx); 4285 IFDI_INTR_ENABLE(ctx); 4286 CTX_UNLOCK(ctx); 4287 } 4288 break; 4289 case SIOCSIFMEDIA: 4290 CTX_LOCK(ctx); 4291 IFDI_MEDIA_SET(ctx); 4292 CTX_UNLOCK(ctx); 4293 /* FALLTHROUGH */ 4294 case SIOCGIFMEDIA: 4295 case SIOCGIFXMEDIA: 4296 err = ifmedia_ioctl(ifp, ifr, ctx->ifc_mediap, command); 4297 break; 4298 case SIOCGI2C: 4299 { 4300 struct ifi2creq i2c; 4301 4302 err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 4303 if (err != 0) 4304 break; 4305 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { 4306 err = EINVAL; 4307 break; 4308 } 4309 if (i2c.len > sizeof(i2c.data)) { 4310 err = EINVAL; 4311 break; 4312 } 4313 4314 if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0) 4315 err = copyout(&i2c, ifr_data_get_ptr(ifr), 4316 sizeof(i2c)); 4317 break; 4318 } 4319 case SIOCSIFCAP: 4320 { 4321 int mask, setmask, oldmask; 4322 4323 oldmask = if_getcapenable(ifp); 4324 mask = ifr->ifr_reqcap ^ oldmask; 4325 mask &= ctx->ifc_softc_ctx.isc_capabilities | IFCAP_NOMAP; 4326 setmask = 0; 4327 #ifdef TCP_OFFLOAD 4328 setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6); 4329 #endif 4330 setmask |= (mask & IFCAP_FLAGS); 4331 setmask |= (mask & IFCAP_WOL); 4332 4333 /* 4334 * If any RX csum has changed, change all the ones that 4335 * are supported by the driver. 
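	 * For example, a request that toggles only IFCAP_RXCSUM will also
	 * toggle IFCAP_RXCSUM_IPV6 when the hardware supports both, keeping
	 * the two in sync.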
4336 */ 4337 if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) { 4338 setmask |= ctx->ifc_softc_ctx.isc_capabilities & 4339 (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6); 4340 } 4341 4342 /* 4343 * want to ensure that traffic has stopped before we change any of the flags 4344 */ 4345 if (setmask) { 4346 CTX_LOCK(ctx); 4347 bits = if_getdrvflags(ifp); 4348 if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL) 4349 iflib_stop(ctx); 4350 STATE_LOCK(ctx); 4351 if_togglecapenable(ifp, setmask); 4352 STATE_UNLOCK(ctx); 4353 if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL) 4354 iflib_init_locked(ctx); 4355 STATE_LOCK(ctx); 4356 if_setdrvflags(ifp, bits); 4357 STATE_UNLOCK(ctx); 4358 CTX_UNLOCK(ctx); 4359 } 4360 if_vlancap(ifp); 4361 break; 4362 } 4363 case SIOCGPRIVATE_0: 4364 case SIOCSDRVSPEC: 4365 case SIOCGDRVSPEC: 4366 CTX_LOCK(ctx); 4367 err = IFDI_PRIV_IOCTL(ctx, command, data); 4368 CTX_UNLOCK(ctx); 4369 break; 4370 default: 4371 err = ether_ioctl(ifp, command, data); 4372 break; 4373 } 4374 if (reinit) 4375 iflib_if_init(ctx); 4376 return (err); 4377 } 4378 4379 static uint64_t 4380 iflib_if_get_counter(if_t ifp, ift_counter cnt) 4381 { 4382 if_ctx_t ctx = if_getsoftc(ifp); 4383 4384 return (IFDI_GET_COUNTER(ctx, cnt)); 4385 } 4386 4387 /********************************************************************* 4388 * 4389 * OTHER FUNCTIONS EXPORTED TO THE STACK 4390 * 4391 **********************************************************************/ 4392 4393 static void 4394 iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag) 4395 { 4396 if_ctx_t ctx = if_getsoftc(ifp); 4397 4398 if ((void *)ctx != arg) 4399 return; 4400 4401 if ((vtag == 0) || (vtag > 4095)) 4402 return; 4403 4404 if (iflib_in_detach(ctx)) 4405 return; 4406 4407 CTX_LOCK(ctx); 4408 /* Driver may need all untagged packets to be flushed */ 4409 if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG)) 4410 iflib_stop(ctx); 4411 IFDI_VLAN_REGISTER(ctx, vtag); 4412 /* Re-init to load the changes, if required */ 4413 if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG)) 4414 iflib_init_locked(ctx); 4415 CTX_UNLOCK(ctx); 4416 } 4417 4418 static void 4419 iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag) 4420 { 4421 if_ctx_t ctx = if_getsoftc(ifp); 4422 4423 if ((void *)ctx != arg) 4424 return; 4425 4426 if ((vtag == 0) || (vtag > 4095)) 4427 return; 4428 4429 CTX_LOCK(ctx); 4430 /* Driver may need all tagged packets to be flushed */ 4431 if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG)) 4432 iflib_stop(ctx); 4433 IFDI_VLAN_UNREGISTER(ctx, vtag); 4434 /* Re-init to load the changes, if required */ 4435 if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG)) 4436 iflib_init_locked(ctx); 4437 CTX_UNLOCK(ctx); 4438 } 4439 4440 static void 4441 iflib_led_func(void *arg, int onoff) 4442 { 4443 if_ctx_t ctx = arg; 4444 4445 CTX_LOCK(ctx); 4446 IFDI_LED_FUNC(ctx, onoff); 4447 CTX_UNLOCK(ctx); 4448 } 4449 4450 /********************************************************************* 4451 * 4452 * BUS FUNCTION DEFINITIONS 4453 * 4454 **********************************************************************/ 4455 4456 int 4457 iflib_device_probe(device_t dev) 4458 { 4459 const pci_vendor_info_t *ent; 4460 if_shared_ctx_t sctx; 4461 uint16_t pci_device_id, pci_rev_id, pci_subdevice_id, pci_subvendor_id; 4462 uint16_t pci_vendor_id; 4463 4464 if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) 4465 return (ENOTSUP); 4466 4467 pci_vendor_id = pci_get_vendor(dev); 4468 pci_device_id = pci_get_device(dev); 4469 
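	/* The remaining IDs are consulted by the isc_vendor_info table match below. */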
pci_subvendor_id = pci_get_subvendor(dev); 4470 pci_subdevice_id = pci_get_subdevice(dev); 4471 pci_rev_id = pci_get_revid(dev); 4472 if (sctx->isc_parse_devinfo != NULL) 4473 sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id); 4474 4475 ent = sctx->isc_vendor_info; 4476 while (ent->pvi_vendor_id != 0) { 4477 if (pci_vendor_id != ent->pvi_vendor_id) { 4478 ent++; 4479 continue; 4480 } 4481 if ((pci_device_id == ent->pvi_device_id) && 4482 ((pci_subvendor_id == ent->pvi_subvendor_id) || 4483 (ent->pvi_subvendor_id == 0)) && 4484 ((pci_subdevice_id == ent->pvi_subdevice_id) || 4485 (ent->pvi_subdevice_id == 0)) && 4486 ((pci_rev_id == ent->pvi_rev_id) || 4487 (ent->pvi_rev_id == 0))) { 4488 device_set_desc_copy(dev, ent->pvi_name); 4489 /* this needs to be changed to zero if the bus probing code 4490 * ever stops re-probing on best match because the sctx 4491 * may have its values overwritten by register calls 4492 * in subsequent probes 4493 */ 4494 return (BUS_PROBE_DEFAULT); 4495 } 4496 ent++; 4497 } 4498 return (ENXIO); 4499 } 4500 4501 int 4502 iflib_device_probe_vendor(device_t dev) 4503 { 4504 int probe; 4505 4506 probe = iflib_device_probe(dev); 4507 if (probe == BUS_PROBE_DEFAULT) 4508 return (BUS_PROBE_VENDOR); 4509 else 4510 return (probe); 4511 } 4512 4513 static void 4514 iflib_reset_qvalues(if_ctx_t ctx) 4515 { 4516 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 4517 if_shared_ctx_t sctx = ctx->ifc_sctx; 4518 device_t dev = ctx->ifc_dev; 4519 int i; 4520 4521 if (ctx->ifc_sysctl_ntxqs != 0) 4522 scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs; 4523 if (ctx->ifc_sysctl_nrxqs != 0) 4524 scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs; 4525 4526 for (i = 0; i < sctx->isc_ntxqs; i++) { 4527 if (ctx->ifc_sysctl_ntxds[i] != 0) 4528 scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i]; 4529 else 4530 scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i]; 4531 } 4532 4533 for (i = 0; i < sctx->isc_nrxqs; i++) { 4534 if (ctx->ifc_sysctl_nrxds[i] != 0) 4535 scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i]; 4536 else 4537 scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i]; 4538 } 4539 4540 for (i = 0; i < sctx->isc_nrxqs; i++) { 4541 if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) { 4542 device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n", 4543 i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]); 4544 scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i]; 4545 } 4546 if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) { 4547 device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n", 4548 i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]); 4549 scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i]; 4550 } 4551 if (!powerof2(scctx->isc_nrxd[i])) { 4552 device_printf(dev, "nrxd%d: %d is not a power of 2 - using default value of %d\n", 4553 i, scctx->isc_nrxd[i], sctx->isc_nrxd_default[i]); 4554 scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i]; 4555 } 4556 } 4557 4558 for (i = 0; i < sctx->isc_ntxqs; i++) { 4559 if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) { 4560 device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n", 4561 i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]); 4562 scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i]; 4563 } 4564 if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) { 4565 device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n", 4566 i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]); 4567 scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i]; 4568 } 4569 if (!powerof2(scctx->isc_ntxd[i])) { 4570 device_printf(dev, "ntxd%d: %d is not a power
of 2 - using default value of %d\n", 4571 i, scctx->isc_ntxd[i], sctx->isc_ntxd_default[i]); 4572 scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i]; 4573 } 4574 } 4575 } 4576 4577 static void 4578 iflib_add_pfil(if_ctx_t ctx) 4579 { 4580 struct pfil_head *pfil; 4581 struct pfil_head_args pa; 4582 iflib_rxq_t rxq; 4583 int i; 4584 4585 pa.pa_version = PFIL_VERSION; 4586 pa.pa_flags = PFIL_IN; 4587 pa.pa_type = PFIL_TYPE_ETHERNET; 4588 pa.pa_headname = ctx->ifc_ifp->if_xname; 4589 pfil = pfil_head_register(&pa); 4590 4591 for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { 4592 rxq->pfil = pfil; 4593 } 4594 } 4595 4596 static void 4597 iflib_rem_pfil(if_ctx_t ctx) 4598 { 4599 struct pfil_head *pfil; 4600 iflib_rxq_t rxq; 4601 int i; 4602 4603 rxq = ctx->ifc_rxqs; 4604 pfil = rxq->pfil; 4605 for (i = 0; i < NRXQSETS(ctx); i++, rxq++) { 4606 rxq->pfil = NULL; 4607 } 4608 pfil_head_unregister(pfil); 4609 } 4610 4611 static uint16_t 4612 get_ctx_core_offset(if_ctx_t ctx) 4613 { 4614 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 4615 struct cpu_offset *op; 4616 uint16_t qc; 4617 uint16_t ret = ctx->ifc_sysctl_core_offset; 4618 4619 if (ret != CORE_OFFSET_UNSPECIFIED) 4620 return (ret); 4621 4622 if (ctx->ifc_sysctl_separate_txrx) 4623 qc = scctx->isc_ntxqsets + scctx->isc_nrxqsets; 4624 else 4625 qc = max(scctx->isc_ntxqsets, scctx->isc_nrxqsets); 4626 4627 mtx_lock(&cpu_offset_mtx); 4628 SLIST_FOREACH(op, &cpu_offsets, entries) { 4629 if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) { 4630 ret = op->offset; 4631 op->offset += qc; 4632 MPASS(op->refcount < UINT_MAX); 4633 op->refcount++; 4634 break; 4635 } 4636 } 4637 if (ret == CORE_OFFSET_UNSPECIFIED) { 4638 ret = 0; 4639 op = malloc(sizeof(struct cpu_offset), M_IFLIB, 4640 M_NOWAIT | M_ZERO); 4641 if (op == NULL) { 4642 device_printf(ctx->ifc_dev, 4643 "allocation for cpu offset failed.\n"); 4644 } else { 4645 op->offset = qc; 4646 op->refcount = 1; 4647 CPU_COPY(&ctx->ifc_cpus, &op->set); 4648 SLIST_INSERT_HEAD(&cpu_offsets, op, entries); 4649 } 4650 } 4651 mtx_unlock(&cpu_offset_mtx); 4652 4653 return (ret); 4654 } 4655 4656 static void 4657 unref_ctx_core_offset(if_ctx_t ctx) 4658 { 4659 struct cpu_offset *op, *top; 4660 4661 mtx_lock(&cpu_offset_mtx); 4662 SLIST_FOREACH_SAFE(op, &cpu_offsets, entries, top) { 4663 if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) { 4664 MPASS(op->refcount > 0); 4665 op->refcount--; 4666 if (op->refcount == 0) { 4667 SLIST_REMOVE(&cpu_offsets, op, cpu_offset, entries); 4668 free(op, M_IFLIB); 4669 } 4670 break; 4671 } 4672 } 4673 mtx_unlock(&cpu_offset_mtx); 4674 } 4675 4676 int 4677 iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp) 4678 { 4679 if_ctx_t ctx; 4680 if_t ifp; 4681 if_softc_ctx_t scctx; 4682 kobjop_desc_t kobj_desc; 4683 kobj_method_t *kobj_method; 4684 int err, msix, rid; 4685 int num_txd, num_rxd; 4686 4687 ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO); 4688 4689 if (sc == NULL) { 4690 sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); 4691 device_set_softc(dev, ctx); 4692 ctx->ifc_flags |= IFC_SC_ALLOCATED; 4693 } 4694 4695 ctx->ifc_sctx = sctx; 4696 ctx->ifc_dev = dev; 4697 ctx->ifc_softc = sc; 4698 4699 if ((err = iflib_register(ctx)) != 0) { 4700 device_printf(dev, "iflib_register failed %d\n", err); 4701 goto fail_ctx_free; 4702 } 4703 iflib_add_device_sysctl_pre(ctx); 4704 4705 scctx = &ctx->ifc_softc_ctx; 4706 ifp = ctx->ifc_ifp; 4707 4708 iflib_reset_qvalues(ctx); 4709 CTX_LOCK(ctx); 4710 if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { 4711 
device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); 4712 goto fail_unlock; 4713 } 4714 _iflib_pre_assert(scctx); 4715 ctx->ifc_txrx = *scctx->isc_txrx; 4716 4717 if (sctx->isc_flags & IFLIB_DRIVER_MEDIA) 4718 ctx->ifc_mediap = scctx->isc_media; 4719 4720 #ifdef INVARIANTS 4721 if (scctx->isc_capabilities & IFCAP_TXCSUM) 4722 MPASS(scctx->isc_tx_csum_flags); 4723 #endif 4724 4725 if_setcapabilities(ifp, 4726 scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_NOMAP); 4727 if_setcapenable(ifp, 4728 scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_NOMAP); 4729 4730 if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets)) 4731 scctx->isc_ntxqsets = scctx->isc_ntxqsets_max; 4732 if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets)) 4733 scctx->isc_nrxqsets = scctx->isc_nrxqsets_max; 4734 4735 num_txd = iflib_num_tx_descs(ctx); 4736 num_rxd = iflib_num_rx_descs(ctx); 4737 4738 /* XXX change for per-queue sizes */ 4739 device_printf(dev, "Using %d TX descriptors and %d RX descriptors\n", 4740 num_txd, num_rxd); 4741 4742 if (scctx->isc_tx_nsegments > num_txd / MAX_SINGLE_PACKET_FRACTION) 4743 scctx->isc_tx_nsegments = max(1, num_txd / 4744 MAX_SINGLE_PACKET_FRACTION); 4745 if (scctx->isc_tx_tso_segments_max > num_txd / 4746 MAX_SINGLE_PACKET_FRACTION) 4747 scctx->isc_tx_tso_segments_max = max(1, 4748 num_txd / MAX_SINGLE_PACKET_FRACTION); 4749 4750 /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */ 4751 if (if_getcapabilities(ifp) & IFCAP_TSO) { 4752 /* 4753 * The stack can't handle a TSO size larger than IP_MAXPACKET, 4754 * but some MACs do. 4755 */ 4756 if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max, 4757 IP_MAXPACKET)); 4758 /* 4759 * Take maximum number of m_pullup(9)'s in iflib_parse_header() 4760 * into account. In the worst case, each of these calls will 4761 * add another mbuf and, thus, the requirement for another DMA 4762 * segment. So for best performance, it doesn't make sense to 4763 * advertize a maximum of TSO segments that typically will 4764 * require defragmentation in iflib_encap(). 4765 */ 4766 if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3); 4767 if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max); 4768 } 4769 if (scctx->isc_rss_table_size == 0) 4770 scctx->isc_rss_table_size = 64; 4771 scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1; 4772 4773 GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx); 4774 /* XXX format name */ 4775 taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, 4776 NULL, NULL, "admin"); 4777 4778 /* Set up cpu set. If it fails, use the set of all CPUs. */ 4779 if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) { 4780 device_printf(dev, "Unable to fetch CPU list\n"); 4781 CPU_COPY(&all_cpus, &ctx->ifc_cpus); 4782 } 4783 MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0); 4784 4785 /* 4786 ** Now set up MSI or MSI-X, should return us the number of supported 4787 ** vectors (will be 1 for a legacy interrupt and MSI). 4788 */ 4789 if (sctx->isc_flags & IFLIB_SKIP_MSIX) { 4790 msix = scctx->isc_vectors; 4791 } else if (scctx->isc_msix_bar != 0) 4792 /* 4793 * The simple fact that isc_msix_bar is not 0 does not mean we 4794 * we have a good value there that is known to work. 
4795 */ 4796 msix = iflib_msix_init(ctx); 4797 else { 4798 scctx->isc_vectors = 1; 4799 scctx->isc_ntxqsets = 1; 4800 scctx->isc_nrxqsets = 1; 4801 scctx->isc_intr = IFLIB_INTR_LEGACY; 4802 msix = 0; 4803 } 4804 /* Get memory for the station queues */ 4805 if ((err = iflib_queues_alloc(ctx))) { 4806 device_printf(dev, "Unable to allocate queue memory\n"); 4807 goto fail_intr_free; 4808 } 4809 4810 if ((err = iflib_qset_structures_setup(ctx))) 4811 goto fail_queues; 4812 4813 /* 4814 * Now that we know how many queues there are, get the core offset. 4815 */ 4816 ctx->ifc_sysctl_core_offset = get_ctx_core_offset(ctx); 4817 4818 if (msix > 1) { 4819 /* 4820 * When using MSI-X, ensure that ifdi_{r,t}x_queue_intr_enable 4821 * aren't the default NULL implementation. 4822 */ 4823 kobj_desc = &ifdi_rx_queue_intr_enable_desc; 4824 kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL, 4825 kobj_desc); 4826 if (kobj_method == &kobj_desc->deflt) { 4827 device_printf(dev, 4828 "MSI-X requires ifdi_rx_queue_intr_enable method"); 4829 err = EOPNOTSUPP; 4830 goto fail_queues; 4831 } 4832 kobj_desc = &ifdi_tx_queue_intr_enable_desc; 4833 kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL, 4834 kobj_desc); 4835 if (kobj_method == &kobj_desc->deflt) { 4836 device_printf(dev, 4837 "MSI-X requires ifdi_tx_queue_intr_enable method"); 4838 err = EOPNOTSUPP; 4839 goto fail_queues; 4840 } 4841 4842 /* 4843 * Assign the MSI-X vectors. 4844 * Note that the default NULL ifdi_msix_intr_assign method will 4845 * fail here, too. 4846 */ 4847 err = IFDI_MSIX_INTR_ASSIGN(ctx, msix); 4848 if (err != 0) { 4849 device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", 4850 err); 4851 goto fail_queues; 4852 } 4853 } else if (scctx->isc_intr != IFLIB_INTR_MSIX) { 4854 rid = 0; 4855 if (scctx->isc_intr == IFLIB_INTR_MSI) { 4856 MPASS(msix == 1); 4857 rid = 1; 4858 } 4859 if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) { 4860 device_printf(dev, "iflib_legacy_setup failed %d\n", err); 4861 goto fail_queues; 4862 } 4863 } else { 4864 device_printf(dev, 4865 "Cannot use iflib with only 1 MSI-X interrupt!\n"); 4866 err = ENODEV; 4867 goto fail_intr_free; 4868 } 4869 4870 ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet); 4871 4872 if ((err = IFDI_ATTACH_POST(ctx)) != 0) { 4873 device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); 4874 goto fail_detach; 4875 } 4876 4877 /* 4878 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported. 4879 * This must appear after the call to ether_ifattach() because 4880 * ether_ifattach() sets if_hdrlen to the default value. 
4881 */ 4882 if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) 4883 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 4884 4885 if ((err = iflib_netmap_attach(ctx))) { 4886 device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err); 4887 goto fail_detach; 4888 } 4889 *ctxp = ctx; 4890 4891 DEBUGNET_SET(ctx->ifc_ifp, iflib); 4892 4893 if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); 4894 iflib_add_device_sysctl_post(ctx); 4895 iflib_add_pfil(ctx); 4896 ctx->ifc_flags |= IFC_INIT_DONE; 4897 CTX_UNLOCK(ctx); 4898 4899 return (0); 4900 4901 fail_detach: 4902 ether_ifdetach(ctx->ifc_ifp); 4903 fail_intr_free: 4904 iflib_free_intr_mem(ctx); 4905 fail_queues: 4906 iflib_tx_structures_free(ctx); 4907 iflib_rx_structures_free(ctx); 4908 iflib_tqg_detach(ctx); 4909 IFDI_DETACH(ctx); 4910 fail_unlock: 4911 CTX_UNLOCK(ctx); 4912 iflib_deregister(ctx); 4913 fail_ctx_free: 4914 device_set_softc(ctx->ifc_dev, NULL); 4915 if (ctx->ifc_flags & IFC_SC_ALLOCATED) 4916 free(ctx->ifc_softc, M_IFLIB); 4917 free(ctx, M_IFLIB); 4918 return (err); 4919 } 4920 4921 int 4922 iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp, 4923 struct iflib_cloneattach_ctx *clctx) 4924 { 4925 int num_txd, num_rxd; 4926 int err; 4927 if_ctx_t ctx; 4928 if_t ifp; 4929 if_softc_ctx_t scctx; 4930 int i; 4931 void *sc; 4932 4933 ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO); 4934 sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); 4935 ctx->ifc_flags |= IFC_SC_ALLOCATED; 4936 if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL)) 4937 ctx->ifc_flags |= IFC_PSEUDO; 4938 4939 ctx->ifc_sctx = sctx; 4940 ctx->ifc_softc = sc; 4941 ctx->ifc_dev = dev; 4942 4943 if ((err = iflib_register(ctx)) != 0) { 4944 device_printf(dev, "%s: iflib_register failed %d\n", __func__, err); 4945 goto fail_ctx_free; 4946 } 4947 iflib_add_device_sysctl_pre(ctx); 4948 4949 scctx = &ctx->ifc_softc_ctx; 4950 ifp = ctx->ifc_ifp; 4951 4952 iflib_reset_qvalues(ctx); 4953 CTX_LOCK(ctx); 4954 if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { 4955 device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); 4956 goto fail_unlock; 4957 } 4958 if (sctx->isc_flags & IFLIB_GEN_MAC) 4959 ether_gen_addr(ifp, &ctx->ifc_mac); 4960 if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name, 4961 clctx->cc_params)) != 0) { 4962 device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err); 4963 goto fail_unlock; 4964 } 4965 #ifdef INVARIANTS 4966 if (scctx->isc_capabilities & IFCAP_TXCSUM) 4967 MPASS(scctx->isc_tx_csum_flags); 4968 #endif 4969 4970 if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_LINKSTATE); 4971 if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE); 4972 4973 ifp->if_flags |= IFF_NOGROUP; 4974 if (sctx->isc_flags & IFLIB_PSEUDO) { 4975 ifmedia_add(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO, 0, NULL); 4976 ifmedia_set(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO); 4977 if (sctx->isc_flags & IFLIB_PSEUDO_ETHER) { 4978 ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet); 4979 } else { 4980 if_attach(ctx->ifc_ifp); 4981 bpfattach(ctx->ifc_ifp, DLT_NULL, sizeof(u_int32_t)); 4982 } 4983 4984 if ((err = IFDI_ATTACH_POST(ctx)) != 0) { 4985 device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); 4986 goto fail_detach; 4987 } 4988 *ctxp = ctx; 4989 4990 /* 4991 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported. 4992 * This must appear after the call to ether_ifattach() because 4993 * ether_ifattach() sets if_hdrlen to the default value. 
*/ 4994 if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) 4995 if_setifheaderlen(ifp, 4996 sizeof(struct ether_vlan_header)); 4997 4998 if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); 4999 iflib_add_device_sysctl_post(ctx); 5000 ctx->ifc_flags |= IFC_INIT_DONE; 5001 CTX_UNLOCK(ctx); 5002 return (0); 5003 } 5004 ifmedia_add(ctx->ifc_mediap, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 5005 ifmedia_add(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO, 0, NULL); 5006 ifmedia_set(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO); 5007 5008 _iflib_pre_assert(scctx); 5009 ctx->ifc_txrx = *scctx->isc_txrx; 5010 5011 if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets)) 5012 scctx->isc_ntxqsets = scctx->isc_ntxqsets_max; 5013 if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets)) 5014 scctx->isc_nrxqsets = scctx->isc_nrxqsets_max; 5015 5016 num_txd = iflib_num_tx_descs(ctx); 5017 num_rxd = iflib_num_rx_descs(ctx); 5018 5019 /* XXX change for per-queue sizes */ 5020 device_printf(dev, "Using %d TX descriptors and %d RX descriptors\n", 5021 num_txd, num_rxd); 5022 5023 if (scctx->isc_tx_nsegments > num_txd / MAX_SINGLE_PACKET_FRACTION) 5024 scctx->isc_tx_nsegments = max(1, num_txd / 5025 MAX_SINGLE_PACKET_FRACTION); 5026 if (scctx->isc_tx_tso_segments_max > num_txd / 5027 MAX_SINGLE_PACKET_FRACTION) 5028 scctx->isc_tx_tso_segments_max = max(1, 5029 num_txd / MAX_SINGLE_PACKET_FRACTION); 5030 5031 /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */ 5032 if (if_getcapabilities(ifp) & IFCAP_TSO) { 5033 /* 5034 * The stack can't handle a TSO size larger than IP_MAXPACKET, 5035 * but some MACs do. 5036 */ 5037 if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max, 5038 IP_MAXPACKET)); 5039 /* 5040 * Take maximum number of m_pullup(9)'s in iflib_parse_header() 5041 * into account. In the worst case, each of these calls will 5042 * add another mbuf and, thus, the requirement for another DMA 5043 * segment. So for best performance, it doesn't make sense to 5044 * advertise a maximum of TSO segments that typically will 5045 * require defragmentation in iflib_encap(). 5046 */ 5047 if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3); 5048 if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max); 5049 } 5050 if (scctx->isc_rss_table_size == 0) 5051 scctx->isc_rss_table_size = 64; 5052 scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1; 5053 5054 GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx); 5055 /* XXX format name */ 5056 taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, 5057 NULL, NULL, "admin"); 5058 5059 /* XXX --- can support > 1 -- but keep it simple for now */ 5060 scctx->isc_intr = IFLIB_INTR_LEGACY; 5061 5062 /* Get memory for the station queues */ 5063 if ((err = iflib_queues_alloc(ctx))) { 5064 device_printf(dev, "Unable to allocate queue memory\n"); 5065 goto fail_iflib_detach; 5066 } 5067 5068 if ((err = iflib_qset_structures_setup(ctx))) { 5069 device_printf(dev, "qset structure setup failed %d\n", err); 5070 goto fail_queues; 5071 } 5072 5073 /* 5074 * XXX What if anything do we want to do about interrupts? 5075 */ 5076 ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet); 5077 if ((err = IFDI_ATTACH_POST(ctx)) != 0) { 5078 device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); 5079 goto fail_detach; 5080 } 5081 5082 /* 5083 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
* This must appear after the call to ether_ifattach() because 5085 * ether_ifattach() sets if_hdrlen to the default value. 5086 */ 5087 if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) 5088 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 5089 5090 /* XXX handle more than one queue */ 5091 for (i = 0; i < scctx->isc_nrxqsets; i++) 5092 IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl); 5093 5094 *ctxp = ctx; 5095 5096 if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); 5097 iflib_add_device_sysctl_post(ctx); 5098 ctx->ifc_flags |= IFC_INIT_DONE; 5099 CTX_UNLOCK(ctx); 5100 5101 return (0); 5102 fail_detach: 5103 ether_ifdetach(ctx->ifc_ifp); 5104 fail_queues: 5105 iflib_tx_structures_free(ctx); 5106 iflib_rx_structures_free(ctx); 5107 iflib_tqg_detach(ctx); 5108 fail_iflib_detach: 5109 IFDI_DETACH(ctx); 5110 fail_unlock: 5111 CTX_UNLOCK(ctx); 5112 iflib_deregister(ctx); 5113 fail_ctx_free: 5114 free(ctx->ifc_softc, M_IFLIB); 5115 free(ctx, M_IFLIB); 5116 return (err); 5117 } 5118 5119 int 5120 iflib_pseudo_deregister(if_ctx_t ctx) 5121 { 5122 if_t ifp = ctx->ifc_ifp; 5123 if_shared_ctx_t sctx = ctx->ifc_sctx; 5124 5125 /* Unregister VLAN event handlers early */ 5126 iflib_unregister_vlan_handlers(ctx); 5127 5128 if ((sctx->isc_flags & IFLIB_PSEUDO) && 5129 (sctx->isc_flags & IFLIB_PSEUDO_ETHER) == 0) { 5130 bpfdetach(ifp); 5131 if_detach(ifp); 5132 } else { 5133 ether_ifdetach(ifp); 5134 } 5135 5136 iflib_tqg_detach(ctx); 5137 iflib_tx_structures_free(ctx); 5138 iflib_rx_structures_free(ctx); 5139 5140 iflib_deregister(ctx); 5141 5142 if (ctx->ifc_flags & IFC_SC_ALLOCATED) 5143 free(ctx->ifc_softc, M_IFLIB); 5144 free(ctx, M_IFLIB); 5145 return (0); 5146 } 5147 5148 int 5149 iflib_device_attach(device_t dev) 5150 { 5151 if_ctx_t ctx; 5152 if_shared_ctx_t sctx; 5153 5154 if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) 5155 return (ENOTSUP); 5156 5157 pci_enable_busmaster(dev); 5158 5159 return (iflib_device_register(dev, NULL, sctx, &ctx)); 5160 } 5161 5162 int 5163 iflib_device_deregister(if_ctx_t ctx) 5164 { 5165 if_t ifp = ctx->ifc_ifp; 5166 device_t dev = ctx->ifc_dev; 5167 5168 /* Make sure VLANs are not using the driver */ 5169 if (if_vlantrunkinuse(ifp)) { 5170 device_printf(dev, "VLAN in use, detach first\n"); 5171 return (EBUSY); 5172 } 5173 #ifdef PCI_IOV 5174 if (!CTX_IS_VF(ctx) && pci_iov_detach(dev) != 0) { 5175 device_printf(dev, "SR-IOV in use; detach first.\n"); 5176 return (EBUSY); 5177 } 5178 #endif 5179 5180 STATE_LOCK(ctx); 5181 ctx->ifc_flags |= IFC_IN_DETACH; 5182 STATE_UNLOCK(ctx); 5183 5184 /* Unregister VLAN handlers before calling iflib_stop() */ 5185 iflib_unregister_vlan_handlers(ctx); 5186 5187 iflib_netmap_detach(ifp); 5188 ether_ifdetach(ifp); 5189 5190 CTX_LOCK(ctx); 5191 iflib_stop(ctx); 5192 CTX_UNLOCK(ctx); 5193 5194 iflib_rem_pfil(ctx); 5195 if (ctx->ifc_led_dev != NULL) 5196 led_destroy(ctx->ifc_led_dev); 5197 5198 iflib_tqg_detach(ctx); 5199 CTX_LOCK(ctx); 5200 IFDI_DETACH(ctx); 5201 CTX_UNLOCK(ctx); 5202 5203 /* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */ 5204 iflib_free_intr_mem(ctx); 5205 5206 bus_generic_detach(dev); 5207 5208 iflib_tx_structures_free(ctx); 5209 iflib_rx_structures_free(ctx); 5210 5211 iflib_deregister(ctx); 5212 5213 device_set_softc(ctx->ifc_dev, NULL); 5214 if (ctx->ifc_flags & IFC_SC_ALLOCATED) 5215 free(ctx->ifc_softc, M_IFLIB); 5216 unref_ctx_core_offset(ctx); 5217 free(ctx, M_IFLIB); 5218 return (0); 5219 } 5220 5221 static void 5222
iflib_tqg_detach(if_ctx_t ctx) 5224 { 5225 iflib_txq_t txq; 5226 iflib_rxq_t rxq; 5227 int i; 5228 struct taskqgroup *tqg; 5229 5230 /* XXX drain any dependent tasks */ 5231 tqg = qgroup_if_io_tqg; 5232 for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) { 5233 callout_drain(&txq->ift_timer); 5234 #ifdef DEV_NETMAP 5235 callout_drain(&txq->ift_netmap_timer); 5236 #endif /* DEV_NETMAP */ 5237 if (txq->ift_task.gt_uniq != NULL) 5238 taskqgroup_detach(tqg, &txq->ift_task); 5239 } 5240 for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { 5241 if (rxq->ifr_task.gt_uniq != NULL) 5242 taskqgroup_detach(tqg, &rxq->ifr_task); 5243 } 5244 tqg = qgroup_if_config_tqg; 5245 if (ctx->ifc_admin_task.gt_uniq != NULL) 5246 taskqgroup_detach(tqg, &ctx->ifc_admin_task); 5247 if (ctx->ifc_vflr_task.gt_uniq != NULL) 5248 taskqgroup_detach(tqg, &ctx->ifc_vflr_task); 5249 } 5250 5251 static void 5252 iflib_free_intr_mem(if_ctx_t ctx) 5253 { 5254 5255 if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) { 5256 iflib_irq_free(ctx, &ctx->ifc_legacy_irq); 5257 } 5258 if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) { 5259 pci_release_msi(ctx->ifc_dev); 5260 } 5261 if (ctx->ifc_msix_mem != NULL) { 5262 bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY, 5263 rman_get_rid(ctx->ifc_msix_mem), ctx->ifc_msix_mem); 5264 ctx->ifc_msix_mem = NULL; 5265 } 5266 } 5267 5268 int 5269 iflib_device_detach(device_t dev) 5270 { 5271 if_ctx_t ctx = device_get_softc(dev); 5272 5273 return (iflib_device_deregister(ctx)); 5274 } 5275 5276 int 5277 iflib_device_suspend(device_t dev) 5278 { 5279 if_ctx_t ctx = device_get_softc(dev); 5280 5281 CTX_LOCK(ctx); 5282 IFDI_SUSPEND(ctx); 5283 CTX_UNLOCK(ctx); 5284 5285 return bus_generic_suspend(dev); 5286 } 5287 int 5288 iflib_device_shutdown(device_t dev) 5289 { 5290 if_ctx_t ctx = device_get_softc(dev); 5291 5292 CTX_LOCK(ctx); 5293 IFDI_SHUTDOWN(ctx); 5294 CTX_UNLOCK(ctx); 5295 5296 return bus_generic_suspend(dev); 5297 } 5298 5299 int 5300 iflib_device_resume(device_t dev) 5301 { 5302 if_ctx_t ctx = device_get_softc(dev); 5303 iflib_txq_t txq = ctx->ifc_txqs; 5304 5305 CTX_LOCK(ctx); 5306 IFDI_RESUME(ctx); 5307 iflib_if_init_locked(ctx); 5308 CTX_UNLOCK(ctx); 5309 for (int i = 0; i < NTXQSETS(ctx); i++, txq++) 5310 iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); 5311 5312 return (bus_generic_resume(dev)); 5313 } 5314 5315 int 5316 iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params) 5317 { 5318 int error; 5319 if_ctx_t ctx = device_get_softc(dev); 5320 5321 CTX_LOCK(ctx); 5322 error = IFDI_IOV_INIT(ctx, num_vfs, params); 5323 CTX_UNLOCK(ctx); 5324 5325 return (error); 5326 } 5327 5328 void 5329 iflib_device_iov_uninit(device_t dev) 5330 { 5331 if_ctx_t ctx = device_get_softc(dev); 5332 5333 CTX_LOCK(ctx); 5334 IFDI_IOV_UNINIT(ctx); 5335 CTX_UNLOCK(ctx); 5336 } 5337 5338 int 5339 iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) 5340 { 5341 int error; 5342 if_ctx_t ctx = device_get_softc(dev); 5343 5344 CTX_LOCK(ctx); 5345 error = IFDI_IOV_VF_ADD(ctx, vfnum, params); 5346 CTX_UNLOCK(ctx); 5347 5348 return (error); 5349 } 5350 5351 /********************************************************************* 5352 * 5353 * MODULE FUNCTION DEFINITIONS 5354 * 5355 **********************************************************************/ 5356 5357 /* 5358 * - Start a fast taskqueue thread for each core 5359 * - Start a taskqueue for control operations 5360 */ 5361 static int 5362 iflib_module_init(void) 5363 { 5364 
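	/* hz is ticks per second, so the default per-txq timer period is half a second. */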
iflib_timer_default = hz / 2; 5365 return (0); 5366 } 5367 5368 static int 5369 iflib_module_event_handler(module_t mod, int what, void *arg) 5370 { 5371 int err; 5372 5373 switch (what) { 5374 case MOD_LOAD: 5375 if ((err = iflib_module_init()) != 0) 5376 return (err); 5377 break; 5378 case MOD_UNLOAD: 5379 return (EBUSY); 5380 default: 5381 return (EOPNOTSUPP); 5382 } 5383 5384 return (0); 5385 } 5386 5387 /********************************************************************* 5388 * 5389 * PUBLIC FUNCTION DEFINITIONS 5390 * ordered as in iflib.h 5391 * 5392 **********************************************************************/ 5393 5394 static void 5395 _iflib_assert(if_shared_ctx_t sctx) 5396 { 5397 int i; 5398 5399 MPASS(sctx->isc_tx_maxsize); 5400 MPASS(sctx->isc_tx_maxsegsize); 5401 5402 MPASS(sctx->isc_rx_maxsize); 5403 MPASS(sctx->isc_rx_nsegments); 5404 MPASS(sctx->isc_rx_maxsegsize); 5405 5406 MPASS(sctx->isc_nrxqs >= 1 && sctx->isc_nrxqs <= 8); 5407 for (i = 0; i < sctx->isc_nrxqs; i++) { 5408 MPASS(sctx->isc_nrxd_min[i]); 5409 MPASS(powerof2(sctx->isc_nrxd_min[i])); 5410 MPASS(sctx->isc_nrxd_max[i]); 5411 MPASS(powerof2(sctx->isc_nrxd_max[i])); 5412 MPASS(sctx->isc_nrxd_default[i]); 5413 MPASS(powerof2(sctx->isc_nrxd_default[i])); 5414 } 5415 5416 MPASS(sctx->isc_ntxqs >= 1 && sctx->isc_ntxqs <= 8); 5417 for (i = 0; i < sctx->isc_ntxqs; i++) { 5418 MPASS(sctx->isc_ntxd_min[i]); 5419 MPASS(powerof2(sctx->isc_ntxd_min[i])); 5420 MPASS(sctx->isc_ntxd_max[i]); 5421 MPASS(powerof2(sctx->isc_ntxd_max[i])); 5422 MPASS(sctx->isc_ntxd_default[i]); 5423 MPASS(powerof2(sctx->isc_ntxd_default[i])); 5424 } 5425 } 5426 5427 static void 5428 _iflib_pre_assert(if_softc_ctx_t scctx) 5429 { 5430 5431 MPASS(scctx->isc_txrx->ift_txd_encap); 5432 MPASS(scctx->isc_txrx->ift_txd_flush); 5433 MPASS(scctx->isc_txrx->ift_txd_credits_update); 5434 MPASS(scctx->isc_txrx->ift_rxd_available); 5435 MPASS(scctx->isc_txrx->ift_rxd_pkt_get); 5436 MPASS(scctx->isc_txrx->ift_rxd_refill); 5437 MPASS(scctx->isc_txrx->ift_rxd_flush); 5438 } 5439 5440 static int 5441 iflib_register(if_ctx_t ctx) 5442 { 5443 if_shared_ctx_t sctx = ctx->ifc_sctx; 5444 driver_t *driver = sctx->isc_driver; 5445 device_t dev = ctx->ifc_dev; 5446 if_t ifp; 5447 u_char type; 5448 int iflags; 5449 5450 if ((sctx->isc_flags & IFLIB_PSEUDO) == 0) 5451 _iflib_assert(sctx); 5452 5453 CTX_LOCK_INIT(ctx); 5454 STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev)); 5455 if (sctx->isc_flags & IFLIB_PSEUDO) { 5456 if (sctx->isc_flags & IFLIB_PSEUDO_ETHER) 5457 type = IFT_ETHER; 5458 else 5459 type = IFT_PPP; 5460 } else 5461 type = IFT_ETHER; 5462 ifp = ctx->ifc_ifp = if_alloc(type); 5463 if (ifp == NULL) { 5464 device_printf(dev, "can not allocate ifnet structure\n"); 5465 return (ENOMEM); 5466 } 5467 5468 /* 5469 * Initialize our context's device specific methods 5470 */ 5471 kobj_init((kobj_t) ctx, (kobj_class_t) driver); 5472 kobj_class_compile((kobj_class_t) driver); 5473 5474 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 5475 if_setsoftc(ifp, ctx); 5476 if_setdev(ifp, dev); 5477 if_setinitfn(ifp, iflib_if_init); 5478 if_setioctlfn(ifp, iflib_if_ioctl); 5479 #ifdef ALTQ 5480 if_setstartfn(ifp, iflib_altq_if_start); 5481 if_settransmitfn(ifp, iflib_altq_if_transmit); 5482 if_setsendqready(ifp); 5483 #else 5484 if_settransmitfn(ifp, iflib_if_transmit); 5485 #endif 5486 if_setqflushfn(ifp, iflib_if_qflush); 5487 iflags = IFF_MULTICAST | IFF_KNOWSEPOCH; 5488 5489 if ((sctx->isc_flags & IFLIB_PSEUDO) && 5490 (sctx->isc_flags & 
IFLIB_PSEUDO_ETHER) == 0) 5491 iflags |= IFF_POINTOPOINT; 5492 else 5493 iflags |= IFF_BROADCAST | IFF_SIMPLEX; 5494 if_setflags(ifp, iflags); 5495 ctx->ifc_vlan_attach_event = 5496 EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx, 5497 EVENTHANDLER_PRI_FIRST); 5498 ctx->ifc_vlan_detach_event = 5499 EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx, 5500 EVENTHANDLER_PRI_FIRST); 5501 5502 if ((sctx->isc_flags & IFLIB_DRIVER_MEDIA) == 0) { 5503 ctx->ifc_mediap = &ctx->ifc_media; 5504 ifmedia_init(ctx->ifc_mediap, IFM_IMASK, 5505 iflib_media_change, iflib_media_status); 5506 } 5507 return (0); 5508 } 5509 5510 static void 5511 iflib_unregister_vlan_handlers(if_ctx_t ctx) 5512 { 5513 /* Unregister VLAN events */ 5514 if (ctx->ifc_vlan_attach_event != NULL) { 5515 EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event); 5516 ctx->ifc_vlan_attach_event = NULL; 5517 } 5518 if (ctx->ifc_vlan_detach_event != NULL) { 5519 EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event); 5520 ctx->ifc_vlan_detach_event = NULL; 5521 } 5522 5523 } 5524 5525 static void 5526 iflib_deregister(if_ctx_t ctx) 5527 { 5528 if_t ifp = ctx->ifc_ifp; 5529 5530 /* Remove all media */ 5531 ifmedia_removeall(&ctx->ifc_media); 5532 5533 /* Ensure that VLAN event handlers are unregistered */ 5534 iflib_unregister_vlan_handlers(ctx); 5535 5536 /* Release kobject reference */ 5537 kobj_delete((kobj_t) ctx, NULL); 5538 5539 /* Free the ifnet structure */ 5540 if_free(ifp); 5541 5542 STATE_LOCK_DESTROY(ctx); 5543 5544 /* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */ 5545 CTX_LOCK_DESTROY(ctx); 5546 } 5547 5548 static int 5549 iflib_queues_alloc(if_ctx_t ctx) 5550 { 5551 if_shared_ctx_t sctx = ctx->ifc_sctx; 5552 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 5553 device_t dev = ctx->ifc_dev; 5554 int nrxqsets = scctx->isc_nrxqsets; 5555 int ntxqsets = scctx->isc_ntxqsets; 5556 iflib_txq_t txq; 5557 iflib_rxq_t rxq; 5558 iflib_fl_t fl = NULL; 5559 int i, j, cpu, err, txconf, rxconf; 5560 iflib_dma_info_t ifdip; 5561 uint32_t *rxqsizes = scctx->isc_rxqsizes; 5562 uint32_t *txqsizes = scctx->isc_txqsizes; 5563 uint8_t nrxqs = sctx->isc_nrxqs; 5564 uint8_t ntxqs = sctx->isc_ntxqs; 5565 int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1; 5566 int fl_offset = (sctx->isc_flags & IFLIB_HAS_RXCQ ?
1 : 0); 5567 caddr_t *vaddrs; 5568 uint64_t *paddrs; 5569 5570 KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1")); 5571 KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1")); 5572 KASSERT(nrxqs >= fl_offset + nfree_lists, 5573 ("there must be at least a rxq for each free list")); 5574 5575 /* Allocate the TX ring struct memory */ 5576 if (!(ctx->ifc_txqs = 5577 (iflib_txq_t) malloc(sizeof(struct iflib_txq) * 5578 ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { 5579 device_printf(dev, "Unable to allocate TX ring memory\n"); 5580 err = ENOMEM; 5581 goto fail; 5582 } 5583 5584 /* Now allocate the RX */ 5585 if (!(ctx->ifc_rxqs = 5586 (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) * 5587 nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { 5588 device_printf(dev, "Unable to allocate RX ring memory\n"); 5589 err = ENOMEM; 5590 goto rx_fail; 5591 } 5592 5593 txq = ctx->ifc_txqs; 5594 rxq = ctx->ifc_rxqs; 5595 5596 /* 5597 * XXX handle allocation failure 5598 */ 5599 for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) { 5600 /* Set up some basics */ 5601 5602 if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, 5603 M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) { 5604 device_printf(dev, 5605 "Unable to allocate TX DMA info memory\n"); 5606 err = ENOMEM; 5607 goto err_tx_desc; 5608 } 5609 txq->ift_ifdi = ifdip; 5610 for (j = 0; j < ntxqs; j++, ifdip++) { 5611 if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, 0)) { 5612 device_printf(dev, 5613 "Unable to allocate TX descriptors\n"); 5614 err = ENOMEM; 5615 goto err_tx_desc; 5616 } 5617 txq->ift_txd_size[j] = scctx->isc_txd_size[j]; 5618 bzero((void *)ifdip->idi_vaddr, txqsizes[j]); 5619 } 5620 txq->ift_ctx = ctx; 5621 txq->ift_id = i; 5622 if (sctx->isc_flags & IFLIB_HAS_TXCQ) { 5623 txq->ift_br_offset = 1; 5624 } else { 5625 txq->ift_br_offset = 0; 5626 } 5627 5628 if (iflib_txsd_alloc(txq)) { 5629 device_printf(dev, "Critical Failure setting up TX buffers\n"); 5630 err = ENOMEM; 5631 goto err_tx_desc; 5632 } 5633 5634 /* Initialize the TX lock */ 5635 snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:TX(%d):callout", 5636 device_get_nameunit(dev), txq->ift_id); 5637 mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF); 5638 callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0); 5639 txq->ift_timer.c_cpu = cpu; 5640 #ifdef DEV_NETMAP 5641 callout_init_mtx(&txq->ift_netmap_timer, &txq->ift_mtx, 0); 5642 txq->ift_netmap_timer.c_cpu = cpu; 5643 #endif /* DEV_NETMAP */ 5644 5645 err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain, 5646 iflib_txq_can_drain, M_IFLIB, M_WAITOK); 5647 if (err) { 5648 /* XXX free any allocated rings */ 5649 device_printf(dev, "Unable to allocate buf_ring\n"); 5650 goto err_tx_desc; 5651 } 5652 } 5653 5654 for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) { 5655 /* Set up some basics */ 5656 callout_init(&rxq->ifr_watchdog, 1); 5657 5658 if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, 5659 M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) { 5660 device_printf(dev, 5661 "Unable to allocate RX DMA info memory\n"); 5662 err = ENOMEM; 5663 goto err_tx_desc; 5664 } 5665 5666 rxq->ifr_ifdi = ifdip; 5667 /* XXX this needs to be changed if #rx queues != #tx queues */ 5668 rxq->ifr_ntxqirq = 1; 5669 rxq->ifr_txqid[0] = i; 5670 for (j = 0; j < nrxqs; j++, ifdip++) { 5671 if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, 0)) { 5672 device_printf(dev, 5673 "Unable to allocate RX descriptors\n"); 5674 err = ENOMEM; 5675 goto err_tx_desc; 5676 } 5677 bzero((void 
*)ifdip->idi_vaddr, rxqsizes[j]); 5678 } 5679 rxq->ifr_ctx = ctx; 5680 rxq->ifr_id = i; 5681 rxq->ifr_fl_offset = fl_offset; 5682 rxq->ifr_nfl = nfree_lists; 5683 if (!(fl = 5684 (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) { 5685 device_printf(dev, "Unable to allocate free list memory\n"); 5686 err = ENOMEM; 5687 goto err_tx_desc; 5688 } 5689 rxq->ifr_fl = fl; 5690 for (j = 0; j < nfree_lists; j++) { 5691 fl[j].ifl_rxq = rxq; 5692 fl[j].ifl_id = j; 5693 fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset]; 5694 fl[j].ifl_rxd_size = scctx->isc_rxd_size[j]; 5695 } 5696 /* Allocate receive buffers for the ring */ 5697 if (iflib_rxsd_alloc(rxq)) { 5698 device_printf(dev, 5699 "Critical Failure setting up receive buffers\n"); 5700 err = ENOMEM; 5701 goto err_rx_desc; 5702 } 5703 5704 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) 5705 fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, 5706 M_WAITOK); 5707 } 5708 5709 /* TXQs */ 5710 vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); 5711 paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); 5712 for (i = 0; i < ntxqsets; i++) { 5713 iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi; 5714 5715 for (j = 0; j < ntxqs; j++, di++) { 5716 vaddrs[i*ntxqs + j] = di->idi_vaddr; 5717 paddrs[i*ntxqs + j] = di->idi_paddr; 5718 } 5719 } 5720 if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) { 5721 device_printf(ctx->ifc_dev, 5722 "Unable to allocate device TX queue\n"); 5723 iflib_tx_structures_free(ctx); 5724 free(vaddrs, M_IFLIB); 5725 free(paddrs, M_IFLIB); 5726 goto err_rx_desc; 5727 } 5728 free(vaddrs, M_IFLIB); 5729 free(paddrs, M_IFLIB); 5730 5731 /* RXQs */ 5732 vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); 5733 paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); 5734 for (i = 0; i < nrxqsets; i++) { 5735 iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi; 5736 5737 for (j = 0; j < nrxqs; j++, di++) { 5738 vaddrs[i*nrxqs + j] = di->idi_vaddr; 5739 paddrs[i*nrxqs + j] = di->idi_paddr; 5740 } 5741 } 5742 if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) { 5743 device_printf(ctx->ifc_dev, 5744 "Unable to allocate device RX queue\n"); 5745 iflib_tx_structures_free(ctx); 5746 free(vaddrs, M_IFLIB); 5747 free(paddrs, M_IFLIB); 5748 goto err_rx_desc; 5749 } 5750 free(vaddrs, M_IFLIB); 5751 free(paddrs, M_IFLIB); 5752 5753 return (0); 5754 5755 /* XXX handle allocation failure changes */ 5756 err_rx_desc: 5757 err_tx_desc: 5758 rx_fail: 5759 if (ctx->ifc_rxqs != NULL) 5760 free(ctx->ifc_rxqs, M_IFLIB); 5761 ctx->ifc_rxqs = NULL; 5762 if (ctx->ifc_txqs != NULL) 5763 free(ctx->ifc_txqs, M_IFLIB); 5764 ctx->ifc_txqs = NULL; 5765 fail: 5766 return (err); 5767 } 5768 5769 static int 5770 iflib_tx_structures_setup(if_ctx_t ctx) 5771 { 5772 iflib_txq_t txq = ctx->ifc_txqs; 5773 int i; 5774 5775 for (i = 0; i < NTXQSETS(ctx); i++, txq++) 5776 iflib_txq_setup(txq); 5777 5778 return (0); 5779 } 5780 5781 static void 5782 iflib_tx_structures_free(if_ctx_t ctx) 5783 { 5784 iflib_txq_t txq = ctx->ifc_txqs; 5785 if_shared_ctx_t sctx = ctx->ifc_sctx; 5786 int i, j; 5787 5788 for (i = 0; i < NTXQSETS(ctx); i++, txq++) { 5789 for (j = 0; j < sctx->isc_ntxqs; j++) 5790 iflib_dma_free(&txq->ift_ifdi[j]); 5791 iflib_txq_destroy(txq); 5792 } 5793 free(ctx->ifc_txqs, M_IFLIB); 5794 ctx->ifc_txqs = NULL; 5795 IFDI_QUEUES_FREE(ctx); 5796 } 5797 5798 
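/*
 * Note on iflib_queues_alloc() above: the vaddrs/paddrs arrays handed
 * to IFDI_TX_QUEUES_ALLOC() and IFDI_RX_QUEUES_ALLOC() are flattened
 * row-major, one entry per (queue set, ring) pair, so ring j of queue
 * set i lives at index i * nqs + j.  For the TX side, for instance:
 *
 *	vaddrs[i * ntxqs + j] = txq[i].ift_ifdi[j].idi_vaddr;
 */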

/*********************************************************************
 *
 *  Initialize all receive rings.
 *
 **********************************************************************/
static int
iflib_rx_structures_setup(if_ctx_t ctx)
{
	iflib_rxq_t rxq = ctx->ifc_rxqs;
	int q;
#if defined(INET6) || defined(INET)
	int err, i;
#endif

	for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
#if defined(INET6) || defined(INET)
		if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO) {
			err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
			    TCP_LRO_ENTRIES, min(1024,
			    ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]));
			if (err != 0) {
				device_printf(ctx->ifc_dev,
				    "LRO Initialization failed!\n");
				goto fail;
			}
		}
#endif
		IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
	}
	return (0);
#if defined(INET6) || defined(INET)
fail:
	/*
	 * Free the LRO resources allocated so far; we only need to handle
	 * the rings that completed, since the failing ring has cleaned up
	 * after itself.  'q' failed, so it's the terminus.
	 */
	rxq = ctx->ifc_rxqs;
	for (i = 0; i < q; ++i, rxq++) {
		if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO)
			tcp_lro_free(&rxq->ifr_lc);
	}
	return (err);
#endif
}

/*********************************************************************
 *
 *  Free all receive rings.
 *
 **********************************************************************/
static void
iflib_rx_structures_free(if_ctx_t ctx)
{
	iflib_rxq_t rxq = ctx->ifc_rxqs;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	int i, j;

	for (i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
		for (j = 0; j < sctx->isc_nrxqs; j++)
			iflib_dma_free(&rxq->ifr_ifdi[j]);
		iflib_rx_sds_free(rxq);
#if defined(INET6) || defined(INET)
		if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO)
			tcp_lro_free(&rxq->ifr_lc);
#endif
	}
	free(ctx->ifc_rxqs, M_IFLIB);
	ctx->ifc_rxqs = NULL;
}
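
/*
 * The LRO state above is sized from the free-list descriptor count and is
 * only allocated when the interface advertises the capability.  A minimal
 * sketch of how a driver opts in (hypothetical attach-time snippet; iflib
 * drivers typically populate scctx->isc_capabilities instead and let iflib
 * apply it to the ifnet):
 *
 *	if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
 *	if_setcapenablebit(ifp, IFCAP_LRO, 0);
 *
 * Note that the check is against if_getcapabilities(), not capenable, so
 * the LRO control structures exist whenever LRO could later be enabled.
 */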

static int
iflib_qset_structures_setup(if_ctx_t ctx)
{
	int err;

	/*
	 * It is expected that the caller takes care of freeing queues if this
	 * fails.
	 */
	if ((err = iflib_tx_structures_setup(ctx)) != 0) {
		device_printf(ctx->ifc_dev,
		    "iflib_tx_structures_setup failed: %d\n", err);
		return (err);
	}

	if ((err = iflib_rx_structures_setup(ctx)) != 0)
		device_printf(ctx->ifc_dev,
		    "iflib_rx_structures_setup failed: %d\n", err);

	return (err);
}

int
iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
    driver_filter_t filter, void *filter_arg, driver_intr_t handler,
    void *arg, const char *name)
{

	return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
}

#ifdef SMP
/*
 * Return the CPU id of the qid'th set bit in the context's CPU set,
 * wrapping around when qid exceeds the number of usable CPUs.
 */
static int
find_nth(if_ctx_t ctx, int qid)
{
	cpuset_t cpus;
	int i, cpuid, eqid, count;

	CPU_COPY(&ctx->ifc_cpus, &cpus);
	count = CPU_COUNT(&cpus);
	eqid = qid % count;
	/* clear up to the eqid'th bit */
	for (i = 0; i < eqid; i++) {
		cpuid = CPU_FFS(&cpus);
		MPASS(cpuid != 0);
		CPU_CLR(cpuid - 1, &cpus);
	}
	cpuid = CPU_FFS(&cpus);
	MPASS(cpuid != 0);
	return (cpuid - 1);
}
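
/*
 * Worked example for find_nth(), assuming a hypothetical ifc_cpus of
 * {0, 2, 4, 6} (count == 4) and qid == 5:
 *
 *	eqid = 5 % 4 = 1   -> clear the lowest set bit once (CPU 0)
 *	CPU_FFS()          -> next set bit is CPU 2, so return 2
 *
 * Queue ids therefore round-robin over the set bits of ifc_cpus.
 */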

#ifdef SCHED_ULE
extern struct cpu_group *cpu_top;	/* CPU topology */

/*
 * Return the index of the child group of grp containing cpu, or -1 if
 * no child contains it.
 */
static int
find_child_with_core(int cpu, struct cpu_group *grp)
{
	int i;

	if (grp->cg_children == 0)
		return -1;

	MPASS(grp->cg_child);
	for (i = 0; i < grp->cg_children; i++) {
		if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
			return i;
	}

	return -1;
}

/*
 * Find the nth "close" core to the specified core, where "close" means
 * the deepest topology level that shares at least an L2 cache.  With SMT,
 * this will be a thread on the same core.  If the shared cache is L3 or
 * higher, simply return the same core.
 */
static int
find_close_core(int cpu, int core_offset)
{
	struct cpu_group *grp;
	int i;
	int fcpu;
	cpuset_t cs;

	grp = cpu_top;
	if (grp == NULL)
		return cpu;
	i = 0;
	while ((i = find_child_with_core(cpu, grp)) != -1) {
		/* If the child only has one cpu, don't descend */
		if (grp->cg_child[i].cg_count <= 1)
			break;
		grp = &grp->cg_child[i];
	}

	/* If they don't share at least an L2 cache, use the same CPU */
	if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
		return cpu;

	/* Now pick one */
	CPU_COPY(&grp->cg_mask, &cs);

	/* Add the selected CPU's offset within the group to core_offset. */
	for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) {
		if (fcpu - 1 == cpu)
			break;
		CPU_CLR(fcpu - 1, &cs);
	}
	MPASS(fcpu);

	core_offset += i;

	CPU_COPY(&grp->cg_mask, &cs);
	for (i = core_offset % grp->cg_count; i > 0; i--) {
		MPASS(CPU_FFS(&cs));
		CPU_CLR(CPU_FFS(&cs) - 1, &cs);
	}
	MPASS(CPU_FFS(&cs));
	return CPU_FFS(&cs) - 1;
}
#else
static int
find_close_core(int cpu, int core_offset __unused)
{
	return cpu;
}
#endif

static int
get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid)
{
	switch (type) {
	case IFLIB_INTR_TX:
		/*
		 * TX queues get cores which share at least an L2 cache
		 * with the corresponding RX queue.
		 */
		/* XXX handle multiple RX threads per core and more than two core per L2 group */
		return qid / CPU_COUNT(&ctx->ifc_cpus) + 1;
	case IFLIB_INTR_RX:
	case IFLIB_INTR_RXTX:
		/* RX queues get the specified core */
		return qid / CPU_COUNT(&ctx->ifc_cpus);
	default:
		return -1;
	}
}
#else
#define get_core_offset(ctx, type, qid)	CPU_FIRST()
#define find_close_core(cpuid, tid)	CPU_FIRST()
#define find_nth(ctx, gid)		CPU_FIRST()
#endif

/* Just to avoid copy/paste */
static inline int
iflib_irq_set_affinity(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
    int qid, struct grouptask *gtask, struct taskqgroup *tqg, void *uniq,
    const char *name)
{
	device_t dev;
	int co, cpuid, err, tid;

	dev = ctx->ifc_dev;
	co = ctx->ifc_sysctl_core_offset;
	if (ctx->ifc_sysctl_separate_txrx && type == IFLIB_INTR_TX)
		co += ctx->ifc_softc_ctx.isc_nrxqsets;
	cpuid = find_nth(ctx, qid + co);
	tid = get_core_offset(ctx, type, qid);
	if (tid < 0) {
		device_printf(dev, "get_core_offset failed\n");
		return (EOPNOTSUPP);
	}
	cpuid = find_close_core(cpuid, tid);
	err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, dev, irq->ii_res,
	    name);
	if (err) {
		device_printf(dev, "taskqgroup_attach_cpu failed %d\n", err);
		return (err);
	}
#ifdef notyet
	if (cpuid > ctx->ifc_cpuid_highest)
		ctx->ifc_cpuid_highest = cpuid;
#endif
	return (0);
}
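
/*
 * Sketch of the CPU selection performed above, assuming hypothetical
 * settings of core_offset == 0, separate_txrx != 0 and isc_nrxqsets == 4
 * on an 8-CPU single-socket box with ifc_cpus == {0..7}:
 *
 *	RX queue 2:  find_nth(ctx, 2 + 0)     -> CPU 2
 *	TX queue 2:  find_nth(ctx, 2 + 0 + 4) -> CPU 6, then nudged by
 *	             find_close_core() toward a core sharing an L2 cache.
 *
 * Without separate_txrx, TX queue N starts from the same base CPU as RX
 * queue N and relies on find_close_core() to pick a nearby SMT sibling.
 */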

int
iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
    iflib_intr_type_t type, driver_filter_t *filter,
    void *filter_arg, int qid, const char *name)
{
	device_t dev;
	struct grouptask *gtask;
	struct taskqgroup *tqg;
	iflib_filter_info_t info;
	gtask_fn_t *fn;
	int tqrid, err;
	driver_filter_t *intr_fast;
	void *q;

	info = &ctx->ifc_filter_info;
	tqrid = rid;

	switch (type) {
	/* XXX merge tx/rx for netmap? */
	case IFLIB_INTR_TX:
		q = &ctx->ifc_txqs[qid];
		info = &ctx->ifc_txqs[qid].ift_filter_info;
		gtask = &ctx->ifc_txqs[qid].ift_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_tx;
		intr_fast = iflib_fast_intr;
		GROUPTASK_INIT(gtask, 0, fn, q);
		ctx->ifc_flags |= IFC_NETMAP_TX_IRQ;
		break;
	case IFLIB_INTR_RX:
		q = &ctx->ifc_rxqs[qid];
		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
		gtask = &ctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		intr_fast = iflib_fast_intr;
		NET_GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	case IFLIB_INTR_RXTX:
		q = &ctx->ifc_rxqs[qid];
		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
		gtask = &ctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		intr_fast = iflib_fast_intr_rxtx;
		NET_GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	case IFLIB_INTR_ADMIN:
		q = ctx;
		tqrid = -1;
		info = &ctx->ifc_filter_info;
		gtask = &ctx->ifc_admin_task;
		tqg = qgroup_if_config_tqg;
		fn = _task_fn_admin;
		intr_fast = iflib_fast_intr_ctx;
		break;
	default:
		device_printf(ctx->ifc_dev, "%s: unknown net intr type\n",
		    __func__);
		return (EINVAL);
	}

	info->ifi_filter = filter;
	info->ifi_filter_arg = filter_arg;
	info->ifi_task = gtask;
	info->ifi_ctx = q;

	dev = ctx->ifc_dev;
	err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name);
	if (err != 0) {
		device_printf(dev, "_iflib_irq_alloc failed %d\n", err);
		return (err);
	}
	if (type == IFLIB_INTR_ADMIN)
		return (0);

	if (tqrid != -1) {
		err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg,
		    q, name);
		if (err)
			return (err);
	} else {
		taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name);
	}

	return (0);
}

void
iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
    void *arg, int qid, const char *name)
{
	struct grouptask *gtask;
	struct taskqgroup *tqg;
	gtask_fn_t *fn;
	void *q;
	int err;

	switch (type) {
	case IFLIB_INTR_TX:
		q = &ctx->ifc_txqs[qid];
		gtask = &ctx->ifc_txqs[qid].ift_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_tx;
		GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	case IFLIB_INTR_RX:
		q = &ctx->ifc_rxqs[qid];
		gtask = &ctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		NET_GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	case IFLIB_INTR_IOV:
		q = ctx;
		gtask = &ctx->ifc_vflr_task;
		tqg = qgroup_if_config_tqg;
		fn = _task_fn_iov;
		GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	default:
		panic("unknown net intr type");
	}
	if (irq != NULL) {
		err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg,
		    q, name);
		if (err)
			taskqgroup_attach(tqg, gtask, q, ctx->ifc_dev,
			    irq->ii_res, name);
	} else {
		taskqgroup_attach(tqg, gtask, q, NULL, NULL, name);
	}
}

void
iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
{

	if (irq->ii_tag)
		bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);

	if (irq->ii_res)
		bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ,
		    rman_get_rid(irq->ii_res), irq->ii_res);
}
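
/*
 * Typical usage from a driver's IFDI msix_intr_assign method: one
 * IFLIB_INTR_RXTX vector per RX queue plus softirq-only TX tasks and an
 * admin vector.  A minimal sketch with hypothetical names (drv_msix_que,
 * drv_msix_admin, sc and its members are illustrative, not part of iflib):
 *
 *	for (i = 0; i < scctx->isc_nrxqsets; i++) {
 *		snprintf(buf, sizeof(buf), "rxq%d", i);
 *		err = iflib_irq_alloc_generic(ctx, &sc->rx_irqs[i], i + 1,
 *		    IFLIB_INTR_RXTX, drv_msix_que, &sc->rx_queues[i], i, buf);
 *		if (err)
 *			return (err);
 *	}
 *	for (i = 0; i < scctx->isc_ntxqsets; i++) {
 *		snprintf(buf, sizeof(buf), "txq%d", i);
 *		iflib_softirq_alloc_generic(ctx,
 *		    &sc->rx_irqs[i % scctx->isc_nrxqsets],
 *		    IFLIB_INTR_TX, &sc->tx_queues[i], i, buf);
 *	}
 *	err = iflib_irq_alloc_generic(ctx, &sc->admin_irq, rid,
 *	    IFLIB_INTR_ADMIN, drv_msix_admin, sc, 0, "admin");
 */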

/*
 * Allocate a single interrupt resource for INTx/legacy operation and wire
 * both the RX and TX grouptasks to it.
 */
static int
iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg,
    int *rid, const char *name)
{
	iflib_txq_t txq = ctx->ifc_txqs;
	iflib_rxq_t rxq = ctx->ifc_rxqs;
	if_irq_t irq = &ctx->ifc_legacy_irq;
	iflib_filter_info_t info;
	device_t dev;
	struct grouptask *gtask;
	struct resource *res;
	struct taskqgroup *tqg;
	void *q;
	int err, tqrid;
	bool rx_only;

	q = &ctx->ifc_rxqs[0];
	info = &rxq[0].ifr_filter_info;
	gtask = &rxq[0].ifr_task;
	tqg = qgroup_if_io_tqg;
	tqrid = *rid;
	rx_only = (ctx->ifc_sctx->isc_flags & IFLIB_SINGLE_IRQ_RX_ONLY) != 0;

	ctx->ifc_flags |= IFC_LEGACY;
	info->ifi_filter = filter;
	info->ifi_filter_arg = filter_arg;
	info->ifi_task = gtask;
	info->ifi_ctx = rx_only ? ctx : q;

	dev = ctx->ifc_dev;
	/* We allocate a single interrupt resource */
	err = _iflib_irq_alloc(ctx, irq, tqrid, rx_only ? iflib_fast_intr_ctx :
	    iflib_fast_intr_rxtx, NULL, info, name);
	if (err != 0)
		return (err);
	NET_GROUPTASK_INIT(gtask, 0, _task_fn_rx, q);
	res = irq->ii_res;
	taskqgroup_attach(tqg, gtask, q, dev, res, name);

	GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
	taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, dev, res,
	    "tx");
	return (0);
}

void
iflib_led_create(if_ctx_t ctx)
{

	ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
	    device_get_nameunit(ctx->ifc_dev));
}

void
iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
}

void
iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
}

void
iflib_admin_intr_deferred(if_ctx_t ctx)
{

	MPASS(ctx->ifc_admin_task.gt_taskqueue != NULL);
	GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
}

void
iflib_iov_intr_deferred(if_ctx_t ctx)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task);
}

void
iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu,
    const char *name)
{

	taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, NULL, NULL,
	    name);
}

void
iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
    const char *name)
{

	GROUPTASK_INIT(gtask, 0, fn, ctx);
	taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, NULL, NULL,
	    name);
}

void
iflib_config_gtask_deinit(struct grouptask *gtask)
{

	taskqgroup_detach(qgroup_if_config_tqg, gtask);
}

void
iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
{
	if_t ifp = ctx->ifc_ifp;
	iflib_txq_t txq = ctx->ifc_txqs;

	if_setbaudrate(ifp, baudrate);
	if (baudrate >= IF_Gbps(10)) {
		STATE_LOCK(ctx);
		ctx->ifc_flags |= IFC_PREFETCH;
		STATE_UNLOCK(ctx);
	}
	/* If link down, disable watchdog */
	if ((ctx->ifc_link_state == LINK_STATE_UP) &&
	    (link_state == LINK_STATE_DOWN)) {
		for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
			txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	}
	ctx->ifc_link_state = link_state;
	if_link_state_change(ifp, link_state);
}
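
/*
 * Drivers report link transitions through the call above, typically from
 * their IFDI update_admin_status method once the MAC has resolved speed.
 * A minimal sketch (hypothetical driver code, not part of iflib):
 *
 *	static void
 *	drv_if_update_admin_status(if_ctx_t ctx)
 *	{
 *		struct drv_softc *sc = iflib_get_softc(ctx);
 *
 *		if (sc->link_up)
 *			iflib_link_state_change(ctx, LINK_STATE_UP,
 *			    IF_Gbps(10));
 *		else
 *			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
 *	}
 */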

/*
 * Reclaim completed TX descriptor credits from the hardware and advance
 * the processed consumer index accordingly.
 */
static int
iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
{
	int credits;
#ifdef INVARIANTS
	int credits_pre = txq->ift_cidx_processed;
#endif

	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
	    BUS_DMASYNC_POSTREAD);
	if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc,
	    txq->ift_id, true)) == 0)
		return (0);

	txq->ift_processed += credits;
	txq->ift_cidx_processed += credits;

	MPASS(credits_pre + credits == txq->ift_cidx_processed);
	if (txq->ift_cidx_processed >= txq->ift_size)
		txq->ift_cidx_processed -= txq->ift_size;
	return (credits);
}

static int
iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
{
	iflib_fl_t fl;
	u_int i;

	for (i = 0, fl = &rxq->ifr_fl[0]; i < rxq->ifr_nfl; i++, fl++)
		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
	    budget));
}

void
iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
    const char *description, if_int_delay_info_t info,
    int offset, int value)
{
	info->iidi_ctx = ctx;
	info->iidi_offset = offset;
	info->iidi_value = value;
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
	    OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    info, 0, iflib_sysctl_int_delay, "I", description);
}
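
/*
 * Sketch of how a driver registers an interrupt-delay knob with the helper
 * above (hypothetical names; the offset is the device register backing the
 * value and the final argument its initial setting):
 *
 *	iflib_add_int_delay_sysctl(ctx, "rx_int_delay",
 *	    "receive interrupt delay in usecs", &sc->rx_int_delay,
 *	    DRV_REG_RDTR, drv_rx_int_delay_dflt);
 */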

struct sx *
iflib_ctx_lock_get(if_ctx_t ctx)
{

	return (&ctx->ifc_ctx_sx);
}

static int
iflib_msix_init(if_ctx_t ctx)
{
	device_t dev = ctx->ifc_dev;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	int admincnt, bar, err, iflib_num_rx_queues, iflib_num_tx_queues;
	int msgs, queuemsgs, queues, rx_queues, tx_queues, vectors;

	iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
	iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;

	if (bootverbose)
		device_printf(dev, "msix_init qsets capped at %d\n",
		    imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));

	/* Override by tuneable */
	if (scctx->isc_disable_msix)
		goto msi;

	/* First try MSI-X */
	if ((msgs = pci_msix_count(dev)) == 0) {
		if (bootverbose)
			device_printf(dev, "MSI-X not supported or disabled\n");
		goto msi;
	}

	bar = ctx->ifc_softc_ctx.isc_msix_bar;
	/*
	 * bar == -1 => "trust me I know what I'm doing"
	 * Some drivers are for hardware that is so shoddily
	 * documented that no one knows which bars are which
	 * so the developer has to map all bars. This hack
	 * allows shoddy garbage to use MSI-X in this framework.
	 */
	if (bar != -1) {
		ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &bar, RF_ACTIVE);
		if (ctx->ifc_msix_mem == NULL) {
			device_printf(dev, "Unable to map MSI-X table\n");
			goto msi;
		}
	}

	admincnt = sctx->isc_admin_intrcnt;
#if IFLIB_DEBUG
	/* use only 1 qset in debug mode */
	queuemsgs = min(msgs - admincnt, 1);
#else
	queuemsgs = msgs - admincnt;
#endif
#ifdef RSS
	queues = imin(queuemsgs, rss_getnumbuckets());
#else
	queues = queuemsgs;
#endif
	queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
	if (bootverbose)
		device_printf(dev,
		    "intr CPUs: %d queue msgs: %d admincnt: %d\n",
		    CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
#ifdef RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif
	if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt)
		rx_queues = iflib_num_rx_queues;
	else
		rx_queues = queues;

	if (rx_queues > scctx->isc_nrxqsets)
		rx_queues = scctx->isc_nrxqsets;

	/*
	 * We want this to be all logical CPUs by default
	 */
	if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
		tx_queues = iflib_num_tx_queues;
	else
		tx_queues = mp_ncpus;

	if (tx_queues > scctx->isc_ntxqsets)
		tx_queues = scctx->isc_ntxqsets;

	if (ctx->ifc_sysctl_qs_eq_override == 0) {
#ifdef INVARIANTS
		if (tx_queues != rx_queues)
			device_printf(dev,
			    "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
			    min(rx_queues, tx_queues), min(rx_queues, tx_queues));
#endif
		tx_queues = min(rx_queues, tx_queues);
		rx_queues = min(rx_queues, tx_queues);
	}

	vectors = rx_queues + admincnt;
	if (msgs < vectors) {
		device_printf(dev,
		    "insufficient number of MSI-X vectors "
		    "(supported %d, need %d)\n", msgs, vectors);
		goto msi;
	}

	device_printf(dev, "Using %d RX queues %d TX queues\n", rx_queues,
	    tx_queues);
	msgs = vectors;
	if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
		if (vectors != msgs) {
			device_printf(dev,
			    "Unable to allocate sufficient MSI-X vectors "
			    "(got %d, need %d)\n", vectors, msgs);
			pci_release_msi(dev);
			if (bar != -1) {
				bus_release_resource(dev, SYS_RES_MEMORY, bar,
				    ctx->ifc_msix_mem);
				ctx->ifc_msix_mem = NULL;
			}
			goto msi;
		}
		device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
		    vectors);
		scctx->isc_vectors = vectors;
		scctx->isc_nrxqsets = rx_queues;
		scctx->isc_ntxqsets = tx_queues;
		scctx->isc_intr = IFLIB_INTR_MSIX;

		return (vectors);
	} else {
		device_printf(dev,
		    "failed to allocate %d MSI-X vectors, err: %d\n", vectors,
		    err);
		if (bar != -1) {
			bus_release_resource(dev, SYS_RES_MEMORY, bar,
			    ctx->ifc_msix_mem);
			ctx->ifc_msix_mem = NULL;
		}
	}

msi:
	vectors = pci_msi_count(dev);
	scctx->isc_nrxqsets = 1;
	scctx->isc_ntxqsets = 1;
	scctx->isc_vectors = vectors;
	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
		device_printf(dev, "Using an MSI interrupt\n");
		scctx->isc_intr = IFLIB_INTR_MSI;
	} else {
		scctx->isc_vectors = 1;
		device_printf(dev, "Using a Legacy interrupt\n");
		scctx->isc_intr = IFLIB_INTR_LEGACY;
	}

	return (vectors);
}
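
/*
 * Worked example of the vector budgeting above, assuming a hypothetical
 * adapter advertising 16 MSI-X messages (pci_msix_count), one admin
 * interrupt (isc_admin_intrcnt == 1), 8 usable CPUs, and a shared context
 * permitting at least 8 qsets:
 *
 *	queuemsgs = 16 - 1 = 15
 *	queues    = min(8, 15) = 8
 *	vectors   = 8 RX queues + 1 admin = 9
 *
 * Only RX queues consume vectors; TX reclamation runs as softirq tasks on
 * the same vectors, which is why "vectors = rx_queues + admincnt" ignores
 * tx_queues.
 */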

static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };

static int
mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
{
	int rc;
	uint16_t *state = ((uint16_t *)oidp->oid_arg1);
	struct sbuf *sb;
	const char *ring_state = "UNKNOWN";

	/* XXX needed ? */
	rc = sysctl_wire_old_buffer(req, 0);
	MPASS(rc == 0);
	if (rc != 0)
		return (rc);
	sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
	MPASS(sb != NULL);
	if (sb == NULL)
		return (ENOMEM);
	if (state[3] <= 3)
		ring_state = ring_states[state[3]];

	sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
	    state[0], state[1], state[2], ring_state);
	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}

enum iflib_ndesc_handler {
	IFLIB_NTXD_HANDLER,
	IFLIB_NRXD_HANDLER,
};

/*
 * Sysctl handler backing override_ntxds/override_nrxds: reports and
 * parses a comma- or space-separated list of per-ring descriptor counts.
 */
static int
mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
{
	if_ctx_t ctx = (void *)arg1;
	enum iflib_ndesc_handler type = arg2;
	char buf[256] = {0};
	qidx_t *ndesc;
	char *p, *next;
	int nqs, rc, i;

	nqs = 8;
	switch (type) {
	case IFLIB_NTXD_HANDLER:
		ndesc = ctx->ifc_sysctl_ntxds;
		if (ctx->ifc_sctx)
			nqs = ctx->ifc_sctx->isc_ntxqs;
		break;
	case IFLIB_NRXD_HANDLER:
		ndesc = ctx->ifc_sysctl_nrxds;
		if (ctx->ifc_sctx)
			nqs = ctx->ifc_sctx->isc_nrxqs;
		break;
	default:
		printf("%s: unhandled type\n", __func__);
		return (EINVAL);
	}
	if (nqs == 0)
		nqs = 8;

	for (i = 0; i < 8; i++) {
		if (i >= nqs)
			break;
		if (i)
			strcat(buf, ",");
		sprintf(strchr(buf, 0), "%d", ndesc[i]);
	}

	rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (rc || req->newptr == NULL)
		return (rc);

	for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
	    i++, p = strsep(&next, " ,")) {
		ndesc[i] = strtoul(p, NULL, 10);
	}

	return (rc);
}
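
/*
 * Example of the string format mp_ndesc_handler() accepts, assuming a
 * hypothetical device name and a two-ring TX qset:
 *
 *	# sysctl dev.drv.0.iflib.override_ntxds="4096,4096"
 *
 * Each element overrides the descriptor count of the corresponding ring
 * in the qset; a value of 0 keeps the driver default.  Since the values
 * are consumed when the interface attaches, this is normally set as a
 * loader tunable rather than at runtime.
 */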

#define NAME_BUFLEN 32
static void
iflib_add_device_sysctl_pre(if_ctx_t ctx)
{
	device_t dev = iflib_get_dev(ctx);
	struct sysctl_oid_list *child, *oid_list;
	struct sysctl_ctx_list *ctx_list;
	struct sysctl_oid *node;

	ctx_list = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child,
	    OID_AUTO, "iflib", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "IFLIB fields");
	oid_list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_CONST_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
	    CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version,
	    "driver version");

	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
	    "# of txqs to use, 0 => use default #");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
	    "# of rxqs to use, 0 => use default #");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
	    "permit #txq != #rxq");
	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
	    CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
	    "disable MSI-X (default 0)");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
	    "set the RX budget");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
	    "cause TX to abdicate instead of running to completion");
	ctx->ifc_sysctl_core_offset = CORE_OFFSET_UNSPECIFIED;
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "core_offset",
	    CTLFLAG_RDTUN, &ctx->ifc_sysctl_core_offset, 0,
	    "offset to start using cores at");
	SYSCTL_ADD_U8(ctx_list, oid_list, OID_AUTO, "separate_txrx",
	    CTLFLAG_RDTUN, &ctx->ifc_sysctl_separate_txrx, 0,
	    "use separate cores for TX and RX");

	/* XXX change for per-queue sizes */
	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, ctx,
	    IFLIB_NTXD_HANDLER, mp_ndesc_handler, "A",
	    "list of # of TX descriptors to use, 0 = use default #");
	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, ctx,
	    IFLIB_NRXD_HANDLER, mp_ndesc_handler, "A",
	    "list of # of RX descriptors to use, 0 = use default #");
}

static void
iflib_add_device_sysctl_post(if_ctx_t ctx)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = iflib_get_dev(ctx);
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx_list;
	iflib_fl_t fl;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	int i, j;
	char namebuf[NAME_BUFLEN];
	char *qfmt;
	struct sysctl_oid *queue_node, *fl_node, *node;
	struct sysctl_oid_list *queue_list, *fl_list;

	ctx_list = device_get_sysctl_ctx(dev);
	node = ctx->ifc_sysctl_node;
	child = SYSCTL_CHILDREN(node);

	if (scctx->isc_ntxqsets > 100)
		qfmt = "txq%03d";
	else if (scctx->isc_ntxqsets > 10)
		qfmt = "txq%02d";
	else
		qfmt = "txq%d";
	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
#if MEMORY_LOGGING
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
		    CTLFLAG_RD,
		    &txq->ift_dequeued, "total mbufs freed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
		    CTLFLAG_RD,
		    &txq->ift_enqueued, "total mbufs enqueued");
#endif
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
		    CTLFLAG_RD,
		    &txq->ift_mbuf_defrag, "# of times m_defrag was called");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
		    CTLFLAG_RD,
		    &txq->ift_pullups, "# of times m_pullup was called");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
		    CTLFLAG_RD,
		    &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD,
		    &txq->ift_no_desc_avail, "# of times no descriptors were available");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
		    CTLFLAG_RD,
		    &txq->ift_map_failed, "# of times DMA map failed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
		    CTLFLAG_RD,
		    &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
		    CTLFLAG_RD,
		    &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
		    CTLFLAG_RD,
		    &txq->ift_pidx, 1, "Producer Index");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
		    CTLFLAG_RD,
		    &txq->ift_cidx, 1, "Consumer Index");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
		    CTLFLAG_RD,
		    &txq->ift_cidx_processed, 1,
		    "Consumer Index seen by credit update");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
		    CTLFLAG_RD,
		    &txq->ift_in_use, 1, "descriptors in use");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
		    CTLFLAG_RD,
		    &txq->ift_processed, "descriptors processed for clean");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
		    CTLFLAG_RD,
		    &txq->ift_cleaned, "total cleaned");
		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
		    __DEVOLATILE(uint64_t *, &txq->ift_br->state), 0,
		    mp_ring_state_handler, "A", "soft ring state");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
		    CTLFLAG_RD, &txq->ift_br->enqueues,
		    "# of enqueues to the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
		    CTLFLAG_RD, &txq->ift_br->drops,
		    "# of drops in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
		    CTLFLAG_RD, &txq->ift_br->starts,
		    "# of normal consumer starts in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
		    CTLFLAG_RD, &txq->ift_br->stalls,
		    "# of consumer stalls in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
		    CTLFLAG_RD, &txq->ift_br->restarts,
		    "# of consumer restarts in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
		    CTLFLAG_RD, &txq->ift_br->abdications,
		    "# of consumer abdications in the mp_ring for this queue");
	}

	if (scctx->isc_nrxqsets > 100)
		qfmt = "rxq%03d";
	else if (scctx->isc_nrxqsets > 10)
		qfmt = "rxq%02d";
	else
		qfmt = "rxq%d";
	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
			    CTLFLAG_RD,
			    &rxq->ifr_cq_cidx, 1, "Consumer Index");
		}

		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list,
			    OID_AUTO, namebuf,
			    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist Name");
			fl_list = SYSCTL_CHILDREN(fl_node);
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
			    CTLFLAG_RD,
			    &fl->ifl_pidx, 1, "Producer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
			    CTLFLAG_RD,
			    &fl->ifl_cidx, 1, "Consumer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
			    CTLFLAG_RD,
			    &fl->ifl_credits, 1, "credits available");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "buf_size",
			    CTLFLAG_RD,
			    &fl->ifl_buf_size, 1, "buffer size");
#if MEMORY_LOGGING
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
			    CTLFLAG_RD,
			    &fl->ifl_m_enqueued, "mbufs allocated");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
			    CTLFLAG_RD,
			    &fl->ifl_m_dequeued, "mbufs freed");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
			    CTLFLAG_RD,
			    &fl->ifl_cl_enqueued, "clusters allocated");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
			    CTLFLAG_RD,
			    &fl->ifl_cl_dequeued, "clusters freed");
#endif
		}
	}
}

void
iflib_request_reset(if_ctx_t ctx)
{

	STATE_LOCK(ctx);
	ctx->ifc_flags |= IFC_DO_RESET;
	STATE_UNLOCK(ctx);
}

#ifndef __NO_STRICT_ALIGNMENT
/*
 * Re-align the payload of a received frame so that the IP header ends up
 * 32-bit aligned on strict-alignment architectures: either slide the data
 * within the cluster or prepend a separate mbuf holding the Ethernet
 * header.
 */
static struct mbuf *
iflib_fixup_rx(struct mbuf *m)
{
	struct mbuf *n;

	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		MGETHDR(n, M_NOWAIT, MT_DATA);
		if (n == NULL) {
			m_freem(m);
			return (NULL);
		}
		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
		m->m_data += ETHER_HDR_LEN;
		m->m_len -= ETHER_HDR_LEN;
		n->m_len = ETHER_HDR_LEN;
		M_MOVE_PKTHDR(n, m);
		n->m_next = m;
	}
	return (n);
}
#endif

#ifdef DEBUGNET
static void
iflib_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
{
	if_ctx_t ctx;

	ctx = if_getsoftc(ifp);
	CTX_LOCK(ctx);
	*nrxr = NRXQSETS(ctx);
	*ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
	*clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
	CTX_UNLOCK(ctx);
}

static void
iflib_debugnet_event(if_t ifp, enum debugnet_ev event)
{
	if_ctx_t ctx;
	if_softc_ctx_t scctx;
	iflib_fl_t fl;
	iflib_rxq_t rxq;
	int i, j;

	ctx = if_getsoftc(ifp);
	scctx = &ctx->ifc_softc_ctx;

	switch (event) {
	case DEBUGNET_START:
		for (i = 0; i < scctx->isc_nrxqsets; i++) {
			rxq = &ctx->ifc_rxqs[i];
			for (j = 0; j < rxq->ifr_nfl; j++) {
				fl = &rxq->ifr_fl[j];
				fl->ifl_zone = m_getzone(fl->ifl_buf_size);
			}
		}
		iflib_no_tx_batch = 1;
		break;
	default:
		break;
	}
}

static int
iflib_debugnet_transmit(if_t ifp, struct mbuf *m)
{
	if_ctx_t ctx;
	iflib_txq_t txq;
	int error;

	ctx = if_getsoftc(ifp);
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	txq = &ctx->ifc_txqs[0];
	error = iflib_encap(txq, &m);
	if (error == 0)
		(void)iflib_txd_db_check(txq, true);
	return (error);
}

static int
iflib_debugnet_poll(if_t ifp, int count)
{
	struct epoch_tracker et;
	if_ctx_t ctx;
	if_softc_ctx_t scctx;
	iflib_txq_t txq;
	int i;

	ctx = if_getsoftc(ifp);
	scctx = &ctx->ifc_softc_ctx;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	txq = &ctx->ifc_txqs[0];
	(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));

	NET_EPOCH_ENTER(et);
	for (i = 0; i < scctx->isc_nrxqsets; i++)
		(void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
	NET_EPOCH_EXIT(et);
	return (0);
}
#endif /* DEBUGNET */