1 /*- 2 * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Neither the name of Matthew Macy nor the names of its 12 * contributors may be used to endorse or promote products derived from 13 * this software without specific prior written permission. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_inet.h" 32 #include "opt_inet6.h" 33 #include "opt_acpi.h" 34 #include "opt_sched.h" 35 36 #include <sys/param.h> 37 #include <sys/types.h> 38 #include <sys/bus.h> 39 #include <sys/eventhandler.h> 40 #include <sys/kernel.h> 41 #include <sys/lock.h> 42 #include <sys/mutex.h> 43 #include <sys/module.h> 44 #include <sys/kobj.h> 45 #include <sys/rman.h> 46 #include <sys/sbuf.h> 47 #include <sys/smp.h> 48 #include <sys/socket.h> 49 #include <sys/sockio.h> 50 #include <sys/sysctl.h> 51 #include <sys/syslog.h> 52 #include <sys/taskqueue.h> 53 #include <sys/limits.h> 54 55 #include <net/if.h> 56 #include <net/if_var.h> 57 #include <net/if_types.h> 58 #include <net/if_media.h> 59 #include <net/bpf.h> 60 #include <net/ethernet.h> 61 #include <net/mp_ring.h> 62 #include <net/debugnet.h> 63 #include <net/pfil.h> 64 #include <net/vnet.h> 65 66 #include <netinet/in.h> 67 #include <netinet/in_pcb.h> 68 #include <netinet/tcp_lro.h> 69 #include <netinet/in_systm.h> 70 #include <netinet/if_ether.h> 71 #include <netinet/ip.h> 72 #include <netinet/ip6.h> 73 #include <netinet/tcp.h> 74 #include <netinet/ip_var.h> 75 #include <netinet6/ip6_var.h> 76 77 #include <machine/bus.h> 78 #include <machine/in_cksum.h> 79 80 #include <vm/vm.h> 81 #include <vm/pmap.h> 82 83 #include <dev/led/led.h> 84 #include <dev/pci/pcireg.h> 85 #include <dev/pci/pcivar.h> 86 #include <dev/pci/pci_private.h> 87 88 #include <net/iflib.h> 89 #include <net/iflib_private.h> 90 91 #include "ifdi_if.h" 92 93 #ifdef PCI_IOV 94 #include <dev/pci/pci_iov.h> 95 #endif 96 97 #include <sys/bitstring.h> 98 /* 99 * enable accounting of every mbuf as it comes in to and goes out of 100 * iflib's software descriptor references 101 */ 102 #define MEMORY_LOGGING 0 103 /* 104 * Enable mbuf vectors for compressing long mbuf chains 105 */ 106 107 /* 108 * NB: 109 * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead 110 * we prefetch needs to be determined by the time spent in m_free vis a vis 111 * the cost of a prefetch. 
 *   This will of course vary based on the workload:
 *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
 *        is quite expensive, thus suggesting very little prefetch.
 *      - small packet forwarding which is just returning a single mbuf to
 *        UMA will typically be very fast vis a vis the cost of a memory
 *        access.
 */

/*
 * File organization:
 *  - private structures
 *  - iflib private utility functions
 *  - ifnet functions
 *  - vlan registry and other exported functions
 *  - iflib public core functions
 *
 */
MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");

#define	IFLIB_RXEOF_MORE	(1U << 0)
#define	IFLIB_RXEOF_EMPTY	(1U << 1)

struct iflib_txq;
typedef struct iflib_txq *iflib_txq_t;
struct iflib_rxq;
typedef struct iflib_rxq *iflib_rxq_t;
struct iflib_fl;
typedef struct iflib_fl *iflib_fl_t;

struct iflib_ctx;

static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
static void iflib_timer(void *arg);

typedef struct iflib_filter_info {
	driver_filter_t *ifi_filter;
	void *ifi_filter_arg;
	struct grouptask *ifi_task;
	void *ifi_ctx;
} *iflib_filter_info_t;

struct iflib_ctx {
	KOBJ_FIELDS;
	/*
	 * Pointer to hardware driver's softc
	 */
	void *ifc_softc;
	device_t ifc_dev;
	if_t ifc_ifp;

	cpuset_t ifc_cpus;
	if_shared_ctx_t ifc_sctx;
	struct if_softc_ctx ifc_softc_ctx;

	struct sx ifc_ctx_sx;
	struct mtx ifc_state_mtx;

	iflib_txq_t ifc_txqs;
	iflib_rxq_t ifc_rxqs;
	uint32_t ifc_if_flags;
	uint32_t ifc_flags;
	uint32_t ifc_max_fl_buf_size;
	uint32_t ifc_rx_mbuf_sz;

	int ifc_link_state;
	int ifc_watchdog_events;
	struct cdev *ifc_led_dev;
	struct resource *ifc_msix_mem;

	struct if_irq ifc_legacy_irq;
	struct grouptask ifc_admin_task;
	struct grouptask ifc_vflr_task;
	struct iflib_filter_info ifc_filter_info;
	struct ifmedia ifc_media;
	struct ifmedia *ifc_mediap;

	struct sysctl_oid *ifc_sysctl_node;
	uint16_t ifc_sysctl_ntxqs;
	uint16_t ifc_sysctl_nrxqs;
	uint16_t ifc_sysctl_qs_eq_override;
	uint16_t ifc_sysctl_rx_budget;
	uint16_t ifc_sysctl_tx_abdicate;
	uint16_t ifc_sysctl_core_offset;
#define	CORE_OFFSET_UNSPECIFIED	0xffff
	uint8_t ifc_sysctl_separate_txrx;

	qidx_t ifc_sysctl_ntxds[8];
	qidx_t ifc_sysctl_nrxds[8];
	struct if_txrx ifc_txrx;
#define	isc_txd_encap		ifc_txrx.ift_txd_encap
#define	isc_txd_flush		ifc_txrx.ift_txd_flush
#define	isc_txd_credits_update	ifc_txrx.ift_txd_credits_update
#define	isc_rxd_available	ifc_txrx.ift_rxd_available
#define	isc_rxd_pkt_get		ifc_txrx.ift_rxd_pkt_get
#define	isc_rxd_refill		ifc_txrx.ift_rxd_refill
#define	isc_rxd_flush		ifc_txrx.ift_rxd_flush
#define	isc_legacy_intr		ifc_txrx.ift_legacy_intr
	eventhandler_tag ifc_vlan_attach_event;
	eventhandler_tag ifc_vlan_detach_event;
	struct ether_addr ifc_mac;
};

void *
iflib_get_softc(if_ctx_t ctx)
{

	return (ctx->ifc_softc);
}

device_t
iflib_get_dev(if_ctx_t ctx)
{

	return (ctx->ifc_dev);
}

if_t
iflib_get_ifp(if_ctx_t ctx)
{

	return (ctx->ifc_ifp);
}

struct ifmedia *
iflib_get_media(if_ctx_t ctx)
{

	return (ctx->ifc_mediap);
}
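
/*
 * Example (a sketch, not taken from any real driver): an iflib client
 * typically recovers its own state and the underlying device through
 * these accessors in its attach path.  "mydrv_softc" and
 * "mydrv_attach_pre" are hypothetical names.
 *
 *	static int
 *	mydrv_attach_pre(if_ctx_t ctx)
 *	{
 *		struct mydrv_softc *sc = iflib_get_softc(ctx);
 *
 *		sc->dev = iflib_get_dev(ctx);
 *		sc->shared = iflib_get_softc_ctx(ctx);
 *		return (0);
 *	}
 */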
uint32_t
iflib_get_flags(if_ctx_t ctx)
{
	return (ctx->ifc_flags);
}

void
iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
{

	bcopy(mac, ctx->ifc_mac.octet, ETHER_ADDR_LEN);
}

if_softc_ctx_t
iflib_get_softc_ctx(if_ctx_t ctx)
{

	return (&ctx->ifc_softc_ctx);
}

if_shared_ctx_t
iflib_get_sctx(if_ctx_t ctx)
{

	return (ctx->ifc_sctx);
}

#define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
#define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
/* round a pointer up to the next cache-line boundary */
#define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)CACHE_LINE_SIZE-1)))

#define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
#define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)

typedef struct iflib_sw_rx_desc_array {
	bus_dmamap_t	*ifsd_map;	/* bus_dma maps for packet */
	struct mbuf	**ifsd_m;	/* pkthdr mbufs */
	caddr_t		*ifsd_cl;	/* direct cluster pointer for rx */
	bus_addr_t	*ifsd_ba;	/* bus addr of cluster for rx */
} iflib_rxsd_array_t;

typedef struct iflib_sw_tx_desc_array {
	bus_dmamap_t	*ifsd_map;	/* bus_dma maps for packet */
	bus_dmamap_t	*ifsd_tso_map;	/* bus_dma maps for TSO packet */
	struct mbuf	**ifsd_m;	/* pkthdr mbufs */
} if_txsd_vec_t;

/* magic number that should be high enough for any hardware */
#define IFLIB_MAX_TX_SEGS		128
#define IFLIB_RX_COPY_THRESH		128
#define IFLIB_MAX_RX_REFRESH		32
/* The minimum descriptors per second before we start coalescing */
#define IFLIB_MIN_DESC_SEC		16384
#define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
#define IFLIB_QUEUE_IDLE		0
#define IFLIB_QUEUE_HUNG		1
#define IFLIB_QUEUE_WORKING		2
/* maximum number of txqs that can share an rx interrupt */
#define IFLIB_MAX_TX_SHARED_INTR	4

/* this should really scale with ring size - this is a fairly arbitrary value */
#define TX_BATCH_SIZE			32

#define IFLIB_RESTART_BUDGET		8

#define CSUM_OFFLOAD		(CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
				 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
				 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)

struct iflib_txq {
	qidx_t		ift_in_use;
	qidx_t		ift_cidx;
	qidx_t		ift_cidx_processed;
	qidx_t		ift_pidx;
	uint8_t		ift_gen;
	uint8_t		ift_br_offset;
	uint16_t	ift_npending;
	uint16_t	ift_db_pending;
	uint16_t	ift_rs_pending;
	/* implicit pad */
	uint8_t		ift_txd_size[8];
	uint64_t	ift_processed;
	uint64_t	ift_cleaned;
	uint64_t	ift_cleaned_prev;
#if MEMORY_LOGGING
	uint64_t	ift_enqueued;
	uint64_t	ift_dequeued;
#endif
	uint64_t	ift_no_tx_dma_setup;
	uint64_t	ift_no_desc_avail;
	uint64_t	ift_mbuf_defrag_failed;
	uint64_t	ift_mbuf_defrag;
	uint64_t	ift_map_failed;
	uint64_t	ift_txd_encap_efbig;
	uint64_t	ift_pullups;
	uint64_t	ift_last_timer_tick;

	struct mtx	ift_mtx;
	struct mtx	ift_db_mtx;

	/* constant values */
	if_ctx_t	ift_ctx;
	struct ifmp_ring	*ift_br;
	struct grouptask	ift_task;
	qidx_t		ift_size;
	uint16_t	ift_id;
	struct callout	ift_timer;

	if_txsd_vec_t	ift_sds;
	uint8_t		ift_qstatus;
	uint8_t		ift_closed;
	uint8_t		ift_update_freq;
	struct iflib_filter_info ift_filter_info;
	bus_dma_tag_t	ift_buf_tag;
	bus_dma_tag_t	ift_tso_buf_tag;
	iflib_dma_info_t	ift_ifdi;
#define	MTX_NAME_LEN	32
	char		ift_mtx_name[MTX_NAME_LEN];
	bus_dma_segment_t	ift_segs[IFLIB_MAX_TX_SEGS]
	    __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ift_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

struct iflib_fl {
	qidx_t		ifl_cidx;
	qidx_t		ifl_pidx;
	qidx_t		ifl_credits;
	uint8_t		ifl_gen;
	uint8_t		ifl_rxd_size;
#if MEMORY_LOGGING
	uint64_t	ifl_m_enqueued;
	uint64_t	ifl_m_dequeued;
	uint64_t	ifl_cl_enqueued;
	uint64_t	ifl_cl_dequeued;
#endif
	/* implicit pad */
	bitstr_t	*ifl_rx_bitmap;
	qidx_t		ifl_fragidx;
	/* constant */
	qidx_t		ifl_size;
	uint16_t	ifl_buf_size;
	uint16_t	ifl_cltype;
	uma_zone_t	ifl_zone;
	iflib_rxsd_array_t	ifl_sds;
	iflib_rxq_t	ifl_rxq;
	uint8_t		ifl_id;
	bus_dma_tag_t	ifl_buf_tag;
	iflib_dma_info_t	ifl_ifdi;
	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
	caddr_t		ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
	qidx_t		ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
} __aligned(CACHE_LINE_SIZE);

/*
 * Compute the number of descriptors in use.  The generation bit
 * disambiguates the pidx == cidx case: the ring is empty when
 * gen == 0 and completely full when gen == 1.
 */
static inline qidx_t
get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
{
	qidx_t used;

	if (pidx > cidx)
		used = pidx - cidx;
	else if (pidx < cidx)
		used = size - cidx + pidx;
	else if (gen == 0 && pidx == cidx)
		used = 0;
	else if (gen == 1 && pidx == cidx)
		used = size;
	else
		panic("bad state");

	return (used);
}

#define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))

#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))

struct iflib_rxq {
	if_ctx_t	ifr_ctx;
	iflib_fl_t	ifr_fl;
	uint64_t	ifr_rx_irq;
	struct pfil_head	*pfil;
	/*
	 * If there is a separate completion queue (IFLIB_HAS_RXCQ), this is
	 * the command queue consumer index.  Otherwise it's unused.
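	 *
	 * For example, iflib_fast_intr_rxtx() below chooses the index it
	 * polls for new descriptors this way:
	 *
	 *	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
	 *		cidx = rxq->ifr_cq_cidx;
	 *	else
	 *		cidx = rxq->ifr_fl[0].ifl_cidx;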
	 */
	qidx_t		ifr_cq_cidx;
	uint16_t	ifr_id;
	uint8_t		ifr_nfl;
	uint8_t		ifr_ntxqirq;
	uint8_t		ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
	uint8_t		ifr_fl_offset;
	struct lro_ctrl	ifr_lc;
	struct grouptask	ifr_task;
	struct callout	ifr_watchdog;
	struct iflib_filter_info ifr_filter_info;
	iflib_dma_info_t	ifr_ifdi;

	/* dynamically allocate if any drivers need a value substantially larger than this */
	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ifr_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

typedef struct if_rxsd {
	caddr_t *ifsd_cl;
	iflib_fl_t ifsd_fl;
} *if_rxsd_t;

/* multiple of word size */
#ifdef __LP64__
#define PKT_INFO_SIZE	6
#define RXD_INFO_SIZE	5
#define PKT_TYPE uint64_t
#else
#define PKT_INFO_SIZE	11
#define RXD_INFO_SIZE	8
#define PKT_TYPE uint32_t
#endif
#define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
#define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)

typedef struct if_pkt_info_pad {
	PKT_TYPE pkt_val[PKT_INFO_SIZE];
} *if_pkt_info_pad_t;
typedef struct if_rxd_info_pad {
	PKT_TYPE rxd_val[RXD_INFO_SIZE];
} *if_rxd_info_pad_t;

CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));

static inline void
pkt_info_zero(if_pkt_info_t pi)
{
	if_pkt_info_pad_t pi_pad;

	pi_pad = (if_pkt_info_pad_t)pi;
	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
#ifndef __LP64__
	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
#endif
}

static device_method_t iflib_pseudo_methods[] = {
	DEVMETHOD(device_attach, noop_attach),
	DEVMETHOD(device_detach, iflib_pseudo_detach),
	DEVMETHOD_END
};

driver_t iflib_pseudodriver = {
	"iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx),
};

static inline void
rxd_info_zero(if_rxd_info_t ri)
{
	if_rxd_info_pad_t ri_pad;
	int i;

	ri_pad = (if_rxd_info_pad_t)ri;
	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
		ri_pad->rxd_val[i] = 0;
		ri_pad->rxd_val[i+1] = 0;
		ri_pad->rxd_val[i+2] = 0;
		ri_pad->rxd_val[i+3] = 0;
	}
#ifdef __LP64__
	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
#endif
}

/*
 * Only allow a single packet to take up at most 1/nth of the tx ring
 */
#define MAX_SINGLE_PACKET_FRACTION 12
#define IF_BAD_DMA (bus_addr_t)-1

#define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))

#define CTX_LOCK_INIT(_sc)  sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
#define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
#define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
#define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)

#define STATE_LOCK_INIT(_sc, _name)  mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
#define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
#define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
#define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)

#define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
#define CALLOUT_UNLOCK(txq)	mtx_unlock(&txq->ift_mtx)

void
iflib_set_detach(if_ctx_t ctx)
{
	STATE_LOCK(ctx);
	ctx->ifc_flags |= IFC_IN_DETACH;
	STATE_UNLOCK(ctx);
}

/* Our boot-time initialization hook */
static int	iflib_module_event_handler(module_t, int, void *);

static moduledata_t iflib_moduledata = {
	"iflib",
	iflib_module_event_handler,
	NULL
};

DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
MODULE_VERSION(iflib, 1);

MODULE_DEPEND(iflib, pci, 1, 1, 1);
MODULE_DEPEND(iflib, ether, 1, 1, 1);

TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
TASKQGROUP_DEFINE(if_config_tqg, 1, 1);

#ifndef IFLIB_DEBUG_COUNTERS
#ifdef INVARIANTS
#define IFLIB_DEBUG_COUNTERS 1
#else
#define IFLIB_DEBUG_COUNTERS 0
#endif /* !INVARIANTS */
#endif

static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "iflib driver parameters");

/*
 * XXX need to ensure that this can't accidentally cause the head to be moved backwards
 */
static int iflib_min_tx_latency = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
    &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
static int iflib_no_tx_batch = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
    &iflib_no_tx_batch, 0, "disable transmit batching at the possible expense of throughput");

#if IFLIB_DEBUG_COUNTERS

static int iflib_tx_seen;
static int iflib_tx_sent;
static int iflib_tx_encap;
static int iflib_rx_allocs;
static int iflib_fl_refills;
static int iflib_fl_refills_large;
static int iflib_tx_frees;

SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
    &iflib_tx_seen, 0, "# TX mbufs seen");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
    &iflib_tx_sent, 0, "# TX mbufs sent");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
    &iflib_tx_encap, 0, "# TX mbufs encapped");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
    &iflib_tx_frees, 0, "# TX frees");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
    &iflib_rx_allocs, 0, "# RX allocations");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
    &iflib_fl_refills, 0, "# refills");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
    &iflib_fl_refills_large, 0, "# large refills");

static int iflib_txq_drain_flushing;
static int iflib_txq_drain_oactive;
static int iflib_txq_drain_notready;

SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
    &iflib_txq_drain_flushing, 0, "# drain flushes");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
    &iflib_txq_drain_oactive, 0, "# drain oactives");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
    &iflib_txq_drain_notready, 0, "# drain notready");

static int iflib_encap_load_mbuf_fail;
static int iflib_encap_pad_mbuf_fail;
static int iflib_encap_txq_avail_fail;
static int iflib_encap_txd_encap_fail;

SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
    &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
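
/*
 * These debug counters (here and below) are readable at runtime,
 * e.g. (sketch):
 *
 *	# sysctl net.iflib.encap_txq_avail_fail
 *	net.iflib.encap_txq_avail_fail: 0
 *
 * They exist only when IFLIB_DEBUG_COUNTERS is non-zero, which is the
 * default when the kernel is built with INVARIANTS.
 */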
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD, 641 &iflib_encap_txd_encap_fail, 0, "# driver encap failures"); 642 643 static int iflib_task_fn_rxs; 644 static int iflib_rx_intr_enables; 645 static int iflib_fast_intrs; 646 static int iflib_rx_unavail; 647 static int iflib_rx_ctx_inactive; 648 static int iflib_rx_if_input; 649 static int iflib_rxd_flush; 650 651 static int iflib_verbose_debug; 652 653 SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD, 654 &iflib_task_fn_rxs, 0, "# task_fn_rx calls"); 655 SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD, 656 &iflib_rx_intr_enables, 0, "# RX intr enables"); 657 SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD, 658 &iflib_fast_intrs, 0, "# fast_intr calls"); 659 SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD, 660 &iflib_rx_unavail, 0, "# times rxeof called with no available data"); 661 SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD, 662 &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context"); 663 SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD, 664 &iflib_rx_if_input, 0, "# times rxeof called if_input"); 665 SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD, 666 &iflib_rxd_flush, 0, "# times rxd_flush called"); 667 SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW, 668 &iflib_verbose_debug, 0, "enable verbose debugging"); 669 670 #define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1) 671 static void 672 iflib_debug_reset(void) 673 { 674 iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs = 675 iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees = 676 iflib_txq_drain_flushing = iflib_txq_drain_oactive = 677 iflib_txq_drain_notready = 678 iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail = 679 iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail = 680 iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs = 681 iflib_rx_unavail = 682 iflib_rx_ctx_inactive = iflib_rx_if_input = 683 iflib_rxd_flush = 0; 684 } 685 686 #else 687 #define DBG_COUNTER_INC(name) 688 static void iflib_debug_reset(void) {} 689 #endif 690 691 #define IFLIB_DEBUG 0 692 693 static void iflib_tx_structures_free(if_ctx_t ctx); 694 static void iflib_rx_structures_free(if_ctx_t ctx); 695 static int iflib_queues_alloc(if_ctx_t ctx); 696 static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq); 697 static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget); 698 static int iflib_qset_structures_setup(if_ctx_t ctx); 699 static int iflib_msix_init(if_ctx_t ctx); 700 static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str); 701 static void iflib_txq_check_drain(iflib_txq_t txq, int budget); 702 static uint32_t iflib_txq_can_drain(struct ifmp_ring *); 703 #ifdef ALTQ 704 static void iflib_altq_if_start(if_t ifp); 705 static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m); 706 #endif 707 static int iflib_register(if_ctx_t); 708 static void iflib_deregister(if_ctx_t); 709 static void iflib_unregister_vlan_handlers(if_ctx_t ctx); 710 static uint16_t iflib_get_mbuf_size_for(unsigned int size); 711 static void iflib_init_locked(if_ctx_t ctx); 712 static void iflib_add_device_sysctl_pre(if_ctx_t ctx); 713 static void iflib_add_device_sysctl_post(if_ctx_t ctx); 714 static void iflib_ifmp_purge(iflib_txq_t txq); 715 static void _iflib_pre_assert(if_softc_ctx_t scctx); 716 static void iflib_if_init_locked(if_ctx_t ctx); 717 static void 
iflib_free_intr_mem(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf * iflib_fixup_rx(struct mbuf *m);
#endif

static SLIST_HEAD(cpu_offset_list, cpu_offset) cpu_offsets =
    SLIST_HEAD_INITIALIZER(cpu_offsets);
struct cpu_offset {
	SLIST_ENTRY(cpu_offset) entries;
	cpuset_t	set;
	unsigned int	refcount;
	uint16_t	offset;
};
static struct mtx cpu_offset_mtx;
MTX_SYSINIT(iflib_cpu_offset, &cpu_offset_mtx, "iflib_cpu_offset lock",
    MTX_DEF);

DEBUGNET_DEFINE(iflib);

#ifdef DEV_NETMAP
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

MODULE_DEPEND(iflib, netmap, 1, 1, 1);

static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init);

/*
 * device-specific sysctl variables:
 *
 * iflib_crcstrip: 0: keep CRC in rx frames, 1: strip it (the default).
 *	During regular operation the CRC is stripped, but on some
 *	hardware reception of frames that are not a multiple of 64 is
 *	slower, so using crcstrip=0 helps in benchmarks.
 *
 * iflib_rx_miss, iflib_rx_miss_bufs:
 *	count packets that might be missed due to lost interrupts.
 */
SYSCTL_DECL(_dev_netmap);
/*
 * The xl driver by default strips CRCs and we do not override it.
 */

int iflib_crcstrip = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
    CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on RX frames");

int iflib_rx_miss, iflib_rx_miss_bufs;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
    CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed RX intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
    CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed RX intr bufs");

/*
 * Register/unregister. We are already under netmap lock.
 * Only called on the first register or the last unregister.
 */
static int
iflib_netmap_register(struct netmap_adapter *na, int onoff)
{
	if_t ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;
	int status;

	CTX_LOCK(ctx);
	IFDI_INTR_DISABLE(ctx);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	if (!CTX_IS_VF(ctx))
		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);

	iflib_stop(ctx);

	/*
	 * Enable (or disable) netmap flags, and intercept (or restore)
	 * ifp->if_transmit. This is done once the device has been stopped
	 * to prevent race conditions.
	 */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}

	iflib_init_locked(ctx);
	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); /* XXX why twice? */
	status = ifp->if_drv_flags & IFF_DRV_RUNNING ?
0 : 1; 807 if (status) 808 nm_clear_native_flags(na); 809 CTX_UNLOCK(ctx); 810 return (status); 811 } 812 813 static int 814 netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init) 815 { 816 struct netmap_adapter *na = kring->na; 817 u_int const lim = kring->nkr_num_slots - 1; 818 u_int head = kring->rhead; 819 struct netmap_ring *ring = kring->ring; 820 bus_dmamap_t *map; 821 struct if_rxd_update iru; 822 if_ctx_t ctx = rxq->ifr_ctx; 823 iflib_fl_t fl = &rxq->ifr_fl[0]; 824 uint32_t refill_pidx, nic_i; 825 #if IFLIB_DEBUG_COUNTERS 826 int rf_count = 0; 827 #endif 828 829 if (nm_i == head && __predict_true(!init)) 830 return 0; 831 iru_init(&iru, rxq, 0 /* flid */); 832 map = fl->ifl_sds.ifsd_map; 833 refill_pidx = netmap_idx_k2n(kring, nm_i); 834 /* 835 * IMPORTANT: we must leave one free slot in the ring, 836 * so move head back by one unit 837 */ 838 head = nm_prev(head, lim); 839 nic_i = UINT_MAX; 840 DBG_COUNTER_INC(fl_refills); 841 while (nm_i != head) { 842 #if IFLIB_DEBUG_COUNTERS 843 if (++rf_count == 9) 844 DBG_COUNTER_INC(fl_refills_large); 845 #endif 846 for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) { 847 struct netmap_slot *slot = &ring->slot[nm_i]; 848 void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]); 849 uint32_t nic_i_dma = refill_pidx; 850 nic_i = netmap_idx_k2n(kring, nm_i); 851 852 MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH); 853 854 if (addr == NETMAP_BUF_BASE(na)) /* bad buf */ 855 return netmap_ring_reinit(kring); 856 857 fl->ifl_vm_addrs[tmp_pidx] = addr; 858 if (__predict_false(init)) { 859 netmap_load_map(na, fl->ifl_buf_tag, 860 map[nic_i], addr); 861 } else if (slot->flags & NS_BUF_CHANGED) { 862 /* buffer has changed, reload map */ 863 netmap_reload_map(na, fl->ifl_buf_tag, 864 map[nic_i], addr); 865 } 866 slot->flags &= ~NS_BUF_CHANGED; 867 868 nm_i = nm_next(nm_i, lim); 869 fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim); 870 if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1) 871 continue; 872 873 iru.iru_pidx = refill_pidx; 874 iru.iru_count = tmp_pidx+1; 875 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); 876 refill_pidx = nic_i; 877 for (int n = 0; n < iru.iru_count; n++) { 878 bus_dmamap_sync(fl->ifl_buf_tag, map[nic_i_dma], 879 BUS_DMASYNC_PREREAD); 880 /* XXX - change this to not use the netmap func*/ 881 nic_i_dma = nm_next(nic_i_dma, lim); 882 } 883 } 884 } 885 kring->nr_hwcur = head; 886 887 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 888 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 889 if (__predict_true(nic_i != UINT_MAX)) { 890 ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i); 891 DBG_COUNTER_INC(rxd_flush); 892 } 893 return (0); 894 } 895 896 /* 897 * Reconcile kernel and user view of the transmit ring. 898 * 899 * All information is in the kring. 900 * Userspace wants to send packets up to the one before kring->rhead, 901 * kernel knows kring->nr_hwcur is the first unsent packet. 902 * 903 * Here we push packets out (as many as possible), and possibly 904 * reclaim buffers from previously completed transmission. 905 * 906 * The caller (netmap) guarantees that there is only one instance 907 * running at any time. Any interference with other driver 908 * methods should be handled by the individual drivers. 
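 *
 * As a worked example (the numbers are illustrative only): with a
 * 1024-slot ring, nr_hwcur == 100 and rhead == 103, the loop below
 * encapsulates slots 100, 101 and 102 via isc_txd_encap() and then
 * advances nr_hwcur to 103; slot 103 itself is not transmitted.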
 */
static int
iflib_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	if_t ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap kring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct if_pkt_info pi;

	/*
	 * interrupts on every tx packet are expensive so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;
	/* device-specific */
	if_ctx_t ctx = ifp->if_softc;
	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];

	bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap kring,
	 * nic_i is the corresponding index in the NIC ring.
	 *
	 * If we have packets to send (nm_i != head)
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() call is especially expensive,
	 * even when (as in this case) the tag is 0, so do it only
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		pkt_info_zero(&pi);
		pi.ipi_segs = txq->ift_segs;
		pi.ipi_qsidx = kring->ring_id;
		nic_i = netmap_idx_k2n(kring, nm_i);

		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);

		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int flags = (slot->flags & NS_REPORT ||
				nic_i == 0 || nic_i == report_frequency) ?
				IPI_TX_INTR : 0;

			/* device-specific */
			pi.ipi_len = len;
			pi.ipi_segs[0].ds_addr = paddr;
			pi.ipi_segs[0].ds_len = len;
			pi.ipi_nsegs = 1;
			pi.ipi_ndescs = 0;
			pi.ipi_pidx = nic_i;
			pi.ipi_flags = flags;

			/* Fill the slot in the NIC ring.
			 */
			ctx->isc_txd_encap(ctx->ifc_softc, &pi);
			DBG_COUNTER_INC(tx_encap);

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);

			NM_CHECK_ADDR_LEN(na, addr, len);

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, txq->ift_buf_tag,
				    txq->ift_sds.ifsd_map[nic_i], addr);
			}
			/* make sure changes to the buffer are synced */
			bus_dmamap_sync(txq->ift_buf_tag,
			    txq->ift_sds.ifsd_map[nic_i],
			    BUS_DMASYNC_PREWRITE);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = nm_i;

		/* synchronize the NIC ring */
		bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 *
	 * If there are unclaimed buffers, attempt to reclaim them.
	 * If none are reclaimed, and TX IRQs are not in use, do an initial
	 * minimal delay, then trigger the tx handler which will spin in the
	 * group task queue.
	 */
	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
		if (iflib_tx_credits_update(ctx, txq)) {
			/* some tx completed, increment avail */
			nic_i = txq->ift_cidx_processed;
			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
		}
	}
	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
		if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
			callout_reset_on(&txq->ift_timer, hz < 2000 ? 1 : hz / 1000,
			    iflib_timer, txq, txq->ift_timer.c_cpu);
		}
	return (0);
}

/*
 * Reconcile kernel and user view of the receive ring.
 * Same as for the txsync, this routine must be efficient.
 * The caller guarantees a single invocation, but races against
 * the rest of the driver should be handled here.
 *
 * On call, kring->rhead is the first packet that userspace wants
 * to keep, and kring->rcur is the wakeup point.
 * The kernel has previously reported packets up to kring->rtail.
 *
 * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
 * of whether or not we received an interrupt.
 */
static int
iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	if_t ifp = na->ifp;
	iflib_fl_t fl;
	uint32_t nm_i;	/* index into the netmap ring */
	uint32_t nic_i;	/* index into the NIC ring */
	u_int i, n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
	struct if_rxd_info ri;

	if_ctx_t ctx = ifp->if_softc;
	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
	if (head > lim)
		return netmap_ring_reinit(kring);

	/*
	 * XXX netmap_fl_refill() only ever (re)fills free list 0 so far.
1079 */ 1080 1081 for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) { 1082 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 1083 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1084 } 1085 1086 /* 1087 * First part: import newly received packets. 1088 * 1089 * nm_i is the index of the next free slot in the netmap ring, 1090 * nic_i is the index of the next received packet in the NIC ring, 1091 * and they may differ in case if_init() has been called while 1092 * in netmap mode. For the receive ring we have 1093 * 1094 * nic_i = rxr->next_check; 1095 * nm_i = kring->nr_hwtail (previous) 1096 * and 1097 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size 1098 * 1099 * rxr->next_check is set to 0 on a ring reinit 1100 */ 1101 if (netmap_no_pendintr || force_update) { 1102 uint32_t hwtail_lim = nm_prev(kring->nr_hwcur, lim); 1103 int crclen = iflib_crcstrip ? 0 : 4; 1104 int error, avail; 1105 1106 for (i = 0; i < rxq->ifr_nfl; i++) { 1107 fl = &rxq->ifr_fl[i]; 1108 nic_i = fl->ifl_cidx; 1109 nm_i = netmap_idx_n2k(kring, nic_i); 1110 avail = ctx->isc_rxd_available(ctx->ifc_softc, 1111 rxq->ifr_id, nic_i, USHRT_MAX); 1112 for (n = 0; avail > 0 && nm_i != hwtail_lim; n++, avail--) { 1113 rxd_info_zero(&ri); 1114 ri.iri_frags = rxq->ifr_frags; 1115 ri.iri_qsidx = kring->ring_id; 1116 ri.iri_ifp = ctx->ifc_ifp; 1117 ri.iri_cidx = nic_i; 1118 1119 error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); 1120 ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen; 1121 ring->slot[nm_i].flags = 0; 1122 bus_dmamap_sync(fl->ifl_buf_tag, 1123 fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD); 1124 nm_i = nm_next(nm_i, lim); 1125 nic_i = nm_next(nic_i, lim); 1126 } 1127 if (n) { /* update the state variables */ 1128 if (netmap_no_pendintr && !force_update) { 1129 /* diagnostics */ 1130 iflib_rx_miss ++; 1131 iflib_rx_miss_bufs += n; 1132 } 1133 fl->ifl_cidx = nic_i; 1134 kring->nr_hwtail = nm_i; 1135 } 1136 kring->nr_kflags &= ~NKR_PENDINTR; 1137 } 1138 } 1139 /* 1140 * Second part: skip past packets that userspace has released. 1141 * (kring->nr_hwcur to head excluded), 1142 * and make the buffers available for reception. 
1143 * As usual nm_i is the index in the netmap ring, 1144 * nic_i is the index in the NIC ring, and 1145 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size 1146 */ 1147 /* XXX not sure how this will work with multiple free lists */ 1148 nm_i = kring->nr_hwcur; 1149 1150 return (netmap_fl_refill(rxq, kring, nm_i, false)); 1151 } 1152 1153 static void 1154 iflib_netmap_intr(struct netmap_adapter *na, int onoff) 1155 { 1156 if_ctx_t ctx = na->ifp->if_softc; 1157 1158 CTX_LOCK(ctx); 1159 if (onoff) { 1160 IFDI_INTR_ENABLE(ctx); 1161 } else { 1162 IFDI_INTR_DISABLE(ctx); 1163 } 1164 CTX_UNLOCK(ctx); 1165 } 1166 1167 1168 static int 1169 iflib_netmap_attach(if_ctx_t ctx) 1170 { 1171 struct netmap_adapter na; 1172 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 1173 1174 bzero(&na, sizeof(na)); 1175 1176 na.ifp = ctx->ifc_ifp; 1177 na.na_flags = NAF_BDG_MAYSLEEP; 1178 MPASS(ctx->ifc_softc_ctx.isc_ntxqsets); 1179 MPASS(ctx->ifc_softc_ctx.isc_nrxqsets); 1180 1181 na.num_tx_desc = scctx->isc_ntxd[0]; 1182 na.num_rx_desc = scctx->isc_nrxd[0]; 1183 na.nm_txsync = iflib_netmap_txsync; 1184 na.nm_rxsync = iflib_netmap_rxsync; 1185 na.nm_register = iflib_netmap_register; 1186 na.nm_intr = iflib_netmap_intr; 1187 na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets; 1188 na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets; 1189 return (netmap_attach(&na)); 1190 } 1191 1192 static int 1193 iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq) 1194 { 1195 struct netmap_adapter *na = NA(ctx->ifc_ifp); 1196 struct netmap_slot *slot; 1197 1198 slot = netmap_reset(na, NR_TX, txq->ift_id, 0); 1199 if (slot == NULL) 1200 return (0); 1201 for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) { 1202 1203 /* 1204 * In netmap mode, set the map for the packet buffer. 1205 * NOTE: Some drivers (not this one) also need to set 1206 * the physical buffer address in the NIC ring. 
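		 *
		 * (The two index spaces can be shifted relative to one
		 * another by kring->nkr_hwofs after a reinit while in
		 * netmap mode; e.g. with nkr_hwofs == 2, NIC index 0
		 * corresponds to netmap slot 2.  This is a sketch; see
		 * netmap_idx_n2k() for the authoritative wrap handling.)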
1207 * netmap_idx_n2k() maps a nic index, i, into the corresponding 1208 * netmap slot index, si 1209 */ 1210 int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i); 1211 netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i], 1212 NMB(na, slot + si)); 1213 } 1214 return (1); 1215 } 1216 1217 static int 1218 iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq) 1219 { 1220 struct netmap_adapter *na = NA(ctx->ifc_ifp); 1221 struct netmap_kring *kring; 1222 struct netmap_slot *slot; 1223 uint32_t nm_i; 1224 1225 slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0); 1226 if (slot == NULL) 1227 return (0); 1228 kring = na->rx_rings[rxq->ifr_id]; 1229 nm_i = netmap_idx_n2k(kring, 0); 1230 netmap_fl_refill(rxq, kring, nm_i, true); 1231 return (1); 1232 } 1233 1234 static void 1235 iflib_netmap_timer_adjust(if_ctx_t ctx, iflib_txq_t txq, uint32_t *reset_on) 1236 { 1237 struct netmap_kring *kring; 1238 uint16_t txqid; 1239 1240 txqid = txq->ift_id; 1241 kring = netmap_kring_on(NA(ctx->ifc_ifp), txqid, NR_TX); 1242 if (kring == NULL) 1243 return; 1244 1245 if (kring->nr_hwcur != nm_next(kring->nr_hwtail, kring->nkr_num_slots - 1)) { 1246 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, 1247 BUS_DMASYNC_POSTREAD); 1248 if (ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) 1249 netmap_tx_irq(ctx->ifc_ifp, txqid); 1250 if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) { 1251 if (hz < 2000) 1252 *reset_on = 1; 1253 else 1254 *reset_on = hz / 1000; 1255 } 1256 } 1257 } 1258 1259 #define iflib_netmap_detach(ifp) netmap_detach(ifp) 1260 1261 #else 1262 #define iflib_netmap_txq_init(ctx, txq) (0) 1263 #define iflib_netmap_rxq_init(ctx, rxq) (0) 1264 #define iflib_netmap_detach(ifp) 1265 1266 #define iflib_netmap_attach(ctx) (0) 1267 #define netmap_rx_irq(ifp, qid, budget) (0) 1268 #define netmap_tx_irq(ifp, qid) do {} while (0) 1269 #define iflib_netmap_timer_adjust(ctx, txq, reset_on) 1270 #endif 1271 1272 #if defined(__i386__) || defined(__amd64__) 1273 static __inline void 1274 prefetch(void *x) 1275 { 1276 __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); 1277 } 1278 static __inline void 1279 prefetch2cachelines(void *x) 1280 { 1281 __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); 1282 #if (CACHE_LINE_SIZE < 128) 1283 __asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long))))); 1284 #endif 1285 } 1286 #else 1287 #define prefetch(x) 1288 #define prefetch2cachelines(x) 1289 #endif 1290 1291 static void 1292 iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid) 1293 { 1294 iflib_fl_t fl; 1295 1296 fl = &rxq->ifr_fl[flid]; 1297 iru->iru_paddrs = fl->ifl_bus_addrs; 1298 iru->iru_vaddrs = &fl->ifl_vm_addrs[0]; 1299 iru->iru_idxs = fl->ifl_rxd_idxs; 1300 iru->iru_qsidx = rxq->ifr_id; 1301 iru->iru_buf_size = fl->ifl_buf_size; 1302 iru->iru_flidx = fl->ifl_id; 1303 } 1304 1305 static void 1306 _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err) 1307 { 1308 if (err) 1309 return; 1310 *(bus_addr_t *) arg = segs[0].ds_addr; 1311 } 1312 1313 int 1314 iflib_dma_alloc_align(if_ctx_t ctx, int size, int align, iflib_dma_info_t dma, int mapflags) 1315 { 1316 int err; 1317 device_t dev = ctx->ifc_dev; 1318 1319 err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1320 align, 0, /* alignment, bounds */ 1321 BUS_SPACE_MAXADDR, /* lowaddr */ 1322 BUS_SPACE_MAXADDR, /* highaddr */ 1323 NULL, NULL, /* filter, filterarg */ 1324 size, /* maxsize */ 1325 1, /* nsegments */ 1326 size, /* maxsegsize */ 1327 
				BUS_DMA_ALLOCNOW,	/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockarg */
				&dma->idi_tag);
	if (err) {
		device_printf(dev,
		    "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto fail_0;
	}

	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
	if (err) {
		device_printf(dev,
		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
		    __func__, (uintmax_t)size, err);
		goto fail_1;
	}

	dma->idi_paddr = IF_BAD_DMA;
	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
	if (err || dma->idi_paddr == IF_BAD_DMA) {
		device_printf(dev,
		    "%s: bus_dmamap_load failed: %d\n",
		    __func__, err);
		goto fail_2;
	}

	dma->idi_size = size;
	return (0);

fail_2:
	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
fail_1:
	bus_dma_tag_destroy(dma->idi_tag);
fail_0:
	dma->idi_tag = NULL;

	return (err);
}

int
iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;

	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));

	return (iflib_dma_alloc_align(ctx, size, sctx->isc_q_align, dma, mapflags));
}

int
iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
{
	int i, err;
	iflib_dma_info_t *dmaiter;

	dmaiter = dmalist;
	err = 0;	/* don't read err uninitialized when count is 0 */
	for (i = 0; i < count; i++, dmaiter++) {
		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
			break;
	}
	if (err)
		iflib_dma_free_multi(dmalist, i);
	return (err);
}

void
iflib_dma_free(iflib_dma_info_t dma)
{
	if (dma->idi_tag == NULL)
		return;
	if (dma->idi_paddr != IF_BAD_DMA) {
		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
		dma->idi_paddr = IF_BAD_DMA;
	}
	if (dma->idi_vaddr != NULL) {
		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
		dma->idi_vaddr = NULL;
	}
	bus_dma_tag_destroy(dma->idi_tag);
	dma->idi_tag = NULL;
}

void
iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
{
	int i;
	iflib_dma_info_t *dmaiter = dmalist;

	for (i = 0; i < count; i++, dmaiter++)
		iflib_dma_free(*dmaiter);
}

static int
iflib_fast_intr(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	int result;

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL) {
		result = info->ifi_filter(info->ifi_filter_arg);
		if ((result & FILTER_SCHEDULE_THREAD) == 0)
			return (result);
	}

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}

static int
iflib_fast_intr_rxtx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	if_ctx_t ctx;
	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
	iflib_txq_t txq;
	void *sc;
	int i, cidx, result;
	qidx_t txqid;
	bool intr_enable, intr_legacy;

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL) {
		result = info->ifi_filter(info->ifi_filter_arg);
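		/*
		 * Interrupt filters run in hard-interrupt context.  Unless
		 * the driver's filter asks for the threaded half to run
		 * (FILTER_SCHEDULE_THREAD), hand its FILTER_* result back
		 * to the interrupt framework unchanged.
		 */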
1459 if ((result & FILTER_SCHEDULE_THREAD) == 0) 1460 return (result); 1461 } 1462 1463 ctx = rxq->ifr_ctx; 1464 sc = ctx->ifc_softc; 1465 intr_enable = false; 1466 intr_legacy = !!(ctx->ifc_flags & IFC_LEGACY); 1467 MPASS(rxq->ifr_ntxqirq); 1468 for (i = 0; i < rxq->ifr_ntxqirq; i++) { 1469 txqid = rxq->ifr_txqid[i]; 1470 txq = &ctx->ifc_txqs[txqid]; 1471 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, 1472 BUS_DMASYNC_POSTREAD); 1473 if (!ctx->isc_txd_credits_update(sc, txqid, false)) { 1474 if (intr_legacy) 1475 intr_enable = true; 1476 else 1477 IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid); 1478 continue; 1479 } 1480 GROUPTASK_ENQUEUE(&txq->ift_task); 1481 } 1482 if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ) 1483 cidx = rxq->ifr_cq_cidx; 1484 else 1485 cidx = rxq->ifr_fl[0].ifl_cidx; 1486 if (iflib_rxd_avail(ctx, rxq, cidx, 1)) 1487 GROUPTASK_ENQUEUE(gtask); 1488 else { 1489 if (intr_legacy) 1490 intr_enable = true; 1491 else 1492 IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); 1493 DBG_COUNTER_INC(rx_intr_enables); 1494 } 1495 if (intr_enable) 1496 IFDI_INTR_ENABLE(ctx); 1497 return (FILTER_HANDLED); 1498 } 1499 1500 1501 static int 1502 iflib_fast_intr_ctx(void *arg) 1503 { 1504 iflib_filter_info_t info = arg; 1505 struct grouptask *gtask = info->ifi_task; 1506 int result; 1507 1508 DBG_COUNTER_INC(fast_intrs); 1509 if (info->ifi_filter != NULL) { 1510 result = info->ifi_filter(info->ifi_filter_arg); 1511 if ((result & FILTER_SCHEDULE_THREAD) == 0) 1512 return (result); 1513 } 1514 1515 GROUPTASK_ENQUEUE(gtask); 1516 return (FILTER_HANDLED); 1517 } 1518 1519 static int 1520 _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid, 1521 driver_filter_t filter, driver_intr_t handler, void *arg, 1522 const char *name) 1523 { 1524 struct resource *res; 1525 void *tag = NULL; 1526 device_t dev = ctx->ifc_dev; 1527 int flags, i, rc; 1528 1529 flags = RF_ACTIVE; 1530 if (ctx->ifc_flags & IFC_LEGACY) 1531 flags |= RF_SHAREABLE; 1532 MPASS(rid < 512); 1533 i = rid; 1534 res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, flags); 1535 if (res == NULL) { 1536 device_printf(dev, 1537 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 1538 return (ENOMEM); 1539 } 1540 irq->ii_res = res; 1541 KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL")); 1542 rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET, 1543 filter, handler, arg, &tag); 1544 if (rc != 0) { 1545 device_printf(dev, 1546 "failed to setup interrupt for rid %d, name %s: %d\n", 1547 rid, name ? name : "unknown", rc); 1548 return (rc); 1549 } else if (name) 1550 bus_describe_intr(dev, res, tag, "%s", name); 1551 1552 irq->ii_tag = tag; 1553 return (0); 1554 } 1555 1556 /********************************************************************* 1557 * 1558 * Allocate DMA resources for TX buffers as well as memory for the TX 1559 * mbuf map. TX DMA maps (non-TSO/TSO) and TX mbuf map are kept in a 1560 * iflib_sw_tx_desc_array structure, storing all the information that 1561 * is needed to transmit a packet on the wire. This is called only 1562 * once at attach, setup is done every reset. 
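 *  Two DMA tags are created per TX queue: ift_buf_tag, sized from
 *  isc_tx_maxsize/isc_tx_nsegments for ordinary frames, and (when the
 *  interface is TSO-capable) ift_tso_buf_tag, sized from the TSO
 *  limits.  Each descriptor slot then receives one map from each tag,
 *  roughly (a sketch of the loop below):
 *
 *	bus_dmamap_create(txq->ift_buf_tag, 0, &txq->ift_sds.ifsd_map[i]);
 *	bus_dmamap_create(txq->ift_tso_buf_tag, 0, &txq->ift_sds.ifsd_tso_map[i]);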
1563 * 1564 **********************************************************************/ 1565 static int 1566 iflib_txsd_alloc(iflib_txq_t txq) 1567 { 1568 if_ctx_t ctx = txq->ift_ctx; 1569 if_shared_ctx_t sctx = ctx->ifc_sctx; 1570 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 1571 device_t dev = ctx->ifc_dev; 1572 bus_size_t tsomaxsize; 1573 int err, nsegments, ntsosegments; 1574 bool tso; 1575 1576 nsegments = scctx->isc_tx_nsegments; 1577 ntsosegments = scctx->isc_tx_tso_segments_max; 1578 tsomaxsize = scctx->isc_tx_tso_size_max; 1579 if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU) 1580 tsomaxsize += sizeof(struct ether_vlan_header); 1581 MPASS(scctx->isc_ntxd[0] > 0); 1582 MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0); 1583 MPASS(nsegments > 0); 1584 if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) { 1585 MPASS(ntsosegments > 0); 1586 MPASS(sctx->isc_tso_maxsize >= tsomaxsize); 1587 } 1588 1589 /* 1590 * Set up DMA tags for TX buffers. 1591 */ 1592 if ((err = bus_dma_tag_create(bus_get_dma_tag(dev), 1593 1, 0, /* alignment, bounds */ 1594 BUS_SPACE_MAXADDR, /* lowaddr */ 1595 BUS_SPACE_MAXADDR, /* highaddr */ 1596 NULL, NULL, /* filter, filterarg */ 1597 sctx->isc_tx_maxsize, /* maxsize */ 1598 nsegments, /* nsegments */ 1599 sctx->isc_tx_maxsegsize, /* maxsegsize */ 1600 0, /* flags */ 1601 NULL, /* lockfunc */ 1602 NULL, /* lockfuncarg */ 1603 &txq->ift_buf_tag))) { 1604 device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err); 1605 device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n", 1606 (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize); 1607 goto fail; 1608 } 1609 tso = (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) != 0; 1610 if (tso && (err = bus_dma_tag_create(bus_get_dma_tag(dev), 1611 1, 0, /* alignment, bounds */ 1612 BUS_SPACE_MAXADDR, /* lowaddr */ 1613 BUS_SPACE_MAXADDR, /* highaddr */ 1614 NULL, NULL, /* filter, filterarg */ 1615 tsomaxsize, /* maxsize */ 1616 ntsosegments, /* nsegments */ 1617 sctx->isc_tso_maxsegsize,/* maxsegsize */ 1618 0, /* flags */ 1619 NULL, /* lockfunc */ 1620 NULL, /* lockfuncarg */ 1621 &txq->ift_tso_buf_tag))) { 1622 device_printf(dev, "Unable to allocate TSO TX DMA tag: %d\n", 1623 err); 1624 goto fail; 1625 } 1626 1627 /* Allocate memory for the TX mbuf map. */ 1628 if (!(txq->ift_sds.ifsd_m = 1629 (struct mbuf **) malloc(sizeof(struct mbuf *) * 1630 scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { 1631 device_printf(dev, "Unable to allocate TX mbuf map memory\n"); 1632 err = ENOMEM; 1633 goto fail; 1634 } 1635 1636 /* 1637 * Create the DMA maps for TX buffers. 
1638 */ 1639 if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc( 1640 sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], 1641 M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) { 1642 device_printf(dev, 1643 "Unable to allocate TX buffer DMA map memory\n"); 1644 err = ENOMEM; 1645 goto fail; 1646 } 1647 if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc( 1648 sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], 1649 M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) { 1650 device_printf(dev, 1651 "Unable to allocate TSO TX buffer map memory\n"); 1652 err = ENOMEM; 1653 goto fail; 1654 } 1655 for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) { 1656 err = bus_dmamap_create(txq->ift_buf_tag, 0, 1657 &txq->ift_sds.ifsd_map[i]); 1658 if (err != 0) { 1659 device_printf(dev, "Unable to create TX DMA map\n"); 1660 goto fail; 1661 } 1662 if (!tso) 1663 continue; 1664 err = bus_dmamap_create(txq->ift_tso_buf_tag, 0, 1665 &txq->ift_sds.ifsd_tso_map[i]); 1666 if (err != 0) { 1667 device_printf(dev, "Unable to create TSO TX DMA map\n"); 1668 goto fail; 1669 } 1670 } 1671 return (0); 1672 fail: 1673 /* We free all, it handles case where we are in the middle */ 1674 iflib_tx_structures_free(ctx); 1675 return (err); 1676 } 1677 1678 static void 1679 iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i) 1680 { 1681 bus_dmamap_t map; 1682 1683 if (txq->ift_sds.ifsd_map != NULL) { 1684 map = txq->ift_sds.ifsd_map[i]; 1685 bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE); 1686 bus_dmamap_unload(txq->ift_buf_tag, map); 1687 bus_dmamap_destroy(txq->ift_buf_tag, map); 1688 txq->ift_sds.ifsd_map[i] = NULL; 1689 } 1690 1691 if (txq->ift_sds.ifsd_tso_map != NULL) { 1692 map = txq->ift_sds.ifsd_tso_map[i]; 1693 bus_dmamap_sync(txq->ift_tso_buf_tag, map, 1694 BUS_DMASYNC_POSTWRITE); 1695 bus_dmamap_unload(txq->ift_tso_buf_tag, map); 1696 bus_dmamap_destroy(txq->ift_tso_buf_tag, map); 1697 txq->ift_sds.ifsd_tso_map[i] = NULL; 1698 } 1699 } 1700 1701 static void 1702 iflib_txq_destroy(iflib_txq_t txq) 1703 { 1704 if_ctx_t ctx = txq->ift_ctx; 1705 1706 for (int i = 0; i < txq->ift_size; i++) 1707 iflib_txsd_destroy(ctx, txq, i); 1708 1709 if (txq->ift_br != NULL) { 1710 ifmp_ring_free(txq->ift_br); 1711 txq->ift_br = NULL; 1712 } 1713 1714 mtx_destroy(&txq->ift_mtx); 1715 1716 if (txq->ift_sds.ifsd_map != NULL) { 1717 free(txq->ift_sds.ifsd_map, M_IFLIB); 1718 txq->ift_sds.ifsd_map = NULL; 1719 } 1720 if (txq->ift_sds.ifsd_tso_map != NULL) { 1721 free(txq->ift_sds.ifsd_tso_map, M_IFLIB); 1722 txq->ift_sds.ifsd_tso_map = NULL; 1723 } 1724 if (txq->ift_sds.ifsd_m != NULL) { 1725 free(txq->ift_sds.ifsd_m, M_IFLIB); 1726 txq->ift_sds.ifsd_m = NULL; 1727 } 1728 if (txq->ift_buf_tag != NULL) { 1729 bus_dma_tag_destroy(txq->ift_buf_tag); 1730 txq->ift_buf_tag = NULL; 1731 } 1732 if (txq->ift_tso_buf_tag != NULL) { 1733 bus_dma_tag_destroy(txq->ift_tso_buf_tag); 1734 txq->ift_tso_buf_tag = NULL; 1735 } 1736 if (txq->ift_ifdi != NULL) { 1737 free(txq->ift_ifdi, M_IFLIB); 1738 } 1739 } 1740 1741 static void 1742 iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i) 1743 { 1744 struct mbuf **mp; 1745 1746 mp = &txq->ift_sds.ifsd_m[i]; 1747 if (*mp == NULL) 1748 return; 1749 1750 if (txq->ift_sds.ifsd_map != NULL) { 1751 bus_dmamap_sync(txq->ift_buf_tag, 1752 txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE); 1753 bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]); 1754 } 1755 if (txq->ift_sds.ifsd_tso_map != NULL) { 1756 bus_dmamap_sync(txq->ift_tso_buf_tag, 1757 
		    txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->ift_tso_buf_tag,
		    txq->ift_sds.ifsd_tso_map[i]);
	}
	m_free(*mp);
	DBG_COUNTER_INC(tx_frees);
	*mp = NULL;
}

static int
iflib_txq_setup(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	iflib_dma_info_t di;
	int i;

	/* Set number of descriptors available */
	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	/* XXX make configurable */
	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;

	/* Reset indices */
	txq->ift_cidx_processed = 0;
	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];

	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
		bzero((void *)di->idi_vaddr, di->idi_size);

	IFDI_TXQ_SETUP(ctx, txq->ift_id);
	for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++)
		bus_dmamap_sync(di->idi_tag, di->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}

/*********************************************************************
 *
 *  Allocate DMA resources for RX buffers as well as memory for the RX
 *  mbuf map, direct RX cluster pointer map and RX cluster bus address
 *  map.  RX DMA map, RX mbuf map, direct RX cluster pointer map and
 *  RX cluster map are kept in a iflib_sw_rx_desc_array structure.
 *  Since we use one entry in iflib_sw_rx_desc_array per received
 *  packet, the maximum number of entries we'll need is equal to the
 *  number of hardware receive descriptors that we've allocated.
 *
 **********************************************************************/
static int
iflib_rxsd_alloc(iflib_rxq_t rxq)
{
	if_ctx_t ctx = rxq->ifr_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	iflib_fl_t fl;
	int err;

	MPASS(scctx->isc_nrxd[0] > 0);
	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);

	fl = rxq->ifr_fl;
	for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
		/* Set up DMA tag for RX buffers. */
		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
		    1, 0,			/* alignment, bounds */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    sctx->isc_rx_maxsize,	/* maxsize */
		    sctx->isc_rx_nsegments,	/* nsegments */
		    sctx->isc_rx_maxsegsize,	/* maxsegsize */
		    0,				/* flags */
		    NULL,			/* lockfunc */
		    NULL,			/* lockarg */
		    &fl->ifl_buf_tag);
		if (err) {
			device_printf(dev,
			    "Unable to allocate RX DMA tag: %d\n", err);
			goto fail;
		}

		/* Allocate memory for the RX mbuf map. */
		if (!(fl->ifl_sds.ifsd_m =
		    (struct mbuf **) malloc(sizeof(struct mbuf *) *
		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev,
			    "Unable to allocate RX mbuf map memory\n");
			err = ENOMEM;
			goto fail;
		}

		/* Allocate memory for the direct RX cluster pointer map.
*/ 1852 if (!(fl->ifl_sds.ifsd_cl = 1853 (caddr_t *) malloc(sizeof(caddr_t) * 1854 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { 1855 device_printf(dev, 1856 "Unable to allocate RX cluster map memory\n"); 1857 err = ENOMEM; 1858 goto fail; 1859 } 1860 1861 /* Allocate memory for the RX cluster bus address map. */ 1862 if (!(fl->ifl_sds.ifsd_ba = 1863 (bus_addr_t *) malloc(sizeof(bus_addr_t) * 1864 scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { 1865 device_printf(dev, 1866 "Unable to allocate RX bus address map memory\n"); 1867 err = ENOMEM; 1868 goto fail; 1869 } 1870 1871 /* 1872 * Create the DMA maps for RX buffers. 1873 */ 1874 if (!(fl->ifl_sds.ifsd_map = 1875 (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { 1876 device_printf(dev, 1877 "Unable to allocate RX buffer DMA map memory\n"); 1878 err = ENOMEM; 1879 goto fail; 1880 } 1881 for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) { 1882 err = bus_dmamap_create(fl->ifl_buf_tag, 0, 1883 &fl->ifl_sds.ifsd_map[i]); 1884 if (err != 0) { 1885 device_printf(dev, "Unable to create RX buffer DMA map\n"); 1886 goto fail; 1887 } 1888 } 1889 } 1890 return (0); 1891 1892 fail: 1893 iflib_rx_structures_free(ctx); 1894 return (err); 1895 } 1896 1897 1898 /* 1899 * Internal service routines 1900 */ 1901 1902 struct rxq_refill_cb_arg { 1903 int error; 1904 bus_dma_segment_t seg; 1905 int nseg; 1906 }; 1907 1908 static void 1909 _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1910 { 1911 struct rxq_refill_cb_arg *cb_arg = arg; 1912 1913 cb_arg->error = error; 1914 cb_arg->seg = segs[0]; 1915 cb_arg->nseg = nseg; 1916 } 1917 1918 /** 1919 * _iflib_fl_refill - refill an rxq free-buffer list 1920 * @ctx: the iflib context 1921 * @fl: the free list to refill 1922 * @count: the number of new buffers to allocate 1923 * 1924 * (Re)populate an rxq free-buffer list with up to @count new packet buffers. 1925 * The caller must assure that @count does not exceed the queue's capacity. 1926 */ 1927 static uint8_t 1928 _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count) 1929 { 1930 struct if_rxd_update iru; 1931 struct rxq_refill_cb_arg cb_arg; 1932 struct mbuf *m; 1933 caddr_t cl, *sd_cl; 1934 struct mbuf **sd_m; 1935 bus_dmamap_t *sd_map; 1936 bus_addr_t bus_addr, *sd_ba; 1937 int err, frag_idx, i, idx, n, pidx; 1938 qidx_t credits; 1939 1940 sd_m = fl->ifl_sds.ifsd_m; 1941 sd_map = fl->ifl_sds.ifsd_map; 1942 sd_cl = fl->ifl_sds.ifsd_cl; 1943 sd_ba = fl->ifl_sds.ifsd_ba; 1944 pidx = fl->ifl_pidx; 1945 idx = pidx; 1946 frag_idx = fl->ifl_fragidx; 1947 credits = fl->ifl_credits; 1948 1949 i = 0; 1950 n = count; 1951 MPASS(n > 0); 1952 MPASS(credits + n <= fl->ifl_size); 1953 1954 if (pidx < fl->ifl_cidx) 1955 MPASS(pidx + n <= fl->ifl_cidx); 1956 if (pidx == fl->ifl_cidx && (credits < fl->ifl_size)) 1957 MPASS(fl->ifl_gen == 0); 1958 if (pidx > fl->ifl_cidx) 1959 MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx); 1960 1961 DBG_COUNTER_INC(fl_refills); 1962 if (n > 8) 1963 DBG_COUNTER_INC(fl_refills_large); 1964 iru_init(&iru, fl->ifl_rxq, fl->ifl_id); 1965 while (n--) { 1966 /* 1967 * We allocate an uninitialized mbuf + cluster, mbuf is 1968 * initialized after rx. 
1969 * 1970 * If the cluster is still set then we know a minimum sized packet was received 1971 */ 1972 bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, 1973 &frag_idx); 1974 if (frag_idx < 0) 1975 bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx); 1976 MPASS(frag_idx >= 0); 1977 if ((cl = sd_cl[frag_idx]) == NULL) { 1978 if ((cl = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL) 1979 break; 1980 1981 cb_arg.error = 0; 1982 MPASS(sd_map != NULL); 1983 err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx], 1984 cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 1985 BUS_DMA_NOWAIT); 1986 if (err != 0 || cb_arg.error) { 1987 /* 1988 * !zone_pack ? 1989 */ 1990 if (fl->ifl_zone == zone_pack) 1991 uma_zfree(fl->ifl_zone, cl); 1992 break; 1993 } 1994 1995 sd_ba[frag_idx] = bus_addr = cb_arg.seg.ds_addr; 1996 sd_cl[frag_idx] = cl; 1997 #if MEMORY_LOGGING 1998 fl->ifl_cl_enqueued++; 1999 #endif 2000 } else { 2001 bus_addr = sd_ba[frag_idx]; 2002 } 2003 bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx], 2004 BUS_DMASYNC_PREREAD); 2005 2006 if (sd_m[frag_idx] == NULL) { 2007 if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) { 2008 break; 2009 } 2010 sd_m[frag_idx] = m; 2011 } 2012 bit_set(fl->ifl_rx_bitmap, frag_idx); 2013 #if MEMORY_LOGGING 2014 fl->ifl_m_enqueued++; 2015 #endif 2016 2017 DBG_COUNTER_INC(rx_allocs); 2018 fl->ifl_rxd_idxs[i] = frag_idx; 2019 fl->ifl_bus_addrs[i] = bus_addr; 2020 fl->ifl_vm_addrs[i] = cl; 2021 credits++; 2022 i++; 2023 MPASS(credits <= fl->ifl_size); 2024 if (++idx == fl->ifl_size) { 2025 fl->ifl_gen = 1; 2026 idx = 0; 2027 } 2028 if (n == 0 || i == IFLIB_MAX_RX_REFRESH) { 2029 iru.iru_pidx = pidx; 2030 iru.iru_count = i; 2031 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); 2032 i = 0; 2033 pidx = idx; 2034 fl->ifl_pidx = idx; 2035 fl->ifl_credits = credits; 2036 } 2037 } 2038 2039 if (i) { 2040 iru.iru_pidx = pidx; 2041 iru.iru_count = i; 2042 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); 2043 fl->ifl_pidx = idx; 2044 fl->ifl_credits = credits; 2045 } 2046 DBG_COUNTER_INC(rxd_flush); 2047 if (fl->ifl_pidx == 0) 2048 pidx = fl->ifl_size - 1; 2049 else 2050 pidx = fl->ifl_pidx - 1; 2051 2052 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 2053 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2054 ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx); 2055 fl->ifl_fragidx = frag_idx + 1; 2056 if (fl->ifl_fragidx == fl->ifl_size) 2057 fl->ifl_fragidx = 0; 2058 2059 return (n == -1 ? 
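	/*
	 * Return-value sketch (derived from the loop above): the
	 * post-decrement in "while (n--)" leaves n == -1 only when all
	 * @count buffers were allocated and posted.  Any cluster, mbuf, or
	 * DMA-load failure breaks out early with n >= 0, and returning
	 * IFLIB_RXEOF_EMPTY then tells the caller that the free list is
	 * still short, so the rx task gets rescheduled via the watchdog.
	 */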
0 : IFLIB_RXEOF_EMPTY); 2060 } 2061 2062 static __inline uint8_t 2063 __iflib_fl_refill_all(if_ctx_t ctx, iflib_fl_t fl) 2064 { 2065 /* we avoid allowing pidx to catch up with cidx as it confuses ixl */ 2066 int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1; 2067 #ifdef INVARIANTS 2068 int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1; 2069 #endif 2070 2071 MPASS(fl->ifl_credits <= fl->ifl_size); 2072 MPASS(reclaimable == delta); 2073 2074 if (reclaimable > 0) 2075 return (_iflib_fl_refill(ctx, fl, reclaimable)); 2076 return (0); 2077 } 2078 2079 uint8_t 2080 iflib_in_detach(if_ctx_t ctx) 2081 { 2082 bool in_detach; 2083 2084 STATE_LOCK(ctx); 2085 in_detach = !!(ctx->ifc_flags & IFC_IN_DETACH); 2086 STATE_UNLOCK(ctx); 2087 return (in_detach); 2088 } 2089 2090 static void 2091 iflib_fl_bufs_free(iflib_fl_t fl) 2092 { 2093 iflib_dma_info_t idi = fl->ifl_ifdi; 2094 bus_dmamap_t sd_map; 2095 uint32_t i; 2096 2097 for (i = 0; i < fl->ifl_size; i++) { 2098 struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i]; 2099 caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i]; 2100 2101 if (*sd_cl != NULL) { 2102 sd_map = fl->ifl_sds.ifsd_map[i]; 2103 bus_dmamap_sync(fl->ifl_buf_tag, sd_map, 2104 BUS_DMASYNC_POSTREAD); 2105 bus_dmamap_unload(fl->ifl_buf_tag, sd_map); 2106 if (*sd_cl != NULL) 2107 uma_zfree(fl->ifl_zone, *sd_cl); 2108 if (*sd_m != NULL) { 2109 m_init(*sd_m, M_NOWAIT, MT_DATA, 0); 2110 uma_zfree(zone_mbuf, *sd_m); 2111 } 2112 } else { 2113 MPASS(*sd_cl == NULL); 2114 MPASS(*sd_m == NULL); 2115 } 2116 #if MEMORY_LOGGING 2117 fl->ifl_m_dequeued++; 2118 fl->ifl_cl_dequeued++; 2119 #endif 2120 *sd_cl = NULL; 2121 *sd_m = NULL; 2122 } 2123 #ifdef INVARIANTS 2124 for (i = 0; i < fl->ifl_size; i++) { 2125 MPASS(fl->ifl_sds.ifsd_cl[i] == NULL); 2126 MPASS(fl->ifl_sds.ifsd_m[i] == NULL); 2127 } 2128 #endif 2129 /* 2130 * Reset free list values 2131 */ 2132 fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0; 2133 bzero(idi->idi_vaddr, idi->idi_size); 2134 } 2135 2136 /********************************************************************* 2137 * 2138 * Initialize a free list and its buffers. 2139 * 2140 **********************************************************************/ 2141 static int 2142 iflib_fl_setup(iflib_fl_t fl) 2143 { 2144 iflib_rxq_t rxq = fl->ifl_rxq; 2145 if_ctx_t ctx = rxq->ifr_ctx; 2146 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2147 int qidx; 2148 2149 bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1); 2150 /* 2151 ** Free current RX buffer structs and their mbufs 2152 */ 2153 iflib_fl_bufs_free(fl); 2154 /* Now replenish the mbufs */ 2155 MPASS(fl->ifl_credits == 0); 2156 qidx = rxq->ifr_fl_offset + fl->ifl_id; 2157 if (scctx->isc_rxd_buf_size[qidx] != 0) 2158 fl->ifl_buf_size = scctx->isc_rxd_buf_size[qidx]; 2159 else 2160 fl->ifl_buf_size = ctx->ifc_rx_mbuf_sz; 2161 /* 2162 * ifl_buf_size may be a driver-supplied value, so pull it up 2163 * to the selected mbuf size. 
	 */
	fl->ifl_buf_size = iflib_get_mbuf_size_for(fl->ifl_buf_size);
	if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size)
		ctx->ifc_max_fl_buf_size = fl->ifl_buf_size;
	fl->ifl_cltype = m_gettype(fl->ifl_buf_size);
	fl->ifl_zone = m_getzone(fl->ifl_buf_size);

	/*
	 * Avoid pre-allocating zillions of clusters to an idle card,
	 * potentially speeding up attach.
	 */
	(void) _iflib_fl_refill(ctx, fl, min(128, fl->ifl_size));
	MPASS(min(128, fl->ifl_size) == fl->ifl_credits);
	/* Handle refill failure. */
	if (min(128, fl->ifl_size) != fl->ifl_credits)
		return (ENOBUFS);
	MPASS(rxq != NULL);
	MPASS(fl->ifl_ifdi != NULL);
	bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}

/*********************************************************************
 *
 *  Free receive ring data structures
 *
 **********************************************************************/
static void
iflib_rx_sds_free(iflib_rxq_t rxq)
{
	iflib_fl_t fl;
	int i, j;

	if (rxq->ifr_fl != NULL) {
		for (i = 0; i < rxq->ifr_nfl; i++) {
			fl = &rxq->ifr_fl[i];
			if (fl->ifl_buf_tag != NULL) {
				if (fl->ifl_sds.ifsd_map != NULL) {
					for (j = 0; j < fl->ifl_size; j++) {
						bus_dmamap_sync(
						    fl->ifl_buf_tag,
						    fl->ifl_sds.ifsd_map[j],
						    BUS_DMASYNC_POSTREAD);
						bus_dmamap_unload(
						    fl->ifl_buf_tag,
						    fl->ifl_sds.ifsd_map[j]);
						bus_dmamap_destroy(
						    fl->ifl_buf_tag,
						    fl->ifl_sds.ifsd_map[j]);
					}
				}
				bus_dma_tag_destroy(fl->ifl_buf_tag);
				fl->ifl_buf_tag = NULL;
			}
			free(fl->ifl_sds.ifsd_m, M_IFLIB);
			free(fl->ifl_sds.ifsd_cl, M_IFLIB);
			free(fl->ifl_sds.ifsd_ba, M_IFLIB);
			free(fl->ifl_sds.ifsd_map, M_IFLIB);
			fl->ifl_sds.ifsd_m = NULL;
			fl->ifl_sds.ifsd_cl = NULL;
			fl->ifl_sds.ifsd_ba = NULL;
			fl->ifl_sds.ifsd_map = NULL;
		}
		free(rxq->ifr_fl, M_IFLIB);
		rxq->ifr_fl = NULL;
		free(rxq->ifr_ifdi, M_IFLIB);
		rxq->ifr_ifdi = NULL;
		rxq->ifr_cq_cidx = 0;
	}
}

/*
 * Timer routine
 */
static void
iflib_timer(void *arg)
{
	iflib_txq_t txq = arg;
	if_ctx_t ctx = txq->ift_ctx;
	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
	uint64_t this_tick = ticks;
	uint32_t reset_on = hz / 2;

	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
		return;

	/*
	 * Check on the state of the TX queue(s); this can be done without
	 * the lock because it's RO and the HUNG state will be static if set.
2257 */ 2258 if (this_tick - txq->ift_last_timer_tick >= hz / 2) { 2259 txq->ift_last_timer_tick = this_tick; 2260 IFDI_TIMER(ctx, txq->ift_id); 2261 if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) && 2262 ((txq->ift_cleaned_prev == txq->ift_cleaned) || 2263 (sctx->isc_pause_frames == 0))) 2264 goto hung; 2265 2266 if (txq->ift_qstatus != IFLIB_QUEUE_IDLE && 2267 ifmp_ring_is_stalled(txq->ift_br)) { 2268 KASSERT(ctx->ifc_link_state == LINK_STATE_UP, ("queue can't be marked as hung if interface is down")); 2269 txq->ift_qstatus = IFLIB_QUEUE_HUNG; 2270 } 2271 txq->ift_cleaned_prev = txq->ift_cleaned; 2272 } 2273 #ifdef DEV_NETMAP 2274 if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) 2275 iflib_netmap_timer_adjust(ctx, txq, &reset_on); 2276 #endif 2277 /* handle any laggards */ 2278 if (txq->ift_db_pending) 2279 GROUPTASK_ENQUEUE(&txq->ift_task); 2280 2281 sctx->isc_pause_frames = 0; 2282 if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) 2283 callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu); 2284 return; 2285 2286 hung: 2287 device_printf(ctx->ifc_dev, 2288 "Watchdog timeout (TX: %d desc avail: %d pidx: %d) -- resetting\n", 2289 txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx); 2290 STATE_LOCK(ctx); 2291 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 2292 ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET); 2293 iflib_admin_intr_deferred(ctx); 2294 STATE_UNLOCK(ctx); 2295 } 2296 2297 static uint16_t 2298 iflib_get_mbuf_size_for(unsigned int size) 2299 { 2300 2301 if (size <= MCLBYTES) 2302 return (MCLBYTES); 2303 else 2304 return (MJUMPAGESIZE); 2305 } 2306 2307 static void 2308 iflib_calc_rx_mbuf_sz(if_ctx_t ctx) 2309 { 2310 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 2311 2312 /* 2313 * XXX don't set the max_frame_size to larger 2314 * than the hardware can handle 2315 */ 2316 ctx->ifc_rx_mbuf_sz = 2317 iflib_get_mbuf_size_for(sctx->isc_max_frame_size); 2318 } 2319 2320 uint32_t 2321 iflib_get_rx_mbuf_sz(if_ctx_t ctx) 2322 { 2323 2324 return (ctx->ifc_rx_mbuf_sz); 2325 } 2326 2327 static void 2328 iflib_init_locked(if_ctx_t ctx) 2329 { 2330 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 2331 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2332 if_t ifp = ctx->ifc_ifp; 2333 iflib_fl_t fl; 2334 iflib_txq_t txq; 2335 iflib_rxq_t rxq; 2336 int i, j, tx_ip_csum_flags, tx_ip6_csum_flags; 2337 2338 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 2339 IFDI_INTR_DISABLE(ctx); 2340 2341 tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP); 2342 tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP); 2343 /* Set hardware offload abilities */ 2344 if_clearhwassist(ifp); 2345 if (if_getcapenable(ifp) & IFCAP_TXCSUM) 2346 if_sethwassistbits(ifp, tx_ip_csum_flags, 0); 2347 if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) 2348 if_sethwassistbits(ifp, tx_ip6_csum_flags, 0); 2349 if (if_getcapenable(ifp) & IFCAP_TSO4) 2350 if_sethwassistbits(ifp, CSUM_IP_TSO, 0); 2351 if (if_getcapenable(ifp) & IFCAP_TSO6) 2352 if_sethwassistbits(ifp, CSUM_IP6_TSO, 0); 2353 2354 for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) { 2355 CALLOUT_LOCK(txq); 2356 callout_stop(&txq->ift_timer); 2357 CALLOUT_UNLOCK(txq); 2358 iflib_netmap_txq_init(ctx, txq); 2359 } 2360 2361 /* 2362 * Calculate a suitable Rx mbuf size prior to calling IFDI_INIT, so 2363 * that drivers can use the value when setting up the hardware receive 2364 * buffers. 
2365 */ 2366 iflib_calc_rx_mbuf_sz(ctx); 2367 2368 #ifdef INVARIANTS 2369 i = if_getdrvflags(ifp); 2370 #endif 2371 IFDI_INIT(ctx); 2372 MPASS(if_getdrvflags(ifp) == i); 2373 for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) { 2374 if (iflib_netmap_rxq_init(ctx, rxq) > 0) { 2375 /* This rxq is in netmap mode. Skip normal init. */ 2376 continue; 2377 } 2378 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { 2379 if (iflib_fl_setup(fl)) { 2380 device_printf(ctx->ifc_dev, 2381 "setting up free list %d failed - " 2382 "check cluster settings\n", j); 2383 goto done; 2384 } 2385 } 2386 } 2387 done: 2388 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 2389 IFDI_INTR_ENABLE(ctx); 2390 txq = ctx->ifc_txqs; 2391 for (i = 0; i < sctx->isc_ntxqsets; i++, txq++) 2392 callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, 2393 txq->ift_timer.c_cpu); 2394 } 2395 2396 static int 2397 iflib_media_change(if_t ifp) 2398 { 2399 if_ctx_t ctx = if_getsoftc(ifp); 2400 int err; 2401 2402 CTX_LOCK(ctx); 2403 if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0) 2404 iflib_init_locked(ctx); 2405 CTX_UNLOCK(ctx); 2406 return (err); 2407 } 2408 2409 static void 2410 iflib_media_status(if_t ifp, struct ifmediareq *ifmr) 2411 { 2412 if_ctx_t ctx = if_getsoftc(ifp); 2413 2414 CTX_LOCK(ctx); 2415 IFDI_UPDATE_ADMIN_STATUS(ctx); 2416 IFDI_MEDIA_STATUS(ctx, ifmr); 2417 CTX_UNLOCK(ctx); 2418 } 2419 2420 void 2421 iflib_stop(if_ctx_t ctx) 2422 { 2423 iflib_txq_t txq = ctx->ifc_txqs; 2424 iflib_rxq_t rxq = ctx->ifc_rxqs; 2425 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2426 if_shared_ctx_t sctx = ctx->ifc_sctx; 2427 iflib_dma_info_t di; 2428 iflib_fl_t fl; 2429 int i, j; 2430 2431 /* Tell the stack that the interface is no longer active */ 2432 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 2433 2434 IFDI_INTR_DISABLE(ctx); 2435 DELAY(1000); 2436 IFDI_STOP(ctx); 2437 DELAY(1000); 2438 2439 iflib_debug_reset(); 2440 /* Wait for current tx queue users to exit to disarm watchdog timer. */ 2441 for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) { 2442 /* make sure all transmitters have completed before proceeding XXX */ 2443 2444 CALLOUT_LOCK(txq); 2445 callout_stop(&txq->ift_timer); 2446 CALLOUT_UNLOCK(txq); 2447 2448 /* clean any enqueued buffers */ 2449 iflib_ifmp_purge(txq); 2450 /* Free any existing tx buffers. 
*/ 2451 for (j = 0; j < txq->ift_size; j++) { 2452 iflib_txsd_free(ctx, txq, j); 2453 } 2454 txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0; 2455 txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0; 2456 txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0; 2457 txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0; 2458 txq->ift_pullups = 0; 2459 ifmp_ring_reset_stats(txq->ift_br); 2460 for (j = 0, di = txq->ift_ifdi; j < sctx->isc_ntxqs; j++, di++) 2461 bzero((void *)di->idi_vaddr, di->idi_size); 2462 } 2463 for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) { 2464 /* make sure all transmitters have completed before proceeding XXX */ 2465 2466 rxq->ifr_cq_cidx = 0; 2467 for (j = 0, di = rxq->ifr_ifdi; j < sctx->isc_nrxqs; j++, di++) 2468 bzero((void *)di->idi_vaddr, di->idi_size); 2469 /* also resets the free lists pidx/cidx */ 2470 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) 2471 iflib_fl_bufs_free(fl); 2472 } 2473 } 2474 2475 static inline caddr_t 2476 calc_next_rxd(iflib_fl_t fl, int cidx) 2477 { 2478 qidx_t size; 2479 int nrxd; 2480 caddr_t start, end, cur, next; 2481 2482 nrxd = fl->ifl_size; 2483 size = fl->ifl_rxd_size; 2484 start = fl->ifl_ifdi->idi_vaddr; 2485 2486 if (__predict_false(size == 0)) 2487 return (start); 2488 cur = start + size*cidx; 2489 end = start + size*nrxd; 2490 next = CACHE_PTR_NEXT(cur); 2491 return (next < end ? next : start); 2492 } 2493 2494 static inline void 2495 prefetch_pkts(iflib_fl_t fl, int cidx) 2496 { 2497 int nextptr; 2498 int nrxd = fl->ifl_size; 2499 caddr_t next_rxd; 2500 2501 2502 nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1); 2503 prefetch(&fl->ifl_sds.ifsd_m[nextptr]); 2504 prefetch(&fl->ifl_sds.ifsd_cl[nextptr]); 2505 next_rxd = calc_next_rxd(fl, cidx); 2506 prefetch(next_rxd); 2507 prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]); 2508 prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]); 2509 prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]); 2510 prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]); 2511 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]); 2512 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]); 2513 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]); 2514 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]); 2515 } 2516 2517 static struct mbuf * 2518 rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, bool unload, if_rxsd_t sd, 2519 int *pf_rv, if_rxd_info_t ri) 2520 { 2521 bus_dmamap_t map; 2522 iflib_fl_t fl; 2523 caddr_t payload; 2524 struct mbuf *m; 2525 int flid, cidx, len, next; 2526 2527 map = NULL; 2528 flid = irf->irf_flid; 2529 cidx = irf->irf_idx; 2530 fl = &rxq->ifr_fl[flid]; 2531 sd->ifsd_fl = fl; 2532 m = fl->ifl_sds.ifsd_m[cidx]; 2533 sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx]; 2534 fl->ifl_credits--; 2535 #if MEMORY_LOGGING 2536 fl->ifl_m_dequeued++; 2537 #endif 2538 if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH) 2539 prefetch_pkts(fl, cidx); 2540 next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1); 2541 prefetch(&fl->ifl_sds.ifsd_map[next]); 2542 map = fl->ifl_sds.ifsd_map[cidx]; 2543 2544 bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD); 2545 2546 if (rxq->pfil != NULL && PFIL_HOOKED_IN(rxq->pfil) && pf_rv != NULL && 2547 irf->irf_len != 0) { 2548 payload = *sd->ifsd_cl; 2549 payload += ri->iri_pad; 2550 len = ri->iri_len - ri->iri_pad; 2551 *pf_rv = pfil_run_hooks(rxq->pfil, payload, ri->iri_ifp, 2552 len | PFIL_MEMPTR | PFIL_IN, NULL); 2553 switch (*pf_rv) { 2554 
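	/*
	 * Disposition sketch for the pfil(9) verdicts handled below:
	 *
	 *	PFIL_DROPPED/PFIL_CONSUMED: the hook took the packet; keep
	 *	    the mbuf and cluster on the free list for reuse.
	 *	PFIL_REALLOCED: the hook copied the packet; recover an mbuf
	 *	    from the filter memory and likewise recycle the original
	 *	    buffers.
	 *	PFIL_PASS: normal receive; the mbuf leaves the free list.
	 */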
case PFIL_DROPPED: 2555 case PFIL_CONSUMED: 2556 /* 2557 * The filter ate it. Everything is recycled. 2558 */ 2559 m = NULL; 2560 unload = 0; 2561 break; 2562 case PFIL_REALLOCED: 2563 /* 2564 * The filter copied it. Everything is recycled. 2565 */ 2566 m = pfil_mem2mbuf(payload); 2567 unload = 0; 2568 break; 2569 case PFIL_PASS: 2570 /* 2571 * Filter said it was OK, so receive like 2572 * normal 2573 */ 2574 fl->ifl_sds.ifsd_m[cidx] = NULL; 2575 break; 2576 default: 2577 MPASS(0); 2578 } 2579 } else { 2580 fl->ifl_sds.ifsd_m[cidx] = NULL; 2581 *pf_rv = PFIL_PASS; 2582 } 2583 2584 if (unload && irf->irf_len != 0) 2585 bus_dmamap_unload(fl->ifl_buf_tag, map); 2586 fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1); 2587 if (__predict_false(fl->ifl_cidx == 0)) 2588 fl->ifl_gen = 0; 2589 bit_clear(fl->ifl_rx_bitmap, cidx); 2590 return (m); 2591 } 2592 2593 static struct mbuf * 2594 assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd, int *pf_rv) 2595 { 2596 struct mbuf *m, *mh, *mt; 2597 caddr_t cl; 2598 int *pf_rv_ptr, flags, i, padlen; 2599 bool consumed; 2600 2601 i = 0; 2602 mh = NULL; 2603 consumed = false; 2604 *pf_rv = PFIL_PASS; 2605 pf_rv_ptr = pf_rv; 2606 do { 2607 m = rxd_frag_to_sd(rxq, &ri->iri_frags[i], !consumed, sd, 2608 pf_rv_ptr, ri); 2609 2610 MPASS(*sd->ifsd_cl != NULL); 2611 2612 /* 2613 * Exclude zero-length frags & frags from 2614 * packets the filter has consumed or dropped 2615 */ 2616 if (ri->iri_frags[i].irf_len == 0 || consumed || 2617 *pf_rv == PFIL_CONSUMED || *pf_rv == PFIL_DROPPED) { 2618 if (mh == NULL) { 2619 /* everything saved here */ 2620 consumed = true; 2621 pf_rv_ptr = NULL; 2622 continue; 2623 } 2624 /* XXX we can save the cluster here, but not the mbuf */ 2625 m_init(m, M_NOWAIT, MT_DATA, 0); 2626 m_free(m); 2627 continue; 2628 } 2629 if (mh == NULL) { 2630 flags = M_PKTHDR|M_EXT; 2631 mh = mt = m; 2632 padlen = ri->iri_pad; 2633 } else { 2634 flags = M_EXT; 2635 mt->m_next = m; 2636 mt = m; 2637 /* assuming padding is only on the first fragment */ 2638 padlen = 0; 2639 } 2640 cl = *sd->ifsd_cl; 2641 *sd->ifsd_cl = NULL; 2642 2643 /* Can these two be made one ? */ 2644 m_init(m, M_NOWAIT, MT_DATA, flags); 2645 m_cljset(m, cl, sd->ifsd_fl->ifl_cltype); 2646 /* 2647 * These must follow m_init and m_cljset 2648 */ 2649 m->m_data += padlen; 2650 ri->iri_len -= padlen; 2651 m->m_len = ri->iri_frags[i].irf_len; 2652 } while (++i < ri->iri_nfrags); 2653 2654 return (mh); 2655 } 2656 2657 /* 2658 * Process one software descriptor 2659 */ 2660 static struct mbuf * 2661 iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri) 2662 { 2663 struct if_rxsd sd; 2664 struct mbuf *m; 2665 int pf_rv; 2666 2667 /* should I merge this back in now that the two paths are basically duplicated? 
*/ 2668 if (ri->iri_nfrags == 1 && 2669 ri->iri_frags[0].irf_len != 0 && 2670 ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) { 2671 m = rxd_frag_to_sd(rxq, &ri->iri_frags[0], false, &sd, 2672 &pf_rv, ri); 2673 if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED) 2674 return (m); 2675 if (pf_rv == PFIL_PASS) { 2676 m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR); 2677 #ifndef __NO_STRICT_ALIGNMENT 2678 if (!IP_ALIGNED(m)) 2679 m->m_data += 2; 2680 #endif 2681 memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len); 2682 m->m_len = ri->iri_frags[0].irf_len; 2683 } 2684 } else { 2685 m = assemble_segments(rxq, ri, &sd, &pf_rv); 2686 if (m == NULL) 2687 return (NULL); 2688 if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED) 2689 return (m); 2690 } 2691 m->m_pkthdr.len = ri->iri_len; 2692 m->m_pkthdr.rcvif = ri->iri_ifp; 2693 m->m_flags |= ri->iri_flags; 2694 m->m_pkthdr.ether_vtag = ri->iri_vtag; 2695 m->m_pkthdr.flowid = ri->iri_flowid; 2696 M_HASHTYPE_SET(m, ri->iri_rsstype); 2697 m->m_pkthdr.csum_flags = ri->iri_csum_flags; 2698 m->m_pkthdr.csum_data = ri->iri_csum_data; 2699 return (m); 2700 } 2701 2702 #if defined(INET6) || defined(INET) 2703 static void 2704 iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6) 2705 { 2706 CURVNET_SET(lc->ifp->if_vnet); 2707 #if defined(INET6) 2708 *v6 = V_ip6_forwarding; 2709 #endif 2710 #if defined(INET) 2711 *v4 = V_ipforwarding; 2712 #endif 2713 CURVNET_RESTORE(); 2714 } 2715 2716 /* 2717 * Returns true if it's possible this packet could be LROed. 2718 * if it returns false, it is guaranteed that tcp_lro_rx() 2719 * would not return zero. 2720 */ 2721 static bool 2722 iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding) 2723 { 2724 struct ether_header *eh; 2725 2726 eh = mtod(m, struct ether_header *); 2727 switch (eh->ether_type) { 2728 #if defined(INET6) 2729 case htons(ETHERTYPE_IPV6): 2730 return (!v6_forwarding); 2731 #endif 2732 #if defined (INET) 2733 case htons(ETHERTYPE_IP): 2734 return (!v4_forwarding); 2735 #endif 2736 } 2737 2738 return false; 2739 } 2740 #else 2741 static void 2742 iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused) 2743 { 2744 } 2745 #endif 2746 2747 static void 2748 _task_fn_rx_watchdog(void *context) 2749 { 2750 iflib_rxq_t rxq = context; 2751 2752 GROUPTASK_ENQUEUE(&rxq->ifr_task); 2753 } 2754 2755 static uint8_t 2756 iflib_rxeof(iflib_rxq_t rxq, qidx_t budget) 2757 { 2758 if_t ifp; 2759 if_ctx_t ctx = rxq->ifr_ctx; 2760 if_shared_ctx_t sctx = ctx->ifc_sctx; 2761 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2762 int avail, i; 2763 qidx_t *cidxp; 2764 struct if_rxd_info ri; 2765 int err, budget_left, rx_bytes, rx_pkts; 2766 iflib_fl_t fl; 2767 int lro_enabled; 2768 bool v4_forwarding, v6_forwarding, lro_possible; 2769 uint8_t retval = 0; 2770 2771 /* 2772 * XXX early demux data packets so that if_input processing only handles 2773 * acks in interrupt context 2774 */ 2775 struct mbuf *m, *mh, *mt, *mf; 2776 2777 NET_EPOCH_ASSERT(); 2778 2779 lro_possible = v4_forwarding = v6_forwarding = false; 2780 ifp = ctx->ifc_ifp; 2781 mh = mt = NULL; 2782 MPASS(budget > 0); 2783 rx_pkts = rx_bytes = 0; 2784 if (sctx->isc_flags & IFLIB_HAS_RXCQ) 2785 cidxp = &rxq->ifr_cq_cidx; 2786 else 2787 cidxp = &rxq->ifr_fl[0].ifl_cidx; 2788 if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) { 2789 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) 2790 retval |= __iflib_fl_refill_all(ctx, fl); 2791 DBG_COUNTER_INC(rx_unavail); 2792 return (retval); 
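	/*
	 * Note: the value returned here is a bitmask consumed by
	 * _task_fn_rx().  IFLIB_RXEOF_MORE means work remains and the task
	 * should be re-enqueued immediately; IFLIB_RXEOF_EMPTY (set by the
	 * refill path) means cluster allocation is lagging, so the rx
	 * watchdog callout polls again shortly.
	 */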
2793 } 2794 2795 /* pfil needs the vnet to be set */ 2796 CURVNET_SET_QUIET(ifp->if_vnet); 2797 for (budget_left = budget; budget_left > 0 && avail > 0;) { 2798 if (__predict_false(!CTX_ACTIVE(ctx))) { 2799 DBG_COUNTER_INC(rx_ctx_inactive); 2800 break; 2801 } 2802 /* 2803 * Reset client set fields to their default values 2804 */ 2805 rxd_info_zero(&ri); 2806 ri.iri_qsidx = rxq->ifr_id; 2807 ri.iri_cidx = *cidxp; 2808 ri.iri_ifp = ifp; 2809 ri.iri_frags = rxq->ifr_frags; 2810 err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); 2811 2812 if (err) 2813 goto err; 2814 rx_pkts += 1; 2815 rx_bytes += ri.iri_len; 2816 if (sctx->isc_flags & IFLIB_HAS_RXCQ) { 2817 *cidxp = ri.iri_cidx; 2818 /* Update our consumer index */ 2819 /* XXX NB: shurd - check if this is still safe */ 2820 while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) 2821 rxq->ifr_cq_cidx -= scctx->isc_nrxd[0]; 2822 /* was this only a completion queue message? */ 2823 if (__predict_false(ri.iri_nfrags == 0)) 2824 continue; 2825 } 2826 MPASS(ri.iri_nfrags != 0); 2827 MPASS(ri.iri_len != 0); 2828 2829 /* will advance the cidx on the corresponding free lists */ 2830 m = iflib_rxd_pkt_get(rxq, &ri); 2831 avail--; 2832 budget_left--; 2833 if (avail == 0 && budget_left) 2834 avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left); 2835 2836 if (__predict_false(m == NULL)) 2837 continue; 2838 2839 /* imm_pkt: -- cxgb */ 2840 if (mh == NULL) 2841 mh = mt = m; 2842 else { 2843 mt->m_nextpkt = m; 2844 mt = m; 2845 } 2846 } 2847 CURVNET_RESTORE(); 2848 /* make sure that we can refill faster than drain */ 2849 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) 2850 retval |= __iflib_fl_refill_all(ctx, fl); 2851 2852 lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO); 2853 if (lro_enabled) 2854 iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding); 2855 mt = mf = NULL; 2856 while (mh != NULL) { 2857 m = mh; 2858 mh = mh->m_nextpkt; 2859 m->m_nextpkt = NULL; 2860 #ifndef __NO_STRICT_ALIGNMENT 2861 if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL) 2862 continue; 2863 #endif 2864 rx_bytes += m->m_pkthdr.len; 2865 rx_pkts++; 2866 #if defined(INET6) || defined(INET) 2867 if (lro_enabled) { 2868 if (!lro_possible) { 2869 lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding); 2870 if (lro_possible && mf != NULL) { 2871 ifp->if_input(ifp, mf); 2872 DBG_COUNTER_INC(rx_if_input); 2873 mt = mf = NULL; 2874 } 2875 } 2876 if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) == 2877 (CSUM_L4_CALC|CSUM_L4_VALID)) { 2878 if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0) 2879 continue; 2880 } 2881 } 2882 #endif 2883 if (lro_possible) { 2884 ifp->if_input(ifp, m); 2885 DBG_COUNTER_INC(rx_if_input); 2886 continue; 2887 } 2888 2889 if (mf == NULL) 2890 mf = m; 2891 if (mt != NULL) 2892 mt->m_nextpkt = m; 2893 mt = m; 2894 } 2895 if (mf != NULL) { 2896 ifp->if_input(ifp, mf); 2897 DBG_COUNTER_INC(rx_if_input); 2898 } 2899 2900 if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes); 2901 if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts); 2902 2903 /* 2904 * Flush any outstanding LRO work 2905 */ 2906 #if defined(INET6) || defined(INET) 2907 tcp_lro_flush_all(&rxq->ifr_lc); 2908 #endif 2909 if (avail != 0 || iflib_rxd_avail(ctx, rxq, *cidxp, 1) != 0) 2910 retval |= IFLIB_RXEOF_MORE; 2911 return (retval); 2912 err: 2913 STATE_LOCK(ctx); 2914 ctx->ifc_flags |= IFC_DO_RESET; 2915 iflib_admin_intr_deferred(ctx); 2916 STATE_UNLOCK(ctx); 2917 return (0); 2918 } 2919 2920 #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / 
(txq)->ift_update_freq)-1) 2921 static inline qidx_t 2922 txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use) 2923 { 2924 qidx_t notify_count = TXD_NOTIFY_COUNT(txq); 2925 qidx_t minthresh = txq->ift_size / 8; 2926 if (in_use > 4*minthresh) 2927 return (notify_count); 2928 if (in_use > 2*minthresh) 2929 return (notify_count >> 1); 2930 if (in_use > minthresh) 2931 return (notify_count >> 3); 2932 return (0); 2933 } 2934 2935 static inline qidx_t 2936 txq_max_rs_deferred(iflib_txq_t txq) 2937 { 2938 qidx_t notify_count = TXD_NOTIFY_COUNT(txq); 2939 qidx_t minthresh = txq->ift_size / 8; 2940 if (txq->ift_in_use > 4*minthresh) 2941 return (notify_count); 2942 if (txq->ift_in_use > 2*minthresh) 2943 return (notify_count >> 1); 2944 if (txq->ift_in_use > minthresh) 2945 return (notify_count >> 2); 2946 return (2); 2947 } 2948 2949 #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags) 2950 #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG) 2951 2952 #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use)) 2953 #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq) 2954 #define TXQ_MAX_DB_CONSUMED(size) (size >> 4) 2955 2956 /* forward compatibility for cxgb */ 2957 #define FIRST_QSET(ctx) 0 2958 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets) 2959 #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets) 2960 #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx)) 2961 #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments)) 2962 2963 /* XXX we should be setting this to something other than zero */ 2964 #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh) 2965 #define MAX_TX_DESC(ctx) max((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \ 2966 (ctx)->ifc_softc_ctx.isc_tx_nsegments) 2967 2968 static inline bool 2969 iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use) 2970 { 2971 qidx_t dbval, max; 2972 bool rang; 2973 2974 rang = false; 2975 max = TXQ_MAX_DB_DEFERRED(txq, in_use); 2976 if (ring || txq->ift_db_pending >= max) { 2977 dbval = txq->ift_npending ? 
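	/*
	 * Worked example for txq_max_db_deferred() above (illustrative
	 * numbers only): with ift_size = 1024 and, say, a notify count of
	 * 63, minthresh is 128, so a ring more than half full may defer up
	 * to 63 doorbell updates, a quarter full up to 31, an eighth full
	 * up to 7, and an emptier ring none at all.  The busier the ring,
	 * the more doorbell writes get batched.
	 */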
txq->ift_npending : txq->ift_pidx; 2978 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, 2979 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2980 ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval); 2981 txq->ift_db_pending = txq->ift_npending = 0; 2982 rang = true; 2983 } 2984 return (rang); 2985 } 2986 2987 #ifdef PKT_DEBUG 2988 static void 2989 print_pkt(if_pkt_info_t pi) 2990 { 2991 printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n", 2992 pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx); 2993 printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n", 2994 pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag); 2995 printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n", 2996 pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto); 2997 } 2998 #endif 2999 3000 #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO) 3001 #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO)) 3002 #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO) 3003 #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO)) 3004 3005 static int 3006 iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp) 3007 { 3008 if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx; 3009 struct ether_vlan_header *eh; 3010 struct mbuf *m; 3011 3012 m = *mp; 3013 if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) && 3014 M_WRITABLE(m) == 0) { 3015 if ((m = m_dup(m, M_NOWAIT)) == NULL) { 3016 return (ENOMEM); 3017 } else { 3018 m_freem(*mp); 3019 DBG_COUNTER_INC(tx_frees); 3020 *mp = m; 3021 } 3022 } 3023 3024 /* 3025 * Determine where frame payload starts. 3026 * Jump over vlan headers if already present, 3027 * helpful for QinQ too. 
3028 */ 3029 if (__predict_false(m->m_len < sizeof(*eh))) { 3030 txq->ift_pullups++; 3031 if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL)) 3032 return (ENOMEM); 3033 } 3034 eh = mtod(m, struct ether_vlan_header *); 3035 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 3036 pi->ipi_etype = ntohs(eh->evl_proto); 3037 pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3038 } else { 3039 pi->ipi_etype = ntohs(eh->evl_encap_proto); 3040 pi->ipi_ehdrlen = ETHER_HDR_LEN; 3041 } 3042 3043 switch (pi->ipi_etype) { 3044 #ifdef INET 3045 case ETHERTYPE_IP: 3046 { 3047 struct mbuf *n; 3048 struct ip *ip = NULL; 3049 struct tcphdr *th = NULL; 3050 int minthlen; 3051 3052 minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th)); 3053 if (__predict_false(m->m_len < minthlen)) { 3054 /* 3055 * if this code bloat is causing too much of a hit 3056 * move it to a separate function and mark it noinline 3057 */ 3058 if (m->m_len == pi->ipi_ehdrlen) { 3059 n = m->m_next; 3060 MPASS(n); 3061 if (n->m_len >= sizeof(*ip)) { 3062 ip = (struct ip *)n->m_data; 3063 if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 3064 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 3065 } else { 3066 txq->ift_pullups++; 3067 if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) 3068 return (ENOMEM); 3069 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 3070 } 3071 } else { 3072 txq->ift_pullups++; 3073 if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) 3074 return (ENOMEM); 3075 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 3076 if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 3077 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 3078 } 3079 } else { 3080 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 3081 if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 3082 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 3083 } 3084 pi->ipi_ip_hlen = ip->ip_hl << 2; 3085 pi->ipi_ipproto = ip->ip_p; 3086 pi->ipi_flags |= IPI_TX_IPV4; 3087 3088 /* TCP checksum offload may require TCP header length */ 3089 if (IS_TX_OFFLOAD4(pi)) { 3090 if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) { 3091 if (__predict_false(th == NULL)) { 3092 txq->ift_pullups++; 3093 if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL)) 3094 return (ENOMEM); 3095 th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen); 3096 } 3097 pi->ipi_tcp_hflags = th->th_flags; 3098 pi->ipi_tcp_hlen = th->th_off << 2; 3099 pi->ipi_tcp_seq = th->th_seq; 3100 } 3101 if (IS_TSO4(pi)) { 3102 if (__predict_false(ip->ip_p != IPPROTO_TCP)) 3103 return (ENXIO); 3104 /* 3105 * TSO always requires hardware checksum offload. 
3106 */ 3107 pi->ipi_csum_flags |= (CSUM_IP_TCP | CSUM_IP); 3108 th->th_sum = in_pseudo(ip->ip_src.s_addr, 3109 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 3110 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; 3111 if (sctx->isc_flags & IFLIB_TSO_INIT_IP) { 3112 ip->ip_sum = 0; 3113 ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz); 3114 } 3115 } 3116 } 3117 if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP)) 3118 ip->ip_sum = 0; 3119 3120 break; 3121 } 3122 #endif 3123 #ifdef INET6 3124 case ETHERTYPE_IPV6: 3125 { 3126 struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen); 3127 struct tcphdr *th; 3128 pi->ipi_ip_hlen = sizeof(struct ip6_hdr); 3129 3130 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) { 3131 txq->ift_pullups++; 3132 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL)) 3133 return (ENOMEM); 3134 } 3135 th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen); 3136 3137 /* XXX-BZ this will go badly in case of ext hdrs. */ 3138 pi->ipi_ipproto = ip6->ip6_nxt; 3139 pi->ipi_flags |= IPI_TX_IPV6; 3140 3141 /* TCP checksum offload may require TCP header length */ 3142 if (IS_TX_OFFLOAD6(pi)) { 3143 if (pi->ipi_ipproto == IPPROTO_TCP) { 3144 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) { 3145 txq->ift_pullups++; 3146 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL)) 3147 return (ENOMEM); 3148 } 3149 pi->ipi_tcp_hflags = th->th_flags; 3150 pi->ipi_tcp_hlen = th->th_off << 2; 3151 pi->ipi_tcp_seq = th->th_seq; 3152 } 3153 if (IS_TSO6(pi)) { 3154 if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP)) 3155 return (ENXIO); 3156 /* 3157 * TSO always requires hardware checksum offload. 3158 */ 3159 pi->ipi_csum_flags |= CSUM_IP6_TCP; 3160 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); 3161 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; 3162 } 3163 } 3164 break; 3165 } 3166 #endif 3167 default: 3168 pi->ipi_csum_flags &= ~CSUM_OFFLOAD; 3169 pi->ipi_ip_hlen = 0; 3170 break; 3171 } 3172 *mp = m; 3173 3174 return (0); 3175 } 3176 3177 /* 3178 * If dodgy hardware rejects the scatter gather chain we've handed it 3179 * we'll need to remove the mbuf chain from ifsg_m[] before we can add the 3180 * m_defrag'd mbufs 3181 */ 3182 static __noinline struct mbuf * 3183 iflib_remove_mbuf(iflib_txq_t txq) 3184 { 3185 int ntxd, pidx; 3186 struct mbuf *m, **ifsd_m; 3187 3188 ifsd_m = txq->ift_sds.ifsd_m; 3189 ntxd = txq->ift_size; 3190 pidx = txq->ift_pidx & (ntxd - 1); 3191 ifsd_m = txq->ift_sds.ifsd_m; 3192 m = ifsd_m[pidx]; 3193 ifsd_m[pidx] = NULL; 3194 bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]); 3195 if (txq->ift_sds.ifsd_tso_map != NULL) 3196 bus_dmamap_unload(txq->ift_tso_buf_tag, 3197 txq->ift_sds.ifsd_tso_map[pidx]); 3198 #if MEMORY_LOGGING 3199 txq->ift_dequeued++; 3200 #endif 3201 return (m); 3202 } 3203 3204 static inline caddr_t 3205 calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid) 3206 { 3207 qidx_t size; 3208 int ntxd; 3209 caddr_t start, end, cur, next; 3210 3211 ntxd = txq->ift_size; 3212 size = txq->ift_txd_size[qid]; 3213 start = txq->ift_ifdi[qid].idi_vaddr; 3214 3215 if (__predict_false(size == 0)) 3216 return (start); 3217 cur = start + size*cidx; 3218 end = start + size*ntxd; 3219 next = CACHE_PTR_NEXT(cur); 3220 return (next < end ? 
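	/*
	 * Wrap example (illustrative): with ntxd = 512 and a 16-byte
	 * descriptor, end is start + 8192; for a cidx near the top of the
	 * ring the computed next crosses end, so the prefetch target wraps
	 * back to the first descriptor rather than touching memory past
	 * the ring.
	 */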
	    next : start);
}

/*
 * Pad an mbuf to ensure a minimum ethernet frame size.
 * min_frame_size is the frame size (less CRC) to pad the mbuf to.
 */
static __noinline int
iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
{
	/*
	 * 18 is enough bytes to pad an ARP packet to 46 bytes, and
	 * an ARP message is the smallest common payload I can think of.
	 */
	static char pad[18];	/* just zeros */
	int n;
	struct mbuf *new_head;

	if (!M_WRITABLE(*m_head)) {
		new_head = m_dup(*m_head, M_NOWAIT);
		if (new_head == NULL) {
			m_freem(*m_head);
			device_printf(dev, "cannot pad short frame, m_dup() failed\n");
			DBG_COUNTER_INC(encap_pad_mbuf_fail);
			DBG_COUNTER_INC(tx_frees);
			return (ENOMEM);
		}
		m_freem(*m_head);
		*m_head = new_head;
	}

	for (n = min_frame_size - (*m_head)->m_pkthdr.len;
	     n > 0; n -= sizeof(pad))
		if (!m_append(*m_head, min(n, sizeof(pad)), pad))
			break;

	if (n > 0) {
		m_freem(*m_head);
		device_printf(dev, "cannot pad short frame\n");
		DBG_COUNTER_INC(encap_pad_mbuf_fail);
		DBG_COUNTER_INC(tx_frees);
		return (ENOBUFS);
	}

	return (0);
}

static int
iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
{
	if_ctx_t ctx;
	if_shared_ctx_t sctx;
	if_softc_ctx_t scctx;
	bus_dma_tag_t buf_tag;
	bus_dma_segment_t *segs;
	struct mbuf *m_head, **ifsd_m;
	void *next_txd;
	bus_dmamap_t map;
	struct if_pkt_info pi;
	int remap = 0;
	int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;

	ctx = txq->ift_ctx;
	sctx = ctx->ifc_sctx;
	scctx = &ctx->ifc_softc_ctx;
	segs = txq->ift_segs;
	ntxd = txq->ift_size;
	m_head = *m_headp;
	map = NULL;

	/*
	 * If we're doing TSO the next descriptor to clean may be quite
	 * far ahead.
	 */
	cidx = txq->ift_cidx;
	pidx = txq->ift_pidx;
	if (ctx->ifc_flags & IFC_PREFETCH) {
		next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
		if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) {
			next_txd = calc_next_txd(txq, cidx, 0);
			prefetch(next_txd);
		}

		/* prefetch the next cache line of mbuf pointers and flags */
		prefetch(&txq->ift_sds.ifsd_m[next]);
		prefetch(&txq->ift_sds.ifsd_map[next]);
		next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
	}
	map = txq->ift_sds.ifsd_map[pidx];
	ifsd_m = txq->ift_sds.ifsd_m;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		buf_tag = txq->ift_tso_buf_tag;
		max_segs = scctx->isc_tx_tso_segments_max;
		map = txq->ift_sds.ifsd_tso_map[pidx];
		MPASS(buf_tag != NULL);
		MPASS(max_segs > 0);
	} else {
		buf_tag = txq->ift_buf_tag;
		max_segs = scctx->isc_tx_nsegments;
		map = txq->ift_sds.ifsd_map[pidx];
	}
	if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
	    __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
		err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
		if (err) {
			DBG_COUNTER_INC(encap_txd_encap_fail);
			return (err);
		}
	}
	m_head = *m_headp;

	pkt_info_zero(&pi);
	pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
	pi.ipi_pidx = pidx;
	pi.ipi_qsidx = txq->ift_id;
	pi.ipi_len = m_head->m_pkthdr.len;
	pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
	pi.ipi_vtag = M_HAS_VLANTAG(m_head) ?
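	/*
	 * Padding example for iflib_ether_pad() above (illustrative): a
	 * 42-byte ARP frame with isc_min_frame_size = 60 starts at n = 18,
	 * m_append()s all 18 zero bytes from pad[] in a single pass, and
	 * the loop exits with n <= 0; only if m_append() fails does the
	 * n > 0 check free the chain and return ENOBUFS.
	 */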
m_head->m_pkthdr.ether_vtag : 0; 3338 3339 /* deliberate bitwise OR to make one condition */ 3340 if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) { 3341 if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) { 3342 DBG_COUNTER_INC(encap_txd_encap_fail); 3343 return (err); 3344 } 3345 m_head = *m_headp; 3346 } 3347 3348 retry: 3349 err = bus_dmamap_load_mbuf_sg(buf_tag, map, m_head, segs, &nsegs, 3350 BUS_DMA_NOWAIT); 3351 defrag: 3352 if (__predict_false(err)) { 3353 switch (err) { 3354 case EFBIG: 3355 /* try collapse once and defrag once */ 3356 if (remap == 0) { 3357 m_head = m_collapse(*m_headp, M_NOWAIT, max_segs); 3358 /* try defrag if collapsing fails */ 3359 if (m_head == NULL) 3360 remap++; 3361 } 3362 if (remap == 1) { 3363 txq->ift_mbuf_defrag++; 3364 m_head = m_defrag(*m_headp, M_NOWAIT); 3365 } 3366 /* 3367 * remap should never be >1 unless bus_dmamap_load_mbuf_sg 3368 * failed to map an mbuf that was run through m_defrag 3369 */ 3370 MPASS(remap <= 1); 3371 if (__predict_false(m_head == NULL || remap > 1)) 3372 goto defrag_failed; 3373 remap++; 3374 *m_headp = m_head; 3375 goto retry; 3376 break; 3377 case ENOMEM: 3378 txq->ift_no_tx_dma_setup++; 3379 break; 3380 default: 3381 txq->ift_no_tx_dma_setup++; 3382 m_freem(*m_headp); 3383 DBG_COUNTER_INC(tx_frees); 3384 *m_headp = NULL; 3385 break; 3386 } 3387 txq->ift_map_failed++; 3388 DBG_COUNTER_INC(encap_load_mbuf_fail); 3389 DBG_COUNTER_INC(encap_txd_encap_fail); 3390 return (err); 3391 } 3392 ifsd_m[pidx] = m_head; 3393 /* 3394 * XXX assumes a 1 to 1 relationship between segments and 3395 * descriptors - this does not hold true on all drivers, e.g. 3396 * cxgb 3397 */ 3398 if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) { 3399 txq->ift_no_desc_avail++; 3400 bus_dmamap_unload(buf_tag, map); 3401 DBG_COUNTER_INC(encap_txq_avail_fail); 3402 DBG_COUNTER_INC(encap_txd_encap_fail); 3403 if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0) 3404 GROUPTASK_ENQUEUE(&txq->ift_task); 3405 return (ENOBUFS); 3406 } 3407 /* 3408 * On Intel cards we can greatly reduce the number of TX interrupts 3409 * we see by only setting report status on every Nth descriptor. 3410 * However, this also means that the driver will need to keep track 3411 * of the descriptors that RS was set on to check them for the DD bit. 
	 */
	txq->ift_rs_pending += nsegs + 1;
	if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
	    iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
		pi.ipi_flags |= IPI_TX_INTR;
		txq->ift_rs_pending = 0;
	}

	pi.ipi_segs = segs;
	pi.ipi_nsegs = nsegs;

	MPASS(pidx >= 0 && pidx < txq->ift_size);
#ifdef PKT_DEBUG
	print_pkt(&pi);
#endif
	if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) {
		bus_dmamap_sync(buf_tag, map, BUS_DMASYNC_PREWRITE);
		DBG_COUNTER_INC(tx_encap);
		MPASS(pi.ipi_new_pidx < txq->ift_size);

		ndesc = pi.ipi_new_pidx - pi.ipi_pidx;
		if (pi.ipi_new_pidx < pi.ipi_pidx) {
			ndesc += txq->ift_size;
			txq->ift_gen = 1;
		}
		/*
		 * drivers can need as many as
		 * two sentinels
		 */
		MPASS(ndesc <= pi.ipi_nsegs + 2);
		MPASS(pi.ipi_new_pidx != pidx);
		MPASS(ndesc > 0);
		txq->ift_in_use += ndesc;

		/*
		 * We update the last software descriptor again here because
		 * there may be a sentinel and/or there may be more mbufs
		 * than segments.
		 */
		txq->ift_pidx = pi.ipi_new_pidx;
		txq->ift_npending += pi.ipi_ndescs;
	} else {
		*m_headp = m_head = iflib_remove_mbuf(txq);
		if (err == EFBIG) {
			txq->ift_txd_encap_efbig++;
			if (remap < 2) {
				remap = 1;
				goto defrag;
			}
		}
		goto defrag_failed;
	}
	/*
	 * err can't possibly be non-zero here, so we don't need to test it
	 * to see if we need to DBG_COUNTER_INC(encap_txd_encap_fail).
	 */
	return (err);

defrag_failed:
	txq->ift_mbuf_defrag_failed++;
	txq->ift_map_failed++;
	m_freem(*m_headp);
	DBG_COUNTER_INC(tx_frees);
	*m_headp = NULL;
	DBG_COUNTER_INC(encap_txd_encap_fail);
	return (ENOMEM);
}

static void
iflib_tx_desc_free(iflib_txq_t txq, int n)
{
	uint32_t qsize, cidx, mask, gen;
	struct mbuf *m, **ifsd_m;
	bool do_prefetch;

	cidx = txq->ift_cidx;
	gen = txq->ift_gen;
	qsize = txq->ift_size;
	mask = qsize-1;
	ifsd_m = txq->ift_sds.ifsd_m;
	do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);

	while (n-- > 0) {
		if (do_prefetch) {
			prefetch(ifsd_m[(cidx + 3) & mask]);
			prefetch(ifsd_m[(cidx + 4) & mask]);
		}
		if ((m = ifsd_m[cidx]) != NULL) {
			prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
			if (m->m_pkthdr.csum_flags & CSUM_TSO) {
				bus_dmamap_sync(txq->ift_tso_buf_tag,
				    txq->ift_sds.ifsd_tso_map[cidx],
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(txq->ift_tso_buf_tag,
				    txq->ift_sds.ifsd_tso_map[cidx]);
			} else {
				bus_dmamap_sync(txq->ift_buf_tag,
				    txq->ift_sds.ifsd_map[cidx],
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(txq->ift_buf_tag,
				    txq->ift_sds.ifsd_map[cidx]);
			}
			/* XXX we don't support any drivers that batch packets yet */
			MPASS(m->m_nextpkt == NULL);
			m_freem(m);
			ifsd_m[cidx] = NULL;
#if MEMORY_LOGGING
			txq->ift_dequeued++;
#endif
			DBG_COUNTER_INC(tx_frees);
		}
		if (__predict_false(++cidx == qsize)) {
			cidx = 0;
			gen = 0;
		}
	}
	txq->ift_cidx = cidx;
	txq->ift_gen = gen;
}

static __inline int
iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
{
	int reclaim;
	if_ctx_t ctx = txq->ift_ctx;

	KASSERT(thresh >= 0, ("invalid threshold to reclaim"));
	MPASS(thresh /*+
MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size); 3539 3540 /* 3541 * Need a rate-limiting check so that this isn't called every time 3542 */ 3543 iflib_tx_credits_update(ctx, txq); 3544 reclaim = DESC_RECLAIMABLE(txq); 3545 3546 if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) { 3547 #ifdef INVARIANTS 3548 if (iflib_verbose_debug) { 3549 printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__, 3550 txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments, 3551 reclaim, thresh); 3552 3553 } 3554 #endif 3555 return (0); 3556 } 3557 iflib_tx_desc_free(txq, reclaim); 3558 txq->ift_cleaned += reclaim; 3559 txq->ift_in_use -= reclaim; 3560 3561 return (reclaim); 3562 } 3563 3564 static struct mbuf ** 3565 _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining) 3566 { 3567 int next, size; 3568 struct mbuf **items; 3569 3570 size = r->size; 3571 next = (cidx + CACHE_PTR_INCREMENT) & (size-1); 3572 items = __DEVOLATILE(struct mbuf **, &r->items[0]); 3573 3574 prefetch(items[(cidx + offset) & (size-1)]); 3575 if (remaining > 1) { 3576 prefetch2cachelines(&items[next]); 3577 prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]); 3578 prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]); 3579 prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]); 3580 } 3581 return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)])); 3582 } 3583 3584 static void 3585 iflib_txq_check_drain(iflib_txq_t txq, int budget) 3586 { 3587 3588 ifmp_ring_check_drainage(txq->ift_br, budget); 3589 } 3590 3591 static uint32_t 3592 iflib_txq_can_drain(struct ifmp_ring *r) 3593 { 3594 iflib_txq_t txq = r->cookie; 3595 if_ctx_t ctx = txq->ift_ctx; 3596 3597 if (TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) 3598 return (1); 3599 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, 3600 BUS_DMASYNC_POSTREAD); 3601 return (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, 3602 false)); 3603 } 3604 3605 static uint32_t 3606 iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) 3607 { 3608 iflib_txq_t txq = r->cookie; 3609 if_ctx_t ctx = txq->ift_ctx; 3610 if_t ifp = ctx->ifc_ifp; 3611 struct mbuf *m, **mp; 3612 int avail, bytes_sent, consumed, count, err, i, in_use_prev; 3613 int mcast_sent, pkt_sent, reclaimed, txq_avail; 3614 bool do_prefetch, rang, ring; 3615 3616 if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) || 3617 !LINK_ACTIVE(ctx))) { 3618 DBG_COUNTER_INC(txq_drain_notready); 3619 return (0); 3620 } 3621 reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); 3622 rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use); 3623 avail = IDXDIFF(pidx, cidx, r->size); 3624 if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) { 3625 DBG_COUNTER_INC(txq_drain_flushing); 3626 for (i = 0; i < avail; i++) { 3627 if (__predict_true(r->items[(cidx + i) & (r->size-1)] != (void *)txq)) 3628 m_free(r->items[(cidx + i) & (r->size-1)]); 3629 r->items[(cidx + i) & (r->size-1)] = NULL; 3630 } 3631 return (avail); 3632 } 3633 3634 if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) { 3635 txq->ift_qstatus = IFLIB_QUEUE_IDLE; 3636 CALLOUT_LOCK(txq); 3637 callout_stop(&txq->ift_timer); 3638 CALLOUT_UNLOCK(txq); 3639 DBG_COUNTER_INC(txq_drain_oactive); 3640 return (0); 3641 } 3642 if (reclaimed) 3643 txq->ift_qstatus = IFLIB_QUEUE_IDLE; 3644 consumed = mcast_sent = bytes_sent = pkt_sent = 0; 3645 count = MIN(avail, TX_BATCH_SIZE); 3646 #ifdef INVARIANTS 3647 if 
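	/*
	 * Reclaim sketch for the iflib_completed_tx_reclaim() call above
	 * (illustrative numbers): with ift_processed = 1000, ift_cleaned =
	 * 900, and isc_tx_nsegments = 8, DESC_RECLAIMABLE() yields 92;
	 * when that exceeds RECLAIM_THRESH(ctx), 92 descriptors are freed
	 * and ift_cleaned and ift_in_use are adjusted by the same amount.
	 */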
(iflib_verbose_debug) 3648 printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__, 3649 avail, ctx->ifc_flags, TXQ_AVAIL(txq)); 3650 #endif 3651 do_prefetch = (ctx->ifc_flags & IFC_PREFETCH); 3652 txq_avail = TXQ_AVAIL(txq); 3653 err = 0; 3654 for (i = 0; i < count && txq_avail > MAX_TX_DESC(ctx) + 2; i++) { 3655 int rem = do_prefetch ? count - i : 0; 3656 3657 mp = _ring_peek_one(r, cidx, i, rem); 3658 MPASS(mp != NULL && *mp != NULL); 3659 if (__predict_false(*mp == (struct mbuf *)txq)) { 3660 consumed++; 3661 continue; 3662 } 3663 in_use_prev = txq->ift_in_use; 3664 err = iflib_encap(txq, mp); 3665 if (__predict_false(err)) { 3666 /* no room - bail out */ 3667 if (err == ENOBUFS) 3668 break; 3669 consumed++; 3670 /* we can't send this packet - skip it */ 3671 continue; 3672 } 3673 consumed++; 3674 pkt_sent++; 3675 m = *mp; 3676 DBG_COUNTER_INC(tx_sent); 3677 bytes_sent += m->m_pkthdr.len; 3678 mcast_sent += !!(m->m_flags & M_MCAST); 3679 txq_avail = TXQ_AVAIL(txq); 3680 3681 txq->ift_db_pending += (txq->ift_in_use - in_use_prev); 3682 ETHER_BPF_MTAP(ifp, m); 3683 if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) 3684 break; 3685 rang = iflib_txd_db_check(ctx, txq, false, in_use_prev); 3686 } 3687 3688 /* deliberate use of bitwise or to avoid gratuitous short-circuit */ 3689 ring = rang ? false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx)); 3690 iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use); 3691 if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent); 3692 if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent); 3693 if (mcast_sent) 3694 if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent); 3695 #ifdef INVARIANTS 3696 if (iflib_verbose_debug) 3697 printf("consumed=%d\n", consumed); 3698 #endif 3699 return (consumed); 3700 } 3701 3702 static uint32_t 3703 iflib_txq_drain_always(struct ifmp_ring *r) 3704 { 3705 return (1); 3706 } 3707 3708 static uint32_t 3709 iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) 3710 { 3711 int i, avail; 3712 struct mbuf **mp; 3713 iflib_txq_t txq; 3714 3715 txq = r->cookie; 3716 3717 txq->ift_qstatus = IFLIB_QUEUE_IDLE; 3718 CALLOUT_LOCK(txq); 3719 callout_stop(&txq->ift_timer); 3720 CALLOUT_UNLOCK(txq); 3721 3722 avail = IDXDIFF(pidx, cidx, r->size); 3723 for (i = 0; i < avail; i++) { 3724 mp = _ring_peek_one(r, cidx, i, avail - i); 3725 if (__predict_false(*mp == (struct mbuf *)txq)) 3726 continue; 3727 m_freem(*mp); 3728 DBG_COUNTER_INC(tx_frees); 3729 } 3730 MPASS(ifmp_ring_is_stalled(r) == 0); 3731 return (avail); 3732 } 3733 3734 static void 3735 iflib_ifmp_purge(iflib_txq_t txq) 3736 { 3737 struct ifmp_ring *r; 3738 3739 r = txq->ift_br; 3740 r->drain = iflib_txq_drain_free; 3741 r->can_drain = iflib_txq_drain_always; 3742 3743 ifmp_ring_check_drainage(r, r->size); 3744 3745 r->drain = iflib_txq_drain; 3746 r->can_drain = iflib_txq_can_drain; 3747 } 3748 3749 static void 3750 _task_fn_tx(void *context) 3751 { 3752 iflib_txq_t txq = context; 3753 if_ctx_t ctx = txq->ift_ctx; 3754 if_t ifp = ctx->ifc_ifp; 3755 int abdicate = ctx->ifc_sysctl_tx_abdicate; 3756 3757 #ifdef IFLIB_DIAGNOSTICS 3758 txq->ift_cpu_exec_count[curcpu]++; 3759 #endif 3760 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) 3761 return; 3762 #ifdef DEV_NETMAP 3763 if ((if_getcapenable(ifp) & IFCAP_NETMAP) && 3764 netmap_tx_irq(ifp, txq->ift_id)) 3765 goto skip_ifmp; 3766 #endif 3767 #ifdef ALTQ 3768 if (ALTQ_IS_ENABLED(&ifp->if_snd)) 3769 iflib_altq_if_start(ifp); 3770 #endif 3771 if (txq->ift_db_pending) 3772 
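	/*
	 * Sketch of the enqueue below: the queue's own pointer is pushed
	 * into the mp_ring as a marker rather than as a real packet.  The
	 * drain side recognizes "(struct mbuf *)txq" and merely counts it
	 * as consumed (see iflib_txq_drain()), so this enqueue just kicks
	 * the ring into flushing any deferred doorbell writes.
	 */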
ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate); 3773 else if (!abdicate) 3774 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); 3775 /* 3776 * When abdicating, we always need to check drainage, not just when we don't enqueue 3777 */ 3778 if (abdicate) 3779 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); 3780 #ifdef DEV_NETMAP 3781 skip_ifmp: 3782 #endif 3783 if (ctx->ifc_flags & IFC_LEGACY) 3784 IFDI_INTR_ENABLE(ctx); 3785 else 3786 IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); 3787 } 3788 3789 static void 3790 _task_fn_rx(void *context) 3791 { 3792 iflib_rxq_t rxq = context; 3793 if_ctx_t ctx = rxq->ifr_ctx; 3794 uint8_t more; 3795 uint16_t budget; 3796 #ifdef DEV_NETMAP 3797 u_int work = 0; 3798 int nmirq; 3799 #endif 3800 3801 #ifdef IFLIB_DIAGNOSTICS 3802 rxq->ifr_cpu_exec_count[curcpu]++; 3803 #endif 3804 DBG_COUNTER_INC(task_fn_rxs); 3805 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) 3806 return; 3807 #ifdef DEV_NETMAP 3808 nmirq = netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work); 3809 if (nmirq != NM_IRQ_PASS) { 3810 more = (nmirq == NM_IRQ_RESCHED) ? IFLIB_RXEOF_MORE : 0; 3811 goto skip_rxeof; 3812 } 3813 #endif 3814 budget = ctx->ifc_sysctl_rx_budget; 3815 if (budget == 0) 3816 budget = 16; /* XXX */ 3817 more = iflib_rxeof(rxq, budget); 3818 #ifdef DEV_NETMAP 3819 skip_rxeof: 3820 #endif 3821 if ((more & IFLIB_RXEOF_MORE) == 0) { 3822 if (ctx->ifc_flags & IFC_LEGACY) 3823 IFDI_INTR_ENABLE(ctx); 3824 else 3825 IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); 3826 DBG_COUNTER_INC(rx_intr_enables); 3827 } 3828 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) 3829 return; 3830 3831 if (more & IFLIB_RXEOF_MORE) 3832 GROUPTASK_ENQUEUE(&rxq->ifr_task); 3833 else if (more & IFLIB_RXEOF_EMPTY) 3834 callout_reset_curcpu(&rxq->ifr_watchdog, 1, &_task_fn_rx_watchdog, rxq); 3835 } 3836 3837 static void 3838 _task_fn_admin(void *context) 3839 { 3840 if_ctx_t ctx = context; 3841 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 3842 iflib_txq_t txq; 3843 int i; 3844 bool oactive, running, do_reset, do_watchdog, in_detach; 3845 uint32_t reset_on = hz / 2; 3846 3847 STATE_LOCK(ctx); 3848 running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING); 3849 oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE); 3850 do_reset = (ctx->ifc_flags & IFC_DO_RESET); 3851 do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG); 3852 in_detach = (ctx->ifc_flags & IFC_IN_DETACH); 3853 ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG); 3854 STATE_UNLOCK(ctx); 3855 3856 if ((!running && !oactive) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN)) 3857 return; 3858 if (in_detach) 3859 return; 3860 3861 CTX_LOCK(ctx); 3862 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) { 3863 CALLOUT_LOCK(txq); 3864 callout_stop(&txq->ift_timer); 3865 CALLOUT_UNLOCK(txq); 3866 } 3867 if (do_watchdog) { 3868 ctx->ifc_watchdog_events++; 3869 IFDI_WATCHDOG_RESET(ctx); 3870 } 3871 IFDI_UPDATE_ADMIN_STATUS(ctx); 3872 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) { 3873 #ifdef DEV_NETMAP 3874 reset_on = hz / 2; 3875 if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) 3876 iflib_netmap_timer_adjust(ctx, txq, &reset_on); 3877 #endif 3878 callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu); 3879 } 3880 IFDI_LINK_INTR_ENABLE(ctx); 3881 if (do_reset) 3882 iflib_if_init_locked(ctx); 3883 CTX_UNLOCK(ctx); 3884 3885 if (LINK_ACTIVE(ctx) == 0) 3886 return; 3887 for (txq = ctx->ifc_txqs, i = 0; i < 
sctx->isc_ntxqsets; i++, txq++) 3888 iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); 3889 } 3890 3891 3892 static void 3893 _task_fn_iov(void *context) 3894 { 3895 if_ctx_t ctx = context; 3896 3897 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) && 3898 !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN)) 3899 return; 3900 3901 CTX_LOCK(ctx); 3902 IFDI_VFLR_HANDLE(ctx); 3903 CTX_UNLOCK(ctx); 3904 } 3905 3906 static int 3907 iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS) 3908 { 3909 int err; 3910 if_int_delay_info_t info; 3911 if_ctx_t ctx; 3912 3913 info = (if_int_delay_info_t)arg1; 3914 ctx = info->iidi_ctx; 3915 info->iidi_req = req; 3916 info->iidi_oidp = oidp; 3917 CTX_LOCK(ctx); 3918 err = IFDI_SYSCTL_INT_DELAY(ctx, info); 3919 CTX_UNLOCK(ctx); 3920 return (err); 3921 } 3922 3923 /********************************************************************* 3924 * 3925 * IFNET FUNCTIONS 3926 * 3927 **********************************************************************/ 3928 3929 static void 3930 iflib_if_init_locked(if_ctx_t ctx) 3931 { 3932 iflib_stop(ctx); 3933 iflib_init_locked(ctx); 3934 } 3935 3936 3937 static void 3938 iflib_if_init(void *arg) 3939 { 3940 if_ctx_t ctx = arg; 3941 3942 CTX_LOCK(ctx); 3943 iflib_if_init_locked(ctx); 3944 CTX_UNLOCK(ctx); 3945 } 3946 3947 static int 3948 iflib_if_transmit(if_t ifp, struct mbuf *m) 3949 { 3950 if_ctx_t ctx = if_getsoftc(ifp); 3951 3952 iflib_txq_t txq; 3953 int err, qidx; 3954 int abdicate = ctx->ifc_sysctl_tx_abdicate; 3955 3956 if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) { 3957 DBG_COUNTER_INC(tx_frees); 3958 m_freem(m); 3959 return (ENETDOWN); 3960 } 3961 3962 MPASS(m->m_nextpkt == NULL); 3963 /* ALTQ-enabled interfaces always use queue 0. */ 3964 qidx = 0; 3965 if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m) && !ALTQ_IS_ENABLED(&ifp->if_snd)) 3966 qidx = QIDX(ctx, m); 3967 /* 3968 * XXX calculate buf_ring based on flowid (divvy up bits?) 3969 */ 3970 txq = &ctx->ifc_txqs[qidx]; 3971 3972 #ifdef DRIVER_BACKPRESSURE 3973 if (txq->ift_closed) { 3974 while (m != NULL) { 3975 next = m->m_nextpkt; 3976 m->m_nextpkt = NULL; 3977 m_freem(m); 3978 DBG_COUNTER_INC(tx_frees); 3979 m = next; 3980 } 3981 return (ENOBUFS); 3982 } 3983 #endif 3984 #ifdef notyet 3985 qidx = count = 0; 3986 mp = marr; 3987 next = m; 3988 do { 3989 count++; 3990 next = next->m_nextpkt; 3991 } while (next != NULL); 3992 3993 if (count > nitems(marr)) 3994 if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) { 3995 /* XXX check nextpkt */ 3996 m_freem(m); 3997 /* XXX simplify for now */ 3998 DBG_COUNTER_INC(tx_frees); 3999 return (ENOBUFS); 4000 } 4001 for (next = m, i = 0; next != NULL; i++) { 4002 mp[i] = next; 4003 next = next->m_nextpkt; 4004 mp[i]->m_nextpkt = NULL; 4005 } 4006 #endif 4007 DBG_COUNTER_INC(tx_seen); 4008 err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate); 4009 4010 if (abdicate) 4011 GROUPTASK_ENQUEUE(&txq->ift_task); 4012 if (err) { 4013 if (!abdicate) 4014 GROUPTASK_ENQUEUE(&txq->ift_task); 4015 /* support forthcoming later */ 4016 #ifdef DRIVER_BACKPRESSURE 4017 txq->ift_closed = TRUE; 4018 #endif 4019 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); 4020 m_freem(m); 4021 DBG_COUNTER_INC(tx_frees); 4022 } 4023 4024 return (err); 4025 } 4026 4027 #ifdef ALTQ 4028 /* 4029 * The overall approach to integrating iflib with ALTQ is to continue to use 4030 * the iflib mp_ring machinery between the ALTQ queue(s) and the hardware 4031 * ring. 
Technically, when using ALTQ, queueing to an intermediate mp_ring
4032  * is redundant/unnecessary, but doing so minimizes the amount of
4033  * ALTQ-specific code required in iflib. It is assumed that the overhead of
4034  * redundantly queueing to an intermediate mp_ring is swamped by the
4035  * performance limitations inherent in using ALTQ.
4036  *
4037  * When ALTQ support is compiled in, all iflib drivers will use a transmit
4038  * routine, iflib_altq_if_transmit(), that checks if ALTQ is enabled for the
4039  * given interface. If ALTQ is enabled for an interface, then all
4040  * transmitted packets for that interface will be submitted to the ALTQ
4041  * subsystem via IFQ_ENQUEUE(). We don't use the legacy if_transmit()
4042  * implementation because it uses IFQ_HANDOFF(), which will redundantly
4043  * update stats that the iflib machinery handles, and which is sensitive to
4044  * the disused IFF_DRV_OACTIVE flag. Additionally, iflib_altq_if_start()
4045  * will be installed as the start routine for use by ALTQ facilities that
4046  * need to trigger queue drains on a scheduled basis.
4047  *
4048  */
4049 static void
4050 iflib_altq_if_start(if_t ifp)
4051 {
4052 	struct ifaltq *ifq = &ifp->if_snd;
4053 	struct mbuf *m;
4054 
4055 	IFQ_LOCK(ifq);
4056 	IFQ_DEQUEUE_NOLOCK(ifq, m);
4057 	while (m != NULL) {
4058 		iflib_if_transmit(ifp, m);
4059 		IFQ_DEQUEUE_NOLOCK(ifq, m);
4060 	}
4061 	IFQ_UNLOCK(ifq);
4062 }
4063 
4064 static int
4065 iflib_altq_if_transmit(if_t ifp, struct mbuf *m)
4066 {
4067 	int err;
4068 
4069 	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
4070 		IFQ_ENQUEUE(&ifp->if_snd, m, err);
4071 		if (err == 0)
4072 			iflib_altq_if_start(ifp);
4073 	} else
4074 		err = iflib_if_transmit(ifp, m);
4075 
4076 	return (err);
4077 }
4078 #endif /* ALTQ */
4079 
4080 static void
4081 iflib_if_qflush(if_t ifp)
4082 {
4083 	if_ctx_t ctx = if_getsoftc(ifp);
4084 	iflib_txq_t txq = ctx->ifc_txqs;
4085 	int i;
4086 
4087 	STATE_LOCK(ctx);
4088 	ctx->ifc_flags |= IFC_QFLUSH;
4089 	STATE_UNLOCK(ctx);
4090 	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
4091 		while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
4092 			iflib_txq_check_drain(txq, 0);
4093 	STATE_LOCK(ctx);
4094 	ctx->ifc_flags &= ~IFC_QFLUSH;
4095 	STATE_UNLOCK(ctx);
4096 
4097 	/*
4098 	 * When ALTQ is enabled, this will also take care of purging the
4099 	 * ALTQ queue(s).
4100 	 */
4101 	if_qflush(ifp);
4102 }
4103 
4104 
4105 #define IFCAP_FLAGS (IFCAP_HWCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
4106 		     IFCAP_TSO | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \
4107 		     IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | \
4108 		     IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM | IFCAP_NOMAP)
4109 
4110 static int
4111 iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
4112 {
4113 	if_ctx_t ctx = if_getsoftc(ifp);
4114 	struct ifreq *ifr = (struct ifreq *)data;
4115 #if defined(INET) || defined(INET6)
4116 	struct ifaddr *ifa = (struct ifaddr *)data;
4117 #endif
4118 	bool avoid_reset = false;
4119 	int err = 0, reinit = 0, bits;
4120 
4121 	switch (command) {
4122 	case SIOCSIFADDR:
4123 #ifdef INET
4124 		if (ifa->ifa_addr->sa_family == AF_INET)
4125 			avoid_reset = true;
4126 #endif
4127 #ifdef INET6
4128 		if (ifa->ifa_addr->sa_family == AF_INET6)
4129 			avoid_reset = true;
4130 #endif
4131 		/*
4132 		** Calling init results in link renegotiation,
4133 		** so we avoid doing it when possible.
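		** (For example, an administrator re-running "ifconfig foo0 inet ..."
		** to adjust an address would otherwise trigger a full reinit and a
		** link flap on every invocation; "foo0" is a hypothetical interface.)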
4134 */ 4135 if (avoid_reset) { 4136 if_setflagbits(ifp, IFF_UP,0); 4137 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) 4138 reinit = 1; 4139 #ifdef INET 4140 if (!(if_getflags(ifp) & IFF_NOARP)) 4141 arp_ifinit(ifp, ifa); 4142 #endif 4143 } else 4144 err = ether_ioctl(ifp, command, data); 4145 break; 4146 case SIOCSIFMTU: 4147 CTX_LOCK(ctx); 4148 if (ifr->ifr_mtu == if_getmtu(ifp)) { 4149 CTX_UNLOCK(ctx); 4150 break; 4151 } 4152 bits = if_getdrvflags(ifp); 4153 /* stop the driver and free any clusters before proceeding */ 4154 iflib_stop(ctx); 4155 4156 if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) { 4157 STATE_LOCK(ctx); 4158 if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size) 4159 ctx->ifc_flags |= IFC_MULTISEG; 4160 else 4161 ctx->ifc_flags &= ~IFC_MULTISEG; 4162 STATE_UNLOCK(ctx); 4163 err = if_setmtu(ifp, ifr->ifr_mtu); 4164 } 4165 iflib_init_locked(ctx); 4166 STATE_LOCK(ctx); 4167 if_setdrvflags(ifp, bits); 4168 STATE_UNLOCK(ctx); 4169 CTX_UNLOCK(ctx); 4170 break; 4171 case SIOCSIFFLAGS: 4172 CTX_LOCK(ctx); 4173 if (if_getflags(ifp) & IFF_UP) { 4174 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4175 if ((if_getflags(ifp) ^ ctx->ifc_if_flags) & 4176 (IFF_PROMISC | IFF_ALLMULTI)) { 4177 err = IFDI_PROMISC_SET(ctx, if_getflags(ifp)); 4178 } 4179 } else 4180 reinit = 1; 4181 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4182 iflib_stop(ctx); 4183 } 4184 ctx->ifc_if_flags = if_getflags(ifp); 4185 CTX_UNLOCK(ctx); 4186 break; 4187 case SIOCADDMULTI: 4188 case SIOCDELMULTI: 4189 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4190 CTX_LOCK(ctx); 4191 IFDI_INTR_DISABLE(ctx); 4192 IFDI_MULTI_SET(ctx); 4193 IFDI_INTR_ENABLE(ctx); 4194 CTX_UNLOCK(ctx); 4195 } 4196 break; 4197 case SIOCSIFMEDIA: 4198 CTX_LOCK(ctx); 4199 IFDI_MEDIA_SET(ctx); 4200 CTX_UNLOCK(ctx); 4201 /* FALLTHROUGH */ 4202 case SIOCGIFMEDIA: 4203 case SIOCGIFXMEDIA: 4204 err = ifmedia_ioctl(ifp, ifr, ctx->ifc_mediap, command); 4205 break; 4206 case SIOCGI2C: 4207 { 4208 struct ifi2creq i2c; 4209 4210 err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 4211 if (err != 0) 4212 break; 4213 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { 4214 err = EINVAL; 4215 break; 4216 } 4217 if (i2c.len > sizeof(i2c.data)) { 4218 err = EINVAL; 4219 break; 4220 } 4221 4222 if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0) 4223 err = copyout(&i2c, ifr_data_get_ptr(ifr), 4224 sizeof(i2c)); 4225 break; 4226 } 4227 case SIOCSIFCAP: 4228 { 4229 int mask, setmask, oldmask; 4230 4231 oldmask = if_getcapenable(ifp); 4232 mask = ifr->ifr_reqcap ^ oldmask; 4233 mask &= ctx->ifc_softc_ctx.isc_capabilities | IFCAP_NOMAP; 4234 setmask = 0; 4235 #ifdef TCP_OFFLOAD 4236 setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6); 4237 #endif 4238 setmask |= (mask & IFCAP_FLAGS); 4239 setmask |= (mask & IFCAP_WOL); 4240 4241 /* 4242 * If any RX csum has changed, change all the ones that 4243 * are supported by the driver. 
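		 * For example, if only IFCAP_RXCSUM is toggled on hardware whose
		 * isc_capabilities also include IFCAP_RXCSUM_IPV6, both bits are
		 * flipped together so the IPv4 and IPv6 RX csum settings never
		 * diverge.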
4244 */ 4245 if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) { 4246 setmask |= ctx->ifc_softc_ctx.isc_capabilities & 4247 (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6); 4248 } 4249 4250 /* 4251 * want to ensure that traffic has stopped before we change any of the flags 4252 */ 4253 if (setmask) { 4254 CTX_LOCK(ctx); 4255 bits = if_getdrvflags(ifp); 4256 if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL) 4257 iflib_stop(ctx); 4258 STATE_LOCK(ctx); 4259 if_togglecapenable(ifp, setmask); 4260 STATE_UNLOCK(ctx); 4261 if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL) 4262 iflib_init_locked(ctx); 4263 STATE_LOCK(ctx); 4264 if_setdrvflags(ifp, bits); 4265 STATE_UNLOCK(ctx); 4266 CTX_UNLOCK(ctx); 4267 } 4268 if_vlancap(ifp); 4269 break; 4270 } 4271 case SIOCGPRIVATE_0: 4272 case SIOCSDRVSPEC: 4273 case SIOCGDRVSPEC: 4274 CTX_LOCK(ctx); 4275 err = IFDI_PRIV_IOCTL(ctx, command, data); 4276 CTX_UNLOCK(ctx); 4277 break; 4278 default: 4279 err = ether_ioctl(ifp, command, data); 4280 break; 4281 } 4282 if (reinit) 4283 iflib_if_init(ctx); 4284 return (err); 4285 } 4286 4287 static uint64_t 4288 iflib_if_get_counter(if_t ifp, ift_counter cnt) 4289 { 4290 if_ctx_t ctx = if_getsoftc(ifp); 4291 4292 return (IFDI_GET_COUNTER(ctx, cnt)); 4293 } 4294 4295 /********************************************************************* 4296 * 4297 * OTHER FUNCTIONS EXPORTED TO THE STACK 4298 * 4299 **********************************************************************/ 4300 4301 static void 4302 iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag) 4303 { 4304 if_ctx_t ctx = if_getsoftc(ifp); 4305 4306 if ((void *)ctx != arg) 4307 return; 4308 4309 if ((vtag == 0) || (vtag > 4095)) 4310 return; 4311 4312 if (iflib_in_detach(ctx)) 4313 return; 4314 4315 CTX_LOCK(ctx); 4316 /* Driver may need all untagged packets to be flushed */ 4317 if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG)) 4318 iflib_stop(ctx); 4319 IFDI_VLAN_REGISTER(ctx, vtag); 4320 /* Re-init to load the changes, if required */ 4321 if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG)) 4322 iflib_init_locked(ctx); 4323 CTX_UNLOCK(ctx); 4324 } 4325 4326 static void 4327 iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag) 4328 { 4329 if_ctx_t ctx = if_getsoftc(ifp); 4330 4331 if ((void *)ctx != arg) 4332 return; 4333 4334 if ((vtag == 0) || (vtag > 4095)) 4335 return; 4336 4337 CTX_LOCK(ctx); 4338 /* Driver may need all tagged packets to be flushed */ 4339 if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG)) 4340 iflib_stop(ctx); 4341 IFDI_VLAN_UNREGISTER(ctx, vtag); 4342 /* Re-init to load the changes, if required */ 4343 if (IFDI_NEEDS_RESTART(ctx, IFLIB_RESTART_VLAN_CONFIG)) 4344 iflib_init_locked(ctx); 4345 CTX_UNLOCK(ctx); 4346 } 4347 4348 static void 4349 iflib_led_func(void *arg, int onoff) 4350 { 4351 if_ctx_t ctx = arg; 4352 4353 CTX_LOCK(ctx); 4354 IFDI_LED_FUNC(ctx, onoff); 4355 CTX_UNLOCK(ctx); 4356 } 4357 4358 /********************************************************************* 4359 * 4360 * BUS FUNCTION DEFINITIONS 4361 * 4362 **********************************************************************/ 4363 4364 int 4365 iflib_device_probe(device_t dev) 4366 { 4367 const pci_vendor_info_t *ent; 4368 if_shared_ctx_t sctx; 4369 uint16_t pci_device_id, pci_rev_id, pci_subdevice_id, pci_subvendor_id; 4370 uint16_t pci_vendor_id; 4371 4372 if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) 4373 return (ENOTSUP); 4374 4375 pci_vendor_id = pci_get_vendor(dev); 4376 pci_device_id = pci_get_device(dev); 4377 
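	/*
	 * Example (a sketch; the "foo" identifiers are hypothetical): the
	 * vendor table walked below comes from the driver via isc_vendor_info,
	 * typically built with the PVID()/PVID_END helpers from net/iflib.h:
	 *
	 *	static const pci_vendor_info_t foo_vendor_info[] = {
	 *		PVID(FOO_VENDOR_ID, FOO_DEVID, "Foo Gigabit Ethernet"),
	 *		PVID_END
	 *	};
	 */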
pci_subvendor_id = pci_get_subvendor(dev);
4378 	pci_subdevice_id = pci_get_subdevice(dev);
4379 	pci_rev_id = pci_get_revid(dev);
4380 	if (sctx->isc_parse_devinfo != NULL)
4381 		sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);
4382 
4383 	ent = sctx->isc_vendor_info;
4384 	while (ent->pvi_vendor_id != 0) {
4385 		if (pci_vendor_id != ent->pvi_vendor_id) {
4386 			ent++;
4387 			continue;
4388 		}
4389 		if ((pci_device_id == ent->pvi_device_id) &&
4390 		    ((pci_subvendor_id == ent->pvi_subvendor_id) ||
4391 		     (ent->pvi_subvendor_id == 0)) &&
4392 		    ((pci_subdevice_id == ent->pvi_subdevice_id) ||
4393 		     (ent->pvi_subdevice_id == 0)) &&
4394 		    ((pci_rev_id == ent->pvi_rev_id) ||
4395 		     (ent->pvi_rev_id == 0))) {
4396 
4397 			device_set_desc_copy(dev, ent->pvi_name);
4398 			/* This needs to be changed to zero if the bus probing code
4399 			 * ever stops re-probing on best match, because the sctx
4400 			 * may have its values overwritten by register calls
4401 			 * in subsequent probes.
4402 			 */
4403 			return (BUS_PROBE_DEFAULT);
4404 		}
4405 		ent++;
4406 	}
4407 	return (ENXIO);
4408 }
4409 
4410 int
4411 iflib_device_probe_vendor(device_t dev)
4412 {
4413 	int probe;
4414 
4415 	probe = iflib_device_probe(dev);
4416 	if (probe == BUS_PROBE_DEFAULT)
4417 		return (BUS_PROBE_VENDOR);
4418 	else
4419 		return (probe);
4420 }
4421 
4422 static void
4423 iflib_reset_qvalues(if_ctx_t ctx)
4424 {
4425 	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4426 	if_shared_ctx_t sctx = ctx->ifc_sctx;
4427 	device_t dev = ctx->ifc_dev;
4428 	int i;
4429 
4430 	if (ctx->ifc_sysctl_ntxqs != 0)
4431 		scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
4432 	if (ctx->ifc_sysctl_nrxqs != 0)
4433 		scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;
4434 
4435 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4436 		if (ctx->ifc_sysctl_ntxds[i] != 0)
4437 			scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
4438 		else
4439 			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
4440 	}
4441 
4442 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4443 		if (ctx->ifc_sysctl_nrxds[i] != 0)
4444 			scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
4445 		else
4446 			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
4447 	}
4448 
4449 	for (i = 0; i < sctx->isc_nrxqs; i++) {
4450 		if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
4451 			device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
4452 			    i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
4453 			scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
4454 		}
4455 		if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
4456 			device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
4457 			    i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
4458 			scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
4459 		}
4460 		if (!powerof2(scctx->isc_nrxd[i])) {
4461 			device_printf(dev, "nrxd%d: %d is not a power of 2 - using default value of %d\n",
4462 			    i, scctx->isc_nrxd[i], sctx->isc_nrxd_default[i]);
4463 			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
4464 		}
4465 	}
4466 
4467 	for (i = 0; i < sctx->isc_ntxqs; i++) {
4468 		if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
4469 			device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
4470 			    i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
4471 			scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
4472 		}
4473 		if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
4474 			device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
4475 			    i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
4476 			scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
4477 		}
4478 		if (!powerof2(scctx->isc_ntxd[i])) {
4479 			device_printf(dev, "ntxd%d: %d is not a
power of 2 - using default value of %d\n", 4480 i, scctx->isc_ntxd[i], sctx->isc_ntxd_default[i]); 4481 scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i]; 4482 } 4483 } 4484 } 4485 4486 static void 4487 iflib_add_pfil(if_ctx_t ctx) 4488 { 4489 struct pfil_head *pfil; 4490 struct pfil_head_args pa; 4491 iflib_rxq_t rxq; 4492 int i; 4493 4494 pa.pa_version = PFIL_VERSION; 4495 pa.pa_flags = PFIL_IN; 4496 pa.pa_type = PFIL_TYPE_ETHERNET; 4497 pa.pa_headname = ctx->ifc_ifp->if_xname; 4498 pfil = pfil_head_register(&pa); 4499 4500 for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { 4501 rxq->pfil = pfil; 4502 } 4503 } 4504 4505 static void 4506 iflib_rem_pfil(if_ctx_t ctx) 4507 { 4508 struct pfil_head *pfil; 4509 iflib_rxq_t rxq; 4510 int i; 4511 4512 rxq = ctx->ifc_rxqs; 4513 pfil = rxq->pfil; 4514 for (i = 0; i < NRXQSETS(ctx); i++, rxq++) { 4515 rxq->pfil = NULL; 4516 } 4517 pfil_head_unregister(pfil); 4518 } 4519 4520 static uint16_t 4521 get_ctx_core_offset(if_ctx_t ctx) 4522 { 4523 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 4524 struct cpu_offset *op; 4525 uint16_t qc; 4526 uint16_t ret = ctx->ifc_sysctl_core_offset; 4527 4528 if (ret != CORE_OFFSET_UNSPECIFIED) 4529 return (ret); 4530 4531 if (ctx->ifc_sysctl_separate_txrx) 4532 qc = scctx->isc_ntxqsets + scctx->isc_nrxqsets; 4533 else 4534 qc = max(scctx->isc_ntxqsets, scctx->isc_nrxqsets); 4535 4536 mtx_lock(&cpu_offset_mtx); 4537 SLIST_FOREACH(op, &cpu_offsets, entries) { 4538 if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) { 4539 ret = op->offset; 4540 op->offset += qc; 4541 MPASS(op->refcount < UINT_MAX); 4542 op->refcount++; 4543 break; 4544 } 4545 } 4546 if (ret == CORE_OFFSET_UNSPECIFIED) { 4547 ret = 0; 4548 op = malloc(sizeof(struct cpu_offset), M_IFLIB, 4549 M_NOWAIT | M_ZERO); 4550 if (op == NULL) { 4551 device_printf(ctx->ifc_dev, 4552 "allocation for cpu offset failed.\n"); 4553 } else { 4554 op->offset = qc; 4555 op->refcount = 1; 4556 CPU_COPY(&ctx->ifc_cpus, &op->set); 4557 SLIST_INSERT_HEAD(&cpu_offsets, op, entries); 4558 } 4559 } 4560 mtx_unlock(&cpu_offset_mtx); 4561 4562 return (ret); 4563 } 4564 4565 static void 4566 unref_ctx_core_offset(if_ctx_t ctx) 4567 { 4568 struct cpu_offset *op, *top; 4569 4570 mtx_lock(&cpu_offset_mtx); 4571 SLIST_FOREACH_SAFE(op, &cpu_offsets, entries, top) { 4572 if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) { 4573 MPASS(op->refcount > 0); 4574 op->refcount--; 4575 if (op->refcount == 0) { 4576 SLIST_REMOVE(&cpu_offsets, op, cpu_offset, entries); 4577 free(op, M_IFLIB); 4578 } 4579 break; 4580 } 4581 } 4582 mtx_unlock(&cpu_offset_mtx); 4583 } 4584 4585 int 4586 iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp) 4587 { 4588 if_ctx_t ctx; 4589 if_t ifp; 4590 if_softc_ctx_t scctx; 4591 kobjop_desc_t kobj_desc; 4592 kobj_method_t *kobj_method; 4593 int err, msix, rid; 4594 uint16_t main_rxq, main_txq; 4595 4596 ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO); 4597 4598 if (sc == NULL) { 4599 sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); 4600 device_set_softc(dev, ctx); 4601 ctx->ifc_flags |= IFC_SC_ALLOCATED; 4602 } 4603 4604 ctx->ifc_sctx = sctx; 4605 ctx->ifc_dev = dev; 4606 ctx->ifc_softc = sc; 4607 4608 if ((err = iflib_register(ctx)) != 0) { 4609 device_printf(dev, "iflib_register failed %d\n", err); 4610 goto fail_ctx_free; 4611 } 4612 iflib_add_device_sysctl_pre(ctx); 4613 4614 scctx = &ctx->ifc_softc_ctx; 4615 ifp = ctx->ifc_ifp; 4616 4617 iflib_reset_qvalues(ctx); 4618 CTX_LOCK(ctx); 4619 if ((err = IFDI_ATTACH_PRE(ctx)) 
!= 0) {
4620 		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
4621 		goto fail_unlock;
4622 	}
4623 	_iflib_pre_assert(scctx);
4624 	ctx->ifc_txrx = *scctx->isc_txrx;
4625 
4626 	if (sctx->isc_flags & IFLIB_DRIVER_MEDIA)
4627 		ctx->ifc_mediap = scctx->isc_media;
4628 
4629 #ifdef INVARIANTS
4630 	if (scctx->isc_capabilities & IFCAP_TXCSUM)
4631 		MPASS(scctx->isc_tx_csum_flags);
4632 #endif
4633 
4634 	if_setcapabilities(ifp,
4635 	    scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_NOMAP);
4636 	if_setcapenable(ifp,
4637 	    scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_NOMAP);
4638 
4639 	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
4640 		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
4641 	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
4642 		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
4643 
4644 	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
4645 	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
4646 
4647 	/* XXX change for per-queue sizes */
4648 	device_printf(dev, "Using %d TX descriptors and %d RX descriptors\n",
4649 	    scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
4650 
4651 	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
4652 	    MAX_SINGLE_PACKET_FRACTION)
4653 		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
4654 		    MAX_SINGLE_PACKET_FRACTION);
4655 	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
4656 	    MAX_SINGLE_PACKET_FRACTION)
4657 		scctx->isc_tx_tso_segments_max = max(1,
4658 		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
4659 
4660 	/* TSO parameters: dig these out of the data sheet; they simply correspond to tag setup */
4661 	if (if_getcapabilities(ifp) & IFCAP_TSO) {
4662 		/*
4663 		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
4664 		 * but some MACs do.
4665 		 */
4666 		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
4667 		    IP_MAXPACKET));
4668 		/*
4669 		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
4670 		 * into account. In the worst case, each of these calls will
4671 		 * add another mbuf and, thus, the requirement for another DMA
4672 		 * segment. So for best performance, it doesn't make sense to
4673 		 * advertise a maximum of TSO segments that typically will
4674 		 * require defragmentation in iflib_encap().
4675 		 */
4676 		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
4677 		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
4678 	}
4679 	if (scctx->isc_rss_table_size == 0)
4680 		scctx->isc_rss_table_size = 64;
4681 	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
4682 
4683 	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
4684 	/* XXX format name */
4685 	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx,
4686 	    NULL, NULL, "admin");
4687 
4688 	/* Set up cpu set. If it fails, use the set of all CPUs. */
4689 	if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
4690 		device_printf(dev, "Unable to fetch CPU list\n");
4691 		CPU_COPY(&all_cpus, &ctx->ifc_cpus);
4692 	}
4693 	MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);
4694 
4695 	/*
4696 	** Now set up MSI or MSI-X; this should return the number of
4697 	** supported vectors (1 for a legacy interrupt or MSI).
4698 	*/
4699 	if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
4700 		msix = scctx->isc_vectors;
4701 	} else if (scctx->isc_msix_bar != 0)
4702 		/*
4703 		 * The simple fact that isc_msix_bar is not 0 does not mean
4704 		 * we have a good value there that is known to work.
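		 * iflib_msix_init() re-validates the BAR and vector counts and
		 * may itself fall back to MSI or a legacy interrupt if MSI-X
		 * setup fails.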
4705 */ 4706 msix = iflib_msix_init(ctx); 4707 else { 4708 scctx->isc_vectors = 1; 4709 scctx->isc_ntxqsets = 1; 4710 scctx->isc_nrxqsets = 1; 4711 scctx->isc_intr = IFLIB_INTR_LEGACY; 4712 msix = 0; 4713 } 4714 /* Get memory for the station queues */ 4715 if ((err = iflib_queues_alloc(ctx))) { 4716 device_printf(dev, "Unable to allocate queue memory\n"); 4717 goto fail_intr_free; 4718 } 4719 4720 if ((err = iflib_qset_structures_setup(ctx))) 4721 goto fail_queues; 4722 4723 /* 4724 * Now that we know how many queues there are, get the core offset. 4725 */ 4726 ctx->ifc_sysctl_core_offset = get_ctx_core_offset(ctx); 4727 4728 if (msix > 1) { 4729 /* 4730 * When using MSI-X, ensure that ifdi_{r,t}x_queue_intr_enable 4731 * aren't the default NULL implementation. 4732 */ 4733 kobj_desc = &ifdi_rx_queue_intr_enable_desc; 4734 kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL, 4735 kobj_desc); 4736 if (kobj_method == &kobj_desc->deflt) { 4737 device_printf(dev, 4738 "MSI-X requires ifdi_rx_queue_intr_enable method"); 4739 err = EOPNOTSUPP; 4740 goto fail_queues; 4741 } 4742 kobj_desc = &ifdi_tx_queue_intr_enable_desc; 4743 kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL, 4744 kobj_desc); 4745 if (kobj_method == &kobj_desc->deflt) { 4746 device_printf(dev, 4747 "MSI-X requires ifdi_tx_queue_intr_enable method"); 4748 err = EOPNOTSUPP; 4749 goto fail_queues; 4750 } 4751 4752 /* 4753 * Assign the MSI-X vectors. 4754 * Note that the default NULL ifdi_msix_intr_assign method will 4755 * fail here, too. 4756 */ 4757 err = IFDI_MSIX_INTR_ASSIGN(ctx, msix); 4758 if (err != 0) { 4759 device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", 4760 err); 4761 goto fail_queues; 4762 } 4763 } else if (scctx->isc_intr != IFLIB_INTR_MSIX) { 4764 rid = 0; 4765 if (scctx->isc_intr == IFLIB_INTR_MSI) { 4766 MPASS(msix == 1); 4767 rid = 1; 4768 } 4769 if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) { 4770 device_printf(dev, "iflib_legacy_setup failed %d\n", err); 4771 goto fail_queues; 4772 } 4773 } else { 4774 device_printf(dev, 4775 "Cannot use iflib with only 1 MSI-X interrupt!\n"); 4776 err = ENODEV; 4777 goto fail_intr_free; 4778 } 4779 4780 ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet); 4781 4782 if ((err = IFDI_ATTACH_POST(ctx)) != 0) { 4783 device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); 4784 goto fail_detach; 4785 } 4786 4787 /* 4788 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported. 4789 * This must appear after the call to ether_ifattach() because 4790 * ether_ifattach() sets if_hdrlen to the default value. 
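	 * With the VLAN header counted in, if_hdrlen grows from ETHER_HDR_LEN
	 * (14 bytes) to sizeof(struct ether_vlan_header) (18 bytes), covering
	 * the 4-byte 802.1Q encapsulation.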
4791 */ 4792 if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) 4793 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 4794 4795 if ((err = iflib_netmap_attach(ctx))) { 4796 device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err); 4797 goto fail_detach; 4798 } 4799 *ctxp = ctx; 4800 4801 DEBUGNET_SET(ctx->ifc_ifp, iflib); 4802 4803 if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); 4804 iflib_add_device_sysctl_post(ctx); 4805 iflib_add_pfil(ctx); 4806 ctx->ifc_flags |= IFC_INIT_DONE; 4807 CTX_UNLOCK(ctx); 4808 4809 return (0); 4810 4811 fail_detach: 4812 ether_ifdetach(ctx->ifc_ifp); 4813 fail_intr_free: 4814 iflib_free_intr_mem(ctx); 4815 fail_queues: 4816 iflib_tx_structures_free(ctx); 4817 iflib_rx_structures_free(ctx); 4818 taskqgroup_detach(qgroup_if_config_tqg, &ctx->ifc_admin_task); 4819 IFDI_DETACH(ctx); 4820 fail_unlock: 4821 CTX_UNLOCK(ctx); 4822 iflib_deregister(ctx); 4823 fail_ctx_free: 4824 device_set_softc(ctx->ifc_dev, NULL); 4825 if (ctx->ifc_flags & IFC_SC_ALLOCATED) 4826 free(ctx->ifc_softc, M_IFLIB); 4827 free(ctx, M_IFLIB); 4828 return (err); 4829 } 4830 4831 int 4832 iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp, 4833 struct iflib_cloneattach_ctx *clctx) 4834 { 4835 int err; 4836 if_ctx_t ctx; 4837 if_t ifp; 4838 if_softc_ctx_t scctx; 4839 int i; 4840 void *sc; 4841 uint16_t main_txq; 4842 uint16_t main_rxq; 4843 4844 ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO); 4845 sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); 4846 ctx->ifc_flags |= IFC_SC_ALLOCATED; 4847 if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL)) 4848 ctx->ifc_flags |= IFC_PSEUDO; 4849 4850 ctx->ifc_sctx = sctx; 4851 ctx->ifc_softc = sc; 4852 ctx->ifc_dev = dev; 4853 4854 if ((err = iflib_register(ctx)) != 0) { 4855 device_printf(dev, "%s: iflib_register failed %d\n", __func__, err); 4856 goto fail_ctx_free; 4857 } 4858 iflib_add_device_sysctl_pre(ctx); 4859 4860 scctx = &ctx->ifc_softc_ctx; 4861 ifp = ctx->ifc_ifp; 4862 4863 iflib_reset_qvalues(ctx); 4864 CTX_LOCK(ctx); 4865 if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { 4866 device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); 4867 goto fail_unlock; 4868 } 4869 if (sctx->isc_flags & IFLIB_GEN_MAC) 4870 ether_gen_addr(ifp, &ctx->ifc_mac); 4871 if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name, 4872 clctx->cc_params)) != 0) { 4873 device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err); 4874 goto fail_unlock; 4875 } 4876 #ifdef INVARIANTS 4877 if (scctx->isc_capabilities & IFCAP_TXCSUM) 4878 MPASS(scctx->isc_tx_csum_flags); 4879 #endif 4880 4881 if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_LINKSTATE); 4882 if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE); 4883 4884 ifp->if_flags |= IFF_NOGROUP; 4885 if (sctx->isc_flags & IFLIB_PSEUDO) { 4886 ifmedia_add(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO, 0, NULL); 4887 ifmedia_set(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO); 4888 if (sctx->isc_flags & IFLIB_PSEUDO_ETHER) { 4889 ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet); 4890 } else { 4891 if_attach(ctx->ifc_ifp); 4892 bpfattach(ctx->ifc_ifp, DLT_NULL, sizeof(u_int32_t)); 4893 } 4894 4895 if ((err = IFDI_ATTACH_POST(ctx)) != 0) { 4896 device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); 4897 goto fail_detach; 4898 } 4899 *ctxp = ctx; 4900 4901 /* 4902 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported. 4903 * This must appear after the call to ether_ifattach() because 4904 * ether_ifattach() sets if_hdrlen to the default value. 
4905 */ 4906 if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) 4907 if_setifheaderlen(ifp, 4908 sizeof(struct ether_vlan_header)); 4909 4910 if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); 4911 iflib_add_device_sysctl_post(ctx); 4912 ctx->ifc_flags |= IFC_INIT_DONE; 4913 CTX_UNLOCK(ctx); 4914 return (0); 4915 } 4916 ifmedia_add(ctx->ifc_mediap, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 4917 ifmedia_add(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO, 0, NULL); 4918 ifmedia_set(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO); 4919 4920 _iflib_pre_assert(scctx); 4921 ctx->ifc_txrx = *scctx->isc_txrx; 4922 4923 if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets)) 4924 scctx->isc_ntxqsets = scctx->isc_ntxqsets_max; 4925 if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets)) 4926 scctx->isc_nrxqsets = scctx->isc_nrxqsets_max; 4927 4928 main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0; 4929 main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0; 4930 4931 /* XXX change for per-queue sizes */ 4932 device_printf(dev, "Using %d TX descriptors and %d RX descriptors\n", 4933 scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]); 4934 4935 if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] / 4936 MAX_SINGLE_PACKET_FRACTION) 4937 scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] / 4938 MAX_SINGLE_PACKET_FRACTION); 4939 if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] / 4940 MAX_SINGLE_PACKET_FRACTION) 4941 scctx->isc_tx_tso_segments_max = max(1, 4942 scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION); 4943 4944 /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */ 4945 if (if_getcapabilities(ifp) & IFCAP_TSO) { 4946 /* 4947 * The stack can't handle a TSO size larger than IP_MAXPACKET, 4948 * but some MACs do. 4949 */ 4950 if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max, 4951 IP_MAXPACKET)); 4952 /* 4953 * Take maximum number of m_pullup(9)'s in iflib_parse_header() 4954 * into account. In the worst case, each of these calls will 4955 * add another mbuf and, thus, the requirement for another DMA 4956 * segment. So for best performance, it doesn't make sense to 4957 * advertize a maximum of TSO segments that typically will 4958 * require defragmentation in iflib_encap(). 4959 */ 4960 if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3); 4961 if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max); 4962 } 4963 if (scctx->isc_rss_table_size == 0) 4964 scctx->isc_rss_table_size = 64; 4965 scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1; 4966 4967 GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx); 4968 /* XXX format name */ 4969 taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, 4970 NULL, NULL, "admin"); 4971 4972 /* XXX --- can support > 1 -- but keep it simple for now */ 4973 scctx->isc_intr = IFLIB_INTR_LEGACY; 4974 4975 /* Get memory for the station queues */ 4976 if ((err = iflib_queues_alloc(ctx))) { 4977 device_printf(dev, "Unable to allocate queue memory\n"); 4978 goto fail_iflib_detach; 4979 } 4980 4981 if ((err = iflib_qset_structures_setup(ctx))) { 4982 device_printf(dev, "qset structure setup failed %d\n", err); 4983 goto fail_queues; 4984 } 4985 4986 /* 4987 * XXX What if anything do we want to do about interrupts? 
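	 * (As it stands, the pseudo path forces isc_intr to IFLIB_INTR_LEGACY
	 * above and never allocates a hardware interrupt.)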
4988 */ 4989 ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet); 4990 if ((err = IFDI_ATTACH_POST(ctx)) != 0) { 4991 device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); 4992 goto fail_detach; 4993 } 4994 4995 /* 4996 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported. 4997 * This must appear after the call to ether_ifattach() because 4998 * ether_ifattach() sets if_hdrlen to the default value. 4999 */ 5000 if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) 5001 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 5002 5003 /* XXX handle more than one queue */ 5004 for (i = 0; i < scctx->isc_nrxqsets; i++) 5005 IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl); 5006 5007 *ctxp = ctx; 5008 5009 if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); 5010 iflib_add_device_sysctl_post(ctx); 5011 ctx->ifc_flags |= IFC_INIT_DONE; 5012 CTX_UNLOCK(ctx); 5013 5014 return (0); 5015 fail_detach: 5016 ether_ifdetach(ctx->ifc_ifp); 5017 fail_queues: 5018 iflib_tx_structures_free(ctx); 5019 iflib_rx_structures_free(ctx); 5020 fail_iflib_detach: 5021 IFDI_DETACH(ctx); 5022 fail_unlock: 5023 CTX_UNLOCK(ctx); 5024 iflib_deregister(ctx); 5025 fail_ctx_free: 5026 free(ctx->ifc_softc, M_IFLIB); 5027 free(ctx, M_IFLIB); 5028 return (err); 5029 } 5030 5031 int 5032 iflib_pseudo_deregister(if_ctx_t ctx) 5033 { 5034 if_t ifp = ctx->ifc_ifp; 5035 if_shared_ctx_t sctx = ctx->ifc_sctx; 5036 iflib_txq_t txq; 5037 iflib_rxq_t rxq; 5038 int i, j; 5039 struct taskqgroup *tqg; 5040 iflib_fl_t fl; 5041 5042 /* Unregister VLAN event handlers early */ 5043 iflib_unregister_vlan_handlers(ctx); 5044 5045 if ((sctx->isc_flags & IFLIB_PSEUDO) && 5046 (sctx->isc_flags & IFLIB_PSEUDO_ETHER) == 0) { 5047 bpfdetach(ifp); 5048 if_detach(ifp); 5049 } else { 5050 ether_ifdetach(ifp); 5051 } 5052 /* XXX drain any dependent tasks */ 5053 tqg = qgroup_if_io_tqg; 5054 for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) { 5055 callout_drain(&txq->ift_timer); 5056 if (txq->ift_task.gt_uniq != NULL) 5057 taskqgroup_detach(tqg, &txq->ift_task); 5058 } 5059 for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { 5060 callout_drain(&rxq->ifr_watchdog); 5061 if (rxq->ifr_task.gt_uniq != NULL) 5062 taskqgroup_detach(tqg, &rxq->ifr_task); 5063 5064 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) 5065 free(fl->ifl_rx_bitmap, M_IFLIB); 5066 } 5067 tqg = qgroup_if_config_tqg; 5068 if (ctx->ifc_admin_task.gt_uniq != NULL) 5069 taskqgroup_detach(tqg, &ctx->ifc_admin_task); 5070 if (ctx->ifc_vflr_task.gt_uniq != NULL) 5071 taskqgroup_detach(tqg, &ctx->ifc_vflr_task); 5072 5073 iflib_tx_structures_free(ctx); 5074 iflib_rx_structures_free(ctx); 5075 5076 iflib_deregister(ctx); 5077 5078 if (ctx->ifc_flags & IFC_SC_ALLOCATED) 5079 free(ctx->ifc_softc, M_IFLIB); 5080 free(ctx, M_IFLIB); 5081 return (0); 5082 } 5083 5084 int 5085 iflib_device_attach(device_t dev) 5086 { 5087 if_ctx_t ctx; 5088 if_shared_ctx_t sctx; 5089 5090 if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) 5091 return (ENOTSUP); 5092 5093 pci_enable_busmaster(dev); 5094 5095 return (iflib_device_register(dev, NULL, sctx, &ctx)); 5096 } 5097 5098 int 5099 iflib_device_deregister(if_ctx_t ctx) 5100 { 5101 if_t ifp = ctx->ifc_ifp; 5102 iflib_txq_t txq; 5103 iflib_rxq_t rxq; 5104 device_t dev = ctx->ifc_dev; 5105 int i, j; 5106 struct taskqgroup *tqg; 5107 iflib_fl_t fl; 5108 5109 /* Make sure VLANS are not using driver */ 5110 if (if_vlantrunkinuse(ifp)) { 5111 device_printf(dev, "Vlan in use, detach 
first\n"); 5112 return (EBUSY); 5113 } 5114 #ifdef PCI_IOV 5115 if (!CTX_IS_VF(ctx) && pci_iov_detach(dev) != 0) { 5116 device_printf(dev, "SR-IOV in use; detach first.\n"); 5117 return (EBUSY); 5118 } 5119 #endif 5120 5121 STATE_LOCK(ctx); 5122 ctx->ifc_flags |= IFC_IN_DETACH; 5123 STATE_UNLOCK(ctx); 5124 5125 /* Unregister VLAN handlers before calling iflib_stop() */ 5126 iflib_unregister_vlan_handlers(ctx); 5127 5128 iflib_netmap_detach(ifp); 5129 ether_ifdetach(ifp); 5130 5131 CTX_LOCK(ctx); 5132 iflib_stop(ctx); 5133 CTX_UNLOCK(ctx); 5134 5135 iflib_rem_pfil(ctx); 5136 if (ctx->ifc_led_dev != NULL) 5137 led_destroy(ctx->ifc_led_dev); 5138 /* XXX drain any dependent tasks */ 5139 tqg = qgroup_if_io_tqg; 5140 for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) { 5141 callout_drain(&txq->ift_timer); 5142 if (txq->ift_task.gt_uniq != NULL) 5143 taskqgroup_detach(tqg, &txq->ift_task); 5144 } 5145 for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { 5146 if (rxq->ifr_task.gt_uniq != NULL) 5147 taskqgroup_detach(tqg, &rxq->ifr_task); 5148 5149 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) 5150 free(fl->ifl_rx_bitmap, M_IFLIB); 5151 } 5152 tqg = qgroup_if_config_tqg; 5153 if (ctx->ifc_admin_task.gt_uniq != NULL) 5154 taskqgroup_detach(tqg, &ctx->ifc_admin_task); 5155 if (ctx->ifc_vflr_task.gt_uniq != NULL) 5156 taskqgroup_detach(tqg, &ctx->ifc_vflr_task); 5157 CTX_LOCK(ctx); 5158 IFDI_DETACH(ctx); 5159 CTX_UNLOCK(ctx); 5160 5161 /* ether_ifdetach calls if_qflush - lock must be destroy afterwards*/ 5162 iflib_free_intr_mem(ctx); 5163 5164 bus_generic_detach(dev); 5165 5166 iflib_tx_structures_free(ctx); 5167 iflib_rx_structures_free(ctx); 5168 5169 iflib_deregister(ctx); 5170 5171 device_set_softc(ctx->ifc_dev, NULL); 5172 if (ctx->ifc_flags & IFC_SC_ALLOCATED) 5173 free(ctx->ifc_softc, M_IFLIB); 5174 unref_ctx_core_offset(ctx); 5175 free(ctx, M_IFLIB); 5176 return (0); 5177 } 5178 5179 static void 5180 iflib_free_intr_mem(if_ctx_t ctx) 5181 { 5182 5183 if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) { 5184 iflib_irq_free(ctx, &ctx->ifc_legacy_irq); 5185 } 5186 if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) { 5187 pci_release_msi(ctx->ifc_dev); 5188 } 5189 if (ctx->ifc_msix_mem != NULL) { 5190 bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY, 5191 rman_get_rid(ctx->ifc_msix_mem), ctx->ifc_msix_mem); 5192 ctx->ifc_msix_mem = NULL; 5193 } 5194 } 5195 5196 int 5197 iflib_device_detach(device_t dev) 5198 { 5199 if_ctx_t ctx = device_get_softc(dev); 5200 5201 return (iflib_device_deregister(ctx)); 5202 } 5203 5204 int 5205 iflib_device_suspend(device_t dev) 5206 { 5207 if_ctx_t ctx = device_get_softc(dev); 5208 5209 CTX_LOCK(ctx); 5210 IFDI_SUSPEND(ctx); 5211 CTX_UNLOCK(ctx); 5212 5213 return bus_generic_suspend(dev); 5214 } 5215 int 5216 iflib_device_shutdown(device_t dev) 5217 { 5218 if_ctx_t ctx = device_get_softc(dev); 5219 5220 CTX_LOCK(ctx); 5221 IFDI_SHUTDOWN(ctx); 5222 CTX_UNLOCK(ctx); 5223 5224 return bus_generic_suspend(dev); 5225 } 5226 5227 5228 int 5229 iflib_device_resume(device_t dev) 5230 { 5231 if_ctx_t ctx = device_get_softc(dev); 5232 iflib_txq_t txq = ctx->ifc_txqs; 5233 5234 CTX_LOCK(ctx); 5235 IFDI_RESUME(ctx); 5236 iflib_if_init_locked(ctx); 5237 CTX_UNLOCK(ctx); 5238 for (int i = 0; i < NTXQSETS(ctx); i++, txq++) 5239 iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); 5240 5241 return (bus_generic_resume(dev)); 5242 } 5243 5244 int 5245 iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params) 5246 { 5247 
int error; 5248 if_ctx_t ctx = device_get_softc(dev); 5249 5250 CTX_LOCK(ctx); 5251 error = IFDI_IOV_INIT(ctx, num_vfs, params); 5252 CTX_UNLOCK(ctx); 5253 5254 return (error); 5255 } 5256 5257 void 5258 iflib_device_iov_uninit(device_t dev) 5259 { 5260 if_ctx_t ctx = device_get_softc(dev); 5261 5262 CTX_LOCK(ctx); 5263 IFDI_IOV_UNINIT(ctx); 5264 CTX_UNLOCK(ctx); 5265 } 5266 5267 int 5268 iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) 5269 { 5270 int error; 5271 if_ctx_t ctx = device_get_softc(dev); 5272 5273 CTX_LOCK(ctx); 5274 error = IFDI_IOV_VF_ADD(ctx, vfnum, params); 5275 CTX_UNLOCK(ctx); 5276 5277 return (error); 5278 } 5279 5280 /********************************************************************* 5281 * 5282 * MODULE FUNCTION DEFINITIONS 5283 * 5284 **********************************************************************/ 5285 5286 /* 5287 * - Start a fast taskqueue thread for each core 5288 * - Start a taskqueue for control operations 5289 */ 5290 static int 5291 iflib_module_init(void) 5292 { 5293 return (0); 5294 } 5295 5296 static int 5297 iflib_module_event_handler(module_t mod, int what, void *arg) 5298 { 5299 int err; 5300 5301 switch (what) { 5302 case MOD_LOAD: 5303 if ((err = iflib_module_init()) != 0) 5304 return (err); 5305 break; 5306 case MOD_UNLOAD: 5307 return (EBUSY); 5308 default: 5309 return (EOPNOTSUPP); 5310 } 5311 5312 return (0); 5313 } 5314 5315 /********************************************************************* 5316 * 5317 * PUBLIC FUNCTION DEFINITIONS 5318 * ordered as in iflib.h 5319 * 5320 **********************************************************************/ 5321 5322 5323 static void 5324 _iflib_assert(if_shared_ctx_t sctx) 5325 { 5326 int i; 5327 5328 MPASS(sctx->isc_tx_maxsize); 5329 MPASS(sctx->isc_tx_maxsegsize); 5330 5331 MPASS(sctx->isc_rx_maxsize); 5332 MPASS(sctx->isc_rx_nsegments); 5333 MPASS(sctx->isc_rx_maxsegsize); 5334 5335 MPASS(sctx->isc_nrxqs >= 1 && sctx->isc_nrxqs <= 8); 5336 for (i = 0; i < sctx->isc_nrxqs; i++) { 5337 MPASS(sctx->isc_nrxd_min[i]); 5338 MPASS(powerof2(sctx->isc_nrxd_min[i])); 5339 MPASS(sctx->isc_nrxd_max[i]); 5340 MPASS(powerof2(sctx->isc_nrxd_max[i])); 5341 MPASS(sctx->isc_nrxd_default[i]); 5342 MPASS(powerof2(sctx->isc_nrxd_default[i])); 5343 } 5344 5345 MPASS(sctx->isc_ntxqs >= 1 && sctx->isc_ntxqs <= 8); 5346 for (i = 0; i < sctx->isc_ntxqs; i++) { 5347 MPASS(sctx->isc_ntxd_min[i]); 5348 MPASS(powerof2(sctx->isc_ntxd_min[i])); 5349 MPASS(sctx->isc_ntxd_max[i]); 5350 MPASS(powerof2(sctx->isc_ntxd_max[i])); 5351 MPASS(sctx->isc_ntxd_default[i]); 5352 MPASS(powerof2(sctx->isc_ntxd_default[i])); 5353 } 5354 } 5355 5356 static void 5357 _iflib_pre_assert(if_softc_ctx_t scctx) 5358 { 5359 5360 MPASS(scctx->isc_txrx->ift_txd_encap); 5361 MPASS(scctx->isc_txrx->ift_txd_flush); 5362 MPASS(scctx->isc_txrx->ift_txd_credits_update); 5363 MPASS(scctx->isc_txrx->ift_rxd_available); 5364 MPASS(scctx->isc_txrx->ift_rxd_pkt_get); 5365 MPASS(scctx->isc_txrx->ift_rxd_refill); 5366 MPASS(scctx->isc_txrx->ift_rxd_flush); 5367 } 5368 5369 static int 5370 iflib_register(if_ctx_t ctx) 5371 { 5372 if_shared_ctx_t sctx = ctx->ifc_sctx; 5373 driver_t *driver = sctx->isc_driver; 5374 device_t dev = ctx->ifc_dev; 5375 if_t ifp; 5376 u_char type; 5377 int iflags; 5378 5379 if ((sctx->isc_flags & IFLIB_PSEUDO) == 0) 5380 _iflib_assert(sctx); 5381 5382 CTX_LOCK_INIT(ctx); 5383 STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev)); 5384 if (sctx->isc_flags & IFLIB_PSEUDO) { 5385 if (sctx->isc_flags & 
IFLIB_PSEUDO_ETHER) 5386 type = IFT_ETHER; 5387 else 5388 type = IFT_PPP; 5389 } else 5390 type = IFT_ETHER; 5391 ifp = ctx->ifc_ifp = if_alloc(type); 5392 if (ifp == NULL) { 5393 device_printf(dev, "can not allocate ifnet structure\n"); 5394 return (ENOMEM); 5395 } 5396 5397 /* 5398 * Initialize our context's device specific methods 5399 */ 5400 kobj_init((kobj_t) ctx, (kobj_class_t) driver); 5401 kobj_class_compile((kobj_class_t) driver); 5402 5403 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 5404 if_setsoftc(ifp, ctx); 5405 if_setdev(ifp, dev); 5406 if_setinitfn(ifp, iflib_if_init); 5407 if_setioctlfn(ifp, iflib_if_ioctl); 5408 #ifdef ALTQ 5409 if_setstartfn(ifp, iflib_altq_if_start); 5410 if_settransmitfn(ifp, iflib_altq_if_transmit); 5411 if_setsendqready(ifp); 5412 #else 5413 if_settransmitfn(ifp, iflib_if_transmit); 5414 #endif 5415 if_setqflushfn(ifp, iflib_if_qflush); 5416 iflags = IFF_MULTICAST | IFF_KNOWSEPOCH; 5417 5418 if ((sctx->isc_flags & IFLIB_PSEUDO) && 5419 (sctx->isc_flags & IFLIB_PSEUDO_ETHER) == 0) 5420 iflags |= IFF_POINTOPOINT; 5421 else 5422 iflags |= IFF_BROADCAST | IFF_SIMPLEX; 5423 if_setflags(ifp, iflags); 5424 ctx->ifc_vlan_attach_event = 5425 EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx, 5426 EVENTHANDLER_PRI_FIRST); 5427 ctx->ifc_vlan_detach_event = 5428 EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx, 5429 EVENTHANDLER_PRI_FIRST); 5430 5431 if ((sctx->isc_flags & IFLIB_DRIVER_MEDIA) == 0) { 5432 ctx->ifc_mediap = &ctx->ifc_media; 5433 ifmedia_init(ctx->ifc_mediap, IFM_IMASK, 5434 iflib_media_change, iflib_media_status); 5435 } 5436 return (0); 5437 } 5438 5439 static void 5440 iflib_unregister_vlan_handlers(if_ctx_t ctx) 5441 { 5442 /* Unregister VLAN events */ 5443 if (ctx->ifc_vlan_attach_event != NULL) { 5444 EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event); 5445 ctx->ifc_vlan_attach_event = NULL; 5446 } 5447 if (ctx->ifc_vlan_detach_event != NULL) { 5448 EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event); 5449 ctx->ifc_vlan_detach_event = NULL; 5450 } 5451 5452 } 5453 5454 static void 5455 iflib_deregister(if_ctx_t ctx) 5456 { 5457 if_t ifp = ctx->ifc_ifp; 5458 5459 /* Remove all media */ 5460 ifmedia_removeall(&ctx->ifc_media); 5461 5462 /* Ensure that VLAN event handlers are unregistered */ 5463 iflib_unregister_vlan_handlers(ctx); 5464 5465 /* Release kobject reference */ 5466 kobj_delete((kobj_t) ctx, NULL); 5467 5468 /* Free the ifnet structure */ 5469 if_free(ifp); 5470 5471 STATE_LOCK_DESTROY(ctx); 5472 5473 /* ether_ifdetach calls if_qflush - lock must be destroy afterwards*/ 5474 CTX_LOCK_DESTROY(ctx); 5475 } 5476 5477 static int 5478 iflib_queues_alloc(if_ctx_t ctx) 5479 { 5480 if_shared_ctx_t sctx = ctx->ifc_sctx; 5481 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 5482 device_t dev = ctx->ifc_dev; 5483 int nrxqsets = scctx->isc_nrxqsets; 5484 int ntxqsets = scctx->isc_ntxqsets; 5485 iflib_txq_t txq; 5486 iflib_rxq_t rxq; 5487 iflib_fl_t fl = NULL; 5488 int i, j, cpu, err, txconf, rxconf; 5489 iflib_dma_info_t ifdip; 5490 uint32_t *rxqsizes = scctx->isc_rxqsizes; 5491 uint32_t *txqsizes = scctx->isc_txqsizes; 5492 uint8_t nrxqs = sctx->isc_nrxqs; 5493 uint8_t ntxqs = sctx->isc_ntxqs; 5494 int nfree_lists = sctx->isc_nfl ? 
sctx->isc_nfl : 1; 5495 caddr_t *vaddrs; 5496 uint64_t *paddrs; 5497 5498 KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1")); 5499 KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1")); 5500 5501 /* Allocate the TX ring struct memory */ 5502 if (!(ctx->ifc_txqs = 5503 (iflib_txq_t) malloc(sizeof(struct iflib_txq) * 5504 ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { 5505 device_printf(dev, "Unable to allocate TX ring memory\n"); 5506 err = ENOMEM; 5507 goto fail; 5508 } 5509 5510 /* Now allocate the RX */ 5511 if (!(ctx->ifc_rxqs = 5512 (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) * 5513 nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { 5514 device_printf(dev, "Unable to allocate RX ring memory\n"); 5515 err = ENOMEM; 5516 goto rx_fail; 5517 } 5518 5519 txq = ctx->ifc_txqs; 5520 rxq = ctx->ifc_rxqs; 5521 5522 /* 5523 * XXX handle allocation failure 5524 */ 5525 for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) { 5526 /* Set up some basics */ 5527 5528 if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, 5529 M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) { 5530 device_printf(dev, 5531 "Unable to allocate TX DMA info memory\n"); 5532 err = ENOMEM; 5533 goto err_tx_desc; 5534 } 5535 txq->ift_ifdi = ifdip; 5536 for (j = 0; j < ntxqs; j++, ifdip++) { 5537 if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, 0)) { 5538 device_printf(dev, 5539 "Unable to allocate TX descriptors\n"); 5540 err = ENOMEM; 5541 goto err_tx_desc; 5542 } 5543 txq->ift_txd_size[j] = scctx->isc_txd_size[j]; 5544 bzero((void *)ifdip->idi_vaddr, txqsizes[j]); 5545 } 5546 txq->ift_ctx = ctx; 5547 txq->ift_id = i; 5548 if (sctx->isc_flags & IFLIB_HAS_TXCQ) { 5549 txq->ift_br_offset = 1; 5550 } else { 5551 txq->ift_br_offset = 0; 5552 } 5553 /* XXX fix this */ 5554 txq->ift_timer.c_cpu = cpu; 5555 5556 if (iflib_txsd_alloc(txq)) { 5557 device_printf(dev, "Critical Failure setting up TX buffers\n"); 5558 err = ENOMEM; 5559 goto err_tx_desc; 5560 } 5561 5562 /* Initialize the TX lock */ 5563 snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:TX(%d):callout", 5564 device_get_nameunit(dev), txq->ift_id); 5565 mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF); 5566 callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0); 5567 5568 err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain, 5569 iflib_txq_can_drain, M_IFLIB, M_WAITOK); 5570 if (err) { 5571 /* XXX free any allocated rings */ 5572 device_printf(dev, "Unable to allocate buf_ring\n"); 5573 goto err_tx_desc; 5574 } 5575 } 5576 5577 for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) { 5578 /* Set up some basics */ 5579 callout_init(&rxq->ifr_watchdog, 1); 5580 5581 if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, 5582 M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) { 5583 device_printf(dev, 5584 "Unable to allocate RX DMA info memory\n"); 5585 err = ENOMEM; 5586 goto err_tx_desc; 5587 } 5588 5589 rxq->ifr_ifdi = ifdip; 5590 /* XXX this needs to be changed if #rx queues != #tx queues */ 5591 rxq->ifr_ntxqirq = 1; 5592 rxq->ifr_txqid[0] = i; 5593 for (j = 0; j < nrxqs; j++, ifdip++) { 5594 if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, 0)) { 5595 device_printf(dev, 5596 "Unable to allocate RX descriptors\n"); 5597 err = ENOMEM; 5598 goto err_tx_desc; 5599 } 5600 bzero((void *)ifdip->idi_vaddr, rxqsizes[j]); 5601 } 5602 rxq->ifr_ctx = ctx; 5603 rxq->ifr_id = i; 5604 if (sctx->isc_flags & IFLIB_HAS_RXCQ) { 5605 rxq->ifr_fl_offset = 1; 5606 } else { 5607 rxq->ifr_fl_offset = 0; 5608 } 5609 rxq->ifr_nfl = 
nfree_lists; 5610 if (!(fl = 5611 (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) { 5612 device_printf(dev, "Unable to allocate free list memory\n"); 5613 err = ENOMEM; 5614 goto err_tx_desc; 5615 } 5616 rxq->ifr_fl = fl; 5617 for (j = 0; j < nfree_lists; j++) { 5618 fl[j].ifl_rxq = rxq; 5619 fl[j].ifl_id = j; 5620 fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset]; 5621 fl[j].ifl_rxd_size = scctx->isc_rxd_size[j]; 5622 } 5623 /* Allocate receive buffers for the ring */ 5624 if (iflib_rxsd_alloc(rxq)) { 5625 device_printf(dev, 5626 "Critical Failure setting up receive buffers\n"); 5627 err = ENOMEM; 5628 goto err_rx_desc; 5629 } 5630 5631 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) 5632 fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, 5633 M_WAITOK); 5634 } 5635 5636 /* TXQs */ 5637 vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); 5638 paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); 5639 for (i = 0; i < ntxqsets; i++) { 5640 iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi; 5641 5642 for (j = 0; j < ntxqs; j++, di++) { 5643 vaddrs[i*ntxqs + j] = di->idi_vaddr; 5644 paddrs[i*ntxqs + j] = di->idi_paddr; 5645 } 5646 } 5647 if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) { 5648 device_printf(ctx->ifc_dev, 5649 "Unable to allocate device TX queue\n"); 5650 iflib_tx_structures_free(ctx); 5651 free(vaddrs, M_IFLIB); 5652 free(paddrs, M_IFLIB); 5653 goto err_rx_desc; 5654 } 5655 free(vaddrs, M_IFLIB); 5656 free(paddrs, M_IFLIB); 5657 5658 /* RXQs */ 5659 vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); 5660 paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); 5661 for (i = 0; i < nrxqsets; i++) { 5662 iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi; 5663 5664 for (j = 0; j < nrxqs; j++, di++) { 5665 vaddrs[i*nrxqs + j] = di->idi_vaddr; 5666 paddrs[i*nrxqs + j] = di->idi_paddr; 5667 } 5668 } 5669 if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) { 5670 device_printf(ctx->ifc_dev, 5671 "Unable to allocate device RX queue\n"); 5672 iflib_tx_structures_free(ctx); 5673 free(vaddrs, M_IFLIB); 5674 free(paddrs, M_IFLIB); 5675 goto err_rx_desc; 5676 } 5677 free(vaddrs, M_IFLIB); 5678 free(paddrs, M_IFLIB); 5679 5680 return (0); 5681 5682 /* XXX handle allocation failure changes */ 5683 err_rx_desc: 5684 err_tx_desc: 5685 rx_fail: 5686 if (ctx->ifc_rxqs != NULL) 5687 free(ctx->ifc_rxqs, M_IFLIB); 5688 ctx->ifc_rxqs = NULL; 5689 if (ctx->ifc_txqs != NULL) 5690 free(ctx->ifc_txqs, M_IFLIB); 5691 ctx->ifc_txqs = NULL; 5692 fail: 5693 return (err); 5694 } 5695 5696 static int 5697 iflib_tx_structures_setup(if_ctx_t ctx) 5698 { 5699 iflib_txq_t txq = ctx->ifc_txqs; 5700 int i; 5701 5702 for (i = 0; i < NTXQSETS(ctx); i++, txq++) 5703 iflib_txq_setup(txq); 5704 5705 return (0); 5706 } 5707 5708 static void 5709 iflib_tx_structures_free(if_ctx_t ctx) 5710 { 5711 iflib_txq_t txq = ctx->ifc_txqs; 5712 if_shared_ctx_t sctx = ctx->ifc_sctx; 5713 int i, j; 5714 5715 for (i = 0; i < NTXQSETS(ctx); i++, txq++) { 5716 for (j = 0; j < sctx->isc_ntxqs; j++) 5717 iflib_dma_free(&txq->ift_ifdi[j]); 5718 iflib_txq_destroy(txq); 5719 } 5720 free(ctx->ifc_txqs, M_IFLIB); 5721 ctx->ifc_txqs = NULL; 5722 IFDI_QUEUES_FREE(ctx); 5723 } 5724 5725 /********************************************************************* 5726 * 5727 * Initialize all receive rings. 
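 * When the interface has IFCAP_LRO enabled, per-ring LRO state is also
 * set up here via tcp_lro_init_args(); a failure unwinds the rings that
 * were already initialized.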
5728 *
5729 **********************************************************************/
5730 static int
5731 iflib_rx_structures_setup(if_ctx_t ctx)
5732 {
5733 iflib_rxq_t rxq = ctx->ifc_rxqs;
5734 int q;
5735 #if defined(INET6) || defined(INET)
5736 int err, i;
5737 #endif
5738 
5739 for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
5740 #if defined(INET6) || defined(INET)
5741 if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO) {
5742 err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
5743 TCP_LRO_ENTRIES, min(1024,
5744 ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]));
5745 if (err != 0) {
5746 device_printf(ctx->ifc_dev,
5747 "LRO Initialization failed!\n");
5748 goto fail;
5749 }
5750 }
5751 #endif
5752 IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
5753 }
5754 return (0);
5755 #if defined(INET6) || defined(INET)
5756 fail:
5757 /*
5758 * Free the LRO resources allocated so far; we only handle the
5759 * rings that completed, since the failing ring will have cleaned
5760 * up after itself. 'q' failed, so it's the terminus.
5761 */
5762 rxq = ctx->ifc_rxqs;
5763 for (i = 0; i < q; ++i, rxq++) {
5764 if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO)
5765 tcp_lro_free(&rxq->ifr_lc);
5766 }
5767 return (err);
5768 #endif
5769 }
5770 
5771 /*********************************************************************
5772 *
5773 * Free all receive rings.
5774 *
5775 **********************************************************************/
5776 static void
5777 iflib_rx_structures_free(if_ctx_t ctx)
5778 {
5779 iflib_rxq_t rxq = ctx->ifc_rxqs;
5780 if_shared_ctx_t sctx = ctx->ifc_sctx;
5781 int i, j;
5782 
5783 for (i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
5784 for (j = 0; j < sctx->isc_nrxqs; j++)
5785 iflib_dma_free(&rxq->ifr_ifdi[j]);
5786 iflib_rx_sds_free(rxq);
5787 #if defined(INET6) || defined(INET)
5788 if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO)
5789 tcp_lro_free(&rxq->ifr_lc);
5790 #endif
5791 }
5792 free(ctx->ifc_rxqs, M_IFLIB);
5793 ctx->ifc_rxqs = NULL;
5794 }
5795 
5796 static int
5797 iflib_qset_structures_setup(if_ctx_t ctx)
5798 {
5799 int err;
5800 
5801 /*
5802 * It is expected that the caller takes care of freeing queues if this
5803 * fails.
5804 */ 5805 if ((err = iflib_tx_structures_setup(ctx)) != 0) { 5806 device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err); 5807 return (err); 5808 } 5809 5810 if ((err = iflib_rx_structures_setup(ctx)) != 0) 5811 device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err); 5812 5813 return (err); 5814 } 5815 5816 int 5817 iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid, 5818 driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name) 5819 { 5820 5821 return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name)); 5822 } 5823 5824 #ifdef SMP 5825 static int 5826 find_nth(if_ctx_t ctx, int qid) 5827 { 5828 cpuset_t cpus; 5829 int i, cpuid, eqid, count; 5830 5831 CPU_COPY(&ctx->ifc_cpus, &cpus); 5832 count = CPU_COUNT(&cpus); 5833 eqid = qid % count; 5834 /* clear up to the qid'th bit */ 5835 for (i = 0; i < eqid; i++) { 5836 cpuid = CPU_FFS(&cpus); 5837 MPASS(cpuid != 0); 5838 CPU_CLR(cpuid-1, &cpus); 5839 } 5840 cpuid = CPU_FFS(&cpus); 5841 MPASS(cpuid != 0); 5842 return (cpuid-1); 5843 } 5844 5845 #ifdef SCHED_ULE 5846 extern struct cpu_group *cpu_top; /* CPU topology */ 5847 5848 static int 5849 find_child_with_core(int cpu, struct cpu_group *grp) 5850 { 5851 int i; 5852 5853 if (grp->cg_children == 0) 5854 return -1; 5855 5856 MPASS(grp->cg_child); 5857 for (i = 0; i < grp->cg_children; i++) { 5858 if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask)) 5859 return i; 5860 } 5861 5862 return -1; 5863 } 5864 5865 /* 5866 * Find the nth "close" core to the specified core 5867 * "close" is defined as the deepest level that shares 5868 * at least an L2 cache. With threads, this will be 5869 * threads on the same core. If the shared cache is L3 5870 * or higher, simply returns the same core. 5871 */ 5872 static int 5873 find_close_core(int cpu, int core_offset) 5874 { 5875 struct cpu_group *grp; 5876 int i; 5877 int fcpu; 5878 cpuset_t cs; 5879 5880 grp = cpu_top; 5881 if (grp == NULL) 5882 return cpu; 5883 i = 0; 5884 while ((i = find_child_with_core(cpu, grp)) != -1) { 5885 /* If the child only has one cpu, don't descend */ 5886 if (grp->cg_child[i].cg_count <= 1) 5887 break; 5888 grp = &grp->cg_child[i]; 5889 } 5890 5891 /* If they don't share at least an L2 cache, use the same CPU */ 5892 if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE) 5893 return cpu; 5894 5895 /* Now pick one */ 5896 CPU_COPY(&grp->cg_mask, &cs); 5897 5898 /* Add the selected CPU offset to core offset. 
*/
5899 for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) {
5900 if (fcpu - 1 == cpu)
5901 break;
5902 CPU_CLR(fcpu - 1, &cs);
5903 }
5904 MPASS(fcpu);
5905 
5906 core_offset += i;
5907 
5908 CPU_COPY(&grp->cg_mask, &cs);
5909 for (i = core_offset % grp->cg_count; i > 0; i--) {
5910 MPASS(CPU_FFS(&cs));
5911 CPU_CLR(CPU_FFS(&cs) - 1, &cs);
5912 }
5913 MPASS(CPU_FFS(&cs));
5914 return CPU_FFS(&cs) - 1;
5915 }
5916 #else
5917 static int
5918 find_close_core(int cpu, int core_offset __unused)
5919 {
5920 return cpu;
5921 }
5922 #endif
5923 
5924 static int
5925 get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid)
5926 {
5927 switch (type) {
5928 case IFLIB_INTR_TX:
5929 /* TX queues get cores which share at least an L2 cache with the corresponding RX queue */
5930 /* XXX handle multiple RX threads per core and more than two cores per L2 group */
5931 return qid / CPU_COUNT(&ctx->ifc_cpus) + 1;
5932 case IFLIB_INTR_RX:
5933 case IFLIB_INTR_RXTX:
5934 /* RX queues get the specified core */
5935 return qid / CPU_COUNT(&ctx->ifc_cpus);
5936 default:
5937 return -1;
5938 }
5939 }
5940 #else
5941 #define get_core_offset(ctx, type, qid) CPU_FIRST()
5942 #define find_close_core(cpuid, tid) CPU_FIRST()
5943 #define find_nth(ctx, gid) CPU_FIRST()
5944 #endif
5945 
5946 /* Just to avoid copy/paste */
5947 static inline int
5948 iflib_irq_set_affinity(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
5949 int qid, struct grouptask *gtask, struct taskqgroup *tqg, void *uniq,
5950 const char *name)
5951 {
5952 device_t dev;
5953 int co, cpuid, err, tid;
5954 
5955 dev = ctx->ifc_dev;
5956 co = ctx->ifc_sysctl_core_offset;
5957 if (ctx->ifc_sysctl_separate_txrx && type == IFLIB_INTR_TX)
5958 co += ctx->ifc_softc_ctx.isc_nrxqsets;
5959 cpuid = find_nth(ctx, qid + co);
5960 tid = get_core_offset(ctx, type, qid);
5961 if (tid < 0) {
5962 device_printf(dev, "get_core_offset failed\n");
5963 return (EOPNOTSUPP);
5964 }
5965 cpuid = find_close_core(cpuid, tid);
5966 err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, dev, irq->ii_res,
5967 name);
5968 if (err) {
5969 device_printf(dev, "taskqgroup_attach_cpu failed %d\n", err);
5970 return (err);
5971 }
5972 #ifdef notyet
5973 if (cpuid > ctx->ifc_cpuid_highest)
5974 ctx->ifc_cpuid_highest = cpuid;
5975 #endif
5976 return (0);
5977 }
5978 
5979 int
5980 iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
5981 iflib_intr_type_t type, driver_filter_t *filter,
5982 void *filter_arg, int qid, const char *name)
5983 {
5984 device_t dev;
5985 struct grouptask *gtask;
5986 struct taskqgroup *tqg;
5987 iflib_filter_info_t info;
5988 gtask_fn_t *fn;
5989 int tqrid, err;
5990 driver_filter_t *intr_fast;
5991 void *q;
5992 
5993 info = &ctx->ifc_filter_info;
5994 tqrid = rid;
5995 
5996 switch (type) {
5997 /* XXX merge tx/rx for netmap?
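 * For now the RXTX case below reuses the RX task but installs
 * iflib_fast_intr_rxtx, which also checks the TX queues tied to
 * this RX queue.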
*/ 5998 case IFLIB_INTR_TX: 5999 q = &ctx->ifc_txqs[qid]; 6000 info = &ctx->ifc_txqs[qid].ift_filter_info; 6001 gtask = &ctx->ifc_txqs[qid].ift_task; 6002 tqg = qgroup_if_io_tqg; 6003 fn = _task_fn_tx; 6004 intr_fast = iflib_fast_intr; 6005 GROUPTASK_INIT(gtask, 0, fn, q); 6006 ctx->ifc_flags |= IFC_NETMAP_TX_IRQ; 6007 break; 6008 case IFLIB_INTR_RX: 6009 q = &ctx->ifc_rxqs[qid]; 6010 info = &ctx->ifc_rxqs[qid].ifr_filter_info; 6011 gtask = &ctx->ifc_rxqs[qid].ifr_task; 6012 tqg = qgroup_if_io_tqg; 6013 fn = _task_fn_rx; 6014 intr_fast = iflib_fast_intr; 6015 NET_GROUPTASK_INIT(gtask, 0, fn, q); 6016 break; 6017 case IFLIB_INTR_RXTX: 6018 q = &ctx->ifc_rxqs[qid]; 6019 info = &ctx->ifc_rxqs[qid].ifr_filter_info; 6020 gtask = &ctx->ifc_rxqs[qid].ifr_task; 6021 tqg = qgroup_if_io_tqg; 6022 fn = _task_fn_rx; 6023 intr_fast = iflib_fast_intr_rxtx; 6024 NET_GROUPTASK_INIT(gtask, 0, fn, q); 6025 break; 6026 case IFLIB_INTR_ADMIN: 6027 q = ctx; 6028 tqrid = -1; 6029 info = &ctx->ifc_filter_info; 6030 gtask = &ctx->ifc_admin_task; 6031 tqg = qgroup_if_config_tqg; 6032 fn = _task_fn_admin; 6033 intr_fast = iflib_fast_intr_ctx; 6034 break; 6035 default: 6036 device_printf(ctx->ifc_dev, "%s: unknown net intr type\n", 6037 __func__); 6038 return (EINVAL); 6039 } 6040 6041 info->ifi_filter = filter; 6042 info->ifi_filter_arg = filter_arg; 6043 info->ifi_task = gtask; 6044 info->ifi_ctx = q; 6045 6046 dev = ctx->ifc_dev; 6047 err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name); 6048 if (err != 0) { 6049 device_printf(dev, "_iflib_irq_alloc failed %d\n", err); 6050 return (err); 6051 } 6052 if (type == IFLIB_INTR_ADMIN) 6053 return (0); 6054 6055 if (tqrid != -1) { 6056 err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg, 6057 q, name); 6058 if (err) 6059 return (err); 6060 } else { 6061 taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name); 6062 } 6063 6064 return (0); 6065 } 6066 6067 void 6068 iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, const char *name) 6069 { 6070 struct grouptask *gtask; 6071 struct taskqgroup *tqg; 6072 gtask_fn_t *fn; 6073 void *q; 6074 int err; 6075 6076 switch (type) { 6077 case IFLIB_INTR_TX: 6078 q = &ctx->ifc_txqs[qid]; 6079 gtask = &ctx->ifc_txqs[qid].ift_task; 6080 tqg = qgroup_if_io_tqg; 6081 fn = _task_fn_tx; 6082 GROUPTASK_INIT(gtask, 0, fn, q); 6083 break; 6084 case IFLIB_INTR_RX: 6085 q = &ctx->ifc_rxqs[qid]; 6086 gtask = &ctx->ifc_rxqs[qid].ifr_task; 6087 tqg = qgroup_if_io_tqg; 6088 fn = _task_fn_rx; 6089 NET_GROUPTASK_INIT(gtask, 0, fn, q); 6090 break; 6091 case IFLIB_INTR_IOV: 6092 q = ctx; 6093 gtask = &ctx->ifc_vflr_task; 6094 tqg = qgroup_if_config_tqg; 6095 fn = _task_fn_iov; 6096 GROUPTASK_INIT(gtask, 0, fn, q); 6097 break; 6098 default: 6099 panic("unknown net intr type"); 6100 } 6101 if (irq != NULL) { 6102 err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg, 6103 q, name); 6104 if (err) 6105 taskqgroup_attach(tqg, gtask, q, ctx->ifc_dev, 6106 irq->ii_res, name); 6107 } else { 6108 taskqgroup_attach(tqg, gtask, q, NULL, NULL, name); 6109 } 6110 } 6111 6112 void 6113 iflib_irq_free(if_ctx_t ctx, if_irq_t irq) 6114 { 6115 6116 if (irq->ii_tag) 6117 bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag); 6118 6119 if (irq->ii_res) 6120 bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, 6121 rman_get_rid(irq->ii_res), irq->ii_res); 6122 } 6123 6124 static int 6125 iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name) 6126 { 
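/*
 * Legacy INTx / single-vector mode: one interrupt services both RX
 * and TX.  With IFLIB_SINGLE_IRQ_RX_ONLY the fast filter runs against
 * the ctx (iflib_fast_intr_ctx); otherwise it runs against rxq[0] via
 * iflib_fast_intr_rxtx.  The TX task is attached to the same
 * interrupt resource below.
 */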
6127 iflib_txq_t txq = ctx->ifc_txqs; 6128 iflib_rxq_t rxq = ctx->ifc_rxqs; 6129 if_irq_t irq = &ctx->ifc_legacy_irq; 6130 iflib_filter_info_t info; 6131 device_t dev; 6132 struct grouptask *gtask; 6133 struct resource *res; 6134 struct taskqgroup *tqg; 6135 void *q; 6136 int err, tqrid; 6137 bool rx_only; 6138 6139 q = &ctx->ifc_rxqs[0]; 6140 info = &rxq[0].ifr_filter_info; 6141 gtask = &rxq[0].ifr_task; 6142 tqg = qgroup_if_io_tqg; 6143 tqrid = *rid; 6144 rx_only = (ctx->ifc_sctx->isc_flags & IFLIB_SINGLE_IRQ_RX_ONLY) != 0; 6145 6146 ctx->ifc_flags |= IFC_LEGACY; 6147 info->ifi_filter = filter; 6148 info->ifi_filter_arg = filter_arg; 6149 info->ifi_task = gtask; 6150 info->ifi_ctx = rx_only ? ctx : q; 6151 6152 dev = ctx->ifc_dev; 6153 /* We allocate a single interrupt resource */ 6154 err = _iflib_irq_alloc(ctx, irq, tqrid, rx_only ? iflib_fast_intr_ctx : 6155 iflib_fast_intr_rxtx, NULL, info, name); 6156 if (err != 0) 6157 return (err); 6158 NET_GROUPTASK_INIT(gtask, 0, _task_fn_rx, q); 6159 res = irq->ii_res; 6160 taskqgroup_attach(tqg, gtask, q, dev, res, name); 6161 6162 GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq); 6163 taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, dev, res, 6164 "tx"); 6165 return (0); 6166 } 6167 6168 void 6169 iflib_led_create(if_ctx_t ctx) 6170 { 6171 6172 ctx->ifc_led_dev = led_create(iflib_led_func, ctx, 6173 device_get_nameunit(ctx->ifc_dev)); 6174 } 6175 6176 void 6177 iflib_tx_intr_deferred(if_ctx_t ctx, int txqid) 6178 { 6179 6180 GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task); 6181 } 6182 6183 void 6184 iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid) 6185 { 6186 6187 GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task); 6188 } 6189 6190 void 6191 iflib_admin_intr_deferred(if_ctx_t ctx) 6192 { 6193 6194 MPASS(ctx->ifc_admin_task.gt_taskqueue != NULL); 6195 GROUPTASK_ENQUEUE(&ctx->ifc_admin_task); 6196 } 6197 6198 void 6199 iflib_iov_intr_deferred(if_ctx_t ctx) 6200 { 6201 6202 GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task); 6203 } 6204 6205 void 6206 iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, const char *name) 6207 { 6208 6209 taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, NULL, NULL, 6210 name); 6211 } 6212 6213 void 6214 iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn, 6215 const char *name) 6216 { 6217 6218 GROUPTASK_INIT(gtask, 0, fn, ctx); 6219 taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, NULL, NULL, 6220 name); 6221 } 6222 6223 void 6224 iflib_config_gtask_deinit(struct grouptask *gtask) 6225 { 6226 6227 taskqgroup_detach(qgroup_if_config_tqg, gtask); 6228 } 6229 6230 void 6231 iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate) 6232 { 6233 if_t ifp = ctx->ifc_ifp; 6234 iflib_txq_t txq = ctx->ifc_txqs; 6235 6236 if_setbaudrate(ifp, baudrate); 6237 if (baudrate >= IF_Gbps(10)) { 6238 STATE_LOCK(ctx); 6239 ctx->ifc_flags |= IFC_PREFETCH; 6240 STATE_UNLOCK(ctx); 6241 } 6242 /* If link down, disable watchdog */ 6243 if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) { 6244 for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++) 6245 txq->ift_qstatus = IFLIB_QUEUE_IDLE; 6246 } 6247 ctx->ifc_link_state = link_state; 6248 if_link_state_change(ifp, link_state); 6249 } 6250 6251 static int 6252 iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq) 6253 { 6254 int credits; 6255 #ifdef INVARIANTS 6256 int credits_pre = txq->ift_cidx_processed; 6257 #endif 6258 6259 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, 
6260 BUS_DMASYNC_POSTREAD); 6261 if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0) 6262 return (0); 6263 6264 txq->ift_processed += credits; 6265 txq->ift_cidx_processed += credits; 6266 6267 MPASS(credits_pre + credits == txq->ift_cidx_processed); 6268 if (txq->ift_cidx_processed >= txq->ift_size) 6269 txq->ift_cidx_processed -= txq->ift_size; 6270 return (credits); 6271 } 6272 6273 static int 6274 iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget) 6275 { 6276 iflib_fl_t fl; 6277 u_int i; 6278 6279 for (i = 0, fl = &rxq->ifr_fl[0]; i < rxq->ifr_nfl; i++, fl++) 6280 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 6281 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 6282 return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx, 6283 budget)); 6284 } 6285 6286 void 6287 iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name, 6288 const char *description, if_int_delay_info_t info, 6289 int offset, int value) 6290 { 6291 info->iidi_ctx = ctx; 6292 info->iidi_offset = offset; 6293 info->iidi_value = value; 6294 SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev), 6295 SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)), 6296 OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 6297 info, 0, iflib_sysctl_int_delay, "I", description); 6298 } 6299 6300 struct sx * 6301 iflib_ctx_lock_get(if_ctx_t ctx) 6302 { 6303 6304 return (&ctx->ifc_ctx_sx); 6305 } 6306 6307 static int 6308 iflib_msix_init(if_ctx_t ctx) 6309 { 6310 device_t dev = ctx->ifc_dev; 6311 if_shared_ctx_t sctx = ctx->ifc_sctx; 6312 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 6313 int admincnt, bar, err, iflib_num_rx_queues, iflib_num_tx_queues; 6314 int msgs, queuemsgs, queues, rx_queues, tx_queues, vectors; 6315 6316 iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs; 6317 iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs; 6318 6319 if (bootverbose) 6320 device_printf(dev, "msix_init qsets capped at %d\n", 6321 imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets)); 6322 6323 /* Override by tuneable */ 6324 if (scctx->isc_disable_msix) 6325 goto msi; 6326 6327 /* First try MSI-X */ 6328 if ((msgs = pci_msix_count(dev)) == 0) { 6329 if (bootverbose) 6330 device_printf(dev, "MSI-X not supported or disabled\n"); 6331 goto msi; 6332 } 6333 6334 bar = ctx->ifc_softc_ctx.isc_msix_bar; 6335 /* 6336 * bar == -1 => "trust me I know what I'm doing" 6337 * Some drivers are for hardware that is so shoddily 6338 * documented that no one knows which bars are which 6339 * so the developer has to map all bars. This hack 6340 * allows shoddy garbage to use MSI-X in this framework. 
6341 */ 6342 if (bar != -1) { 6343 ctx->ifc_msix_mem = bus_alloc_resource_any(dev, 6344 SYS_RES_MEMORY, &bar, RF_ACTIVE); 6345 if (ctx->ifc_msix_mem == NULL) { 6346 device_printf(dev, "Unable to map MSI-X table\n"); 6347 goto msi; 6348 } 6349 } 6350 6351 admincnt = sctx->isc_admin_intrcnt; 6352 #if IFLIB_DEBUG 6353 /* use only 1 qset in debug mode */ 6354 queuemsgs = min(msgs - admincnt, 1); 6355 #else 6356 queuemsgs = msgs - admincnt; 6357 #endif 6358 #ifdef RSS 6359 queues = imin(queuemsgs, rss_getnumbuckets()); 6360 #else 6361 queues = queuemsgs; 6362 #endif 6363 queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues); 6364 if (bootverbose) 6365 device_printf(dev, 6366 "intr CPUs: %d queue msgs: %d admincnt: %d\n", 6367 CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt); 6368 #ifdef RSS 6369 /* If we're doing RSS, clamp at the number of RSS buckets */ 6370 if (queues > rss_getnumbuckets()) 6371 queues = rss_getnumbuckets(); 6372 #endif 6373 if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt) 6374 rx_queues = iflib_num_rx_queues; 6375 else 6376 rx_queues = queues; 6377 6378 if (rx_queues > scctx->isc_nrxqsets) 6379 rx_queues = scctx->isc_nrxqsets; 6380 6381 /* 6382 * We want this to be all logical CPUs by default 6383 */ 6384 if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues) 6385 tx_queues = iflib_num_tx_queues; 6386 else 6387 tx_queues = mp_ncpus; 6388 6389 if (tx_queues > scctx->isc_ntxqsets) 6390 tx_queues = scctx->isc_ntxqsets; 6391 6392 if (ctx->ifc_sysctl_qs_eq_override == 0) { 6393 #ifdef INVARIANTS 6394 if (tx_queues != rx_queues) 6395 device_printf(dev, 6396 "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n", 6397 min(rx_queues, tx_queues), min(rx_queues, tx_queues)); 6398 #endif 6399 tx_queues = min(rx_queues, tx_queues); 6400 rx_queues = min(rx_queues, tx_queues); 6401 } 6402 6403 vectors = rx_queues + admincnt; 6404 if (msgs < vectors) { 6405 device_printf(dev, 6406 "insufficient number of MSI-X vectors " 6407 "(supported %d, need %d)\n", msgs, vectors); 6408 goto msi; 6409 } 6410 6411 device_printf(dev, "Using %d RX queues %d TX queues\n", rx_queues, 6412 tx_queues); 6413 msgs = vectors; 6414 if ((err = pci_alloc_msix(dev, &vectors)) == 0) { 6415 if (vectors != msgs) { 6416 device_printf(dev, 6417 "Unable to allocate sufficient MSI-X vectors " 6418 "(got %d, need %d)\n", vectors, msgs); 6419 pci_release_msi(dev); 6420 if (bar != -1) { 6421 bus_release_resource(dev, SYS_RES_MEMORY, bar, 6422 ctx->ifc_msix_mem); 6423 ctx->ifc_msix_mem = NULL; 6424 } 6425 goto msi; 6426 } 6427 device_printf(dev, "Using MSI-X interrupts with %d vectors\n", 6428 vectors); 6429 scctx->isc_vectors = vectors; 6430 scctx->isc_nrxqsets = rx_queues; 6431 scctx->isc_ntxqsets = tx_queues; 6432 scctx->isc_intr = IFLIB_INTR_MSIX; 6433 6434 return (vectors); 6435 } else { 6436 device_printf(dev, 6437 "failed to allocate %d MSI-X vectors, err: %d\n", vectors, 6438 err); 6439 if (bar != -1) { 6440 bus_release_resource(dev, SYS_RES_MEMORY, bar, 6441 ctx->ifc_msix_mem); 6442 ctx->ifc_msix_mem = NULL; 6443 } 6444 } 6445 6446 msi: 6447 vectors = pci_msi_count(dev); 6448 scctx->isc_nrxqsets = 1; 6449 scctx->isc_ntxqsets = 1; 6450 scctx->isc_vectors = vectors; 6451 if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) { 6452 device_printf(dev,"Using an MSI interrupt\n"); 6453 scctx->isc_intr = IFLIB_INTR_MSI; 6454 } else { 6455 scctx->isc_vectors = 1; 6456 device_printf(dev,"Using a Legacy interrupt\n"); 6457 scctx->isc_intr = IFLIB_INTR_LEGACY; 6458 } 6459 
6460 return (vectors);
6461 }
6462 
6463 static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };
6464 
6465 static int
6466 mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
6467 {
6468 int rc;
6469 uint16_t *state = ((uint16_t *)oidp->oid_arg1);
6470 struct sbuf *sb;
6471 const char *ring_state = "UNKNOWN";
6472 
6473 /* XXX needed ? */
6474 rc = sysctl_wire_old_buffer(req, 0);
6475 MPASS(rc == 0);
6476 if (rc != 0)
6477 return (rc);
6478 sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
6479 MPASS(sb != NULL);
6480 if (sb == NULL)
6481 return (ENOMEM);
6482 if (state[3] <= 3)
6483 ring_state = ring_states[state[3]];
6484 
6485 sbuf_printf(sb, "pidx_head: %04hu pidx_tail: %04hu cidx: %04hu state: %s",
6486 state[0], state[1], state[2], ring_state);
6487 rc = sbuf_finish(sb);
6488 sbuf_delete(sb);
6489 return (rc);
6490 }
6491 
6492 enum iflib_ndesc_handler {
6493 IFLIB_NTXD_HANDLER,
6494 IFLIB_NRXD_HANDLER,
6495 };
6496 
6497 static int
6498 mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
6499 {
6500 if_ctx_t ctx = (void *)arg1;
6501 enum iflib_ndesc_handler type = arg2;
6502 char buf[256] = {0};
6503 qidx_t *ndesc;
6504 char *p, *next;
6505 int nqs, rc, i;
6506 
6507 nqs = 8;
6508 switch (type) {
6509 case IFLIB_NTXD_HANDLER:
6510 ndesc = ctx->ifc_sysctl_ntxds;
6511 if (ctx->ifc_sctx)
6512 nqs = ctx->ifc_sctx->isc_ntxqs;
6513 break;
6514 case IFLIB_NRXD_HANDLER:
6515 ndesc = ctx->ifc_sysctl_nrxds;
6516 if (ctx->ifc_sctx)
6517 nqs = ctx->ifc_sctx->isc_nrxqs;
6518 break;
6519 default:
6520 printf("%s: unhandled type\n", __func__);
6521 return (EINVAL);
6522 }
6523 if (nqs == 0)
6524 nqs = 8;
6525 
6526 for (i = 0; i < 8; i++) {
6527 if (i >= nqs)
6528 break;
6529 if (i)
6530 strcat(buf, ",");
6531 sprintf(strchr(buf, 0), "%d", ndesc[i]);
6532 }
6533 
6534 rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
6535 if (rc || req->newptr == NULL)
6536 return (rc);
6537 
6538 for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
6539 i++, p = strsep(&next, " ,")) {
6540 ndesc[i] = strtoul(p, NULL, 10);
6541 }
6542 
6543 return (rc);
6544 }
6545 
6546 #define NAME_BUFLEN 32
6547 static void
6548 iflib_add_device_sysctl_pre(if_ctx_t ctx)
6549 {
6550 device_t dev = iflib_get_dev(ctx);
6551 struct sysctl_oid_list *child, *oid_list;
6552 struct sysctl_ctx_list *ctx_list;
6553 struct sysctl_oid *node;
6554 
6555 ctx_list = device_get_sysctl_ctx(dev);
6556 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
6557 ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib",
6558 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "IFLIB fields");
6559 oid_list = SYSCTL_CHILDREN(node);
6560 
6561 SYSCTL_ADD_CONST_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
6562 CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version,
6563 "driver version");
6564 
6565 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
6566 CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
6567 "# of txqs to use, 0 => use default #");
6568 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
6569 CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
6570 "# of rxqs to use, 0 => use default #");
6571 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
6572 CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
6573 "permit #txq != #rxq");
6574 SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
6575 CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
6576 "disable MSI-X (default 0)");
6577 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
6578 CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
6579 "set the RX
budget"); 6580 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate", 6581 CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0, 6582 "cause TX to abdicate instead of running to completion"); 6583 ctx->ifc_sysctl_core_offset = CORE_OFFSET_UNSPECIFIED; 6584 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "core_offset", 6585 CTLFLAG_RDTUN, &ctx->ifc_sysctl_core_offset, 0, 6586 "offset to start using cores at"); 6587 SYSCTL_ADD_U8(ctx_list, oid_list, OID_AUTO, "separate_txrx", 6588 CTLFLAG_RDTUN, &ctx->ifc_sysctl_separate_txrx, 0, 6589 "use separate cores for TX and RX"); 6590 6591 /* XXX change for per-queue sizes */ 6592 SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds", 6593 CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, ctx, 6594 IFLIB_NTXD_HANDLER, mp_ndesc_handler, "A", 6595 "list of # of TX descriptors to use, 0 = use default #"); 6596 SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds", 6597 CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, ctx, 6598 IFLIB_NRXD_HANDLER, mp_ndesc_handler, "A", 6599 "list of # of RX descriptors to use, 0 = use default #"); 6600 } 6601 6602 static void 6603 iflib_add_device_sysctl_post(if_ctx_t ctx) 6604 { 6605 if_shared_ctx_t sctx = ctx->ifc_sctx; 6606 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 6607 device_t dev = iflib_get_dev(ctx); 6608 struct sysctl_oid_list *child; 6609 struct sysctl_ctx_list *ctx_list; 6610 iflib_fl_t fl; 6611 iflib_txq_t txq; 6612 iflib_rxq_t rxq; 6613 int i, j; 6614 char namebuf[NAME_BUFLEN]; 6615 char *qfmt; 6616 struct sysctl_oid *queue_node, *fl_node, *node; 6617 struct sysctl_oid_list *queue_list, *fl_list; 6618 ctx_list = device_get_sysctl_ctx(dev); 6619 6620 node = ctx->ifc_sysctl_node; 6621 child = SYSCTL_CHILDREN(node); 6622 6623 if (scctx->isc_ntxqsets > 100) 6624 qfmt = "txq%03d"; 6625 else if (scctx->isc_ntxqsets > 10) 6626 qfmt = "txq%02d"; 6627 else 6628 qfmt = "txq%d"; 6629 for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) { 6630 snprintf(namebuf, NAME_BUFLEN, qfmt, i); 6631 queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf, 6632 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); 6633 queue_list = SYSCTL_CHILDREN(queue_node); 6634 #if MEMORY_LOGGING 6635 SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued", 6636 CTLFLAG_RD, 6637 &txq->ift_dequeued, "total mbufs freed"); 6638 SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued", 6639 CTLFLAG_RD, 6640 &txq->ift_enqueued, "total mbufs enqueued"); 6641 #endif 6642 SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag", 6643 CTLFLAG_RD, 6644 &txq->ift_mbuf_defrag, "# of times m_defrag was called"); 6645 SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups", 6646 CTLFLAG_RD, 6647 &txq->ift_pullups, "# of times m_pullup was called"); 6648 SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed", 6649 CTLFLAG_RD, 6650 &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed"); 6651 SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail", 6652 CTLFLAG_RD, 6653 &txq->ift_no_desc_avail, "# of times no descriptors were available"); 6654 SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed", 6655 CTLFLAG_RD, 6656 &txq->ift_map_failed, "# of times DMA map failed"); 6657 SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig", 6658 CTLFLAG_RD, 6659 &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG"); 6660 SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup", 6661 CTLFLAG_RD, 6662 &txq->ift_no_tx_dma_setup, "# of times map 
failed for other than EFBIG");
6663 SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
6664 CTLFLAG_RD,
6665 &txq->ift_pidx, 1, "Producer Index");
6666 SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
6667 CTLFLAG_RD,
6668 &txq->ift_cidx, 1, "Consumer Index");
6669 SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
6670 CTLFLAG_RD,
6671 &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
6672 SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
6673 CTLFLAG_RD,
6674 &txq->ift_in_use, 1, "descriptors in use");
6675 SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
6676 CTLFLAG_RD,
6677 &txq->ift_processed, "descriptors processed for clean");
6678 SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
6679 CTLFLAG_RD,
6680 &txq->ift_cleaned, "total cleaned");
6681 SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
6682 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
6683 __DEVOLATILE(uint64_t *, &txq->ift_br->state), 0,
6684 mp_ring_state_handler, "A", "soft ring state");
6685 SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
6686 CTLFLAG_RD, &txq->ift_br->enqueues,
6687 "# of enqueues to the mp_ring for this queue");
6688 SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
6689 CTLFLAG_RD, &txq->ift_br->drops,
6690 "# of drops in the mp_ring for this queue");
6691 SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
6692 CTLFLAG_RD, &txq->ift_br->starts,
6693 "# of normal consumer starts in the mp_ring for this queue");
6694 SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
6695 CTLFLAG_RD, &txq->ift_br->stalls,
6696 "# of consumer stalls in the mp_ring for this queue");
6697 SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
6698 CTLFLAG_RD, &txq->ift_br->restarts,
6699 "# of consumer restarts in the mp_ring for this queue");
6700 SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
6701 CTLFLAG_RD, &txq->ift_br->abdications,
6702 "# of consumer abdications in the mp_ring for this queue");
6703 }
6704 
6705 if (scctx->isc_nrxqsets > 100)
6706 qfmt = "rxq%03d";
6707 else if (scctx->isc_nrxqsets > 10)
6708 qfmt = "rxq%02d";
6709 else
6710 qfmt = "rxq%d";
6711 for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
6712 snprintf(namebuf, NAME_BUFLEN, qfmt, i);
6713 queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
6714 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
6715 queue_list = SYSCTL_CHILDREN(queue_node);
6716 if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
6717 SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
6718 CTLFLAG_RD,
6719 &rxq->ifr_cq_cidx, 1, "Consumer Index");
6720 }
6721 
6722 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
6723 snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
6724 fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf,
6725 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist Name");
6726 fl_list = SYSCTL_CHILDREN(fl_node);
6727 SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
6728 CTLFLAG_RD,
6729 &fl->ifl_pidx, 1, "Producer Index");
6730 SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
6731 CTLFLAG_RD,
6732 &fl->ifl_cidx, 1, "Consumer Index");
6733 SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
6734 CTLFLAG_RD,
6735 &fl->ifl_credits, 1, "credits available");
6736 SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "buf_size",
6737 CTLFLAG_RD,
6738 &fl->ifl_buf_size, 1, "buffer size");
6739 #if
MEMORY_LOGGING
6740 SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
6741 CTLFLAG_RD,
6742 &fl->ifl_m_enqueued, "mbufs allocated");
6743 SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
6744 CTLFLAG_RD,
6745 &fl->ifl_m_dequeued, "mbufs freed");
6746 SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
6747 CTLFLAG_RD,
6748 &fl->ifl_cl_enqueued, "clusters allocated");
6749 SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
6750 CTLFLAG_RD,
6751 &fl->ifl_cl_dequeued, "clusters freed");
6752 #endif
6753 
6754 }
6755 }
6756 
6757 }
6758 
6759 void
6760 iflib_request_reset(if_ctx_t ctx)
6761 {
6762 
6763 STATE_LOCK(ctx);
6764 ctx->ifc_flags |= IFC_DO_RESET;
6765 STATE_UNLOCK(ctx);
6766 }
6767 
6768 #ifndef __NO_STRICT_ALIGNMENT
6769 static struct mbuf *
6770 iflib_fixup_rx(struct mbuf *m)
6771 {
6772 struct mbuf *n;
6773 
6774 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
6775 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
6776 m->m_data += ETHER_HDR_LEN;
6777 n = m;
6778 } else {
6779 MGETHDR(n, M_NOWAIT, MT_DATA);
6780 if (n == NULL) {
6781 m_freem(m);
6782 return (NULL);
6783 }
6784 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
6785 m->m_data += ETHER_HDR_LEN;
6786 m->m_len -= ETHER_HDR_LEN;
6787 n->m_len = ETHER_HDR_LEN;
6788 M_MOVE_PKTHDR(n, m);
6789 n->m_next = m;
6790 }
6791 return (n);
6792 }
6793 #endif
6794 
6795 #ifdef DEBUGNET
6796 static void
6797 iflib_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
6798 {
6799 if_ctx_t ctx;
6800 
6801 ctx = if_getsoftc(ifp);
6802 CTX_LOCK(ctx);
6803 *nrxr = NRXQSETS(ctx);
6804 *ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
6805 *clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
6806 CTX_UNLOCK(ctx);
6807 }
6808 
6809 static void
6810 iflib_debugnet_event(if_t ifp, enum debugnet_ev event)
6811 {
6812 if_ctx_t ctx;
6813 if_softc_ctx_t scctx;
6814 iflib_fl_t fl;
6815 iflib_rxq_t rxq;
6816 int i, j;
6817 
6818 ctx = if_getsoftc(ifp);
6819 scctx = &ctx->ifc_softc_ctx;
6820 
6821 switch (event) {
6822 case DEBUGNET_START:
6823 for (i = 0; i < scctx->isc_nrxqsets; i++) {
6824 rxq = &ctx->ifc_rxqs[i];
6825 for (j = 0; j < rxq->ifr_nfl; j++) {
6826 fl = &rxq->ifr_fl[j];
6827 fl->ifl_zone = m_getzone(fl->ifl_buf_size);
6828 }
6829 }
6830 iflib_no_tx_batch = 1;
6831 break;
6832 default:
6833 break;
6834 }
6835 }
6836 
6837 static int
6838 iflib_debugnet_transmit(if_t ifp, struct mbuf *m)
6839 {
6840 if_ctx_t ctx;
6841 iflib_txq_t txq;
6842 int error;
6843 
6844 ctx = if_getsoftc(ifp);
6845 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
6846 IFF_DRV_RUNNING)
6847 return (EBUSY);
6848 
6849 txq = &ctx->ifc_txqs[0];
6850 error = iflib_encap(txq, &m);
6851 if (error == 0)
6852 (void)iflib_txd_db_check(ctx, txq, true, txq->ift_in_use);
6853 return (error);
6854 }
6855 
6856 static int
6857 iflib_debugnet_poll(if_t ifp, int count)
6858 {
6859 struct epoch_tracker et;
6860 if_ctx_t ctx;
6861 if_softc_ctx_t scctx;
6862 iflib_txq_t txq;
6863 int i;
6864 
6865 ctx = if_getsoftc(ifp);
6866 scctx = &ctx->ifc_softc_ctx;
6867 
6868 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
6869 IFF_DRV_RUNNING)
6870 return (EBUSY);
6871 
6872 txq = &ctx->ifc_txqs[0];
6873 (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
6874 
6875 NET_EPOCH_ENTER(et);
6876 for (i = 0; i < scctx->isc_nrxqsets; i++)
6877 (void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
6878 NET_EPOCH_EXIT(et);
6879 return (0);
6880 }
6881 #endif /* DEBUGNET */
6882
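/*
 * Illustrative usage note (hypothetical device name): the descriptor
 * overrides registered in iflib_add_device_sysctl_pre() accept a comma-
 * or space-separated list with one entry per hardware queue, e.g. in
 * loader.conf for an ix(4) unit:
 *
 *	dev.ix.0.iflib.override_nrxds="2048"
 *	dev.ix.0.iflib.override_ntxds="1024,1024"
 *
 * An entry of 0 keeps the driver's default for that queue.
 */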