/*-
 * Copyright (c) 2014-2017, Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Neither the name of Matthew Macy nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_acpi.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/sockio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/kobj.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/limits.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/mp_ring.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_lro.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/led/led.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>

#include <net/iflib.h>

#include "ifdi_if.h"

#if defined(__i386__) || defined(__amd64__)
#include <sys/memdesc.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/busdma_dmar.h>
#endif

#include <sys/bitstring.h>
/*
 * enable accounting of every mbuf as it comes into and goes out of
 * iflib's software descriptor references
 */
#define MEMORY_LOGGING 0
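/*
 * When MEMORY_LOGGING is set to 1, the #if MEMORY_LOGGING counters
 * declared below (ift_enqueued and ift_dequeued on the txq, and
 * ifl_m_enqueued, ifl_m_dequeued, ifl_cl_enqueued and ifl_cl_dequeued
 * on the free list) are compiled in, so every mbuf and cluster
 * entering or leaving a queue is counted.  Leave it at 0 in production
 * builds; the counters add a store to every enqueue and dequeue path.
 */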
/*
 * Enable mbuf vectors for compressing long mbuf chains
 */

/*
 * NB:
 * - Prefetching in tx cleaning should perhaps be a tunable. The distance
 *   ahead we prefetch needs to be determined by the time spent in m_free
 *   vis a vis the cost of a prefetch. This will of course vary based on
 *   the workload:
 *   - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
 *     is quite expensive, thus suggesting very little prefetch.
 *   - small packet forwarding which is just returning a single mbuf to
 *     UMA will typically be very fast vis a vis the cost of a memory
 *     access.
 */

/*
 * File organization:
 * - private structures
 * - iflib private utility functions
 * - ifnet functions
 * - vlan registry and other exported functions
 * - iflib public core functions
 */
static MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");

struct iflib_txq;
typedef struct iflib_txq *iflib_txq_t;
struct iflib_rxq;
typedef struct iflib_rxq *iflib_rxq_t;
struct iflib_fl;
typedef struct iflib_fl *iflib_fl_t;

struct iflib_ctx;

static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);

typedef struct iflib_filter_info {
	driver_filter_t *ifi_filter;
	void *ifi_filter_arg;
	struct grouptask *ifi_task;
	void *ifi_ctx;
} *iflib_filter_info_t;

struct iflib_ctx {
	KOBJ_FIELDS;
	/*
	 * Pointer to hardware driver's softc
	 */
	void *ifc_softc;
	device_t ifc_dev;
	if_t ifc_ifp;

	cpuset_t ifc_cpus;
	if_shared_ctx_t ifc_sctx;
	struct if_softc_ctx ifc_softc_ctx;

	struct mtx ifc_mtx;

	uint16_t ifc_nhwtxqs;

	iflib_txq_t ifc_txqs;
	iflib_rxq_t ifc_rxqs;
	uint32_t ifc_if_flags;
	uint32_t ifc_flags;
	uint32_t ifc_max_fl_buf_size;
	int ifc_in_detach;

	int ifc_link_state;
	int ifc_link_irq;
	int ifc_watchdog_events;
	struct cdev *ifc_led_dev;
	struct resource *ifc_msix_mem;

	struct if_irq ifc_legacy_irq;
	struct grouptask ifc_admin_task;
	struct grouptask ifc_vflr_task;
	struct iflib_filter_info ifc_filter_info;
	struct ifmedia ifc_media;

	struct sysctl_oid *ifc_sysctl_node;
	uint16_t ifc_sysctl_ntxqs;
	uint16_t ifc_sysctl_nrxqs;
	uint16_t ifc_sysctl_qs_eq_override;
	uint16_t ifc_sysctl_rx_budget;

	qidx_t ifc_sysctl_ntxds[8];
	qidx_t ifc_sysctl_nrxds[8];
	struct if_txrx ifc_txrx;
#define isc_txd_encap ifc_txrx.ift_txd_encap
#define isc_txd_flush ifc_txrx.ift_txd_flush
#define isc_txd_credits_update ifc_txrx.ift_txd_credits_update
#define isc_rxd_available ifc_txrx.ift_rxd_available
#define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
#define isc_rxd_refill ifc_txrx.ift_rxd_refill
#define isc_rxd_flush ifc_txrx.ift_rxd_flush
#define isc_legacy_intr ifc_txrx.ift_legacy_intr
	eventhandler_tag ifc_vlan_attach_event;
	eventhandler_tag ifc_vlan_detach_event;
	uint8_t ifc_mac[ETHER_ADDR_LEN];
	char ifc_mtx_name[16];
};


void *
iflib_get_softc(if_ctx_t ctx)
{

	return (ctx->ifc_softc);
}

device_t
iflib_get_dev(if_ctx_t ctx)
{

	return (ctx->ifc_dev);
}

if_t
iflib_get_ifp(if_ctx_t ctx)
{

	return (ctx->ifc_ifp);
}

struct ifmedia *
iflib_get_media(if_ctx_t ctx)
{

	return (&ctx->ifc_media);
}

void
iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
{

	bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN);
}

if_softc_ctx_t
iflib_get_softc_ctx(if_ctx_t ctx)
{

	return (&ctx->ifc_softc_ctx);
}

if_shared_ctx_t
iflib_get_sctx(if_ctx_t ctx)
{

	return (ctx->ifc_sctx);
}
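/*
 * These accessors are how a driver reaches its iflib state.  A minimal
 * sketch (the "foo" driver, foo_attach_pre() and struct foo_softc are
 * purely illustrative, not part of iflib):
 *
 *	static int
 *	foo_attach_pre(if_ctx_t ctx)
 *	{
 *		struct foo_softc *sc = iflib_get_softc(ctx);
 *
 *		sc->foo_dev = iflib_get_dev(ctx);
 *		sc->foo_media = iflib_get_media(ctx);
 *		sc->foo_scctx = iflib_get_softc_ctx(ctx);
 *		return (0);
 *	}
 */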
#define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
#define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
/* round ptr up to the next cache-line boundary */
#define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)CACHE_LINE_SIZE-1)))

#define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
#define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)

#define RX_SW_DESC_MAP_CREATED	(1 << 0)
#define TX_SW_DESC_MAP_CREATED	(1 << 1)
#define RX_SW_DESC_INUSE	(1 << 3)
#define TX_SW_DESC_MAPPED	(1 << 4)

#define	M_TOOBIG	M_PROTO1

typedef struct iflib_sw_rx_desc_array {
	bus_dmamap_t	*ifsd_map;	/* bus_dma maps for packet */
	struct mbuf	**ifsd_m;	/* pkthdr mbufs */
	caddr_t		*ifsd_cl;	/* direct cluster pointer for rx */
	uint8_t		*ifsd_flags;
} iflib_rxsd_array_t;

typedef struct iflib_sw_tx_desc_array {
	bus_dmamap_t	*ifsd_map;	/* bus_dma maps for packet */
	struct mbuf	**ifsd_m;	/* pkthdr mbufs */
	uint8_t		*ifsd_flags;
} if_txsd_vec_t;


/* magic number that should be high enough for any hardware */
#define IFLIB_MAX_TX_SEGS		128
/* bnxt supports 64 with hardware LRO enabled */
#define IFLIB_MAX_RX_SEGS		64
#define IFLIB_RX_COPY_THRESH		128
#define IFLIB_MAX_RX_REFRESH		32
/* The minimum descriptors per second before we start coalescing */
#define IFLIB_MIN_DESC_SEC		16384
#define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
#define IFLIB_QUEUE_IDLE		0
#define IFLIB_QUEUE_HUNG		1
#define IFLIB_QUEUE_WORKING		2
/* maximum number of txqs that can share an rx interrupt */
#define IFLIB_MAX_TX_SHARED_INTR	4

/* this should really scale with ring size - this is a fairly arbitrary value */
#define TX_BATCH_SIZE			32

#define IFLIB_RESTART_BUDGET		8

#define IFC_LEGACY		0x001
#define IFC_QFLUSH		0x002
#define IFC_MULTISEG		0x004
#define IFC_DMAR		0x008
#define IFC_SC_ALLOCATED	0x010
#define IFC_INIT_DONE		0x020
#define IFC_PREFETCH		0x040
#define IFC_DO_RESET		0x080
#define IFC_CHECK_HUNG		0x100

#define CSUM_OFFLOAD		(CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
				 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
				 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
struct iflib_txq {
	qidx_t		ift_in_use;
	qidx_t		ift_cidx;
	qidx_t		ift_cidx_processed;
	qidx_t		ift_pidx;
	uint8_t		ift_gen;
	uint8_t		ift_br_offset;
	uint16_t	ift_npending;
	uint16_t	ift_db_pending;
	uint16_t	ift_rs_pending;
	/* implicit pad */
	uint8_t		ift_txd_size[8];
	uint64_t	ift_processed;
	uint64_t	ift_cleaned;
	uint64_t	ift_cleaned_prev;
#if MEMORY_LOGGING
	uint64_t	ift_enqueued;
	uint64_t	ift_dequeued;
#endif
	uint64_t	ift_no_tx_dma_setup;
	uint64_t	ift_no_desc_avail;
	uint64_t	ift_mbuf_defrag_failed;
	uint64_t	ift_mbuf_defrag;
	uint64_t	ift_map_failed;
	uint64_t	ift_txd_encap_efbig;
	uint64_t	ift_pullups;

	struct mtx	ift_mtx;
	struct mtx	ift_db_mtx;

	/* constant values */
	if_ctx_t	ift_ctx;
	struct ifmp_ring	*ift_br;
	struct grouptask	ift_task;
	qidx_t		ift_size;
	uint16_t	ift_id;
	struct callout	ift_timer;

	if_txsd_vec_t	ift_sds;
	uint8_t		ift_qstatus;
	uint8_t		ift_closed;
	uint8_t		ift_update_freq;
	struct iflib_filter_info	ift_filter_info;
	bus_dma_tag_t	ift_desc_tag;
	bus_dma_tag_t	ift_tso_desc_tag;
	iflib_dma_info_t	ift_ifdi;
#define MTX_NAME_LEN 16
	char		ift_mtx_name[MTX_NAME_LEN];
	char		ift_db_mtx_name[MTX_NAME_LEN];
	bus_dma_segment_t	ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t	ift_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

struct iflib_fl {
	qidx_t		ifl_cidx;
	qidx_t		ifl_pidx;
	qidx_t		ifl_credits;
	uint8_t		ifl_gen;
	uint8_t		ifl_rxd_size;
#if MEMORY_LOGGING
	uint64_t	ifl_m_enqueued;
	uint64_t	ifl_m_dequeued;
	uint64_t	ifl_cl_enqueued;
	uint64_t	ifl_cl_dequeued;
#endif
	/* implicit pad */

	bitstr_t	*ifl_rx_bitmap;
	qidx_t		ifl_fragidx;
	/* constant */
	qidx_t		ifl_size;
	uint16_t	ifl_buf_size;
	uint16_t	ifl_cltype;
	uma_zone_t	ifl_zone;
	iflib_rxsd_array_t	ifl_sds;
	iflib_rxq_t	ifl_rxq;
	uint8_t		ifl_id;
	bus_dma_tag_t	ifl_desc_tag;
	iflib_dma_info_t	ifl_ifdi;
	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
	caddr_t		ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
	qidx_t		ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
} __aligned(CACHE_LINE_SIZE);

static inline qidx_t
get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
{
	qidx_t used;

	if (pidx > cidx)
		used = pidx - cidx;
	else if (pidx < cidx)
		used = size - cidx + pidx;
	else if (gen == 0 && pidx == cidx)
		used = 0;
	else if (gen == 1 && pidx == cidx)
		used = size;
	else
		panic("bad state");

	return (used);
}

#define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))

#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
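/*
 * Worked example: with ift_size == 1024, cidx == 1000 and pidx == 8 the
 * ring has wrapped, so get_inuse() returns 1024 - 1000 + 8 == 32 and
 * TXQ_AVAIL() reports 992 free slots.  When pidx == cidx the generation
 * bit disambiguates a completely empty ring (gen == 0) from a
 * completely full one (gen == 1); IDXDIFF() performs the same wrap
 * arithmetic for index pairs that can never be a full ring apart.
 */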
struct iflib_rxq {
	/* If there is a separate completion queue -
	 * these are the cq cidx and pidx. Otherwise
	 * these are unused.
	 */
	qidx_t		ifr_size;
	qidx_t		ifr_cq_cidx;
	qidx_t		ifr_cq_pidx;
	uint8_t		ifr_cq_gen;
	uint8_t		ifr_fl_offset;

	if_ctx_t	ifr_ctx;
	iflib_fl_t	ifr_fl;
	uint64_t	ifr_rx_irq;
	uint16_t	ifr_id;
	uint8_t		ifr_lro_enabled;
	uint8_t		ifr_nfl;
	uint8_t		ifr_ntxqirq;
	uint8_t		ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
	struct lro_ctrl	ifr_lc;
	struct grouptask	ifr_task;
	struct iflib_filter_info	ifr_filter_info;
	iflib_dma_info_t	ifr_ifdi;

	/* dynamically allocate if any drivers need a value substantially larger than this */
	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t	ifr_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

typedef struct if_rxsd {
	caddr_t *ifsd_cl;
	struct mbuf **ifsd_m;
	iflib_fl_t ifsd_fl;
	qidx_t ifsd_cidx;
} *if_rxsd_t;

/* multiple of word size */
#ifdef __LP64__
#define PKT_INFO_SIZE	6
#define RXD_INFO_SIZE	5
#define PKT_TYPE uint64_t
#else
#define PKT_INFO_SIZE	11
#define RXD_INFO_SIZE	8
#define PKT_TYPE uint32_t
#endif
#define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
#define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)

typedef struct if_pkt_info_pad {
	PKT_TYPE pkt_val[PKT_INFO_SIZE];
} *if_pkt_info_pad_t;
typedef struct if_rxd_info_pad {
	PKT_TYPE rxd_val[RXD_INFO_SIZE];
} *if_rxd_info_pad_t;

CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));


static inline void
pkt_info_zero(if_pkt_info_t pi)
{
	if_pkt_info_pad_t pi_pad;

	pi_pad = (if_pkt_info_pad_t)pi;
	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
#ifndef __LP64__
	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
#endif
}

static inline void
rxd_info_zero(if_rxd_info_t ri)
{
	if_rxd_info_pad_t ri_pad;
	int i;

	ri_pad = (if_rxd_info_pad_t)ri;
	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
		ri_pad->rxd_val[i] = 0;
		ri_pad->rxd_val[i+1] = 0;
		ri_pad->rxd_val[i+2] = 0;
		ri_pad->rxd_val[i+3] = 0;
	}
#ifdef __LP64__
	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
#endif
}
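/*
 * The two _pad overlays above exist so that the zeroing functions can
 * clear if_pkt_info/if_rxd_info with a handful of word-sized stores
 * instead of a bzero()/memset() call in the per-packet path; on LP64,
 * pkt_info_zero() is six 64-bit stores covering the whole 48-byte
 * structure.  The CTASSERTs keep the overlays from silently going
 * stale if the underlying structures grow.
 */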
/*
 * Only allow a single packet to take up at most 1/nth of the tx ring
 */
#define MAX_SINGLE_PACKET_FRACTION 12
#define IF_BAD_DMA (bus_addr_t)-1

#define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))

#define CTX_LOCK_INIT(_sc, _name) mtx_init(&(_sc)->ifc_mtx, _name, "iflib ctx lock", MTX_DEF)

#define CTX_LOCK(ctx) mtx_lock(&(ctx)->ifc_mtx)
#define CTX_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_mtx)
#define CTX_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_mtx)


#define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
#define CALLOUT_UNLOCK(txq)	mtx_unlock(&txq->ift_mtx)


/* Our boot-time initialization hook */
static int	iflib_module_event_handler(module_t, int, void *);

static moduledata_t iflib_moduledata = {
	"iflib",
	iflib_module_event_handler,
	NULL
};

DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
MODULE_VERSION(iflib, 1);

MODULE_DEPEND(iflib, pci, 1, 1, 1);
MODULE_DEPEND(iflib, ether, 1, 1, 1);

TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
TASKQGROUP_DEFINE(if_config_tqg, 1, 1);

#ifndef IFLIB_DEBUG_COUNTERS
#ifdef INVARIANTS
#define IFLIB_DEBUG_COUNTERS 1
#else
#define IFLIB_DEBUG_COUNTERS 0
#endif /* !INVARIANTS */
#endif

static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0,
    "iflib driver parameters");

/*
 * XXX need to ensure that this can't accidentally cause the head to be moved backwards
 */
static int iflib_min_tx_latency = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
    &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
static int iflib_no_tx_batch = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
    &iflib_no_tx_batch, 0, "disable transmit batching at the possible expense of throughput");


#if IFLIB_DEBUG_COUNTERS

static int iflib_tx_seen;
static int iflib_tx_sent;
static int iflib_tx_encap;
static int iflib_rx_allocs;
static int iflib_fl_refills;
static int iflib_fl_refills_large;
static int iflib_tx_frees;

SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
    &iflib_tx_seen, 0, "# tx mbufs seen");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
    &iflib_tx_sent, 0, "# tx mbufs sent");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
    &iflib_tx_encap, 0, "# tx mbufs encapped");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
    &iflib_tx_frees, 0, "# tx frees");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
    &iflib_rx_allocs, 0, "# rx allocations");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
    &iflib_fl_refills, 0, "# refills");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
    &iflib_fl_refills_large, 0, "# large refills");


static int iflib_txq_drain_flushing;
static int iflib_txq_drain_oactive;
static int iflib_txq_drain_notready;
static int iflib_txq_drain_encapfail;

SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
    &iflib_txq_drain_flushing, 0, "# drain flushes");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
    &iflib_txq_drain_oactive, 0, "# drain oactives");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
    &iflib_txq_drain_notready, 0, "# drain notready");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_encapfail, CTLFLAG_RD,
    &iflib_txq_drain_encapfail, 0, "# drain encap fails");


static int iflib_encap_load_mbuf_fail;
static int iflib_encap_pad_mbuf_fail;
static int iflib_encap_txq_avail_fail;
static int iflib_encap_txd_encap_fail;

SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
    &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
    &iflib_encap_txd_encap_fail, 0, "# driver encap failures");
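/*
 * All of the counters above (and the remainder below) exist only when
 * IFLIB_DEBUG_COUNTERS is nonzero (the default tracks INVARIANTS) and
 * are exported read-only, e.g.:
 *
 *	# sysctl net.iflib.encap_load_mbuf_fail
 *	net.iflib.encap_load_mbuf_fail: 0
 */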
static int iflib_task_fn_rxs;
static int iflib_rx_intr_enables;
static int iflib_fast_intrs;
static int iflib_intr_link;
static int iflib_intr_msix;
static int iflib_rx_unavail;
static int iflib_rx_ctx_inactive;
static int iflib_rx_zero_len;
static int iflib_rx_if_input;
static int iflib_rx_mbuf_null;
static int iflib_rxd_flush;

static int iflib_verbose_debug;

SYSCTL_INT(_net_iflib, OID_AUTO, intr_link, CTLFLAG_RD,
    &iflib_intr_link, 0, "# intr link calls");
SYSCTL_INT(_net_iflib, OID_AUTO, intr_msix, CTLFLAG_RD,
    &iflib_intr_msix, 0, "# intr msix calls");
SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
    &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
    &iflib_rx_intr_enables, 0, "# rx intr enables");
SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
    &iflib_fast_intrs, 0, "# fast_intr calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
    &iflib_rx_unavail, 0, "# times rxeof called with no available data");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
    &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_zero_len, CTLFLAG_RD,
    &iflib_rx_zero_len, 0, "# times rxeof saw zero len mbuf");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
    &iflib_rx_if_input, 0, "# times rxeof called if_input");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD,
    &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf");
SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
    &iflib_rxd_flush, 0, "# times rxd_flush called");
SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
    &iflib_verbose_debug, 0, "enable verbose debugging");

#define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
static void
iflib_debug_reset(void)
{
	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
	    iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
	    iflib_txq_drain_flushing = iflib_txq_drain_oactive =
	    iflib_txq_drain_notready = iflib_txq_drain_encapfail =
	    iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
	    iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
	    iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
	    iflib_intr_link = iflib_intr_msix = iflib_rx_unavail =
	    iflib_rx_ctx_inactive = iflib_rx_zero_len = iflib_rx_if_input =
	    iflib_rx_mbuf_null = iflib_rxd_flush = 0;
}

#else
#define DBG_COUNTER_INC(name)
static void iflib_debug_reset(void) {}
#endif
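/*
 * DBG_COUNTER_INC() therefore costs one atomic add in debug builds and
 * compiles to nothing otherwise, making it safe to sprinkle in fast
 * paths, e.g. DBG_COUNTER_INC(rx_allocs) on every buffer allocation.
 */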
#define IFLIB_DEBUG 0

static void iflib_tx_structures_free(if_ctx_t ctx);
static void iflib_rx_structures_free(if_ctx_t ctx);
static int iflib_queues_alloc(if_ctx_t ctx);
static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
static int iflib_qset_structures_setup(if_ctx_t ctx);
static int iflib_msix_init(if_ctx_t ctx);
static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, char *str);
static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
static int iflib_register(if_ctx_t);
static void iflib_init_locked(if_ctx_t ctx);
static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
static void iflib_add_device_sysctl_post(if_ctx_t ctx);
static void iflib_ifmp_purge(iflib_txq_t txq);
static void _iflib_pre_assert(if_softc_ctx_t scctx);
static void iflib_stop(if_ctx_t ctx);
static void iflib_if_init_locked(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf * iflib_fixup_rx(struct mbuf *m);
#endif

#ifdef DEV_NETMAP
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

MODULE_DEPEND(iflib, netmap, 1, 1, 1);

static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init);

/*
 * device-specific sysctl variables:
 *
 * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
 *	During regular operations the CRC is stripped, but on some
 *	hardware reception of frames not multiple of 64 is slower,
 *	so using crcstrip=0 helps in benchmarks.
 *
 * iflib_rx_miss, iflib_rx_miss_bufs:
 *	count packets that might be missed due to lost interrupts.
 */
SYSCTL_DECL(_dev_netmap);
/*
 * The xl driver by default strips CRCs and we do not override it.
 */

int iflib_crcstrip = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
    CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames");

int iflib_rx_miss, iflib_rx_miss_bufs;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
    CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
    CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs");

/*
 * Register/unregister. We are already under netmap lock.
 * Only called on the first register or the last unregister.
 */
static int
iflib_netmap_register(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;
	int status;

	CTX_LOCK(ctx);
	IFDI_INTR_DISABLE(ctx);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	if (!CTX_IS_VF(ctx))
		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);

	/* enable or disable flags and callbacks in na and ifp */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	iflib_stop(ctx);
	iflib_init_locked(ctx);
	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ?
	status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
	if (status)
		nm_clear_native_flags(na);
	CTX_UNLOCK(ctx);
	return (status);
}

static int
netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init)
{
	struct netmap_adapter *na = kring->na;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int head = kring->rhead;
	struct netmap_ring *ring = kring->ring;
	bus_dmamap_t *map;
	struct if_rxd_update iru;
	if_ctx_t ctx = rxq->ifr_ctx;
	iflib_fl_t fl = &rxq->ifr_fl[0];
	uint32_t refill_pidx, nic_i;

	if (nm_i == head && __predict_true(!init))
		return 0;
	iru_init(&iru, rxq, 0 /* flid */);
	map = fl->ifl_sds.ifsd_map;
	refill_pidx = netmap_idx_k2n(kring, nm_i);
	/*
	 * IMPORTANT: we must leave one free slot in the ring,
	 * so move head back by one unit
	 */
	head = nm_prev(head, lim);
	while (nm_i != head) {
		for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]);
			uint32_t nic_i_dma = refill_pidx;
			nic_i = netmap_idx_k2n(kring, nm_i);

			MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH);

			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
				return netmap_ring_reinit(kring);

			fl->ifl_vm_addrs[tmp_pidx] = addr;
			if (__predict_false(init) && map) {
				netmap_load_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
			} else if (map && (slot->flags & NS_BUF_CHANGED)) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
			}
			slot->flags &= ~NS_BUF_CHANGED;

			nm_i = nm_next(nm_i, lim);
			fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim);
			if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1)
				continue;

			iru.iru_pidx = refill_pidx;
			iru.iru_count = tmp_pidx+1;
			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);

			refill_pidx = nic_i;
			if (map == NULL)
				continue;

			for (int n = 0; n < iru.iru_count; n++) {
				bus_dmamap_sync(fl->ifl_ifdi->idi_tag, map[nic_i_dma],
				    BUS_DMASYNC_PREREAD);
				/* XXX - change this to not use the netmap func*/
				nic_i_dma = nm_next(nic_i_dma, lim);
			}
		}
	}
	kring->nr_hwcur = head;

	if (map)
		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
	return (0);
}
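/*
 * Note the batching above: at most IFLIB_MAX_RX_REFRESH (32) buffers
 * are staged in ifl_bus_addrs[] and ifl_vm_addrs[] and handed to the
 * driver in a single isc_rxd_refill() call, so refilling a 1024-slot
 * ring costs 32 driver calls rather than 1024.
 */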
/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * All information is in the kring.
 * Userspace wants to send packets up to the one before kring->rhead,
 * kernel knows kring->nr_hwcur is the first unsent packet.
 *
 * Here we push packets out (as many as possible), and possibly
 * reclaim buffers from previously completed transmission.
 *
 * The caller (netmap) guarantees that there is only one instance
 * running at any time. Any interference with other driver
 * methods should be handled by the individual drivers.
 */
static int
iflib_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct if_pkt_info pi;

	/*
	 * interrupts on every tx packet are expensive so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;
	/* device-specific */
	if_ctx_t ctx = ifp->if_softc;
	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];

	if (txq->ift_sds.ifsd_map)
		bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);


	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap ring,
	 * nic_i is the corresponding index in the NIC ring.
	 *
	 * If we have packets to send (nm_i != head)
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() call is especially expensive,
	 * even when (as in this case) the tag is 0, so do it only
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */

	nm_i = netmap_idx_n2k(kring, kring->nr_hwcur);
	pkt_info_zero(&pi);
	pi.ipi_segs = txq->ift_segs;
	pi.ipi_qsidx = kring->ring_id;
	if (nm_i != head) {	/* we have new packets to send */
		nic_i = netmap_idx_k2n(kring, nm_i);

		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
		if (txq->ift_sds.ifsd_map)
			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);

		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int flags = (slot->flags & NS_REPORT ||
			    nic_i == 0 || nic_i == report_frequency) ?
			    IPI_TX_INTR : 0;

			/* device-specific */
			pi.ipi_len = len;
			pi.ipi_segs[0].ds_addr = paddr;
			pi.ipi_segs[0].ds_len = len;
			pi.ipi_nsegs = 1;
			pi.ipi_ndescs = 0;
			pi.ipi_pidx = nic_i;
			pi.ipi_flags = flags;

			/* Fill the slot in the NIC ring. */
			ctx->isc_txd_encap(ctx->ifc_softc, &pi);

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
			if (txq->ift_sds.ifsd_map) {
				__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);

				NM_CHECK_ADDR_LEN(na, addr, len);

				if (slot->flags & NS_BUF_CHANGED) {
					/* buffer has changed, reload map */
					netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
				}
				/* make sure changes to the buffer are synced */
				bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
				    BUS_DMASYNC_PREWRITE);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;

		/* synchronize the NIC ring */
		if (txq->ift_sds.ifsd_map)
			bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (iflib_tx_credits_update(ctx, txq)) {
		/* some tx completed, increment avail */
		nic_i = txq->ift_cidx_processed;
		kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
	}
	return (0);
}
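/*
 * Example of the interrupt coalescing above: with a 1024-slot ring,
 * report_frequency is 512, so IPI_TX_INTR is requested only when nic_i
 * is 0 or 512 (or a slot carries NS_REPORT) - roughly two tx-complete
 * interrupts per traversal of the ring instead of one per packet.
 */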
/*
 * Reconcile kernel and user view of the receive ring.
 * Same as for the txsync, this routine must be efficient.
 * The caller guarantees a single invocation, but races against
 * the rest of the driver should be handled here.
 *
 * On call, kring->rhead is the first packet that userspace wants
 * to keep, and kring->rcur is the wakeup point.
 * The kernel has previously reported packets up to kring->rtail.
 *
 * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
 * of whether or not we received an interrupt.
 */
static int
iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	uint32_t nm_i;	/* index into the netmap ring */
	uint32_t nic_i;	/* index into the NIC ring */
	u_int i, n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = netmap_idx_n2k(kring, kring->rhead);
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
	struct if_rxd_info ri;

	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;
	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
	iflib_fl_t fl = rxq->ifr_fl;
	if (head > lim)
		return netmap_ring_reinit(kring);

	/* XXX check sync modes */
	for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
		if (fl->ifl_sds.ifsd_map == NULL)
			continue;
		bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}
	/*
	 * First part: import newly received packets.
	 *
	 * nm_i is the index of the next free slot in the netmap ring,
	 * nic_i is the index of the next received packet in the NIC ring,
	 * and they may differ in case if_init() has been called while
	 * in netmap mode. For the receive ring we have
	 *
	 *	nic_i = rxr->next_check;
	 *	nm_i = kring->nr_hwtail (previous)
	 * and
	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 *
	 * rxr->next_check is set to 0 on a ring reinit
	 */
	if (netmap_no_pendintr || force_update) {
		int crclen = iflib_crcstrip ? 0 : 4;
		int error, avail;

		for (i = 0; i < rxq->ifr_nfl; i++) {
			fl = &rxq->ifr_fl[i];
			nic_i = fl->ifl_cidx;
			nm_i = netmap_idx_n2k(kring, nic_i);
			avail = iflib_rxd_avail(ctx, rxq, nic_i, USHRT_MAX);
			for (n = 0; avail > 0; n++, avail--) {
				rxd_info_zero(&ri);
				ri.iri_frags = rxq->ifr_frags;
				ri.iri_qsidx = kring->ring_id;
				ri.iri_ifp = ctx->ifc_ifp;
				ri.iri_cidx = nic_i;

				error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
				ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
				ring->slot[nm_i].flags = 0;
				if (fl->ifl_sds.ifsd_map)
					bus_dmamap_sync(fl->ifl_ifdi->idi_tag,
					    fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
				nm_i = nm_next(nm_i, lim);
				nic_i = nm_next(nic_i, lim);
			}
			if (n) { /* update the state variables */
				if (netmap_no_pendintr && !force_update) {
					/* diagnostics */
					iflib_rx_miss++;
					iflib_rx_miss_bufs += n;
				}
				fl->ifl_cidx = nic_i;
				kring->nr_hwtail = netmap_idx_k2n(kring, nm_i);
			}
			kring->nr_kflags &= ~NKR_PENDINTR;
		}
	}
	/*
	 * Second part: skip past packets that userspace has released.
	 * (kring->nr_hwcur to head excluded),
	 * and make the buffers available for reception.
	 * As usual nm_i is the index in the netmap ring,
	 * nic_i is the index in the NIC ring, and
	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 */
	/* XXX not sure how this will work with multiple free lists */
	nm_i = netmap_idx_n2k(kring, kring->nr_hwcur);

	return (netmap_fl_refill(rxq, kring, nm_i, false));
}
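/*
 * In both sync routines the netmap and NIC indices are related by
 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size; e.g. with a 512-slot
 * ring and nkr_hwofs == 3, NIC slot 510 maps to netmap slot 1.
 * netmap_idx_n2k() and netmap_idx_k2n() perform exactly this
 * translation in the two directions.
 */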
static void
iflib_netmap_intr(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;

	CTX_LOCK(ctx);
	if (onoff) {
		IFDI_INTR_ENABLE(ctx);
	} else {
		IFDI_INTR_DISABLE(ctx);
	}
	CTX_UNLOCK(ctx);
}


static int
iflib_netmap_attach(if_ctx_t ctx)
{
	struct netmap_adapter na;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;

	bzero(&na, sizeof(na));

	na.ifp = ctx->ifc_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);

	na.num_tx_desc = scctx->isc_ntxd[0];
	na.num_rx_desc = scctx->isc_nrxd[0];
	na.nm_txsync = iflib_netmap_txsync;
	na.nm_rxsync = iflib_netmap_rxsync;
	na.nm_register = iflib_netmap_register;
	na.nm_intr = iflib_netmap_intr;
	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
	return (netmap_attach(&na));
}

static void
iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_slot *slot;

	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
	if (slot == NULL)
		return;
	if (txq->ift_sds.ifsd_map == NULL)
		return;

	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {

		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
		 * netmap slot index, si
		 */
		int si = netmap_idx_n2k(&na->tx_rings[txq->ift_id], i);
		netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si));
	}
}

static void
iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_kring *kring = &na->rx_rings[rxq->ifr_id];
	struct netmap_slot *slot;
	uint32_t nm_i;

	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
	if (slot == NULL)
		return;
	nm_i = netmap_idx_n2k(kring, 0);
	netmap_fl_refill(rxq, kring, nm_i, true);
}

#define iflib_netmap_detach(ifp) netmap_detach(ifp)

#else
#define iflib_netmap_txq_init(ctx, txq)
#define iflib_netmap_rxq_init(ctx, rxq)
#define iflib_netmap_detach(ifp)

#define iflib_netmap_attach(ctx) (0)
#define netmap_rx_irq(ifp, qid, budget) (0)
#define netmap_tx_irq(ifp, qid) do {} while (0)

#endif

#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}
static __inline void
prefetch2cachelines(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
#if (CACHE_LINE_SIZE < 128)
	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
#endif
}
#else
#define prefetch(x)
#define prefetch2cachelines(x)
#endif

static void
iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
{
	iflib_fl_t fl;

	fl = &rxq->ifr_fl[flid];
	iru->iru_paddrs = fl->ifl_bus_addrs;
	iru->iru_vaddrs = &fl->ifl_vm_addrs[0];
	iru->iru_idxs = fl->ifl_rxd_idxs;
	iru->iru_qsidx = rxq->ifr_id;
	iru->iru_buf_size = fl->ifl_buf_size;
	iru->iru_flidx = fl->ifl_id;
}

static void
_iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	if (err)
		return;
	*(bus_addr_t *) arg = segs[0].ds_addr;
}
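/*
 * bus_dmamap_load() reports physical segments through a callback
 * rather than a return value; for the single-segment BUS_DMA_NOWAIT
 * loads below, the callback above simply copies segs[0].ds_addr out
 * through the arg pointer, and a failed load leaves the caller's
 * IF_BAD_DMA sentinel untouched so the error can be detected.
 */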
int
iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
{
	int err;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	device_t dev = ctx->ifc_dev;

	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));

	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    sctx->isc_q_align, 0,	/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &dma->idi_tag);
	if (err) {
		device_printf(dev,
		    "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto fail_0;
	}

	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
	if (err) {
		device_printf(dev,
		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
		    __func__, (uintmax_t)size, err);
		goto fail_1;
	}

	dma->idi_paddr = IF_BAD_DMA;
	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
	if (err || dma->idi_paddr == IF_BAD_DMA) {
		device_printf(dev,
		    "%s: bus_dmamap_load failed: %d\n",
		    __func__, err);
		goto fail_2;
	}

	dma->idi_size = size;
	return (0);

fail_2:
	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
fail_1:
	bus_dma_tag_destroy(dma->idi_tag);
fail_0:
	dma->idi_tag = NULL;

	return (err);
}

int
iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
{
	int i, err = 0;	/* don't test an uninitialized err when count is 0 */
	iflib_dma_info_t *dmaiter;

	dmaiter = dmalist;
	for (i = 0; i < count; i++, dmaiter++) {
		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
			break;
	}
	if (err)
		iflib_dma_free_multi(dmalist, i);
	return (err);
}

void
iflib_dma_free(iflib_dma_info_t dma)
{
	if (dma->idi_tag == NULL)
		return;
	if (dma->idi_paddr != IF_BAD_DMA) {
		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
		dma->idi_paddr = IF_BAD_DMA;
	}
	if (dma->idi_vaddr != NULL) {
		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
		dma->idi_vaddr = NULL;
	}
	bus_dma_tag_destroy(dma->idi_tag);
	dma->idi_tag = NULL;
}

void
iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
{
	int i;
	iflib_dma_info_t *dmaiter = dmalist;

	for (i = 0; i < count; i++, dmaiter++)
		iflib_dma_free(*dmaiter);
}
#ifdef EARLY_AP_STARTUP
static const int iflib_started = 1;
#else
/*
 * We used to abuse the smp_started flag to decide if the queues have been
 * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()).
 * That gave bad races, since the SYSINIT() runs strictly after smp_started
 * is set. Run a SYSINIT() strictly after that to just set a usable
 * completion flag.
 */

static int iflib_started;

static void
iflib_record_started(void *arg)
{
	iflib_started = 1;
}

SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST,
    iflib_record_started, NULL);
#endif

static int
iflib_fast_intr(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}

static int
iflib_fast_intr_rxtx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
	if_ctx_t ctx = rxq->ifr_ctx;	/* initialize even if ifr_ntxqirq is 0 */
	int i, cidx;

	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
		qidx_t txqid = rxq->ifr_txqid[i];

		if (!ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) {
			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
			continue;
		}
		GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
	}
	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
		cidx = rxq->ifr_cq_cidx;
	else
		cidx = rxq->ifr_fl[0].ifl_cidx;
	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
		GROUPTASK_ENQUEUE(gtask);
	else
		IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
	return (FILTER_HANDLED);
}


static int
iflib_fast_intr_ctx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;

	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}
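/*
 * All three filters above follow the same pattern: run the optional
 * driver filter at interrupt level, and unless it claims the interrupt
 * outright, defer the real work to a grouptask and return
 * FILTER_HANDLED so no ithread is ever scheduled.
 * iflib_fast_intr_rxtx() additionally reclaims tx descriptors for any
 * txqs sharing the rx interrupt before deciding whether to enqueue the
 * rx task or simply re-enable the queue interrupt.
 */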
static int
_iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    char *name)
{
	int rc, flags;
	struct resource *res;
	void *tag = NULL;
	device_t dev = ctx->ifc_dev;

	flags = RF_ACTIVE;
	if (ctx->ifc_flags & IFC_LEGACY)
		flags |= RF_SHAREABLE;
	MPASS(rid < 512);
	irq->ii_rid = rid;
	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags);
	if (res == NULL) {
		device_printf(dev,
		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
		return (ENOMEM);
	}
	irq->ii_res = res;
	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
	    filter, handler, arg, &tag);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, name ? name : "unknown", rc);
		return (rc);
	} else if (name)
		bus_describe_intr(dev, res, tag, "%s", name);

	irq->ii_tag = tag;
	return (0);
}


/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/

static int
iflib_txsd_alloc(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	int err, nsegments, ntsosegments;

	nsegments = scctx->isc_tx_nsegments;
	ntsosegments = scctx->isc_tx_tso_segments_max;
	MPASS(scctx->isc_ntxd[0] > 0);
	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
	MPASS(nsegments > 0);
	MPASS(ntsosegments > 0);
	/*
	 * Setup DMA descriptor areas.
	 */
	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    sctx->isc_tx_maxsize,	/* maxsize */
	    nsegments,			/* nsegments */
	    sctx->isc_tx_maxsegsize,	/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &txq->ift_desc_tag))) {
		device_printf(dev, "Unable to allocate TX DMA tag: %d\n", err);
		device_printf(dev, "maxsize: %ju nsegments: %d maxsegsize: %ju\n",
		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
		goto fail;
	}
	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    scctx->isc_tx_tso_size_max,		/* maxsize */
	    ntsosegments,		/* nsegments */
	    scctx->isc_tx_tso_segsize_max,	/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &txq->ift_tso_desc_tag))) {
		device_printf(dev, "Unable to allocate TX TSO DMA tag: %d\n", err);

		goto fail;
	}
	if (!(txq->ift_sds.ifsd_flags =
	    (uint8_t *) malloc(sizeof(uint8_t) *
	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		err = ENOMEM;
		goto fail;
	}
	if (!(txq->ift_sds.ifsd_m =
	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		err = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
#if defined(ACPI_DMAR) || (!(defined(__i386__) || defined(__amd64__)))
	if ((ctx->ifc_flags & IFC_DMAR) == 0)
		return (0);

	if (!(txq->ift_sds.ifsd_map =
	    (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer map memory\n");
		err = ENOMEM;
		goto fail;
	}

	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
		err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]);
		if (err != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
	}
#endif
	return (0);
fail:
	/* We free everything; this handles the case where we failed partway through */
	iflib_tx_structures_free(ctx);
	return (err);
}

static void
iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
{
	bus_dmamap_t map;

	map = NULL;
	if (txq->ift_sds.ifsd_map != NULL)
		map = txq->ift_sds.ifsd_map[i];
	if (map != NULL) {
		bus_dmamap_unload(txq->ift_desc_tag, map);
		bus_dmamap_destroy(txq->ift_desc_tag, map);
		txq->ift_sds.ifsd_map[i] = NULL;
	}
}

static void
iflib_txq_destroy(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;

	for (int i = 0; i < txq->ift_size; i++)
		iflib_txsd_destroy(ctx, txq, i);
	if (txq->ift_sds.ifsd_map != NULL) {
		free(txq->ift_sds.ifsd_map, M_IFLIB);
		txq->ift_sds.ifsd_map = NULL;
	}
	if (txq->ift_sds.ifsd_m != NULL) {
		free(txq->ift_sds.ifsd_m, M_IFLIB);
		txq->ift_sds.ifsd_m = NULL;
	}
	if (txq->ift_sds.ifsd_flags != NULL) {
		free(txq->ift_sds.ifsd_flags, M_IFLIB);
		txq->ift_sds.ifsd_flags = NULL;
	}
	if (txq->ift_desc_tag != NULL) {
		bus_dma_tag_destroy(txq->ift_desc_tag);
		txq->ift_desc_tag = NULL;
	}
	if (txq->ift_tso_desc_tag != NULL) {
		bus_dma_tag_destroy(txq->ift_tso_desc_tag);
		txq->ift_tso_desc_tag = NULL;
	}
}

static void
iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
{
	struct mbuf **mp;

	mp = &txq->ift_sds.ifsd_m[i];
	if (*mp == NULL)
		return;

	if (txq->ift_sds.ifsd_map != NULL) {
		bus_dmamap_sync(txq->ift_desc_tag,
		    txq->ift_sds.ifsd_map[i],
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->ift_desc_tag,
		    txq->ift_sds.ifsd_map[i]);
	}
	m_free(*mp);
	DBG_COUNTER_INC(tx_frees);
	*mp = NULL;
}

static int
iflib_txq_setup(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	iflib_dma_info_t di;
	int i;

	/* Set number of descriptors available */
	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	/* XXX make configurable */
	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;

	/* Reset indices */
	txq->ift_cidx_processed = 0;
	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];

	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
		bzero((void *)di->idi_vaddr, di->idi_size);

	IFDI_TXQ_SETUP(ctx, txq->ift_id);
	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
		bus_dmamap_sync(di->idi_tag, di->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}
/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
iflib_rxsd_alloc(iflib_rxq_t rxq)
{
	if_ctx_t ctx = rxq->ifr_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	iflib_fl_t fl;
	int err;

	MPASS(scctx->isc_nrxd[0] > 0);
	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);

	fl = rxq->ifr_fl;
	for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
		    1, 0,			/* alignment, bounds */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    sctx->isc_rx_maxsize,	/* maxsize */
		    sctx->isc_rx_nsegments,	/* nsegments */
		    sctx->isc_rx_maxsegsize,	/* maxsegsize */
		    0,				/* flags */
		    NULL,			/* lockfunc */
		    NULL,			/* lockarg */
		    &fl->ifl_desc_tag);
		if (err) {
			device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
			    __func__, err);
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_flags =
		    (uint8_t *) malloc(sizeof(uint8_t) *
		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer memory\n");
			err = ENOMEM;
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_m =
		    (struct mbuf **) malloc(sizeof(struct mbuf *) *
		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer memory\n");
			err = ENOMEM;
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_cl =
		    (caddr_t *) malloc(sizeof(caddr_t) *
		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer memory\n");
			err = ENOMEM;
			goto fail;
		}

		/* Create the descriptor buffer dma maps */
#if defined(ACPI_DMAR) || (!(defined(__i386__) || defined(__amd64__)))
		if ((ctx->ifc_flags & IFC_DMAR) == 0)
			continue;

		if (!(fl->ifl_sds.ifsd_map =
		    (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer map memory\n");
			err = ENOMEM;
			goto fail;
		}

		for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
			err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]);
			if (err != 0) {
				device_printf(dev, "Unable to create RX buffer DMA map\n");
				goto fail;
			}
		}
#endif
	}
	return (0);

fail:
	iflib_rx_structures_free(ctx);
	return (err);
}


/*
 * Internal service routines
 */

struct rxq_refill_cb_arg {
	int               error;
	bus_dma_segment_t seg;
	int               nseg;
};

static void
_rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct rxq_refill_cb_arg *cb_arg = arg;

	cb_arg->error = error;
	cb_arg->seg = segs[0];
	cb_arg->nseg = nseg;
}


#ifdef ACPI_DMAR
#define IS_DMAR(ctx) (ctx->ifc_flags & IFC_DMAR)
#else
#define IS_DMAR(ctx) (0)
#endif
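/*
 * In the refill path below, x86 can skip the bus_dmamap_load() call
 * when no IOMMU (DMAR) is active and take the cluster's physical
 * address directly with pmap_kextract(); with DMAR active, or on other
 * architectures, the buffer must go through bus_dmamap_load() so the
 * IOMMU/bounce machinery sees it.
 */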
1855 * 1856 * If the cluster is still set then we know a minimum sized packet was received 1857 */ 1858 bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, &frag_idx); 1859 if ((frag_idx < 0) || (frag_idx >= fl->ifl_size)) 1860 bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx); 1861 if ((cl = sd_cl[frag_idx]) == NULL) { 1862 if ((cl = sd_cl[frag_idx] = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL) 1863 break; 1864 #if MEMORY_LOGGING 1865 fl->ifl_cl_enqueued++; 1866 #endif 1867 } 1868 if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) { 1869 break; 1870 } 1871 #if MEMORY_LOGGING 1872 fl->ifl_m_enqueued++; 1873 #endif 1874 1875 DBG_COUNTER_INC(rx_allocs); 1876 #if defined(__i386__) || defined(__amd64__) 1877 if (!IS_DMAR(ctx)) { 1878 bus_addr = pmap_kextract((vm_offset_t)cl); 1879 } else 1880 #endif 1881 { 1882 struct rxq_refill_cb_arg cb_arg; 1883 iflib_rxq_t q; 1884 1885 cb_arg.error = 0; 1886 q = fl->ifl_rxq; 1887 MPASS(sd_map != NULL); 1888 MPASS(sd_map[frag_idx] != NULL); 1889 err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx], 1890 cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0); 1891 bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx], 1892 BUS_DMASYNC_PREREAD); 1893 1894 if (err != 0 || cb_arg.error) { 1895 /* 1896 * !zone_pack ? 1897 */ 1898 if (fl->ifl_zone == zone_pack) 1899 uma_zfree(fl->ifl_zone, cl); 1900 m_free(m); 1901 n = 0; 1902 goto done; 1903 } 1904 bus_addr = cb_arg.seg.ds_addr; 1905 } 1906 bit_set(fl->ifl_rx_bitmap, frag_idx); 1907 sd_flags[frag_idx] |= RX_SW_DESC_INUSE; 1908 1909 MPASS(sd_m[frag_idx] == NULL); 1910 sd_cl[frag_idx] = cl; 1911 sd_m[frag_idx] = m; 1912 fl->ifl_rxd_idxs[i] = frag_idx; 1913 fl->ifl_bus_addrs[i] = bus_addr; 1914 fl->ifl_vm_addrs[i] = cl; 1915 credits++; 1916 i++; 1917 MPASS(credits <= fl->ifl_size); 1918 if (++idx == fl->ifl_size) { 1919 fl->ifl_gen = 1; 1920 idx = 0; 1921 } 1922 if (n == 0 || i == IFLIB_MAX_RX_REFRESH) { 1923 iru.iru_pidx = pidx; 1924 iru.iru_count = i; 1925 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); 1926 i = 0; 1927 pidx = idx; 1928 fl->ifl_pidx = idx; 1929 fl->ifl_credits = credits; 1930 } 1931 1932 } 1933 done: 1934 if (i) { 1935 iru.iru_pidx = pidx; 1936 iru.iru_count = i; 1937 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); 1938 fl->ifl_pidx = idx; 1939 fl->ifl_credits = credits; 1940 } 1941 DBG_COUNTER_INC(rxd_flush); 1942 if (fl->ifl_pidx == 0) 1943 pidx = fl->ifl_size - 1; 1944 else 1945 pidx = fl->ifl_pidx - 1; 1946 1947 if (sd_map) 1948 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 1949 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1950 ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx); 1951 fl->ifl_fragidx = frag_idx; 1952 } 1953 1954 static __inline void 1955 __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max) 1956 { 1957 /* we avoid allowing pidx to catch up with cidx as it confuses ixl */ 1958 int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1; 1959 #ifdef INVARIANTS 1960 int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1; 1961 #endif 1962 1963 MPASS(fl->ifl_credits <= fl->ifl_size); 1964 MPASS(reclaimable == delta); 1965 1966 if (reclaimable > 0) 1967 _iflib_fl_refill(ctx, fl, min(max, reclaimable)); 1968 } 1969 1970 static void 1971 iflib_fl_bufs_free(iflib_fl_t fl) 1972 { 1973 iflib_dma_info_t idi = fl->ifl_ifdi; 1974 uint32_t i; 1975 1976 for (i = 0; i < fl->ifl_size; i++) { 1977 struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i]; 1978 uint8_t *sd_flags = &fl->ifl_sds.ifsd_flags[i]; 1979 caddr_t *sd_cl 
= &fl->ifl_sds.ifsd_cl[i]; 1980 1981 if (*sd_flags & RX_SW_DESC_INUSE) { 1982 if (fl->ifl_sds.ifsd_map != NULL) { 1983 bus_dmamap_t sd_map = fl->ifl_sds.ifsd_map[i]; 1984 bus_dmamap_unload(fl->ifl_desc_tag, sd_map); 1985 if (fl->ifl_rxq->ifr_ctx->ifc_in_detach) 1986 bus_dmamap_destroy(fl->ifl_desc_tag, sd_map); 1987 } 1988 if (*sd_m != NULL) { 1989 m_init(*sd_m, M_NOWAIT, MT_DATA, 0); 1990 uma_zfree(zone_mbuf, *sd_m); 1991 } 1992 if (*sd_cl != NULL) 1993 uma_zfree(fl->ifl_zone, *sd_cl); 1994 *sd_flags = 0; 1995 } else { 1996 MPASS(*sd_cl == NULL); 1997 MPASS(*sd_m == NULL); 1998 } 1999 #if MEMORY_LOGGING 2000 fl->ifl_m_dequeued++; 2001 fl->ifl_cl_dequeued++; 2002 #endif 2003 *sd_cl = NULL; 2004 *sd_m = NULL; 2005 } 2006 #ifdef INVARIANTS 2007 for (i = 0; i < fl->ifl_size; i++) { 2008 MPASS(fl->ifl_sds.ifsd_flags[i] == 0); 2009 MPASS(fl->ifl_sds.ifsd_cl[i] == NULL); 2010 MPASS(fl->ifl_sds.ifsd_m[i] == NULL); 2011 } 2012 #endif 2013 /* 2014 * Reset free list values 2015 */ 2016 fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0; 2017 bzero(idi->idi_vaddr, idi->idi_size); 2018 } 2019 2020 /********************************************************************* 2021 * 2022 * Initialize a receive ring and its buffers. 2023 * 2024 **********************************************************************/ 2025 static int 2026 iflib_fl_setup(iflib_fl_t fl) 2027 { 2028 iflib_rxq_t rxq = fl->ifl_rxq; 2029 if_ctx_t ctx = rxq->ifr_ctx; 2030 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 2031 2032 bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1); 2033 /* 2034 ** Free current RX buffer structs and their mbufs 2035 */ 2036 iflib_fl_bufs_free(fl); 2037 /* Now replenish the mbufs */ 2038 MPASS(fl->ifl_credits == 0); 2039 /* 2040 * XXX don't set the max_frame_size to larger 2041 * than the hardware can handle 2042 */ 2043 if (sctx->isc_max_frame_size <= 2048) 2044 fl->ifl_buf_size = MCLBYTES; 2045 #ifndef CONTIGMALLOC_WORKS 2046 else 2047 fl->ifl_buf_size = MJUMPAGESIZE; 2048 #else 2049 else if (sctx->isc_max_frame_size <= 4096) 2050 fl->ifl_buf_size = MJUMPAGESIZE; 2051 else if (sctx->isc_max_frame_size <= 9216) 2052 fl->ifl_buf_size = MJUM9BYTES; 2053 else 2054 fl->ifl_buf_size = MJUM16BYTES; 2055 #endif 2056 if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size) 2057 ctx->ifc_max_fl_buf_size = fl->ifl_buf_size; 2058 fl->ifl_cltype = m_gettype(fl->ifl_buf_size); 2059 fl->ifl_zone = m_getzone(fl->ifl_buf_size); 2060 2061 2062 /* avoid pre-allocating zillions of clusters to an idle card 2063 * potentially speeding up attach 2064 */ 2065 _iflib_fl_refill(ctx, fl, min(128, fl->ifl_size)); 2066 MPASS(min(128, fl->ifl_size) == fl->ifl_credits); 2067 if (min(128, fl->ifl_size) != fl->ifl_credits) 2068 return (ENOBUFS); 2069 /* 2070 * handle failure 2071 */ 2072 MPASS(rxq != NULL); 2073 MPASS(fl->ifl_ifdi != NULL); 2074 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 2075 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2076 return (0); 2077 } 2078 2079 /********************************************************************* 2080 * 2081 * Free receive ring data structures 2082 * 2083 **********************************************************************/ 2084 static void 2085 iflib_rx_sds_free(iflib_rxq_t rxq) 2086 { 2087 iflib_fl_t fl; 2088 int i; 2089 2090 if (rxq->ifr_fl != NULL) { 2091 for (i = 0; i < rxq->ifr_nfl; i++) { 2092 fl = &rxq->ifr_fl[i]; 2093 if (fl->ifl_desc_tag != NULL) { 2094 bus_dma_tag_destroy(fl->ifl_desc_tag); 2095 fl->ifl_desc_tag = NULL; 2096 } 2097 
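			/* Release the per-descriptor software state arrays. */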
			free(fl->ifl_sds.ifsd_m, M_IFLIB);
			free(fl->ifl_sds.ifsd_cl, M_IFLIB);
			/* XXX destroy maps first */
			free(fl->ifl_sds.ifsd_map, M_IFLIB);
			fl->ifl_sds.ifsd_m = NULL;
			fl->ifl_sds.ifsd_cl = NULL;
			fl->ifl_sds.ifsd_map = NULL;
		}
		free(rxq->ifr_fl, M_IFLIB);
		rxq->ifr_fl = NULL;
		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
	}
}

/*
 * Machine-independent logic
 *
 */
static void
iflib_timer(void *arg)
{
	iflib_txq_t txq = arg;
	if_ctx_t ctx = txq->ift_ctx;
	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;

	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
		return;
	/*
	** Check on the state of the TX queue(s); this
	** can be done without the lock because it's RO
	** and the HUNG state will be static if set.
	*/
	IFDI_TIMER(ctx, txq->ift_id);
	if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
	    ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
	     (sctx->isc_pause_frames == 0)))
		goto hung;

	if (ifmp_ring_is_stalled(txq->ift_br))
		txq->ift_qstatus = IFLIB_QUEUE_HUNG;
	txq->ift_cleaned_prev = txq->ift_cleaned;
	/* handle any laggards */
	if (txq->ift_db_pending)
		GROUPTASK_ENQUEUE(&txq->ift_task);

	sctx->isc_pause_frames = 0;
	if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
		callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu);
	return;
hung:
	CTX_LOCK(ctx);
	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
	device_printf(ctx->ifc_dev, "TX(%d) desc avail = %d, pidx = %d\n",
	    txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);

	IFDI_WATCHDOG_RESET(ctx);
	ctx->ifc_watchdog_events++;

	ctx->ifc_flags |= IFC_DO_RESET;
	iflib_admin_intr_deferred(ctx);
	CTX_UNLOCK(ctx);
}

static void
iflib_init_locked(if_ctx_t ctx)
{
	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
	if_t ifp = ctx->ifc_ifp;
	iflib_fl_t fl;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;


	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
	IFDI_INTR_DISABLE(ctx);

	tx_ip_csum_flags = sctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
	tx_ip6_csum_flags = sctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
	/* Set hardware offload abilities */
	if_clearhwassist(ifp);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
		if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
		if_sethwassistbits(ifp, tx_ip6_csum_flags, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO4)
		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO6)
		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);

	for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) {
		CALLOUT_LOCK(txq);
		callout_stop(&txq->ift_timer);
		CALLOUT_UNLOCK(txq);
		iflib_netmap_txq_init(ctx, txq);
	}
#ifdef INVARIANTS
	i = if_getdrvflags(ifp);
#endif
	IFDI_INIT(ctx);
	MPASS(if_getdrvflags(ifp) == i);
	for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) {
		/* XXX this should really be done on a per-queue basis */
		if (if_getcapenable(ifp) & IFCAP_NETMAP) {
			MPASS(rxq->ifr_id == i);
			iflib_netmap_rxq_init(ctx, rxq);
			continue;
2205 } 2206 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { 2207 if (iflib_fl_setup(fl)) { 2208 device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n"); 2209 goto done; 2210 } 2211 } 2212 } 2213 done: 2214 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 2215 IFDI_INTR_ENABLE(ctx); 2216 txq = ctx->ifc_txqs; 2217 for (i = 0; i < sctx->isc_ntxqsets; i++, txq++) 2218 callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, 2219 txq->ift_timer.c_cpu); 2220 } 2221 2222 static int 2223 iflib_media_change(if_t ifp) 2224 { 2225 if_ctx_t ctx = if_getsoftc(ifp); 2226 int err; 2227 2228 CTX_LOCK(ctx); 2229 if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0) 2230 iflib_init_locked(ctx); 2231 CTX_UNLOCK(ctx); 2232 return (err); 2233 } 2234 2235 static void 2236 iflib_media_status(if_t ifp, struct ifmediareq *ifmr) 2237 { 2238 if_ctx_t ctx = if_getsoftc(ifp); 2239 2240 CTX_LOCK(ctx); 2241 IFDI_UPDATE_ADMIN_STATUS(ctx); 2242 IFDI_MEDIA_STATUS(ctx, ifmr); 2243 CTX_UNLOCK(ctx); 2244 } 2245 2246 static void 2247 iflib_stop(if_ctx_t ctx) 2248 { 2249 iflib_txq_t txq = ctx->ifc_txqs; 2250 iflib_rxq_t rxq = ctx->ifc_rxqs; 2251 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2252 iflib_dma_info_t di; 2253 iflib_fl_t fl; 2254 int i, j; 2255 2256 /* Tell the stack that the interface is no longer active */ 2257 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 2258 2259 IFDI_INTR_DISABLE(ctx); 2260 DELAY(1000); 2261 IFDI_STOP(ctx); 2262 DELAY(1000); 2263 2264 iflib_debug_reset(); 2265 /* Wait for current tx queue users to exit to disarm watchdog timer. */ 2266 for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) { 2267 /* make sure all transmitters have completed before proceeding XXX */ 2268 2269 CALLOUT_LOCK(txq); 2270 callout_stop(&txq->ift_timer); 2271 CALLOUT_UNLOCK(txq); 2272 2273 /* clean any enqueued buffers */ 2274 iflib_ifmp_purge(txq); 2275 /* Free any existing tx buffers. */ 2276 for (j = 0; j < txq->ift_size; j++) { 2277 iflib_txsd_free(ctx, txq, j); 2278 } 2279 txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0; 2280 txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0; 2281 txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0; 2282 txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0; 2283 txq->ift_pullups = 0; 2284 ifmp_ring_reset_stats(txq->ift_br); 2285 for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwtxqs; j++, di++) 2286 bzero((void *)di->idi_vaddr, di->idi_size); 2287 } 2288 for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) { 2289 /* make sure all transmitters have completed before proceeding XXX */ 2290 2291 for (j = 0, di = rxq->ifr_ifdi; j < rxq->ifr_nfl; j++, di++) 2292 bzero((void *)di->idi_vaddr, di->idi_size); 2293 /* also resets the free lists pidx/cidx */ 2294 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) 2295 iflib_fl_bufs_free(fl); 2296 } 2297 } 2298 2299 static inline caddr_t 2300 calc_next_rxd(iflib_fl_t fl, int cidx) 2301 { 2302 qidx_t size; 2303 int nrxd; 2304 caddr_t start, end, cur, next; 2305 2306 nrxd = fl->ifl_size; 2307 size = fl->ifl_rxd_size; 2308 start = fl->ifl_ifdi->idi_vaddr; 2309 2310 if (__predict_false(size == 0)) 2311 return (start); 2312 cur = start + size*cidx; 2313 end = start + size*nrxd; 2314 next = CACHE_PTR_NEXT(cur); 2315 return (next < end ? 
next : start); 2316 } 2317 2318 static inline void 2319 prefetch_pkts(iflib_fl_t fl, int cidx) 2320 { 2321 int nextptr; 2322 int nrxd = fl->ifl_size; 2323 caddr_t next_rxd; 2324 2325 2326 nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1); 2327 prefetch(&fl->ifl_sds.ifsd_m[nextptr]); 2328 prefetch(&fl->ifl_sds.ifsd_cl[nextptr]); 2329 next_rxd = calc_next_rxd(fl, cidx); 2330 prefetch(next_rxd); 2331 prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]); 2332 prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]); 2333 prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]); 2334 prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]); 2335 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]); 2336 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]); 2337 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]); 2338 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]); 2339 } 2340 2341 static void 2342 rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd) 2343 { 2344 int flid, cidx; 2345 bus_dmamap_t map; 2346 iflib_fl_t fl; 2347 iflib_dma_info_t di; 2348 int next; 2349 2350 map = NULL; 2351 flid = irf->irf_flid; 2352 cidx = irf->irf_idx; 2353 fl = &rxq->ifr_fl[flid]; 2354 sd->ifsd_fl = fl; 2355 sd->ifsd_cidx = cidx; 2356 sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx]; 2357 sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx]; 2358 fl->ifl_credits--; 2359 #if MEMORY_LOGGING 2360 fl->ifl_m_dequeued++; 2361 #endif 2362 if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH) 2363 prefetch_pkts(fl, cidx); 2364 if (fl->ifl_sds.ifsd_map != NULL) { 2365 next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1); 2366 prefetch(&fl->ifl_sds.ifsd_map[next]); 2367 map = fl->ifl_sds.ifsd_map[cidx]; 2368 di = fl->ifl_ifdi; 2369 next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1); 2370 prefetch(&fl->ifl_sds.ifsd_flags[next]); 2371 bus_dmamap_sync(di->idi_tag, di->idi_map, 2372 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2373 2374 /* not valid assert if bxe really does SGE from non-contiguous elements */ 2375 MPASS(fl->ifl_cidx == cidx); 2376 if (unload) 2377 bus_dmamap_unload(fl->ifl_desc_tag, map); 2378 } 2379 fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1); 2380 if (__predict_false(fl->ifl_cidx == 0)) 2381 fl->ifl_gen = 0; 2382 if (map != NULL) 2383 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 2384 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2385 bit_clear(fl->ifl_rx_bitmap, cidx); 2386 } 2387 2388 static struct mbuf * 2389 assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd) 2390 { 2391 int i, padlen , flags; 2392 struct mbuf *m, *mh, *mt; 2393 caddr_t cl; 2394 2395 i = 0; 2396 mh = NULL; 2397 do { 2398 rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd); 2399 2400 MPASS(*sd->ifsd_cl != NULL); 2401 MPASS(*sd->ifsd_m != NULL); 2402 2403 /* Don't include zero-length frags */ 2404 if (ri->iri_frags[i].irf_len == 0) { 2405 /* XXX we can save the cluster here, but not the mbuf */ 2406 m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0); 2407 m_free(*sd->ifsd_m); 2408 *sd->ifsd_m = NULL; 2409 continue; 2410 } 2411 m = *sd->ifsd_m; 2412 *sd->ifsd_m = NULL; 2413 if (mh == NULL) { 2414 flags = M_PKTHDR|M_EXT; 2415 mh = mt = m; 2416 padlen = ri->iri_pad; 2417 } else { 2418 flags = M_EXT; 2419 mt->m_next = m; 2420 mt = m; 2421 /* assuming padding is only on the first fragment */ 2422 padlen = 0; 2423 } 2424 cl = *sd->ifsd_cl; 2425 *sd->ifsd_cl = NULL; 2426 2427 /* Can these two be made one ? 
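		 * (m_init() rewrites the mbuf header fields while m_cljset()
		 * attaches the external cluster storage, so today both calls
		 * are required.)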
*/ 2428 m_init(m, M_NOWAIT, MT_DATA, flags); 2429 m_cljset(m, cl, sd->ifsd_fl->ifl_cltype); 2430 /* 2431 * These must follow m_init and m_cljset 2432 */ 2433 m->m_data += padlen; 2434 ri->iri_len -= padlen; 2435 m->m_len = ri->iri_frags[i].irf_len; 2436 } while (++i < ri->iri_nfrags); 2437 2438 return (mh); 2439 } 2440 2441 /* 2442 * Process one software descriptor 2443 */ 2444 static struct mbuf * 2445 iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri) 2446 { 2447 struct if_rxsd sd; 2448 struct mbuf *m; 2449 2450 /* should I merge this back in now that the two paths are basically duplicated? */ 2451 if (ri->iri_nfrags == 1 && 2452 ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) { 2453 rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd); 2454 m = *sd.ifsd_m; 2455 *sd.ifsd_m = NULL; 2456 m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR); 2457 #ifndef __NO_STRICT_ALIGNMENT 2458 if (!IP_ALIGNED(m)) 2459 m->m_data += 2; 2460 #endif 2461 memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len); 2462 m->m_len = ri->iri_frags[0].irf_len; 2463 } else { 2464 m = assemble_segments(rxq, ri, &sd); 2465 } 2466 m->m_pkthdr.len = ri->iri_len; 2467 m->m_pkthdr.rcvif = ri->iri_ifp; 2468 m->m_flags |= ri->iri_flags; 2469 m->m_pkthdr.ether_vtag = ri->iri_vtag; 2470 m->m_pkthdr.flowid = ri->iri_flowid; 2471 M_HASHTYPE_SET(m, ri->iri_rsstype); 2472 m->m_pkthdr.csum_flags = ri->iri_csum_flags; 2473 m->m_pkthdr.csum_data = ri->iri_csum_data; 2474 return (m); 2475 } 2476 2477 #if defined(INET6) || defined(INET) 2478 static void 2479 iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6) 2480 { 2481 CURVNET_SET(lc->ifp->if_vnet); 2482 #if defined(INET6) 2483 *v6 = VNET(ip6_forwarding); 2484 #endif 2485 #if defined(INET) 2486 *v4 = VNET(ipforwarding); 2487 #endif 2488 CURVNET_RESTORE(); 2489 } 2490 2491 /* 2492 * Returns true if it's possible this packet could be LROed. 2493 * if it returns false, it is guaranteed that tcp_lro_rx() 2494 * would not return zero. 
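 * In other words, when this returns false the caller skips LRO entirely
 * and hands the packet straight to if_input().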
2495 */ 2496 static bool 2497 iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding) 2498 { 2499 struct ether_header *eh; 2500 uint16_t eh_type; 2501 2502 eh = mtod(m, struct ether_header *); 2503 eh_type = ntohs(eh->ether_type); 2504 switch (eh_type) { 2505 #if defined(INET6) 2506 case ETHERTYPE_IPV6: 2507 return !v6_forwarding; 2508 #endif 2509 #if defined (INET) 2510 case ETHERTYPE_IP: 2511 return !v4_forwarding; 2512 #endif 2513 } 2514 2515 return false; 2516 } 2517 #else 2518 static void 2519 iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused) 2520 { 2521 } 2522 #endif 2523 2524 static bool 2525 iflib_rxeof(iflib_rxq_t rxq, qidx_t budget) 2526 { 2527 if_ctx_t ctx = rxq->ifr_ctx; 2528 if_shared_ctx_t sctx = ctx->ifc_sctx; 2529 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2530 int avail, i; 2531 qidx_t *cidxp; 2532 struct if_rxd_info ri; 2533 int err, budget_left, rx_bytes, rx_pkts; 2534 iflib_fl_t fl; 2535 struct ifnet *ifp; 2536 int lro_enabled; 2537 bool lro_possible = false; 2538 bool v4_forwarding, v6_forwarding; 2539 2540 /* 2541 * XXX early demux data packets so that if_input processing only handles 2542 * acks in interrupt context 2543 */ 2544 struct mbuf *m, *mh, *mt, *mf; 2545 2546 ifp = ctx->ifc_ifp; 2547 mh = mt = NULL; 2548 MPASS(budget > 0); 2549 rx_pkts = rx_bytes = 0; 2550 if (sctx->isc_flags & IFLIB_HAS_RXCQ) 2551 cidxp = &rxq->ifr_cq_cidx; 2552 else 2553 cidxp = &rxq->ifr_fl[0].ifl_cidx; 2554 if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) { 2555 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) 2556 __iflib_fl_refill_lt(ctx, fl, budget + 8); 2557 DBG_COUNTER_INC(rx_unavail); 2558 return (false); 2559 } 2560 2561 for (budget_left = budget; (budget_left > 0) && (avail > 0); budget_left--, avail--) { 2562 if (__predict_false(!CTX_ACTIVE(ctx))) { 2563 DBG_COUNTER_INC(rx_ctx_inactive); 2564 break; 2565 } 2566 /* 2567 * Reset client set fields to their default values 2568 */ 2569 rxd_info_zero(&ri); 2570 ri.iri_qsidx = rxq->ifr_id; 2571 ri.iri_cidx = *cidxp; 2572 ri.iri_ifp = ifp; 2573 ri.iri_frags = rxq->ifr_frags; 2574 err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); 2575 2576 if (err) 2577 goto err; 2578 if (sctx->isc_flags & IFLIB_HAS_RXCQ) { 2579 *cidxp = ri.iri_cidx; 2580 /* Update our consumer index */ 2581 /* XXX NB: shurd - check if this is still safe */ 2582 while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) { 2583 rxq->ifr_cq_cidx -= scctx->isc_nrxd[0]; 2584 rxq->ifr_cq_gen = 0; 2585 } 2586 /* was this only a completion queue message? 
*/ 2587 if (__predict_false(ri.iri_nfrags == 0)) 2588 continue; 2589 } 2590 MPASS(ri.iri_nfrags != 0); 2591 MPASS(ri.iri_len != 0); 2592 2593 /* will advance the cidx on the corresponding free lists */ 2594 m = iflib_rxd_pkt_get(rxq, &ri); 2595 if (avail == 0 && budget_left) 2596 avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left); 2597 2598 if (__predict_false(m == NULL)) { 2599 DBG_COUNTER_INC(rx_mbuf_null); 2600 continue; 2601 } 2602 /* imm_pkt: -- cxgb */ 2603 if (mh == NULL) 2604 mh = mt = m; 2605 else { 2606 mt->m_nextpkt = m; 2607 mt = m; 2608 } 2609 } 2610 /* make sure that we can refill faster than drain */ 2611 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) 2612 __iflib_fl_refill_lt(ctx, fl, budget + 8); 2613 2614 lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO); 2615 if (lro_enabled) 2616 iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding); 2617 mt = mf = NULL; 2618 while (mh != NULL) { 2619 m = mh; 2620 mh = mh->m_nextpkt; 2621 m->m_nextpkt = NULL; 2622 #ifndef __NO_STRICT_ALIGNMENT 2623 if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL) 2624 continue; 2625 #endif 2626 rx_bytes += m->m_pkthdr.len; 2627 rx_pkts++; 2628 #if defined(INET6) || defined(INET) 2629 if (lro_enabled) { 2630 if (!lro_possible) { 2631 lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding); 2632 if (lro_possible && mf != NULL) { 2633 ifp->if_input(ifp, mf); 2634 DBG_COUNTER_INC(rx_if_input); 2635 mt = mf = NULL; 2636 } 2637 } 2638 if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) == 2639 (CSUM_L4_CALC|CSUM_L4_VALID)) { 2640 if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0) 2641 continue; 2642 } 2643 } 2644 #endif 2645 if (lro_possible) { 2646 ifp->if_input(ifp, m); 2647 DBG_COUNTER_INC(rx_if_input); 2648 continue; 2649 } 2650 2651 if (mf == NULL) 2652 mf = m; 2653 if (mt != NULL) 2654 mt->m_nextpkt = m; 2655 mt = m; 2656 } 2657 if (mf != NULL) { 2658 ifp->if_input(ifp, mf); 2659 DBG_COUNTER_INC(rx_if_input); 2660 } 2661 2662 if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes); 2663 if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts); 2664 2665 /* 2666 * Flush any outstanding LRO work 2667 */ 2668 #if defined(INET6) || defined(INET) 2669 tcp_lro_flush_all(&rxq->ifr_lc); 2670 #endif 2671 if (avail) 2672 return true; 2673 return (iflib_rxd_avail(ctx, rxq, *cidxp, 1)); 2674 err: 2675 CTX_LOCK(ctx); 2676 ctx->ifc_flags |= IFC_DO_RESET; 2677 iflib_admin_intr_deferred(ctx); 2678 CTX_UNLOCK(ctx); 2679 return (false); 2680 } 2681 2682 #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1) 2683 static inline qidx_t 2684 txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use) 2685 { 2686 qidx_t notify_count = TXD_NOTIFY_COUNT(txq); 2687 qidx_t minthresh = txq->ift_size / 8; 2688 if (in_use > 4*minthresh) 2689 return (notify_count); 2690 if (in_use > 2*minthresh) 2691 return (notify_count >> 1); 2692 if (in_use > minthresh) 2693 return (notify_count >> 3); 2694 return (0); 2695 } 2696 2697 static inline qidx_t 2698 txq_max_rs_deferred(iflib_txq_t txq) 2699 { 2700 qidx_t notify_count = TXD_NOTIFY_COUNT(txq); 2701 qidx_t minthresh = txq->ift_size / 8; 2702 if (txq->ift_in_use > 4*minthresh) 2703 return (notify_count); 2704 if (txq->ift_in_use > 2*minthresh) 2705 return (notify_count >> 1); 2706 if (txq->ift_in_use > minthresh) 2707 return (notify_count >> 2); 2708 return (2); 2709 } 2710 2711 #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags) 2712 #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG) 2713 2714 #define 
TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use)) 2715 #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq) 2716 #define TXQ_MAX_DB_CONSUMED(size) (size >> 4) 2717 2718 /* forward compatibility for cxgb */ 2719 #define FIRST_QSET(ctx) 0 2720 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets) 2721 #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets) 2722 #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx)) 2723 #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments)) 2724 2725 /* XXX we should be setting this to something other than zero */ 2726 #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh) 2727 #define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max) 2728 2729 static inline bool 2730 iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use) 2731 { 2732 qidx_t dbval, max; 2733 bool rang; 2734 2735 rang = false; 2736 max = TXQ_MAX_DB_DEFERRED(txq, in_use); 2737 if (ring || txq->ift_db_pending >= max) { 2738 dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx; 2739 ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval); 2740 txq->ift_db_pending = txq->ift_npending = 0; 2741 rang = true; 2742 } 2743 return (rang); 2744 } 2745 2746 #ifdef PKT_DEBUG 2747 static void 2748 print_pkt(if_pkt_info_t pi) 2749 { 2750 printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n", 2751 pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx); 2752 printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n", 2753 pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag); 2754 printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n", 2755 pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto); 2756 } 2757 #endif 2758 2759 #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO) 2760 #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO) 2761 2762 static int 2763 iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp) 2764 { 2765 if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx; 2766 struct ether_vlan_header *eh; 2767 struct mbuf *m, *n; 2768 2769 n = m = *mp; 2770 if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) && 2771 M_WRITABLE(m) == 0) { 2772 if ((m = m_dup(m, M_NOWAIT)) == NULL) { 2773 return (ENOMEM); 2774 } else { 2775 m_freem(*mp); 2776 n = *mp = m; 2777 } 2778 } 2779 2780 /* 2781 * Determine where frame payload starts. 2782 * Jump over vlan headers if already present, 2783 * helpful for QinQ too. 
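	 * For a single 802.1Q tag, for example, the header is
	 * dst(6) + src(6) + TPID(2) + TCI(2) + type(2) = 18 bytes,
	 * i.e. ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN.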
2784 */ 2785 if (__predict_false(m->m_len < sizeof(*eh))) { 2786 txq->ift_pullups++; 2787 if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL)) 2788 return (ENOMEM); 2789 } 2790 eh = mtod(m, struct ether_vlan_header *); 2791 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2792 pi->ipi_etype = ntohs(eh->evl_proto); 2793 pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2794 } else { 2795 pi->ipi_etype = ntohs(eh->evl_encap_proto); 2796 pi->ipi_ehdrlen = ETHER_HDR_LEN; 2797 } 2798 2799 switch (pi->ipi_etype) { 2800 #ifdef INET 2801 case ETHERTYPE_IP: 2802 { 2803 struct ip *ip = NULL; 2804 struct tcphdr *th = NULL; 2805 int minthlen; 2806 2807 minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th)); 2808 if (__predict_false(m->m_len < minthlen)) { 2809 /* 2810 * if this code bloat is causing too much of a hit 2811 * move it to a separate function and mark it noinline 2812 */ 2813 if (m->m_len == pi->ipi_ehdrlen) { 2814 n = m->m_next; 2815 MPASS(n); 2816 if (n->m_len >= sizeof(*ip)) { 2817 ip = (struct ip *)n->m_data; 2818 if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 2819 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2820 } else { 2821 txq->ift_pullups++; 2822 if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) 2823 return (ENOMEM); 2824 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 2825 } 2826 } else { 2827 txq->ift_pullups++; 2828 if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) 2829 return (ENOMEM); 2830 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 2831 if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 2832 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2833 } 2834 } else { 2835 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 2836 if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 2837 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2838 } 2839 pi->ipi_ip_hlen = ip->ip_hl << 2; 2840 pi->ipi_ipproto = ip->ip_p; 2841 pi->ipi_flags |= IPI_TX_IPV4; 2842 2843 if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP)) 2844 ip->ip_sum = 0; 2845 2846 if (IS_TSO4(pi)) { 2847 if (pi->ipi_ipproto == IPPROTO_TCP) { 2848 if (__predict_false(th == NULL)) { 2849 txq->ift_pullups++; 2850 if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL)) 2851 return (ENOMEM); 2852 th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen); 2853 } 2854 pi->ipi_tcp_hflags = th->th_flags; 2855 pi->ipi_tcp_hlen = th->th_off << 2; 2856 pi->ipi_tcp_seq = th->th_seq; 2857 } 2858 if (__predict_false(ip->ip_p != IPPROTO_TCP)) 2859 return (ENXIO); 2860 th->th_sum = in_pseudo(ip->ip_src.s_addr, 2861 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 2862 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; 2863 if (sctx->isc_flags & IFLIB_TSO_INIT_IP) { 2864 ip->ip_sum = 0; 2865 ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz); 2866 } 2867 } 2868 break; 2869 } 2870 #endif 2871 #ifdef INET6 2872 case ETHERTYPE_IPV6: 2873 { 2874 struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen); 2875 struct tcphdr *th; 2876 pi->ipi_ip_hlen = sizeof(struct ip6_hdr); 2877 2878 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) { 2879 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL)) 2880 return (ENOMEM); 2881 } 2882 th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen); 2883 2884 /* XXX-BZ this will go badly in case of ext hdrs. 
		 */
		pi->ipi_ipproto = ip6->ip6_nxt;
		pi->ipi_flags |= IPI_TX_IPV6;

		if (IS_TSO6(pi)) {
			if (pi->ipi_ipproto == IPPROTO_TCP) {
				if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
					if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
						return (ENOMEM);
					/*
					 * m_pullup() may have relocated the
					 * data; recompute the header pointers
					 * before dereferencing them.
					 */
					ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
					th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
				}
				pi->ipi_tcp_hflags = th->th_flags;
				pi->ipi_tcp_hlen = th->th_off << 2;
			}

			if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
				return (ENXIO);
			/*
			 * The corresponding flag is set by the stack in the IPv4
			 * TSO case, but not in IPv6 (at least in FreeBSD 10.2).
			 * So, set it here because the rest of the flow requires it.
			 */
			pi->ipi_csum_flags |= CSUM_TCP_IPV6;
			th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
			pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
		}
		break;
	}
#endif
	default:
		pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
		pi->ipi_ip_hlen = 0;
		break;
	}
	*mp = m;

	return (0);
}

static __noinline struct mbuf *
collapse_pkthdr(struct mbuf *m0)
{
	struct mbuf *m, *m_next, *tmp;

	m = m0;
	m_next = m->m_next;
	while (m_next != NULL && m_next->m_len == 0) {
		/* unlink the zero-length mbuf before freeing it */
		tmp = m_next;
		m_next = m_next->m_next;
		tmp->m_next = NULL;
		m_free(tmp);
	}
	m->m_next = m_next;
	if (m_next == NULL)
		return (m);
	if ((m_next->m_flags & M_EXT) == 0) {
		m = m_defrag(m, M_NOWAIT);
	} else {
		tmp = m_next->m_next;
		memcpy(m_next, m, MPKTHSIZE);
		m = m_next;
		m->m_next = tmp;
	}
	return (m);
}

/*
 * If dodgy hardware rejects the scatter-gather chain we've handed it
 * we'll need to remove the mbuf chain from ifsd_m[] before we can add the
 * m_defrag'd mbufs
 */
static __noinline struct mbuf *
iflib_remove_mbuf(iflib_txq_t txq)
{
	int ntxd, i, pidx;
	struct mbuf *m, *mh, **ifsd_m;

	pidx = txq->ift_pidx;
	ifsd_m = txq->ift_sds.ifsd_m;
	ntxd = txq->ift_size;
	mh = m = ifsd_m[pidx];
	ifsd_m[pidx] = NULL;
#if MEMORY_LOGGING
	txq->ift_dequeued++;
#endif
	i = 1;

	while (m) {
		ifsd_m[(pidx + i) & (ntxd -1)] = NULL;
#if MEMORY_LOGGING
		txq->ift_dequeued++;
#endif
		m = m->m_next;
		i++;
	}
	return (mh);
}

static int
iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
			  struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs,
			  int max_segs, int flags)
{
	if_ctx_t ctx;
	if_shared_ctx_t sctx;
	if_softc_ctx_t scctx;
	int i, next, pidx, err, ntxd, count;
	struct mbuf *m, *tmp, **ifsd_m;

	m = *m0;

	/*
	 * Please don't ever do this
	 */
	if (__predict_false(m->m_len == 0))
		*m0 = m = collapse_pkthdr(m);

	ctx = txq->ift_ctx;
	sctx = ctx->ifc_sctx;
	scctx = &ctx->ifc_softc_ctx;
	ifsd_m = txq->ift_sds.ifsd_m;
	ntxd = txq->ift_size;
	pidx = txq->ift_pidx;
	if (map != NULL) {
		uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags;

		err = bus_dmamap_load_mbuf_sg(tag, map,
		    *m0, segs, nsegs, BUS_DMA_NOWAIT);
		if (err)
			return (err);
		ifsd_flags[pidx] |= TX_SW_DESC_MAPPED;
		count = 0;
		m = *m0;
		do {
			if (__predict_false(m->m_len <= 0)) {
				tmp = m;
				m = m->m_next;
				tmp->m_next = NULL;
				m_free(tmp);
				continue;
			}
			m =
m->m_next; 3024 count++; 3025 } while (m != NULL); 3026 if (count > *nsegs) { 3027 ifsd_m[pidx] = *m0; 3028 ifsd_m[pidx]->m_flags |= M_TOOBIG; 3029 return (0); 3030 } 3031 m = *m0; 3032 count = 0; 3033 do { 3034 next = (pidx + count) & (ntxd-1); 3035 MPASS(ifsd_m[next] == NULL); 3036 ifsd_m[next] = m; 3037 count++; 3038 tmp = m; 3039 m = m->m_next; 3040 } while (m != NULL); 3041 } else { 3042 int buflen, sgsize, maxsegsz, max_sgsize; 3043 vm_offset_t vaddr; 3044 vm_paddr_t curaddr; 3045 3046 count = i = 0; 3047 m = *m0; 3048 if (m->m_pkthdr.csum_flags & CSUM_TSO) 3049 maxsegsz = scctx->isc_tx_tso_segsize_max; 3050 else 3051 maxsegsz = sctx->isc_tx_maxsegsize; 3052 3053 do { 3054 if (__predict_false(m->m_len <= 0)) { 3055 tmp = m; 3056 m = m->m_next; 3057 tmp->m_next = NULL; 3058 m_free(tmp); 3059 continue; 3060 } 3061 buflen = m->m_len; 3062 vaddr = (vm_offset_t)m->m_data; 3063 /* 3064 * see if we can't be smarter about physically 3065 * contiguous mappings 3066 */ 3067 next = (pidx + count) & (ntxd-1); 3068 MPASS(ifsd_m[next] == NULL); 3069 #if MEMORY_LOGGING 3070 txq->ift_enqueued++; 3071 #endif 3072 ifsd_m[next] = m; 3073 while (buflen > 0) { 3074 if (i >= max_segs) 3075 goto err; 3076 max_sgsize = MIN(buflen, maxsegsz); 3077 curaddr = pmap_kextract(vaddr); 3078 sgsize = PAGE_SIZE - (curaddr & PAGE_MASK); 3079 sgsize = MIN(sgsize, max_sgsize); 3080 segs[i].ds_addr = curaddr; 3081 segs[i].ds_len = sgsize; 3082 vaddr += sgsize; 3083 buflen -= sgsize; 3084 i++; 3085 } 3086 count++; 3087 tmp = m; 3088 m = m->m_next; 3089 } while (m != NULL); 3090 *nsegs = i; 3091 } 3092 return (0); 3093 err: 3094 *m0 = iflib_remove_mbuf(txq); 3095 return (EFBIG); 3096 } 3097 3098 static inline caddr_t 3099 calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid) 3100 { 3101 qidx_t size; 3102 int ntxd; 3103 caddr_t start, end, cur, next; 3104 3105 ntxd = txq->ift_size; 3106 size = txq->ift_txd_size[qid]; 3107 start = txq->ift_ifdi[qid].idi_vaddr; 3108 3109 if (__predict_false(size == 0)) 3110 return (start); 3111 cur = start + size*cidx; 3112 end = start + size*ntxd; 3113 next = CACHE_PTR_NEXT(cur); 3114 return (next < end ? next : start); 3115 } 3116 3117 /* 3118 * Pad an mbuf to ensure a minimum ethernet frame size. 
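 * (The Ethernet minimum is 64 bytes on the wire including the 4-byte
 * CRC, so the pad target handed in here is typically 60 bytes.)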
 * min_frame_size is the frame size (less CRC) to pad the mbuf to
 */
static __noinline int
iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
{
	/*
	 * 18 is enough bytes to pad an ARP packet to 46 bytes, and
	 * an ARP message is the smallest common payload I can think of
	 */
	static char pad[18];	/* just zeros */
	int n;
	struct mbuf *new_head;

	if (!M_WRITABLE(*m_head)) {
		new_head = m_dup(*m_head, M_NOWAIT);
		if (new_head == NULL) {
			m_freem(*m_head);
			device_printf(dev, "cannot pad short frame, m_dup() failed\n");
			DBG_COUNTER_INC(encap_pad_mbuf_fail);
			return (ENOMEM);
		}
		m_freem(*m_head);
		*m_head = new_head;
	}

	for (n = min_frame_size - (*m_head)->m_pkthdr.len;
	     n > 0; n -= sizeof(pad))
		if (!m_append(*m_head, min(n, sizeof(pad)), pad))
			break;

	if (n > 0) {
		m_freem(*m_head);
		device_printf(dev, "cannot pad short frame\n");
		DBG_COUNTER_INC(encap_pad_mbuf_fail);
		return (ENOBUFS);
	}

	return (0);
}

static int
iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
{
	if_ctx_t		ctx;
	if_shared_ctx_t		sctx;
	if_softc_ctx_t		scctx;
	bus_dma_segment_t	*segs;
	struct mbuf		*m_head;
	void			*next_txd;
	bus_dmamap_t		map;
	struct if_pkt_info	pi;
	int remap = 0;
	int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
	bus_dma_tag_t desc_tag;

	segs = txq->ift_segs;
	ctx = txq->ift_ctx;
	sctx = ctx->ifc_sctx;
	scctx = &ctx->ifc_softc_ctx;
	ntxd = txq->ift_size;
	m_head = *m_headp;
	map = NULL;

	/*
	 * If we're doing TSO the next descriptor to clean may be quite far ahead
	 */
	cidx = txq->ift_cidx;
	pidx = txq->ift_pidx;
	if (ctx->ifc_flags & IFC_PREFETCH) {
		next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
		/* IFLIB_HAS_TXCQ lives in the shared-ctx flags, not ifc_flags */
		if (!(sctx->isc_flags & IFLIB_HAS_TXCQ)) {
			next_txd = calc_next_txd(txq, cidx, 0);
			prefetch(next_txd);
		}

		/* prefetch the next cache line of mbuf pointers and flags */
		prefetch(&txq->ift_sds.ifsd_m[next]);
		if (txq->ift_sds.ifsd_map != NULL) {
			prefetch(&txq->ift_sds.ifsd_map[next]);
			next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
			prefetch(&txq->ift_sds.ifsd_flags[next]);
		}
	} else if (txq->ift_sds.ifsd_map != NULL)
		map = txq->ift_sds.ifsd_map[pidx];

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		desc_tag = txq->ift_tso_desc_tag;
		max_segs = scctx->isc_tx_tso_segments_max;
	} else {
		desc_tag = txq->ift_desc_tag;
		max_segs = scctx->isc_tx_nsegments;
	}
	if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
	    __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
		err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
		if (err)
			return (err);
	}
	m_head = *m_headp;

	pkt_info_zero(&pi);
	pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
	pi.ipi_pidx = pidx;
	pi.ipi_qsidx = txq->ift_id;
	pi.ipi_len = m_head->m_pkthdr.len;
	pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
	pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ?
m_head->m_pkthdr.ether_vtag : 0; 3227 3228 /* deliberate bitwise OR to make one condition */ 3229 if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) { 3230 if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) 3231 return (err); 3232 m_head = *m_headp; 3233 } 3234 3235 retry: 3236 err = iflib_busdma_load_mbuf_sg(txq, desc_tag, map, m_headp, segs, &nsegs, max_segs, BUS_DMA_NOWAIT); 3237 defrag: 3238 if (__predict_false(err)) { 3239 switch (err) { 3240 case EFBIG: 3241 /* try collapse once and defrag once */ 3242 if (remap == 0) 3243 m_head = m_collapse(*m_headp, M_NOWAIT, max_segs); 3244 if (remap == 1) 3245 m_head = m_defrag(*m_headp, M_NOWAIT); 3246 remap++; 3247 if (__predict_false(m_head == NULL)) 3248 goto defrag_failed; 3249 txq->ift_mbuf_defrag++; 3250 *m_headp = m_head; 3251 goto retry; 3252 break; 3253 case ENOMEM: 3254 txq->ift_no_tx_dma_setup++; 3255 break; 3256 default: 3257 txq->ift_no_tx_dma_setup++; 3258 m_freem(*m_headp); 3259 DBG_COUNTER_INC(tx_frees); 3260 *m_headp = NULL; 3261 break; 3262 } 3263 txq->ift_map_failed++; 3264 DBG_COUNTER_INC(encap_load_mbuf_fail); 3265 return (err); 3266 } 3267 3268 /* 3269 * XXX assumes a 1 to 1 relationship between segments and 3270 * descriptors - this does not hold true on all drivers, e.g. 3271 * cxgb 3272 */ 3273 if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) { 3274 txq->ift_no_desc_avail++; 3275 if (map != NULL) 3276 bus_dmamap_unload(desc_tag, map); 3277 DBG_COUNTER_INC(encap_txq_avail_fail); 3278 if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0) 3279 GROUPTASK_ENQUEUE(&txq->ift_task); 3280 return (ENOBUFS); 3281 } 3282 /* 3283 * On Intel cards we can greatly reduce the number of TX interrupts 3284 * we see by only setting report status on every Nth descriptor. 3285 * However, this also means that the driver will need to keep track 3286 * of the descriptors that RS was set on to check them for the DD bit. 
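	 * For example, with a 1024-descriptor ring and an update frequency
	 * of 64 (illustrative numbers), TXD_NOTIFY_COUNT() works out to 15,
	 * so under load a report-status request goes out roughly once per
	 * 15 descriptors rather than on every packet.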
3287 */ 3288 txq->ift_rs_pending += nsegs + 1; 3289 if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) || 3290 iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs - 1) <= MAX_TX_DESC(ctx)) { 3291 pi.ipi_flags |= IPI_TX_INTR; 3292 txq->ift_rs_pending = 0; 3293 } 3294 3295 pi.ipi_segs = segs; 3296 pi.ipi_nsegs = nsegs; 3297 3298 MPASS(pidx >= 0 && pidx < txq->ift_size); 3299 #ifdef PKT_DEBUG 3300 print_pkt(&pi); 3301 #endif 3302 if (map != NULL) 3303 bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE); 3304 if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) { 3305 if (map != NULL) 3306 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, 3307 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3308 DBG_COUNTER_INC(tx_encap); 3309 MPASS(pi.ipi_new_pidx < txq->ift_size); 3310 3311 ndesc = pi.ipi_new_pidx - pi.ipi_pidx; 3312 if (pi.ipi_new_pidx < pi.ipi_pidx) { 3313 ndesc += txq->ift_size; 3314 txq->ift_gen = 1; 3315 } 3316 /* 3317 * drivers can need as many as 3318 * two sentinels 3319 */ 3320 MPASS(ndesc <= pi.ipi_nsegs + 2); 3321 MPASS(pi.ipi_new_pidx != pidx); 3322 MPASS(ndesc > 0); 3323 txq->ift_in_use += ndesc; 3324 3325 /* 3326 * We update the last software descriptor again here because there may 3327 * be a sentinel and/or there may be more mbufs than segments 3328 */ 3329 txq->ift_pidx = pi.ipi_new_pidx; 3330 txq->ift_npending += pi.ipi_ndescs; 3331 } else if (__predict_false(err == EFBIG && remap < 2)) { 3332 *m_headp = m_head = iflib_remove_mbuf(txq); 3333 remap = 1; 3334 txq->ift_txd_encap_efbig++; 3335 goto defrag; 3336 } else 3337 DBG_COUNTER_INC(encap_txd_encap_fail); 3338 return (err); 3339 3340 defrag_failed: 3341 txq->ift_mbuf_defrag_failed++; 3342 txq->ift_map_failed++; 3343 m_freem(*m_headp); 3344 DBG_COUNTER_INC(tx_frees); 3345 *m_headp = NULL; 3346 return (ENOMEM); 3347 } 3348 3349 static void 3350 iflib_tx_desc_free(iflib_txq_t txq, int n) 3351 { 3352 int hasmap; 3353 uint32_t qsize, cidx, mask, gen; 3354 struct mbuf *m, **ifsd_m; 3355 uint8_t *ifsd_flags; 3356 bus_dmamap_t *ifsd_map; 3357 bool do_prefetch; 3358 3359 cidx = txq->ift_cidx; 3360 gen = txq->ift_gen; 3361 qsize = txq->ift_size; 3362 mask = qsize-1; 3363 hasmap = txq->ift_sds.ifsd_map != NULL; 3364 ifsd_flags = txq->ift_sds.ifsd_flags; 3365 ifsd_m = txq->ift_sds.ifsd_m; 3366 ifsd_map = txq->ift_sds.ifsd_map; 3367 do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH); 3368 3369 while (n--) { 3370 if (do_prefetch) { 3371 prefetch(ifsd_m[(cidx + 3) & mask]); 3372 prefetch(ifsd_m[(cidx + 4) & mask]); 3373 } 3374 if (ifsd_m[cidx] != NULL) { 3375 prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]); 3376 prefetch(&ifsd_flags[(cidx + CACHE_PTR_INCREMENT) & mask]); 3377 if (hasmap && (ifsd_flags[cidx] & TX_SW_DESC_MAPPED)) { 3378 /* 3379 * does it matter if it's not the TSO tag? 
If so we'll 3380 * have to add the type to flags 3381 */ 3382 bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]); 3383 ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED; 3384 } 3385 if ((m = ifsd_m[cidx]) != NULL) { 3386 /* XXX we don't support any drivers that batch packets yet */ 3387 MPASS(m->m_nextpkt == NULL); 3388 /* if the number of clusters exceeds the number of segments 3389 * there won't be space on the ring to save a pointer to each 3390 * cluster so we simply free the list here 3391 */ 3392 if (m->m_flags & M_TOOBIG) { 3393 m_freem(m); 3394 } else { 3395 m_free(m); 3396 } 3397 ifsd_m[cidx] = NULL; 3398 #if MEMORY_LOGGING 3399 txq->ift_dequeued++; 3400 #endif 3401 DBG_COUNTER_INC(tx_frees); 3402 } 3403 } 3404 if (__predict_false(++cidx == qsize)) { 3405 cidx = 0; 3406 gen = 0; 3407 } 3408 } 3409 txq->ift_cidx = cidx; 3410 txq->ift_gen = gen; 3411 } 3412 3413 static __inline int 3414 iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh) 3415 { 3416 int reclaim; 3417 if_ctx_t ctx = txq->ift_ctx; 3418 3419 KASSERT(thresh >= 0, ("invalid threshold to reclaim")); 3420 MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size); 3421 3422 /* 3423 * Need a rate-limiting check so that this isn't called every time 3424 */ 3425 iflib_tx_credits_update(ctx, txq); 3426 reclaim = DESC_RECLAIMABLE(txq); 3427 3428 if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) { 3429 #ifdef INVARIANTS 3430 if (iflib_verbose_debug) { 3431 printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__, 3432 txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments, 3433 reclaim, thresh); 3434 3435 } 3436 #endif 3437 return (0); 3438 } 3439 iflib_tx_desc_free(txq, reclaim); 3440 txq->ift_cleaned += reclaim; 3441 txq->ift_in_use -= reclaim; 3442 3443 return (reclaim); 3444 } 3445 3446 static struct mbuf ** 3447 _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining) 3448 { 3449 int next, size; 3450 struct mbuf **items; 3451 3452 size = r->size; 3453 next = (cidx + CACHE_PTR_INCREMENT) & (size-1); 3454 items = __DEVOLATILE(struct mbuf **, &r->items[0]); 3455 3456 prefetch(items[(cidx + offset) & (size-1)]); 3457 if (remaining > 1) { 3458 prefetch2cachelines(&items[next]); 3459 prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]); 3460 prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]); 3461 prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]); 3462 } 3463 return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)])); 3464 } 3465 3466 static void 3467 iflib_txq_check_drain(iflib_txq_t txq, int budget) 3468 { 3469 3470 ifmp_ring_check_drainage(txq->ift_br, budget); 3471 } 3472 3473 static uint32_t 3474 iflib_txq_can_drain(struct ifmp_ring *r) 3475 { 3476 iflib_txq_t txq = r->cookie; 3477 if_ctx_t ctx = txq->ift_ctx; 3478 3479 return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) || 3480 ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false)); 3481 } 3482 3483 static uint32_t 3484 iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) 3485 { 3486 iflib_txq_t txq = r->cookie; 3487 if_ctx_t ctx = txq->ift_ctx; 3488 struct ifnet *ifp = ctx->ifc_ifp; 3489 struct mbuf **mp, *m; 3490 int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail; 3491 int reclaimed, err, in_use_prev, desc_used; 3492 bool do_prefetch, ring, rang; 3493 3494 if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) || 3495 !LINK_ACTIVE(ctx))) { 3496 DBG_COUNTER_INC(txq_drain_notready); 3497 return (0); 3498 } 3499 
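	/*
	 * Reclaim completed descriptors up front so the TXQ_AVAIL()
	 * checks below see current ring occupancy before we encap.
	 */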
	reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
	rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use);
	avail = IDXDIFF(pidx, cidx, r->size);
	if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
		DBG_COUNTER_INC(txq_drain_flushing);
		for (i = 0; i < avail; i++) {
			m_free(r->items[(cidx + i) & (r->size-1)]);
			r->items[(cidx + i) & (r->size-1)] = NULL;
		}
		return (avail);
	}

	if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
		CALLOUT_LOCK(txq);
		callout_stop(&txq->ift_timer);
		CALLOUT_UNLOCK(txq);
		DBG_COUNTER_INC(txq_drain_oactive);
		return (0);
	}
	if (reclaimed)
		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	consumed = mcast_sent = bytes_sent = pkt_sent = 0;
	count = MIN(avail, TX_BATCH_SIZE);
#ifdef INVARIANTS
	if (iflib_verbose_debug)
		printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
		    avail, ctx->ifc_flags, TXQ_AVAIL(txq));
#endif
	do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
	avail = TXQ_AVAIL(txq);
	err = 0;	/* err is consulted after the loop even if nothing is dequeued */
	for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) {
		int pidx_prev, rem = do_prefetch ? count - i : 0;

		mp = _ring_peek_one(r, cidx, i, rem);
		MPASS(mp != NULL && *mp != NULL);
		if (__predict_false(*mp == (struct mbuf *)txq)) {
			consumed++;
			reclaimed++;
			continue;
		}
		in_use_prev = txq->ift_in_use;
		pidx_prev = txq->ift_pidx;
		err = iflib_encap(txq, mp);
		if (__predict_false(err)) {
			DBG_COUNTER_INC(txq_drain_encapfail);
			/* no room - bail out */
			if (err == ENOBUFS)
				break;
			consumed++;
			/* we can't send this packet - skip it */
			continue;
		}
		consumed++;
		pkt_sent++;
		m = *mp;
		DBG_COUNTER_INC(tx_sent);
		bytes_sent += m->m_pkthdr.len;
		mcast_sent += !!(m->m_flags & M_MCAST);
		avail = TXQ_AVAIL(txq);

		txq->ift_db_pending += (txq->ift_in_use - in_use_prev);
		desc_used += (txq->ift_in_use - in_use_prev);
		ETHER_BPF_MTAP(ifp, m);
		if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
			break;
		rang = iflib_txd_db_check(ctx, txq, false, in_use_prev);
	}

	/* deliberate use of bitwise or to avoid gratuitous short-circuit */
	ring = rang ?
false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx)); 3571 iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use); 3572 if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent); 3573 if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent); 3574 if (mcast_sent) 3575 if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent); 3576 #ifdef INVARIANTS 3577 if (iflib_verbose_debug) 3578 printf("consumed=%d\n", consumed); 3579 #endif 3580 return (consumed); 3581 } 3582 3583 static uint32_t 3584 iflib_txq_drain_always(struct ifmp_ring *r) 3585 { 3586 return (1); 3587 } 3588 3589 static uint32_t 3590 iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) 3591 { 3592 int i, avail; 3593 struct mbuf **mp; 3594 iflib_txq_t txq; 3595 3596 txq = r->cookie; 3597 3598 txq->ift_qstatus = IFLIB_QUEUE_IDLE; 3599 CALLOUT_LOCK(txq); 3600 callout_stop(&txq->ift_timer); 3601 CALLOUT_UNLOCK(txq); 3602 3603 avail = IDXDIFF(pidx, cidx, r->size); 3604 for (i = 0; i < avail; i++) { 3605 mp = _ring_peek_one(r, cidx, i, avail - i); 3606 if (__predict_false(*mp == (struct mbuf *)txq)) 3607 continue; 3608 m_freem(*mp); 3609 } 3610 MPASS(ifmp_ring_is_stalled(r) == 0); 3611 return (avail); 3612 } 3613 3614 static void 3615 iflib_ifmp_purge(iflib_txq_t txq) 3616 { 3617 struct ifmp_ring *r; 3618 3619 r = txq->ift_br; 3620 r->drain = iflib_txq_drain_free; 3621 r->can_drain = iflib_txq_drain_always; 3622 3623 ifmp_ring_check_drainage(r, r->size); 3624 3625 r->drain = iflib_txq_drain; 3626 r->can_drain = iflib_txq_can_drain; 3627 } 3628 3629 static void 3630 _task_fn_tx(void *context) 3631 { 3632 iflib_txq_t txq = context; 3633 if_ctx_t ctx = txq->ift_ctx; 3634 struct ifnet *ifp = ctx->ifc_ifp; 3635 int rc; 3636 3637 #ifdef IFLIB_DIAGNOSTICS 3638 txq->ift_cpu_exec_count[curcpu]++; 3639 #endif 3640 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) 3641 return; 3642 if (if_getcapenable(ifp) & IFCAP_NETMAP) { 3643 if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false)) 3644 netmap_tx_irq(ifp, txq->ift_id); 3645 IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); 3646 return; 3647 } 3648 if (txq->ift_db_pending) 3649 ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE); 3650 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); 3651 if (ctx->ifc_flags & IFC_LEGACY) 3652 IFDI_INTR_ENABLE(ctx); 3653 else { 3654 rc = IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); 3655 KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver")); 3656 } 3657 } 3658 3659 static void 3660 _task_fn_rx(void *context) 3661 { 3662 iflib_rxq_t rxq = context; 3663 if_ctx_t ctx = rxq->ifr_ctx; 3664 bool more; 3665 int rc; 3666 uint16_t budget; 3667 3668 #ifdef IFLIB_DIAGNOSTICS 3669 rxq->ifr_cpu_exec_count[curcpu]++; 3670 #endif 3671 DBG_COUNTER_INC(task_fn_rxs); 3672 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) 3673 return; 3674 more = true; 3675 #ifdef DEV_NETMAP 3676 if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) { 3677 u_int work = 0; 3678 if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) { 3679 more = false; 3680 } 3681 } 3682 #endif 3683 budget = ctx->ifc_sysctl_rx_budget; 3684 if (budget == 0) 3685 budget = 16; /* XXX */ 3686 if (more == false || (more = iflib_rxeof(rxq, budget)) == false) { 3687 if (ctx->ifc_flags & IFC_LEGACY) 3688 IFDI_INTR_ENABLE(ctx); 3689 else { 3690 DBG_COUNTER_INC(rx_intr_enables); 3691 rc = IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); 3692 KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not 
implemented in driver")); 3693 } 3694 } 3695 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) 3696 return; 3697 if (more) 3698 GROUPTASK_ENQUEUE(&rxq->ifr_task); 3699 } 3700 3701 static void 3702 _task_fn_admin(void *context) 3703 { 3704 if_ctx_t ctx = context; 3705 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 3706 iflib_txq_t txq; 3707 int i; 3708 3709 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) { 3710 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) { 3711 return; 3712 } 3713 } 3714 3715 CTX_LOCK(ctx); 3716 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) { 3717 CALLOUT_LOCK(txq); 3718 callout_stop(&txq->ift_timer); 3719 CALLOUT_UNLOCK(txq); 3720 } 3721 IFDI_UPDATE_ADMIN_STATUS(ctx); 3722 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) 3723 callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu); 3724 IFDI_LINK_INTR_ENABLE(ctx); 3725 if (ctx->ifc_flags & IFC_DO_RESET) { 3726 ctx->ifc_flags &= ~IFC_DO_RESET; 3727 iflib_if_init_locked(ctx); 3728 } 3729 CTX_UNLOCK(ctx); 3730 3731 if (LINK_ACTIVE(ctx) == 0) 3732 return; 3733 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) 3734 iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); 3735 } 3736 3737 3738 static void 3739 _task_fn_iov(void *context) 3740 { 3741 if_ctx_t ctx = context; 3742 3743 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) 3744 return; 3745 3746 CTX_LOCK(ctx); 3747 IFDI_VFLR_HANDLE(ctx); 3748 CTX_UNLOCK(ctx); 3749 } 3750 3751 static int 3752 iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS) 3753 { 3754 int err; 3755 if_int_delay_info_t info; 3756 if_ctx_t ctx; 3757 3758 info = (if_int_delay_info_t)arg1; 3759 ctx = info->iidi_ctx; 3760 info->iidi_req = req; 3761 info->iidi_oidp = oidp; 3762 CTX_LOCK(ctx); 3763 err = IFDI_SYSCTL_INT_DELAY(ctx, info); 3764 CTX_UNLOCK(ctx); 3765 return (err); 3766 } 3767 3768 /********************************************************************* 3769 * 3770 * IFNET FUNCTIONS 3771 * 3772 **********************************************************************/ 3773 3774 static void 3775 iflib_if_init_locked(if_ctx_t ctx) 3776 { 3777 iflib_stop(ctx); 3778 iflib_init_locked(ctx); 3779 } 3780 3781 3782 static void 3783 iflib_if_init(void *arg) 3784 { 3785 if_ctx_t ctx = arg; 3786 3787 CTX_LOCK(ctx); 3788 iflib_if_init_locked(ctx); 3789 CTX_UNLOCK(ctx); 3790 } 3791 3792 static int 3793 iflib_if_transmit(if_t ifp, struct mbuf *m) 3794 { 3795 if_ctx_t ctx = if_getsoftc(ifp); 3796 3797 iflib_txq_t txq; 3798 int err, qidx; 3799 3800 if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) { 3801 DBG_COUNTER_INC(tx_frees); 3802 m_freem(m); 3803 return (ENOBUFS); 3804 } 3805 3806 MPASS(m->m_nextpkt == NULL); 3807 qidx = 0; 3808 if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m)) 3809 qidx = QIDX(ctx, m); 3810 /* 3811 * XXX calculate buf_ring based on flowid (divvy up bits?) 
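	 * For now QIDX() simply masks the flowid with isc_rss_table_mask
	 * and reduces it modulo NTXQSETS(), so distinct flows may share a
	 * transmit queue.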
3812 */ 3813 txq = &ctx->ifc_txqs[qidx]; 3814 3815 #ifdef DRIVER_BACKPRESSURE 3816 if (txq->ift_closed) { 3817 while (m != NULL) { 3818 next = m->m_nextpkt; 3819 m->m_nextpkt = NULL; 3820 m_freem(m); 3821 m = next; 3822 } 3823 return (ENOBUFS); 3824 } 3825 #endif 3826 #ifdef notyet 3827 qidx = count = 0; 3828 mp = marr; 3829 next = m; 3830 do { 3831 count++; 3832 next = next->m_nextpkt; 3833 } while (next != NULL); 3834 3835 if (count > nitems(marr)) 3836 if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) { 3837 /* XXX check nextpkt */ 3838 m_freem(m); 3839 /* XXX simplify for now */ 3840 DBG_COUNTER_INC(tx_frees); 3841 return (ENOBUFS); 3842 } 3843 for (next = m, i = 0; next != NULL; i++) { 3844 mp[i] = next; 3845 next = next->m_nextpkt; 3846 mp[i]->m_nextpkt = NULL; 3847 } 3848 #endif 3849 DBG_COUNTER_INC(tx_seen); 3850 err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE); 3851 3852 GROUPTASK_ENQUEUE(&txq->ift_task); 3853 if (err) { 3854 /* support forthcoming later */ 3855 #ifdef DRIVER_BACKPRESSURE 3856 txq->ift_closed = TRUE; 3857 #endif 3858 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); 3859 m_freem(m); 3860 } 3861 3862 return (err); 3863 } 3864 3865 static void 3866 iflib_if_qflush(if_t ifp) 3867 { 3868 if_ctx_t ctx = if_getsoftc(ifp); 3869 iflib_txq_t txq = ctx->ifc_txqs; 3870 int i; 3871 3872 CTX_LOCK(ctx); 3873 ctx->ifc_flags |= IFC_QFLUSH; 3874 CTX_UNLOCK(ctx); 3875 for (i = 0; i < NTXQSETS(ctx); i++, txq++) 3876 while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br))) 3877 iflib_txq_check_drain(txq, 0); 3878 CTX_LOCK(ctx); 3879 ctx->ifc_flags &= ~IFC_QFLUSH; 3880 CTX_UNLOCK(ctx); 3881 3882 if_qflush(ifp); 3883 } 3884 3885 3886 #define IFCAP_FLAGS (IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \ 3887 IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \ 3888 IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO) 3889 3890 static int 3891 iflib_if_ioctl(if_t ifp, u_long command, caddr_t data) 3892 { 3893 if_ctx_t ctx = if_getsoftc(ifp); 3894 struct ifreq *ifr = (struct ifreq *)data; 3895 #if defined(INET) || defined(INET6) 3896 struct ifaddr *ifa = (struct ifaddr *)data; 3897 #endif 3898 bool avoid_reset = FALSE; 3899 int err = 0, reinit = 0, bits; 3900 3901 switch (command) { 3902 case SIOCSIFADDR: 3903 #ifdef INET 3904 if (ifa->ifa_addr->sa_family == AF_INET) 3905 avoid_reset = TRUE; 3906 #endif 3907 #ifdef INET6 3908 if (ifa->ifa_addr->sa_family == AF_INET6) 3909 avoid_reset = TRUE; 3910 #endif 3911 /* 3912 ** Calling init results in link renegotiation, 3913 ** so we avoid doing it when possible. 
3914 */ 3915 if (avoid_reset) { 3916 if_setflagbits(ifp, IFF_UP,0); 3917 if (!(if_getdrvflags(ifp)& IFF_DRV_RUNNING)) 3918 reinit = 1; 3919 #ifdef INET 3920 if (!(if_getflags(ifp) & IFF_NOARP)) 3921 arp_ifinit(ifp, ifa); 3922 #endif 3923 } else 3924 err = ether_ioctl(ifp, command, data); 3925 break; 3926 case SIOCSIFMTU: 3927 CTX_LOCK(ctx); 3928 if (ifr->ifr_mtu == if_getmtu(ifp)) { 3929 CTX_UNLOCK(ctx); 3930 break; 3931 } 3932 bits = if_getdrvflags(ifp); 3933 /* stop the driver and free any clusters before proceeding */ 3934 iflib_stop(ctx); 3935 3936 if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) { 3937 if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size) 3938 ctx->ifc_flags |= IFC_MULTISEG; 3939 else 3940 ctx->ifc_flags &= ~IFC_MULTISEG; 3941 err = if_setmtu(ifp, ifr->ifr_mtu); 3942 } 3943 iflib_init_locked(ctx); 3944 if_setdrvflags(ifp, bits); 3945 CTX_UNLOCK(ctx); 3946 break; 3947 case SIOCSIFFLAGS: 3948 CTX_LOCK(ctx); 3949 if (if_getflags(ifp) & IFF_UP) { 3950 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 3951 if ((if_getflags(ifp) ^ ctx->ifc_if_flags) & 3952 (IFF_PROMISC | IFF_ALLMULTI)) { 3953 err = IFDI_PROMISC_SET(ctx, if_getflags(ifp)); 3954 } 3955 } else 3956 reinit = 1; 3957 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 3958 iflib_stop(ctx); 3959 } 3960 ctx->ifc_if_flags = if_getflags(ifp); 3961 CTX_UNLOCK(ctx); 3962 break; 3963 case SIOCADDMULTI: 3964 case SIOCDELMULTI: 3965 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 3966 CTX_LOCK(ctx); 3967 IFDI_INTR_DISABLE(ctx); 3968 IFDI_MULTI_SET(ctx); 3969 IFDI_INTR_ENABLE(ctx); 3970 CTX_UNLOCK(ctx); 3971 } 3972 break; 3973 case SIOCSIFMEDIA: 3974 CTX_LOCK(ctx); 3975 IFDI_MEDIA_SET(ctx); 3976 CTX_UNLOCK(ctx); 3977 /* falls thru */ 3978 case SIOCGIFMEDIA: 3979 case SIOCGIFXMEDIA: 3980 err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command); 3981 break; 3982 case SIOCGI2C: 3983 { 3984 struct ifi2creq i2c; 3985 3986 err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 3987 if (err != 0) 3988 break; 3989 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { 3990 err = EINVAL; 3991 break; 3992 } 3993 if (i2c.len > sizeof(i2c.data)) { 3994 err = EINVAL; 3995 break; 3996 } 3997 3998 if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0) 3999 err = copyout(&i2c, ifr_data_get_ptr(ifr), 4000 sizeof(i2c)); 4001 break; 4002 } 4003 case SIOCSIFCAP: 4004 { 4005 int mask, setmask; 4006 4007 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 4008 setmask = 0; 4009 #ifdef TCP_OFFLOAD 4010 setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6); 4011 #endif 4012 setmask |= (mask & IFCAP_FLAGS); 4013 4014 if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) 4015 setmask |= (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6); 4016 if ((mask & IFCAP_WOL) && 4017 (if_getcapabilities(ifp) & IFCAP_WOL) != 0) 4018 setmask |= (mask & (IFCAP_WOL_MCAST|IFCAP_WOL_MAGIC)); 4019 if_vlancap(ifp); 4020 /* 4021 * want to ensure that traffic has stopped before we change any of the flags 4022 */ 4023 if (setmask) { 4024 CTX_LOCK(ctx); 4025 bits = if_getdrvflags(ifp); 4026 if (bits & IFF_DRV_RUNNING) 4027 iflib_stop(ctx); 4028 if_togglecapenable(ifp, setmask); 4029 if (bits & IFF_DRV_RUNNING) 4030 iflib_init_locked(ctx); 4031 if_setdrvflags(ifp, bits); 4032 CTX_UNLOCK(ctx); 4033 } 4034 break; 4035 } 4036 case SIOCGPRIVATE_0: 4037 case SIOCSDRVSPEC: 4038 case SIOCGDRVSPEC: 4039 CTX_LOCK(ctx); 4040 err = IFDI_PRIV_IOCTL(ctx, command, data); 4041 CTX_UNLOCK(ctx); 4042 break; 4043 default: 4044 err = ether_ioctl(ifp, command, data); 4045 break; 4046 } 4047 if (reinit) 4048 iflib_if_init(ctx); 4049 return (err); 4050 } 4051 
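/*
 * Interface counters are not accumulated in iflib itself; the stack's
 * if_get_counter hook (installed at register time below) lands in
 * iflib_if_get_counter() and is forwarded to the driver's kobj method.
 * A minimal driver-side handler might look like the sketch below --
 * the driver name and stat field are hypothetical:
 *
 *	static uint64_t
 *	exdrv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
 *	{
 *		struct exdrv_softc *sc = iflib_get_softc(ctx);
 *
 *		switch (cnt) {
 *		case IFCOUNTER_IERRORS:
 *			return (sc->rx_errors);
 *		default:
 *			return (if_get_counter_default(iflib_get_ifp(ctx), cnt));
 *		}
 *	}
 */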
4052 static uint64_t 4053 iflib_if_get_counter(if_t ifp, ift_counter cnt) 4054 { 4055 if_ctx_t ctx = if_getsoftc(ifp); 4056 4057 return (IFDI_GET_COUNTER(ctx, cnt)); 4058 } 4059 4060 /********************************************************************* 4061 * 4062 * OTHER FUNCTIONS EXPORTED TO THE STACK 4063 * 4064 **********************************************************************/ 4065 4066 static void 4067 iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag) 4068 { 4069 if_ctx_t ctx = if_getsoftc(ifp); 4070 4071 if ((void *)ctx != arg) 4072 return; 4073 4074 if ((vtag == 0) || (vtag > 4095)) 4075 return; 4076 4077 CTX_LOCK(ctx); 4078 IFDI_VLAN_REGISTER(ctx, vtag); 4079 /* Re-init to load the changes */ 4080 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) 4081 iflib_if_init_locked(ctx); 4082 CTX_UNLOCK(ctx); 4083 } 4084 4085 static void 4086 iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag) 4087 { 4088 if_ctx_t ctx = if_getsoftc(ifp); 4089 4090 if ((void *)ctx != arg) 4091 return; 4092 4093 if ((vtag == 0) || (vtag > 4095)) 4094 return; 4095 4096 CTX_LOCK(ctx); 4097 IFDI_VLAN_UNREGISTER(ctx, vtag); 4098 /* Re-init to load the changes */ 4099 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) 4100 iflib_if_init_locked(ctx); 4101 CTX_UNLOCK(ctx); 4102 } 4103 4104 static void 4105 iflib_led_func(void *arg, int onoff) 4106 { 4107 if_ctx_t ctx = arg; 4108 4109 CTX_LOCK(ctx); 4110 IFDI_LED_FUNC(ctx, onoff); 4111 CTX_UNLOCK(ctx); 4112 } 4113 4114 /********************************************************************* 4115 * 4116 * BUS FUNCTION DEFINITIONS 4117 * 4118 **********************************************************************/ 4119 4120 int 4121 iflib_device_probe(device_t dev) 4122 { 4123 pci_vendor_info_t *ent; 4124 4125 uint16_t pci_vendor_id, pci_device_id; 4126 uint16_t pci_subvendor_id, pci_subdevice_id; 4127 uint16_t pci_rev_id; 4128 if_shared_ctx_t sctx; 4129 4130 if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) 4131 return (ENOTSUP); 4132 4133 pci_vendor_id = pci_get_vendor(dev); 4134 pci_device_id = pci_get_device(dev); 4135 pci_subvendor_id = pci_get_subvendor(dev); 4136 pci_subdevice_id = pci_get_subdevice(dev); 4137 pci_rev_id = pci_get_revid(dev); 4138 if (sctx->isc_parse_devinfo != NULL) 4139 sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id); 4140 4141 ent = sctx->isc_vendor_info; 4142 while (ent->pvi_vendor_id != 0) { 4143 if (pci_vendor_id != ent->pvi_vendor_id) { 4144 ent++; 4145 continue; 4146 } 4147 if ((pci_device_id == ent->pvi_device_id) && 4148 ((pci_subvendor_id == ent->pvi_subvendor_id) || 4149 (ent->pvi_subvendor_id == 0)) && 4150 ((pci_subdevice_id == ent->pvi_subdevice_id) || 4151 (ent->pvi_subdevice_id == 0)) && 4152 ((pci_rev_id == ent->pvi_rev_id) || 4153 (ent->pvi_rev_id == 0))) { 4154 4155 device_set_desc_copy(dev, ent->pvi_name); 4156 /* this needs to be changed to zero if the bus probing code 4157 * ever stops re-probing on best match because the sctx 4158 * may have its values over written by register calls 4159 * in subsequent probes 4160 */ 4161 return (BUS_PROBE_DEFAULT); 4162 } 4163 ent++; 4164 } 4165 return (ENXIO); 4166 } 4167 4168 int 4169 iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp) 4170 { 4171 int err, rid, msix, msix_bar; 4172 if_ctx_t ctx; 4173 if_t ifp; 4174 if_softc_ctx_t scctx; 4175 int i; 4176 uint16_t main_txq; 4177 uint16_t main_rxq; 4178 4179 4180 ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO); 4181 4182 
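	/*
	 * A NULL softc here means the driver wants iflib to allocate it:
	 * carve out isc_driver->size bytes and flag the context so that
	 * deregister knows to free the allocation (IFC_SC_ALLOCATED).
	 */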
if (sc == NULL) { 4183 sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); 4184 device_set_softc(dev, ctx); 4185 ctx->ifc_flags |= IFC_SC_ALLOCATED; 4186 } 4187 4188 ctx->ifc_sctx = sctx; 4189 ctx->ifc_dev = dev; 4190 ctx->ifc_softc = sc; 4191 4192 if ((err = iflib_register(ctx)) != 0) { 4193 device_printf(dev, "iflib_register failed %d\n", err); 4194 return (err); 4195 } 4196 iflib_add_device_sysctl_pre(ctx); 4197 4198 scctx = &ctx->ifc_softc_ctx; 4199 ifp = ctx->ifc_ifp; 4200 ctx->ifc_nhwtxqs = sctx->isc_ntxqs; 4201 4202 /* 4203 * XXX sanity check that ntxd & nrxd are a power of 2 4204 */ 4205 if (ctx->ifc_sysctl_ntxqs != 0) 4206 scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs; 4207 if (ctx->ifc_sysctl_nrxqs != 0) 4208 scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs; 4209 4210 for (i = 0; i < sctx->isc_ntxqs; i++) { 4211 if (ctx->ifc_sysctl_ntxds[i] != 0) 4212 scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i]; 4213 else 4214 scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i]; 4215 } 4216 4217 for (i = 0; i < sctx->isc_nrxqs; i++) { 4218 if (ctx->ifc_sysctl_nrxds[i] != 0) 4219 scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i]; 4220 else 4221 scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i]; 4222 } 4223 4224 for (i = 0; i < sctx->isc_nrxqs; i++) { 4225 if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) { 4226 device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n", 4227 i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]); 4228 scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i]; 4229 } 4230 if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) { 4231 device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n", 4232 i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]); 4233 scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i]; 4234 } 4235 } 4236 4237 for (i = 0; i < sctx->isc_ntxqs; i++) { 4238 if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) { 4239 device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n", 4240 i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]); 4241 scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i]; 4242 } 4243 if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) { 4244 device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n", 4245 i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]); 4246 scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i]; 4247 } 4248 } 4249 4250 if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { 4251 device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); 4252 return (err); 4253 } 4254 _iflib_pre_assert(scctx); 4255 ctx->ifc_txrx = *scctx->isc_txrx; 4256 4257 #ifdef INVARIANTS 4258 MPASS(scctx->isc_capenable); 4259 if (scctx->isc_capenable & IFCAP_TXCSUM) 4260 MPASS(scctx->isc_tx_csum_flags); 4261 #endif 4262 4263 if_setcapabilities(ifp, scctx->isc_capenable | IFCAP_HWSTATS); 4264 if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS); 4265 4266 if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets)) 4267 scctx->isc_ntxqsets = scctx->isc_ntxqsets_max; 4268 if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets)) 4269 scctx->isc_nrxqsets = scctx->isc_nrxqsets_max; 4270 4271 #ifdef ACPI_DMAR 4272 if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL) 4273 ctx->ifc_flags |= IFC_DMAR; 4274 #elif !(defined(__i386__) || defined(__amd64__)) 4275 /* set unconditionally for !x86 */ 4276 ctx->ifc_flags |= IFC_DMAR; 4277 #endif 4278 4279 msix_bar = scctx->isc_msix_bar; 4280 main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 
1 : 0;
	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;

	/* XXX change for per-queue sizes */
	device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
	    scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
	for (i = 0; i < sctx->isc_nrxqs; i++) {
		if (!powerof2(scctx->isc_nrxd[i])) {
			/* round down instead? */
			device_printf(dev, "# rx descriptors must be a power of 2\n");
			err = EINVAL;
			goto fail;
		}
	}
	for (i = 0; i < sctx->isc_ntxqs; i++) {
		if (!powerof2(scctx->isc_ntxd[i])) {
			device_printf(dev,
			    "# tx descriptors must be a power of 2\n");
			err = EINVAL;
			goto fail;
		}
	}

	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
	    MAX_SINGLE_PACKET_FRACTION)
		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
		    MAX_SINGLE_PACKET_FRACTION);
	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
	    MAX_SINGLE_PACKET_FRACTION)
		scctx->isc_tx_tso_segments_max = max(1,
		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);

	/*
	 * Protect the stack against modern hardware
	 */
	if (scctx->isc_tx_tso_size_max > FREEBSD_TSO_SIZE_MAX)
		scctx->isc_tx_tso_size_max = FREEBSD_TSO_SIZE_MAX;

	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
	ifp->if_hw_tsomaxsegcount = scctx->isc_tx_tso_segments_max;
	ifp->if_hw_tsomax = scctx->isc_tx_tso_size_max;
	ifp->if_hw_tsomaxsegsize = scctx->isc_tx_tso_segsize_max;
	if (scctx->isc_rss_table_size == 0)
		scctx->isc_rss_table_size = 64;
	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;

	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
	/* XXX format name */
	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");

	/* Set up cpu set.  If it fails, use the set of all CPUs. */
	if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
		device_printf(dev, "Unable to fetch CPU list\n");
		CPU_COPY(&all_cpus, &ctx->ifc_cpus);
	}
	MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);

	/*
	** Now set up MSI or MSI-X; this should
	** return the number of supported
	** vectors. (Will be 1 for MSI)
	*/
	if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
		msix = scctx->isc_vectors;
	} else if (scctx->isc_msix_bar != 0)
		/*
		 * The simple fact that isc_msix_bar is not 0 does not mean we
		 * have a good value there that is known to work.
		 */
		msix = iflib_msix_init(ctx);
	else {
		scctx->isc_vectors = 1;
		scctx->isc_ntxqsets = 1;
		scctx->isc_nrxqsets = 1;
		scctx->isc_intr = IFLIB_INTR_LEGACY;
		msix = 0;
	}
	/* Get memory for the station queues */
	if ((err = iflib_queues_alloc(ctx))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		goto fail;
	}

	if ((err = iflib_qset_structures_setup(ctx))) {
		device_printf(dev, "qset structure setup failed %d\n", err);
		goto fail_queues;
	}

	/*
	 * Group taskqueues aren't properly set up until SMP is started,
	 * so we disable interrupts until we can handle them post
	 * SI_SUB_SMP.
	 *
	 * XXX: disabling interrupts doesn't actually work, at least for
	 * the non-MSI case.  When they occur before SI_SUB_SMP completes,
	 * we do null handling and depend on this not causing too large an
	 * interrupt storm.
	 */
	IFDI_INTR_DISABLE(ctx);
	if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) {
		device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err);
		goto fail_intr_free;
	}
	if (msix <= 1) {
		rid = 0;
		if (scctx->isc_intr == IFLIB_INTR_MSI) {
			MPASS(msix == 1);
			rid = 1;
		}
		if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
			device_printf(dev, "iflib_legacy_setup failed %d\n", err);
			goto fail_intr_free;
		}
	}
	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
		goto fail_detach;
	}
	if ((err = iflib_netmap_attach(ctx))) {
		device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
		goto fail_detach;
	}
	*ctxp = ctx;

	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
	iflib_add_device_sysctl_post(ctx);
	ctx->ifc_flags |= IFC_INIT_DONE;
	return (0);
fail_detach:
	ether_ifdetach(ctx->ifc_ifp);
fail_intr_free:
	if (scctx->isc_intr == IFLIB_INTR_MSIX || scctx->isc_intr == IFLIB_INTR_MSI)
		pci_release_msi(ctx->ifc_dev);
fail_queues:
	/* XXX free queues */
fail:
	IFDI_DETACH(ctx);
	return (err);
}

int
iflib_device_attach(device_t dev)
{
	if_ctx_t ctx;
	if_shared_ctx_t sctx;

	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
		return (ENOTSUP);

	pci_enable_busmaster(dev);

	return (iflib_device_register(dev, NULL, sctx, &ctx));
}

int
iflib_device_deregister(if_ctx_t ctx)
{
	if_t ifp = ctx->ifc_ifp;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	device_t dev = ctx->ifc_dev;
	int i, j;
	struct taskqgroup *tqg;
	iflib_fl_t fl;

	/* Make sure VLANs are not using the driver */
	if (if_vlantrunkinuse(ifp)) {
		device_printf(dev, "VLAN in use, detach first\n");
		return (EBUSY);
	}

	CTX_LOCK(ctx);
	ctx->ifc_in_detach = 1;
	iflib_stop(ctx);
	CTX_UNLOCK(ctx);

	/* Unregister VLAN events */
	if (ctx->ifc_vlan_attach_event != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
	if (ctx->ifc_vlan_detach_event != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);

	iflib_netmap_detach(ifp);
	ether_ifdetach(ifp);
	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
	CTX_LOCK_DESTROY(ctx);
	if (ctx->ifc_led_dev != NULL)
		led_destroy(ctx->ifc_led_dev);
	/* XXX drain any dependent tasks */
	tqg = qgroup_if_io_tqg;
	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
		callout_drain(&txq->ift_timer);
		if (txq->ift_task.gt_uniq != NULL)
			taskqgroup_detach(tqg, &txq->ift_task);
	}
	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
		if (rxq->ifr_task.gt_uniq != NULL)
			taskqgroup_detach(tqg, &rxq->ifr_task);

		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
			free(fl->ifl_rx_bitmap, M_IFLIB);
	}
	tqg = qgroup_if_config_tqg;
	if (ctx->ifc_admin_task.gt_uniq != NULL)
		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
	if (ctx->ifc_vflr_task.gt_uniq != NULL)
		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);

	IFDI_DETACH(ctx);
	device_set_softc(ctx->ifc_dev, NULL);
	if
(ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) { 4493 pci_release_msi(dev); 4494 } 4495 if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) { 4496 iflib_irq_free(ctx, &ctx->ifc_legacy_irq); 4497 } 4498 if (ctx->ifc_msix_mem != NULL) { 4499 bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY, 4500 ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem); 4501 ctx->ifc_msix_mem = NULL; 4502 } 4503 4504 bus_generic_detach(dev); 4505 if_free(ifp); 4506 4507 iflib_tx_structures_free(ctx); 4508 iflib_rx_structures_free(ctx); 4509 if (ctx->ifc_flags & IFC_SC_ALLOCATED) 4510 free(ctx->ifc_softc, M_IFLIB); 4511 free(ctx, M_IFLIB); 4512 return (0); 4513 } 4514 4515 4516 int 4517 iflib_device_detach(device_t dev) 4518 { 4519 if_ctx_t ctx = device_get_softc(dev); 4520 4521 return (iflib_device_deregister(ctx)); 4522 } 4523 4524 int 4525 iflib_device_suspend(device_t dev) 4526 { 4527 if_ctx_t ctx = device_get_softc(dev); 4528 4529 CTX_LOCK(ctx); 4530 IFDI_SUSPEND(ctx); 4531 CTX_UNLOCK(ctx); 4532 4533 return bus_generic_suspend(dev); 4534 } 4535 int 4536 iflib_device_shutdown(device_t dev) 4537 { 4538 if_ctx_t ctx = device_get_softc(dev); 4539 4540 CTX_LOCK(ctx); 4541 IFDI_SHUTDOWN(ctx); 4542 CTX_UNLOCK(ctx); 4543 4544 return bus_generic_suspend(dev); 4545 } 4546 4547 4548 int 4549 iflib_device_resume(device_t dev) 4550 { 4551 if_ctx_t ctx = device_get_softc(dev); 4552 iflib_txq_t txq = ctx->ifc_txqs; 4553 4554 CTX_LOCK(ctx); 4555 IFDI_RESUME(ctx); 4556 iflib_init_locked(ctx); 4557 CTX_UNLOCK(ctx); 4558 for (int i = 0; i < NTXQSETS(ctx); i++, txq++) 4559 iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); 4560 4561 return (bus_generic_resume(dev)); 4562 } 4563 4564 int 4565 iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params) 4566 { 4567 int error; 4568 if_ctx_t ctx = device_get_softc(dev); 4569 4570 CTX_LOCK(ctx); 4571 error = IFDI_IOV_INIT(ctx, num_vfs, params); 4572 CTX_UNLOCK(ctx); 4573 4574 return (error); 4575 } 4576 4577 void 4578 iflib_device_iov_uninit(device_t dev) 4579 { 4580 if_ctx_t ctx = device_get_softc(dev); 4581 4582 CTX_LOCK(ctx); 4583 IFDI_IOV_UNINIT(ctx); 4584 CTX_UNLOCK(ctx); 4585 } 4586 4587 int 4588 iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) 4589 { 4590 int error; 4591 if_ctx_t ctx = device_get_softc(dev); 4592 4593 CTX_LOCK(ctx); 4594 error = IFDI_IOV_VF_ADD(ctx, vfnum, params); 4595 CTX_UNLOCK(ctx); 4596 4597 return (error); 4598 } 4599 4600 /********************************************************************* 4601 * 4602 * MODULE FUNCTION DEFINITIONS 4603 * 4604 **********************************************************************/ 4605 4606 /* 4607 * - Start a fast taskqueue thread for each core 4608 * - Start a taskqueue for control operations 4609 */ 4610 static int 4611 iflib_module_init(void) 4612 { 4613 return (0); 4614 } 4615 4616 static int 4617 iflib_module_event_handler(module_t mod, int what, void *arg) 4618 { 4619 int err; 4620 4621 switch (what) { 4622 case MOD_LOAD: 4623 if ((err = iflib_module_init()) != 0) 4624 return (err); 4625 break; 4626 case MOD_UNLOAD: 4627 return (EBUSY); 4628 default: 4629 return (EOPNOTSUPP); 4630 } 4631 4632 return (0); 4633 } 4634 4635 /********************************************************************* 4636 * 4637 * PUBLIC FUNCTION DEFINITIONS 4638 * ordered as in iflib.h 4639 * 4640 **********************************************************************/ 4641 4642 4643 static void 4644 _iflib_assert(if_shared_ctx_t sctx) 4645 { 4646 MPASS(sctx->isc_tx_maxsize); 4647 
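	/*
	 * The checks below require the driver to have filled in its DMA
	 * segment limits and per-queue descriptor min/max/defaults before
	 * registering.  These normally come from the driver's static
	 * if_shared_ctx_t; the field values here are purely illustrative:
	 *
	 *	.isc_nrxd_min = {EXDRV_MIN_RXD},
	 *	.isc_nrxd_max = {EXDRV_MAX_RXD},
	 *	.isc_nrxd_default = {EXDRV_DEFAULT_RXD},
	 */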
MPASS(sctx->isc_tx_maxsegsize); 4648 4649 MPASS(sctx->isc_rx_maxsize); 4650 MPASS(sctx->isc_rx_nsegments); 4651 MPASS(sctx->isc_rx_maxsegsize); 4652 4653 MPASS(sctx->isc_nrxd_min[0]); 4654 MPASS(sctx->isc_nrxd_max[0]); 4655 MPASS(sctx->isc_nrxd_default[0]); 4656 MPASS(sctx->isc_ntxd_min[0]); 4657 MPASS(sctx->isc_ntxd_max[0]); 4658 MPASS(sctx->isc_ntxd_default[0]); 4659 } 4660 4661 static void 4662 _iflib_pre_assert(if_softc_ctx_t scctx) 4663 { 4664 4665 MPASS(scctx->isc_txrx->ift_txd_encap); 4666 MPASS(scctx->isc_txrx->ift_txd_flush); 4667 MPASS(scctx->isc_txrx->ift_txd_credits_update); 4668 MPASS(scctx->isc_txrx->ift_rxd_available); 4669 MPASS(scctx->isc_txrx->ift_rxd_pkt_get); 4670 MPASS(scctx->isc_txrx->ift_rxd_refill); 4671 MPASS(scctx->isc_txrx->ift_rxd_flush); 4672 } 4673 4674 static int 4675 iflib_register(if_ctx_t ctx) 4676 { 4677 if_shared_ctx_t sctx = ctx->ifc_sctx; 4678 driver_t *driver = sctx->isc_driver; 4679 device_t dev = ctx->ifc_dev; 4680 if_t ifp; 4681 4682 _iflib_assert(sctx); 4683 4684 CTX_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev)); 4685 4686 ifp = ctx->ifc_ifp = if_gethandle(IFT_ETHER); 4687 if (ifp == NULL) { 4688 device_printf(dev, "can not allocate ifnet structure\n"); 4689 return (ENOMEM); 4690 } 4691 4692 /* 4693 * Initialize our context's device specific methods 4694 */ 4695 kobj_init((kobj_t) ctx, (kobj_class_t) driver); 4696 kobj_class_compile((kobj_class_t) driver); 4697 driver->refs++; 4698 4699 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 4700 if_setsoftc(ifp, ctx); 4701 if_setdev(ifp, dev); 4702 if_setinitfn(ifp, iflib_if_init); 4703 if_setioctlfn(ifp, iflib_if_ioctl); 4704 if_settransmitfn(ifp, iflib_if_transmit); 4705 if_setqflushfn(ifp, iflib_if_qflush); 4706 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 4707 4708 ctx->ifc_vlan_attach_event = 4709 EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx, 4710 EVENTHANDLER_PRI_FIRST); 4711 ctx->ifc_vlan_detach_event = 4712 EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx, 4713 EVENTHANDLER_PRI_FIRST); 4714 4715 ifmedia_init(&ctx->ifc_media, IFM_IMASK, 4716 iflib_media_change, iflib_media_status); 4717 4718 return (0); 4719 } 4720 4721 4722 static int 4723 iflib_queues_alloc(if_ctx_t ctx) 4724 { 4725 if_shared_ctx_t sctx = ctx->ifc_sctx; 4726 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 4727 device_t dev = ctx->ifc_dev; 4728 int nrxqsets = scctx->isc_nrxqsets; 4729 int ntxqsets = scctx->isc_ntxqsets; 4730 iflib_txq_t txq; 4731 iflib_rxq_t rxq; 4732 iflib_fl_t fl = NULL; 4733 int i, j, cpu, err, txconf, rxconf; 4734 iflib_dma_info_t ifdip; 4735 uint32_t *rxqsizes = scctx->isc_rxqsizes; 4736 uint32_t *txqsizes = scctx->isc_txqsizes; 4737 uint8_t nrxqs = sctx->isc_nrxqs; 4738 uint8_t ntxqs = sctx->isc_ntxqs; 4739 int nfree_lists = sctx->isc_nfl ? 
sctx->isc_nfl : 1; 4740 caddr_t *vaddrs; 4741 uint64_t *paddrs; 4742 struct ifmp_ring **brscp; 4743 4744 KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1")); 4745 KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1")); 4746 4747 brscp = NULL; 4748 txq = NULL; 4749 rxq = NULL; 4750 4751 /* Allocate the TX ring struct memory */ 4752 if (!(txq = 4753 (iflib_txq_t) malloc(sizeof(struct iflib_txq) * 4754 ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { 4755 device_printf(dev, "Unable to allocate TX ring memory\n"); 4756 err = ENOMEM; 4757 goto fail; 4758 } 4759 4760 /* Now allocate the RX */ 4761 if (!(rxq = 4762 (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) * 4763 nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { 4764 device_printf(dev, "Unable to allocate RX ring memory\n"); 4765 err = ENOMEM; 4766 goto rx_fail; 4767 } 4768 4769 ctx->ifc_txqs = txq; 4770 ctx->ifc_rxqs = rxq; 4771 4772 /* 4773 * XXX handle allocation failure 4774 */ 4775 for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) { 4776 /* Set up some basics */ 4777 4778 if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) { 4779 device_printf(dev, "failed to allocate iflib_dma_info\n"); 4780 err = ENOMEM; 4781 goto err_tx_desc; 4782 } 4783 txq->ift_ifdi = ifdip; 4784 for (j = 0; j < ntxqs; j++, ifdip++) { 4785 if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) { 4786 device_printf(dev, "Unable to allocate Descriptor memory\n"); 4787 err = ENOMEM; 4788 goto err_tx_desc; 4789 } 4790 txq->ift_txd_size[j] = scctx->isc_txd_size[j]; 4791 bzero((void *)ifdip->idi_vaddr, txqsizes[j]); 4792 } 4793 txq->ift_ctx = ctx; 4794 txq->ift_id = i; 4795 if (sctx->isc_flags & IFLIB_HAS_TXCQ) { 4796 txq->ift_br_offset = 1; 4797 } else { 4798 txq->ift_br_offset = 0; 4799 } 4800 /* XXX fix this */ 4801 txq->ift_timer.c_cpu = cpu; 4802 4803 if (iflib_txsd_alloc(txq)) { 4804 device_printf(dev, "Critical Failure setting up TX buffers\n"); 4805 err = ENOMEM; 4806 goto err_tx_desc; 4807 } 4808 4809 /* Initialize the TX lock */ 4810 snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout", 4811 device_get_nameunit(dev), txq->ift_id); 4812 mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF); 4813 callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0); 4814 4815 snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db", 4816 device_get_nameunit(dev), txq->ift_id); 4817 4818 err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain, 4819 iflib_txq_can_drain, M_IFLIB, M_WAITOK); 4820 if (err) { 4821 /* XXX free any allocated rings */ 4822 device_printf(dev, "Unable to allocate buf_ring\n"); 4823 goto err_tx_desc; 4824 } 4825 } 4826 4827 for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) { 4828 /* Set up some basics */ 4829 4830 if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) { 4831 device_printf(dev, "failed to allocate iflib_dma_info\n"); 4832 err = ENOMEM; 4833 goto err_tx_desc; 4834 } 4835 4836 rxq->ifr_ifdi = ifdip; 4837 /* XXX this needs to be changed if #rx queues != #tx queues */ 4838 rxq->ifr_ntxqirq = 1; 4839 rxq->ifr_txqid[0] = i; 4840 for (j = 0; j < nrxqs; j++, ifdip++) { 4841 if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) { 4842 device_printf(dev, "Unable to allocate Descriptor memory\n"); 4843 err = ENOMEM; 4844 goto err_tx_desc; 4845 } 4846 bzero((void *)ifdip->idi_vaddr, rxqsizes[j]); 4847 } 4848 rxq->ifr_ctx = ctx; 4849 rxq->ifr_id = i; 4850 if 
(sctx->isc_flags & IFLIB_HAS_RXCQ) { 4851 rxq->ifr_fl_offset = 1; 4852 } else { 4853 rxq->ifr_fl_offset = 0; 4854 } 4855 rxq->ifr_nfl = nfree_lists; 4856 if (!(fl = 4857 (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) { 4858 device_printf(dev, "Unable to allocate free list memory\n"); 4859 err = ENOMEM; 4860 goto err_tx_desc; 4861 } 4862 rxq->ifr_fl = fl; 4863 for (j = 0; j < nfree_lists; j++) { 4864 fl[j].ifl_rxq = rxq; 4865 fl[j].ifl_id = j; 4866 fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset]; 4867 fl[j].ifl_rxd_size = scctx->isc_rxd_size[j]; 4868 } 4869 /* Allocate receive buffers for the ring*/ 4870 if (iflib_rxsd_alloc(rxq)) { 4871 device_printf(dev, 4872 "Critical Failure setting up receive buffers\n"); 4873 err = ENOMEM; 4874 goto err_rx_desc; 4875 } 4876 4877 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) 4878 fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK|M_ZERO); 4879 } 4880 4881 /* TXQs */ 4882 vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); 4883 paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); 4884 for (i = 0; i < ntxqsets; i++) { 4885 iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi; 4886 4887 for (j = 0; j < ntxqs; j++, di++) { 4888 vaddrs[i*ntxqs + j] = di->idi_vaddr; 4889 paddrs[i*ntxqs + j] = di->idi_paddr; 4890 } 4891 } 4892 if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) { 4893 device_printf(ctx->ifc_dev, "device queue allocation failed\n"); 4894 iflib_tx_structures_free(ctx); 4895 free(vaddrs, M_IFLIB); 4896 free(paddrs, M_IFLIB); 4897 goto err_rx_desc; 4898 } 4899 free(vaddrs, M_IFLIB); 4900 free(paddrs, M_IFLIB); 4901 4902 /* RXQs */ 4903 vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); 4904 paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); 4905 for (i = 0; i < nrxqsets; i++) { 4906 iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi; 4907 4908 for (j = 0; j < nrxqs; j++, di++) { 4909 vaddrs[i*nrxqs + j] = di->idi_vaddr; 4910 paddrs[i*nrxqs + j] = di->idi_paddr; 4911 } 4912 } 4913 if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) { 4914 device_printf(ctx->ifc_dev, "device queue allocation failed\n"); 4915 iflib_tx_structures_free(ctx); 4916 free(vaddrs, M_IFLIB); 4917 free(paddrs, M_IFLIB); 4918 goto err_rx_desc; 4919 } 4920 free(vaddrs, M_IFLIB); 4921 free(paddrs, M_IFLIB); 4922 4923 return (0); 4924 4925 /* XXX handle allocation failure changes */ 4926 err_rx_desc: 4927 err_tx_desc: 4928 if (ctx->ifc_rxqs != NULL) 4929 free(ctx->ifc_rxqs, M_IFLIB); 4930 ctx->ifc_rxqs = NULL; 4931 if (ctx->ifc_txqs != NULL) 4932 free(ctx->ifc_txqs, M_IFLIB); 4933 ctx->ifc_txqs = NULL; 4934 rx_fail: 4935 if (brscp != NULL) 4936 free(brscp, M_IFLIB); 4937 if (rxq != NULL) 4938 free(rxq, M_IFLIB); 4939 if (txq != NULL) 4940 free(txq, M_IFLIB); 4941 fail: 4942 return (err); 4943 } 4944 4945 static int 4946 iflib_tx_structures_setup(if_ctx_t ctx) 4947 { 4948 iflib_txq_t txq = ctx->ifc_txqs; 4949 int i; 4950 4951 for (i = 0; i < NTXQSETS(ctx); i++, txq++) 4952 iflib_txq_setup(txq); 4953 4954 return (0); 4955 } 4956 4957 static void 4958 iflib_tx_structures_free(if_ctx_t ctx) 4959 { 4960 iflib_txq_t txq = ctx->ifc_txqs; 4961 int i, j; 4962 4963 for (i = 0; i < NTXQSETS(ctx); i++, txq++) { 4964 iflib_txq_destroy(txq); 4965 for (j = 0; j < ctx->ifc_nhwtxqs; j++) 4966 iflib_dma_free(&txq->ift_ifdi[j]); 4967 } 4968 free(ctx->ifc_txqs, M_IFLIB); 4969 ctx->ifc_txqs = NULL; 4970 
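	/*
	 * iflib's own TX bookkeeping is gone at this point; the driver
	 * still holds whatever it set up in IFDI_TX_QUEUES_ALLOC and
	 * IFDI_RX_QUEUES_ALLOC, so give it a chance to release that.
	 */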
IFDI_QUEUES_FREE(ctx);
}

/*********************************************************************
 *
 *  Initialize all receive rings.
 *
 **********************************************************************/
static int
iflib_rx_structures_setup(if_ctx_t ctx)
{
	iflib_rxq_t rxq = ctx->ifc_rxqs;
	int q;
#if defined(INET6) || defined(INET)
	int i, err;
#endif

	for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
#if defined(INET6) || defined(INET)
		tcp_lro_free(&rxq->ifr_lc);
		if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
		    TCP_LRO_ENTRIES, min(1024,
		    ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) {
			device_printf(ctx->ifc_dev, "LRO Initialization failed!\n");
			goto fail;
		}
		rxq->ifr_lro_enabled = TRUE;
#endif
		IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
	}
	return (0);
#if defined(INET6) || defined(INET)
fail:
	/*
	 * Free the RX software descriptors allocated so far.  We only
	 * handle the rings that completed; the failing ring will have
	 * cleaned up after itself.  'q' failed, so it's the terminus.
	 */
	rxq = ctx->ifc_rxqs;
	for (i = 0; i < q; ++i, rxq++) {
		iflib_rx_sds_free(rxq);
		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
	}
	return (err);
#endif
}

/*********************************************************************
 *
 *  Free all receive rings.
 *
 **********************************************************************/
static void
iflib_rx_structures_free(if_ctx_t ctx)
{
	iflib_rxq_t rxq = ctx->ifc_rxqs;

	for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
		iflib_rx_sds_free(rxq);
	}
}

static int
iflib_qset_structures_setup(if_ctx_t ctx)
{
	int err;

	if ((err = iflib_tx_structures_setup(ctx)) != 0)
		return (err);

	if ((err = iflib_rx_structures_setup(ctx)) != 0) {
		device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);
		iflib_tx_structures_free(ctx);
		iflib_rx_structures_free(ctx);
	}
	return (err);
}

int
iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
		driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, char *name)
{

	return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
}

#ifdef SMP
static int
find_nth(if_ctx_t ctx, int qid)
{
	cpuset_t cpus;
	int i, cpuid, eqid, count;

	CPU_COPY(&ctx->ifc_cpus, &cpus);
	count = CPU_COUNT(&cpus);
	eqid = qid % count;
	/* clear up to the eqid'th bit */
	for (i = 0; i < eqid; i++) {
		cpuid = CPU_FFS(&cpus);
		MPASS(cpuid != 0);
		CPU_CLR(cpuid-1, &cpus);
	}
	cpuid = CPU_FFS(&cpus);
	MPASS(cpuid != 0);
	return (cpuid-1);
}

#ifdef SCHED_ULE
extern struct cpu_group *cpu_top;              /* CPU topology */

static int
find_child_with_core(int cpu, struct cpu_group *grp)
{
	int i;

	if (grp->cg_children == 0)
		return -1;

	MPASS(grp->cg_child);
	for (i = 0; i < grp->cg_children; i++) {
		if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
			return i;
	}

	return -1;
}

/*
 * Find the nth thread on the specified core
 */
static int
find_thread(int cpu, int thread_num)
{
	struct cpu_group *grp;
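	/*
	 * Descend from the topology root toward the group containing
	 * 'cpu', stopping before a child that covers only one CPU; if
	 * the group found does not share at least an L2 cache, the
	 * caller's CPU is used as-is.
	 */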
5104 int i; 5105 cpuset_t cs; 5106 5107 grp = cpu_top; 5108 if (grp == NULL) 5109 return cpu; 5110 i = 0; 5111 while ((i = find_child_with_core(cpu, grp)) != -1) { 5112 /* If the child only has one cpu, don't descend */ 5113 if (grp->cg_child[i].cg_count <= 1) 5114 break; 5115 grp = &grp->cg_child[i]; 5116 } 5117 5118 /* If they don't share at least an L2 cache, use the same CPU */ 5119 if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE) 5120 return cpu; 5121 5122 /* Now pick one */ 5123 CPU_COPY(&grp->cg_mask, &cs); 5124 for (i = thread_num % grp->cg_count; i > 0; i--) { 5125 MPASS(CPU_FFS(&cs)); 5126 CPU_CLR(CPU_FFS(&cs) - 1, &cs); 5127 } 5128 MPASS(CPU_FFS(&cs)); 5129 return CPU_FFS(&cs) - 1; 5130 } 5131 #else 5132 static int 5133 find_thread(int cpu, int thread_num __unused) 5134 { 5135 return cpu; 5136 } 5137 #endif 5138 5139 static int 5140 get_thread_num(if_ctx_t ctx, iflib_intr_type_t type, int qid) 5141 { 5142 switch (type) { 5143 case IFLIB_INTR_TX: 5144 /* TX queues get threads on the same core as the corresponding RX queue */ 5145 /* XXX handle multiple RX threads per core and more than two threads per core */ 5146 return qid / CPU_COUNT(&ctx->ifc_cpus) + 1; 5147 case IFLIB_INTR_RX: 5148 case IFLIB_INTR_RXTX: 5149 /* RX queues get the first thread on their core */ 5150 return qid / CPU_COUNT(&ctx->ifc_cpus); 5151 default: 5152 return -1; 5153 } 5154 } 5155 #else 5156 #define get_thread_num(ctx, type, qid) CPU_FIRST() 5157 #define find_thread(cpuid, tid) CPU_FIRST() 5158 #define find_nth(ctx, gid) CPU_FIRST() 5159 #endif 5160 5161 /* Just to avoid copy/paste */ 5162 static inline int 5163 iflib_irq_set_affinity(if_ctx_t ctx, int irq, iflib_intr_type_t type, int qid, 5164 struct grouptask *gtask, struct taskqgroup *tqg, void *uniq, char *name) 5165 { 5166 int cpuid; 5167 int err, tid; 5168 5169 cpuid = find_nth(ctx, qid); 5170 tid = get_thread_num(ctx, type, qid); 5171 MPASS(tid >= 0); 5172 cpuid = find_thread(cpuid, tid); 5173 err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, irq, name); 5174 if (err) { 5175 device_printf(ctx->ifc_dev, "taskqgroup_attach_cpu failed %d\n", err); 5176 return (err); 5177 } 5178 #ifdef notyet 5179 if (cpuid > ctx->ifc_cpuid_highest) 5180 ctx->ifc_cpuid_highest = cpuid; 5181 #endif 5182 return 0; 5183 } 5184 5185 int 5186 iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid, 5187 iflib_intr_type_t type, driver_filter_t *filter, 5188 void *filter_arg, int qid, char *name) 5189 { 5190 struct grouptask *gtask; 5191 struct taskqgroup *tqg; 5192 iflib_filter_info_t info; 5193 gtask_fn_t *fn; 5194 int tqrid, err; 5195 driver_filter_t *intr_fast; 5196 void *q; 5197 5198 info = &ctx->ifc_filter_info; 5199 tqrid = rid; 5200 5201 switch (type) { 5202 /* XXX merge tx/rx for netmap? 
*/ 5203 case IFLIB_INTR_TX: 5204 q = &ctx->ifc_txqs[qid]; 5205 info = &ctx->ifc_txqs[qid].ift_filter_info; 5206 gtask = &ctx->ifc_txqs[qid].ift_task; 5207 tqg = qgroup_if_io_tqg; 5208 fn = _task_fn_tx; 5209 intr_fast = iflib_fast_intr; 5210 GROUPTASK_INIT(gtask, 0, fn, q); 5211 break; 5212 case IFLIB_INTR_RX: 5213 q = &ctx->ifc_rxqs[qid]; 5214 info = &ctx->ifc_rxqs[qid].ifr_filter_info; 5215 gtask = &ctx->ifc_rxqs[qid].ifr_task; 5216 tqg = qgroup_if_io_tqg; 5217 fn = _task_fn_rx; 5218 intr_fast = iflib_fast_intr; 5219 GROUPTASK_INIT(gtask, 0, fn, q); 5220 break; 5221 case IFLIB_INTR_RXTX: 5222 q = &ctx->ifc_rxqs[qid]; 5223 info = &ctx->ifc_rxqs[qid].ifr_filter_info; 5224 gtask = &ctx->ifc_rxqs[qid].ifr_task; 5225 tqg = qgroup_if_io_tqg; 5226 fn = _task_fn_rx; 5227 intr_fast = iflib_fast_intr_rxtx; 5228 GROUPTASK_INIT(gtask, 0, fn, q); 5229 break; 5230 case IFLIB_INTR_ADMIN: 5231 q = ctx; 5232 tqrid = -1; 5233 info = &ctx->ifc_filter_info; 5234 gtask = &ctx->ifc_admin_task; 5235 tqg = qgroup_if_config_tqg; 5236 fn = _task_fn_admin; 5237 intr_fast = iflib_fast_intr_ctx; 5238 break; 5239 default: 5240 panic("unknown net intr type"); 5241 } 5242 5243 info->ifi_filter = filter; 5244 info->ifi_filter_arg = filter_arg; 5245 info->ifi_task = gtask; 5246 info->ifi_ctx = q; 5247 5248 err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name); 5249 if (err != 0) { 5250 device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err); 5251 return (err); 5252 } 5253 if (type == IFLIB_INTR_ADMIN) 5254 return (0); 5255 5256 if (tqrid != -1) { 5257 err = iflib_irq_set_affinity(ctx, rman_get_start(irq->ii_res), type, qid, gtask, tqg, q, name); 5258 if (err) 5259 return (err); 5260 } else { 5261 taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name); 5262 } 5263 5264 return (0); 5265 } 5266 5267 void 5268 iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, char *name) 5269 { 5270 struct grouptask *gtask; 5271 struct taskqgroup *tqg; 5272 gtask_fn_t *fn; 5273 void *q; 5274 int irq_num = -1; 5275 int err; 5276 5277 switch (type) { 5278 case IFLIB_INTR_TX: 5279 q = &ctx->ifc_txqs[qid]; 5280 gtask = &ctx->ifc_txqs[qid].ift_task; 5281 tqg = qgroup_if_io_tqg; 5282 fn = _task_fn_tx; 5283 if (irq != NULL) 5284 irq_num = rman_get_start(irq->ii_res); 5285 break; 5286 case IFLIB_INTR_RX: 5287 q = &ctx->ifc_rxqs[qid]; 5288 gtask = &ctx->ifc_rxqs[qid].ifr_task; 5289 tqg = qgroup_if_io_tqg; 5290 fn = _task_fn_rx; 5291 if (irq != NULL) 5292 irq_num = rman_get_start(irq->ii_res); 5293 break; 5294 case IFLIB_INTR_IOV: 5295 q = ctx; 5296 gtask = &ctx->ifc_vflr_task; 5297 tqg = qgroup_if_config_tqg; 5298 fn = _task_fn_iov; 5299 break; 5300 default: 5301 panic("unknown net intr type"); 5302 } 5303 GROUPTASK_INIT(gtask, 0, fn, q); 5304 if (irq_num != -1) { 5305 err = iflib_irq_set_affinity(ctx, irq_num, type, qid, gtask, tqg, q, name); 5306 if (err) 5307 taskqgroup_attach(tqg, gtask, q, irq_num, name); 5308 } 5309 else { 5310 taskqgroup_attach(tqg, gtask, q, irq_num, name); 5311 } 5312 } 5313 5314 void 5315 iflib_irq_free(if_ctx_t ctx, if_irq_t irq) 5316 { 5317 if (irq->ii_tag) 5318 bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag); 5319 5320 if (irq->ii_res) 5321 bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, irq->ii_rid, irq->ii_res); 5322 } 5323 5324 static int 5325 iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, char *name) 5326 { 5327 iflib_txq_t txq = ctx->ifc_txqs; 5328 iflib_rxq_t rxq = ctx->ifc_rxqs; 5329 
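	/*
	 * Legacy/INTx (and single-vector MSI) setup: one interrupt does
	 * double duty.  The fast filter runs the context filter and kicks
	 * the RX grouptask; the TX grouptask is attached to the same
	 * vector further down.
	 */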
if_irq_t irq = &ctx->ifc_legacy_irq; 5330 iflib_filter_info_t info; 5331 struct grouptask *gtask; 5332 struct taskqgroup *tqg; 5333 gtask_fn_t *fn; 5334 int tqrid; 5335 void *q; 5336 int err; 5337 5338 q = &ctx->ifc_rxqs[0]; 5339 info = &rxq[0].ifr_filter_info; 5340 gtask = &rxq[0].ifr_task; 5341 tqg = qgroup_if_io_tqg; 5342 tqrid = irq->ii_rid = *rid; 5343 fn = _task_fn_rx; 5344 5345 ctx->ifc_flags |= IFC_LEGACY; 5346 info->ifi_filter = filter; 5347 info->ifi_filter_arg = filter_arg; 5348 info->ifi_task = gtask; 5349 info->ifi_ctx = ctx; 5350 5351 /* We allocate a single interrupt resource */ 5352 if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx, NULL, info, name)) != 0) 5353 return (err); 5354 GROUPTASK_INIT(gtask, 0, fn, q); 5355 taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name); 5356 5357 GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq); 5358 taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, rman_get_start(irq->ii_res), "tx"); 5359 return (0); 5360 } 5361 5362 void 5363 iflib_led_create(if_ctx_t ctx) 5364 { 5365 5366 ctx->ifc_led_dev = led_create(iflib_led_func, ctx, 5367 device_get_nameunit(ctx->ifc_dev)); 5368 } 5369 5370 void 5371 iflib_tx_intr_deferred(if_ctx_t ctx, int txqid) 5372 { 5373 5374 GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task); 5375 } 5376 5377 void 5378 iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid) 5379 { 5380 5381 GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task); 5382 } 5383 5384 void 5385 iflib_admin_intr_deferred(if_ctx_t ctx) 5386 { 5387 #ifdef INVARIANTS 5388 struct grouptask *gtask; 5389 5390 gtask = &ctx->ifc_admin_task; 5391 MPASS(gtask != NULL && gtask->gt_taskqueue != NULL); 5392 #endif 5393 5394 GROUPTASK_ENQUEUE(&ctx->ifc_admin_task); 5395 } 5396 5397 void 5398 iflib_iov_intr_deferred(if_ctx_t ctx) 5399 { 5400 5401 GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task); 5402 } 5403 5404 void 5405 iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name) 5406 { 5407 5408 taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name); 5409 } 5410 5411 void 5412 iflib_config_gtask_init(if_ctx_t ctx, struct grouptask *gtask, gtask_fn_t *fn, 5413 char *name) 5414 { 5415 5416 GROUPTASK_INIT(gtask, 0, fn, ctx); 5417 taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name); 5418 } 5419 5420 void 5421 iflib_config_gtask_deinit(struct grouptask *gtask) 5422 { 5423 5424 taskqgroup_detach(qgroup_if_config_tqg, gtask); 5425 } 5426 5427 void 5428 iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate) 5429 { 5430 if_t ifp = ctx->ifc_ifp; 5431 iflib_txq_t txq = ctx->ifc_txqs; 5432 5433 if_setbaudrate(ifp, baudrate); 5434 if (baudrate >= IF_Gbps(10)) 5435 ctx->ifc_flags |= IFC_PREFETCH; 5436 5437 /* If link down, disable watchdog */ 5438 if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) { 5439 for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++) 5440 txq->ift_qstatus = IFLIB_QUEUE_IDLE; 5441 } 5442 ctx->ifc_link_state = link_state; 5443 if_link_state_change(ifp, link_state); 5444 } 5445 5446 static int 5447 iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq) 5448 { 5449 int credits; 5450 #ifdef INVARIANTS 5451 int credits_pre = txq->ift_cidx_processed; 5452 #endif 5453 5454 if (ctx->isc_txd_credits_update == NULL) 5455 return (0); 5456 5457 if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0) 5458 return (0); 5459 5460 txq->ift_processed += credits; 5461 txq->ift_cidx_processed += credits; 5462 5463 MPASS(credits_pre + 
credits == txq->ift_cidx_processed);
	if (txq->ift_cidx_processed >= txq->ift_size)
		txq->ift_cidx_processed -= txq->ift_size;
	return (credits);
}

static int
iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
{

	return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
	    budget));
}

void
iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
	const char *description, if_int_delay_info_t info,
	int offset, int value)
{
	info->iidi_ctx = ctx;
	info->iidi_offset = offset;
	info->iidi_value = value;
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
	    info, 0, iflib_sysctl_int_delay, "I", description);
}

struct mtx *
iflib_ctx_lock_get(if_ctx_t ctx)
{

	return (&ctx->ifc_mtx);
}

static int
iflib_msix_init(if_ctx_t ctx)
{
	device_t dev = ctx->ifc_dev;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs;
	int iflib_num_tx_queues, iflib_num_rx_queues;
	int err, admincnt, bar;

	iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
	iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;

	device_printf(dev, "msix_init qsets capped at %d\n", imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));

	bar = ctx->ifc_softc_ctx.isc_msix_bar;
	admincnt = sctx->isc_admin_intrcnt;
	/* Override by global tunable */
	{
		int i;
		size_t len = sizeof(i);
		err = kernel_sysctlbyname(curthread, "hw.pci.enable_msix", &i, &len, NULL, 0, NULL, 0);
		if (err == 0) {
			if (i == 0)
				goto msi;
		} else {
			device_printf(dev, "unable to read hw.pci.enable_msix.\n");
		}
	}
	/* Override by tunable */
	if (scctx->isc_disable_msix)
		goto msi;

	/*
	** When used in a virtualized environment
	** the PCI BUSMASTER capability may not be set,
	** so explicitly set it here and rewrite
	** the ENABLE bit in the MSI-X control register
	** at this point to cause the host to
	** successfully initialize us.
	*/
	{
		int msix_ctrl, rid;

		pci_enable_busmaster(dev);
		rid = 0;
		if (pci_find_cap(dev, PCIY_MSIX, &rid) == 0 && rid != 0) {
			rid += PCIR_MSIX_CTRL;
			msix_ctrl = pci_read_config(dev, rid, 2);
			msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
			pci_write_config(dev, rid, msix_ctrl, 2);
		} else {
			device_printf(dev, "PCIY_MSIX capability not found; "
			    "or rid %d == 0.\n", rid);
			goto msi;
		}
	}

	/*
	 * bar == -1 => "trust me I know what I'm doing"
	 * Some drivers are for hardware that is so shoddily
	 * documented that no one knows which bars are which,
	 * so the developer has to map all bars.  This hack
	 * allows shoddy garbage to use msix in this framework.
5563 */ 5564 if (bar != -1) { 5565 ctx->ifc_msix_mem = bus_alloc_resource_any(dev, 5566 SYS_RES_MEMORY, &bar, RF_ACTIVE); 5567 if (ctx->ifc_msix_mem == NULL) { 5568 /* May not be enabled */ 5569 device_printf(dev, "Unable to map MSIX table \n"); 5570 goto msi; 5571 } 5572 } 5573 /* First try MSI/X */ 5574 if ((msgs = pci_msix_count(dev)) == 0) { /* system has msix disabled */ 5575 device_printf(dev, "System has MSIX disabled \n"); 5576 bus_release_resource(dev, SYS_RES_MEMORY, 5577 bar, ctx->ifc_msix_mem); 5578 ctx->ifc_msix_mem = NULL; 5579 goto msi; 5580 } 5581 #if IFLIB_DEBUG 5582 /* use only 1 qset in debug mode */ 5583 queuemsgs = min(msgs - admincnt, 1); 5584 #else 5585 queuemsgs = msgs - admincnt; 5586 #endif 5587 #ifdef RSS 5588 queues = imin(queuemsgs, rss_getnumbuckets()); 5589 #else 5590 queues = queuemsgs; 5591 #endif 5592 queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues); 5593 device_printf(dev, "pxm cpus: %d queue msgs: %d admincnt: %d\n", 5594 CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt); 5595 #ifdef RSS 5596 /* If we're doing RSS, clamp at the number of RSS buckets */ 5597 if (queues > rss_getnumbuckets()) 5598 queues = rss_getnumbuckets(); 5599 #endif 5600 if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt) 5601 rx_queues = iflib_num_rx_queues; 5602 else 5603 rx_queues = queues; 5604 5605 if (rx_queues > scctx->isc_nrxqsets) 5606 rx_queues = scctx->isc_nrxqsets; 5607 5608 /* 5609 * We want this to be all logical CPUs by default 5610 */ 5611 if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues) 5612 tx_queues = iflib_num_tx_queues; 5613 else 5614 tx_queues = mp_ncpus; 5615 5616 if (tx_queues > scctx->isc_ntxqsets) 5617 tx_queues = scctx->isc_ntxqsets; 5618 5619 if (ctx->ifc_sysctl_qs_eq_override == 0) { 5620 #ifdef INVARIANTS 5621 if (tx_queues != rx_queues) 5622 device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n", 5623 min(rx_queues, tx_queues), min(rx_queues, tx_queues)); 5624 #endif 5625 tx_queues = min(rx_queues, tx_queues); 5626 rx_queues = min(rx_queues, tx_queues); 5627 } 5628 5629 device_printf(dev, "using %d rx queues %d tx queues \n", rx_queues, tx_queues); 5630 5631 vectors = rx_queues + admincnt; 5632 if ((err = pci_alloc_msix(dev, &vectors)) == 0) { 5633 device_printf(dev, 5634 "Using MSIX interrupts with %d vectors\n", vectors); 5635 scctx->isc_vectors = vectors; 5636 scctx->isc_nrxqsets = rx_queues; 5637 scctx->isc_ntxqsets = tx_queues; 5638 scctx->isc_intr = IFLIB_INTR_MSIX; 5639 5640 return (vectors); 5641 } else { 5642 device_printf(dev, "failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err); 5643 } 5644 msi: 5645 vectors = pci_msi_count(dev); 5646 scctx->isc_nrxqsets = 1; 5647 scctx->isc_ntxqsets = 1; 5648 scctx->isc_vectors = vectors; 5649 if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) { 5650 device_printf(dev,"Using an MSI interrupt\n"); 5651 scctx->isc_intr = IFLIB_INTR_MSI; 5652 } else { 5653 device_printf(dev,"Using a Legacy interrupt\n"); 5654 scctx->isc_intr = IFLIB_INTR_LEGACY; 5655 } 5656 5657 return (vectors); 5658 } 5659 5660 char * ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" }; 5661 5662 static int 5663 mp_ring_state_handler(SYSCTL_HANDLER_ARGS) 5664 { 5665 int rc; 5666 uint16_t *state = ((uint16_t *)oidp->oid_arg1); 5667 struct sbuf *sb; 5668 char *ring_state = "UNKNOWN"; 5669 5670 /* XXX needed ? 
*/ 5671 rc = sysctl_wire_old_buffer(req, 0); 5672 MPASS(rc == 0); 5673 if (rc != 0) 5674 return (rc); 5675 sb = sbuf_new_for_sysctl(NULL, NULL, 80, req); 5676 MPASS(sb != NULL); 5677 if (sb == NULL) 5678 return (ENOMEM); 5679 if (state[3] <= 3) 5680 ring_state = ring_states[state[3]]; 5681 5682 sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s", 5683 state[0], state[1], state[2], ring_state); 5684 rc = sbuf_finish(sb); 5685 sbuf_delete(sb); 5686 return(rc); 5687 } 5688 5689 enum iflib_ndesc_handler { 5690 IFLIB_NTXD_HANDLER, 5691 IFLIB_NRXD_HANDLER, 5692 }; 5693 5694 static int 5695 mp_ndesc_handler(SYSCTL_HANDLER_ARGS) 5696 { 5697 if_ctx_t ctx = (void *)arg1; 5698 enum iflib_ndesc_handler type = arg2; 5699 char buf[256] = {0}; 5700 qidx_t *ndesc; 5701 char *p, *next; 5702 int nqs, rc, i; 5703 5704 MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER); 5705 5706 nqs = 8; 5707 switch(type) { 5708 case IFLIB_NTXD_HANDLER: 5709 ndesc = ctx->ifc_sysctl_ntxds; 5710 if (ctx->ifc_sctx) 5711 nqs = ctx->ifc_sctx->isc_ntxqs; 5712 break; 5713 case IFLIB_NRXD_HANDLER: 5714 ndesc = ctx->ifc_sysctl_nrxds; 5715 if (ctx->ifc_sctx) 5716 nqs = ctx->ifc_sctx->isc_nrxqs; 5717 break; 5718 } 5719 if (nqs == 0) 5720 nqs = 8; 5721 5722 for (i=0; i<8; i++) { 5723 if (i >= nqs) 5724 break; 5725 if (i) 5726 strcat(buf, ","); 5727 sprintf(strchr(buf, 0), "%d", ndesc[i]); 5728 } 5729 5730 rc = sysctl_handle_string(oidp, buf, sizeof(buf), req); 5731 if (rc || req->newptr == NULL) 5732 return rc; 5733 5734 for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p; 5735 i++, p = strsep(&next, " ,")) { 5736 ndesc[i] = strtoul(p, NULL, 10); 5737 } 5738 5739 return(rc); 5740 } 5741 5742 #define NAME_BUFLEN 32 5743 static void 5744 iflib_add_device_sysctl_pre(if_ctx_t ctx) 5745 { 5746 device_t dev = iflib_get_dev(ctx); 5747 struct sysctl_oid_list *child, *oid_list; 5748 struct sysctl_ctx_list *ctx_list; 5749 struct sysctl_oid *node; 5750 5751 ctx_list = device_get_sysctl_ctx(dev); 5752 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 5753 ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib", 5754 CTLFLAG_RD, NULL, "IFLIB fields"); 5755 oid_list = SYSCTL_CHILDREN(node); 5756 5757 SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version", 5758 CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0, 5759 "driver version"); 5760 5761 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs", 5762 CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0, 5763 "# of txqs to use, 0 => use default #"); 5764 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs", 5765 CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0, 5766 "# of rxqs to use, 0 => use default #"); 5767 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable", 5768 CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0, 5769 "permit #txq != #rxq"); 5770 SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix", 5771 CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0, 5772 "disable MSIX (default 0)"); 5773 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget", 5774 CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0, 5775 "set the rx budget"); 5776 5777 /* XXX change for per-queue sizes */ 5778 SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds", 5779 CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER, 5780 mp_ndesc_handler, "A", 5781 "list of # of tx descriptors to use, 0 = use default #"); 5782 SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds", 5783 CTLTYPE_STRING|CTLFLAG_RWTUN, 
#define NAME_BUFLEN 32
static void
iflib_add_device_sysctl_pre(if_ctx_t ctx)
{
	device_t dev = iflib_get_dev(ctx);
	struct sysctl_oid_list *child, *oid_list;
	struct sysctl_ctx_list *ctx_list;
	struct sysctl_oid *node;

	ctx_list = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child,
	    OID_AUTO, "iflib", CTLFLAG_RD, NULL, "IFLIB fields");
	oid_list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
	    CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
	    "driver version");

	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
	    "# of txqs to use, 0 => use default #");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
	    "# of rxqs to use, 0 => use default #");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
	    "permit #txq != #rxq");
	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
	    CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
	    "disable MSIX (default 0)");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
	    "set the rx budget");

	/* XXX change for per-queue sizes */
	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
	    CTLTYPE_STRING | CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
	    mp_ndesc_handler, "A",
	    "list of # of tx descriptors to use, 0 = use default #");
	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
	    CTLTYPE_STRING | CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
	    mp_ndesc_handler, "A",
	    "list of # of rx descriptors to use, 0 = use default #");
}

static void
iflib_add_device_sysctl_post(if_ctx_t ctx)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = iflib_get_dev(ctx);
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx_list;
	iflib_fl_t fl;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	int i, j;
	char namebuf[NAME_BUFLEN];
	char *qfmt;
	struct sysctl_oid *queue_node, *fl_node, *node;
	struct sysctl_oid_list *queue_list, *fl_list;

	ctx_list = device_get_sysctl_ctx(dev);
	node = ctx->ifc_sysctl_node;
	child = SYSCTL_CHILDREN(node);

	if (scctx->isc_ntxqsets > 100)
		qfmt = "txq%03d";
	else if (scctx->isc_ntxqsets > 10)
		qfmt = "txq%02d";
	else
		qfmt = "txq%d";
	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
#if MEMORY_LOGGING
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
		    CTLFLAG_RD, &txq->ift_dequeued, "total mbufs freed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
		    CTLFLAG_RD, &txq->ift_enqueued, "total mbufs enqueued");
#endif
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
		    CTLFLAG_RD, &txq->ift_mbuf_defrag,
		    "# of times m_defrag was called");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
		    CTLFLAG_RD, &txq->ift_pullups,
		    "# of times m_pullup was called");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
		    CTLFLAG_RD, &txq->ift_mbuf_defrag_failed,
		    "# of times m_defrag failed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD, &txq->ift_no_desc_avail,
		    "# of times no descriptors were available");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
		    CTLFLAG_RD, &txq->ift_map_failed,
		    "# of times dma map failed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
		    CTLFLAG_RD, &txq->ift_txd_encap_efbig,
		    "# of times txd_encap returned EFBIG");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
		    CTLFLAG_RD, &txq->ift_no_tx_dma_setup,
		    "# of times map failed for other than EFBIG");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
		    CTLFLAG_RD, &txq->ift_pidx, 1, "Producer Index");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
		    CTLFLAG_RD, &txq->ift_cidx, 1, "Consumer Index");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
		    CTLFLAG_RD, &txq->ift_cidx_processed, 1,
		    "Consumer Index seen by credit update");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
		    CTLFLAG_RD, &txq->ift_in_use, 1, "descriptors in use");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
		    CTLFLAG_RD, &txq->ift_processed,
		    "descriptors processed for clean");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
		    CTLFLAG_RD, &txq->ift_cleaned, "total cleaned");
		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
		    CTLTYPE_STRING | CTLFLAG_RD,
		    __DEVOLATILE(uint64_t *, &txq->ift_br->state), 0,
		    mp_ring_state_handler, "A", "soft ring state");
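		/*
		 * The remaining per-txq statistics are counters maintained
		 * by the soft tx ring itself (txq->ift_br, see
		 * <net/mp_ring.h>): enqueues/drops on the producer side,
		 * starts/stalls/restarts/abdications on the consumer side.
		 */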
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
		    CTLFLAG_RD, &txq->ift_br->enqueues,
		    "# of enqueues to the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
		    CTLFLAG_RD, &txq->ift_br->drops,
		    "# of drops in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
		    CTLFLAG_RD, &txq->ift_br->starts,
		    "# of normal consumer starts in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
		    CTLFLAG_RD, &txq->ift_br->stalls,
		    "# of consumer stalls in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
		    CTLFLAG_RD, &txq->ift_br->restarts,
		    "# of consumer restarts in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
		    CTLFLAG_RD, &txq->ift_br->abdications,
		    "# of consumer abdications in the mp_ring for this queue");
	}

	if (scctx->isc_nrxqsets > 100)
		qfmt = "rxq%03d";
	else if (scctx->isc_nrxqsets > 10)
		qfmt = "rxq%02d";
	else
		qfmt = "rxq%d";
	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx",
			    CTLFLAG_RD, &rxq->ifr_cq_pidx, 1, "Producer Index");
			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
			    CTLFLAG_RD, &rxq->ifr_cq_cidx, 1, "Consumer Index");
		}

		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO,
			    namebuf, CTLFLAG_RD, NULL, "freelist Name");
			fl_list = SYSCTL_CHILDREN(fl_node);
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
			    CTLFLAG_RD, &fl->ifl_pidx, 1, "Producer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
			    CTLFLAG_RD, &fl->ifl_cidx, 1, "Consumer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
			    CTLFLAG_RD, &fl->ifl_credits, 1, "credits available");
#if MEMORY_LOGGING
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
			    CTLFLAG_RD, &fl->ifl_m_enqueued, "mbufs allocated");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
			    CTLFLAG_RD, &fl->ifl_m_dequeued, "mbufs freed");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
			    CTLFLAG_RD, &fl->ifl_cl_enqueued, "clusters allocated");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
			    CTLFLAG_RD, &fl->ifl_cl_dequeued, "clusters freed");
#endif
		}
	}
}

#ifndef __NO_STRICT_ALIGNMENT
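/*
 * Strict-alignment architectures fault on misaligned loads, and the 14-byte
 * Ethernet header leaves the IP header 2 bytes off a 32-bit boundary.
 * iflib_fixup_rx() below re-aligns a received frame either by sliding it
 * forward by ETHER_HDR_LEN within its own buffer (common case) or, when the
 * data won't fit, by moving the Ethernet header into a freshly allocated
 * mbuf prepended to the chain.
 */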
static struct mbuf *
iflib_fixup_rx(struct mbuf *m)
{
	struct mbuf *n;

	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/* Slide the frame up; the payload then starts aligned. */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		/* No room to slide: carry the header in a separate mbuf. */
		MGETHDR(n, M_NOWAIT, MT_DATA);
		if (n == NULL) {
			m_freem(m);
			return (NULL);
		}
		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
		m->m_data += ETHER_HDR_LEN;
		m->m_len -= ETHER_HDR_LEN;
		n->m_len = ETHER_HDR_LEN;
		M_MOVE_PKTHDR(n, m);
		n->m_next = m;
	}
	return (n);
}
#endif