/*-
 * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Neither the name of Matthew Macy nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_acpi.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/md5.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/kobj.h>
#include <sys/rman.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/limits.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/mp_ring.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_lro.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/ip_var.h>
#include <netinet/netdump/netdump.h>
#include <netinet6/ip6_var.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/led/led.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>

#include <net/iflib.h>
#include <net/iflib_private.h>

#include "ifdi_if.h"

#if defined(__i386__) || defined(__amd64__)
#include <sys/memdesc.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/busdma_dmar.h>
#endif

#include <sys/bitstring.h>
/*
 * enable accounting of every mbuf as it comes in to and goes out of
 * iflib's software descriptor references
 */
#define MEMORY_LOGGING 0
/*
 * Enable mbuf vectors for compressing long mbuf chains
 */
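/*
 * MEMORY_LOGGING gates per-queue mbuf/cluster accounting at compile time.
 * A minimal sketch of the pattern used throughout this file (illustrative
 * only, not compiled; the ifl_* counters are declared further below):
 *
 *	#if MEMORY_LOGGING
 *		fl->ifl_m_enqueued++;	// count each mbuf handed to the free list
 *	#endif
 *
 * With MEMORY_LOGGING left at 0 the counter fields and their updates both
 * disappear, so the accounting costs nothing in production builds.
 */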
/*
 * NB:
 * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
 *   we prefetch needs to be determined by the time spent in m_free vis a vis
 *   the cost of a prefetch. This will of course vary based on the workload:
 *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
 *        is quite expensive, thus suggesting very little prefetch.
 *      - small packet forwarding which is just returning a single mbuf to
 *        UMA will typically be very fast vis a vis the cost of a memory
 *        access.
 */


/*
 * File organization:
 *  - private structures
 *  - iflib private utility functions
 *  - ifnet functions
 *  - vlan registry and other exported functions
 *  - iflib public core functions
 *
 *
 */
MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");

struct iflib_txq;
typedef struct iflib_txq *iflib_txq_t;
struct iflib_rxq;
typedef struct iflib_rxq *iflib_rxq_t;
struct iflib_fl;
typedef struct iflib_fl *iflib_fl_t;

struct iflib_ctx;

static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
static void iflib_timer(void *arg);

typedef struct iflib_filter_info {
	driver_filter_t *ifi_filter;
	void *ifi_filter_arg;
	struct grouptask *ifi_task;
	void *ifi_ctx;
} *iflib_filter_info_t;

struct iflib_ctx {
	KOBJ_FIELDS;
	/*
	 * Pointer to hardware driver's softc
	 */
	void *ifc_softc;
	device_t ifc_dev;
	if_t ifc_ifp;

	cpuset_t ifc_cpus;
	if_shared_ctx_t ifc_sctx;
	struct if_softc_ctx ifc_softc_ctx;

	struct sx ifc_ctx_sx;
	struct mtx ifc_state_mtx;

	uint16_t ifc_nhwtxqs;

	iflib_txq_t ifc_txqs;
	iflib_rxq_t ifc_rxqs;
	uint32_t ifc_if_flags;
	uint32_t ifc_flags;
	uint32_t ifc_max_fl_buf_size;
	int ifc_in_detach;

	int ifc_link_state;
	int ifc_link_irq;
	int ifc_watchdog_events;
	struct cdev *ifc_led_dev;
	struct resource *ifc_msix_mem;

	struct if_irq ifc_legacy_irq;
	struct grouptask ifc_admin_task;
	struct grouptask ifc_vflr_task;
	struct iflib_filter_info ifc_filter_info;
	struct ifmedia ifc_media;

	struct sysctl_oid *ifc_sysctl_node;
	uint16_t ifc_sysctl_ntxqs;
	uint16_t ifc_sysctl_nrxqs;
	uint16_t ifc_sysctl_qs_eq_override;
	uint16_t ifc_sysctl_rx_budget;
	uint16_t ifc_sysctl_tx_abdicate;

	qidx_t ifc_sysctl_ntxds[8];
	qidx_t ifc_sysctl_nrxds[8];
	struct if_txrx ifc_txrx;
#define isc_txd_encap ifc_txrx.ift_txd_encap
#define isc_txd_flush ifc_txrx.ift_txd_flush
#define isc_txd_credits_update ifc_txrx.ift_txd_credits_update
#define isc_rxd_available ifc_txrx.ift_rxd_available
#define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
#define isc_rxd_refill ifc_txrx.ift_rxd_refill
#define isc_rxd_flush ifc_txrx.ift_rxd_flush
#define isc_legacy_intr ifc_txrx.ift_legacy_intr
	eventhandler_tag ifc_vlan_attach_event;
	eventhandler_tag ifc_vlan_detach_event;
	uint8_t ifc_mac[ETHER_ADDR_LEN];
	char ifc_mtx_name[16];
};


void *
iflib_get_softc(if_ctx_t ctx)
{

	return (ctx->ifc_softc);
}

device_t
iflib_get_dev(if_ctx_t ctx)
{

	return (ctx->ifc_dev);
}

if_t
iflib_get_ifp(if_ctx_t ctx)
{

	return (ctx->ifc_ifp);
}
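/*
 * These accessors are the only sanctioned way for a driver to reach
 * iflib-owned state, since struct iflib_ctx above is private to this
 * file. A hedged sketch of typical driver-side use (the "foo" driver
 * and its softc are hypothetical, not part of iflib):
 *
 *	static int
 *	foo_if_attach_pre(if_ctx_t ctx)
 *	{
 *		struct foo_softc *sc = iflib_get_softc(ctx);
 *
 *		sc->dev = iflib_get_dev(ctx);
 *		sc->ifp = iflib_get_ifp(ctx);
 *		return (0);
 *	}
 */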
struct ifmedia *
iflib_get_media(if_ctx_t ctx)
{

	return (&ctx->ifc_media);
}

uint32_t
iflib_get_flags(if_ctx_t ctx)
{
	return (ctx->ifc_flags);
}

void
iflib_set_detach(if_ctx_t ctx)
{
	ctx->ifc_in_detach = 1;
}

void
iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
{

	bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN);
}

if_softc_ctx_t
iflib_get_softc_ctx(if_ctx_t ctx)
{

	return (&ctx->ifc_softc_ctx);
}

if_shared_ctx_t
iflib_get_sctx(if_ctx_t ctx)
{

	return (ctx->ifc_sctx);
}

#define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
#define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
/* round up to the start of the next cache line */
#define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)CACHE_LINE_SIZE-1)))

#define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
#define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)

#define RX_SW_DESC_MAP_CREATED	(1 << 0)
#define TX_SW_DESC_MAP_CREATED	(1 << 1)
#define RX_SW_DESC_INUSE	(1 << 3)
#define TX_SW_DESC_MAPPED	(1 << 4)

#define M_TOOBIG	M_PROTO1

typedef struct iflib_sw_rx_desc_array {
	bus_dmamap_t *ifsd_map;		/* bus_dma maps for packet */
	struct mbuf **ifsd_m;		/* pkthdr mbufs */
	caddr_t *ifsd_cl;		/* direct cluster pointer for rx */
	uint8_t *ifsd_flags;
} iflib_rxsd_array_t;

typedef struct iflib_sw_tx_desc_array {
	bus_dmamap_t *ifsd_map;		/* bus_dma maps for packet */
	struct mbuf **ifsd_m;		/* pkthdr mbufs */
	uint8_t *ifsd_flags;
} if_txsd_vec_t;


/* magic number that should be high enough for any hardware */
#define IFLIB_MAX_TX_SEGS 128
/* bnxt supports 64 with hardware LRO enabled */
#define IFLIB_MAX_RX_SEGS 64
#define IFLIB_RX_COPY_THRESH 128
#define IFLIB_MAX_RX_REFRESH 32
/* The minimum descriptors per second before we start coalescing */
#define IFLIB_MIN_DESC_SEC 16384
#define IFLIB_DEFAULT_TX_UPDATE_FREQ 16
#define IFLIB_QUEUE_IDLE 0
#define IFLIB_QUEUE_HUNG 1
#define IFLIB_QUEUE_WORKING 2
/* maximum number of txqs that can share an rx interrupt */
#define IFLIB_MAX_TX_SHARED_INTR 4

/* this should really scale with ring size - this is a fairly arbitrary value */
#define TX_BATCH_SIZE 32

#define IFLIB_RESTART_BUDGET 8


#define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
    CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
    CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
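/*
 * CSUM_OFFLOAD above collects every checksum/TSO flag iflib may see on
 * an outbound mbuf. An illustrative (not compiled) test of whether a
 * packet needs any offload work before encapsulation:
 *
 *	if (m->m_pkthdr.csum_flags & CSUM_OFFLOAD)
 *		... fill in the if_pkt_info checksum/TSO fields ...
 */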
struct iflib_txq {
	qidx_t ift_in_use;
	qidx_t ift_cidx;
	qidx_t ift_cidx_processed;
	qidx_t ift_pidx;
	uint8_t ift_gen;
	uint8_t ift_br_offset;
	uint16_t ift_npending;
	uint16_t ift_db_pending;
	uint16_t ift_rs_pending;
	/* implicit pad */
	uint8_t ift_txd_size[8];
	uint64_t ift_processed;
	uint64_t ift_cleaned;
	uint64_t ift_cleaned_prev;
#if MEMORY_LOGGING
	uint64_t ift_enqueued;
	uint64_t ift_dequeued;
#endif
	uint64_t ift_no_tx_dma_setup;
	uint64_t ift_no_desc_avail;
	uint64_t ift_mbuf_defrag_failed;
	uint64_t ift_mbuf_defrag;
	uint64_t ift_map_failed;
	uint64_t ift_txd_encap_efbig;
	uint64_t ift_pullups;
	uint64_t ift_last_timer_tick;

	struct mtx ift_mtx;
	struct mtx ift_db_mtx;

	/* constant values */
	if_ctx_t ift_ctx;
	struct ifmp_ring *ift_br;
	struct grouptask ift_task;
	qidx_t ift_size;
	uint16_t ift_id;
	struct callout ift_timer;

	if_txsd_vec_t ift_sds;
	uint8_t ift_qstatus;
	uint8_t ift_closed;
	uint8_t ift_update_freq;
	struct iflib_filter_info ift_filter_info;
	bus_dma_tag_t ift_desc_tag;
	bus_dma_tag_t ift_tso_desc_tag;
	iflib_dma_info_t ift_ifdi;
#define MTX_NAME_LEN 16
	char ift_mtx_name[MTX_NAME_LEN];
	char ift_db_mtx_name[MTX_NAME_LEN];
	bus_dma_segment_t ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ift_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

struct iflib_fl {
	qidx_t ifl_cidx;
	qidx_t ifl_pidx;
	qidx_t ifl_credits;
	uint8_t ifl_gen;
	uint8_t ifl_rxd_size;
#if MEMORY_LOGGING
	uint64_t ifl_m_enqueued;
	uint64_t ifl_m_dequeued;
	uint64_t ifl_cl_enqueued;
	uint64_t ifl_cl_dequeued;
#endif
	/* implicit pad */

	bitstr_t *ifl_rx_bitmap;
	qidx_t ifl_fragidx;
	/* constant */
	qidx_t ifl_size;
	uint16_t ifl_buf_size;
	uint16_t ifl_cltype;
	uma_zone_t ifl_zone;
	iflib_rxsd_array_t ifl_sds;
	iflib_rxq_t ifl_rxq;
	uint8_t ifl_id;
	bus_dma_tag_t ifl_desc_tag;
	iflib_dma_info_t ifl_ifdi;
	uint64_t ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
	caddr_t ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
	qidx_t ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
} __aligned(CACHE_LINE_SIZE);

static inline qidx_t
get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
{
	qidx_t used;

	if (pidx > cidx)
		used = pidx - cidx;
	else if (pidx < cidx)
		used = size - cidx + pidx;
	else if (gen == 0 && pidx == cidx)
		used = 0;
	else if (gen == 1 && pidx == cidx)
		used = size;
	else
		panic("bad state");

	return (used);
}

#define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))

#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
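/*
 * Worked example of the occupancy math above, assuming a 1024-entry
 * ring: with cidx = 1000 and pidx = 10 the producer has wrapped, so
 * get_inuse() returns 1024 - 1000 + 10 = 34 and TXQ_AVAIL() is 990.
 * When pidx == cidx the generation bit disambiguates an empty ring
 * (gen == 0, 0 in use) from a completely full one (gen == 1, all 1024
 * in use). IDXDIFF(10, 1000, 1024) computes the same wrapped distance
 * of 34 without needing the generation bit.
 */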
struct iflib_rxq {
	/*
	 * If there is a separate completion queue -
	 * these are the cq cidx and pidx. Otherwise
	 * these are unused.
	 */
	qidx_t ifr_size;
	qidx_t ifr_cq_cidx;
	qidx_t ifr_cq_pidx;
	uint8_t ifr_cq_gen;
	uint8_t ifr_fl_offset;

	if_ctx_t ifr_ctx;
	iflib_fl_t ifr_fl;
	uint64_t ifr_rx_irq;
	uint16_t ifr_id;
	uint8_t ifr_lro_enabled;
	uint8_t ifr_nfl;
	uint8_t ifr_ntxqirq;
	uint8_t ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
	struct lro_ctrl ifr_lc;
	struct grouptask ifr_task;
	struct iflib_filter_info ifr_filter_info;
	iflib_dma_info_t ifr_ifdi;

	/* dynamically allocate if any drivers need a value substantially larger than this */
	struct if_rxd_frag ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ifr_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

typedef struct if_rxsd {
	caddr_t *ifsd_cl;
	struct mbuf **ifsd_m;
	iflib_fl_t ifsd_fl;
	qidx_t ifsd_cidx;
} *if_rxsd_t;

/* multiple of word size */
#ifdef __LP64__
#define PKT_INFO_SIZE	6
#define RXD_INFO_SIZE	5
#define PKT_TYPE uint64_t
#else
#define PKT_INFO_SIZE	11
#define RXD_INFO_SIZE	8
#define PKT_TYPE uint32_t
#endif
#define PKT_LOOP_BOUND ((PKT_INFO_SIZE/3)*3)
#define RXD_LOOP_BOUND ((RXD_INFO_SIZE/4)*4)

typedef struct if_pkt_info_pad {
	PKT_TYPE pkt_val[PKT_INFO_SIZE];
} *if_pkt_info_pad_t;
typedef struct if_rxd_info_pad {
	PKT_TYPE rxd_val[RXD_INFO_SIZE];
} *if_rxd_info_pad_t;

CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));


static inline void
pkt_info_zero(if_pkt_info_t pi)
{
	if_pkt_info_pad_t pi_pad;

	pi_pad = (if_pkt_info_pad_t)pi;
	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
#ifndef __LP64__
	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
#endif
}

static device_method_t iflib_pseudo_methods[] = {
	DEVMETHOD(device_attach, noop_attach),
	DEVMETHOD(device_detach, iflib_pseudo_detach),
	DEVMETHOD_END
};

driver_t iflib_pseudodriver = {
	"iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx),
};

static inline void
rxd_info_zero(if_rxd_info_t ri)
{
	if_rxd_info_pad_t ri_pad;
	int i;

	ri_pad = (if_rxd_info_pad_t)ri;
	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
		ri_pad->rxd_val[i] = 0;
		ri_pad->rxd_val[i+1] = 0;
		ri_pad->rxd_val[i+2] = 0;
		ri_pad->rxd_val[i+3] = 0;
	}
#ifdef __LP64__
	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
#endif
}
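/*
 * The two CTASSERTs above pin the pad structs to the exact sizes of
 * if_pkt_info and if_rxd_info, so pkt_info_zero()/rxd_info_zero() can
 * clear every byte with a handful of word-sized stores instead of a
 * bzero() call. The idea, sketched for LP64 (illustrative only):
 *
 *	uint64_t *p = (uint64_t *)pi;
 *	p[0] = p[1] = p[2] = p[3] = p[4] = p[5] = 0;	// six stores, no call
 *
 * If either real struct grows, the corresponding CTASSERT fails at
 * compile time and PKT_INFO_SIZE/RXD_INFO_SIZE must be updated.
 */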
/*
 * Only allow a single packet to take up most 1/nth of the tx ring
 */
#define MAX_SINGLE_PACKET_FRACTION 12
#define IF_BAD_DMA (bus_addr_t)-1

#define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))

#define CTX_LOCK_INIT(_sc) sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
#define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
#define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
#define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)


#define STATE_LOCK_INIT(_sc, _name) mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
#define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
#define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
#define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)



#define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
#define CALLOUT_UNLOCK(txq)	mtx_unlock(&txq->ift_mtx)


/* Our boot-time initialization hook */
static int	iflib_module_event_handler(module_t, int, void *);

static moduledata_t iflib_moduledata = {
	"iflib",
	iflib_module_event_handler,
	NULL
};

DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
MODULE_VERSION(iflib, 1);

MODULE_DEPEND(iflib, pci, 1, 1, 1);
MODULE_DEPEND(iflib, ether, 1, 1, 1);

TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
TASKQGROUP_DEFINE(if_config_tqg, 1, 1);

#ifndef IFLIB_DEBUG_COUNTERS
#ifdef INVARIANTS
#define IFLIB_DEBUG_COUNTERS 1
#else
#define IFLIB_DEBUG_COUNTERS 0
#endif /* !INVARIANTS */
#endif

static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0,
    "iflib driver parameters");

/*
 * XXX need to ensure that this can't accidentally cause the head to be moved backwards
 */
static int iflib_min_tx_latency = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
    &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
static int iflib_no_tx_batch = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
    &iflib_no_tx_batch, 0, "minimize transmit latency at the possible expense of throughput");


#if IFLIB_DEBUG_COUNTERS

static int iflib_tx_seen;
static int iflib_tx_sent;
static int iflib_tx_encap;
static int iflib_rx_allocs;
static int iflib_fl_refills;
static int iflib_fl_refills_large;
static int iflib_tx_frees;

SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
    &iflib_tx_seen, 0, "# tx mbufs seen");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
    &iflib_tx_sent, 0, "# tx mbufs sent");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
    &iflib_tx_encap, 0, "# tx mbufs encapped");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
    &iflib_tx_frees, 0, "# tx frees");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
    &iflib_rx_allocs, 0, "# rx allocations");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
    &iflib_fl_refills, 0, "# refills");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
    &iflib_fl_refills_large, 0, "# large refills");


static int iflib_txq_drain_flushing;
static int iflib_txq_drain_oactive;
static int iflib_txq_drain_notready;
static int iflib_txq_drain_encapfail;

SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
    &iflib_txq_drain_flushing, 0, "# drain flushes");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
    &iflib_txq_drain_oactive, 0, "# drain oactives");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
    &iflib_txq_drain_notready, 0, "# drain notready");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_encapfail, CTLFLAG_RD,
    &iflib_txq_drain_encapfail, 0, "# drain encap fails");


static int iflib_encap_load_mbuf_fail;
static int iflib_encap_pad_mbuf_fail;
static int iflib_encap_txq_avail_fail;
static int iflib_encap_txd_encap_fail;

SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
    &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
    &iflib_encap_txd_encap_fail, 0, "# driver encap failures");

static int iflib_task_fn_rxs;
static int iflib_rx_intr_enables;
static int iflib_fast_intrs;
static int iflib_intr_link;
static int iflib_intr_msix;
static int iflib_rx_unavail;
static int iflib_rx_ctx_inactive;
static int iflib_rx_zero_len;
static int iflib_rx_if_input;
static int iflib_rx_mbuf_null;
static int iflib_rxd_flush;

static int iflib_verbose_debug;

SYSCTL_INT(_net_iflib, OID_AUTO, intr_link, CTLFLAG_RD,
    &iflib_intr_link, 0, "# intr link calls");
SYSCTL_INT(_net_iflib, OID_AUTO, intr_msix, CTLFLAG_RD,
    &iflib_intr_msix, 0, "# intr msix calls");
SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
    &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
    &iflib_rx_intr_enables, 0, "# rx intr enables");
SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
    &iflib_fast_intrs, 0, "# fast_intr calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
    &iflib_rx_unavail, 0, "# times rxeof called with no available data");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
    &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_zero_len, CTLFLAG_RD,
    &iflib_rx_zero_len, 0, "# times rxeof saw zero len mbuf");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
    &iflib_rx_if_input, 0, "# times rxeof called if_input");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD,
    &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf");
SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
    &iflib_rxd_flush, 0, "# times rxd_flush called");
SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
    &iflib_verbose_debug, 0, "enable verbose debugging");

#define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
static void
iflib_debug_reset(void)
{
	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
	    iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
	    iflib_txq_drain_flushing = iflib_txq_drain_oactive =
	    iflib_txq_drain_notready = iflib_txq_drain_encapfail =
	    iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
	    iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
	    iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
	    iflib_intr_link = iflib_intr_msix = iflib_rx_unavail =
	    iflib_rx_ctx_inactive = iflib_rx_zero_len = iflib_rx_if_input =
	    iflib_rx_mbuf_null = iflib_rxd_flush = 0;
}

#else
#define DBG_COUNTER_INC(name)
static void iflib_debug_reset(void) {}
#endif

#define IFLIB_DEBUG 0

static void iflib_tx_structures_free(if_ctx_t ctx);
static void iflib_rx_structures_free(if_ctx_t ctx);
static int iflib_queues_alloc(if_ctx_t ctx);
static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
static int iflib_qset_structures_setup(if_ctx_t ctx);
static int iflib_msix_init(if_ctx_t ctx);
static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter,
    void *filterarg, int *rid, const char *str);
static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
static int iflib_register(if_ctx_t);
static void iflib_init_locked(if_ctx_t ctx);
static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
static void iflib_add_device_sysctl_post(if_ctx_t ctx);
static void iflib_ifmp_purge(iflib_txq_t txq);
static void _iflib_pre_assert(if_softc_ctx_t scctx);
static void iflib_if_init_locked(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf * iflib_fixup_rx(struct mbuf *m);
#endif

NETDUMP_DEFINE(iflib);

#ifdef DEV_NETMAP
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

MODULE_DEPEND(iflib, netmap, 1, 1, 1);

static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init);

/*
 * device-specific sysctl variables:
 *
 * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
 *	During regular operations the CRC is stripped, but on some
 *	hardware reception of frames not multiple of 64 is slower,
 *	so using crcstrip=0 helps in benchmarks.
 *
 * iflib_rx_miss, iflib_rx_miss_bufs:
 *	count packets that might be missed due to lost interrupts.
 */
SYSCTL_DECL(_dev_netmap);
/*
 * The xl driver by default strips CRCs and we do not override it.
 */

int iflib_crcstrip = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
    CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames");

int iflib_rx_miss, iflib_rx_miss_bufs;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
    CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
    CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs");

/*
 * Register/unregister. We are already under netmap lock.
 * Only called on the first register or the last unregister.
 */
static int
iflib_netmap_register(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;
	int status;

	CTX_LOCK(ctx);
	IFDI_INTR_DISABLE(ctx);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	if (!CTX_IS_VF(ctx))
		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);

	/* enable or disable flags and callbacks in na and ifp */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	iflib_stop(ctx);
	iflib_init_locked(ctx);
	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ?
	status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
	if (status)
		nm_clear_native_flags(na);
	CTX_UNLOCK(ctx);
	return (status);
}

static int
netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init)
{
	struct netmap_adapter *na = kring->na;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int head = kring->rhead;
	struct netmap_ring *ring = kring->ring;
	bus_dmamap_t *map;
	struct if_rxd_update iru;
	if_ctx_t ctx = rxq->ifr_ctx;
	iflib_fl_t fl = &rxq->ifr_fl[0];
	uint32_t refill_pidx, nic_i;

	if (nm_i == head && __predict_true(!init))
		return (0);
	iru_init(&iru, rxq, 0 /* flid */);
	map = fl->ifl_sds.ifsd_map;
	refill_pidx = netmap_idx_k2n(kring, nm_i);
	/*
	 * IMPORTANT: we must leave one free slot in the ring,
	 * so move head back by one unit
	 */
	head = nm_prev(head, lim);
	nic_i = UINT_MAX;
	while (nm_i != head) {
		for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]);
			uint32_t nic_i_dma = refill_pidx;
			nic_i = netmap_idx_k2n(kring, nm_i);

			MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH);

			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
				return (netmap_ring_reinit(kring));

			fl->ifl_vm_addrs[tmp_pidx] = addr;
			if (__predict_false(init) && map) {
				netmap_load_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
			} else if (map && (slot->flags & NS_BUF_CHANGED)) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
			}
			slot->flags &= ~NS_BUF_CHANGED;

			nm_i = nm_next(nm_i, lim);
			fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim);
			if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1)
				continue;

			iru.iru_pidx = refill_pidx;
			iru.iru_count = tmp_pidx+1;
			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);

			refill_pidx = nic_i;
			if (map == NULL)
				continue;

			for (int n = 0; n < iru.iru_count; n++) {
				bus_dmamap_sync(fl->ifl_ifdi->idi_tag, map[nic_i_dma],
				    BUS_DMASYNC_PREREAD);
				/* XXX - change this to not use the netmap func*/
				nic_i_dma = nm_next(nic_i_dma, lim);
			}
		}
	}
	kring->nr_hwcur = head;

	if (map)
		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (__predict_true(nic_i != UINT_MAX))
		ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
	return (0);
}
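/*
 * netmap_fl_refill() above juggles two index spaces: nm_i counts slots
 * in the netmap kring while nic_i counts descriptors in the free list,
 * related (as the rxsync comment below also notes) by
 *
 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
 *
 * netmap_idx_k2n() performs that translation. Batches of up to
 * IFLIB_MAX_RX_REFRESH slots are handed to the driver's
 * isc_rxd_refill() callback, and a single isc_rxd_flush() at the end
 * publishes the new producer index to the hardware.
 */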
/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * All information is in the kring.
 * Userspace wants to send packets up to the one before kring->rhead,
 * kernel knows kring->nr_hwcur is the first unsent packet.
 *
 * Here we push packets out (as many as possible), and possibly
 * reclaim buffers from previously completed transmission.
 *
 * The caller (netmap) guarantees that there is only one instance
 * running at any time. Any interference with other driver
 * methods should be handled by the individual drivers.
 */
static int
iflib_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap kring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct if_pkt_info pi;

	/*
	 * interrupts on every tx packet are expensive so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;
	/* device-specific */
	if_ctx_t ctx = ifp->if_softc;
	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];

	if (txq->ift_sds.ifsd_map)
		bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);


	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap kring,
	 * nic_i is the corresponding index in the NIC ring.
	 *
	 * If we have packets to send (nm_i != head)
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() call is especially expensive,
	 * even when (as in this case) the tag is 0, so only do it
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		pkt_info_zero(&pi);
		pi.ipi_segs = txq->ift_segs;
		pi.ipi_qsidx = kring->ring_id;
		nic_i = netmap_idx_k2n(kring, nm_i);

		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
		if (txq->ift_sds.ifsd_map)
			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);

		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int flags = (slot->flags & NS_REPORT ||
			    nic_i == 0 || nic_i == report_frequency) ?
			    IPI_TX_INTR : 0;

			/* device-specific */
			pi.ipi_len = len;
			pi.ipi_segs[0].ds_addr = paddr;
			pi.ipi_segs[0].ds_len = len;
			pi.ipi_nsegs = 1;
			pi.ipi_ndescs = 0;
			pi.ipi_pidx = nic_i;
			pi.ipi_flags = flags;
			/* Fill the slot in the NIC ring. */
			ctx->isc_txd_encap(ctx->ifc_softc, &pi);

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
			if (txq->ift_sds.ifsd_map) {
				__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);

				NM_CHECK_ADDR_LEN(na, addr, len);

				if (slot->flags & NS_BUF_CHANGED) {
					/* buffer has changed, reload map */
					netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
				}
				/* make sure changes to the buffer are synced */
				bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
				    BUS_DMASYNC_PREWRITE);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = nm_i;

		/* synchronize the NIC ring */
		if (txq->ift_sds.ifsd_map)
			bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 *
	 * If there are unclaimed buffers, attempt to reclaim them.
	 * If none are reclaimed, and TX IRQs are not in use, do an initial
	 * minimal delay, then trigger the tx handler which will spin in the
	 * group task queue.
	 */
	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
		if (iflib_tx_credits_update(ctx, txq)) {
			/* some tx completed, increment avail */
			nic_i = txq->ift_cidx_processed;
			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
		}
	}
	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
		if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
			callout_reset_on(&txq->ift_timer, hz < 2000 ? 1 : hz / 1000,
			    iflib_timer, txq, txq->ift_timer.c_cpu);
		}
	return (0);
}
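/*
 * The reclaim half of txsync above leans entirely on the driver's
 * credits callback: isc_txd_credits_update(softc, qid, clear) reports
 * how many descriptors the NIC has finished with, and
 * iflib_tx_credits_update() folds that into txq->ift_cidx_processed,
 * which is then translated back into a netmap index for
 * kring->nr_hwtail. If no credits arrive and TX interrupts are not in
 * use, the callout re-arms (about once per millisecond) so a quiet
 * ring cannot wedge waiting for a completion that will never signal.
 */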
/*
 * Reconcile kernel and user view of the receive ring.
 * Same as for the txsync, this routine must be efficient.
 * The caller guarantees a single invocation, but races against
 * the rest of the driver should be handled here.
 *
 * On call, kring->rhead is the first packet that userspace wants
 * to keep, and kring->rcur is the wakeup point.
 * The kernel has previously reported packets up to kring->rtail.
 *
 * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
 * of whether or not we received an interrupt.
 */
static int
iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	uint32_t nm_i;	/* index into the netmap ring */
	uint32_t nic_i;	/* index into the NIC ring */
	u_int i, n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
	struct if_rxd_info ri;

	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;
	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
	iflib_fl_t fl = rxq->ifr_fl;

	if (head > lim)
		return (netmap_ring_reinit(kring));

	/* XXX check sync modes */
	for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
		if (fl->ifl_sds.ifsd_map == NULL)
			continue;
		bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}
	/*
	 * First part: import newly received packets.
	 *
	 * nm_i is the index of the next free slot in the netmap ring,
	 * nic_i is the index of the next received packet in the NIC ring,
	 * and they may differ in case if_init() has been called while
	 * in netmap mode. For the receive ring we have
	 *
	 *	nic_i = rxr->next_check;
	 *	nm_i = kring->nr_hwtail (previous)
	 * and
	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 *
	 * rxr->next_check is set to 0 on a ring reinit
	 */
	if (netmap_no_pendintr || force_update) {
		int crclen = iflib_crcstrip ? 0 : 4;
		int error, avail;

		for (i = 0; i < rxq->ifr_nfl; i++) {
			fl = &rxq->ifr_fl[i];
			nic_i = fl->ifl_cidx;
			nm_i = netmap_idx_n2k(kring, nic_i);
			avail = iflib_rxd_avail(ctx, rxq, nic_i, USHRT_MAX);
			for (n = 0; avail > 0; n++, avail--) {
				rxd_info_zero(&ri);
				ri.iri_frags = rxq->ifr_frags;
				ri.iri_qsidx = kring->ring_id;
				ri.iri_ifp = ctx->ifc_ifp;
				ri.iri_cidx = nic_i;

				error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
				ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
				ring->slot[nm_i].flags = 0;
				if (fl->ifl_sds.ifsd_map)
					bus_dmamap_sync(fl->ifl_ifdi->idi_tag,
					    fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
				nm_i = nm_next(nm_i, lim);
				nic_i = nm_next(nic_i, lim);
			}
			if (n) { /* update the state variables */
				if (netmap_no_pendintr && !force_update) {
					/* diagnostics */
					iflib_rx_miss++;
					iflib_rx_miss_bufs += n;
				}
				fl->ifl_cidx = nic_i;
				kring->nr_hwtail = nm_i;
			}
			kring->nr_kflags &= ~NKR_PENDINTR;
		}
	}
	/*
	 * Second part: skip past packets that userspace has released
	 * (kring->nr_hwcur to head excluded), and make the buffers
	 * available for reception.
	 * As usual nm_i is the index in the netmap ring,
	 * nic_i is the index in the NIC ring, and
	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 */
	/* XXX not sure how this will work with multiple free lists */
	nm_i = kring->nr_hwcur;

	return (netmap_fl_refill(rxq, kring, nm_i, false));
}

static void
iflib_netmap_intr(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;

	CTX_LOCK(ctx);
	if (onoff) {
		IFDI_INTR_ENABLE(ctx);
	} else {
		IFDI_INTR_DISABLE(ctx);
	}
	CTX_UNLOCK(ctx);
}


static int
iflib_netmap_attach(if_ctx_t ctx)
{
	struct netmap_adapter na;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;

	bzero(&na, sizeof(na));

	na.ifp = ctx->ifc_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);

	na.num_tx_desc = scctx->isc_ntxd[0];
	na.num_rx_desc = scctx->isc_nrxd[0];
	na.nm_txsync = iflib_netmap_txsync;
	na.nm_rxsync = iflib_netmap_rxsync;
	na.nm_register = iflib_netmap_register;
	na.nm_intr = iflib_netmap_intr;
	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
	return (netmap_attach(&na));
}

static void
iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_slot *slot;

	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
	if (slot == NULL)
		return;
	if (txq->ift_sds.ifsd_map == NULL)
		return;

	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {
		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
		 * netmap slot index, si
		 */
		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
		netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si));
	}
}

static void
iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_kring *kring = na->rx_rings[rxq->ifr_id];
	struct netmap_slot *slot;
	uint32_t nm_i;

	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
	if (slot == NULL)
		return;
	nm_i = netmap_idx_n2k(kring, 0);
	netmap_fl_refill(rxq, kring, nm_i, true);
}

static void
iflib_netmap_timer_adjust(if_ctx_t ctx, uint16_t txqid, uint32_t *reset_on)
{
	struct netmap_kring *kring;

	kring = NA(ctx->ifc_ifp)->tx_rings[txqid];

	if (kring->nr_hwcur != nm_next(kring->nr_hwtail, kring->nkr_num_slots - 1)) {
		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false))
			netmap_tx_irq(ctx->ifc_ifp, txqid);
		if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) {
			if (hz < 2000)
				*reset_on = 1;
			else
				*reset_on = hz / 1000;
		}
	}
}

#define iflib_netmap_detach(ifp) netmap_detach(ifp)

#else
#define iflib_netmap_txq_init(ctx, txq)
#define iflib_netmap_rxq_init(ctx, rxq)
#define iflib_netmap_detach(ifp)

#define iflib_netmap_attach(ctx) (0)
#define netmap_rx_irq(ifp, qid, budget) (0)
#define netmap_tx_irq(ifp, qid) do {} while (0)
#define iflib_netmap_timer_adjust(ctx, txqid, reset_on)

#endif

#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}
static __inline void
prefetch2cachelines(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
#if (CACHE_LINE_SIZE < 128)
	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
#endif
}
#else
#define prefetch(x)
#define prefetch2cachelines(x)
#endif
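/*
 * prefetch()/prefetch2cachelines() compile to prefetcht0 on x86 and to
 * nothing elsewhere. Typical (illustrative) use in a consumer loop is
 * to fetch the descriptor one slot ahead of the one being processed,
 * so the memory load overlaps with the current iteration's work:
 *
 *	while (avail--) {
 *		prefetch(&ring[(cidx + 1) & mask]);
 *		process(&ring[cidx]);
 *		cidx = (cidx + 1) & mask;
 *	}
 */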
static void
iflib_gen_mac(if_ctx_t ctx)
{
	struct thread *td;
	MD5_CTX mdctx;
	char uuid[HOSTUUIDLEN+1];
	char buf[HOSTUUIDLEN+16];
	uint8_t *mac;
	unsigned char digest[16];

	td = curthread;
	mac = ctx->ifc_mac;
	uuid[HOSTUUIDLEN] = 0;
	bcopy(td->td_ucred->cr_prison->pr_hostuuid, uuid, HOSTUUIDLEN);
	snprintf(buf, HOSTUUIDLEN+16, "%s-%s", uuid, device_get_nameunit(ctx->ifc_dev));
	/*
	 * Generate a pseudo-random, deterministic MAC
	 * address based on the UUID and unit number.
	 * The FreeBSD Foundation OUI of 58-9C-FC is used.
	 */
	MD5Init(&mdctx);
	MD5Update(&mdctx, buf, strlen(buf));
	MD5Final(digest, &mdctx);

	mac[0] = 0x58;
	mac[1] = 0x9C;
	mac[2] = 0xFC;
	mac[3] = digest[0];
	mac[4] = digest[1];
	mac[5] = digest[2];
}

static void
iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
{
	iflib_fl_t fl;

	fl = &rxq->ifr_fl[flid];
	iru->iru_paddrs = fl->ifl_bus_addrs;
	iru->iru_vaddrs = &fl->ifl_vm_addrs[0];
	iru->iru_idxs = fl->ifl_rxd_idxs;
	iru->iru_qsidx = rxq->ifr_id;
	iru->iru_buf_size = fl->ifl_buf_size;
	iru->iru_flidx = fl->ifl_id;
}

static void
_iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	if (err)
		return;
	*(bus_addr_t *) arg = segs[0].ds_addr;
}

int
iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
{
	int err;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	device_t dev = ctx->ifc_dev;

	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));

	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    sctx->isc_q_align, 0,	/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &dma->idi_tag);
	if (err) {
		device_printf(dev,
		    "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto fail_0;
	}

	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
	if (err) {
		device_printf(dev,
		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
		    __func__, (uintmax_t)size, err);
		goto fail_1;
	}

	dma->idi_paddr = IF_BAD_DMA;
	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
	if (err || dma->idi_paddr == IF_BAD_DMA) {
		device_printf(dev,
		    "%s: bus_dmamap_load failed: %d\n",
		    __func__, err);
		goto fail_2;
	}

	dma->idi_size = size;
	return (0);

fail_2:
	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
fail_1:
	bus_dma_tag_destroy(dma->idi_tag);
fail_0:
	dma->idi_tag = NULL;

	return (err);
}

int
iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
{
	int i, err = 0;		/* don't test an uninitialized err when count is 0 */
	iflib_dma_info_t *dmaiter;

	dmaiter = dmalist;
	for (i = 0; i < count; i++, dmaiter++) {
		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
			break;
	}
	if (err)
		iflib_dma_free_multi(dmalist, i);
	return (err);
}

void
iflib_dma_free(iflib_dma_info_t dma)
{
	if (dma->idi_tag == NULL)
		return;
	if (dma->idi_paddr != IF_BAD_DMA) {
		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
		dma->idi_paddr = IF_BAD_DMA;
	}
	if (dma->idi_vaddr != NULL) {
		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
		dma->idi_vaddr = NULL;
	}
	bus_dma_tag_destroy(dma->idi_tag);
	dma->idi_tag = NULL;
}
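/*
 * iflib_dma_alloc()/iflib_dma_free() bracket the lifetime of one
 * coherent DMA area: tag create, memory alloc, and map load on the way
 * in; unload, free, and tag destroy on the way out. A hedged sketch of
 * driver-side use for a hypothetical 4KB descriptor ring (error
 * handling elided):
 *
 *	struct iflib_dma_info di;
 *
 *	if (iflib_dma_alloc(ctx, 4096, &di, BUS_DMA_NOWAIT) != 0)
 *		return (ENOMEM);
 *	// di.idi_vaddr is the zeroed KVA, di.idi_paddr the bus address
 *	...
 *	iflib_dma_free(&di);
 */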
void
iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
{
	int i;
	iflib_dma_info_t *dmaiter = dmalist;

	for (i = 0; i < count; i++, dmaiter++)
		iflib_dma_free(*dmaiter);
}

#ifdef EARLY_AP_STARTUP
static const int iflib_started = 1;
#else
/*
 * We used to abuse the smp_started flag to decide if the queues have been
 * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()).
 * That gave bad races, since the SYSINIT() runs strictly after smp_started
 * is set. Run a SYSINIT() strictly after that to just set a usable
 * completion flag.
 */

static int iflib_started;

static void
iflib_record_started(void *arg)
{
	iflib_started = 1;
}

SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST,
    iflib_record_started, NULL);
#endif

static int
iflib_fast_intr(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;

	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}

static int
iflib_fast_intr_rxtx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
	if_ctx_t ctx = NULL;
	int i, cidx;

	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	MPASS(rxq->ifr_ntxqirq);
	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
		qidx_t txqid = rxq->ifr_txqid[i];

		ctx = rxq->ifr_ctx;

		if (!ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) {
			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
			continue;
		}
		GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
	}
	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
		cidx = rxq->ifr_cq_cidx;
	else
		cidx = rxq->ifr_fl[0].ifl_cidx;
	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
		GROUPTASK_ENQUEUE(gtask);
	else
		IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
	return (FILTER_HANDLED);
}


static int
iflib_fast_intr_ctx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;

	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}
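/*
 * All three fast interrupt filters above share one shape: run in
 * filter (interrupt) context where sleeping is forbidden, give the
 * driver's own ifi_filter a chance to claim the event, then defer the
 * real work to a grouptask thread. Control flow, roughly:
 *
 *	hardware IRQ -> iflib_fast_intr*()   (filter context)
 *	             -> GROUPTASK_ENQUEUE()  (schedule, no processing)
 *	             -> rx/tx grouptask      (full processing, can sleep)
 *
 * Returning FILTER_HANDLED tells the interrupt framework the event was
 * consumed without needing an ithread for this handler.
 */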
static int
_iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    const char *name)
{
	int rc, flags;
	struct resource *res;
	void *tag = NULL;
	device_t dev = ctx->ifc_dev;

	flags = RF_ACTIVE;
	if (ctx->ifc_flags & IFC_LEGACY)
		flags |= RF_SHAREABLE;
	MPASS(rid < 512);
	irq->ii_rid = rid;
	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags);
	if (res == NULL) {
		device_printf(dev,
		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
		return (ENOMEM);
	}
	irq->ii_res = res;
	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
	    filter, handler, arg, &tag);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, name ? name : "unknown", rc);
		return (rc);
	} else if (name)
		bus_describe_intr(dev, res, tag, "%s", name);

	irq->ii_tag = tag;
	return (0);
}


/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/

static int
iflib_txsd_alloc(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	bus_size_t tsomaxsize;
	int err, nsegments, ntsosegments;

	nsegments = scctx->isc_tx_nsegments;
	ntsosegments = scctx->isc_tx_tso_segments_max;
	tsomaxsize = scctx->isc_tx_tso_size_max;
	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU)
		tsomaxsize += sizeof(struct ether_vlan_header);
	MPASS(scctx->isc_ntxd[0] > 0);
	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
	MPASS(nsegments > 0);
	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) {
		MPASS(ntsosegments > 0);
		MPASS(sctx->isc_tso_maxsize >= tsomaxsize);
	}

	/*
	 * Setup DMA descriptor areas.
	 */
	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    sctx->isc_tx_maxsize,	/* maxsize */
	    nsegments,			/* nsegments */
	    sctx->isc_tx_maxsegsize,	/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &txq->ift_desc_tag))) {
		device_printf(dev, "Unable to allocate TX DMA tag: %d\n", err);
		device_printf(dev, "maxsize: %ju nsegments: %d maxsegsize: %ju\n",
		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
		goto fail;
	}
	if ((if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) &&
	    (err = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    tsomaxsize,			/* maxsize */
	    ntsosegments,		/* nsegments */
	    sctx->isc_tso_maxsegsize,	/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &txq->ift_tso_desc_tag))) {
		device_printf(dev, "Unable to allocate TX TSO DMA tag: %d\n", err);

		goto fail;
	}
	if (!(txq->ift_sds.ifsd_flags =
	    (uint8_t *) malloc(sizeof(uint8_t) *
	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		err = ENOMEM;
		goto fail;
	}
	if (!(txq->ift_sds.ifsd_m =
	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		err = ENOMEM;
		goto fail;
	}
	/* Create the descriptor buffer dma maps */
#if defined(ACPI_DMAR) || (!(defined(__i386__) || defined(__amd64__)))
	if ((ctx->ifc_flags & IFC_DMAR) == 0)
		return (0);

	if (!(txq->ift_sds.ifsd_map =
	    (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer map memory\n");
		err = ENOMEM;
		goto fail;
	}

	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
		err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]);
		if (err != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
	}
#endif
	return (0);
fail:
	/* We free all, it handles case where we are in the middle */
	iflib_tx_structures_free(ctx);
	return (err);
}

static void
iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
{
	bus_dmamap_t map;

	map = NULL;
	if (txq->ift_sds.ifsd_map != NULL)
		map = txq->ift_sds.ifsd_map[i];
	if (map != NULL) {
		bus_dmamap_unload(txq->ift_desc_tag, map);
		bus_dmamap_destroy(txq->ift_desc_tag, map);
		txq->ift_sds.ifsd_map[i] = NULL;
	}
}

static void
iflib_txq_destroy(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;

	for (int i = 0; i < txq->ift_size; i++)
		iflib_txsd_destroy(ctx, txq, i);
	if (txq->ift_sds.ifsd_map != NULL) {
		free(txq->ift_sds.ifsd_map, M_IFLIB);
		txq->ift_sds.ifsd_map = NULL;
	}
	if (txq->ift_sds.ifsd_m != NULL) {
		free(txq->ift_sds.ifsd_m, M_IFLIB);
		txq->ift_sds.ifsd_m = NULL;
	}
	if (txq->ift_sds.ifsd_flags != NULL) {
		free(txq->ift_sds.ifsd_flags, M_IFLIB);
		txq->ift_sds.ifsd_flags = NULL;
	}
	if (txq->ift_desc_tag != NULL) {
		bus_dma_tag_destroy(txq->ift_desc_tag);
		txq->ift_desc_tag = NULL;
	}
	if (txq->ift_tso_desc_tag != NULL) {
		bus_dma_tag_destroy(txq->ift_tso_desc_tag);
		txq->ift_tso_desc_tag = NULL;
	}
}

static void
iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
{
	struct mbuf **mp;

	mp = &txq->ift_sds.ifsd_m[i];
	if (*mp == NULL)
		return;

	if (txq->ift_sds.ifsd_map != NULL) {
		bus_dmamap_sync(txq->ift_desc_tag,
		    txq->ift_sds.ifsd_map[i],
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->ift_desc_tag,
		    txq->ift_sds.ifsd_map[i]);
	}
	m_free(*mp);
	DBG_COUNTER_INC(tx_frees);
	*mp = NULL;
}

static int
iflib_txq_setup(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	iflib_dma_info_t di;
	int i;

	/* Set number of descriptors available */
	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	/* XXX make configurable */
	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;

	/* Reset indices */
	txq->ift_cidx_processed = 0;
	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];

	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
		bzero((void *)di->idi_vaddr, di->idi_size);

	IFDI_TXQ_SETUP(ctx, txq->ift_id);
	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
		bus_dmamap_sync(di->idi_tag, di->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}
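/*
 * Note the sync discipline in iflib_txq_setup() above, repeated all
 * through this file: descriptor memory is written by the CPU, then
 * bus_dmamap_sync(..., BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE) is
 * issued before the hardware may look at it; the matching
 * POSTREAD | POSTWRITE sync happens before the CPU re-reads what the
 * hardware wrote. On coherent platforms these are cheap or free, but
 * skipping them breaks bounce-buffer and non-coherent configurations.
 */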
/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
iflib_rxsd_alloc(iflib_rxq_t rxq)
{
	if_ctx_t ctx = rxq->ifr_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	iflib_fl_t fl;
	int err;

	MPASS(scctx->isc_nrxd[0] > 0);
	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);

	fl = rxq->ifr_fl;
	for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
		    1, 0,			/* alignment, bounds */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    sctx->isc_rx_maxsize,	/* maxsize */
		    sctx->isc_rx_nsegments,	/* nsegments */
		    sctx->isc_rx_maxsegsize,	/* maxsegsize */
		    0,				/* flags */
		    NULL,			/* lockfunc */
		    NULL,			/* lockarg */
		    &fl->ifl_desc_tag);
		if (err) {
			device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
			    __func__, err);
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_flags =
		    (uint8_t *) malloc(sizeof(uint8_t) *
		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx buffer flag memory\n");
			err = ENOMEM;
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_m =
		    (struct mbuf **) malloc(sizeof(struct mbuf *) *
		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx mbuf array memory\n");
			err = ENOMEM;
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_cl =
		    (caddr_t *) malloc(sizeof(caddr_t) *
		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx cluster array memory\n");
			err = ENOMEM;
			goto fail;
		}
(defined(__i386__) || defined(__amd64__))) 1852 if ((ctx->ifc_flags & IFC_DMAR) == 0) 1853 continue; 1854 1855 if (!(fl->ifl_sds.ifsd_map = 1856 (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { 1857 device_printf(dev, "Unable to allocate tx_buffer map memory\n"); 1858 err = ENOMEM; 1859 goto fail; 1860 } 1861 1862 for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) { 1863 err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]); 1864 if (err != 0) { 1865 device_printf(dev, "Unable to create RX buffer DMA map\n"); 1866 goto fail; 1867 } 1868 } 1869 #endif 1870 } 1871 return (0); 1872 1873 fail: 1874 iflib_rx_structures_free(ctx); 1875 return (err); 1876 } 1877 1878 1879 /* 1880 * Internal service routines 1881 */ 1882 1883 struct rxq_refill_cb_arg { 1884 int error; 1885 bus_dma_segment_t seg; 1886 int nseg; 1887 }; 1888 1889 static void 1890 _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1891 { 1892 struct rxq_refill_cb_arg *cb_arg = arg; 1893 1894 cb_arg->error = error; 1895 cb_arg->seg = segs[0]; 1896 cb_arg->nseg = nseg; 1897 } 1898 1899 1900 #ifdef ACPI_DMAR 1901 #define IS_DMAR(ctx) (ctx->ifc_flags & IFC_DMAR) 1902 #else 1903 #define IS_DMAR(ctx) (0) 1904 #endif 1905 1906 /** 1907 * rxq_refill - refill an rxq free-buffer list 1908 * @ctx: the iflib context 1909 * @rxq: the free-list to refill 1910 * @n: the number of new buffers to allocate 1911 * 1912 * (Re)populate an rxq free-buffer list with up to @n new packet buffers. 1913 * The caller must assure that @n does not exceed the queue's capacity. 1914 */ 1915 static void 1916 _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count) 1917 { 1918 struct mbuf *m; 1919 int idx, frag_idx = fl->ifl_fragidx; 1920 int pidx = fl->ifl_pidx; 1921 caddr_t cl, *sd_cl; 1922 struct mbuf **sd_m; 1923 uint8_t *sd_flags; 1924 struct if_rxd_update iru; 1925 bus_dmamap_t *sd_map; 1926 int n, i = 0; 1927 uint64_t bus_addr; 1928 int err; 1929 qidx_t credits; 1930 1931 sd_m = fl->ifl_sds.ifsd_m; 1932 sd_map = fl->ifl_sds.ifsd_map; 1933 sd_cl = fl->ifl_sds.ifsd_cl; 1934 sd_flags = fl->ifl_sds.ifsd_flags; 1935 idx = pidx; 1936 credits = fl->ifl_credits; 1937 1938 n = count; 1939 MPASS(n > 0); 1940 MPASS(credits + n <= fl->ifl_size); 1941 1942 if (pidx < fl->ifl_cidx) 1943 MPASS(pidx + n <= fl->ifl_cidx); 1944 if (pidx == fl->ifl_cidx && (credits < fl->ifl_size)) 1945 MPASS(fl->ifl_gen == 0); 1946 if (pidx > fl->ifl_cidx) 1947 MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx); 1948 1949 DBG_COUNTER_INC(fl_refills); 1950 if (n > 8) 1951 DBG_COUNTER_INC(fl_refills_large); 1952 iru_init(&iru, fl->ifl_rxq, fl->ifl_id); 1953 while (n--) { 1954 /* 1955 * We allocate an uninitialized mbuf + cluster, mbuf is 1956 * initialized after rx. 
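* (MT_NOINIT leaves the mbuf uninitialized at allocation time; m_init()
* is run only once a packet actually lands in it, which keeps this
* refill loop cheap.)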
1957 * 1958 * If the cluster is still set then we know a minimum sized packet was received 1959 */ 1960 bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, &frag_idx); 1961 if ((frag_idx < 0) || (frag_idx >= fl->ifl_size)) 1962 bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx); 1963 if ((cl = sd_cl[frag_idx]) == NULL) { 1964 if ((cl = sd_cl[frag_idx] = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL) 1965 break; 1966 #if MEMORY_LOGGING 1967 fl->ifl_cl_enqueued++; 1968 #endif 1969 } 1970 if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) { 1971 break; 1972 } 1973 #if MEMORY_LOGGING 1974 fl->ifl_m_enqueued++; 1975 #endif 1976 1977 DBG_COUNTER_INC(rx_allocs); 1978 #if defined(__i386__) || defined(__amd64__) 1979 if (!IS_DMAR(ctx)) { 1980 bus_addr = pmap_kextract((vm_offset_t)cl); 1981 } else 1982 #endif 1983 { 1984 struct rxq_refill_cb_arg cb_arg; 1985 1986 cb_arg.error = 0; 1987 MPASS(sd_map != NULL); 1988 MPASS(sd_map[frag_idx] != NULL); 1989 err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx], 1990 cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0); 1991 bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx], 1992 BUS_DMASYNC_PREREAD); 1993 1994 if (err != 0 || cb_arg.error) { 1995 /* 1996 * !zone_pack ? 1997 */ 1998 if (fl->ifl_zone == zone_pack) 1999 uma_zfree(fl->ifl_zone, cl); 2000 m_free(m); 2001 n = 0; 2002 goto done; 2003 } 2004 bus_addr = cb_arg.seg.ds_addr; 2005 } 2006 bit_set(fl->ifl_rx_bitmap, frag_idx); 2007 sd_flags[frag_idx] |= RX_SW_DESC_INUSE; 2008 2009 MPASS(sd_m[frag_idx] == NULL); 2010 sd_cl[frag_idx] = cl; 2011 sd_m[frag_idx] = m; 2012 fl->ifl_rxd_idxs[i] = frag_idx; 2013 fl->ifl_bus_addrs[i] = bus_addr; 2014 fl->ifl_vm_addrs[i] = cl; 2015 credits++; 2016 i++; 2017 MPASS(credits <= fl->ifl_size); 2018 if (++idx == fl->ifl_size) { 2019 fl->ifl_gen = 1; 2020 idx = 0; 2021 } 2022 if (n == 0 || i == IFLIB_MAX_RX_REFRESH) { 2023 iru.iru_pidx = pidx; 2024 iru.iru_count = i; 2025 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); 2026 i = 0; 2027 pidx = idx; 2028 fl->ifl_pidx = idx; 2029 fl->ifl_credits = credits; 2030 } 2031 2032 } 2033 done: 2034 if (i) { 2035 iru.iru_pidx = pidx; 2036 iru.iru_count = i; 2037 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); 2038 fl->ifl_pidx = idx; 2039 fl->ifl_credits = credits; 2040 } 2041 DBG_COUNTER_INC(rxd_flush); 2042 if (fl->ifl_pidx == 0) 2043 pidx = fl->ifl_size - 1; 2044 else 2045 pidx = fl->ifl_pidx - 1; 2046 2047 if (sd_map) 2048 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 2049 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2050 ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx); 2051 fl->ifl_fragidx = frag_idx; 2052 } 2053 2054 static __inline void 2055 __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max) 2056 { 2057 /* we avoid allowing pidx to catch up with cidx as it confuses ixl */ 2058 int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1; 2059 #ifdef INVARIANTS 2060 int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1; 2061 #endif 2062 2063 MPASS(fl->ifl_credits <= fl->ifl_size); 2064 MPASS(reclaimable == delta); 2065 2066 if (reclaimable > 0) 2067 _iflib_fl_refill(ctx, fl, min(max, reclaimable)); 2068 } 2069 2070 static void 2071 iflib_fl_bufs_free(iflib_fl_t fl) 2072 { 2073 iflib_dma_info_t idi = fl->ifl_ifdi; 2074 uint32_t i; 2075 2076 for (i = 0; i < fl->ifl_size; i++) { 2077 struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i]; 2078 uint8_t *sd_flags = &fl->ifl_sds.ifsd_flags[i]; 2079 caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i]; 2080 2081 if 
(*sd_flags & RX_SW_DESC_INUSE) { 2082 if (fl->ifl_sds.ifsd_map != NULL) { 2083 bus_dmamap_t sd_map = fl->ifl_sds.ifsd_map[i]; 2084 bus_dmamap_unload(fl->ifl_desc_tag, sd_map); 2085 if (fl->ifl_rxq->ifr_ctx->ifc_in_detach) 2086 bus_dmamap_destroy(fl->ifl_desc_tag, sd_map); 2087 } 2088 if (*sd_m != NULL) { 2089 m_init(*sd_m, M_NOWAIT, MT_DATA, 0); 2090 uma_zfree(zone_mbuf, *sd_m); 2091 } 2092 if (*sd_cl != NULL) 2093 uma_zfree(fl->ifl_zone, *sd_cl); 2094 *sd_flags = 0; 2095 } else { 2096 MPASS(*sd_cl == NULL); 2097 MPASS(*sd_m == NULL); 2098 } 2099 #if MEMORY_LOGGING 2100 fl->ifl_m_dequeued++; 2101 fl->ifl_cl_dequeued++; 2102 #endif 2103 *sd_cl = NULL; 2104 *sd_m = NULL; 2105 } 2106 #ifdef INVARIANTS 2107 for (i = 0; i < fl->ifl_size; i++) { 2108 MPASS(fl->ifl_sds.ifsd_flags[i] == 0); 2109 MPASS(fl->ifl_sds.ifsd_cl[i] == NULL); 2110 MPASS(fl->ifl_sds.ifsd_m[i] == NULL); 2111 } 2112 #endif 2113 /* 2114 * Reset free list values 2115 */ 2116 fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0; 2117 bzero(idi->idi_vaddr, idi->idi_size); 2118 } 2119 2120 /********************************************************************* 2121 * 2122 * Initialize a receive ring and its buffers. 2123 * 2124 **********************************************************************/ 2125 static int 2126 iflib_fl_setup(iflib_fl_t fl) 2127 { 2128 iflib_rxq_t rxq = fl->ifl_rxq; 2129 if_ctx_t ctx = rxq->ifr_ctx; 2130 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 2131 2132 bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1); 2133 /* 2134 ** Free current RX buffer structs and their mbufs 2135 */ 2136 iflib_fl_bufs_free(fl); 2137 /* Now replenish the mbufs */ 2138 MPASS(fl->ifl_credits == 0); 2139 /* 2140 * XXX don't set the max_frame_size to larger 2141 * than the hardware can handle 2142 */ 2143 if (sctx->isc_max_frame_size <= 2048) 2144 fl->ifl_buf_size = MCLBYTES; 2145 #ifndef CONTIGMALLOC_WORKS 2146 else 2147 fl->ifl_buf_size = MJUMPAGESIZE; 2148 #else 2149 else if (sctx->isc_max_frame_size <= 4096) 2150 fl->ifl_buf_size = MJUMPAGESIZE; 2151 else if (sctx->isc_max_frame_size <= 9216) 2152 fl->ifl_buf_size = MJUM9BYTES; 2153 else 2154 fl->ifl_buf_size = MJUM16BYTES; 2155 #endif 2156 if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size) 2157 ctx->ifc_max_fl_buf_size = fl->ifl_buf_size; 2158 fl->ifl_cltype = m_gettype(fl->ifl_buf_size); 2159 fl->ifl_zone = m_getzone(fl->ifl_buf_size); 2160 2161 2162 /* avoid pre-allocating zillions of clusters to an idle card 2163 * potentially speeding up attach 2164 */ 2165 _iflib_fl_refill(ctx, fl, min(128, fl->ifl_size)); 2166 MPASS(min(128, fl->ifl_size) == fl->ifl_credits); 2167 if (min(128, fl->ifl_size) != fl->ifl_credits) 2168 return (ENOBUFS); 2169 /* 2170 * handle failure 2171 */ 2172 MPASS(rxq != NULL); 2173 MPASS(fl->ifl_ifdi != NULL); 2174 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 2175 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2176 return (0); 2177 } 2178 2179 /********************************************************************* 2180 * 2181 * Free receive ring data structures 2182 * 2183 **********************************************************************/ 2184 static void 2185 iflib_rx_sds_free(iflib_rxq_t rxq) 2186 { 2187 iflib_fl_t fl; 2188 int i; 2189 2190 if (rxq->ifr_fl != NULL) { 2191 for (i = 0; i < rxq->ifr_nfl; i++) { 2192 fl = &rxq->ifr_fl[i]; 2193 if (fl->ifl_desc_tag != NULL) { 2194 bus_dma_tag_destroy(fl->ifl_desc_tag); 2195 fl->ifl_desc_tag = NULL; 2196 } 2197 free(fl->ifl_sds.ifsd_m, M_IFLIB); 2198 
free(fl->ifl_sds.ifsd_cl, M_IFLIB); 2199 /* XXX destroy maps first */ 2200 free(fl->ifl_sds.ifsd_map, M_IFLIB); 2201 fl->ifl_sds.ifsd_m = NULL; 2202 fl->ifl_sds.ifsd_cl = NULL; 2203 fl->ifl_sds.ifsd_map = NULL; 2204 } 2205 free(rxq->ifr_fl, M_IFLIB); 2206 rxq->ifr_fl = NULL; 2207 rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0; 2208 } 2209 } 2210 2211 /* 2212 * MI independent logic 2213 * 2214 */ 2215 static void 2216 iflib_timer(void *arg) 2217 { 2218 iflib_txq_t txq = arg; 2219 if_ctx_t ctx = txq->ift_ctx; 2220 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 2221 uint64_t this_tick = ticks; 2222 uint32_t reset_on = hz / 2; 2223 2224 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) 2225 return; 2226 /* 2227 ** Check on the state of the TX queue(s), this 2228 ** can be done without the lock because its RO 2229 ** and the HUNG state will be static if set. 2230 */ 2231 if (this_tick - txq->ift_last_timer_tick >= hz / 2) { 2232 txq->ift_last_timer_tick = this_tick; 2233 IFDI_TIMER(ctx, txq->ift_id); 2234 if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) && 2235 ((txq->ift_cleaned_prev == txq->ift_cleaned) || 2236 (sctx->isc_pause_frames == 0))) 2237 goto hung; 2238 2239 if (ifmp_ring_is_stalled(txq->ift_br)) 2240 txq->ift_qstatus = IFLIB_QUEUE_HUNG; 2241 txq->ift_cleaned_prev = txq->ift_cleaned; 2242 } 2243 #ifdef DEV_NETMAP 2244 if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) 2245 iflib_netmap_timer_adjust(ctx, txq->ift_id, &reset_on); 2246 #endif 2247 /* handle any laggards */ 2248 if (txq->ift_db_pending) 2249 GROUPTASK_ENQUEUE(&txq->ift_task); 2250 2251 sctx->isc_pause_frames = 0; 2252 if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) 2253 callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu); 2254 return; 2255 hung: 2256 device_printf(ctx->ifc_dev, "TX(%d) desc avail = %d, pidx = %d\n", 2257 txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx); 2258 STATE_LOCK(ctx); 2259 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 2260 ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET); 2261 iflib_admin_intr_deferred(ctx); 2262 STATE_UNLOCK(ctx); 2263 } 2264 2265 static void 2266 iflib_init_locked(if_ctx_t ctx) 2267 { 2268 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 2269 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2270 if_t ifp = ctx->ifc_ifp; 2271 iflib_fl_t fl; 2272 iflib_txq_t txq; 2273 iflib_rxq_t rxq; 2274 int i, j, tx_ip_csum_flags, tx_ip6_csum_flags; 2275 2276 2277 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 2278 IFDI_INTR_DISABLE(ctx); 2279 2280 tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP); 2281 tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP); 2282 /* Set hardware offload abilities */ 2283 if_clearhwassist(ifp); 2284 if (if_getcapenable(ifp) & IFCAP_TXCSUM) 2285 if_sethwassistbits(ifp, tx_ip_csum_flags, 0); 2286 if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) 2287 if_sethwassistbits(ifp, tx_ip6_csum_flags, 0); 2288 if (if_getcapenable(ifp) & IFCAP_TSO4) 2289 if_sethwassistbits(ifp, CSUM_IP_TSO, 0); 2290 if (if_getcapenable(ifp) & IFCAP_TSO6) 2291 if_sethwassistbits(ifp, CSUM_IP6_TSO, 0); 2292 2293 for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) { 2294 CALLOUT_LOCK(txq); 2295 callout_stop(&txq->ift_timer); 2296 CALLOUT_UNLOCK(txq); 2297 iflib_netmap_txq_init(ctx, txq); 2298 } 2299 #ifdef INVARIANTS 2300 i = if_getdrvflags(ifp); 2301 #endif 2302 IFDI_INIT(ctx); 2303 MPASS(if_getdrvflags(ifp) == i); 2304 for (i = 0, rxq = 
ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) { 2305 /* XXX this should really be done on a per-queue basis */ 2306 if (if_getcapenable(ifp) & IFCAP_NETMAP) { 2307 MPASS(rxq->ifr_id == i); 2308 iflib_netmap_rxq_init(ctx, rxq); 2309 continue; 2310 } 2311 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { 2312 if (iflib_fl_setup(fl)) { 2313 device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n"); 2314 goto done; 2315 } 2316 } 2317 } 2318 done: 2319 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 2320 IFDI_INTR_ENABLE(ctx); 2321 txq = ctx->ifc_txqs; 2322 for (i = 0; i < sctx->isc_ntxqsets; i++, txq++) 2323 callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, 2324 txq->ift_timer.c_cpu); 2325 } 2326 2327 static int 2328 iflib_media_change(if_t ifp) 2329 { 2330 if_ctx_t ctx = if_getsoftc(ifp); 2331 int err; 2332 2333 CTX_LOCK(ctx); 2334 if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0) 2335 iflib_init_locked(ctx); 2336 CTX_UNLOCK(ctx); 2337 return (err); 2338 } 2339 2340 static void 2341 iflib_media_status(if_t ifp, struct ifmediareq *ifmr) 2342 { 2343 if_ctx_t ctx = if_getsoftc(ifp); 2344 2345 CTX_LOCK(ctx); 2346 IFDI_UPDATE_ADMIN_STATUS(ctx); 2347 IFDI_MEDIA_STATUS(ctx, ifmr); 2348 CTX_UNLOCK(ctx); 2349 } 2350 2351 void 2352 iflib_stop(if_ctx_t ctx) 2353 { 2354 iflib_txq_t txq = ctx->ifc_txqs; 2355 iflib_rxq_t rxq = ctx->ifc_rxqs; 2356 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2357 iflib_dma_info_t di; 2358 iflib_fl_t fl; 2359 int i, j; 2360 2361 /* Tell the stack that the interface is no longer active */ 2362 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 2363 2364 IFDI_INTR_DISABLE(ctx); 2365 DELAY(1000); 2366 IFDI_STOP(ctx); 2367 DELAY(1000); 2368 2369 iflib_debug_reset(); 2370 /* Wait for current tx queue users to exit to disarm watchdog timer. */ 2371 for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) { 2372 /* make sure all transmitters have completed before proceeding XXX */ 2373 2374 CALLOUT_LOCK(txq); 2375 callout_stop(&txq->ift_timer); 2376 CALLOUT_UNLOCK(txq); 2377 2378 /* clean any enqueued buffers */ 2379 iflib_ifmp_purge(txq); 2380 /* Free any existing tx buffers. 
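* (unloading their DMA maps and returning the mbufs to the allocator;
* see iflib_txsd_free() above)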
*/ 2381 for (j = 0; j < txq->ift_size; j++) { 2382 iflib_txsd_free(ctx, txq, j); 2383 } 2384 txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0; 2385 txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0; 2386 txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0; 2387 txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0; 2388 txq->ift_pullups = 0; 2389 ifmp_ring_reset_stats(txq->ift_br); 2390 for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwtxqs; j++, di++) 2391 bzero((void *)di->idi_vaddr, di->idi_size); 2392 } 2393 for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) { 2394 /* make sure all transmitters have completed before proceeding XXX */ 2395 2396 for (j = 0, di = rxq->ifr_ifdi; j < rxq->ifr_nfl; j++, di++) 2397 bzero((void *)di->idi_vaddr, di->idi_size); 2398 /* also resets the free lists pidx/cidx */ 2399 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) 2400 iflib_fl_bufs_free(fl); 2401 } 2402 } 2403 2404 static inline caddr_t 2405 calc_next_rxd(iflib_fl_t fl, int cidx) 2406 { 2407 qidx_t size; 2408 int nrxd; 2409 caddr_t start, end, cur, next; 2410 2411 nrxd = fl->ifl_size; 2412 size = fl->ifl_rxd_size; 2413 start = fl->ifl_ifdi->idi_vaddr; 2414 2415 if (__predict_false(size == 0)) 2416 return (start); 2417 cur = start + size*cidx; 2418 end = start + size*nrxd; 2419 next = CACHE_PTR_NEXT(cur); 2420 return (next < end ? next : start); 2421 } 2422 2423 static inline void 2424 prefetch_pkts(iflib_fl_t fl, int cidx) 2425 { 2426 int nextptr; 2427 int nrxd = fl->ifl_size; 2428 caddr_t next_rxd; 2429 2430 2431 nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1); 2432 prefetch(&fl->ifl_sds.ifsd_m[nextptr]); 2433 prefetch(&fl->ifl_sds.ifsd_cl[nextptr]); 2434 next_rxd = calc_next_rxd(fl, cidx); 2435 prefetch(next_rxd); 2436 prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]); 2437 prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]); 2438 prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]); 2439 prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]); 2440 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]); 2441 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]); 2442 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]); 2443 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]); 2444 } 2445 2446 static void 2447 rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd) 2448 { 2449 int flid, cidx; 2450 bus_dmamap_t map; 2451 iflib_fl_t fl; 2452 iflib_dma_info_t di; 2453 int next; 2454 2455 map = NULL; 2456 flid = irf->irf_flid; 2457 cidx = irf->irf_idx; 2458 fl = &rxq->ifr_fl[flid]; 2459 sd->ifsd_fl = fl; 2460 sd->ifsd_cidx = cidx; 2461 sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx]; 2462 sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx]; 2463 fl->ifl_credits--; 2464 #if MEMORY_LOGGING 2465 fl->ifl_m_dequeued++; 2466 #endif 2467 if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH) 2468 prefetch_pkts(fl, cidx); 2469 if (fl->ifl_sds.ifsd_map != NULL) { 2470 next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1); 2471 prefetch(&fl->ifl_sds.ifsd_map[next]); 2472 map = fl->ifl_sds.ifsd_map[cidx]; 2473 di = fl->ifl_ifdi; 2474 next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1); 2475 prefetch(&fl->ifl_sds.ifsd_flags[next]); 2476 bus_dmamap_sync(di->idi_tag, di->idi_map, 2477 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2478 2479 /* not valid assert if bxe really does SGE from non-contiguous elements */ 2480 MPASS(fl->ifl_cidx == cidx); 2481 if (unload) 2482 bus_dmamap_unload(fl->ifl_desc_tag, map); 2483 
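/*
 * When the caller is going to copy the payload out (small packets)
 * it passes unload == FALSE: the map stays loaded and the cluster is
 * left in place so the refill path can hand it straight back to the
 * hardware.
 */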
} 2484 fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1); 2485 if (__predict_false(fl->ifl_cidx == 0)) 2486 fl->ifl_gen = 0; 2487 if (map != NULL) 2488 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 2489 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2490 bit_clear(fl->ifl_rx_bitmap, cidx); 2491 } 2492 2493 static struct mbuf * 2494 assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd) 2495 { 2496 int i, padlen , flags; 2497 struct mbuf *m, *mh, *mt; 2498 caddr_t cl; 2499 2500 i = 0; 2501 mh = NULL; 2502 do { 2503 rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd); 2504 2505 MPASS(*sd->ifsd_cl != NULL); 2506 MPASS(*sd->ifsd_m != NULL); 2507 2508 /* Don't include zero-length frags */ 2509 if (ri->iri_frags[i].irf_len == 0) { 2510 /* XXX we can save the cluster here, but not the mbuf */ 2511 m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0); 2512 m_free(*sd->ifsd_m); 2513 *sd->ifsd_m = NULL; 2514 continue; 2515 } 2516 m = *sd->ifsd_m; 2517 *sd->ifsd_m = NULL; 2518 if (mh == NULL) { 2519 flags = M_PKTHDR|M_EXT; 2520 mh = mt = m; 2521 padlen = ri->iri_pad; 2522 } else { 2523 flags = M_EXT; 2524 mt->m_next = m; 2525 mt = m; 2526 /* assuming padding is only on the first fragment */ 2527 padlen = 0; 2528 } 2529 cl = *sd->ifsd_cl; 2530 *sd->ifsd_cl = NULL; 2531 2532 /* Can these two be made one ? */ 2533 m_init(m, M_NOWAIT, MT_DATA, flags); 2534 m_cljset(m, cl, sd->ifsd_fl->ifl_cltype); 2535 /* 2536 * These must follow m_init and m_cljset 2537 */ 2538 m->m_data += padlen; 2539 ri->iri_len -= padlen; 2540 m->m_len = ri->iri_frags[i].irf_len; 2541 } while (++i < ri->iri_nfrags); 2542 2543 return (mh); 2544 } 2545 2546 /* 2547 * Process one software descriptor 2548 */ 2549 static struct mbuf * 2550 iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri) 2551 { 2552 struct if_rxsd sd; 2553 struct mbuf *m; 2554 2555 /* should I merge this back in now that the two paths are basically duplicated? */ 2556 if (ri->iri_nfrags == 1 && 2557 ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) { 2558 rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd); 2559 m = *sd.ifsd_m; 2560 *sd.ifsd_m = NULL; 2561 m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR); 2562 #ifndef __NO_STRICT_ALIGNMENT 2563 if (!IP_ALIGNED(m)) 2564 m->m_data += 2; 2565 #endif 2566 memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len); 2567 m->m_len = ri->iri_frags[0].irf_len; 2568 } else { 2569 m = assemble_segments(rxq, ri, &sd); 2570 } 2571 m->m_pkthdr.len = ri->iri_len; 2572 m->m_pkthdr.rcvif = ri->iri_ifp; 2573 m->m_flags |= ri->iri_flags; 2574 m->m_pkthdr.ether_vtag = ri->iri_vtag; 2575 m->m_pkthdr.flowid = ri->iri_flowid; 2576 M_HASHTYPE_SET(m, ri->iri_rsstype); 2577 m->m_pkthdr.csum_flags = ri->iri_csum_flags; 2578 m->m_pkthdr.csum_data = ri->iri_csum_data; 2579 return (m); 2580 } 2581 2582 #if defined(INET6) || defined(INET) 2583 static void 2584 iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6) 2585 { 2586 CURVNET_SET(lc->ifp->if_vnet); 2587 #if defined(INET6) 2588 *v6 = VNET(ip6_forwarding); 2589 #endif 2590 #if defined(INET) 2591 *v4 = VNET(ipforwarding); 2592 #endif 2593 CURVNET_RESTORE(); 2594 } 2595 2596 /* 2597 * Returns true if it's possible this packet could be LROed. 2598 * if it returns false, it is guaranteed that tcp_lro_rx() 2599 * would not return zero. 
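* (A host that is forwarding must not aggregate: a merged super-packet
* would have to be re-segmented before it could be sent on, so LRO is
* bypassed whenever ipforwarding/ip6_forwarding is enabled.)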
2600 */ 2601 static bool 2602 iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding) 2603 { 2604 struct ether_header *eh; 2605 uint16_t eh_type; 2606 2607 eh = mtod(m, struct ether_header *); 2608 eh_type = ntohs(eh->ether_type); 2609 switch (eh_type) { 2610 #if defined(INET6) 2611 case ETHERTYPE_IPV6: 2612 return !v6_forwarding; 2613 #endif 2614 #if defined (INET) 2615 case ETHERTYPE_IP: 2616 return !v4_forwarding; 2617 #endif 2618 } 2619 2620 return false; 2621 } 2622 #else 2623 static void 2624 iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused) 2625 { 2626 } 2627 #endif 2628 2629 static bool 2630 iflib_rxeof(iflib_rxq_t rxq, qidx_t budget) 2631 { 2632 if_ctx_t ctx = rxq->ifr_ctx; 2633 if_shared_ctx_t sctx = ctx->ifc_sctx; 2634 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2635 int avail, i; 2636 qidx_t *cidxp; 2637 struct if_rxd_info ri; 2638 int err, budget_left, rx_bytes, rx_pkts; 2639 iflib_fl_t fl; 2640 struct ifnet *ifp; 2641 int lro_enabled; 2642 bool v4_forwarding, v6_forwarding, lro_possible; 2643 2644 /* 2645 * XXX early demux data packets so that if_input processing only handles 2646 * acks in interrupt context 2647 */ 2648 struct mbuf *m, *mh, *mt, *mf; 2649 2650 lro_possible = v4_forwarding = v6_forwarding = false; 2651 ifp = ctx->ifc_ifp; 2652 mh = mt = NULL; 2653 MPASS(budget > 0); 2654 rx_pkts = rx_bytes = 0; 2655 if (sctx->isc_flags & IFLIB_HAS_RXCQ) 2656 cidxp = &rxq->ifr_cq_cidx; 2657 else 2658 cidxp = &rxq->ifr_fl[0].ifl_cidx; 2659 if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) { 2660 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) 2661 __iflib_fl_refill_lt(ctx, fl, budget + 8); 2662 DBG_COUNTER_INC(rx_unavail); 2663 return (false); 2664 } 2665 2666 for (budget_left = budget; (budget_left > 0) && (avail > 0); budget_left--, avail--) { 2667 if (__predict_false(!CTX_ACTIVE(ctx))) { 2668 DBG_COUNTER_INC(rx_ctx_inactive); 2669 break; 2670 } 2671 /* 2672 * Reset client set fields to their default values 2673 */ 2674 rxd_info_zero(&ri); 2675 ri.iri_qsidx = rxq->ifr_id; 2676 ri.iri_cidx = *cidxp; 2677 ri.iri_ifp = ifp; 2678 ri.iri_frags = rxq->ifr_frags; 2679 err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); 2680 2681 if (err) 2682 goto err; 2683 if (sctx->isc_flags & IFLIB_HAS_RXCQ) { 2684 *cidxp = ri.iri_cidx; 2685 /* Update our consumer index */ 2686 /* XXX NB: shurd - check if this is still safe */ 2687 while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) { 2688 rxq->ifr_cq_cidx -= scctx->isc_nrxd[0]; 2689 rxq->ifr_cq_gen = 0; 2690 } 2691 /* was this only a completion queue message? 
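* (i.e. an event that carries no data fragments - there is nothing to
* hand up the stack, so consume the descriptor and move on)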
*/ 2692 if (__predict_false(ri.iri_nfrags == 0)) 2693 continue; 2694 } 2695 MPASS(ri.iri_nfrags != 0); 2696 MPASS(ri.iri_len != 0); 2697 2698 /* will advance the cidx on the corresponding free lists */ 2699 m = iflib_rxd_pkt_get(rxq, &ri); 2700 if (avail == 0 && budget_left) 2701 avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left); 2702 2703 if (__predict_false(m == NULL)) { 2704 DBG_COUNTER_INC(rx_mbuf_null); 2705 continue; 2706 } 2707 /* imm_pkt: -- cxgb */ 2708 if (mh == NULL) 2709 mh = mt = m; 2710 else { 2711 mt->m_nextpkt = m; 2712 mt = m; 2713 } 2714 } 2715 /* make sure that we can refill faster than drain */ 2716 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) 2717 __iflib_fl_refill_lt(ctx, fl, budget + 8); 2718 2719 lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO); 2720 if (lro_enabled) 2721 iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding); 2722 mt = mf = NULL; 2723 while (mh != NULL) { 2724 m = mh; 2725 mh = mh->m_nextpkt; 2726 m->m_nextpkt = NULL; 2727 #ifndef __NO_STRICT_ALIGNMENT 2728 if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL) 2729 continue; 2730 #endif 2731 rx_bytes += m->m_pkthdr.len; 2732 rx_pkts++; 2733 #if defined(INET6) || defined(INET) 2734 if (lro_enabled) { 2735 if (!lro_possible) { 2736 lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding); 2737 if (lro_possible && mf != NULL) { 2738 ifp->if_input(ifp, mf); 2739 DBG_COUNTER_INC(rx_if_input); 2740 mt = mf = NULL; 2741 } 2742 } 2743 if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) == 2744 (CSUM_L4_CALC|CSUM_L4_VALID)) { 2745 if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0) 2746 continue; 2747 } 2748 } 2749 #endif 2750 if (lro_possible) { 2751 ifp->if_input(ifp, m); 2752 DBG_COUNTER_INC(rx_if_input); 2753 continue; 2754 } 2755 2756 if (mf == NULL) 2757 mf = m; 2758 if (mt != NULL) 2759 mt->m_nextpkt = m; 2760 mt = m; 2761 } 2762 if (mf != NULL) { 2763 ifp->if_input(ifp, mf); 2764 DBG_COUNTER_INC(rx_if_input); 2765 } 2766 2767 if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes); 2768 if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts); 2769 2770 /* 2771 * Flush any outstanding LRO work 2772 */ 2773 #if defined(INET6) || defined(INET) 2774 tcp_lro_flush_all(&rxq->ifr_lc); 2775 #endif 2776 if (avail) 2777 return true; 2778 return (iflib_rxd_avail(ctx, rxq, *cidxp, 1)); 2779 err: 2780 STATE_LOCK(ctx); 2781 ctx->ifc_flags |= IFC_DO_RESET; 2782 iflib_admin_intr_deferred(ctx); 2783 STATE_UNLOCK(ctx); 2784 return (false); 2785 } 2786 2787 #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1) 2788 static inline qidx_t 2789 txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use) 2790 { 2791 qidx_t notify_count = TXD_NOTIFY_COUNT(txq); 2792 qidx_t minthresh = txq->ift_size / 8; 2793 if (in_use > 4*minthresh) 2794 return (notify_count); 2795 if (in_use > 2*minthresh) 2796 return (notify_count >> 1); 2797 if (in_use > minthresh) 2798 return (notify_count >> 3); 2799 return (0); 2800 } 2801 2802 static inline qidx_t 2803 txq_max_rs_deferred(iflib_txq_t txq) 2804 { 2805 qidx_t notify_count = TXD_NOTIFY_COUNT(txq); 2806 qidx_t minthresh = txq->ift_size / 8; 2807 if (txq->ift_in_use > 4*minthresh) 2808 return (notify_count); 2809 if (txq->ift_in_use > 2*minthresh) 2810 return (notify_count >> 1); 2811 if (txq->ift_in_use > minthresh) 2812 return (notify_count >> 2); 2813 return (2); 2814 } 2815 2816 #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags) 2817 #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG) 2818 2819 #define 
TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use)) 2820 #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq) 2821 #define TXQ_MAX_DB_CONSUMED(size) (size >> 4) 2822 2823 /* forward compatibility for cxgb */ 2824 #define FIRST_QSET(ctx) 0 2825 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets) 2826 #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets) 2827 #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx)) 2828 #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments)) 2829 2830 /* XXX we should be setting this to something other than zero */ 2831 #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh) 2832 #define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max) 2833 2834 static inline bool 2835 iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use) 2836 { 2837 qidx_t dbval, max; 2838 bool rang; 2839 2840 rang = false; 2841 max = TXQ_MAX_DB_DEFERRED(txq, in_use); 2842 if (ring || txq->ift_db_pending >= max) { 2843 dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx; 2844 ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval); 2845 txq->ift_db_pending = txq->ift_npending = 0; 2846 rang = true; 2847 } 2848 return (rang); 2849 } 2850 2851 #ifdef PKT_DEBUG 2852 static void 2853 print_pkt(if_pkt_info_t pi) 2854 { 2855 printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n", 2856 pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx); 2857 printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n", 2858 pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag); 2859 printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n", 2860 pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto); 2861 } 2862 #endif 2863 2864 #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO) 2865 #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO)) 2866 #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO) 2867 #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO)) 2868 2869 static int 2870 iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp) 2871 { 2872 if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx; 2873 struct ether_vlan_header *eh; 2874 struct mbuf *m, *n; 2875 2876 n = m = *mp; 2877 if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) && 2878 M_WRITABLE(m) == 0) { 2879 if ((m = m_dup(m, M_NOWAIT)) == NULL) { 2880 return (ENOMEM); 2881 } else { 2882 m_freem(*mp); 2883 n = *mp = m; 2884 } 2885 } 2886 2887 /* 2888 * Determine where frame payload starts. 2889 * Jump over vlan headers if already present, 2890 * helpful for QinQ too. 
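* For example, an untagged frame's payload starts at ETHER_HDR_LEN (14)
* bytes, while a single 802.1Q tag adds ETHER_VLAN_ENCAP_LEN (4) more,
* moving the payload to offset 18.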
2891 */ 2892 if (__predict_false(m->m_len < sizeof(*eh))) { 2893 txq->ift_pullups++; 2894 if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL)) 2895 return (ENOMEM); 2896 } 2897 eh = mtod(m, struct ether_vlan_header *); 2898 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2899 pi->ipi_etype = ntohs(eh->evl_proto); 2900 pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2901 } else { 2902 pi->ipi_etype = ntohs(eh->evl_encap_proto); 2903 pi->ipi_ehdrlen = ETHER_HDR_LEN; 2904 } 2905 2906 switch (pi->ipi_etype) { 2907 #ifdef INET 2908 case ETHERTYPE_IP: 2909 { 2910 struct ip *ip = NULL; 2911 struct tcphdr *th = NULL; 2912 int minthlen; 2913 2914 minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th)); 2915 if (__predict_false(m->m_len < minthlen)) { 2916 /* 2917 * if this code bloat is causing too much of a hit 2918 * move it to a separate function and mark it noinline 2919 */ 2920 if (m->m_len == pi->ipi_ehdrlen) { 2921 n = m->m_next; 2922 MPASS(n); 2923 if (n->m_len >= sizeof(*ip)) { 2924 ip = (struct ip *)n->m_data; 2925 if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 2926 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2927 } else { 2928 txq->ift_pullups++; 2929 if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) 2930 return (ENOMEM); 2931 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 2932 } 2933 } else { 2934 txq->ift_pullups++; 2935 if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) 2936 return (ENOMEM); 2937 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 2938 if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 2939 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2940 } 2941 } else { 2942 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 2943 if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 2944 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2945 } 2946 pi->ipi_ip_hlen = ip->ip_hl << 2; 2947 pi->ipi_ipproto = ip->ip_p; 2948 pi->ipi_flags |= IPI_TX_IPV4; 2949 2950 if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP)) 2951 ip->ip_sum = 0; 2952 2953 /* TCP checksum offload may require TCP header length */ 2954 if (IS_TX_OFFLOAD4(pi)) { 2955 if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) { 2956 if (__predict_false(th == NULL)) { 2957 txq->ift_pullups++; 2958 if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL)) 2959 return (ENOMEM); 2960 th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen); 2961 } 2962 pi->ipi_tcp_hflags = th->th_flags; 2963 pi->ipi_tcp_hlen = th->th_off << 2; 2964 pi->ipi_tcp_seq = th->th_seq; 2965 } 2966 if (IS_TSO4(pi)) { 2967 if (__predict_false(ip->ip_p != IPPROTO_TCP)) 2968 return (ENXIO); 2969 th->th_sum = in_pseudo(ip->ip_src.s_addr, 2970 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 2971 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; 2972 if (sctx->isc_flags & IFLIB_TSO_INIT_IP) { 2973 ip->ip_sum = 0; 2974 ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz); 2975 } 2976 } 2977 } 2978 break; 2979 } 2980 #endif 2981 #ifdef INET6 2982 case ETHERTYPE_IPV6: 2983 { 2984 struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen); 2985 struct tcphdr *th; 2986 pi->ipi_ip_hlen = sizeof(struct ip6_hdr); 2987 2988 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) { 2989 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL)) 2990 return (ENOMEM); 2991 } 2992 th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen); 2993 2994 /* XXX-BZ this will go badly in case of ext 
hdrs. */
2995 pi->ipi_ipproto = ip6->ip6_nxt;
2996 pi->ipi_flags |= IPI_TX_IPV6;
2997
2998 /* TCP checksum offload may require TCP header length */
2999 if (IS_TX_OFFLOAD6(pi)) {
3000 if (pi->ipi_ipproto == IPPROTO_TCP) {
3001 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
3002 txq->ift_pullups++;
3003 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
3004 return (ENOMEM);
/* m_pullup() may have replaced the mbuf; recompute the now-stale header pointers */
ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen);
th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen);
3005 }
3006 pi->ipi_tcp_hflags = th->th_flags;
3007 pi->ipi_tcp_hlen = th->th_off << 2;
3008 pi->ipi_tcp_seq = th->th_seq;
3009 }
3010 if (IS_TSO6(pi)) {
3011 if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
3012 return (ENXIO);
3013 /*
3014 * The corresponding flag is set by the stack in the IPv4
3015 * TSO case, but not in IPv6 (at least in FreeBSD 10.2).
3016 * So, set it here because the rest of the flow requires it.
3017 */
3018 pi->ipi_csum_flags |= CSUM_IP6_TCP;
3019 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
3020 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
3021 }
3022 }
3023 break;
3024 }
3025 #endif
3026 default:
3027 pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
3028 pi->ipi_ip_hlen = 0;
3029 break;
3030 }
3031 *mp = m;
3032
3033 return (0);
3034 }
3035
3036 static __noinline struct mbuf *
3037 collapse_pkthdr(struct mbuf *m0)
3038 {
3039 struct mbuf *m, *m_next, *tmp;
3040
3041 m = m0;
3042 m_next = m->m_next;
3043 while (m_next != NULL && m_next->m_len == 0) {
3044 /* save the link before freeing - reading a freed mbuf is a use-after-free */
3045 tmp = m_next->m_next;
3046 m_next->m_next = NULL;
3047 m_free(m_next);
3048 m_next = tmp;
3049 }
3050 m = m0;
m->m_next = m_next;
/* the entire tail was empty */
if (__predict_false(m_next == NULL))
return (m);
3051 if ((m_next->m_flags & M_EXT) == 0) {
3052 m = m_defrag(m, M_NOWAIT);
3053 } else {
3054 tmp = m_next->m_next;
3055 memcpy(m_next, m, MPKTHSIZE);
3056 m = m_next;
3057 m->m_next = tmp;
3058 }
3059 return (m);
3060 }
3061
3062 /*
3063 * If dodgy hardware rejects the scatter gather chain we've handed it
3064 * we'll need to remove the mbuf chain from ifsd_m[] before we can add the
3065 * m_defrag'd mbufs
3066 */
3067 static __noinline struct mbuf *
3068 iflib_remove_mbuf(iflib_txq_t txq)
3069 {
3070 int ntxd, i, pidx;
3071 struct mbuf *m, *mh, **ifsd_m;
3072
3073 pidx = txq->ift_pidx;
3074 ifsd_m = txq->ift_sds.ifsd_m;
3075 ntxd = txq->ift_size;
3076 mh = m = ifsd_m[pidx];
3077 ifsd_m[pidx] = NULL;
3078 #if MEMORY_LOGGING
3079 txq->ift_dequeued++;
3080 #endif
3081 i = 1;
3082
3083 while (m) {
3084 ifsd_m[(pidx + i) & (ntxd - 1)] = NULL;
3085 #if MEMORY_LOGGING
3086 txq->ift_dequeued++;
3087 #endif
3088 m = m->m_next;
3089 i++;
3090 }
3091 return (mh);
3092 }
3093
3094 static int
3095 iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
3096 struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs,
3097 int max_segs, int flags)
3098 {
3099 if_ctx_t ctx;
3100 if_shared_ctx_t sctx;
3101 if_softc_ctx_t scctx;
3102 int i, next, pidx, err, ntxd, count;
3103 struct mbuf *m, *tmp, **ifsd_m;
3104
3105 m = *m0;
3106
3107 /*
3108 * Please don't ever do this
3109 */
3110 if (__predict_false(m->m_len == 0)) {
3111 *m0 = m = collapse_pkthdr(m);
/* collapse_pkthdr() can come back empty-handed if m_defrag() fails */
if (__predict_false(m == NULL))
return (ENOMEM);
}
3112
3113 ctx = txq->ift_ctx;
3114 sctx = ctx->ifc_sctx;
3115 scctx = &ctx->ifc_softc_ctx;
3116 ifsd_m = txq->ift_sds.ifsd_m;
3117 ntxd = txq->ift_size;
3118 pidx = txq->ift_pidx;
3119 if (map != NULL) {
3120 uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags;
3121
3122 err = bus_dmamap_load_mbuf_sg(tag, map,
3123 *m0, segs, nsegs, BUS_DMA_NOWAIT);
3124 if (err)
3125 return (err);
3126 ifsd_flags[pidx] |= TX_SW_DESC_MAPPED;
3127 count = 0;
3128 m = *m0;
3129 do
{ 3130 if (__predict_false(m->m_len <= 0)) { 3131 tmp = m; 3132 m = m->m_next; 3133 tmp->m_next = NULL; 3134 m_free(tmp); 3135 continue; 3136 } 3137 m = m->m_next; 3138 count++; 3139 } while (m != NULL); 3140 if (count > *nsegs) { 3141 ifsd_m[pidx] = *m0; 3142 ifsd_m[pidx]->m_flags |= M_TOOBIG; 3143 return (0); 3144 } 3145 m = *m0; 3146 count = 0; 3147 do { 3148 next = (pidx + count) & (ntxd-1); 3149 MPASS(ifsd_m[next] == NULL); 3150 ifsd_m[next] = m; 3151 count++; 3152 tmp = m; 3153 m = m->m_next; 3154 } while (m != NULL); 3155 } else { 3156 int buflen, sgsize, maxsegsz, max_sgsize; 3157 vm_offset_t vaddr; 3158 vm_paddr_t curaddr; 3159 3160 count = i = 0; 3161 m = *m0; 3162 if (m->m_pkthdr.csum_flags & CSUM_TSO) 3163 maxsegsz = scctx->isc_tx_tso_segsize_max; 3164 else 3165 maxsegsz = sctx->isc_tx_maxsegsize; 3166 3167 do { 3168 if (__predict_false(m->m_len <= 0)) { 3169 tmp = m; 3170 m = m->m_next; 3171 tmp->m_next = NULL; 3172 m_free(tmp); 3173 continue; 3174 } 3175 buflen = m->m_len; 3176 vaddr = (vm_offset_t)m->m_data; 3177 /* 3178 * see if we can't be smarter about physically 3179 * contiguous mappings 3180 */ 3181 next = (pidx + count) & (ntxd-1); 3182 MPASS(ifsd_m[next] == NULL); 3183 #if MEMORY_LOGGING 3184 txq->ift_enqueued++; 3185 #endif 3186 ifsd_m[next] = m; 3187 while (buflen > 0) { 3188 if (i >= max_segs) 3189 goto err; 3190 max_sgsize = MIN(buflen, maxsegsz); 3191 curaddr = pmap_kextract(vaddr); 3192 sgsize = PAGE_SIZE - (curaddr & PAGE_MASK); 3193 sgsize = MIN(sgsize, max_sgsize); 3194 segs[i].ds_addr = curaddr; 3195 segs[i].ds_len = sgsize; 3196 vaddr += sgsize; 3197 buflen -= sgsize; 3198 i++; 3199 } 3200 count++; 3201 tmp = m; 3202 m = m->m_next; 3203 } while (m != NULL); 3204 *nsegs = i; 3205 } 3206 return (0); 3207 err: 3208 *m0 = iflib_remove_mbuf(txq); 3209 return (EFBIG); 3210 } 3211 3212 static inline caddr_t 3213 calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid) 3214 { 3215 qidx_t size; 3216 int ntxd; 3217 caddr_t start, end, cur, next; 3218 3219 ntxd = txq->ift_size; 3220 size = txq->ift_txd_size[qid]; 3221 start = txq->ift_ifdi[qid].idi_vaddr; 3222 3223 if (__predict_false(size == 0)) 3224 return (start); 3225 cur = start + size*cidx; 3226 end = start + size*ntxd; 3227 next = CACHE_PTR_NEXT(cur); 3228 return (next < end ? next : start); 3229 } 3230 3231 /* 3232 * Pad an mbuf to ensure a minimum ethernet frame size. 
3233 * min_frame_size is the frame size (less CRC) to pad the mbuf to 3234 */ 3235 static __noinline int 3236 iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size) 3237 { 3238 /* 3239 * 18 is enough bytes to pad an ARP packet to 46 bytes, and 3240 * and ARP message is the smallest common payload I can think of 3241 */ 3242 static char pad[18]; /* just zeros */ 3243 int n; 3244 struct mbuf *new_head; 3245 3246 if (!M_WRITABLE(*m_head)) { 3247 new_head = m_dup(*m_head, M_NOWAIT); 3248 if (new_head == NULL) { 3249 m_freem(*m_head); 3250 device_printf(dev, "cannot pad short frame, m_dup() failed"); 3251 DBG_COUNTER_INC(encap_pad_mbuf_fail); 3252 return ENOMEM; 3253 } 3254 m_freem(*m_head); 3255 *m_head = new_head; 3256 } 3257 3258 for (n = min_frame_size - (*m_head)->m_pkthdr.len; 3259 n > 0; n -= sizeof(pad)) 3260 if (!m_append(*m_head, min(n, sizeof(pad)), pad)) 3261 break; 3262 3263 if (n > 0) { 3264 m_freem(*m_head); 3265 device_printf(dev, "cannot pad short frame\n"); 3266 DBG_COUNTER_INC(encap_pad_mbuf_fail); 3267 return (ENOBUFS); 3268 } 3269 3270 return 0; 3271 } 3272 3273 static int 3274 iflib_encap(iflib_txq_t txq, struct mbuf **m_headp) 3275 { 3276 if_ctx_t ctx; 3277 if_shared_ctx_t sctx; 3278 if_softc_ctx_t scctx; 3279 bus_dma_segment_t *segs; 3280 struct mbuf *m_head; 3281 void *next_txd; 3282 bus_dmamap_t map; 3283 struct if_pkt_info pi; 3284 int remap = 0; 3285 int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd; 3286 bus_dma_tag_t desc_tag; 3287 3288 segs = txq->ift_segs; 3289 ctx = txq->ift_ctx; 3290 sctx = ctx->ifc_sctx; 3291 scctx = &ctx->ifc_softc_ctx; 3292 segs = txq->ift_segs; 3293 ntxd = txq->ift_size; 3294 m_head = *m_headp; 3295 map = NULL; 3296 3297 /* 3298 * If we're doing TSO the next descriptor to clean may be quite far ahead 3299 */ 3300 cidx = txq->ift_cidx; 3301 pidx = txq->ift_pidx; 3302 if (ctx->ifc_flags & IFC_PREFETCH) { 3303 next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1); 3304 if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) { 3305 next_txd = calc_next_txd(txq, cidx, 0); 3306 prefetch(next_txd); 3307 } 3308 3309 /* prefetch the next cache line of mbuf pointers and flags */ 3310 prefetch(&txq->ift_sds.ifsd_m[next]); 3311 if (txq->ift_sds.ifsd_map != NULL) { 3312 prefetch(&txq->ift_sds.ifsd_map[next]); 3313 next = (cidx + CACHE_LINE_SIZE) & (ntxd-1); 3314 prefetch(&txq->ift_sds.ifsd_flags[next]); 3315 } 3316 } else if (txq->ift_sds.ifsd_map != NULL) 3317 map = txq->ift_sds.ifsd_map[pidx]; 3318 3319 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3320 desc_tag = txq->ift_tso_desc_tag; 3321 max_segs = scctx->isc_tx_tso_segments_max; 3322 MPASS(desc_tag != NULL); 3323 MPASS(max_segs > 0); 3324 } else { 3325 desc_tag = txq->ift_desc_tag; 3326 max_segs = scctx->isc_tx_nsegments; 3327 } 3328 if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) && 3329 __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) { 3330 err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size); 3331 if (err) 3332 return err; 3333 } 3334 m_head = *m_headp; 3335 3336 pkt_info_zero(&pi); 3337 pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST)); 3338 pi.ipi_pidx = pidx; 3339 pi.ipi_qsidx = txq->ift_id; 3340 pi.ipi_len = m_head->m_pkthdr.len; 3341 pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags; 3342 pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ? 
m_head->m_pkthdr.ether_vtag : 0; 3343 3344 /* deliberate bitwise OR to make one condition */ 3345 if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) { 3346 if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) 3347 return (err); 3348 m_head = *m_headp; 3349 } 3350 3351 retry: 3352 err = iflib_busdma_load_mbuf_sg(txq, desc_tag, map, m_headp, segs, &nsegs, max_segs, BUS_DMA_NOWAIT); 3353 defrag: 3354 if (__predict_false(err)) { 3355 switch (err) { 3356 case EFBIG: 3357 /* try collapse once and defrag once */ 3358 if (remap == 0) { 3359 m_head = m_collapse(*m_headp, M_NOWAIT, max_segs); 3360 /* try defrag if collapsing fails */ 3361 if (m_head == NULL) 3362 remap++; 3363 } 3364 if (remap == 1) 3365 m_head = m_defrag(*m_headp, M_NOWAIT); 3366 remap++; 3367 if (__predict_false(m_head == NULL)) 3368 goto defrag_failed; 3369 txq->ift_mbuf_defrag++; 3370 *m_headp = m_head; 3371 goto retry; 3372 break; 3373 case ENOMEM: 3374 txq->ift_no_tx_dma_setup++; 3375 break; 3376 default: 3377 txq->ift_no_tx_dma_setup++; 3378 m_freem(*m_headp); 3379 DBG_COUNTER_INC(tx_frees); 3380 *m_headp = NULL; 3381 break; 3382 } 3383 txq->ift_map_failed++; 3384 DBG_COUNTER_INC(encap_load_mbuf_fail); 3385 return (err); 3386 } 3387 3388 /* 3389 * XXX assumes a 1 to 1 relationship between segments and 3390 * descriptors - this does not hold true on all drivers, e.g. 3391 * cxgb 3392 */ 3393 if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) { 3394 txq->ift_no_desc_avail++; 3395 if (map != NULL) 3396 bus_dmamap_unload(desc_tag, map); 3397 DBG_COUNTER_INC(encap_txq_avail_fail); 3398 if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0) 3399 GROUPTASK_ENQUEUE(&txq->ift_task); 3400 return (ENOBUFS); 3401 } 3402 /* 3403 * On Intel cards we can greatly reduce the number of TX interrupts 3404 * we see by only setting report status on every Nth descriptor. 3405 * However, this also means that the driver will need to keep track 3406 * of the descriptors that RS was set on to check them for the DD bit. 
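* The deferral threshold scales with ring occupancy (see
* txq_max_rs_deferred() above): a ring more than half full lets the
* whole TXD_NOTIFY_COUNT accumulate, a quarter to a half full allows
* half of it, an eighth to a quarter allows a quarter, and a nearly
* idle ring requests status every couple of descriptors.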
3407 */ 3408 txq->ift_rs_pending += nsegs + 1; 3409 if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) || 3410 iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) { 3411 pi.ipi_flags |= IPI_TX_INTR; 3412 txq->ift_rs_pending = 0; 3413 } 3414 3415 pi.ipi_segs = segs; 3416 pi.ipi_nsegs = nsegs; 3417 3418 MPASS(pidx >= 0 && pidx < txq->ift_size); 3419 #ifdef PKT_DEBUG 3420 print_pkt(&pi); 3421 #endif 3422 if (map != NULL) 3423 bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE); 3424 if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) { 3425 if (map != NULL) 3426 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, 3427 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3428 DBG_COUNTER_INC(tx_encap); 3429 MPASS(pi.ipi_new_pidx < txq->ift_size); 3430 3431 ndesc = pi.ipi_new_pidx - pi.ipi_pidx; 3432 if (pi.ipi_new_pidx < pi.ipi_pidx) { 3433 ndesc += txq->ift_size; 3434 txq->ift_gen = 1; 3435 } 3436 /* 3437 * drivers can need as many as 3438 * two sentinels 3439 */ 3440 MPASS(ndesc <= pi.ipi_nsegs + 2); 3441 MPASS(pi.ipi_new_pidx != pidx); 3442 MPASS(ndesc > 0); 3443 txq->ift_in_use += ndesc; 3444 3445 /* 3446 * We update the last software descriptor again here because there may 3447 * be a sentinel and/or there may be more mbufs than segments 3448 */ 3449 txq->ift_pidx = pi.ipi_new_pidx; 3450 txq->ift_npending += pi.ipi_ndescs; 3451 } else { 3452 *m_headp = m_head = iflib_remove_mbuf(txq); 3453 if (err == EFBIG) { 3454 txq->ift_txd_encap_efbig++; 3455 if (remap < 2) { 3456 remap = 1; 3457 goto defrag; 3458 } 3459 } 3460 DBG_COUNTER_INC(encap_txd_encap_fail); 3461 goto defrag_failed; 3462 } 3463 return (err); 3464 3465 defrag_failed: 3466 txq->ift_mbuf_defrag_failed++; 3467 txq->ift_map_failed++; 3468 m_freem(*m_headp); 3469 DBG_COUNTER_INC(tx_frees); 3470 *m_headp = NULL; 3471 return (ENOMEM); 3472 } 3473 3474 static void 3475 iflib_tx_desc_free(iflib_txq_t txq, int n) 3476 { 3477 int hasmap; 3478 uint32_t qsize, cidx, mask, gen; 3479 struct mbuf *m, **ifsd_m; 3480 uint8_t *ifsd_flags; 3481 bus_dmamap_t *ifsd_map; 3482 bool do_prefetch; 3483 3484 cidx = txq->ift_cidx; 3485 gen = txq->ift_gen; 3486 qsize = txq->ift_size; 3487 mask = qsize-1; 3488 hasmap = txq->ift_sds.ifsd_map != NULL; 3489 ifsd_flags = txq->ift_sds.ifsd_flags; 3490 ifsd_m = txq->ift_sds.ifsd_m; 3491 ifsd_map = txq->ift_sds.ifsd_map; 3492 do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH); 3493 3494 while (n-- > 0) { 3495 if (do_prefetch) { 3496 prefetch(ifsd_m[(cidx + 3) & mask]); 3497 prefetch(ifsd_m[(cidx + 4) & mask]); 3498 } 3499 if (ifsd_m[cidx] != NULL) { 3500 prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]); 3501 prefetch(&ifsd_flags[(cidx + CACHE_PTR_INCREMENT) & mask]); 3502 if (hasmap && (ifsd_flags[cidx] & TX_SW_DESC_MAPPED)) { 3503 /* 3504 * does it matter if it's not the TSO tag? 
If so we'll 3505 * have to add the type to flags 3506 */ 3507 bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]); 3508 ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED; 3509 } 3510 if ((m = ifsd_m[cidx]) != NULL) { 3511 /* XXX we don't support any drivers that batch packets yet */ 3512 MPASS(m->m_nextpkt == NULL); 3513 /* if the number of clusters exceeds the number of segments 3514 * there won't be space on the ring to save a pointer to each 3515 * cluster so we simply free the list here 3516 */ 3517 if (m->m_flags & M_TOOBIG) { 3518 m_freem(m); 3519 } else { 3520 m_free(m); 3521 } 3522 ifsd_m[cidx] = NULL; 3523 #if MEMORY_LOGGING 3524 txq->ift_dequeued++; 3525 #endif 3526 DBG_COUNTER_INC(tx_frees); 3527 } 3528 } 3529 if (__predict_false(++cidx == qsize)) { 3530 cidx = 0; 3531 gen = 0; 3532 } 3533 } 3534 txq->ift_cidx = cidx; 3535 txq->ift_gen = gen; 3536 } 3537 3538 static __inline int 3539 iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh) 3540 { 3541 int reclaim; 3542 if_ctx_t ctx = txq->ift_ctx; 3543 3544 KASSERT(thresh >= 0, ("invalid threshold to reclaim")); 3545 MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size); 3546 3547 /* 3548 * Need a rate-limiting check so that this isn't called every time 3549 */ 3550 iflib_tx_credits_update(ctx, txq); 3551 reclaim = DESC_RECLAIMABLE(txq); 3552 3553 if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) { 3554 #ifdef INVARIANTS 3555 if (iflib_verbose_debug) { 3556 printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__, 3557 txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments, 3558 reclaim, thresh); 3559 3560 } 3561 #endif 3562 return (0); 3563 } 3564 iflib_tx_desc_free(txq, reclaim); 3565 txq->ift_cleaned += reclaim; 3566 txq->ift_in_use -= reclaim; 3567 3568 return (reclaim); 3569 } 3570 3571 static struct mbuf ** 3572 _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining) 3573 { 3574 int next, size; 3575 struct mbuf **items; 3576 3577 size = r->size; 3578 next = (cidx + CACHE_PTR_INCREMENT) & (size-1); 3579 items = __DEVOLATILE(struct mbuf **, &r->items[0]); 3580 3581 prefetch(items[(cidx + offset) & (size-1)]); 3582 if (remaining > 1) { 3583 prefetch2cachelines(&items[next]); 3584 prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]); 3585 prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]); 3586 prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]); 3587 } 3588 return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)])); 3589 } 3590 3591 static void 3592 iflib_txq_check_drain(iflib_txq_t txq, int budget) 3593 { 3594 3595 ifmp_ring_check_drainage(txq->ift_br, budget); 3596 } 3597 3598 static uint32_t 3599 iflib_txq_can_drain(struct ifmp_ring *r) 3600 { 3601 iflib_txq_t txq = r->cookie; 3602 if_ctx_t ctx = txq->ift_ctx; 3603 3604 return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) || 3605 ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false)); 3606 } 3607 3608 static uint32_t 3609 iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) 3610 { 3611 iflib_txq_t txq = r->cookie; 3612 if_ctx_t ctx = txq->ift_ctx; 3613 struct ifnet *ifp = ctx->ifc_ifp; 3614 struct mbuf **mp, *m; 3615 int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail; 3616 int reclaimed, err, in_use_prev, desc_used; 3617 bool do_prefetch, ring, rang; 3618 3619 if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) || 3620 !LINK_ACTIVE(ctx))) { 3621 DBG_COUNTER_INC(txq_drain_notready); 3622 return (0); 3623 } 3624 
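/*
 * Drain sequence: reclaim the descriptors the hardware has finished
 * with, ring the doorbell if that freed space, then encap up to
 * MIN(avail, TX_BATCH_SIZE) packets from the mp_ring, deferring
 * further doorbell writes per txq_max_db_deferred().
 */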
reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
3625 rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use);
3626 avail = IDXDIFF(pidx, cidx, r->size);
3627 if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
3628 DBG_COUNTER_INC(txq_drain_flushing);
3629 for (i = 0; i < avail; i++) {
3630 m_free(r->items[(cidx + i) & (r->size-1)]);
3631 r->items[(cidx + i) & (r->size-1)] = NULL;
3632 }
3633 return (avail);
3634 }
3635
3636 if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
3637 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3638 CALLOUT_LOCK(txq);
3639 callout_stop(&txq->ift_timer);
3640 CALLOUT_UNLOCK(txq);
3641 DBG_COUNTER_INC(txq_drain_oactive);
3642 return (0);
3643 }
3644 if (reclaimed)
3645 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3646 consumed = mcast_sent = bytes_sent = pkt_sent = 0;
3647 count = MIN(avail, TX_BATCH_SIZE);
3648 #ifdef INVARIANTS
3649 if (iflib_verbose_debug)
3650 printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
3651 avail, ctx->ifc_flags, TXQ_AVAIL(txq));
3652 #endif
3653 do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
3654 avail = TXQ_AVAIL(txq);
3655 err = 0;
3656 for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) {
3657 int rem = do_prefetch ? count - i : 0;
3658
3659 mp = _ring_peek_one(r, cidx, i, rem);
3660 MPASS(mp != NULL && *mp != NULL);
3661 if (__predict_false(*mp == (struct mbuf *)txq)) {
3662 consumed++;
3663 reclaimed++;
3664 continue;
3665 }
3666 in_use_prev = txq->ift_in_use;
3667 err = iflib_encap(txq, mp);
3668 if (__predict_false(err)) {
3669 DBG_COUNTER_INC(txq_drain_encapfail);
3670 /* no room - bail out */
3671 if (err == ENOBUFS)
3672 break;
3673 consumed++;
3674
3675 /* we can't send this packet - skip it */
3676 continue;
3677 }
3678 consumed++;
3679 pkt_sent++;
3680 m = *mp;
3681 DBG_COUNTER_INC(tx_sent);
3682 bytes_sent += m->m_pkthdr.len;
3683 mcast_sent += !!(m->m_flags & M_MCAST);
3684 avail = TXQ_AVAIL(txq);
3685
3686 txq->ift_db_pending += (txq->ift_in_use - in_use_prev);
3687 desc_used += (txq->ift_in_use - in_use_prev);
3688 ETHER_BPF_MTAP(ifp, m);
3689 if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
3690 break;
3691 rang = iflib_txd_db_check(ctx, txq, false, in_use_prev);
3692 }
3693
3694 /* deliberate use of bitwise or to avoid gratuitous short-circuit */
3695 ring = rang ? false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx));
3696 iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use);
3697 if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
3698 if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
3699 if (mcast_sent)
3700 if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
3701 #ifdef INVARIANTS
3702 if (iflib_verbose_debug)
3703 printf("consumed=%d\n", consumed);
3704 #endif
3705 return (consumed);
3706 }
3707
3708 static uint32_t
3709 iflib_txq_drain_always(struct ifmp_ring *r)
3710 {
3711 return (1);
3712 }
3713
3714 static uint32_t
3715 iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3716 {
3717 int i, avail;
3718 struct mbuf **mp;
3719 iflib_txq_t txq;
3720
3721 txq = r->cookie;
3722
3723 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3724 CALLOUT_LOCK(txq);
3725 callout_stop(&txq->ift_timer);
3726 CALLOUT_UNLOCK(txq);
3727
3728 avail = IDXDIFF(pidx, cidx, r->size);
3729 for (i = 0; i < avail; i++) {
3730 mp = _ring_peek_one(r, cidx, i, avail - i);
3731 if (__predict_false(*mp == (struct mbuf *)txq))
3732 continue;
3733 m_freem(*mp);
3734 }
3735 MPASS(ifmp_ring_is_stalled(r) == 0);
3736 return (avail);
3737 }
3738
3739 static void
3740 iflib_ifmp_purge(iflib_txq_t txq)
3741 {
3742 struct ifmp_ring *r;
3743
3744 r = txq->ift_br;
3745 r->drain = iflib_txq_drain_free;
3746 r->can_drain = iflib_txq_drain_always;
3747
3748 ifmp_ring_check_drainage(r, r->size);
3749
3750 r->drain = iflib_txq_drain;
3751 r->can_drain = iflib_txq_can_drain;
3752 }
3753
3754 static void
3755 _task_fn_tx(void *context)
3756 {
3757 iflib_txq_t txq = context;
3758 if_ctx_t ctx = txq->ift_ctx;
3759 struct ifnet *ifp = ctx->ifc_ifp;
3760 int abdicate = ctx->ifc_sysctl_tx_abdicate;
3761
3762 #ifdef IFLIB_DIAGNOSTICS
3763 txq->ift_cpu_exec_count[curcpu]++;
3764 #endif
3765 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
3766 return;
3767 if (if_getcapenable(ifp) & IFCAP_NETMAP) {
3768 if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false))
3769 netmap_tx_irq(ifp, txq->ift_id);
3770 IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
3771 return;
3772 }
3773 if (txq->ift_db_pending)
3774 ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate);
3775 else if (!abdicate)
3776 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3777 /*
3778 * When abdicating, we always need to check drainage, not just when we don't enqueue
3779 */
3780 if (abdicate)
3781 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3782
3783 if (ctx->ifc_flags & IFC_LEGACY)
3784 IFDI_INTR_ENABLE(ctx);
3785 else {
3786 #ifdef INVARIANTS
3787 int rc =
3788 #endif
3789 IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
3790 KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
3791 }
3792 }
3793
3794 static void
3795 _task_fn_rx(void *context)
3796 {
3797 iflib_rxq_t rxq = context;
3798 if_ctx_t ctx = rxq->ifr_ctx;
3799 bool more;
3800 uint16_t budget;
3801
3802 #ifdef IFLIB_DIAGNOSTICS
3803 rxq->ifr_cpu_exec_count[curcpu]++;
3804 #endif
3805 DBG_COUNTER_INC(task_fn_rxs);
3806 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
3807 return;
3808 more = true;
3809 #ifdef DEV_NETMAP
3810 if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) {
3811 u_int work = 0;
3812 if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) {
3813 more = false;
3814 }
3815 }
3816 #endif
3817 budget = ctx->ifc_sysctl_rx_budget;
3818 if (budget == 0)
3819 budget = 16;
3820 if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
3821 if (ctx->ifc_flags & IFC_LEGACY)
3822 IFDI_INTR_ENABLE(ctx);
3823 else {
3824 #ifdef INVARIANTS
3825 int rc =
3826 #endif
3827 IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
3828 KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
3829 DBG_COUNTER_INC(rx_intr_enables);
3830 }
3831 }
3832 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
3833 return;
3834 if (more)
3835 GROUPTASK_ENQUEUE(&rxq->ifr_task);
3836 }
3837
3838 static void
3839 _task_fn_admin(void *context)
3840 {
3841 if_ctx_t ctx = context;
3842 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
3843 iflib_txq_t txq;
3844 int i;
3845 bool oactive, running, do_reset, do_watchdog;
3846 uint32_t reset_on = hz / 2;
3847
3848 STATE_LOCK(ctx);
3849 running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
3850 oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
3851 do_reset = (ctx->ifc_flags & IFC_DO_RESET);
3852 do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
3853 ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG);
3854 STATE_UNLOCK(ctx);
3855
3856 if ((!running && !oactive) &&
3857 !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
3858 return;
3859
3860 CTX_LOCK(ctx);
3861 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
3862 CALLOUT_LOCK(txq);
3863 callout_stop(&txq->ift_timer);
3864 CALLOUT_UNLOCK(txq);
3865 }
3866 if (do_watchdog) {
3867 ctx->ifc_watchdog_events++;
3868 IFDI_WATCHDOG_RESET(ctx);
3869 }
3870 IFDI_UPDATE_ADMIN_STATUS(ctx);
3871 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
3872 #ifdef DEV_NETMAP
3873 reset_on = hz / 2;
3874 if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
3875 iflib_netmap_timer_adjust(ctx, txq->ift_id, &reset_on);
3876 #endif
3877 callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
3878 }
3879 IFDI_LINK_INTR_ENABLE(ctx);
3880 if (do_reset)
3881 iflib_if_init_locked(ctx);
3882 CTX_UNLOCK(ctx);
3883
3884 if (LINK_ACTIVE(ctx) == 0)
3885 return;
3886 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
3887 iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
3888 }
3889
3890
3891 static void
3892 _task_fn_iov(void *context)
3893 {
3894 if_ctx_t ctx = context;
3895
3896 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
3897 return;
3898
3899 CTX_LOCK(ctx);
3900 IFDI_VFLR_HANDLE(ctx);
3901 CTX_UNLOCK(ctx);
3902 }
3903
3904 static int
3905 iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3906 {
3907 int err;
3908 if_int_delay_info_t info;
3909 if_ctx_t ctx;
3910
3911 info = (if_int_delay_info_t)arg1;
3912 ctx = info->iidi_ctx;
3913 info->iidi_req = req;
3914 info->iidi_oidp = oidp;
3915 CTX_LOCK(ctx);
3916 err = IFDI_SYSCTL_INT_DELAY(ctx, info);
3917 CTX_UNLOCK(ctx);
3918 return (err);
3919 }
3920
3921 /*********************************************************************
3922 *
3923 * IFNET FUNCTIONS
3924 *
3925 **********************************************************************/
3926
3927 static void
3928 iflib_if_init_locked(if_ctx_t ctx)
3929 {
3930 iflib_stop(ctx);
3931 iflib_init_locked(ctx);
3932 }
3933
3934
3935 static void
3936 iflib_if_init(void *arg)
3937 {
3938 if_ctx_t ctx = arg;
3939
3940 CTX_LOCK(ctx);
3941 iflib_if_init_locked(ctx);
3942 CTX_UNLOCK(ctx);
3943 }
3944
3945 static int
3946 iflib_if_transmit(if_t ifp, struct mbuf *m)
3947 {
3948 if_ctx_t ctx = if_getsoftc(ifp);
3949
3950 iflib_txq_t txq;
3951 int err, qidx;
3952 int
abdicate = ctx->ifc_sysctl_tx_abdicate; 3953 3954 if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) { 3955 DBG_COUNTER_INC(tx_frees); 3956 m_freem(m); 3957 return (ENOBUFS); 3958 } 3959 3960 MPASS(m->m_nextpkt == NULL); 3961 qidx = 0; 3962 if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m)) 3963 qidx = QIDX(ctx, m); 3964 /* 3965 * XXX calculate buf_ring based on flowid (divvy up bits?) 3966 */ 3967 txq = &ctx->ifc_txqs[qidx]; 3968 3969 #ifdef DRIVER_BACKPRESSURE 3970 if (txq->ift_closed) { 3971 while (m != NULL) { 3972 next = m->m_nextpkt; 3973 m->m_nextpkt = NULL; 3974 m_freem(m); 3975 m = next; 3976 } 3977 return (ENOBUFS); 3978 } 3979 #endif 3980 #ifdef notyet 3981 qidx = count = 0; 3982 mp = marr; 3983 next = m; 3984 do { 3985 count++; 3986 next = next->m_nextpkt; 3987 } while (next != NULL); 3988 3989 if (count > nitems(marr)) 3990 if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) { 3991 /* XXX check nextpkt */ 3992 m_freem(m); 3993 /* XXX simplify for now */ 3994 DBG_COUNTER_INC(tx_frees); 3995 return (ENOBUFS); 3996 } 3997 for (next = m, i = 0; next != NULL; i++) { 3998 mp[i] = next; 3999 next = next->m_nextpkt; 4000 mp[i]->m_nextpkt = NULL; 4001 } 4002 #endif 4003 DBG_COUNTER_INC(tx_seen); 4004 err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate); 4005 4006 if (abdicate) 4007 GROUPTASK_ENQUEUE(&txq->ift_task); 4008 if (err) { 4009 if (!abdicate) 4010 GROUPTASK_ENQUEUE(&txq->ift_task); 4011 /* support forthcoming later */ 4012 #ifdef DRIVER_BACKPRESSURE 4013 txq->ift_closed = TRUE; 4014 #endif 4015 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); 4016 m_freem(m); 4017 } 4018 4019 return (err); 4020 } 4021 4022 static void 4023 iflib_if_qflush(if_t ifp) 4024 { 4025 if_ctx_t ctx = if_getsoftc(ifp); 4026 iflib_txq_t txq = ctx->ifc_txqs; 4027 int i; 4028 4029 STATE_LOCK(ctx); 4030 ctx->ifc_flags |= IFC_QFLUSH; 4031 STATE_UNLOCK(ctx); 4032 for (i = 0; i < NTXQSETS(ctx); i++, txq++) 4033 while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br))) 4034 iflib_txq_check_drain(txq, 0); 4035 STATE_LOCK(ctx); 4036 ctx->ifc_flags &= ~IFC_QFLUSH; 4037 STATE_UNLOCK(ctx); 4038 4039 if_qflush(ifp); 4040 } 4041 4042 4043 #define IFCAP_FLAGS (IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \ 4044 IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \ 4045 IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO) 4046 4047 static int 4048 iflib_if_ioctl(if_t ifp, u_long command, caddr_t data) 4049 { 4050 if_ctx_t ctx = if_getsoftc(ifp); 4051 struct ifreq *ifr = (struct ifreq *)data; 4052 #if defined(INET) || defined(INET6) 4053 struct ifaddr *ifa = (struct ifaddr *)data; 4054 #endif 4055 bool avoid_reset = FALSE; 4056 int err = 0, reinit = 0, bits; 4057 4058 switch (command) { 4059 case SIOCSIFADDR: 4060 #ifdef INET 4061 if (ifa->ifa_addr->sa_family == AF_INET) 4062 avoid_reset = TRUE; 4063 #endif 4064 #ifdef INET6 4065 if (ifa->ifa_addr->sa_family == AF_INET6) 4066 avoid_reset = TRUE; 4067 #endif 4068 /* 4069 ** Calling init results in link renegotiation, 4070 ** so we avoid doing it when possible. 
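** If the interface is not yet running we still request a reinit below;
** otherwise only IFF_UP is asserted and, for INET, the ARP state is
** refreshed.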
4071 */ 4072 if (avoid_reset) { 4073 if_setflagbits(ifp, IFF_UP,0); 4074 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) 4075 reinit = 1; 4076 #ifdef INET 4077 if (!(if_getflags(ifp) & IFF_NOARP)) 4078 arp_ifinit(ifp, ifa); 4079 #endif 4080 } else 4081 err = ether_ioctl(ifp, command, data); 4082 break; 4083 case SIOCSIFMTU: 4084 CTX_LOCK(ctx); 4085 if (ifr->ifr_mtu == if_getmtu(ifp)) { 4086 CTX_UNLOCK(ctx); 4087 break; 4088 } 4089 bits = if_getdrvflags(ifp); 4090 /* stop the driver and free any clusters before proceeding */ 4091 iflib_stop(ctx); 4092 4093 if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) { 4094 STATE_LOCK(ctx); 4095 if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size) 4096 ctx->ifc_flags |= IFC_MULTISEG; 4097 else 4098 ctx->ifc_flags &= ~IFC_MULTISEG; 4099 STATE_UNLOCK(ctx); 4100 err = if_setmtu(ifp, ifr->ifr_mtu); 4101 } 4102 iflib_init_locked(ctx); 4103 STATE_LOCK(ctx); 4104 if_setdrvflags(ifp, bits); 4105 STATE_UNLOCK(ctx); 4106 CTX_UNLOCK(ctx); 4107 break; 4108 case SIOCSIFFLAGS: 4109 CTX_LOCK(ctx); 4110 if (if_getflags(ifp) & IFF_UP) { 4111 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4112 if ((if_getflags(ifp) ^ ctx->ifc_if_flags) & 4113 (IFF_PROMISC | IFF_ALLMULTI)) { 4114 err = IFDI_PROMISC_SET(ctx, if_getflags(ifp)); 4115 } 4116 } else 4117 reinit = 1; 4118 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4119 iflib_stop(ctx); 4120 } 4121 ctx->ifc_if_flags = if_getflags(ifp); 4122 CTX_UNLOCK(ctx); 4123 break; 4124 case SIOCADDMULTI: 4125 case SIOCDELMULTI: 4126 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4127 CTX_LOCK(ctx); 4128 IFDI_INTR_DISABLE(ctx); 4129 IFDI_MULTI_SET(ctx); 4130 IFDI_INTR_ENABLE(ctx); 4131 CTX_UNLOCK(ctx); 4132 } 4133 break; 4134 case SIOCSIFMEDIA: 4135 CTX_LOCK(ctx); 4136 IFDI_MEDIA_SET(ctx); 4137 CTX_UNLOCK(ctx); 4138 /* falls thru */ 4139 case SIOCGIFMEDIA: 4140 case SIOCGIFXMEDIA: 4141 err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command); 4142 break; 4143 case SIOCGI2C: 4144 { 4145 struct ifi2creq i2c; 4146 4147 err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 4148 if (err != 0) 4149 break; 4150 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { 4151 err = EINVAL; 4152 break; 4153 } 4154 if (i2c.len > sizeof(i2c.data)) { 4155 err = EINVAL; 4156 break; 4157 } 4158 4159 if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0) 4160 err = copyout(&i2c, ifr_data_get_ptr(ifr), 4161 sizeof(i2c)); 4162 break; 4163 } 4164 case SIOCSIFCAP: 4165 { 4166 int mask, setmask; 4167 4168 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 4169 setmask = 0; 4170 #ifdef TCP_OFFLOAD 4171 setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6); 4172 #endif 4173 setmask |= (mask & IFCAP_FLAGS); 4174 4175 if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) 4176 setmask |= (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6); 4177 if ((mask & IFCAP_WOL) && 4178 (if_getcapabilities(ifp) & IFCAP_WOL) != 0) 4179 setmask |= (mask & (IFCAP_WOL_MCAST|IFCAP_WOL_MAGIC)); 4180 if_vlancap(ifp); 4181 /* 4182 * want to ensure that traffic has stopped before we change any of the flags 4183 */ 4184 if (setmask) { 4185 CTX_LOCK(ctx); 4186 bits = if_getdrvflags(ifp); 4187 if (bits & IFF_DRV_RUNNING) 4188 iflib_stop(ctx); 4189 STATE_LOCK(ctx); 4190 if_togglecapenable(ifp, setmask); 4191 STATE_UNLOCK(ctx); 4192 if (bits & IFF_DRV_RUNNING) 4193 iflib_init_locked(ctx); 4194 STATE_LOCK(ctx); 4195 if_setdrvflags(ifp, bits); 4196 STATE_UNLOCK(ctx); 4197 CTX_UNLOCK(ctx); 4198 } 4199 break; 4200 } 4201 case SIOCGPRIVATE_0: 4202 case SIOCSDRVSPEC: 4203 case SIOCGDRVSPEC: 4204 CTX_LOCK(ctx); 4205 err = IFDI_PRIV_IOCTL(ctx, command, data); 
4206 CTX_UNLOCK(ctx);
4207 break;
4208 default:
4209 err = ether_ioctl(ifp, command, data);
4210 break;
4211 }
4212 if (reinit)
4213 iflib_if_init(ctx);
4214 return (err);
4215 }
4216
4217 static uint64_t
4218 iflib_if_get_counter(if_t ifp, ift_counter cnt)
4219 {
4220 if_ctx_t ctx = if_getsoftc(ifp);
4221
4222 return (IFDI_GET_COUNTER(ctx, cnt));
4223 }
4224
4225 /*********************************************************************
4226 *
4227 * OTHER FUNCTIONS EXPORTED TO THE STACK
4228 *
4229 **********************************************************************/
4230
4231 static void
4232 iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
4233 {
4234 if_ctx_t ctx = if_getsoftc(ifp);
4235
4236 if ((void *)ctx != arg)
4237 return;
4238
4239 if ((vtag == 0) || (vtag > 4095))
4240 return;
4241
4242 CTX_LOCK(ctx);
4243 IFDI_VLAN_REGISTER(ctx, vtag);
4244 /* Re-init to load the changes */
4245 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
4246 iflib_if_init_locked(ctx);
4247 CTX_UNLOCK(ctx);
4248 }
4249
4250 static void
4251 iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
4252 {
4253 if_ctx_t ctx = if_getsoftc(ifp);
4254
4255 if ((void *)ctx != arg)
4256 return;
4257
4258 if ((vtag == 0) || (vtag > 4095))
4259 return;
4260
4261 CTX_LOCK(ctx);
4262 IFDI_VLAN_UNREGISTER(ctx, vtag);
4263 /* Re-init to load the changes */
4264 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
4265 iflib_if_init_locked(ctx);
4266 CTX_UNLOCK(ctx);
4267 }
4268
4269 static void
4270 iflib_led_func(void *arg, int onoff)
4271 {
4272 if_ctx_t ctx = arg;
4273
4274 CTX_LOCK(ctx);
4275 IFDI_LED_FUNC(ctx, onoff);
4276 CTX_UNLOCK(ctx);
4277 }
4278
4279 /*********************************************************************
4280 *
4281 * BUS FUNCTION DEFINITIONS
4282 *
4283 **********************************************************************/
4284
4285 int
4286 iflib_device_probe(device_t dev)
4287 {
4288 pci_vendor_info_t *ent;
4289
4290 uint16_t pci_vendor_id, pci_device_id;
4291 uint16_t pci_subvendor_id, pci_subdevice_id;
4292 uint16_t pci_rev_id;
4293 if_shared_ctx_t sctx;
4294
4295 if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
4296 return (ENOTSUP);
4297
4298 pci_vendor_id = pci_get_vendor(dev);
4299 pci_device_id = pci_get_device(dev);
4300 pci_subvendor_id = pci_get_subvendor(dev);
4301 pci_subdevice_id = pci_get_subdevice(dev);
4302 pci_rev_id = pci_get_revid(dev);
4303 if (sctx->isc_parse_devinfo != NULL)
4304 sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);
4305
4306 ent = sctx->isc_vendor_info;
4307 while (ent->pvi_vendor_id != 0) {
4308 if (pci_vendor_id != ent->pvi_vendor_id) {
4309 ent++;
4310 continue;
4311 }
4312 if ((pci_device_id == ent->pvi_device_id) &&
4313 ((pci_subvendor_id == ent->pvi_subvendor_id) ||
4314 (ent->pvi_subvendor_id == 0)) &&
4315 ((pci_subdevice_id == ent->pvi_subdevice_id) ||
4316 (ent->pvi_subdevice_id == 0)) &&
4317 ((pci_rev_id == ent->pvi_rev_id) ||
4318 (ent->pvi_rev_id == 0))) {
4319
4320 device_set_desc_copy(dev, ent->pvi_name);
4321 /* This needs to be changed to zero if the bus probing code
4322 * ever stops re-probing on best match, because the sctx
4323 * may have its values overwritten by register calls
4324 * in subsequent probes.
4325 */
4326 return (BUS_PROBE_DEFAULT);
4327 }
4328 ent++;
4329 }
4330 return (ENXIO);
4331 }
4332
4333 static void
4334 iflib_reset_qvalues(if_ctx_t ctx)
4335 {
4336 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
4337 if_shared_ctx_t sctx = ctx->ifc_sctx;
4338 device_t dev = ctx->ifc_dev; 4339 int i; 4340 4341 scctx->isc_txrx_budget_bytes_max = IFLIB_MAX_TX_BYTES; 4342 scctx->isc_tx_qdepth = IFLIB_DEFAULT_TX_QDEPTH; 4343 /* 4344 * XXX sanity check that ntxd & nrxd are a power of 2 4345 */ 4346 if (ctx->ifc_sysctl_ntxqs != 0) 4347 scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs; 4348 if (ctx->ifc_sysctl_nrxqs != 0) 4349 scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs; 4350 4351 for (i = 0; i < sctx->isc_ntxqs; i++) { 4352 if (ctx->ifc_sysctl_ntxds[i] != 0) 4353 scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i]; 4354 else 4355 scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i]; 4356 } 4357 4358 for (i = 0; i < sctx->isc_nrxqs; i++) { 4359 if (ctx->ifc_sysctl_nrxds[i] != 0) 4360 scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i]; 4361 else 4362 scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i]; 4363 } 4364 4365 for (i = 0; i < sctx->isc_nrxqs; i++) { 4366 if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) { 4367 device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n", 4368 i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]); 4369 scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i]; 4370 } 4371 if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) { 4372 device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n", 4373 i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]); 4374 scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i]; 4375 } 4376 } 4377 4378 for (i = 0; i < sctx->isc_ntxqs; i++) { 4379 if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) { 4380 device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n", 4381 i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]); 4382 scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i]; 4383 } 4384 if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) { 4385 device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n", 4386 i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]); 4387 scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i]; 4388 } 4389 } 4390 } 4391 4392 int 4393 iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp) 4394 { 4395 int err, rid, msix; 4396 if_ctx_t ctx; 4397 if_t ifp; 4398 if_softc_ctx_t scctx; 4399 int i; 4400 uint16_t main_txq; 4401 uint16_t main_rxq; 4402 4403 4404 ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO); 4405 4406 if (sc == NULL) { 4407 sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); 4408 device_set_softc(dev, ctx); 4409 ctx->ifc_flags |= IFC_SC_ALLOCATED; 4410 } 4411 4412 ctx->ifc_sctx = sctx; 4413 ctx->ifc_dev = dev; 4414 ctx->ifc_softc = sc; 4415 4416 if ((err = iflib_register(ctx)) != 0) { 4417 if (ctx->ifc_flags & IFC_SC_ALLOCATED) 4418 free(sc, M_IFLIB); 4419 free(ctx, M_IFLIB); 4420 device_printf(dev, "iflib_register failed %d\n", err); 4421 return (err); 4422 } 4423 iflib_add_device_sysctl_pre(ctx); 4424 4425 scctx = &ctx->ifc_softc_ctx; 4426 ifp = ctx->ifc_ifp; 4427 4428 iflib_reset_qvalues(ctx); 4429 CTX_LOCK(ctx); 4430 if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { 4431 CTX_UNLOCK(ctx); 4432 device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); 4433 return (err); 4434 } 4435 _iflib_pre_assert(scctx); 4436 ctx->ifc_txrx = *scctx->isc_txrx; 4437 4438 #ifdef INVARIANTS 4439 MPASS(scctx->isc_capabilities); 4440 if (scctx->isc_capabilities & IFCAP_TXCSUM) 4441 MPASS(scctx->isc_tx_csum_flags); 4442 #endif 4443 4444 if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS); 4445 if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS); 4446 4447 if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < 
scctx->isc_ntxqsets))
4448 scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
4449 if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
4450 scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
4451
4452 #ifdef ACPI_DMAR
4453 if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL)
4454 ctx->ifc_flags |= IFC_DMAR;
4455 #elif !(defined(__i386__) || defined(__amd64__))
4456 /* set unconditionally for !x86 */
4457 ctx->ifc_flags |= IFC_DMAR;
4458 #endif
4459
4460 main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
4461 main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
4462
4463 /* XXX change for per-queue sizes */
4464 device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
4465 scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
4466 for (i = 0; i < sctx->isc_nrxqs; i++) {
4467 if (!powerof2(scctx->isc_nrxd[i])) {
4468 /* round down instead? */
4469 device_printf(dev, "# rx descriptors must be a power of 2\n");
4470 err = EINVAL;
4471 goto fail;
4472 }
4473 }
4474 for (i = 0; i < sctx->isc_ntxqs; i++) {
4475 if (!powerof2(scctx->isc_ntxd[i])) {
4476 device_printf(dev,
4477 "# tx descriptors must be a power of 2");
4478 err = EINVAL;
4479 goto fail;
4480 }
4481 }
4482
4483 if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
4484 MAX_SINGLE_PACKET_FRACTION)
4485 scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
4486 MAX_SINGLE_PACKET_FRACTION);
4487 if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
4488 MAX_SINGLE_PACKET_FRACTION)
4489 scctx->isc_tx_tso_segments_max = max(1,
4490 scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
4491
4492 /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
4493 if (if_getcapabilities(ifp) & IFCAP_TSO) {
4494 /*
4495 * The stack can't handle a TSO size larger than IP_MAXPACKET,
4496 * but some MACs do.
4497 */
4498 if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
4499 IP_MAXPACKET));
4500 /*
4501 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
4502 * into account. In the worst case, each of these calls will
4503 * add another mbuf and, thus, the requirement for another DMA
4504 * segment. So for best performance, it doesn't make sense to
4505 * advertise a maximum of TSO segments that typically will
4506 * require defragmentation in iflib_encap().
4507 */
4508 if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
4509 if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
4510 }
4511 if (scctx->isc_rss_table_size == 0)
4512 scctx->isc_rss_table_size = 64;
4513 scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;
4514
4515 GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
4516 /* XXX format name */
4517 taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");
4518
4519 /* Set up cpu set. If it fails, use the set of all CPUs. */
4520 if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
4521 device_printf(dev, "Unable to fetch CPU list\n");
4522 CPU_COPY(&all_cpus, &ctx->ifc_cpus);
4523 }
4524 MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);
4525
4526 /*
4527 ** Now setup MSI or MSI/X, should
4528 ** return us the number of supported
4529 ** vectors. (Will be 1 for MSI)
4530 */
4531 if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
4532 msix = scctx->isc_vectors;
4533 } else if (scctx->isc_msix_bar != 0)
4534 /*
4535 * The simple fact that isc_msix_bar is not 0 does not mean
4536 * we have a good value there that is known to work.
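* iflib_msix_init() must therefore still be prepared to fall back to
* MSI or a legacy interrupt if MSI-X setup from that bar fails.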
4537 */ 4538 msix = iflib_msix_init(ctx); 4539 else { 4540 scctx->isc_vectors = 1; 4541 scctx->isc_ntxqsets = 1; 4542 scctx->isc_nrxqsets = 1; 4543 scctx->isc_intr = IFLIB_INTR_LEGACY; 4544 msix = 0; 4545 } 4546 /* Get memory for the station queues */ 4547 if ((err = iflib_queues_alloc(ctx))) { 4548 device_printf(dev, "Unable to allocate queue memory\n"); 4549 goto fail; 4550 } 4551 4552 if ((err = iflib_qset_structures_setup(ctx))) 4553 goto fail_queues; 4554 4555 /* 4556 * Group taskqueues aren't properly set up until SMP is started, 4557 * so we disable interrupts until we can handle them post 4558 * SI_SUB_SMP. 4559 * 4560 * XXX: disabling interrupts doesn't actually work, at least for 4561 * the non-MSI case. When they occur before SI_SUB_SMP completes, 4562 * we do null handling and depend on this not causing too large an 4563 * interrupt storm. 4564 */ 4565 IFDI_INTR_DISABLE(ctx); 4566 if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) { 4567 device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err); 4568 goto fail_intr_free; 4569 } 4570 if (msix <= 1) { 4571 rid = 0; 4572 if (scctx->isc_intr == IFLIB_INTR_MSI) { 4573 MPASS(msix == 1); 4574 rid = 1; 4575 } 4576 if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) { 4577 device_printf(dev, "iflib_legacy_setup failed %d\n", err); 4578 goto fail_intr_free; 4579 } 4580 } 4581 4582 ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac); 4583 4584 if ((err = IFDI_ATTACH_POST(ctx)) != 0) { 4585 device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); 4586 goto fail_detach; 4587 } 4588 4589 /* 4590 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported. 4591 * This must appear after the call to ether_ifattach() because 4592 * ether_ifattach() sets if_hdrlen to the default value. 
4593 */ 4594 if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) 4595 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 4596 4597 if ((err = iflib_netmap_attach(ctx))) { 4598 device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err); 4599 goto fail_detach; 4600 } 4601 *ctxp = ctx; 4602 4603 NETDUMP_SET(ctx->ifc_ifp, iflib); 4604 4605 if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); 4606 iflib_add_device_sysctl_post(ctx); 4607 ctx->ifc_flags |= IFC_INIT_DONE; 4608 CTX_UNLOCK(ctx); 4609 return (0); 4610 fail_detach: 4611 ether_ifdetach(ctx->ifc_ifp); 4612 fail_intr_free: 4613 if (scctx->isc_intr == IFLIB_INTR_MSIX || scctx->isc_intr == IFLIB_INTR_MSI) 4614 pci_release_msi(ctx->ifc_dev); 4615 fail_queues: 4616 iflib_tx_structures_free(ctx); 4617 iflib_rx_structures_free(ctx); 4618 fail: 4619 IFDI_DETACH(ctx); 4620 CTX_UNLOCK(ctx); 4621 return (err); 4622 } 4623 4624 int 4625 iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp, 4626 struct iflib_cloneattach_ctx *clctx) 4627 { 4628 int err; 4629 if_ctx_t ctx; 4630 if_t ifp; 4631 if_softc_ctx_t scctx; 4632 int i; 4633 void *sc; 4634 uint16_t main_txq; 4635 uint16_t main_rxq; 4636 4637 ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO); 4638 sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); 4639 ctx->ifc_flags |= IFC_SC_ALLOCATED; 4640 if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL)) 4641 ctx->ifc_flags |= IFC_PSEUDO; 4642 4643 ctx->ifc_sctx = sctx; 4644 ctx->ifc_softc = sc; 4645 ctx->ifc_dev = dev; 4646 4647 if ((err = iflib_register(ctx)) != 0) { 4648 device_printf(dev, "%s: iflib_register failed %d\n", __func__, err); 4649 free(sc, M_IFLIB); 4650 free(ctx, M_IFLIB); 4651 return (err); 4652 } 4653 iflib_add_device_sysctl_pre(ctx); 4654 4655 scctx = &ctx->ifc_softc_ctx; 4656 ifp = ctx->ifc_ifp; 4657 4658 /* 4659 * XXX sanity check that ntxd & nrxd are a power of 2 4660 */ 4661 iflib_reset_qvalues(ctx); 4662 4663 if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { 4664 device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); 4665 return (err); 4666 } 4667 if (sctx->isc_flags & IFLIB_GEN_MAC) 4668 iflib_gen_mac(ctx); 4669 if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name, 4670 clctx->cc_params)) != 0) { 4671 device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err); 4672 return (err); 4673 } 4674 ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 4675 ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_AUTO, 0, NULL); 4676 ifmedia_set(&ctx->ifc_media, IFM_ETHER | IFM_AUTO); 4677 4678 #ifdef INVARIANTS 4679 MPASS(scctx->isc_capabilities); 4680 if (scctx->isc_capabilities & IFCAP_TXCSUM) 4681 MPASS(scctx->isc_tx_csum_flags); 4682 #endif 4683 4684 if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_LINKSTATE); 4685 if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE); 4686 4687 ifp->if_flags |= IFF_NOGROUP; 4688 if (sctx->isc_flags & IFLIB_PSEUDO) { 4689 ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac); 4690 4691 if ((err = IFDI_ATTACH_POST(ctx)) != 0) { 4692 device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); 4693 goto fail_detach; 4694 } 4695 *ctxp = ctx; 4696 4697 /* 4698 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported. 4699 * This must appear after the call to ether_ifattach() because 4700 * ether_ifattach() sets if_hdrlen to the default value. 
4701 */
4702 if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
4703 if_setifheaderlen(ifp,
4704 sizeof(struct ether_vlan_header));
4705
4706 if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
4707 iflib_add_device_sysctl_post(ctx);
4708 ctx->ifc_flags |= IFC_INIT_DONE;
4709 return (0);
4710 }
4711 _iflib_pre_assert(scctx);
4712 ctx->ifc_txrx = *scctx->isc_txrx;
4713
4714 if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
4715 scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
4716 if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
4717 scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;
4718
4719 main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
4720 main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;
4721
4722 /* XXX change for per-queue sizes */
4723 device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
4724 scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
4725 for (i = 0; i < sctx->isc_nrxqs; i++) {
4726 if (!powerof2(scctx->isc_nrxd[i])) {
4727 /* round down instead? */
4728 device_printf(dev, "# rx descriptors must be a power of 2\n");
4729 err = EINVAL;
4730 goto fail;
4731 }
4732 }
4733 for (i = 0; i < sctx->isc_ntxqs; i++) {
4734 if (!powerof2(scctx->isc_ntxd[i])) {
4735 device_printf(dev,
4736 "# tx descriptors must be a power of 2");
4737 err = EINVAL;
4738 goto fail;
4739 }
4740 }
4741
4742 if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
4743 MAX_SINGLE_PACKET_FRACTION)
4744 scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
4745 MAX_SINGLE_PACKET_FRACTION);
4746 if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
4747 MAX_SINGLE_PACKET_FRACTION)
4748 scctx->isc_tx_tso_segments_max = max(1,
4749 scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);
4750
4751 /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
4752 if (if_getcapabilities(ifp) & IFCAP_TSO) {
4753 /*
4754 * The stack can't handle a TSO size larger than IP_MAXPACKET,
4755 * but some MACs do.
4756 */
4757 if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
4758 IP_MAXPACKET));
4759 /*
4760 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
4761 * into account. In the worst case, each of these calls will
4762 * add another mbuf and, thus, the requirement for another DMA
4763 * segment. So for best performance, it doesn't make sense to
4764 * advertise a maximum of TSO segments that typically will
4765 * require defragmentation in iflib_encap().
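* Hence the segment count advertised below is reduced by three, one
* for each m_pullup() call that may prepend an additional mbuf.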
4766 */ 4767 if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3); 4768 if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max); 4769 } 4770 if (scctx->isc_rss_table_size == 0) 4771 scctx->isc_rss_table_size = 64; 4772 scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1; 4773 4774 GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx); 4775 /* XXX format name */ 4776 taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin"); 4777 4778 /* XXX --- can support > 1 -- but keep it simple for now */ 4779 scctx->isc_intr = IFLIB_INTR_LEGACY; 4780 4781 /* Get memory for the station queues */ 4782 if ((err = iflib_queues_alloc(ctx))) { 4783 device_printf(dev, "Unable to allocate queue memory\n"); 4784 goto fail; 4785 } 4786 4787 if ((err = iflib_qset_structures_setup(ctx))) { 4788 device_printf(dev, "qset structure setup failed %d\n", err); 4789 goto fail_queues; 4790 } 4791 4792 /* 4793 * XXX What if anything do we want to do about interrupts? 4794 */ 4795 ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac); 4796 if ((err = IFDI_ATTACH_POST(ctx)) != 0) { 4797 device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); 4798 goto fail_detach; 4799 } 4800 4801 /* 4802 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported. 4803 * This must appear after the call to ether_ifattach() because 4804 * ether_ifattach() sets if_hdrlen to the default value. 4805 */ 4806 if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) 4807 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 4808 4809 /* XXX handle more than one queue */ 4810 for (i = 0; i < scctx->isc_nrxqsets; i++) 4811 IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl); 4812 4813 *ctxp = ctx; 4814 4815 if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); 4816 iflib_add_device_sysctl_post(ctx); 4817 ctx->ifc_flags |= IFC_INIT_DONE; 4818 return (0); 4819 fail_detach: 4820 ether_ifdetach(ctx->ifc_ifp); 4821 fail_queues: 4822 iflib_tx_structures_free(ctx); 4823 iflib_rx_structures_free(ctx); 4824 fail: 4825 IFDI_DETACH(ctx); 4826 return (err); 4827 } 4828 4829 int 4830 iflib_pseudo_deregister(if_ctx_t ctx) 4831 { 4832 if_t ifp = ctx->ifc_ifp; 4833 iflib_txq_t txq; 4834 iflib_rxq_t rxq; 4835 int i, j; 4836 struct taskqgroup *tqg; 4837 iflib_fl_t fl; 4838 4839 /* Unregister VLAN events */ 4840 if (ctx->ifc_vlan_attach_event != NULL) 4841 EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event); 4842 if (ctx->ifc_vlan_detach_event != NULL) 4843 EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event); 4844 4845 ether_ifdetach(ifp); 4846 /* ether_ifdetach calls if_qflush - lock must be destroy afterwards*/ 4847 CTX_LOCK_DESTROY(ctx); 4848 /* XXX drain any dependent tasks */ 4849 tqg = qgroup_if_io_tqg; 4850 for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) { 4851 callout_drain(&txq->ift_timer); 4852 if (txq->ift_task.gt_uniq != NULL) 4853 taskqgroup_detach(tqg, &txq->ift_task); 4854 } 4855 for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { 4856 if (rxq->ifr_task.gt_uniq != NULL) 4857 taskqgroup_detach(tqg, &rxq->ifr_task); 4858 4859 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) 4860 free(fl->ifl_rx_bitmap, M_IFLIB); 4861 } 4862 tqg = qgroup_if_config_tqg; 4863 if (ctx->ifc_admin_task.gt_uniq != NULL) 4864 taskqgroup_detach(tqg, &ctx->ifc_admin_task); 4865 if (ctx->ifc_vflr_task.gt_uniq != NULL) 4866 taskqgroup_detach(tqg, &ctx->ifc_vflr_task); 4867 4868 if_free(ifp); 4869 4870 iflib_tx_structures_free(ctx); 4871 iflib_rx_structures_free(ctx); 4872 
4872 if (ctx->ifc_flags & IFC_SC_ALLOCATED)
4873 free(ctx->ifc_softc, M_IFLIB);
4874 free(ctx, M_IFLIB);
4875 return (0);
4876 }
4877
4878 int
4879 iflib_device_attach(device_t dev)
4880 {
4881 if_ctx_t ctx;
4882 if_shared_ctx_t sctx;
4883
4884 if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
4885 return (ENOTSUP);
4886
4887 pci_enable_busmaster(dev);
4888
4889 return (iflib_device_register(dev, NULL, sctx, &ctx));
4890 }
4891
4892 int
4893 iflib_device_deregister(if_ctx_t ctx)
4894 {
4895 if_t ifp = ctx->ifc_ifp;
4896 iflib_txq_t txq;
4897 iflib_rxq_t rxq;
4898 device_t dev = ctx->ifc_dev;
4899 int i, j;
4900 struct taskqgroup *tqg;
4901 iflib_fl_t fl;
4902
4903 /* Make sure VLANS are not using driver */
4904 if (if_vlantrunkinuse(ifp)) {
4905 device_printf(dev,"Vlan in use, detach first\n");
4906 return (EBUSY);
4907 }
4908
4909 CTX_LOCK(ctx);
4910 ctx->ifc_in_detach = 1;
4911 iflib_stop(ctx);
4912 CTX_UNLOCK(ctx);
4913
4914 /* Unregister VLAN events */
4915 if (ctx->ifc_vlan_attach_event != NULL)
4916 EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
4917 if (ctx->ifc_vlan_detach_event != NULL)
4918 EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);
4919
4920 iflib_netmap_detach(ifp);
4921 ether_ifdetach(ifp);
4922 if (ctx->ifc_led_dev != NULL)
4923 led_destroy(ctx->ifc_led_dev);
4924 /* XXX drain any dependent tasks */
4925 tqg = qgroup_if_io_tqg;
4926 for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
4927 callout_drain(&txq->ift_timer);
4928 if (txq->ift_task.gt_uniq != NULL)
4929 taskqgroup_detach(tqg, &txq->ift_task);
4930 }
4931 for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
4932 if (rxq->ifr_task.gt_uniq != NULL)
4933 taskqgroup_detach(tqg, &rxq->ifr_task);
4934
4935 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
4936 free(fl->ifl_rx_bitmap, M_IFLIB);
4937
4938 }
4939 tqg = qgroup_if_config_tqg;
4940 if (ctx->ifc_admin_task.gt_uniq != NULL)
4941 taskqgroup_detach(tqg, &ctx->ifc_admin_task);
4942 if (ctx->ifc_vflr_task.gt_uniq != NULL)
4943 taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
4944 CTX_LOCK(ctx);
4945 IFDI_DETACH(ctx);
4946 CTX_UNLOCK(ctx);
4947
4948 /* ether_ifdetach calls if_qflush - the lock must be destroyed afterwards */
4949 CTX_LOCK_DESTROY(ctx);
4950 device_set_softc(ctx->ifc_dev, NULL);
4951 if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
4952 pci_release_msi(dev);
4953 }
4954 if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
4955 iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
4956 }
4957 if (ctx->ifc_msix_mem != NULL) {
4958 bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
4959 ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem);
4960 ctx->ifc_msix_mem = NULL;
4961 }
4962
4963 bus_generic_detach(dev);
4964 if_free(ifp);
4965
4966 iflib_tx_structures_free(ctx);
4967 iflib_rx_structures_free(ctx);
4968 if (ctx->ifc_flags & IFC_SC_ALLOCATED)
4969 free(ctx->ifc_softc, M_IFLIB);
4970 free(ctx, M_IFLIB);
4971 return (0);
4972 }
4973
4974
4975 int
4976 iflib_device_detach(device_t dev)
4977 {
4978 if_ctx_t ctx = device_get_softc(dev);
4979
4980 return (iflib_device_deregister(ctx));
4981 }
4982
4983 int
4984 iflib_device_suspend(device_t dev)
4985 {
4986 if_ctx_t ctx = device_get_softc(dev);
4987
4988 CTX_LOCK(ctx);
4989 IFDI_SUSPEND(ctx);
4990 CTX_UNLOCK(ctx);
4991
4992 return bus_generic_suspend(dev);
4993 }
4994 int
4995 iflib_device_shutdown(device_t dev)
4996 {
4997 if_ctx_t ctx = device_get_softc(dev);
4998
4999 CTX_LOCK(ctx);
5000 IFDI_SHUTDOWN(ctx);
5001
CTX_UNLOCK(ctx); 5002 5003 return bus_generic_suspend(dev); 5004 } 5005 5006 5007 int 5008 iflib_device_resume(device_t dev) 5009 { 5010 if_ctx_t ctx = device_get_softc(dev); 5011 iflib_txq_t txq = ctx->ifc_txqs; 5012 5013 CTX_LOCK(ctx); 5014 IFDI_RESUME(ctx); 5015 iflib_init_locked(ctx); 5016 CTX_UNLOCK(ctx); 5017 for (int i = 0; i < NTXQSETS(ctx); i++, txq++) 5018 iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); 5019 5020 return (bus_generic_resume(dev)); 5021 } 5022 5023 int 5024 iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params) 5025 { 5026 int error; 5027 if_ctx_t ctx = device_get_softc(dev); 5028 5029 CTX_LOCK(ctx); 5030 error = IFDI_IOV_INIT(ctx, num_vfs, params); 5031 CTX_UNLOCK(ctx); 5032 5033 return (error); 5034 } 5035 5036 void 5037 iflib_device_iov_uninit(device_t dev) 5038 { 5039 if_ctx_t ctx = device_get_softc(dev); 5040 5041 CTX_LOCK(ctx); 5042 IFDI_IOV_UNINIT(ctx); 5043 CTX_UNLOCK(ctx); 5044 } 5045 5046 int 5047 iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) 5048 { 5049 int error; 5050 if_ctx_t ctx = device_get_softc(dev); 5051 5052 CTX_LOCK(ctx); 5053 error = IFDI_IOV_VF_ADD(ctx, vfnum, params); 5054 CTX_UNLOCK(ctx); 5055 5056 return (error); 5057 } 5058 5059 /********************************************************************* 5060 * 5061 * MODULE FUNCTION DEFINITIONS 5062 * 5063 **********************************************************************/ 5064 5065 /* 5066 * - Start a fast taskqueue thread for each core 5067 * - Start a taskqueue for control operations 5068 */ 5069 static int 5070 iflib_module_init(void) 5071 { 5072 return (0); 5073 } 5074 5075 static int 5076 iflib_module_event_handler(module_t mod, int what, void *arg) 5077 { 5078 int err; 5079 5080 switch (what) { 5081 case MOD_LOAD: 5082 if ((err = iflib_module_init()) != 0) 5083 return (err); 5084 break; 5085 case MOD_UNLOAD: 5086 return (EBUSY); 5087 default: 5088 return (EOPNOTSUPP); 5089 } 5090 5091 return (0); 5092 } 5093 5094 /********************************************************************* 5095 * 5096 * PUBLIC FUNCTION DEFINITIONS 5097 * ordered as in iflib.h 5098 * 5099 **********************************************************************/ 5100 5101 5102 static void 5103 _iflib_assert(if_shared_ctx_t sctx) 5104 { 5105 MPASS(sctx->isc_tx_maxsize); 5106 MPASS(sctx->isc_tx_maxsegsize); 5107 5108 MPASS(sctx->isc_rx_maxsize); 5109 MPASS(sctx->isc_rx_nsegments); 5110 MPASS(sctx->isc_rx_maxsegsize); 5111 5112 MPASS(sctx->isc_nrxd_min[0]); 5113 MPASS(sctx->isc_nrxd_max[0]); 5114 MPASS(sctx->isc_nrxd_default[0]); 5115 MPASS(sctx->isc_ntxd_min[0]); 5116 MPASS(sctx->isc_ntxd_max[0]); 5117 MPASS(sctx->isc_ntxd_default[0]); 5118 } 5119 5120 static void 5121 _iflib_pre_assert(if_softc_ctx_t scctx) 5122 { 5123 5124 MPASS(scctx->isc_txrx->ift_txd_encap); 5125 MPASS(scctx->isc_txrx->ift_txd_flush); 5126 MPASS(scctx->isc_txrx->ift_txd_credits_update); 5127 MPASS(scctx->isc_txrx->ift_rxd_available); 5128 MPASS(scctx->isc_txrx->ift_rxd_pkt_get); 5129 MPASS(scctx->isc_txrx->ift_rxd_refill); 5130 MPASS(scctx->isc_txrx->ift_rxd_flush); 5131 } 5132 5133 static int 5134 iflib_register(if_ctx_t ctx) 5135 { 5136 if_shared_ctx_t sctx = ctx->ifc_sctx; 5137 driver_t *driver = sctx->isc_driver; 5138 device_t dev = ctx->ifc_dev; 5139 if_t ifp; 5140 5141 _iflib_assert(sctx); 5142 5143 CTX_LOCK_INIT(ctx); 5144 STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev)); 5145 ifp = ctx->ifc_ifp = if_gethandle(IFT_ETHER); 5146 if (ifp == NULL) { 5147 device_printf(dev, 
"can not allocate ifnet structure\n"); 5148 return (ENOMEM); 5149 } 5150 5151 /* 5152 * Initialize our context's device specific methods 5153 */ 5154 kobj_init((kobj_t) ctx, (kobj_class_t) driver); 5155 kobj_class_compile((kobj_class_t) driver); 5156 driver->refs++; 5157 5158 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 5159 if_setsoftc(ifp, ctx); 5160 if_setdev(ifp, dev); 5161 if_setinitfn(ifp, iflib_if_init); 5162 if_setioctlfn(ifp, iflib_if_ioctl); 5163 if_settransmitfn(ifp, iflib_if_transmit); 5164 if_setqflushfn(ifp, iflib_if_qflush); 5165 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 5166 5167 ctx->ifc_vlan_attach_event = 5168 EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx, 5169 EVENTHANDLER_PRI_FIRST); 5170 ctx->ifc_vlan_detach_event = 5171 EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx, 5172 EVENTHANDLER_PRI_FIRST); 5173 5174 ifmedia_init(&ctx->ifc_media, IFM_IMASK, 5175 iflib_media_change, iflib_media_status); 5176 5177 return (0); 5178 } 5179 5180 5181 static int 5182 iflib_queues_alloc(if_ctx_t ctx) 5183 { 5184 if_shared_ctx_t sctx = ctx->ifc_sctx; 5185 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 5186 device_t dev = ctx->ifc_dev; 5187 int nrxqsets = scctx->isc_nrxqsets; 5188 int ntxqsets = scctx->isc_ntxqsets; 5189 iflib_txq_t txq; 5190 iflib_rxq_t rxq; 5191 iflib_fl_t fl = NULL; 5192 int i, j, cpu, err, txconf, rxconf; 5193 iflib_dma_info_t ifdip; 5194 uint32_t *rxqsizes = scctx->isc_rxqsizes; 5195 uint32_t *txqsizes = scctx->isc_txqsizes; 5196 uint8_t nrxqs = sctx->isc_nrxqs; 5197 uint8_t ntxqs = sctx->isc_ntxqs; 5198 int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1; 5199 caddr_t *vaddrs; 5200 uint64_t *paddrs; 5201 5202 KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1")); 5203 KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1")); 5204 5205 /* Allocate the TX ring struct memory */ 5206 if (!(ctx->ifc_txqs = 5207 (iflib_txq_t) malloc(sizeof(struct iflib_txq) * 5208 ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { 5209 device_printf(dev, "Unable to allocate TX ring memory\n"); 5210 err = ENOMEM; 5211 goto fail; 5212 } 5213 5214 /* Now allocate the RX */ 5215 if (!(ctx->ifc_rxqs = 5216 (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) * 5217 nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { 5218 device_printf(dev, "Unable to allocate RX ring memory\n"); 5219 err = ENOMEM; 5220 goto rx_fail; 5221 } 5222 5223 txq = ctx->ifc_txqs; 5224 rxq = ctx->ifc_rxqs; 5225 5226 /* 5227 * XXX handle allocation failure 5228 */ 5229 for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) { 5230 /* Set up some basics */ 5231 5232 if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) { 5233 device_printf(dev, "failed to allocate iflib_dma_info\n"); 5234 err = ENOMEM; 5235 goto err_tx_desc; 5236 } 5237 txq->ift_ifdi = ifdip; 5238 for (j = 0; j < ntxqs; j++, ifdip++) { 5239 if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) { 5240 device_printf(dev, "Unable to allocate Descriptor memory\n"); 5241 err = ENOMEM; 5242 goto err_tx_desc; 5243 } 5244 txq->ift_txd_size[j] = scctx->isc_txd_size[j]; 5245 bzero((void *)ifdip->idi_vaddr, txqsizes[j]); 5246 } 5247 txq->ift_ctx = ctx; 5248 txq->ift_id = i; 5249 if (sctx->isc_flags & IFLIB_HAS_TXCQ) { 5250 txq->ift_br_offset = 1; 5251 } else { 5252 txq->ift_br_offset = 0; 5253 } 5254 /* XXX fix this */ 5255 txq->ift_timer.c_cpu = cpu; 5256 5257 if (iflib_txsd_alloc(txq)) { 5258 
device_printf(dev, "Critical Failure setting up TX buffers\n"); 5259 err = ENOMEM; 5260 goto err_tx_desc; 5261 } 5262 5263 /* Initialize the TX lock */ 5264 snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout", 5265 device_get_nameunit(dev), txq->ift_id); 5266 mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF); 5267 callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0); 5268 5269 snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db", 5270 device_get_nameunit(dev), txq->ift_id); 5271 5272 err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain, 5273 iflib_txq_can_drain, M_IFLIB, M_WAITOK); 5274 if (err) { 5275 /* XXX free any allocated rings */ 5276 device_printf(dev, "Unable to allocate buf_ring\n"); 5277 goto err_tx_desc; 5278 } 5279 } 5280 5281 for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) { 5282 /* Set up some basics */ 5283 5284 if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) { 5285 device_printf(dev, "failed to allocate iflib_dma_info\n"); 5286 err = ENOMEM; 5287 goto err_tx_desc; 5288 } 5289 5290 rxq->ifr_ifdi = ifdip; 5291 /* XXX this needs to be changed if #rx queues != #tx queues */ 5292 rxq->ifr_ntxqirq = 1; 5293 rxq->ifr_txqid[0] = i; 5294 for (j = 0; j < nrxqs; j++, ifdip++) { 5295 if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) { 5296 device_printf(dev, "Unable to allocate Descriptor memory\n"); 5297 err = ENOMEM; 5298 goto err_tx_desc; 5299 } 5300 bzero((void *)ifdip->idi_vaddr, rxqsizes[j]); 5301 } 5302 rxq->ifr_ctx = ctx; 5303 rxq->ifr_id = i; 5304 if (sctx->isc_flags & IFLIB_HAS_RXCQ) { 5305 rxq->ifr_fl_offset = 1; 5306 } else { 5307 rxq->ifr_fl_offset = 0; 5308 } 5309 rxq->ifr_nfl = nfree_lists; 5310 if (!(fl = 5311 (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) { 5312 device_printf(dev, "Unable to allocate free list memory\n"); 5313 err = ENOMEM; 5314 goto err_tx_desc; 5315 } 5316 rxq->ifr_fl = fl; 5317 for (j = 0; j < nfree_lists; j++) { 5318 fl[j].ifl_rxq = rxq; 5319 fl[j].ifl_id = j; 5320 fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset]; 5321 fl[j].ifl_rxd_size = scctx->isc_rxd_size[j]; 5322 } 5323 /* Allocate receive buffers for the ring*/ 5324 if (iflib_rxsd_alloc(rxq)) { 5325 device_printf(dev, 5326 "Critical Failure setting up receive buffers\n"); 5327 err = ENOMEM; 5328 goto err_rx_desc; 5329 } 5330 5331 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) 5332 fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK|M_ZERO); 5333 } 5334 5335 /* TXQs */ 5336 vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); 5337 paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); 5338 for (i = 0; i < ntxqsets; i++) { 5339 iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi; 5340 5341 for (j = 0; j < ntxqs; j++, di++) { 5342 vaddrs[i*ntxqs + j] = di->idi_vaddr; 5343 paddrs[i*ntxqs + j] = di->idi_paddr; 5344 } 5345 } 5346 if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) { 5347 device_printf(ctx->ifc_dev, "device queue allocation failed\n"); 5348 iflib_tx_structures_free(ctx); 5349 free(vaddrs, M_IFLIB); 5350 free(paddrs, M_IFLIB); 5351 goto err_rx_desc; 5352 } 5353 free(vaddrs, M_IFLIB); 5354 free(paddrs, M_IFLIB); 5355 5356 /* RXQs */ 5357 vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); 5358 paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); 5359 for (i = 0; i < nrxqsets; i++) { 5360 iflib_dma_info_t di = 
ctx->ifc_rxqs[i].ifr_ifdi;
5361
5362 for (j = 0; j < nrxqs; j++, di++) {
5363 vaddrs[i*nrxqs + j] = di->idi_vaddr;
5364 paddrs[i*nrxqs + j] = di->idi_paddr;
5365 }
5366 }
5367 if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
5368 device_printf(ctx->ifc_dev, "device queue allocation failed\n");
5369 iflib_tx_structures_free(ctx);
5370 free(vaddrs, M_IFLIB);
5371 free(paddrs, M_IFLIB);
5372 goto err_rx_desc;
5373 }
5374 free(vaddrs, M_IFLIB);
5375 free(paddrs, M_IFLIB);
5376
5377 return (0);
5378
5379 /* XXX handle allocation failure changes */
5380 err_rx_desc:
5381 err_tx_desc:
5382 rx_fail:
5383 if (ctx->ifc_rxqs != NULL)
5384 free(ctx->ifc_rxqs, M_IFLIB);
5385 ctx->ifc_rxqs = NULL;
5386 if (ctx->ifc_txqs != NULL)
5387 free(ctx->ifc_txqs, M_IFLIB);
5388 ctx->ifc_txqs = NULL;
5389 fail:
5390 return (err);
5391 }
5392
5393 static int
5394 iflib_tx_structures_setup(if_ctx_t ctx)
5395 {
5396 iflib_txq_t txq = ctx->ifc_txqs;
5397 int i;
5398
5399 for (i = 0; i < NTXQSETS(ctx); i++, txq++)
5400 iflib_txq_setup(txq);
5401
5402 return (0);
5403 }
5404
5405 static void
5406 iflib_tx_structures_free(if_ctx_t ctx)
5407 {
5408 iflib_txq_t txq = ctx->ifc_txqs;
5409 int i, j;
5410
5411 for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
5412 iflib_txq_destroy(txq);
5413 for (j = 0; j < ctx->ifc_nhwtxqs; j++)
5414 iflib_dma_free(&txq->ift_ifdi[j]);
5415 }
5416 free(ctx->ifc_txqs, M_IFLIB);
5417 ctx->ifc_txqs = NULL;
5418 IFDI_QUEUES_FREE(ctx);
5419 }
5420
5421 /*********************************************************************
5422 *
5423 * Initialize all receive rings.
5424 *
5425 **********************************************************************/
5426 static int
5427 iflib_rx_structures_setup(if_ctx_t ctx)
5428 {
5429 iflib_rxq_t rxq = ctx->ifc_rxqs;
5430 int q;
5431 #if defined(INET6) || defined(INET)
5432 int i, err;
5433 #endif
5434
5435 for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
5436 #if defined(INET6) || defined(INET)
5437 tcp_lro_free(&rxq->ifr_lc);
5438 if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
5439 TCP_LRO_ENTRIES, min(1024,
5440 ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) {
5441 device_printf(ctx->ifc_dev, "LRO Initialization failed!\n");
5442 goto fail;
5443 }
5444 rxq->ifr_lro_enabled = TRUE;
5445 #endif
5446 IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
5447 }
5448 return (0);
5449 #if defined(INET6) || defined(INET)
5450 fail:
5451 /*
5452 * Free the RX software descriptors allocated so far. We will only
5453 * handle the rings that completed; the failing case will have
5454 * cleaned up after itself. 'q' failed, so it's the terminus.
5455 */
5456 rxq = ctx->ifc_rxqs;
5457 for (i = 0; i < q; ++i, rxq++) {
5458 iflib_rx_sds_free(rxq);
5459 rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
5460 }
5461 return (err);
5462 #endif
5463 }
5464
5465 /*********************************************************************
5466 *
5467 * Free all receive rings.
5468 *
5469 **********************************************************************/
5470 static void
5471 iflib_rx_structures_free(if_ctx_t ctx)
5472 {
5473 iflib_rxq_t rxq = ctx->ifc_rxqs;
5474
5475 for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
5476 iflib_rx_sds_free(rxq);
5477 }
5478 }
5479
5480 static int
5481 iflib_qset_structures_setup(if_ctx_t ctx)
5482 {
5483 int err;
5484
5485 /*
5486 * It is expected that the caller takes care of freeing queues if this
5487 * fails.
5488 */ 5489 if ((err = iflib_tx_structures_setup(ctx)) != 0) { 5490 device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err); 5491 return (err); 5492 } 5493 5494 if ((err = iflib_rx_structures_setup(ctx)) != 0) 5495 device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err); 5496 5497 return (err); 5498 } 5499 5500 int 5501 iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid, 5502 driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name) 5503 { 5504 5505 return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name)); 5506 } 5507 5508 #ifdef SMP 5509 static int 5510 find_nth(if_ctx_t ctx, int qid) 5511 { 5512 cpuset_t cpus; 5513 int i, cpuid, eqid, count; 5514 5515 CPU_COPY(&ctx->ifc_cpus, &cpus); 5516 count = CPU_COUNT(&cpus); 5517 eqid = qid % count; 5518 /* clear up to the qid'th bit */ 5519 for (i = 0; i < eqid; i++) { 5520 cpuid = CPU_FFS(&cpus); 5521 MPASS(cpuid != 0); 5522 CPU_CLR(cpuid-1, &cpus); 5523 } 5524 cpuid = CPU_FFS(&cpus); 5525 MPASS(cpuid != 0); 5526 return (cpuid-1); 5527 } 5528 5529 #ifdef SCHED_ULE 5530 extern struct cpu_group *cpu_top; /* CPU topology */ 5531 5532 static int 5533 find_child_with_core(int cpu, struct cpu_group *grp) 5534 { 5535 int i; 5536 5537 if (grp->cg_children == 0) 5538 return -1; 5539 5540 MPASS(grp->cg_child); 5541 for (i = 0; i < grp->cg_children; i++) { 5542 if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask)) 5543 return i; 5544 } 5545 5546 return -1; 5547 } 5548 5549 /* 5550 * Find the nth "close" core to the specified core 5551 * "close" is defined as the deepest level that shares 5552 * at least an L2 cache. With threads, this will be 5553 * threads on the same core. If the sahred cache is L3 5554 * or higher, simply returns the same core. 5555 */ 5556 static int 5557 find_close_core(int cpu, int core_offset) 5558 { 5559 struct cpu_group *grp; 5560 int i; 5561 int fcpu; 5562 cpuset_t cs; 5563 5564 grp = cpu_top; 5565 if (grp == NULL) 5566 return cpu; 5567 i = 0; 5568 while ((i = find_child_with_core(cpu, grp)) != -1) { 5569 /* If the child only has one cpu, don't descend */ 5570 if (grp->cg_child[i].cg_count <= 1) 5571 break; 5572 grp = &grp->cg_child[i]; 5573 } 5574 5575 /* If they don't share at least an L2 cache, use the same CPU */ 5576 if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE) 5577 return cpu; 5578 5579 /* Now pick one */ 5580 CPU_COPY(&grp->cg_mask, &cs); 5581 5582 /* Add the selected CPU offset to core offset. 
*/
5583 for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) {
5584 if (fcpu - 1 == cpu)
5585 break;
5586 CPU_CLR(fcpu - 1, &cs);
5587 }
5588 MPASS(fcpu);
5589
5590 core_offset += i;
5591
5592 CPU_COPY(&grp->cg_mask, &cs);
5593 for (i = core_offset % grp->cg_count; i > 0; i--) {
5594 MPASS(CPU_FFS(&cs));
5595 CPU_CLR(CPU_FFS(&cs) - 1, &cs);
5596 }
5597 MPASS(CPU_FFS(&cs));
5598 return CPU_FFS(&cs) - 1;
5599 }
5600 #else
5601 static int
5602 find_close_core(int cpu, int core_offset __unused)
5603 {
5604 return cpu;
5605 }
5606 #endif
5607
5608 static int
5609 get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid)
5610 {
5611 switch (type) {
5612 case IFLIB_INTR_TX:
5613 /* TX queues get cores which share at least an L2 cache with the corresponding RX queue */
5614 /* XXX handle multiple RX threads per core and more than two cores per L2 group */
5615 return qid / CPU_COUNT(&ctx->ifc_cpus) + 1;
5616 case IFLIB_INTR_RX:
5617 case IFLIB_INTR_RXTX:
5618 /* RX queues get the specified core */
5619 return qid / CPU_COUNT(&ctx->ifc_cpus);
5620 default:
5621 return -1;
5622 }
5623 }
5624 #else
5625 #define get_core_offset(ctx, type, qid) CPU_FIRST()
5626 #define find_close_core(cpuid, tid) CPU_FIRST()
5627 #define find_nth(ctx, gid) CPU_FIRST()
5628 #endif
5629
5630 /* Just to avoid copy/paste */
5631 static inline int
5632 iflib_irq_set_affinity(if_ctx_t ctx, int irq, iflib_intr_type_t type, int qid,
5633 struct grouptask *gtask, struct taskqgroup *tqg, void *uniq, const char *name)
5634 {
5635 int cpuid;
5636 int err, tid;
5637
5638 cpuid = find_nth(ctx, qid);
5639 tid = get_core_offset(ctx, type, qid);
5640 MPASS(tid >= 0);
5641 cpuid = find_close_core(cpuid, tid);
5642 err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, irq, name);
5643 if (err) {
5644 device_printf(ctx->ifc_dev, "taskqgroup_attach_cpu failed %d\n", err);
5645 return (err);
5646 }
5647 #ifdef notyet
5648 if (cpuid > ctx->ifc_cpuid_highest)
5649 ctx->ifc_cpuid_highest = cpuid;
5650 #endif
5651 return 0;
5652 }
5653
5654 int
5655 iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
5656 iflib_intr_type_t type, driver_filter_t *filter,
5657 void *filter_arg, int qid, const char *name)
5658 {
5659 struct grouptask *gtask;
5660 struct taskqgroup *tqg;
5661 iflib_filter_info_t info;
5662 gtask_fn_t *fn;
5663 int tqrid, err;
5664 driver_filter_t *intr_fast;
5665 void *q;
5666
5667 info = &ctx->ifc_filter_info;
5668 tqrid = rid;
5669
5670 switch (type) {
5671 /* XXX merge tx/rx for netmap?
*/ 5672 case IFLIB_INTR_TX: 5673 q = &ctx->ifc_txqs[qid]; 5674 info = &ctx->ifc_txqs[qid].ift_filter_info; 5675 gtask = &ctx->ifc_txqs[qid].ift_task; 5676 tqg = qgroup_if_io_tqg; 5677 fn = _task_fn_tx; 5678 intr_fast = iflib_fast_intr; 5679 GROUPTASK_INIT(gtask, 0, fn, q); 5680 ctx->ifc_flags |= IFC_NETMAP_TX_IRQ; 5681 break; 5682 case IFLIB_INTR_RX: 5683 q = &ctx->ifc_rxqs[qid]; 5684 info = &ctx->ifc_rxqs[qid].ifr_filter_info; 5685 gtask = &ctx->ifc_rxqs[qid].ifr_task; 5686 tqg = qgroup_if_io_tqg; 5687 fn = _task_fn_rx; 5688 intr_fast = iflib_fast_intr; 5689 GROUPTASK_INIT(gtask, 0, fn, q); 5690 break; 5691 case IFLIB_INTR_RXTX: 5692 q = &ctx->ifc_rxqs[qid]; 5693 info = &ctx->ifc_rxqs[qid].ifr_filter_info; 5694 gtask = &ctx->ifc_rxqs[qid].ifr_task; 5695 tqg = qgroup_if_io_tqg; 5696 fn = _task_fn_rx; 5697 intr_fast = iflib_fast_intr_rxtx; 5698 GROUPTASK_INIT(gtask, 0, fn, q); 5699 break; 5700 case IFLIB_INTR_ADMIN: 5701 q = ctx; 5702 tqrid = -1; 5703 info = &ctx->ifc_filter_info; 5704 gtask = &ctx->ifc_admin_task; 5705 tqg = qgroup_if_config_tqg; 5706 fn = _task_fn_admin; 5707 intr_fast = iflib_fast_intr_ctx; 5708 break; 5709 default: 5710 panic("unknown net intr type"); 5711 } 5712 5713 info->ifi_filter = filter; 5714 info->ifi_filter_arg = filter_arg; 5715 info->ifi_task = gtask; 5716 info->ifi_ctx = q; 5717 5718 err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name); 5719 if (err != 0) { 5720 device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err); 5721 return (err); 5722 } 5723 if (type == IFLIB_INTR_ADMIN) 5724 return (0); 5725 5726 if (tqrid != -1) { 5727 err = iflib_irq_set_affinity(ctx, rman_get_start(irq->ii_res), type, qid, gtask, tqg, q, name); 5728 if (err) 5729 return (err); 5730 } else { 5731 taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name); 5732 } 5733 5734 return (0); 5735 } 5736 5737 void 5738 iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, const char *name) 5739 { 5740 struct grouptask *gtask; 5741 struct taskqgroup *tqg; 5742 gtask_fn_t *fn; 5743 void *q; 5744 int irq_num = -1; 5745 int err; 5746 5747 switch (type) { 5748 case IFLIB_INTR_TX: 5749 q = &ctx->ifc_txqs[qid]; 5750 gtask = &ctx->ifc_txqs[qid].ift_task; 5751 tqg = qgroup_if_io_tqg; 5752 fn = _task_fn_tx; 5753 if (irq != NULL) 5754 irq_num = rman_get_start(irq->ii_res); 5755 break; 5756 case IFLIB_INTR_RX: 5757 q = &ctx->ifc_rxqs[qid]; 5758 gtask = &ctx->ifc_rxqs[qid].ifr_task; 5759 tqg = qgroup_if_io_tqg; 5760 fn = _task_fn_rx; 5761 if (irq != NULL) 5762 irq_num = rman_get_start(irq->ii_res); 5763 break; 5764 case IFLIB_INTR_IOV: 5765 q = ctx; 5766 gtask = &ctx->ifc_vflr_task; 5767 tqg = qgroup_if_config_tqg; 5768 fn = _task_fn_iov; 5769 break; 5770 default: 5771 panic("unknown net intr type"); 5772 } 5773 GROUPTASK_INIT(gtask, 0, fn, q); 5774 if (irq_num != -1) { 5775 err = iflib_irq_set_affinity(ctx, irq_num, type, qid, gtask, tqg, q, name); 5776 if (err) 5777 taskqgroup_attach(tqg, gtask, q, irq_num, name); 5778 } 5779 else { 5780 taskqgroup_attach(tqg, gtask, q, irq_num, name); 5781 } 5782 } 5783 5784 void 5785 iflib_irq_free(if_ctx_t ctx, if_irq_t irq) 5786 { 5787 if (irq->ii_tag) 5788 bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag); 5789 5790 if (irq->ii_res) 5791 bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, irq->ii_rid, irq->ii_res); 5792 } 5793 5794 static int 5795 iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name) 5796 { 5797 iflib_txq_t txq = 
void
iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type,
    void *arg, int qid, const char *name)
{
	struct grouptask *gtask;
	struct taskqgroup *tqg;
	gtask_fn_t *fn;
	void *q;
	int irq_num = -1;
	int err;

	switch (type) {
	case IFLIB_INTR_TX:
		q = &ctx->ifc_txqs[qid];
		gtask = &ctx->ifc_txqs[qid].ift_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_tx;
		if (irq != NULL)
			irq_num = rman_get_start(irq->ii_res);
		break;
	case IFLIB_INTR_RX:
		q = &ctx->ifc_rxqs[qid];
		gtask = &ctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		if (irq != NULL)
			irq_num = rman_get_start(irq->ii_res);
		break;
	case IFLIB_INTR_IOV:
		q = ctx;
		gtask = &ctx->ifc_vflr_task;
		tqg = qgroup_if_config_tqg;
		fn = _task_fn_iov;
		break;
	default:
		panic("unknown net intr type");
	}
	GROUPTASK_INIT(gtask, 0, fn, q);
	if (irq_num != -1) {
		err = iflib_irq_set_affinity(ctx, irq_num, type, qid, gtask,
		    tqg, q, name);
		/* Fall back to an unpinned attach if affinity setup fails. */
		if (err)
			taskqgroup_attach(tqg, gtask, q, irq_num, name);
	} else {
		taskqgroup_attach(tqg, gtask, q, irq_num, name);
	}
}

void
iflib_irq_free(if_ctx_t ctx, if_irq_t irq)
{

	if (irq->ii_tag)
		bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag);

	if (irq->ii_res)
		bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, irq->ii_rid,
		    irq->ii_res);
}

static int
iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg,
    int *rid, const char *name)
{
	iflib_txq_t txq = ctx->ifc_txqs;
	iflib_rxq_t rxq = ctx->ifc_rxqs;
	if_irq_t irq = &ctx->ifc_legacy_irq;
	iflib_filter_info_t info;
	struct grouptask *gtask;
	struct taskqgroup *tqg;
	gtask_fn_t *fn;
	int tqrid;
	void *q;
	int err;

	q = &ctx->ifc_rxqs[0];
	info = &rxq[0].ifr_filter_info;
	gtask = &rxq[0].ifr_task;
	tqg = qgroup_if_io_tqg;
	tqrid = irq->ii_rid = *rid;
	fn = _task_fn_rx;

	ctx->ifc_flags |= IFC_LEGACY;
	info->ifi_filter = filter;
	info->ifi_filter_arg = filter_arg;
	info->ifi_task = gtask;
	info->ifi_ctx = ctx;

	/* We allocate a single interrupt resource */
	if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx,
	    NULL, info, name)) != 0)
		return (err);
	GROUPTASK_INIT(gtask, 0, fn, q);
	taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);

	GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
	taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq,
	    rman_get_start(irq->ii_res), "tx");
	return (0);
}

void
iflib_led_create(if_ctx_t ctx)
{

	ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
	    device_get_nameunit(ctx->ifc_dev));
}

void
iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
}

void
iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
}

void
iflib_admin_intr_deferred(if_ctx_t ctx)
{
#ifdef INVARIANTS
	struct grouptask *gtask;

	gtask = &ctx->ifc_admin_task;
	MPASS(gtask != NULL && gtask->gt_taskqueue != NULL);
#endif

	GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
}

void
iflib_iov_intr_deferred(if_ctx_t ctx)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task);
}

void
iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name)
{

	taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name);
}

void
iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
    const char *name)
{

	GROUPTASK_INIT(gtask, 0, fn, ctx);
	taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name);
}

void
iflib_config_gtask_deinit(struct grouptask *gtask)
{

	taskqgroup_detach(qgroup_if_config_tqg, gtask);
}

void
iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
{
	if_t ifp = ctx->ifc_ifp;
	iflib_txq_t txq = ctx->ifc_txqs;

	if_setbaudrate(ifp, baudrate);
	if (baudrate >= IF_Gbps(10)) {
		STATE_LOCK(ctx);
		ctx->ifc_flags |= IFC_PREFETCH;
		STATE_UNLOCK(ctx);
	}
	/* If link down, disable watchdog */
	if ((ctx->ifc_link_state == LINK_STATE_UP) &&
	    (link_state == LINK_STATE_DOWN)) {
		for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
			txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	}
	ctx->ifc_link_state = link_state;
	if_link_state_change(ifp, link_state);
}
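/*
 * Example usage (sketch; the driver fields are hypothetical): drivers
 * normally report link transitions from their IFDI_UPDATE_ADMIN_STATUS
 * method once the MAC indicates a change, e.g.:
 *
 *	if (sc->link_up)
 *		iflib_link_state_change(ctx, LINK_STATE_UP,
 *		    IF_Mbps(sc->link_speed));
 *	else
 *		iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
 */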
static int
iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
{
	int credits;
#ifdef INVARIANTS
	int credits_pre = txq->ift_cidx_processed;
#endif

	if (ctx->isc_txd_credits_update == NULL)
		return (0);

	if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc,
	    txq->ift_id, true)) == 0)
		return (0);

	txq->ift_processed += credits;
	txq->ift_cidx_processed += credits;

	MPASS(credits_pre + credits == txq->ift_cidx_processed);
	if (txq->ift_cidx_processed >= txq->ift_size)
		txq->ift_cidx_processed -= txq->ift_size;
	return (credits);
}

static int
iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
{

	return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
	    budget));
}

void
iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
    const char *description, if_int_delay_info_t info,
    int offset, int value)
{
	info->iidi_ctx = ctx;
	info->iidi_offset = offset;
	info->iidi_value = value;
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
	    info, 0, iflib_sysctl_int_delay, "I", description);
}

struct sx *
iflib_ctx_lock_get(if_ctx_t ctx)
{

	return (&ctx->ifc_ctx_sx);
}
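/*
 * Example usage (sketch; the register name and default are hypothetical):
 * an e1000-style driver can expose an interrupt-moderation register as a
 * tunable sysctl with:
 *
 *	iflib_add_int_delay_sysctl(ctx, "rx_int_delay",
 *	    "receive interrupt delay in usecs", &sc->rx_int_delay,
 *	    MYDRV_RDTR_REG, MYDRV_RDTR_DEFAULT);
 *
 * Writes to the resulting sysctl are forwarded back to the driver through
 * iflib_sysctl_int_delay() and the IFDI_SYSCTL_INT_DELAY method.
 */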
static int
iflib_msix_init(if_ctx_t ctx)
{
	device_t dev = ctx->ifc_dev;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs;
	int iflib_num_tx_queues, iflib_num_rx_queues;
	int err, admincnt, bar;

	iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
	iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;

	device_printf(dev, "msix_init qsets capped at %d\n",
	    imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));

	bar = ctx->ifc_softc_ctx.isc_msix_bar;
	admincnt = sctx->isc_admin_intrcnt;
	/* Override by tunable */
	if (scctx->isc_disable_msix)
		goto msi;

	/*
	 * bar == -1 => "trust me I know what I'm doing"
	 * Some drivers are for hardware that is so shoddily
	 * documented that no one knows which BARs are which, so
	 * the developer has to map all of them.  This hack allows
	 * such devices to use MSI-X in this framework.
	 */
	if (bar != -1) {
		ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &bar, RF_ACTIVE);
		if (ctx->ifc_msix_mem == NULL) {
			/* May not be enabled */
			device_printf(dev, "Unable to map MSIX table\n");
			goto msi;
		}
	}
	/* First try MSI/X */
	if ((msgs = pci_msix_count(dev)) == 0) { /* system has msix disabled */
		device_printf(dev, "System has MSIX disabled\n");
		bus_release_resource(dev, SYS_RES_MEMORY,
		    bar, ctx->ifc_msix_mem);
		ctx->ifc_msix_mem = NULL;
		goto msi;
	}
#if IFLIB_DEBUG
	/* use only 1 qset in debug mode */
	queuemsgs = min(msgs - admincnt, 1);
#else
	queuemsgs = msgs - admincnt;
#endif
#ifdef RSS
	queues = imin(queuemsgs, rss_getnumbuckets());
#else
	queues = queuemsgs;
#endif
	queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
	device_printf(dev, "pxm cpus: %d queue msgs: %d admincnt: %d\n",
	    CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
#ifdef RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif
	if (iflib_num_rx_queues > 0 &&
	    iflib_num_rx_queues < queuemsgs - admincnt)
		rx_queues = iflib_num_rx_queues;
	else
		rx_queues = queues;

	if (rx_queues > scctx->isc_nrxqsets)
		rx_queues = scctx->isc_nrxqsets;

	/*
	 * We want this to be all logical CPUs by default
	 */
	if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
		tx_queues = iflib_num_tx_queues;
	else
		tx_queues = mp_ncpus;

	if (tx_queues > scctx->isc_ntxqsets)
		tx_queues = scctx->isc_ntxqsets;

	if (ctx->ifc_sysctl_qs_eq_override == 0) {
#ifdef INVARIANTS
		if (tx_queues != rx_queues)
			device_printf(dev,
			    "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
			    min(rx_queues, tx_queues),
			    min(rx_queues, tx_queues));
#endif
		tx_queues = min(rx_queues, tx_queues);
		rx_queues = min(rx_queues, tx_queues);
	}

	device_printf(dev, "using %d rx queues %d tx queues\n",
	    rx_queues, tx_queues);

	vectors = rx_queues + admincnt;
	if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
		device_printf(dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		scctx->isc_vectors = vectors;
		scctx->isc_nrxqsets = rx_queues;
		scctx->isc_ntxqsets = tx_queues;
		scctx->isc_intr = IFLIB_INTR_MSIX;

		return (vectors);
	} else {
		device_printf(dev,
		    "failed to allocate %d msix vectors, err: %d - using MSI\n",
		    vectors, err);
		bus_release_resource(dev, SYS_RES_MEMORY, bar,
		    ctx->ifc_msix_mem);
		ctx->ifc_msix_mem = NULL;
	}
msi:
	vectors = pci_msi_count(dev);
	scctx->isc_nrxqsets = 1;
	scctx->isc_ntxqsets = 1;
	scctx->isc_vectors = vectors;
	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
		device_printf(dev, "Using an MSI interrupt\n");
		scctx->isc_intr = IFLIB_INTR_MSI;
	} else {
		scctx->isc_vectors = 1;
		device_printf(dev, "Using a Legacy interrupt\n");
		scctx->isc_intr = IFLIB_INTR_LEGACY;
	}

	return (vectors);
}
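/*
 * Worked example (illustrative): on an 8-core machine whose device exposes
 * 16 MSI-X messages with isc_admin_intrcnt == 1, queuemsgs is 15, queues is
 * clamped to 8 by the CPU count, and (absent tunable overrides) 8 RX queue
 * sets plus the admin interrupt are requested, i.e. vectors = 9.  If
 * pci_alloc_msix() cannot deliver that many, the code above falls back to a
 * single MSI (or, failing that, a legacy INTx) vector with one queue set of
 * each kind.
 */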
static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };

static int
mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
{
	int rc;
	uint16_t *state = ((uint16_t *)oidp->oid_arg1);
	struct sbuf *sb;
	const char *ring_state = "UNKNOWN";

	/* XXX needed ? */
	rc = sysctl_wire_old_buffer(req, 0);
	MPASS(rc == 0);
	if (rc != 0)
		return (rc);
	sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
	MPASS(sb != NULL);
	if (sb == NULL)
		return (ENOMEM);
	if (state[3] <= 3)
		ring_state = ring_states[state[3]];

	sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
	    state[0], state[1], state[2], ring_state);
	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}

enum iflib_ndesc_handler {
	IFLIB_NTXD_HANDLER,
	IFLIB_NRXD_HANDLER,
};

static int
mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
{
	if_ctx_t ctx = (void *)arg1;
	enum iflib_ndesc_handler type = arg2;
	char buf[256] = {0};
	qidx_t *ndesc;
	char *p, *next;
	int nqs, rc, i;

	MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER);

	nqs = 8;
	switch (type) {
	case IFLIB_NTXD_HANDLER:
		ndesc = ctx->ifc_sysctl_ntxds;
		if (ctx->ifc_sctx)
			nqs = ctx->ifc_sctx->isc_ntxqs;
		break;
	case IFLIB_NRXD_HANDLER:
		ndesc = ctx->ifc_sysctl_nrxds;
		if (ctx->ifc_sctx)
			nqs = ctx->ifc_sctx->isc_nrxqs;
		break;
	default:
		panic("unhandled type");
	}
	if (nqs == 0)
		nqs = 8;

	for (i = 0; i < 8; i++) {
		if (i >= nqs)
			break;
		if (i)
			strcat(buf, ",");
		sprintf(strchr(buf, 0), "%d", ndesc[i]);
	}

	rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (rc || req->newptr == NULL)
		return (rc);

	for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
	    i++, p = strsep(&next, " ,")) {
		ndesc[i] = strtoul(p, NULL, 10);
	}

	return (rc);
}
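/*
 * Usage example (the device name is hypothetical): the descriptor-count
 * overrides parsed above take a comma-separated list with one entry per
 * queue within a queue set, and since the OIDs below are CTLFLAG_RWTUN they
 * may also be set as loader tunables, e.g. in loader.conf:
 *
 *	dev.mydrv.0.iflib.override_ntxds="4096"
 *	dev.mydrv.0.iflib.override_nrxds="4096,2048"
 */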
#define NAME_BUFLEN 32
static void
iflib_add_device_sysctl_pre(if_ctx_t ctx)
{
	device_t dev = iflib_get_dev(ctx);
	struct sysctl_oid_list *child, *oid_list;
	struct sysctl_ctx_list *ctx_list;
	struct sysctl_oid *node;

	ctx_list = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child,
	    OID_AUTO, "iflib", CTLFLAG_RD, NULL, "IFLIB fields");
	oid_list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
	    CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
	    "driver version");

	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
	    "# of txqs to use, 0 => use default #");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
	    "# of rxqs to use, 0 => use default #");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
	    "permit #txq != #rxq");
	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
	    CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
	    "disable MSIX (default 0)");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
	    "set the rx budget");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
	    "cause tx to abdicate instead of running to completion");

	/* XXX change for per-queue sizes */
	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
	    CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
	    mp_ndesc_handler, "A",
	    "list of # of tx descriptors to use, 0 = use default #");
	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
	    CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
	    mp_ndesc_handler, "A",
	    "list of # of rx descriptors to use, 0 = use default #");
}

static void
iflib_add_device_sysctl_post(if_ctx_t ctx)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = iflib_get_dev(ctx);
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx_list;
	iflib_fl_t fl;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	int i, j;
	char namebuf[NAME_BUFLEN];
	char *qfmt;
	struct sysctl_oid *queue_node, *fl_node, *node;
	struct sysctl_oid_list *queue_list, *fl_list;

	ctx_list = device_get_sysctl_ctx(dev);

	node = ctx->ifc_sysctl_node;
	child = SYSCTL_CHILDREN(node);

	if (scctx->isc_ntxqsets > 100)
		qfmt = "txq%03d";
	else if (scctx->isc_ntxqsets > 10)
		qfmt = "txq%02d";
	else
		qfmt = "txq%d";
	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
#if MEMORY_LOGGING
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
		    CTLFLAG_RD,
		    &txq->ift_dequeued, "total mbufs freed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
		    CTLFLAG_RD,
		    &txq->ift_enqueued, "total mbufs enqueued");
#endif
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
		    CTLFLAG_RD,
		    &txq->ift_mbuf_defrag, "# of times m_defrag was called");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
		    CTLFLAG_RD,
		    &txq->ift_pullups, "# of times m_pullup was called");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
		    CTLFLAG_RD,
		    &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD,
		    &txq->ift_no_desc_avail, "# of times no descriptors were available");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
		    CTLFLAG_RD,
		    &txq->ift_map_failed, "# of times dma map failed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
		    CTLFLAG_RD,
		    &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
		    CTLFLAG_RD,
		    &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
		    CTLFLAG_RD,
		    &txq->ift_pidx, 1, "Producer Index");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
		    CTLFLAG_RD,
		    &txq->ift_cidx, 1, "Consumer Index");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
		    CTLFLAG_RD,
		    &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
		    CTLFLAG_RD,
		    &txq->ift_in_use, 1, "descriptors in use");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
		    CTLFLAG_RD,
		    &txq->ift_processed, "descriptors processed for clean");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
		    CTLFLAG_RD,
		    &txq->ift_cleaned, "total cleaned");
		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
		    CTLTYPE_STRING | CTLFLAG_RD,
		    __DEVOLATILE(uint64_t *, &txq->ift_br->state),
		    0, mp_ring_state_handler, "A", "soft ring state");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
		    CTLFLAG_RD, &txq->ift_br->enqueues,
		    "# of enqueues to the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
		    CTLFLAG_RD, &txq->ift_br->drops,
		    "# of drops in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
		    CTLFLAG_RD, &txq->ift_br->starts,
		    "# of normal consumer starts in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
		    CTLFLAG_RD, &txq->ift_br->stalls,
		    "# of consumer stalls in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
		    CTLFLAG_RD, &txq->ift_br->restarts,
		    "# of consumer restarts in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
		    CTLFLAG_RD, &txq->ift_br->abdications,
		    "# of consumer abdications in the mp_ring for this queue");
	}

	if (scctx->isc_nrxqsets > 100)
		qfmt = "rxq%03d";
	else if (scctx->isc_nrxqsets > 10)
		qfmt = "rxq%02d";
	else
		qfmt = "rxq%d";
	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx",
			    CTLFLAG_RD,
			    &rxq->ifr_cq_pidx, 1, "Producer Index");
			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
			    CTLFLAG_RD,
			    &rxq->ifr_cq_cidx, 1, "Consumer Index");
		}

		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf,
			    CTLFLAG_RD, NULL, "freelist Name");
			fl_list = SYSCTL_CHILDREN(fl_node);
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
			    CTLFLAG_RD,
			    &fl->ifl_pidx, 1, "Producer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
			    CTLFLAG_RD,
			    &fl->ifl_cidx, 1, "Consumer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
			    CTLFLAG_RD,
			    &fl->ifl_credits, 1, "credits available");
#if MEMORY_LOGGING
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
			    CTLFLAG_RD,
			    &fl->ifl_m_enqueued, "mbufs allocated");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
			    CTLFLAG_RD,
			    &fl->ifl_m_dequeued, "mbufs freed");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
			    CTLFLAG_RD,
			    &fl->ifl_cl_enqueued, "clusters allocated");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
			    CTLFLAG_RD,
			    &fl->ifl_cl_dequeued, "clusters freed");
#endif
		}
	}
}

#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
iflib_fixup_rx(struct mbuf *m)
{
	struct mbuf *n;

	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		MGETHDR(n, M_NOWAIT, MT_DATA);
		if (n == NULL) {
			m_freem(m);
			return (NULL);
		}
		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
		m->m_data += ETHER_HDR_LEN;
		m->m_len -= ETHER_HDR_LEN;
		n->m_len = ETHER_HDR_LEN;
		M_MOVE_PKTHDR(n, m);
		n->m_next = m;
	}
	return (n);
}
#endif
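/*
 * Worked example (for illustration): a receive buffer starts 32-bit
 * aligned, so after the 14-byte Ethernet header the IP header would sit at
 * offset 14, which is only 16-bit aligned.  On strict-alignment machines,
 * iflib_fixup_rx() above shifts the frame forward by ETHER_HDR_LEN (or, for
 * frames too large to shift in place, splits the Ethernet header into a
 * separate mbuf) so that the IP header lands on a 32-bit boundary before
 * the packet is handed up the stack.
 */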
#ifdef NETDUMP
static void
iflib_netdump_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
{
	if_ctx_t ctx;

	ctx = if_getsoftc(ifp);
	CTX_LOCK(ctx);
	*nrxr = NRXQSETS(ctx);
	*ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
	*clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
	CTX_UNLOCK(ctx);
}

static void
iflib_netdump_event(struct ifnet *ifp, enum netdump_ev event)
{
	if_ctx_t ctx;
	if_softc_ctx_t scctx;
	iflib_fl_t fl;
	iflib_rxq_t rxq;
	int i, j;

	ctx = if_getsoftc(ifp);
	scctx = &ctx->ifc_softc_ctx;

	switch (event) {
	case NETDUMP_START:
		for (i = 0; i < scctx->isc_nrxqsets; i++) {
			rxq = &ctx->ifc_rxqs[i];
			for (j = 0; j < rxq->ifr_nfl; j++) {
				/* Index each free list, not just fl 0. */
				fl = &rxq->ifr_fl[j];
				fl->ifl_zone = m_getzone(fl->ifl_buf_size);
			}
		}
		iflib_no_tx_batch = 1;
		break;
	default:
		break;
	}
}

static int
iflib_netdump_transmit(struct ifnet *ifp, struct mbuf *m)
{
	if_ctx_t ctx;
	iflib_txq_t txq;
	int error;

	ctx = if_getsoftc(ifp);
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	txq = &ctx->ifc_txqs[0];
	error = iflib_encap(txq, &m);
	if (error == 0)
		(void)iflib_txd_db_check(ctx, txq, true, txq->ift_in_use);
	return (error);
}

static int
iflib_netdump_poll(struct ifnet *ifp, int count)
{
	if_ctx_t ctx;
	if_softc_ctx_t scctx;
	iflib_txq_t txq;
	int i;

	ctx = if_getsoftc(ifp);
	scctx = &ctx->ifc_softc_ctx;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	txq = &ctx->ifc_txqs[0];
	(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));

	for (i = 0; i < scctx->isc_nrxqsets; i++)
		(void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
	return (0);
}
#endif /* NETDUMP */
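/*
 * Note (informational): the handlers above are hooked up through the
 * netdump method glue in <netinet/netdump/netdump.h>; the methods are
 * declared once with NETDUMP_DEFINE() and attached to each interface with
 * NETDUMP_SET(ifp, iflib) (typically at device registration), so that
 * netdump can drive the interface by polling after a panic, without
 * relying on interrupts.
 */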