/*-
 * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Neither the name of Matthew Macy nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_acpi.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/md5.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/kobj.h>
#include <sys/rman.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/limits.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/mp_ring.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_lro.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/ip_var.h>
#include <netinet/netdump/netdump.h>
#include <netinet6/ip6_var.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/led/led.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>

#include <net/iflib.h>
#include <net/iflib_private.h>

#include "ifdi_if.h"

#if defined(__i386__) || defined(__amd64__)
#include <sys/memdesc.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/busdma_dmar.h>
#endif

#include <sys/bitstring.h>
/*
 * enable accounting of every mbuf as it comes in to and goes out of
 * iflib's software descriptor references
 */
#define MEMORY_LOGGING 0
/*
 * Enable mbuf vectors for compressing long mbuf chains
 */

/*
 * NB:
 * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead
 *   we prefetch needs to be determined by the time spent in m_free vis a vis
 *   the cost of a prefetch. This will of course vary based on the workload:
 *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
 *        is quite expensive, thus suggesting very little prefetch.
 *      - small packet forwarding which is just returning a single mbuf to
 *        UMA will typically be very fast vis a vis the cost of a memory
 *        access.
 */


/*
 * File organization:
 * - private structures
 * - iflib private utility functions
 * - ifnet functions
 * - vlan registry and other exported functions
 * - iflib public core functions
 */
MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");

struct iflib_txq;
typedef struct iflib_txq *iflib_txq_t;
struct iflib_rxq;
typedef struct iflib_rxq *iflib_rxq_t;
struct iflib_fl;
typedef struct iflib_fl *iflib_fl_t;

struct iflib_ctx;

static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
static void iflib_timer(void *arg);

typedef struct iflib_filter_info {
	driver_filter_t *ifi_filter;
	void *ifi_filter_arg;
	struct grouptask *ifi_task;
	void *ifi_ctx;
} *iflib_filter_info_t;

struct iflib_ctx {
	KOBJ_FIELDS;
	/*
	 * Pointer to hardware driver's softc
	 */
	void *ifc_softc;
	device_t ifc_dev;
	if_t ifc_ifp;

	cpuset_t ifc_cpus;
	if_shared_ctx_t ifc_sctx;
	struct if_softc_ctx ifc_softc_ctx;

	struct sx ifc_ctx_sx;
	struct mtx ifc_state_mtx;

	uint16_t ifc_nhwtxqs;

	iflib_txq_t ifc_txqs;
	iflib_rxq_t ifc_rxqs;
	uint32_t ifc_if_flags;
	uint32_t ifc_flags;
	uint32_t ifc_max_fl_buf_size;
	int ifc_in_detach;

	int ifc_link_state;
	int ifc_link_irq;
	int ifc_watchdog_events;
	struct cdev *ifc_led_dev;
	struct resource *ifc_msix_mem;

	struct if_irq ifc_legacy_irq;
	struct grouptask ifc_admin_task;
	struct grouptask ifc_vflr_task;
	struct iflib_filter_info ifc_filter_info;
	struct ifmedia ifc_media;

	struct sysctl_oid *ifc_sysctl_node;
	uint16_t ifc_sysctl_ntxqs;
	uint16_t ifc_sysctl_nrxqs;
	uint16_t ifc_sysctl_qs_eq_override;
	uint16_t ifc_sysctl_rx_budget;
	uint16_t ifc_sysctl_tx_abdicate;

	qidx_t ifc_sysctl_ntxds[8];
	qidx_t ifc_sysctl_nrxds[8];
	struct if_txrx ifc_txrx;
#define isc_txd_encap ifc_txrx.ift_txd_encap
#define isc_txd_flush ifc_txrx.ift_txd_flush
#define isc_txd_credits_update ifc_txrx.ift_txd_credits_update
#define isc_rxd_available ifc_txrx.ift_rxd_available
#define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
#define isc_rxd_refill ifc_txrx.ift_rxd_refill
#define isc_rxd_flush ifc_txrx.ift_rxd_flush
#define isc_legacy_intr ifc_txrx.ift_legacy_intr
	eventhandler_tag ifc_vlan_attach_event;
	eventhandler_tag ifc_vlan_detach_event;
	uint8_t ifc_mac[ETHER_ADDR_LEN];
	char ifc_mtx_name[16];
};
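
/*
 * The isc_* defines above are shorthand for the driver-supplied if_txrx
 * callback vector embedded in the context.  For example (illustrative),
 *
 *	ctx->isc_txd_encap(ctx->ifc_softc, &pi);
 *
 * expands to
 *
 *	ctx->ifc_txrx.ift_txd_encap(ctx->ifc_softc, &pi);
 *
 * i.e. a direct call into the hardware driver's transmit encap routine.
 */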

void *
iflib_get_softc(if_ctx_t ctx)
{

	return (ctx->ifc_softc);
}

device_t
iflib_get_dev(if_ctx_t ctx)
{

	return (ctx->ifc_dev);
}

if_t
iflib_get_ifp(if_ctx_t ctx)
{

	return (ctx->ifc_ifp);
}

struct ifmedia *
iflib_get_media(if_ctx_t ctx)
{

	return (&ctx->ifc_media);
}

uint32_t
iflib_get_flags(if_ctx_t ctx)
{
	return (ctx->ifc_flags);
}

void
iflib_set_detach(if_ctx_t ctx)
{
	ctx->ifc_in_detach = 1;
}

void
iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
{

	bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN);
}

if_softc_ctx_t
iflib_get_softc_ctx(if_ctx_t ctx)
{

	return (&ctx->ifc_softc_ctx);
}

if_shared_ctx_t
iflib_get_sctx(if_ctx_t ctx)
{

	return (ctx->ifc_sctx);
}

#define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
#define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
#define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)CACHE_LINE_SIZE-1)))

#define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
#define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)

#define RX_SW_DESC_MAP_CREATED	(1 << 0)
#define TX_SW_DESC_MAP_CREATED	(1 << 1)
#define RX_SW_DESC_INUSE	(1 << 3)
#define TX_SW_DESC_MAPPED	(1 << 4)

#define M_TOOBIG	M_PROTO1

typedef struct iflib_sw_rx_desc_array {
	bus_dmamap_t	*ifsd_map;	/* bus_dma maps for packet */
	struct mbuf	**ifsd_m;	/* pkthdr mbufs */
	caddr_t		*ifsd_cl;	/* direct cluster pointer for rx */
	uint8_t		*ifsd_flags;
} iflib_rxsd_array_t;

typedef struct iflib_sw_tx_desc_array {
	bus_dmamap_t	*ifsd_map;	/* bus_dma maps for packet */
	struct mbuf	**ifsd_m;	/* pkthdr mbufs */
	uint8_t		*ifsd_flags;
} if_txsd_vec_t;


/* magic number that should be high enough for any hardware */
#define IFLIB_MAX_TX_SEGS		128
/* bnxt supports 64 with hardware LRO enabled */
#define IFLIB_MAX_RX_SEGS		64
#define IFLIB_RX_COPY_THRESH		128
#define IFLIB_MAX_RX_REFRESH		32
/* The minimum descriptors per second before we start coalescing */
#define IFLIB_MIN_DESC_SEC		16384
#define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
#define IFLIB_QUEUE_IDLE		0
#define IFLIB_QUEUE_HUNG		1
#define IFLIB_QUEUE_WORKING		2
/* maximum number of txqs that can share an rx interrupt */
#define IFLIB_MAX_TX_SHARED_INTR	4

/* this should really scale with ring size - this is a fairly arbitrary value */
#define TX_BATCH_SIZE			32

#define IFLIB_RESTART_BUDGET		8


#define CSUM_OFFLOAD		(CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
				 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
				 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
struct iflib_txq {
	qidx_t		ift_in_use;
	qidx_t		ift_cidx;
	qidx_t		ift_cidx_processed;
	qidx_t		ift_pidx;
	uint8_t		ift_gen;
	uint8_t		ift_br_offset;
	uint16_t	ift_npending;
	uint16_t	ift_db_pending;
	uint16_t	ift_rs_pending;
	/* implicit pad */
	uint8_t		ift_txd_size[8];
	uint64_t	ift_processed;
	uint64_t	ift_cleaned;
	uint64_t	ift_cleaned_prev;
#if MEMORY_LOGGING
	uint64_t	ift_enqueued;
	uint64_t	ift_dequeued;
#endif
	uint64_t	ift_no_tx_dma_setup;
	uint64_t	ift_no_desc_avail;
	uint64_t	ift_mbuf_defrag_failed;
	uint64_t	ift_mbuf_defrag;
	uint64_t	ift_map_failed;
	uint64_t	ift_txd_encap_efbig;
	uint64_t	ift_pullups;
	uint64_t	ift_last_timer_tick;

	struct mtx	ift_mtx;
	struct mtx	ift_db_mtx;

	/* constant values */
	if_ctx_t	ift_ctx;
	struct ifmp_ring	*ift_br;
	struct grouptask	ift_task;
	qidx_t		ift_size;
	uint16_t	ift_id;
	struct callout	ift_timer;

	if_txsd_vec_t	ift_sds;
	uint8_t		ift_qstatus;
	uint8_t		ift_closed;
	uint8_t		ift_update_freq;
	struct iflib_filter_info ift_filter_info;
	bus_dma_tag_t	ift_desc_tag;
	bus_dma_tag_t	ift_tso_desc_tag;
	iflib_dma_info_t	ift_ifdi;
#define MTX_NAME_LEN 16
	char			ift_mtx_name[MTX_NAME_LEN];
	char			ift_db_mtx_name[MTX_NAME_LEN];
	bus_dma_segment_t	ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ift_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

struct iflib_fl {
	qidx_t		ifl_cidx;
	qidx_t		ifl_pidx;
	qidx_t		ifl_credits;
	uint8_t		ifl_gen;
	uint8_t		ifl_rxd_size;
#if MEMORY_LOGGING
	uint64_t	ifl_m_enqueued;
	uint64_t	ifl_m_dequeued;
	uint64_t	ifl_cl_enqueued;
	uint64_t	ifl_cl_dequeued;
#endif
	/* implicit pad */

	bitstr_t	*ifl_rx_bitmap;
	qidx_t		ifl_fragidx;
	/* constant */
	qidx_t		ifl_size;
	uint16_t	ifl_buf_size;
	uint16_t	ifl_cltype;
	uma_zone_t	ifl_zone;
	iflib_rxsd_array_t	ifl_sds;
	iflib_rxq_t	ifl_rxq;
	uint8_t		ifl_id;
	bus_dma_tag_t	ifl_desc_tag;
	iflib_dma_info_t	ifl_ifdi;
	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
	caddr_t		ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
	qidx_t		ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
} __aligned(CACHE_LINE_SIZE);

static inline qidx_t
get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
{
	qidx_t used;

	if (pidx > cidx)
		used = pidx - cidx;
	else if (pidx < cidx)
		used = size - cidx + pidx;
	else if (gen == 0 && pidx == cidx)
		used = 0;
	else if (gen == 1 && pidx == cidx)
		used = size;
	else
		panic("bad state");

	return (used);
}

#define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))

#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
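
/*
 * Worked example (illustrative): with a ring of 1024 descriptors,
 * get_inuse(1024, 100, 250, 0) == 150 (pidx ahead of cidx), and
 * get_inuse(1024, 1000, 50, 0) == 74 (pidx wrapped: 1024 - 1000 + 50).
 * When pidx == cidx the generation bit disambiguates an empty ring
 * (gen == 0, 0 in use) from a full one (gen == 1, all 1024 in use).
 * TXQ_AVAIL() is simply the ring size minus this in-use count, and
 * IDXDIFF() is the same modular distance without the generation bit.
 */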

struct iflib_rxq {
	/* If there is a separate completion queue -
	 * these are the cq cidx and pidx. Otherwise
	 * these are unused.
	 */
	qidx_t		ifr_size;
	qidx_t		ifr_cq_cidx;
	qidx_t		ifr_cq_pidx;
	uint8_t		ifr_cq_gen;
	uint8_t		ifr_fl_offset;

	if_ctx_t	ifr_ctx;
	iflib_fl_t	ifr_fl;
	uint64_t	ifr_rx_irq;
	uint16_t	ifr_id;
	uint8_t		ifr_lro_enabled;
	uint8_t		ifr_nfl;
	uint8_t		ifr_ntxqirq;
	uint8_t		ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
	struct lro_ctrl		ifr_lc;
	struct grouptask	ifr_task;
	struct iflib_filter_info ifr_filter_info;
	iflib_dma_info_t	ifr_ifdi;

	/* dynamically allocate if any drivers need a value substantially larger than this */
	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ifr_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

typedef struct if_rxsd {
	caddr_t *ifsd_cl;
	struct mbuf **ifsd_m;
	iflib_fl_t ifsd_fl;
	qidx_t ifsd_cidx;
} *if_rxsd_t;

/* multiple of word size */
#ifdef __LP64__
#define PKT_INFO_SIZE	6
#define RXD_INFO_SIZE	5
#define PKT_TYPE uint64_t
#else
#define PKT_INFO_SIZE	11
#define RXD_INFO_SIZE	8
#define PKT_TYPE uint32_t
#endif
#define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
#define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)

typedef struct if_pkt_info_pad {
	PKT_TYPE pkt_val[PKT_INFO_SIZE];
} *if_pkt_info_pad_t;
typedef struct if_rxd_info_pad {
	PKT_TYPE rxd_val[RXD_INFO_SIZE];
} *if_rxd_info_pad_t;

CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));


static inline void
pkt_info_zero(if_pkt_info_t pi)
{
	if_pkt_info_pad_t pi_pad;

	pi_pad = (if_pkt_info_pad_t)pi;
	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
#ifndef __LP64__
	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
#endif
}

static device_method_t iflib_pseudo_methods[] = {
	DEVMETHOD(device_attach, noop_attach),
	DEVMETHOD(device_detach, iflib_pseudo_detach),
	DEVMETHOD_END
};

driver_t iflib_pseudodriver = {
	"iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx),
};

static inline void
rxd_info_zero(if_rxd_info_t ri)
{
	if_rxd_info_pad_t ri_pad;
	int i;

	ri_pad = (if_rxd_info_pad_t)ri;
	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
		ri_pad->rxd_val[i] = 0;
		ri_pad->rxd_val[i+1] = 0;
		ri_pad->rxd_val[i+2] = 0;
		ri_pad->rxd_val[i+3] = 0;
	}
#ifdef __LP64__
	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
#endif
}
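
/*
 * pkt_info_zero() and rxd_info_zero() rely on the CTASSERTs above pairing
 * each public structure with a padded view of identical size, so the
 * structure can be cleared with a fixed number of word-sized stores rather
 * than a memset() call.  A minimal sketch of the idiom, using a
 * hypothetical struct foo:
 *
 *	typedef struct foo_pad { uint64_t val[2]; } *foo_pad_t;
 *	CTASSERT(sizeof(struct foo_pad) == sizeof(struct foo));
 *	...
 *	((foo_pad_t)f)->val[0] = 0;
 *	((foo_pad_t)f)->val[1] = 0;
 *
 * If struct foo ever grows, the CTASSERT fails at compile time instead of
 * silently leaving trailing fields uninitialized.
 */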

/*
 * Only allow a single packet to take up at most 1/nth of the tx ring
 */
#define MAX_SINGLE_PACKET_FRACTION 12
#define IF_BAD_DMA (bus_addr_t)-1

#define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))

#define CTX_LOCK_INIT(_sc)  sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock")
#define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx)
#define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx)
#define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx)


#define STATE_LOCK_INIT(_sc, _name)  mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
#define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
#define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
#define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)



#define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
#define CALLOUT_UNLOCK(txq)	mtx_unlock(&txq->ift_mtx)


/* Our boot-time initialization hook */
static int	iflib_module_event_handler(module_t, int, void *);

static moduledata_t iflib_moduledata = {
	"iflib",
	iflib_module_event_handler,
	NULL
};

DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
MODULE_VERSION(iflib, 1);

MODULE_DEPEND(iflib, pci, 1, 1, 1);
MODULE_DEPEND(iflib, ether, 1, 1, 1);

TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
TASKQGROUP_DEFINE(if_config_tqg, 1, 1);

#ifndef IFLIB_DEBUG_COUNTERS
#ifdef INVARIANTS
#define IFLIB_DEBUG_COUNTERS 1
#else
#define IFLIB_DEBUG_COUNTERS 0
#endif /* !INVARIANTS */
#endif

static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0,
    "iflib driver parameters");

/*
 * XXX need to ensure that this can't accidentally cause the head to be moved backwards
 */
static int iflib_min_tx_latency = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
    &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
static int iflib_no_tx_batch = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
    &iflib_no_tx_batch, 0, "disable transmit batching at the possible expense of throughput");
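
/*
 * Both tunables are runtime-writable, e.g. (illustrative):
 *
 *	# sysctl net.iflib.min_tx_latency=1
 *
 * trades doorbell batching for lower per-packet latency.  The counters
 * below (net.iflib.tx_seen, net.iflib.rx_allocs, ...) are read-only and
 * are only compiled in when IFLIB_DEBUG_COUNTERS is non-zero, the default
 * for INVARIANTS kernels.
 */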


#if IFLIB_DEBUG_COUNTERS

static int iflib_tx_seen;
static int iflib_tx_sent;
static int iflib_tx_encap;
static int iflib_rx_allocs;
static int iflib_fl_refills;
static int iflib_fl_refills_large;
static int iflib_tx_frees;

SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
    &iflib_tx_seen, 0, "# tx mbufs seen");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
    &iflib_tx_sent, 0, "# tx mbufs sent");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
    &iflib_tx_encap, 0, "# tx mbufs encapped");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
    &iflib_tx_frees, 0, "# tx frees");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
    &iflib_rx_allocs, 0, "# rx allocations");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
    &iflib_fl_refills, 0, "# refills");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
    &iflib_fl_refills_large, 0, "# large refills");


static int iflib_txq_drain_flushing;
static int iflib_txq_drain_oactive;
static int iflib_txq_drain_notready;
static int iflib_txq_drain_encapfail;

SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
    &iflib_txq_drain_flushing, 0, "# drain flushes");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
    &iflib_txq_drain_oactive, 0, "# drain oactives");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
    &iflib_txq_drain_notready, 0, "# drain notready");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_encapfail, CTLFLAG_RD,
    &iflib_txq_drain_encapfail, 0, "# drain encap fails");


static int iflib_encap_load_mbuf_fail;
static int iflib_encap_pad_mbuf_fail;
static int iflib_encap_txq_avail_fail;
static int iflib_encap_txd_encap_fail;

SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
    &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
    &iflib_encap_txd_encap_fail, 0, "# driver encap failures");

static int iflib_task_fn_rxs;
static int iflib_rx_intr_enables;
static int iflib_fast_intrs;
static int iflib_intr_link;
static int iflib_intr_msix;
static int iflib_rx_unavail;
static int iflib_rx_ctx_inactive;
static int iflib_rx_zero_len;
static int iflib_rx_if_input;
static int iflib_rx_mbuf_null;
static int iflib_rxd_flush;

static int iflib_verbose_debug;

SYSCTL_INT(_net_iflib, OID_AUTO, intr_link, CTLFLAG_RD,
    &iflib_intr_link, 0, "# intr link calls");
SYSCTL_INT(_net_iflib, OID_AUTO, intr_msix, CTLFLAG_RD,
    &iflib_intr_msix, 0, "# intr msix calls");
SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
    &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
    &iflib_rx_intr_enables, 0, "# rx intr enables");
SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
    &iflib_fast_intrs, 0, "# fast_intr calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
    &iflib_rx_unavail, 0, "# times rxeof called with no available data");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
    &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_zero_len, CTLFLAG_RD,
    &iflib_rx_zero_len, 0, "# times rxeof saw zero len mbuf");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
    &iflib_rx_if_input, 0, "# times rxeof called if_input");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD,
    &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf");
SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
    &iflib_rxd_flush, 0, "# times rxd_flush called");
SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
    &iflib_verbose_debug, 0, "enable verbose debugging");

#define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
static void
iflib_debug_reset(void)
{
	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
	    iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
	    iflib_txq_drain_flushing = iflib_txq_drain_oactive =
	    iflib_txq_drain_notready = iflib_txq_drain_encapfail =
	    iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
	    iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
	    iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
	    iflib_intr_link = iflib_intr_msix = iflib_rx_unavail =
	    iflib_rx_ctx_inactive = iflib_rx_zero_len = iflib_rx_if_input =
	    iflib_rx_mbuf_null = iflib_rxd_flush = 0;
}

#else
#define DBG_COUNTER_INC(name)
static void iflib_debug_reset(void) {}
#endif

#define IFLIB_DEBUG 0

static void iflib_tx_structures_free(if_ctx_t ctx);
static void iflib_rx_structures_free(if_ctx_t ctx);
static int iflib_queues_alloc(if_ctx_t ctx);
static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
static int iflib_qset_structures_setup(if_ctx_t ctx);
static int iflib_msix_init(if_ctx_t ctx);
static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str);
static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
static int iflib_register(if_ctx_t);
static void iflib_init_locked(if_ctx_t ctx);
static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
static void iflib_add_device_sysctl_post(if_ctx_t ctx);
static void iflib_ifmp_purge(iflib_txq_t txq);
static void _iflib_pre_assert(if_softc_ctx_t scctx);
static void iflib_if_init_locked(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf * iflib_fixup_rx(struct mbuf *m);
#endif

NETDUMP_DEFINE(iflib);

#ifdef DEV_NETMAP
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

MODULE_DEPEND(iflib, netmap, 1, 1, 1);

static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init);

/*
 * device-specific sysctl variables:
 *
 * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
 *	During regular operations the CRC is stripped, but on some
 *	hardware reception of frames not multiple of 64 is slower,
 *	so using crcstrip=0 helps in benchmarks.
 *
 * iflib_rx_miss, iflib_rx_miss_bufs:
 *	count packets that might be missed due to lost interrupts.
 */
SYSCTL_DECL(_dev_netmap);
/*
 * The xl driver by default strips CRCs and we do not override it.
 */

int iflib_crcstrip = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
    CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames");

int iflib_rx_miss, iflib_rx_miss_bufs;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
    CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
    CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs");

/*
 * Register/unregister. We are already under netmap lock.
 * Only called on the first register or the last unregister.
 */
static int
iflib_netmap_register(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;
	int status;

	CTX_LOCK(ctx);
	IFDI_INTR_DISABLE(ctx);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	if (!CTX_IS_VF(ctx))
		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);

	/* enable or disable flags and callbacks in na and ifp */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	iflib_stop(ctx);
	iflib_init_locked(ctx);
	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ?
	status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
	if (status)
		nm_clear_native_flags(na);
	CTX_UNLOCK(ctx);
	return (status);
}

static int
netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init)
{
	struct netmap_adapter *na = kring->na;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int head = kring->rhead;
	struct netmap_ring *ring = kring->ring;
	bus_dmamap_t *map;
	struct if_rxd_update iru;
	if_ctx_t ctx = rxq->ifr_ctx;
	iflib_fl_t fl = &rxq->ifr_fl[0];
	uint32_t refill_pidx, nic_i;

	if (nm_i == head && __predict_true(!init))
		return 0;
	iru_init(&iru, rxq, 0 /* flid */);
	map = fl->ifl_sds.ifsd_map;
	refill_pidx = netmap_idx_k2n(kring, nm_i);
	/*
	 * IMPORTANT: we must leave one free slot in the ring,
	 * so move head back by one unit
	 */
	head = nm_prev(head, lim);
	nic_i = UINT_MAX;
	while (nm_i != head) {
		for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]);
			uint32_t nic_i_dma = refill_pidx;
			nic_i = netmap_idx_k2n(kring, nm_i);

			MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH);

			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
				return netmap_ring_reinit(kring);

			fl->ifl_vm_addrs[tmp_pidx] = addr;
			if (__predict_false(init) && map) {
				netmap_load_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
			} else if (map && (slot->flags & NS_BUF_CHANGED)) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
			}
			slot->flags &= ~NS_BUF_CHANGED;

			nm_i = nm_next(nm_i, lim);
			fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim);
			if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1)
				continue;

			iru.iru_pidx = refill_pidx;
			iru.iru_count = tmp_pidx+1;
			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);

			refill_pidx = nic_i;
			if (map == NULL)
				continue;

			for (int n = 0; n < iru.iru_count; n++) {
				bus_dmamap_sync(fl->ifl_ifdi->idi_tag, map[nic_i_dma],
				    BUS_DMASYNC_PREREAD);
				/* XXX - change this to not use the netmap func*/
				nic_i_dma = nm_next(nic_i_dma, lim);
			}
		}
	}
	kring->nr_hwcur = head;

	if (map)
		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (__predict_true(nic_i != UINT_MAX))
		ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
	return (0);
}
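
/*
 * nm_next()/nm_prev() are netmap's modular ring walkers:
 * nm_next(i, lim) == (i == lim) ? 0 : i + 1, with lim == nkr_num_slots - 1.
 * Worked example (illustrative): with 512 slots (lim == 511), nm_i == 510
 * and head == 2, the loop above refills slots 510, 511 and 0, leaving
 * slot 1 (the decremented head) untouched so the ring never appears
 * completely full.
 */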

/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * All information is in the kring.
 * Userspace wants to send packets up to the one before kring->rhead,
 * kernel knows kring->nr_hwcur is the first unsent packet.
 *
 * Here we push packets out (as many as possible), and possibly
 * reclaim buffers from previously completed transmission.
 *
 * The caller (netmap) guarantees that there is only one instance
 * running at any time. Any interference with other driver
 * methods should be handled by the individual drivers.
 */
static int
iflib_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap kring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct if_pkt_info pi;

	/*
	 * interrupts on every tx packet are expensive so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;
	/* device-specific */
	if_ctx_t ctx = ifp->if_softc;
	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];

	if (txq->ift_sds.ifsd_map)
		bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);


	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap kring,
	 * nic_i is the corresponding index in the NIC ring.
	 *
	 * If we have packets to send (nm_i != head)
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() call is especially expensive,
	 * even when (as in this case) the tag is 0, so do it only
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */

	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		pkt_info_zero(&pi);
		pi.ipi_segs = txq->ift_segs;
		pi.ipi_qsidx = kring->ring_id;
		nic_i = netmap_idx_k2n(kring, nm_i);

		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
		if (txq->ift_sds.ifsd_map)
			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);

		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int flags = (slot->flags & NS_REPORT ||
			    nic_i == 0 || nic_i == report_frequency) ?
			    IPI_TX_INTR : 0;

			/* device-specific */
			pi.ipi_len = len;
			pi.ipi_segs[0].ds_addr = paddr;
			pi.ipi_segs[0].ds_len = len;
			pi.ipi_nsegs = 1;
			pi.ipi_ndescs = 0;
			pi.ipi_pidx = nic_i;
			pi.ipi_flags = flags;

			/* Fill the slot in the NIC ring. */
			ctx->isc_txd_encap(ctx->ifc_softc, &pi);

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
			if (txq->ift_sds.ifsd_map) {
				__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);

				NM_CHECK_ADDR_LEN(na, addr, len);

				if (slot->flags & NS_BUF_CHANGED) {
					/* buffer has changed, reload map */
					netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
				}
				/* make sure changes to the buffer are synced */
				bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
				    BUS_DMASYNC_PREWRITE);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = nm_i;

		/* synchronize the NIC ring */
		if (txq->ift_sds.ifsd_map)
			bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 *
	 * If there are unclaimed buffers, attempt to reclaim them.
	 * If none are reclaimed, and TX IRQs are not in use, do an initial
	 * minimal delay, then trigger the tx handler which will spin in the
	 * group task queue.
	 */
	if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
		if (iflib_tx_credits_update(ctx, txq)) {
			/* some tx completed, increment avail */
			nic_i = txq->ift_cidx_processed;
			kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
		}
	}
	if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ))
		if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) {
			callout_reset_on(&txq->ift_timer, hz < 2000 ? 1 : hz / 1000,
			    iflib_timer, txq, txq->ift_timer.c_cpu);
		}
	return (0);
}
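
/*
 * The callout above re-arms the tx reclaim timer roughly once per
 * millisecond: with hz >= 2000 the delay is hz / 1000 ticks, and any
 * slower tick rate clamps to the 1-tick minimum, e.g. (illustrative)
 * hz == 1000 gives 1 tick == 1ms while hz == 100 gives 1 tick == 10ms.
 */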

/*
 * Reconcile kernel and user view of the receive ring.
 * Same as for the txsync, this routine must be efficient.
 * The caller guarantees a single invocation, but races against
 * the rest of the driver should be handled here.
 *
 * On call, kring->rhead is the first packet that userspace wants
 * to keep, and kring->rcur is the wakeup point.
 * The kernel has previously reported packets up to kring->rtail.
 *
 * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
 * of whether or not we received an interrupt.
 */
static int
iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	uint32_t nm_i;	/* index into the netmap ring */
	uint32_t nic_i;	/* index into the NIC ring */
	u_int i, n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
	struct if_rxd_info ri;

	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;
	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
	iflib_fl_t fl = rxq->ifr_fl;
	if (head > lim)
		return netmap_ring_reinit(kring);

	/* XXX check sync modes */
	for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
		if (fl->ifl_sds.ifsd_map == NULL)
			continue;
		bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}
	/*
	 * First part: import newly received packets.
	 *
	 * nm_i is the index of the next free slot in the netmap ring,
	 * nic_i is the index of the next received packet in the NIC ring,
	 * and they may differ in case if_init() has been called while
	 * in netmap mode. For the receive ring we have
	 *
	 *	nic_i = rxr->next_check;
	 *	nm_i = kring->nr_hwtail (previous)
	 * and
	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 *
	 * rxr->next_check is set to 0 on a ring reinit
	 */
	if (netmap_no_pendintr || force_update) {
		int crclen = iflib_crcstrip ? 0 : 4;
		int error, avail;

		for (i = 0; i < rxq->ifr_nfl; i++) {
			fl = &rxq->ifr_fl[i];
			nic_i = fl->ifl_cidx;
			nm_i = netmap_idx_n2k(kring, nic_i);
			avail = iflib_rxd_avail(ctx, rxq, nic_i, USHRT_MAX);
			for (n = 0; avail > 0; n++, avail--) {
				rxd_info_zero(&ri);
				ri.iri_frags = rxq->ifr_frags;
				ri.iri_qsidx = kring->ring_id;
				ri.iri_ifp = ctx->ifc_ifp;
				ri.iri_cidx = nic_i;

				error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
				ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
				ring->slot[nm_i].flags = 0;
				if (fl->ifl_sds.ifsd_map)
					bus_dmamap_sync(fl->ifl_ifdi->idi_tag,
					    fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
				nm_i = nm_next(nm_i, lim);
				nic_i = nm_next(nic_i, lim);
			}
			if (n) { /* update the state variables */
				if (netmap_no_pendintr && !force_update) {
					/* diagnostics */
					iflib_rx_miss++;
					iflib_rx_miss_bufs += n;
				}
				fl->ifl_cidx = nic_i;
				kring->nr_hwtail = nm_i;
			}
			kring->nr_kflags &= ~NKR_PENDINTR;
		}
	}
	/*
	 * Second part: skip past packets that userspace has released.
	 * (kring->nr_hwcur to head excluded),
	 * and make the buffers available for reception.
	 * As usual nm_i is the index in the netmap ring,
	 * nic_i is the index in the NIC ring, and
	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 */
	/* XXX not sure how this will work with multiple free lists */
	nm_i = kring->nr_hwcur;

	return (netmap_fl_refill(rxq, kring, nm_i, false));
}

static void
iflib_netmap_intr(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;

	CTX_LOCK(ctx);
	if (onoff) {
		IFDI_INTR_ENABLE(ctx);
	} else {
		IFDI_INTR_DISABLE(ctx);
	}
	CTX_UNLOCK(ctx);
}


static int
iflib_netmap_attach(if_ctx_t ctx)
{
	struct netmap_adapter na;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;

	bzero(&na, sizeof(na));

	na.ifp = ctx->ifc_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);

	na.num_tx_desc = scctx->isc_ntxd[0];
	na.num_rx_desc = scctx->isc_nrxd[0];
	na.nm_txsync = iflib_netmap_txsync;
	na.nm_rxsync = iflib_netmap_rxsync;
	na.nm_register = iflib_netmap_register;
	na.nm_intr = iflib_netmap_intr;
	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
	return (netmap_attach(&na));
}
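
/*
 * An iflib driver never calls netmap_attach() itself; iflib fills struct
 * netmap_adapter from the shared context and wires up the four callbacks
 * above.  In summary:
 *
 *	na.nm_txsync   -> iflib_netmap_txsync   (push tx slots, reclaim)
 *	na.nm_rxsync   -> iflib_netmap_rxsync   (harvest rx, refill)
 *	na.nm_register -> iflib_netmap_register (native mode on/off)
 *	na.nm_intr     -> iflib_netmap_intr     (device interrupt mask)
 */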

static void
iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_slot *slot;

	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
	if (slot == NULL)
		return;
	if (txq->ift_sds.ifsd_map == NULL)
		return;

	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {

		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
		 * netmap slot index, si
		 */
		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
		netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si));
	}
}

static void
iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_kring *kring = na->rx_rings[rxq->ifr_id];
	struct netmap_slot *slot;
	uint32_t nm_i;

	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
	if (slot == NULL)
		return;
	nm_i = netmap_idx_n2k(kring, 0);
	netmap_fl_refill(rxq, kring, nm_i, true);
}

static void
iflib_netmap_timer_adjust(if_ctx_t ctx, uint16_t txqid, uint32_t *reset_on)
{
	struct netmap_kring *kring;

	kring = NA(ctx->ifc_ifp)->tx_rings[txqid];

	if (kring->nr_hwcur != nm_next(kring->nr_hwtail, kring->nkr_num_slots - 1)) {
		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false))
			netmap_tx_irq(ctx->ifc_ifp, txqid);
		if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) {
			if (hz < 2000)
				*reset_on = 1;
			else
				*reset_on = hz / 1000;
		}
	}
}

#define iflib_netmap_detach(ifp) netmap_detach(ifp)

#else
#define iflib_netmap_txq_init(ctx, txq)
#define iflib_netmap_rxq_init(ctx, rxq)
#define iflib_netmap_detach(ifp)

#define iflib_netmap_attach(ctx) (0)
#define netmap_rx_irq(ifp, qid, budget) (0)
#define netmap_tx_irq(ifp, qid) do {} while (0)
#define iflib_netmap_timer_adjust(ctx, txqid, reset_on)

#endif

#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}
static __inline void
prefetch2cachelines(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
#if (CACHE_LINE_SIZE < 128)
	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
#endif
}
#else
#define prefetch(x)
#define prefetch2cachelines(x)
#endif

static void
iflib_gen_mac(if_ctx_t ctx)
{
	struct thread *td;
	MD5_CTX mdctx;
	char uuid[HOSTUUIDLEN+1];
	char buf[HOSTUUIDLEN+16];
	uint8_t *mac;
	unsigned char digest[16];

	td = curthread;
	mac = ctx->ifc_mac;
	uuid[HOSTUUIDLEN] = 0;
	bcopy(td->td_ucred->cr_prison->pr_hostuuid, uuid, HOSTUUIDLEN);
	snprintf(buf, HOSTUUIDLEN+16, "%s-%s", uuid, device_get_nameunit(ctx->ifc_dev));
	/*
	 * Generate a pseudo-random, deterministic MAC
	 * address based on the UUID and unit number.
	 * The FreeBSD Foundation OUI of 58-9C-FC is used.
	 */
	MD5Init(&mdctx);
	MD5Update(&mdctx, buf, strlen(buf));
	MD5Final(digest, &mdctx);

	mac[0] = 0x58;
	mac[1] = 0x9C;
	mac[2] = 0xFC;
	mac[3] = digest[0];
	mac[4] = digest[1];
	mac[5] = digest[2];
}
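
/*
 * Example (illustrative): for a host uuid of
 * "00000000-0000-0000-0000-000000000000" and a device named "em0", the
 * generated address is 58:9c:fc followed by the first three bytes of
 * MD5("00000000-0000-0000-0000-000000000000-em0").  The same host and
 * unit therefore always derive the same MAC, while distinct units differ.
 */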

static void
iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
{
	iflib_fl_t fl;

	fl = &rxq->ifr_fl[flid];
	iru->iru_paddrs = fl->ifl_bus_addrs;
	iru->iru_vaddrs = &fl->ifl_vm_addrs[0];
	iru->iru_idxs = fl->ifl_rxd_idxs;
	iru->iru_qsidx = rxq->ifr_id;
	iru->iru_buf_size = fl->ifl_buf_size;
	iru->iru_flidx = fl->ifl_id;
}

static void
_iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	if (err)
		return;
	*(bus_addr_t *) arg = segs[0].ds_addr;
}

int
iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
{
	int err;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	device_t dev = ctx->ifc_dev;

	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));

	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    sctx->isc_q_align, 0,	/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &dma->idi_tag);
	if (err) {
		device_printf(dev,
		    "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto fail_0;
	}

	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
	if (err) {
		device_printf(dev,
		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
		    __func__, (uintmax_t)size, err);
		goto fail_1;
	}

	dma->idi_paddr = IF_BAD_DMA;
	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
	if (err || dma->idi_paddr == IF_BAD_DMA) {
		device_printf(dev,
		    "%s: bus_dmamap_load failed: %d\n",
		    __func__, err);
		goto fail_2;
	}

	dma->idi_size = size;
	return (0);

fail_2:
	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
fail_1:
	bus_dma_tag_destroy(dma->idi_tag);
fail_0:
	dma->idi_tag = NULL;

	return (err);
}

int
iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
{
	int i, err = 0;
	iflib_dma_info_t *dmaiter;

	dmaiter = dmalist;
	for (i = 0; i < count; i++, dmaiter++) {
		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
			break;
	}
	if (err)
		iflib_dma_free_multi(dmalist, i);
	return (err);
}

void
iflib_dma_free(iflib_dma_info_t dma)
{
	if (dma->idi_tag == NULL)
		return;
	if (dma->idi_paddr != IF_BAD_DMA) {
		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
		dma->idi_paddr = IF_BAD_DMA;
	}
	if (dma->idi_vaddr != NULL) {
		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
		dma->idi_vaddr = NULL;
	}
	bus_dma_tag_destroy(dma->idi_tag);
	dma->idi_tag = NULL;
}
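
/*
 * Typical driver usage is paired alloc/free around the queue lifetime
 * (a minimal sketch; the softc field and size are hypothetical):
 *
 *	iflib_dma_info_t di = &sc->rings_dma;
 *
 *	if (iflib_dma_alloc(ctx, 4096, di, 0))
 *		return (ENOMEM);
 *	(use di->idi_vaddr / di->idi_paddr: zeroed, bus-visible memory)
 *	...
 *	iflib_dma_free(di);
 */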

void
iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
{
	int i;
	iflib_dma_info_t *dmaiter = dmalist;

	for (i = 0; i < count; i++, dmaiter++)
		iflib_dma_free(*dmaiter);
}

#ifdef EARLY_AP_STARTUP
static const int iflib_started = 1;
#else
/*
 * We used to abuse the smp_started flag to decide if the queues have been
 * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()).
 * That gave bad races, since the SYSINIT() runs strictly after smp_started
 * is set. Run a SYSINIT() strictly after that to just set a usable
 * completion flag.
 */

static int iflib_started;

static void
iflib_record_started(void *arg)
{
	iflib_started = 1;
}

SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST,
    iflib_record_started, NULL);
#endif

static int
iflib_fast_intr(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;

	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}

static int
iflib_fast_intr_rxtx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
	if_ctx_t ctx = NULL;
	int i, cidx;

	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	MPASS(rxq->ifr_ntxqirq);
	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
		qidx_t txqid = rxq->ifr_txqid[i];

		ctx = rxq->ifr_ctx;

		if (!ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) {
			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
			continue;
		}
		GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
	}
	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
		cidx = rxq->ifr_cq_cidx;
	else
		cidx = rxq->ifr_fl[0].ifl_cidx;
	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
		GROUPTASK_ENQUEUE(gtask);
	else
		IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
	return (FILTER_HANDLED);
}


static int
iflib_fast_intr_ctx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;

	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}
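
/*
 * All three fast-interrupt handlers above share one pattern: run the
 * driver's optional hardware filter first and, only if it does not claim
 * the interrupt outright, defer the real work to a group task.  Sketch:
 *
 *	if (info->ifi_filter != NULL &&
 *	    info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
 *		return (FILTER_HANDLED);	(nothing more to do)
 *	GROUPTASK_ENQUEUE(gtask);		(run handler in task context)
 *	return (FILTER_HANDLED);
 *
 * Keeping the filter itself short and lock-free is what makes it safe to
 * run in hard interrupt context.
 */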

static int
_iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    const char *name)
{
	int rc, flags;
	struct resource *res;
	void *tag = NULL;
	device_t dev = ctx->ifc_dev;

	flags = RF_ACTIVE;
	if (ctx->ifc_flags & IFC_LEGACY)
		flags |= RF_SHAREABLE;
	MPASS(rid < 512);
	irq->ii_rid = rid;
	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags);
	if (res == NULL) {
		device_printf(dev,
		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
		return (ENOMEM);
	}
	irq->ii_res = res;
	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
	    filter, handler, arg, &tag);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, name ? name : "unknown", rc);
		return (rc);
	} else if (name)
		bus_describe_intr(dev, res, tag, "%s", name);

	irq->ii_tag = tag;
	return (0);
}


/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/

static int
iflib_txsd_alloc(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	bus_size_t tsomaxsize;
	int err, nsegments, ntsosegments;

	nsegments = scctx->isc_tx_nsegments;
	ntsosegments = scctx->isc_tx_tso_segments_max;
	tsomaxsize = scctx->isc_tx_tso_size_max;
	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU)
		tsomaxsize += sizeof(struct ether_vlan_header);
	MPASS(scctx->isc_ntxd[0] > 0);
	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
	MPASS(nsegments > 0);
	if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) {
		MPASS(ntsosegments > 0);
		MPASS(sctx->isc_tso_maxsize >= tsomaxsize);
	}

	/*
	 * Setup DMA descriptor areas.
	 */
	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    sctx->isc_tx_maxsize,	/* maxsize */
	    nsegments,			/* nsegments */
	    sctx->isc_tx_maxsegsize,	/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &txq->ift_desc_tag))) {
		device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err);
		device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n",
		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
		goto fail;
	}
	if ((if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) &&
	    (err = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    tsomaxsize,			/* maxsize */
	    ntsosegments,		/* nsegments */
	    sctx->isc_tso_maxsegsize,	/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &txq->ift_tso_desc_tag))) {
		device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err);

		goto fail;
	}
	if (!(txq->ift_sds.ifsd_flags =
	    (uint8_t *) malloc(sizeof(uint8_t) *
	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		err = ENOMEM;
		goto fail;
	}
	if (!(txq->ift_sds.ifsd_m =
	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		err = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
#if defined(ACPI_DMAR) || (!(defined(__i386__) || defined(__amd64__)))
	if ((ctx->ifc_flags & IFC_DMAR) == 0)
		return (0);

	if (!(txq->ift_sds.ifsd_map =
	    (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer map memory\n");
		err = ENOMEM;
		goto fail;
	}

	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
		err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]);
		if (err != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
	}
#endif
	return (0);
fail:
	/* We free all, it handles case where we are in the middle */
	iflib_tx_structures_free(ctx);
	return (err);
}

static void
iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
{
	bus_dmamap_t map;

	map = NULL;
	if (txq->ift_sds.ifsd_map != NULL)
		map = txq->ift_sds.ifsd_map[i];
	if (map != NULL) {
		bus_dmamap_unload(txq->ift_desc_tag, map);
		bus_dmamap_destroy(txq->ift_desc_tag, map);
		txq->ift_sds.ifsd_map[i] = NULL;
	}
}

static void
iflib_txq_destroy(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;

	for (int i = 0; i < txq->ift_size; i++)
		iflib_txsd_destroy(ctx, txq, i);
	if (txq->ift_sds.ifsd_map != NULL) {
		free(txq->ift_sds.ifsd_map, M_IFLIB);
		txq->ift_sds.ifsd_map = NULL;
	}
	if (txq->ift_sds.ifsd_m != NULL) {
		free(txq->ift_sds.ifsd_m, M_IFLIB);
		txq->ift_sds.ifsd_m = NULL;
	}
	if (txq->ift_sds.ifsd_flags != NULL) {
		free(txq->ift_sds.ifsd_flags, M_IFLIB);
		txq->ift_sds.ifsd_flags = NULL;
	}
	if (txq->ift_desc_tag != NULL) {
		bus_dma_tag_destroy(txq->ift_desc_tag);
		txq->ift_desc_tag = NULL;
	}
	if (txq->ift_tso_desc_tag != NULL) {
		bus_dma_tag_destroy(txq->ift_tso_desc_tag);
		txq->ift_tso_desc_tag = NULL;
	}
}

static void
iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
{
	struct mbuf **mp;

	mp = &txq->ift_sds.ifsd_m[i];
	if (*mp == NULL)
		return;

	if (txq->ift_sds.ifsd_map != NULL) {
		bus_dmamap_sync(txq->ift_desc_tag,
		    txq->ift_sds.ifsd_map[i],
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->ift_desc_tag,
		    txq->ift_sds.ifsd_map[i]);
	}
	m_free(*mp);
	DBG_COUNTER_INC(tx_frees);
	*mp = NULL;
}

static int
iflib_txq_setup(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	iflib_dma_info_t di;
	int i;

	/* Set number of descriptors available */
	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	/* XXX make configurable */
	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;

	/* Reset indices */
	txq->ift_cidx_processed = 0;
	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];

	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
		bzero((void *)di->idi_vaddr, di->idi_size);

	IFDI_TXQ_SETUP(ctx, txq->ift_id);
	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
		bus_dmamap_sync(di->idi_tag, di->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}
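
/*
 * The per-descriptor maps allocated above follow the usual busdma
 * lifecycle (sketch):
 *
 *	bus_dmamap_create()	at queue allocation (iflib_txsd_alloc)
 *	bus_dmamap_load()	when an mbuf is posted to the ring
 *	bus_dmamap_sync()	before/after device access
 *	bus_dmamap_unload()	when the buffer completes or is freed
 *	bus_dmamap_destroy()	at queue teardown (iflib_txsd_destroy)
 *
 * On x86 the maps are only created when a DMAR (IOMMU) is in use, since
 * physical addresses are otherwise obtained with a cheap pmap_kextract().
 */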

/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
iflib_rxsd_alloc(iflib_rxq_t rxq)
{
	if_ctx_t ctx = rxq->ifr_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	iflib_fl_t fl;
	int err;

	MPASS(scctx->isc_nrxd[0] > 0);
	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);

	fl = rxq->ifr_fl;
	for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
		    1, 0,			/* alignment, bounds */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    sctx->isc_rx_maxsize,	/* maxsize */
		    sctx->isc_rx_nsegments,	/* nsegments */
		    sctx->isc_rx_maxsegsize,	/* maxsegsize */
		    0,				/* flags */
		    NULL,			/* lockfunc */
		    NULL,			/* lockarg */
		    &fl->ifl_desc_tag);
		if (err) {
			device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
			    __func__, err);
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_flags =
		    (uint8_t *) malloc(sizeof(uint8_t) *
		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer memory\n");
			err = ENOMEM;
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_m =
		    (struct mbuf **) malloc(sizeof(struct mbuf *) *
		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer memory\n");
			err = ENOMEM;
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_cl =
		    (caddr_t *) malloc(sizeof(caddr_t) *
		    scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer memory\n");
			err = ENOMEM;
			goto fail;
		}

		/* Create the descriptor buffer dma maps */
#if defined(ACPI_DMAR) || (!(defined(__i386__) || defined(__amd64__)))
(defined(__i386__) || defined(__amd64__))) 1852 if ((ctx->ifc_flags & IFC_DMAR) == 0) 1853 continue; 1854 1855 if (!(fl->ifl_sds.ifsd_map = 1856 (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { 1857 device_printf(dev, "Unable to allocate tx_buffer map memory\n"); 1858 err = ENOMEM; 1859 goto fail; 1860 } 1861 1862 for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) { 1863 err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]); 1864 if (err != 0) { 1865 device_printf(dev, "Unable to create RX buffer DMA map\n"); 1866 goto fail; 1867 } 1868 } 1869 #endif 1870 } 1871 return (0); 1872 1873 fail: 1874 iflib_rx_structures_free(ctx); 1875 return (err); 1876 } 1877 1878 1879 /* 1880 * Internal service routines 1881 */ 1882 1883 struct rxq_refill_cb_arg { 1884 int error; 1885 bus_dma_segment_t seg; 1886 int nseg; 1887 }; 1888 1889 static void 1890 _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1891 { 1892 struct rxq_refill_cb_arg *cb_arg = arg; 1893 1894 cb_arg->error = error; 1895 cb_arg->seg = segs[0]; 1896 cb_arg->nseg = nseg; 1897 } 1898 1899 1900 #ifdef ACPI_DMAR 1901 #define IS_DMAR(ctx) (ctx->ifc_flags & IFC_DMAR) 1902 #else 1903 #define IS_DMAR(ctx) (0) 1904 #endif 1905 1906 /** 1907 * rxq_refill - refill an rxq free-buffer list 1908 * @ctx: the iflib context 1909 * @rxq: the free-list to refill 1910 * @n: the number of new buffers to allocate 1911 * 1912 * (Re)populate an rxq free-buffer list with up to @n new packet buffers. 1913 * The caller must assure that @n does not exceed the queue's capacity. 1914 */ 1915 static void 1916 _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count) 1917 { 1918 struct mbuf *m; 1919 int idx, frag_idx = fl->ifl_fragidx; 1920 int pidx = fl->ifl_pidx; 1921 caddr_t cl, *sd_cl; 1922 struct mbuf **sd_m; 1923 uint8_t *sd_flags; 1924 struct if_rxd_update iru; 1925 bus_dmamap_t *sd_map; 1926 int n, i = 0; 1927 uint64_t bus_addr; 1928 int err; 1929 qidx_t credits; 1930 1931 sd_m = fl->ifl_sds.ifsd_m; 1932 sd_map = fl->ifl_sds.ifsd_map; 1933 sd_cl = fl->ifl_sds.ifsd_cl; 1934 sd_flags = fl->ifl_sds.ifsd_flags; 1935 idx = pidx; 1936 credits = fl->ifl_credits; 1937 1938 n = count; 1939 MPASS(n > 0); 1940 MPASS(credits + n <= fl->ifl_size); 1941 1942 if (pidx < fl->ifl_cidx) 1943 MPASS(pidx + n <= fl->ifl_cidx); 1944 if (pidx == fl->ifl_cidx && (credits < fl->ifl_size)) 1945 MPASS(fl->ifl_gen == 0); 1946 if (pidx > fl->ifl_cidx) 1947 MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx); 1948 1949 DBG_COUNTER_INC(fl_refills); 1950 if (n > 8) 1951 DBG_COUNTER_INC(fl_refills_large); 1952 iru_init(&iru, fl->ifl_rxq, fl->ifl_id); 1953 while (n--) { 1954 /* 1955 * We allocate an uninitialized mbuf + cluster, mbuf is 1956 * initialized after rx. 
1957 * 1958 * If the cluster is still set then we know a minimum sized packet was received 1959 */ 1960 bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, &frag_idx); 1961 if ((frag_idx < 0) || (frag_idx >= fl->ifl_size)) 1962 bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx); 1963 if ((cl = sd_cl[frag_idx]) == NULL) { 1964 if ((cl = sd_cl[frag_idx] = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL) 1965 break; 1966 #if MEMORY_LOGGING 1967 fl->ifl_cl_enqueued++; 1968 #endif 1969 } 1970 if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) { 1971 break; 1972 } 1973 #if MEMORY_LOGGING 1974 fl->ifl_m_enqueued++; 1975 #endif 1976 1977 DBG_COUNTER_INC(rx_allocs); 1978 #if defined(__i386__) || defined(__amd64__) 1979 if (!IS_DMAR(ctx)) { 1980 bus_addr = pmap_kextract((vm_offset_t)cl); 1981 } else 1982 #endif 1983 { 1984 struct rxq_refill_cb_arg cb_arg; 1985 1986 cb_arg.error = 0; 1987 MPASS(sd_map != NULL); 1988 MPASS(sd_map[frag_idx] != NULL); 1989 err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx], 1990 cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0); 1991 bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx], 1992 BUS_DMASYNC_PREREAD); 1993 1994 if (err != 0 || cb_arg.error) { 1995 /* 1996 * !zone_pack ? 1997 */ 1998 if (fl->ifl_zone == zone_pack) 1999 uma_zfree(fl->ifl_zone, cl); 2000 m_free(m); 2001 n = 0; 2002 goto done; 2003 } 2004 bus_addr = cb_arg.seg.ds_addr; 2005 } 2006 bit_set(fl->ifl_rx_bitmap, frag_idx); 2007 sd_flags[frag_idx] |= RX_SW_DESC_INUSE; 2008 2009 MPASS(sd_m[frag_idx] == NULL); 2010 sd_cl[frag_idx] = cl; 2011 sd_m[frag_idx] = m; 2012 fl->ifl_rxd_idxs[i] = frag_idx; 2013 fl->ifl_bus_addrs[i] = bus_addr; 2014 fl->ifl_vm_addrs[i] = cl; 2015 credits++; 2016 i++; 2017 MPASS(credits <= fl->ifl_size); 2018 if (++idx == fl->ifl_size) { 2019 fl->ifl_gen = 1; 2020 idx = 0; 2021 } 2022 if (n == 0 || i == IFLIB_MAX_RX_REFRESH) { 2023 iru.iru_pidx = pidx; 2024 iru.iru_count = i; 2025 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); 2026 i = 0; 2027 pidx = idx; 2028 fl->ifl_pidx = idx; 2029 fl->ifl_credits = credits; 2030 } 2031 2032 } 2033 done: 2034 if (i) { 2035 iru.iru_pidx = pidx; 2036 iru.iru_count = i; 2037 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); 2038 fl->ifl_pidx = idx; 2039 fl->ifl_credits = credits; 2040 } 2041 DBG_COUNTER_INC(rxd_flush); 2042 if (fl->ifl_pidx == 0) 2043 pidx = fl->ifl_size - 1; 2044 else 2045 pidx = fl->ifl_pidx - 1; 2046 2047 if (sd_map) 2048 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 2049 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2050 ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx); 2051 fl->ifl_fragidx = frag_idx; 2052 } 2053 2054 static __inline void 2055 __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max) 2056 { 2057 /* we avoid allowing pidx to catch up with cidx as it confuses ixl */ 2058 int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1; 2059 #ifdef INVARIANTS 2060 int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1; 2061 #endif 2062 2063 MPASS(fl->ifl_credits <= fl->ifl_size); 2064 MPASS(reclaimable == delta); 2065 2066 if (reclaimable > 0) 2067 _iflib_fl_refill(ctx, fl, min(max, reclaimable)); 2068 } 2069 2070 static void 2071 iflib_fl_bufs_free(iflib_fl_t fl) 2072 { 2073 iflib_dma_info_t idi = fl->ifl_ifdi; 2074 uint32_t i; 2075 2076 for (i = 0; i < fl->ifl_size; i++) { 2077 struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i]; 2078 uint8_t *sd_flags = &fl->ifl_sds.ifsd_flags[i]; 2079 caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i]; 2080 2081 if 
(*sd_flags & RX_SW_DESC_INUSE) { 2082 if (fl->ifl_sds.ifsd_map != NULL) { 2083 bus_dmamap_t sd_map = fl->ifl_sds.ifsd_map[i]; 2084 bus_dmamap_unload(fl->ifl_desc_tag, sd_map); 2085 if (fl->ifl_rxq->ifr_ctx->ifc_in_detach) 2086 bus_dmamap_destroy(fl->ifl_desc_tag, sd_map); 2087 } 2088 if (*sd_m != NULL) { 2089 m_init(*sd_m, M_NOWAIT, MT_DATA, 0); 2090 uma_zfree(zone_mbuf, *sd_m); 2091 } 2092 if (*sd_cl != NULL) 2093 uma_zfree(fl->ifl_zone, *sd_cl); 2094 *sd_flags = 0; 2095 } else { 2096 MPASS(*sd_cl == NULL); 2097 MPASS(*sd_m == NULL); 2098 } 2099 #if MEMORY_LOGGING 2100 fl->ifl_m_dequeued++; 2101 fl->ifl_cl_dequeued++; 2102 #endif 2103 *sd_cl = NULL; 2104 *sd_m = NULL; 2105 } 2106 #ifdef INVARIANTS 2107 for (i = 0; i < fl->ifl_size; i++) { 2108 MPASS(fl->ifl_sds.ifsd_flags[i] == 0); 2109 MPASS(fl->ifl_sds.ifsd_cl[i] == NULL); 2110 MPASS(fl->ifl_sds.ifsd_m[i] == NULL); 2111 } 2112 #endif 2113 /* 2114 * Reset free list values 2115 */ 2116 fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0; 2117 bzero(idi->idi_vaddr, idi->idi_size); 2118 } 2119 2120 /********************************************************************* 2121 * 2122 * Initialize a receive ring and its buffers. 2123 * 2124 **********************************************************************/ 2125 static int 2126 iflib_fl_setup(iflib_fl_t fl) 2127 { 2128 iflib_rxq_t rxq = fl->ifl_rxq; 2129 if_ctx_t ctx = rxq->ifr_ctx; 2130 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 2131 2132 bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1); 2133 /* 2134 ** Free current RX buffer structs and their mbufs 2135 */ 2136 iflib_fl_bufs_free(fl); 2137 /* Now replenish the mbufs */ 2138 MPASS(fl->ifl_credits == 0); 2139 /* 2140 * XXX don't set the max_frame_size to larger 2141 * than the hardware can handle 2142 */ 2143 if (sctx->isc_max_frame_size <= 2048) 2144 fl->ifl_buf_size = MCLBYTES; 2145 #ifndef CONTIGMALLOC_WORKS 2146 else 2147 fl->ifl_buf_size = MJUMPAGESIZE; 2148 #else 2149 else if (sctx->isc_max_frame_size <= 4096) 2150 fl->ifl_buf_size = MJUMPAGESIZE; 2151 else if (sctx->isc_max_frame_size <= 9216) 2152 fl->ifl_buf_size = MJUM9BYTES; 2153 else 2154 fl->ifl_buf_size = MJUM16BYTES; 2155 #endif 2156 if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size) 2157 ctx->ifc_max_fl_buf_size = fl->ifl_buf_size; 2158 fl->ifl_cltype = m_gettype(fl->ifl_buf_size); 2159 fl->ifl_zone = m_getzone(fl->ifl_buf_size); 2160 2161 2162 /* avoid pre-allocating zillions of clusters to an idle card 2163 * potentially speeding up attach 2164 */ 2165 _iflib_fl_refill(ctx, fl, min(128, fl->ifl_size)); 2166 MPASS(min(128, fl->ifl_size) == fl->ifl_credits); 2167 if (min(128, fl->ifl_size) != fl->ifl_credits) 2168 return (ENOBUFS); 2169 /* 2170 * handle failure 2171 */ 2172 MPASS(rxq != NULL); 2173 MPASS(fl->ifl_ifdi != NULL); 2174 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 2175 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2176 return (0); 2177 } 2178 2179 /********************************************************************* 2180 * 2181 * Free receive ring data structures 2182 * 2183 **********************************************************************/ 2184 static void 2185 iflib_rx_sds_free(iflib_rxq_t rxq) 2186 { 2187 iflib_fl_t fl; 2188 int i; 2189 2190 if (rxq->ifr_fl != NULL) { 2191 for (i = 0; i < rxq->ifr_nfl; i++) { 2192 fl = &rxq->ifr_fl[i]; 2193 if (fl->ifl_desc_tag != NULL) { 2194 bus_dma_tag_destroy(fl->ifl_desc_tag); 2195 fl->ifl_desc_tag = NULL; 2196 } 2197 free(fl->ifl_sds.ifsd_m, M_IFLIB); 2198 
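			/*
			 * The per-descriptor DMA maps are not destroyed
			 * here: when the context is detaching,
			 * iflib_fl_bufs_free() destroys the maps of the
			 * in-use descriptors (hence the XXX note below).
			 */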
			free(fl->ifl_sds.ifsd_cl, M_IFLIB);
			/* XXX destroy maps first */
			free(fl->ifl_sds.ifsd_map, M_IFLIB);
			fl->ifl_sds.ifsd_m = NULL;
			fl->ifl_sds.ifsd_cl = NULL;
			fl->ifl_sds.ifsd_map = NULL;
		}
		free(rxq->ifr_fl, M_IFLIB);
		rxq->ifr_fl = NULL;
		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
	}
}

/*
 * MI (machine independent) logic
 */
static void
iflib_timer(void *arg)
{
	iflib_txq_t txq = arg;
	if_ctx_t ctx = txq->ift_ctx;
	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
	uint64_t this_tick = ticks;
	uint32_t reset_on = hz / 2;

	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
		return;
	/*
	 * Check on the state of the TX queue(s), this
	 * can be done without the lock because it's RO
	 * and the HUNG state will be static if set.
	 */
	if (this_tick - txq->ift_last_timer_tick >= hz / 2) {
		txq->ift_last_timer_tick = this_tick;
		IFDI_TIMER(ctx, txq->ift_id);
		if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
		    ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
		     (sctx->isc_pause_frames == 0)))
			goto hung;

		if (ifmp_ring_is_stalled(txq->ift_br))
			txq->ift_qstatus = IFLIB_QUEUE_HUNG;
		txq->ift_cleaned_prev = txq->ift_cleaned;
	}
#ifdef DEV_NETMAP
	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
		iflib_netmap_timer_adjust(ctx, txq->ift_id, &reset_on);
#endif
	/* handle any laggards */
	if (txq->ift_db_pending)
		GROUPTASK_ENQUEUE(&txq->ift_task);

	sctx->isc_pause_frames = 0;
	if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)
		callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
	return;
 hung:
	device_printf(ctx->ifc_dev, "TX(%d) desc avail = %d, pidx = %d\n",
	    txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
	STATE_LOCK(ctx);
	if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
	ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET);
	iflib_admin_intr_deferred(ctx);
	STATE_UNLOCK(ctx);
}

static void
iflib_init_locked(if_ctx_t ctx)
{
	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	if_t ifp = ctx->ifc_ifp;
	iflib_fl_t fl;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	int i, j, tx_ip_csum_flags, tx_ip6_csum_flags;


	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
	IFDI_INTR_DISABLE(ctx);

	tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP);
	tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP);
	/* Set hardware offload abilities */
	if_clearhwassist(ifp);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM)
		if_sethwassistbits(ifp, tx_ip_csum_flags, 0);
	if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
		if_sethwassistbits(ifp, tx_ip6_csum_flags, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO4)
		if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
	if (if_getcapenable(ifp) & IFCAP_TSO6)
		if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);

	for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) {
		CALLOUT_LOCK(txq);
		callout_stop(&txq->ift_timer);
		CALLOUT_UNLOCK(txq);
		iflib_netmap_txq_init(ctx, txq);
	}
#ifdef INVARIANTS
	i = if_getdrvflags(ifp);
#endif
	IFDI_INIT(ctx);
	MPASS(if_getdrvflags(ifp) == i);
	for (i = 0, rxq =
ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) { 2305 /* XXX this should really be done on a per-queue basis */ 2306 if (if_getcapenable(ifp) & IFCAP_NETMAP) { 2307 MPASS(rxq->ifr_id == i); 2308 iflib_netmap_rxq_init(ctx, rxq); 2309 continue; 2310 } 2311 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { 2312 if (iflib_fl_setup(fl)) { 2313 device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n"); 2314 goto done; 2315 } 2316 } 2317 } 2318 done: 2319 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 2320 IFDI_INTR_ENABLE(ctx); 2321 txq = ctx->ifc_txqs; 2322 for (i = 0; i < sctx->isc_ntxqsets; i++, txq++) 2323 callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, 2324 txq->ift_timer.c_cpu); 2325 } 2326 2327 static int 2328 iflib_media_change(if_t ifp) 2329 { 2330 if_ctx_t ctx = if_getsoftc(ifp); 2331 int err; 2332 2333 CTX_LOCK(ctx); 2334 if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0) 2335 iflib_init_locked(ctx); 2336 CTX_UNLOCK(ctx); 2337 return (err); 2338 } 2339 2340 static void 2341 iflib_media_status(if_t ifp, struct ifmediareq *ifmr) 2342 { 2343 if_ctx_t ctx = if_getsoftc(ifp); 2344 2345 CTX_LOCK(ctx); 2346 IFDI_UPDATE_ADMIN_STATUS(ctx); 2347 IFDI_MEDIA_STATUS(ctx, ifmr); 2348 CTX_UNLOCK(ctx); 2349 } 2350 2351 void 2352 iflib_stop(if_ctx_t ctx) 2353 { 2354 iflib_txq_t txq = ctx->ifc_txqs; 2355 iflib_rxq_t rxq = ctx->ifc_rxqs; 2356 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2357 iflib_dma_info_t di; 2358 iflib_fl_t fl; 2359 int i, j; 2360 2361 /* Tell the stack that the interface is no longer active */ 2362 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 2363 2364 IFDI_INTR_DISABLE(ctx); 2365 DELAY(1000); 2366 IFDI_STOP(ctx); 2367 DELAY(1000); 2368 2369 iflib_debug_reset(); 2370 /* Wait for current tx queue users to exit to disarm watchdog timer. */ 2371 for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) { 2372 /* make sure all transmitters have completed before proceeding XXX */ 2373 2374 CALLOUT_LOCK(txq); 2375 callout_stop(&txq->ift_timer); 2376 CALLOUT_UNLOCK(txq); 2377 2378 /* clean any enqueued buffers */ 2379 iflib_ifmp_purge(txq); 2380 /* Free any existing tx buffers. 
*/ 2381 for (j = 0; j < txq->ift_size; j++) { 2382 iflib_txsd_free(ctx, txq, j); 2383 } 2384 txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0; 2385 txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0; 2386 txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0; 2387 txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0; 2388 txq->ift_pullups = 0; 2389 ifmp_ring_reset_stats(txq->ift_br); 2390 for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwtxqs; j++, di++) 2391 bzero((void *)di->idi_vaddr, di->idi_size); 2392 } 2393 for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) { 2394 /* make sure all transmitters have completed before proceeding XXX */ 2395 2396 for (j = 0, di = rxq->ifr_ifdi; j < rxq->ifr_nfl; j++, di++) 2397 bzero((void *)di->idi_vaddr, di->idi_size); 2398 /* also resets the free lists pidx/cidx */ 2399 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) 2400 iflib_fl_bufs_free(fl); 2401 } 2402 } 2403 2404 static inline caddr_t 2405 calc_next_rxd(iflib_fl_t fl, int cidx) 2406 { 2407 qidx_t size; 2408 int nrxd; 2409 caddr_t start, end, cur, next; 2410 2411 nrxd = fl->ifl_size; 2412 size = fl->ifl_rxd_size; 2413 start = fl->ifl_ifdi->idi_vaddr; 2414 2415 if (__predict_false(size == 0)) 2416 return (start); 2417 cur = start + size*cidx; 2418 end = start + size*nrxd; 2419 next = CACHE_PTR_NEXT(cur); 2420 return (next < end ? next : start); 2421 } 2422 2423 static inline void 2424 prefetch_pkts(iflib_fl_t fl, int cidx) 2425 { 2426 int nextptr; 2427 int nrxd = fl->ifl_size; 2428 caddr_t next_rxd; 2429 2430 2431 nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1); 2432 prefetch(&fl->ifl_sds.ifsd_m[nextptr]); 2433 prefetch(&fl->ifl_sds.ifsd_cl[nextptr]); 2434 next_rxd = calc_next_rxd(fl, cidx); 2435 prefetch(next_rxd); 2436 prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]); 2437 prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]); 2438 prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]); 2439 prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]); 2440 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]); 2441 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]); 2442 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]); 2443 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]); 2444 } 2445 2446 static void 2447 rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd) 2448 { 2449 int flid, cidx; 2450 bus_dmamap_t map; 2451 iflib_fl_t fl; 2452 iflib_dma_info_t di; 2453 int next; 2454 2455 map = NULL; 2456 flid = irf->irf_flid; 2457 cidx = irf->irf_idx; 2458 fl = &rxq->ifr_fl[flid]; 2459 sd->ifsd_fl = fl; 2460 sd->ifsd_cidx = cidx; 2461 sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx]; 2462 sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx]; 2463 fl->ifl_credits--; 2464 #if MEMORY_LOGGING 2465 fl->ifl_m_dequeued++; 2466 #endif 2467 if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH) 2468 prefetch_pkts(fl, cidx); 2469 if (fl->ifl_sds.ifsd_map != NULL) { 2470 next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1); 2471 prefetch(&fl->ifl_sds.ifsd_map[next]); 2472 map = fl->ifl_sds.ifsd_map[cidx]; 2473 di = fl->ifl_ifdi; 2474 next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1); 2475 prefetch(&fl->ifl_sds.ifsd_flags[next]); 2476 bus_dmamap_sync(di->idi_tag, di->idi_map, 2477 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2478 2479 /* not valid assert if bxe really does SGE from non-contiguous elements */ 2480 MPASS(fl->ifl_cidx == cidx); 2481 if (unload) 2482 bus_dmamap_unload(fl->ifl_desc_tag, map); 2483 
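		/*
		 * Note: unload is false in the small-packet copy path of
		 * iflib_rxd_pkt_get(); there the cluster stays attached to
		 * the free list and its mapping is reused when the
		 * descriptor is refilled.
		 */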
} 2484 fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1); 2485 if (__predict_false(fl->ifl_cidx == 0)) 2486 fl->ifl_gen = 0; 2487 if (map != NULL) 2488 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 2489 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2490 bit_clear(fl->ifl_rx_bitmap, cidx); 2491 } 2492 2493 static struct mbuf * 2494 assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd) 2495 { 2496 int i, padlen , flags; 2497 struct mbuf *m, *mh, *mt; 2498 caddr_t cl; 2499 2500 i = 0; 2501 mh = NULL; 2502 do { 2503 rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd); 2504 2505 MPASS(*sd->ifsd_cl != NULL); 2506 MPASS(*sd->ifsd_m != NULL); 2507 2508 /* Don't include zero-length frags */ 2509 if (ri->iri_frags[i].irf_len == 0) { 2510 /* XXX we can save the cluster here, but not the mbuf */ 2511 m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0); 2512 m_free(*sd->ifsd_m); 2513 *sd->ifsd_m = NULL; 2514 continue; 2515 } 2516 m = *sd->ifsd_m; 2517 *sd->ifsd_m = NULL; 2518 if (mh == NULL) { 2519 flags = M_PKTHDR|M_EXT; 2520 mh = mt = m; 2521 padlen = ri->iri_pad; 2522 } else { 2523 flags = M_EXT; 2524 mt->m_next = m; 2525 mt = m; 2526 /* assuming padding is only on the first fragment */ 2527 padlen = 0; 2528 } 2529 cl = *sd->ifsd_cl; 2530 *sd->ifsd_cl = NULL; 2531 2532 /* Can these two be made one ? */ 2533 m_init(m, M_NOWAIT, MT_DATA, flags); 2534 m_cljset(m, cl, sd->ifsd_fl->ifl_cltype); 2535 /* 2536 * These must follow m_init and m_cljset 2537 */ 2538 m->m_data += padlen; 2539 ri->iri_len -= padlen; 2540 m->m_len = ri->iri_frags[i].irf_len; 2541 } while (++i < ri->iri_nfrags); 2542 2543 return (mh); 2544 } 2545 2546 /* 2547 * Process one software descriptor 2548 */ 2549 static struct mbuf * 2550 iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri) 2551 { 2552 struct if_rxsd sd; 2553 struct mbuf *m; 2554 2555 /* should I merge this back in now that the two paths are basically duplicated? */ 2556 if (ri->iri_nfrags == 1 && 2557 ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) { 2558 rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd); 2559 m = *sd.ifsd_m; 2560 *sd.ifsd_m = NULL; 2561 m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR); 2562 #ifndef __NO_STRICT_ALIGNMENT 2563 if (!IP_ALIGNED(m)) 2564 m->m_data += 2; 2565 #endif 2566 memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len); 2567 m->m_len = ri->iri_frags[0].irf_len; 2568 } else { 2569 m = assemble_segments(rxq, ri, &sd); 2570 } 2571 m->m_pkthdr.len = ri->iri_len; 2572 m->m_pkthdr.rcvif = ri->iri_ifp; 2573 m->m_flags |= ri->iri_flags; 2574 m->m_pkthdr.ether_vtag = ri->iri_vtag; 2575 m->m_pkthdr.flowid = ri->iri_flowid; 2576 M_HASHTYPE_SET(m, ri->iri_rsstype); 2577 m->m_pkthdr.csum_flags = ri->iri_csum_flags; 2578 m->m_pkthdr.csum_data = ri->iri_csum_data; 2579 return (m); 2580 } 2581 2582 #if defined(INET6) || defined(INET) 2583 static void 2584 iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6) 2585 { 2586 CURVNET_SET(lc->ifp->if_vnet); 2587 #if defined(INET6) 2588 *v6 = VNET(ip6_forwarding); 2589 #endif 2590 #if defined(INET) 2591 *v4 = VNET(ipforwarding); 2592 #endif 2593 CURVNET_RESTORE(); 2594 } 2595 2596 /* 2597 * Returns true if it's possible this packet could be LROed. 2598 * if it returns false, it is guaranteed that tcp_lro_rx() 2599 * would not return zero. 
2600 */ 2601 static bool 2602 iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding) 2603 { 2604 struct ether_header *eh; 2605 uint16_t eh_type; 2606 2607 eh = mtod(m, struct ether_header *); 2608 eh_type = ntohs(eh->ether_type); 2609 switch (eh_type) { 2610 #if defined(INET6) 2611 case ETHERTYPE_IPV6: 2612 return !v6_forwarding; 2613 #endif 2614 #if defined (INET) 2615 case ETHERTYPE_IP: 2616 return !v4_forwarding; 2617 #endif 2618 } 2619 2620 return false; 2621 } 2622 #else 2623 static void 2624 iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused) 2625 { 2626 } 2627 #endif 2628 2629 static bool 2630 iflib_rxeof(iflib_rxq_t rxq, qidx_t budget) 2631 { 2632 if_ctx_t ctx = rxq->ifr_ctx; 2633 if_shared_ctx_t sctx = ctx->ifc_sctx; 2634 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2635 int avail, i; 2636 qidx_t *cidxp; 2637 struct if_rxd_info ri; 2638 int err, budget_left, rx_bytes, rx_pkts; 2639 iflib_fl_t fl; 2640 struct ifnet *ifp; 2641 int lro_enabled; 2642 bool v4_forwarding, v6_forwarding, lro_possible; 2643 2644 /* 2645 * XXX early demux data packets so that if_input processing only handles 2646 * acks in interrupt context 2647 */ 2648 struct mbuf *m, *mh, *mt, *mf; 2649 2650 lro_possible = v4_forwarding = v6_forwarding = false; 2651 ifp = ctx->ifc_ifp; 2652 mh = mt = NULL; 2653 MPASS(budget > 0); 2654 rx_pkts = rx_bytes = 0; 2655 if (sctx->isc_flags & IFLIB_HAS_RXCQ) 2656 cidxp = &rxq->ifr_cq_cidx; 2657 else 2658 cidxp = &rxq->ifr_fl[0].ifl_cidx; 2659 if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) { 2660 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) 2661 __iflib_fl_refill_lt(ctx, fl, budget + 8); 2662 DBG_COUNTER_INC(rx_unavail); 2663 return (false); 2664 } 2665 2666 for (budget_left = budget; budget_left > 0 && avail > 0;) { 2667 if (__predict_false(!CTX_ACTIVE(ctx))) { 2668 DBG_COUNTER_INC(rx_ctx_inactive); 2669 break; 2670 } 2671 /* 2672 * Reset client set fields to their default values 2673 */ 2674 rxd_info_zero(&ri); 2675 ri.iri_qsidx = rxq->ifr_id; 2676 ri.iri_cidx = *cidxp; 2677 ri.iri_ifp = ifp; 2678 ri.iri_frags = rxq->ifr_frags; 2679 err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); 2680 2681 if (err) 2682 goto err; 2683 if (sctx->isc_flags & IFLIB_HAS_RXCQ) { 2684 *cidxp = ri.iri_cidx; 2685 /* Update our consumer index */ 2686 /* XXX NB: shurd - check if this is still safe */ 2687 while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) { 2688 rxq->ifr_cq_cidx -= scctx->isc_nrxd[0]; 2689 rxq->ifr_cq_gen = 0; 2690 } 2691 /* was this only a completion queue message? 
*/ 2692 if (__predict_false(ri.iri_nfrags == 0)) 2693 continue; 2694 } 2695 MPASS(ri.iri_nfrags != 0); 2696 MPASS(ri.iri_len != 0); 2697 2698 /* will advance the cidx on the corresponding free lists */ 2699 m = iflib_rxd_pkt_get(rxq, &ri); 2700 avail--; 2701 budget_left--; 2702 if (avail == 0 && budget_left) 2703 avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left); 2704 2705 if (__predict_false(m == NULL)) { 2706 DBG_COUNTER_INC(rx_mbuf_null); 2707 continue; 2708 } 2709 /* imm_pkt: -- cxgb */ 2710 if (mh == NULL) 2711 mh = mt = m; 2712 else { 2713 mt->m_nextpkt = m; 2714 mt = m; 2715 } 2716 } 2717 /* make sure that we can refill faster than drain */ 2718 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) 2719 __iflib_fl_refill_lt(ctx, fl, budget + 8); 2720 2721 lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO); 2722 if (lro_enabled) 2723 iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding); 2724 mt = mf = NULL; 2725 while (mh != NULL) { 2726 m = mh; 2727 mh = mh->m_nextpkt; 2728 m->m_nextpkt = NULL; 2729 #ifndef __NO_STRICT_ALIGNMENT 2730 if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL) 2731 continue; 2732 #endif 2733 rx_bytes += m->m_pkthdr.len; 2734 rx_pkts++; 2735 #if defined(INET6) || defined(INET) 2736 if (lro_enabled) { 2737 if (!lro_possible) { 2738 lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding); 2739 if (lro_possible && mf != NULL) { 2740 ifp->if_input(ifp, mf); 2741 DBG_COUNTER_INC(rx_if_input); 2742 mt = mf = NULL; 2743 } 2744 } 2745 if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) == 2746 (CSUM_L4_CALC|CSUM_L4_VALID)) { 2747 if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0) 2748 continue; 2749 } 2750 } 2751 #endif 2752 if (lro_possible) { 2753 ifp->if_input(ifp, m); 2754 DBG_COUNTER_INC(rx_if_input); 2755 continue; 2756 } 2757 2758 if (mf == NULL) 2759 mf = m; 2760 if (mt != NULL) 2761 mt->m_nextpkt = m; 2762 mt = m; 2763 } 2764 if (mf != NULL) { 2765 ifp->if_input(ifp, mf); 2766 DBG_COUNTER_INC(rx_if_input); 2767 } 2768 2769 if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes); 2770 if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts); 2771 2772 /* 2773 * Flush any outstanding LRO work 2774 */ 2775 #if defined(INET6) || defined(INET) 2776 tcp_lro_flush_all(&rxq->ifr_lc); 2777 #endif 2778 if (avail) 2779 return true; 2780 return (iflib_rxd_avail(ctx, rxq, *cidxp, 1)); 2781 err: 2782 STATE_LOCK(ctx); 2783 ctx->ifc_flags |= IFC_DO_RESET; 2784 iflib_admin_intr_deferred(ctx); 2785 STATE_UNLOCK(ctx); 2786 return (false); 2787 } 2788 2789 #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1) 2790 static inline qidx_t 2791 txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use) 2792 { 2793 qidx_t notify_count = TXD_NOTIFY_COUNT(txq); 2794 qidx_t minthresh = txq->ift_size / 8; 2795 if (in_use > 4*minthresh) 2796 return (notify_count); 2797 if (in_use > 2*minthresh) 2798 return (notify_count >> 1); 2799 if (in_use > minthresh) 2800 return (notify_count >> 3); 2801 return (0); 2802 } 2803 2804 static inline qidx_t 2805 txq_max_rs_deferred(iflib_txq_t txq) 2806 { 2807 qidx_t notify_count = TXD_NOTIFY_COUNT(txq); 2808 qidx_t minthresh = txq->ift_size / 8; 2809 if (txq->ift_in_use > 4*minthresh) 2810 return (notify_count); 2811 if (txq->ift_in_use > 2*minthresh) 2812 return (notify_count >> 1); 2813 if (txq->ift_in_use > minthresh) 2814 return (notify_count >> 2); 2815 return (2); 2816 } 2817 2818 #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags) 2819 #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG) 
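/*
 * Worked example of the two deferral policies above, assuming
 * ift_size = 1024 and the default ift_update_freq of 16:
 * TXD_NOTIFY_COUNT is 1024/16 - 1 = 63 and minthresh is 1024/8 = 128.
 * With 600 descriptors in use (> 4*minthresh) up to 63 doorbells/RS bits
 * may be deferred; with 300 in use, 31; with 200 in use, 7 (doorbell) or
 * 15 (RS); a nearly idle queue defers 0 (doorbell) or 2 (RS).
 */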
2820 2821 #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use)) 2822 #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq) 2823 #define TXQ_MAX_DB_CONSUMED(size) (size >> 4) 2824 2825 /* forward compatibility for cxgb */ 2826 #define FIRST_QSET(ctx) 0 2827 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets) 2828 #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets) 2829 #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx)) 2830 #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments)) 2831 2832 /* XXX we should be setting this to something other than zero */ 2833 #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh) 2834 #define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max) 2835 2836 static inline bool 2837 iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use) 2838 { 2839 qidx_t dbval, max; 2840 bool rang; 2841 2842 rang = false; 2843 max = TXQ_MAX_DB_DEFERRED(txq, in_use); 2844 if (ring || txq->ift_db_pending >= max) { 2845 dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx; 2846 ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval); 2847 txq->ift_db_pending = txq->ift_npending = 0; 2848 rang = true; 2849 } 2850 return (rang); 2851 } 2852 2853 #ifdef PKT_DEBUG 2854 static void 2855 print_pkt(if_pkt_info_t pi) 2856 { 2857 printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n", 2858 pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx); 2859 printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n", 2860 pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag); 2861 printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n", 2862 pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto); 2863 } 2864 #endif 2865 2866 #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO) 2867 #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO)) 2868 #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO) 2869 #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO)) 2870 2871 static int 2872 iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp) 2873 { 2874 if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx; 2875 struct ether_vlan_header *eh; 2876 struct mbuf *m, *n; 2877 2878 m = *mp; 2879 if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) && 2880 M_WRITABLE(m) == 0) { 2881 if ((m = m_dup(m, M_NOWAIT)) == NULL) { 2882 return (ENOMEM); 2883 } else { 2884 m_freem(*mp); 2885 *mp = m; 2886 } 2887 } 2888 2889 /* 2890 * Determine where frame payload starts. 2891 * Jump over vlan headers if already present, 2892 * helpful for QinQ too. 
2893 */ 2894 if (__predict_false(m->m_len < sizeof(*eh))) { 2895 txq->ift_pullups++; 2896 if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL)) 2897 return (ENOMEM); 2898 } 2899 eh = mtod(m, struct ether_vlan_header *); 2900 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2901 pi->ipi_etype = ntohs(eh->evl_proto); 2902 pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2903 } else { 2904 pi->ipi_etype = ntohs(eh->evl_encap_proto); 2905 pi->ipi_ehdrlen = ETHER_HDR_LEN; 2906 } 2907 2908 switch (pi->ipi_etype) { 2909 #ifdef INET 2910 case ETHERTYPE_IP: 2911 { 2912 struct ip *ip = NULL; 2913 struct tcphdr *th = NULL; 2914 int minthlen; 2915 2916 minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th)); 2917 if (__predict_false(m->m_len < minthlen)) { 2918 /* 2919 * if this code bloat is causing too much of a hit 2920 * move it to a separate function and mark it noinline 2921 */ 2922 if (m->m_len == pi->ipi_ehdrlen) { 2923 n = m->m_next; 2924 MPASS(n); 2925 if (n->m_len >= sizeof(*ip)) { 2926 ip = (struct ip *)n->m_data; 2927 if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 2928 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2929 } else { 2930 txq->ift_pullups++; 2931 if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) 2932 return (ENOMEM); 2933 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 2934 } 2935 } else { 2936 txq->ift_pullups++; 2937 if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) 2938 return (ENOMEM); 2939 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 2940 if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 2941 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2942 } 2943 } else { 2944 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 2945 if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 2946 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2947 } 2948 pi->ipi_ip_hlen = ip->ip_hl << 2; 2949 pi->ipi_ipproto = ip->ip_p; 2950 pi->ipi_flags |= IPI_TX_IPV4; 2951 2952 if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP)) 2953 ip->ip_sum = 0; 2954 2955 /* TCP checksum offload may require TCP header length */ 2956 if (IS_TX_OFFLOAD4(pi)) { 2957 if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) { 2958 if (__predict_false(th == NULL)) { 2959 txq->ift_pullups++; 2960 if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL)) 2961 return (ENOMEM); 2962 th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen); 2963 } 2964 pi->ipi_tcp_hflags = th->th_flags; 2965 pi->ipi_tcp_hlen = th->th_off << 2; 2966 pi->ipi_tcp_seq = th->th_seq; 2967 } 2968 if (IS_TSO4(pi)) { 2969 if (__predict_false(ip->ip_p != IPPROTO_TCP)) 2970 return (ENXIO); 2971 th->th_sum = in_pseudo(ip->ip_src.s_addr, 2972 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 2973 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; 2974 if (sctx->isc_flags & IFLIB_TSO_INIT_IP) { 2975 ip->ip_sum = 0; 2976 ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz); 2977 } 2978 } 2979 } 2980 break; 2981 } 2982 #endif 2983 #ifdef INET6 2984 case ETHERTYPE_IPV6: 2985 { 2986 struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen); 2987 struct tcphdr *th; 2988 pi->ipi_ip_hlen = sizeof(struct ip6_hdr); 2989 2990 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) { 2991 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL)) 2992 return (ENOMEM); 2993 } 2994 th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen); 2995 2996 /* XXX-BZ this will go badly in case of ext 
hdrs. */
		pi->ipi_ipproto = ip6->ip6_nxt;
		pi->ipi_flags |= IPI_TX_IPV6;

		/* TCP checksum offload may require TCP header length */
		if (IS_TX_OFFLOAD6(pi)) {
			if (pi->ipi_ipproto == IPPROTO_TCP) {
				if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) {
					txq->ift_pullups++;
					if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL))
						return (ENOMEM);
				}
				pi->ipi_tcp_hflags = th->th_flags;
				pi->ipi_tcp_hlen = th->th_off << 2;
				pi->ipi_tcp_seq = th->th_seq;
			}
			if (IS_TSO6(pi)) {
				if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP))
					return (ENXIO);
				/*
				 * The corresponding flag is set by the stack in the IPv4
				 * TSO case, but not in IPv6 (at least in FreeBSD 10.2).
				 * So, set it here because the rest of the flow requires it.
				 */
				pi->ipi_csum_flags |= CSUM_IP6_TCP;
				th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
				pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz;
			}
		}
		break;
	}
#endif
	default:
		pi->ipi_csum_flags &= ~CSUM_OFFLOAD;
		pi->ipi_ip_hlen = 0;
		break;
	}
	*mp = m;

	return (0);
}

static __noinline struct mbuf *
collapse_pkthdr(struct mbuf *m0)
{
	struct mbuf *m, *m_next, *tmp;

	m = m0;
	m_next = m->m_next;
	while (m_next != NULL && m_next->m_len == 0) {
		/* m_free() frees one mbuf and returns the one after it */
		m_next = m_free(m_next);
	}
	m->m_next = m_next;
	if (m_next == NULL)
		return (m);
	if ((m_next->m_flags & M_EXT) == 0) {
		m = m_defrag(m, M_NOWAIT);
	} else {
		tmp = m_next->m_next;
		memcpy(m_next, m, MPKTHSIZE);
		m = m_next;
		m->m_next = tmp;
	}
	return (m);
}

/*
 * If dodgy hardware rejects the scatter gather chain we've handed it
 * we'll need to remove the mbuf chain from ifsd_m[] before we can add the
 * m_defrag'd mbufs
 */
static __noinline struct mbuf *
iflib_remove_mbuf(iflib_txq_t txq)
{
	int ntxd, i, pidx;
	struct mbuf *m, *mh, **ifsd_m;

	pidx = txq->ift_pidx;
	ifsd_m = txq->ift_sds.ifsd_m;
	ntxd = txq->ift_size;
	mh = m = ifsd_m[pidx];
	ifsd_m[pidx] = NULL;
#if MEMORY_LOGGING
	txq->ift_dequeued++;
#endif
	i = 1;

	while (m) {
		ifsd_m[(pidx + i) & (ntxd -1)] = NULL;
#if MEMORY_LOGGING
		txq->ift_dequeued++;
#endif
		m = m->m_next;
		i++;
	}
	return (mh);
}

static int
iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
			  struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs,
			  int max_segs, int flags)
{
	if_ctx_t ctx;
	if_shared_ctx_t sctx;
	if_softc_ctx_t scctx;
	int i, next, pidx, err, ntxd, count;
	struct mbuf *m, *tmp, **ifsd_m;

	m = *m0;

	/*
	 * Please don't ever do this
	 */
	if (__predict_false(m->m_len == 0))
		*m0 = collapse_pkthdr(m);

	ctx = txq->ift_ctx;
	sctx = ctx->ifc_sctx;
	scctx = &ctx->ifc_softc_ctx;
	ifsd_m = txq->ift_sds.ifsd_m;
	ntxd = txq->ift_size;
	pidx = txq->ift_pidx;
	if (map != NULL) {
		uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags;

		err = bus_dmamap_load_mbuf_sg(tag, map,
					      *m0, segs, nsegs, BUS_DMA_NOWAIT);
		if (err)
			return (err);
		ifsd_flags[pidx] |= TX_SW_DESC_MAPPED;
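		/*
		 * The loop below strips zero-length mbufs and counts the
		 * remainder.  If the chain holds more mbufs than busdma
		 * produced segments (e.g. five mbufs coalesced into three
		 * segments), a pointer to every mbuf cannot be saved in
		 * ifsd_m[], so the whole chain is parked at pidx and
		 * flagged M_TOOBIG to be released with m_freem() at
		 * reclaim time.
		 */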
3131 count = 0; 3132 m = *m0; 3133 do { 3134 if (__predict_false(m->m_len <= 0)) { 3135 tmp = m; 3136 m = m->m_next; 3137 tmp->m_next = NULL; 3138 m_free(tmp); 3139 continue; 3140 } 3141 m = m->m_next; 3142 count++; 3143 } while (m != NULL); 3144 if (count > *nsegs) { 3145 ifsd_m[pidx] = *m0; 3146 ifsd_m[pidx]->m_flags |= M_TOOBIG; 3147 return (0); 3148 } 3149 m = *m0; 3150 count = 0; 3151 do { 3152 next = (pidx + count) & (ntxd-1); 3153 MPASS(ifsd_m[next] == NULL); 3154 ifsd_m[next] = m; 3155 count++; 3156 tmp = m; 3157 m = m->m_next; 3158 } while (m != NULL); 3159 } else { 3160 int buflen, sgsize, maxsegsz, max_sgsize; 3161 vm_offset_t vaddr; 3162 vm_paddr_t curaddr; 3163 3164 count = i = 0; 3165 m = *m0; 3166 if (m->m_pkthdr.csum_flags & CSUM_TSO) 3167 maxsegsz = scctx->isc_tx_tso_segsize_max; 3168 else 3169 maxsegsz = sctx->isc_tx_maxsegsize; 3170 3171 do { 3172 if (__predict_false(m->m_len <= 0)) { 3173 tmp = m; 3174 m = m->m_next; 3175 tmp->m_next = NULL; 3176 m_free(tmp); 3177 continue; 3178 } 3179 buflen = m->m_len; 3180 vaddr = (vm_offset_t)m->m_data; 3181 /* 3182 * see if we can't be smarter about physically 3183 * contiguous mappings 3184 */ 3185 next = (pidx + count) & (ntxd-1); 3186 MPASS(ifsd_m[next] == NULL); 3187 #if MEMORY_LOGGING 3188 txq->ift_enqueued++; 3189 #endif 3190 ifsd_m[next] = m; 3191 while (buflen > 0) { 3192 if (i >= max_segs) 3193 goto err; 3194 max_sgsize = MIN(buflen, maxsegsz); 3195 curaddr = pmap_kextract(vaddr); 3196 sgsize = PAGE_SIZE - (curaddr & PAGE_MASK); 3197 sgsize = MIN(sgsize, max_sgsize); 3198 segs[i].ds_addr = curaddr; 3199 segs[i].ds_len = sgsize; 3200 vaddr += sgsize; 3201 buflen -= sgsize; 3202 i++; 3203 } 3204 count++; 3205 tmp = m; 3206 m = m->m_next; 3207 } while (m != NULL); 3208 *nsegs = i; 3209 } 3210 return (0); 3211 err: 3212 *m0 = iflib_remove_mbuf(txq); 3213 return (EFBIG); 3214 } 3215 3216 static inline caddr_t 3217 calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid) 3218 { 3219 qidx_t size; 3220 int ntxd; 3221 caddr_t start, end, cur, next; 3222 3223 ntxd = txq->ift_size; 3224 size = txq->ift_txd_size[qid]; 3225 start = txq->ift_ifdi[qid].idi_vaddr; 3226 3227 if (__predict_false(size == 0)) 3228 return (start); 3229 cur = start + size*cidx; 3230 end = start + size*ntxd; 3231 next = CACHE_PTR_NEXT(cur); 3232 return (next < end ? next : start); 3233 } 3234 3235 /* 3236 * Pad an mbuf to ensure a minimum ethernet frame size. 
 * min_frame_size is the frame size (less CRC) to pad the mbuf to
 */
static __noinline int
iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size)
{
	/*
	 * 18 bytes is enough to pad an ARP packet to 46 bytes, and
	 * an ARP message is the smallest common payload I can think of
	 */
	static char pad[18];	/* just zeros */
	int n;
	struct mbuf *new_head;

	if (!M_WRITABLE(*m_head)) {
		new_head = m_dup(*m_head, M_NOWAIT);
		if (new_head == NULL) {
			m_freem(*m_head);
			device_printf(dev, "cannot pad short frame, m_dup() failed\n");
			DBG_COUNTER_INC(encap_pad_mbuf_fail);
			return (ENOMEM);
		}
		m_freem(*m_head);
		*m_head = new_head;
	}

	for (n = min_frame_size - (*m_head)->m_pkthdr.len;
	     n > 0; n -= sizeof(pad))
		if (!m_append(*m_head, min(n, sizeof(pad)), pad))
			break;

	if (n > 0) {
		m_freem(*m_head);
		device_printf(dev, "cannot pad short frame\n");
		DBG_COUNTER_INC(encap_pad_mbuf_fail);
		return (ENOBUFS);
	}

	return (0);
}

static int
iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
{
	if_ctx_t ctx;
	if_shared_ctx_t sctx;
	if_softc_ctx_t scctx;
	bus_dma_segment_t *segs;
	struct mbuf *m_head;
	void *next_txd;
	bus_dmamap_t map;
	struct if_pkt_info pi;
	int remap = 0;
	int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
	bus_dma_tag_t desc_tag;

	ctx = txq->ift_ctx;
	sctx = ctx->ifc_sctx;
	scctx = &ctx->ifc_softc_ctx;
	segs = txq->ift_segs;
	ntxd = txq->ift_size;
	m_head = *m_headp;
	map = NULL;

	/*
	 * If we're doing TSO the next descriptor to clean may be quite far ahead
	 */
	cidx = txq->ift_cidx;
	pidx = txq->ift_pidx;
	if (ctx->ifc_flags & IFC_PREFETCH) {
		next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
		if (!(sctx->isc_flags & IFLIB_HAS_TXCQ)) {
			next_txd = calc_next_txd(txq, cidx, 0);
			prefetch(next_txd);
		}

		/* prefetch the next cache line of mbuf pointers and flags */
		prefetch(&txq->ift_sds.ifsd_m[next]);
		if (txq->ift_sds.ifsd_map != NULL) {
			prefetch(&txq->ift_sds.ifsd_map[next]);
			next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
			prefetch(&txq->ift_sds.ifsd_flags[next]);
		}
	} else if (txq->ift_sds.ifsd_map != NULL)
		map = txq->ift_sds.ifsd_map[pidx];

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		desc_tag = txq->ift_tso_desc_tag;
		max_segs = scctx->isc_tx_tso_segments_max;
		MPASS(desc_tag != NULL);
		MPASS(max_segs > 0);
	} else {
		desc_tag = txq->ift_desc_tag;
		max_segs = scctx->isc_tx_nsegments;
	}
	if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) &&
	    __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) {
		err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size);
		if (err)
			return (err);
	}
	m_head = *m_headp;

	pkt_info_zero(&pi);
	pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST));
	pi.ipi_pidx = pidx;
	pi.ipi_qsidx = txq->ift_id;
	pi.ipi_len = m_head->m_pkthdr.len;
	pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags;
	pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ?
m_head->m_pkthdr.ether_vtag : 0; 3346 3347 /* deliberate bitwise OR to make one condition */ 3348 if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) { 3349 if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) 3350 return (err); 3351 m_head = *m_headp; 3352 } 3353 3354 retry: 3355 err = iflib_busdma_load_mbuf_sg(txq, desc_tag, map, m_headp, segs, &nsegs, max_segs, BUS_DMA_NOWAIT); 3356 defrag: 3357 if (__predict_false(err)) { 3358 switch (err) { 3359 case EFBIG: 3360 /* try collapse once and defrag once */ 3361 if (remap == 0) { 3362 m_head = m_collapse(*m_headp, M_NOWAIT, max_segs); 3363 /* try defrag if collapsing fails */ 3364 if (m_head == NULL) 3365 remap++; 3366 } 3367 if (remap == 1) 3368 m_head = m_defrag(*m_headp, M_NOWAIT); 3369 remap++; 3370 if (__predict_false(m_head == NULL)) 3371 goto defrag_failed; 3372 txq->ift_mbuf_defrag++; 3373 *m_headp = m_head; 3374 goto retry; 3375 break; 3376 case ENOMEM: 3377 txq->ift_no_tx_dma_setup++; 3378 break; 3379 default: 3380 txq->ift_no_tx_dma_setup++; 3381 m_freem(*m_headp); 3382 DBG_COUNTER_INC(tx_frees); 3383 *m_headp = NULL; 3384 break; 3385 } 3386 txq->ift_map_failed++; 3387 DBG_COUNTER_INC(encap_load_mbuf_fail); 3388 return (err); 3389 } 3390 3391 /* 3392 * XXX assumes a 1 to 1 relationship between segments and 3393 * descriptors - this does not hold true on all drivers, e.g. 3394 * cxgb 3395 */ 3396 if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) { 3397 txq->ift_no_desc_avail++; 3398 if (map != NULL) 3399 bus_dmamap_unload(desc_tag, map); 3400 DBG_COUNTER_INC(encap_txq_avail_fail); 3401 if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0) 3402 GROUPTASK_ENQUEUE(&txq->ift_task); 3403 return (ENOBUFS); 3404 } 3405 /* 3406 * On Intel cards we can greatly reduce the number of TX interrupts 3407 * we see by only setting report status on every Nth descriptor. 3408 * However, this also means that the driver will need to keep track 3409 * of the descriptors that RS was set on to check them for the DD bit. 
3410 */ 3411 txq->ift_rs_pending += nsegs + 1; 3412 if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) || 3413 iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) { 3414 pi.ipi_flags |= IPI_TX_INTR; 3415 txq->ift_rs_pending = 0; 3416 } 3417 3418 pi.ipi_segs = segs; 3419 pi.ipi_nsegs = nsegs; 3420 3421 MPASS(pidx >= 0 && pidx < txq->ift_size); 3422 #ifdef PKT_DEBUG 3423 print_pkt(&pi); 3424 #endif 3425 if (map != NULL) 3426 bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE); 3427 if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) { 3428 if (map != NULL) 3429 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, 3430 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3431 DBG_COUNTER_INC(tx_encap); 3432 MPASS(pi.ipi_new_pidx < txq->ift_size); 3433 3434 ndesc = pi.ipi_new_pidx - pi.ipi_pidx; 3435 if (pi.ipi_new_pidx < pi.ipi_pidx) { 3436 ndesc += txq->ift_size; 3437 txq->ift_gen = 1; 3438 } 3439 /* 3440 * drivers can need as many as 3441 * two sentinels 3442 */ 3443 MPASS(ndesc <= pi.ipi_nsegs + 2); 3444 MPASS(pi.ipi_new_pidx != pidx); 3445 MPASS(ndesc > 0); 3446 txq->ift_in_use += ndesc; 3447 3448 /* 3449 * We update the last software descriptor again here because there may 3450 * be a sentinel and/or there may be more mbufs than segments 3451 */ 3452 txq->ift_pidx = pi.ipi_new_pidx; 3453 txq->ift_npending += pi.ipi_ndescs; 3454 } else { 3455 *m_headp = m_head = iflib_remove_mbuf(txq); 3456 if (err == EFBIG) { 3457 txq->ift_txd_encap_efbig++; 3458 if (remap < 2) { 3459 remap = 1; 3460 goto defrag; 3461 } 3462 } 3463 DBG_COUNTER_INC(encap_txd_encap_fail); 3464 goto defrag_failed; 3465 } 3466 return (err); 3467 3468 defrag_failed: 3469 txq->ift_mbuf_defrag_failed++; 3470 txq->ift_map_failed++; 3471 m_freem(*m_headp); 3472 DBG_COUNTER_INC(tx_frees); 3473 *m_headp = NULL; 3474 return (ENOMEM); 3475 } 3476 3477 static void 3478 iflib_tx_desc_free(iflib_txq_t txq, int n) 3479 { 3480 int hasmap; 3481 uint32_t qsize, cidx, mask, gen; 3482 struct mbuf *m, **ifsd_m; 3483 uint8_t *ifsd_flags; 3484 bus_dmamap_t *ifsd_map; 3485 bool do_prefetch; 3486 3487 cidx = txq->ift_cidx; 3488 gen = txq->ift_gen; 3489 qsize = txq->ift_size; 3490 mask = qsize-1; 3491 hasmap = txq->ift_sds.ifsd_map != NULL; 3492 ifsd_flags = txq->ift_sds.ifsd_flags; 3493 ifsd_m = txq->ift_sds.ifsd_m; 3494 ifsd_map = txq->ift_sds.ifsd_map; 3495 do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH); 3496 3497 while (n-- > 0) { 3498 if (do_prefetch) { 3499 prefetch(ifsd_m[(cidx + 3) & mask]); 3500 prefetch(ifsd_m[(cidx + 4) & mask]); 3501 } 3502 if (ifsd_m[cidx] != NULL) { 3503 prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]); 3504 prefetch(&ifsd_flags[(cidx + CACHE_PTR_INCREMENT) & mask]); 3505 if (hasmap && (ifsd_flags[cidx] & TX_SW_DESC_MAPPED)) { 3506 /* 3507 * does it matter if it's not the TSO tag? 
If so we'll 3508 * have to add the type to flags 3509 */ 3510 bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]); 3511 ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED; 3512 } 3513 if ((m = ifsd_m[cidx]) != NULL) { 3514 /* XXX we don't support any drivers that batch packets yet */ 3515 MPASS(m->m_nextpkt == NULL); 3516 /* if the number of clusters exceeds the number of segments 3517 * there won't be space on the ring to save a pointer to each 3518 * cluster so we simply free the list here 3519 */ 3520 if (m->m_flags & M_TOOBIG) { 3521 m_freem(m); 3522 } else { 3523 m_free(m); 3524 } 3525 ifsd_m[cidx] = NULL; 3526 #if MEMORY_LOGGING 3527 txq->ift_dequeued++; 3528 #endif 3529 DBG_COUNTER_INC(tx_frees); 3530 } 3531 } 3532 if (__predict_false(++cidx == qsize)) { 3533 cidx = 0; 3534 gen = 0; 3535 } 3536 } 3537 txq->ift_cidx = cidx; 3538 txq->ift_gen = gen; 3539 } 3540 3541 static __inline int 3542 iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh) 3543 { 3544 int reclaim; 3545 if_ctx_t ctx = txq->ift_ctx; 3546 3547 KASSERT(thresh >= 0, ("invalid threshold to reclaim")); 3548 MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size); 3549 3550 /* 3551 * Need a rate-limiting check so that this isn't called every time 3552 */ 3553 iflib_tx_credits_update(ctx, txq); 3554 reclaim = DESC_RECLAIMABLE(txq); 3555 3556 if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) { 3557 #ifdef INVARIANTS 3558 if (iflib_verbose_debug) { 3559 printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__, 3560 txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments, 3561 reclaim, thresh); 3562 3563 } 3564 #endif 3565 return (0); 3566 } 3567 iflib_tx_desc_free(txq, reclaim); 3568 txq->ift_cleaned += reclaim; 3569 txq->ift_in_use -= reclaim; 3570 3571 return (reclaim); 3572 } 3573 3574 static struct mbuf ** 3575 _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining) 3576 { 3577 int next, size; 3578 struct mbuf **items; 3579 3580 size = r->size; 3581 next = (cidx + CACHE_PTR_INCREMENT) & (size-1); 3582 items = __DEVOLATILE(struct mbuf **, &r->items[0]); 3583 3584 prefetch(items[(cidx + offset) & (size-1)]); 3585 if (remaining > 1) { 3586 prefetch2cachelines(&items[next]); 3587 prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]); 3588 prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]); 3589 prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]); 3590 } 3591 return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)])); 3592 } 3593 3594 static void 3595 iflib_txq_check_drain(iflib_txq_t txq, int budget) 3596 { 3597 3598 ifmp_ring_check_drainage(txq->ift_br, budget); 3599 } 3600 3601 static uint32_t 3602 iflib_txq_can_drain(struct ifmp_ring *r) 3603 { 3604 iflib_txq_t txq = r->cookie; 3605 if_ctx_t ctx = txq->ift_ctx; 3606 3607 return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) || 3608 ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false)); 3609 } 3610 3611 static uint32_t 3612 iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) 3613 { 3614 iflib_txq_t txq = r->cookie; 3615 if_ctx_t ctx = txq->ift_ctx; 3616 struct ifnet *ifp = ctx->ifc_ifp; 3617 struct mbuf **mp, *m; 3618 int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail; 3619 int reclaimed, err, in_use_prev, desc_used; 3620 bool do_prefetch, ring, rang; 3621 3622 if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) || 3623 !LINK_ACTIVE(ctx))) { 3624 DBG_COUNTER_INC(txq_drain_notready); 3625 return (0); 3626 } 3627 
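	/*
	 * Worked example of the reclaim step below: with
	 * ift_processed = 1000, ift_cleaned = 900 and isc_tx_nsegments = 8,
	 * DESC_RECLAIMABLE() yields 1000 - 900 - 8 = 92 descriptors; with
	 * the (default zero) RECLAIM_THRESH() the reclaim proceeds after
	 * the driver's credits-update method refreshes ift_processed.
	 */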
	reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
	rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use);
	avail = IDXDIFF(pidx, cidx, r->size);
	if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) {
		DBG_COUNTER_INC(txq_drain_flushing);
		for (i = 0; i < avail; i++) {
			m_free(r->items[(cidx + i) & (r->size-1)]);
			r->items[(cidx + i) & (r->size-1)] = NULL;
		}
		return (avail);
	}

	if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
		CALLOUT_LOCK(txq);
		callout_stop(&txq->ift_timer);
		CALLOUT_UNLOCK(txq);
		DBG_COUNTER_INC(txq_drain_oactive);
		return (0);
	}
	if (reclaimed)
		txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	consumed = mcast_sent = bytes_sent = pkt_sent = 0;
	count = MIN(avail, TX_BATCH_SIZE);
#ifdef INVARIANTS
	if (iflib_verbose_debug)
		printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__,
		       avail, ctx->ifc_flags, TXQ_AVAIL(txq));
#endif
	do_prefetch = (ctx->ifc_flags & IFC_PREFETCH);
	avail = TXQ_AVAIL(txq);
	err = 0;
	for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) {
		int rem = do_prefetch ? count - i : 0;

		mp = _ring_peek_one(r, cidx, i, rem);
		MPASS(mp != NULL && *mp != NULL);
		if (__predict_false(*mp == (struct mbuf *)txq)) {
			consumed++;
			reclaimed++;
			continue;
		}
		in_use_prev = txq->ift_in_use;
		err = iflib_encap(txq, mp);
		if (__predict_false(err)) {
			DBG_COUNTER_INC(txq_drain_encapfail);
			/* no room - bail out */
			if (err == ENOBUFS)
				break;
			consumed++;
			/* we can't send this packet - skip it */
			continue;
		}
		consumed++;
		pkt_sent++;
		m = *mp;
		DBG_COUNTER_INC(tx_sent);
		bytes_sent += m->m_pkthdr.len;
		mcast_sent += !!(m->m_flags & M_MCAST);
		avail = TXQ_AVAIL(txq);

		txq->ift_db_pending += (txq->ift_in_use - in_use_prev);
		desc_used += (txq->ift_in_use - in_use_prev);
		ETHER_BPF_MTAP(ifp, m);
		if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
			break;
		rang = iflib_txd_db_check(ctx, txq, false, in_use_prev);
	}

	/* deliberate use of bitwise or to avoid gratuitous short-circuit */
	ring = rang ?
	    false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx));
	iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use);
	if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
	if (mcast_sent)
		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
#ifdef INVARIANTS
	if (iflib_verbose_debug)
		printf("consumed=%d\n", consumed);
#endif
	return (consumed);
}

static uint32_t
iflib_txq_drain_always(struct ifmp_ring *r)
{
	return (1);
}

static uint32_t
iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
{
	int i, avail;
	struct mbuf **mp;
	iflib_txq_t txq;

	txq = r->cookie;

	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	CALLOUT_LOCK(txq);
	callout_stop(&txq->ift_timer);
	CALLOUT_UNLOCK(txq);

	avail = IDXDIFF(pidx, cidx, r->size);
	for (i = 0; i < avail; i++) {
		mp = _ring_peek_one(r, cidx, i, avail - i);
		if (__predict_false(*mp == (struct mbuf *)txq))
			continue;
		m_freem(*mp);
	}
	MPASS(ifmp_ring_is_stalled(r) == 0);
	return (avail);
}

static void
iflib_ifmp_purge(iflib_txq_t txq)
{
	struct ifmp_ring *r;

	r = txq->ift_br;
	r->drain = iflib_txq_drain_free;
	r->can_drain = iflib_txq_drain_always;

	ifmp_ring_check_drainage(r, r->size);

	r->drain = iflib_txq_drain;
	r->can_drain = iflib_txq_can_drain;
}

static void
_task_fn_tx(void *context)
{
	iflib_txq_t txq = context;
	if_ctx_t ctx = txq->ift_ctx;
	struct ifnet *ifp = ctx->ifc_ifp;
	int abdicate = ctx->ifc_sysctl_tx_abdicate;

#ifdef IFLIB_DIAGNOSTICS
	txq->ift_cpu_exec_count[curcpu]++;
#endif
	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
		return;
	if (if_getcapenable(ifp) & IFCAP_NETMAP) {
		if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false))
			netmap_tx_irq(ifp, txq->ift_id);
		IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
		return;
	}
	if (txq->ift_db_pending)
		ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate);
	else if (!abdicate)
		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
	/*
	 * When abdicating, we always need to check drainage, not just when we don't enqueue
	 */
	if (abdicate)
		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
	if (ctx->ifc_flags & IFC_LEGACY)
		IFDI_INTR_ENABLE(ctx);
	else {
#ifdef INVARIANTS
		int rc =
#endif
			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
		KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but it is not implemented in the driver"));
	}
}

static void
_task_fn_rx(void *context)
{
	iflib_rxq_t rxq = context;
	if_ctx_t ctx = rxq->ifr_ctx;
	bool more;
	uint16_t budget;

#ifdef IFLIB_DIAGNOSTICS
	rxq->ifr_cpu_exec_count[curcpu]++;
#endif
	DBG_COUNTER_INC(task_fn_rxs);
	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
		return;
	more = true;
#ifdef DEV_NETMAP
	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) {
		u_int work = 0;
		if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) {
			more = false;
		}
	}
#endif
	budget = ctx->ifc_sysctl_rx_budget;
	if (budget == 0)
		budget = 16;
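	/*
	 * The budget bounds how many packets a single task execution will
	 * pull off the ring before rescheduling itself.  It is runtime
	 * tunable; assuming the standard iflib sysctl node, e.g.
	 * "sysctl dev.<driver>.<unit>.iflib.rx_budget=64" would raise it
	 * from the default of 16.
	 */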
static void
_task_fn_rx(void *context)
{
	iflib_rxq_t rxq = context;
	if_ctx_t ctx = rxq->ifr_ctx;
	bool more;
	uint16_t budget;

#ifdef IFLIB_DIAGNOSTICS
	rxq->ifr_cpu_exec_count[curcpu]++;
#endif
	DBG_COUNTER_INC(task_fn_rxs);
	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
		return;
	more = true;
#ifdef DEV_NETMAP
	if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) {
		u_int work = 0;
		if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) {
			more = false;
		}
	}
#endif
	budget = ctx->ifc_sysctl_rx_budget;
	if (budget == 0)
		budget = 16;	/* XXX */
	if (more == false || (more = iflib_rxeof(rxq, budget)) == false) {
		if (ctx->ifc_flags & IFC_LEGACY)
			IFDI_INTR_ENABLE(ctx);
		else {
#ifdef INVARIANTS
			int rc =
#endif
				IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
			KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver"));
			DBG_COUNTER_INC(rx_intr_enables);
		}
	}
	if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)))
		return;
	if (more)
		GROUPTASK_ENQUEUE(&rxq->ifr_task);
}

static void
_task_fn_admin(void *context)
{
	if_ctx_t ctx = context;
	if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
	iflib_txq_t txq;
	int i;
	bool oactive, running, do_reset, do_watchdog;
	uint32_t reset_on = hz / 2;

	STATE_LOCK(ctx);
	running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
	oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE);
	do_reset = (ctx->ifc_flags & IFC_DO_RESET);
	do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG);
	ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG);
	STATE_UNLOCK(ctx);

	if ((!running && !oactive) &&
	    !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN))
		return;

	CTX_LOCK(ctx);
	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
		CALLOUT_LOCK(txq);
		callout_stop(&txq->ift_timer);
		CALLOUT_UNLOCK(txq);
	}
	if (do_watchdog) {
		ctx->ifc_watchdog_events++;
		IFDI_WATCHDOG_RESET(ctx);
	}
	IFDI_UPDATE_ADMIN_STATUS(ctx);
	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
#ifdef DEV_NETMAP
		reset_on = hz / 2;
		if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP)
			iflib_netmap_timer_adjust(ctx, txq->ift_id, &reset_on);
#endif
		callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu);
	}
	IFDI_LINK_INTR_ENABLE(ctx);
	if (do_reset)
		iflib_if_init_locked(ctx);
	CTX_UNLOCK(ctx);

	if (LINK_ACTIVE(ctx) == 0)
		return;
	for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
}


static void
_task_fn_iov(void *context)
{
	if_ctx_t ctx = context;

	if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))
		return;

	CTX_LOCK(ctx);
	IFDI_VFLR_HANDLE(ctx);
	CTX_UNLOCK(ctx);
}

static int
iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	int err;
	if_int_delay_info_t info;
	if_ctx_t ctx;

	info = (if_int_delay_info_t)arg1;
	ctx = info->iidi_ctx;
	info->iidi_req = req;
	info->iidi_oidp = oidp;
	CTX_LOCK(ctx);
	err = IFDI_SYSCTL_INT_DELAY(ctx, info);
	CTX_UNLOCK(ctx);
	return (err);
}
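/*
 * Usage sketch (hypothetical driver, names invented for illustration):
 * a driver wires this handler up through iflib_add_int_delay_sysctl()
 * (defined later in this file), e.g.
 *
 *	static struct if_int_delay_info fict_tx_int_delay;
 *	...
 *	iflib_add_int_delay_sysctl(ctx, "tx_int_delay",
 *	    "transmit interrupt delay in usecs", &fict_tx_int_delay,
 *	    FICT_TXDCTL, 64);
 *
 * after which reads and writes of the sysctl land in the driver's
 * IFDI_SYSCTL_INT_DELAY method with iidi_offset/iidi_value prefilled.
 */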
/*********************************************************************
 *
 *  IFNET FUNCTIONS
 *
 **********************************************************************/

static void
iflib_if_init_locked(if_ctx_t ctx)
{
	iflib_stop(ctx);
	iflib_init_locked(ctx);
}


static void
iflib_if_init(void *arg)
{
	if_ctx_t ctx = arg;

	CTX_LOCK(ctx);
	iflib_if_init_locked(ctx);
	CTX_UNLOCK(ctx);
}

static int
iflib_if_transmit(if_t ifp, struct mbuf *m)
{
	if_ctx_t ctx = if_getsoftc(ifp);

	iflib_txq_t txq;
	int err, qidx;
	int abdicate = ctx->ifc_sysctl_tx_abdicate;

	if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) {
		DBG_COUNTER_INC(tx_frees);
		m_freem(m);
		return (ENOBUFS);
	}

	MPASS(m->m_nextpkt == NULL);
	qidx = 0;
	if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m))
		qidx = QIDX(ctx, m);
	/*
	 * XXX calculate buf_ring based on flowid (divvy up bits?)
	 */
	txq = &ctx->ifc_txqs[qidx];

#ifdef DRIVER_BACKPRESSURE
	if (txq->ift_closed) {
		while (m != NULL) {
			next = m->m_nextpkt;
			m->m_nextpkt = NULL;
			m_freem(m);
			m = next;
		}
		return (ENOBUFS);
	}
#endif
#ifdef notyet
	qidx = count = 0;
	mp = marr;
	next = m;
	do {
		count++;
		next = next->m_nextpkt;
	} while (next != NULL);

	if (count > nitems(marr))
		if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) {
			/* XXX check nextpkt */
			m_freem(m);
			/* XXX simplify for now */
			DBG_COUNTER_INC(tx_frees);
			return (ENOBUFS);
		}
	for (next = m, i = 0; next != NULL; i++) {
		mp[i] = next;
		next = next->m_nextpkt;
		mp[i]->m_nextpkt = NULL;
	}
#endif
	DBG_COUNTER_INC(tx_seen);
	err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate);

	if (abdicate)
		GROUPTASK_ENQUEUE(&txq->ift_task);
	if (err) {
		if (!abdicate)
			GROUPTASK_ENQUEUE(&txq->ift_task);
		/* support forthcoming later */
#ifdef DRIVER_BACKPRESSURE
		txq->ift_closed = TRUE;
#endif
		ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
		m_freem(m);
	}

	return (err);
}
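/*
 * Queue selection sketch: with, say, NTXQSETS(ctx) == 4 and an RSS-hashed
 * mbuf, QIDX() above reduces the flowid to one of the four txqs so that
 * all packets of a flow stay on one ring; unhashed traffic
 * (M_HASHTYPE_NONE) falls through to txq 0.  The exact reduction is
 * whatever QIDX() is defined to do and may change, hence the XXX above.
 */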
static void
iflib_if_qflush(if_t ifp)
{
	if_ctx_t ctx = if_getsoftc(ifp);
	iflib_txq_t txq = ctx->ifc_txqs;
	int i;

	STATE_LOCK(ctx);
	ctx->ifc_flags |= IFC_QFLUSH;
	STATE_UNLOCK(ctx);
	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
		while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
			iflib_txq_check_drain(txq, 0);
	STATE_LOCK(ctx);
	ctx->ifc_flags &= ~IFC_QFLUSH;
	STATE_UNLOCK(ctx);

	if_qflush(ifp);
}


#define IFCAP_FLAGS (IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \
		     IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \
		     IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO)

static int
iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
{
	if_ctx_t ctx = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	bool avoid_reset = FALSE;
	int err = 0, reinit = 0, bits;

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			if_setflagbits(ifp, IFF_UP, 0);
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
				reinit = 1;
#ifdef INET
			if (!(if_getflags(ifp) & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
		} else
			err = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		CTX_LOCK(ctx);
		if (ifr->ifr_mtu == if_getmtu(ifp)) {
			CTX_UNLOCK(ctx);
			break;
		}
		bits = if_getdrvflags(ifp);
		/* stop the driver and free any clusters before proceeding */
		iflib_stop(ctx);

		if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) {
			STATE_LOCK(ctx);
			if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size)
				ctx->ifc_flags |= IFC_MULTISEG;
			else
				ctx->ifc_flags &= ~IFC_MULTISEG;
			STATE_UNLOCK(ctx);
			err = if_setmtu(ifp, ifr->ifr_mtu);
		}
		iflib_init_locked(ctx);
		STATE_LOCK(ctx);
		if_setdrvflags(ifp, bits);
		STATE_UNLOCK(ctx);
		CTX_UNLOCK(ctx);
		break;
	case SIOCSIFFLAGS:
		CTX_LOCK(ctx);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
				}
			} else
				reinit = 1;
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			iflib_stop(ctx);
		}
		ctx->ifc_if_flags = if_getflags(ifp);
		CTX_UNLOCK(ctx);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			CTX_LOCK(ctx);
			IFDI_INTR_DISABLE(ctx);
			IFDI_MULTI_SET(ctx);
			IFDI_INTR_ENABLE(ctx);
			CTX_UNLOCK(ctx);
		}
		break;
	case SIOCSIFMEDIA:
		CTX_LOCK(ctx);
		IFDI_MEDIA_SET(ctx);
		CTX_UNLOCK(ctx);
		/* falls thru */
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command);
		break;
	case SIOCGI2C:
	{
		struct ifi2creq i2c;

		err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
		if (err != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			err = EINVAL;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			err = EINVAL;
			break;
		}

		if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0)
			err = copyout(&i2c, ifr_data_get_ptr(ifr),
			    sizeof(i2c));
		break;
	}
	case SIOCSIFCAP:
	{
		int mask, setmask;

		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		setmask = 0;
#ifdef TCP_OFFLOAD
		setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6);
#endif
		setmask |= (mask & IFCAP_FLAGS);

		if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
			setmask |= (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
		if ((mask & IFCAP_WOL) &&
		    (if_getcapabilities(ifp) & IFCAP_WOL) != 0)
			setmask |= (mask & (IFCAP_WOL_MCAST|IFCAP_WOL_MAGIC));
		if_vlancap(ifp);
		/*
		 * We want to ensure that traffic has stopped before we change
		 * any of the flags.
		 */
		if (setmask) {
			CTX_LOCK(ctx);
			bits = if_getdrvflags(ifp);
			if (bits & IFF_DRV_RUNNING)
				iflib_stop(ctx);
			STATE_LOCK(ctx);
			if_togglecapenable(ifp, setmask);
			STATE_UNLOCK(ctx);
			if (bits & IFF_DRV_RUNNING)
				iflib_init_locked(ctx);
			STATE_LOCK(ctx);
			if_setdrvflags(ifp, bits);
			STATE_UNLOCK(ctx);
			CTX_UNLOCK(ctx);
		}
		break;
	}
	case SIOCGPRIVATE_0:
	case SIOCSDRVSPEC:
	case SIOCGDRVSPEC:
		CTX_LOCK(ctx);
		err = IFDI_PRIV_IOCTL(ctx, command, data);
		CTX_UNLOCK(ctx);
		break;
	default:
		err = ether_ioctl(ifp, command, data);
		break;
	}
	if (reinit)
		iflib_if_init(ctx);
	return (err);
}
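/*
 * Worked example for the SIOCSIFCAP mask logic above (values illustrative):
 * if the interface currently has TXCSUM|LRO enabled and userland requests
 * TXCSUM only, then mask = old ^ new = IFCAP_LRO; LRO is in IFCAP_FLAGS,
 * so setmask = IFCAP_LRO and if_togglecapenable() flips just that bit.
 * Bits outside IFCAP_FLAGS (and the TOE/WOL special cases) are silently
 * ignored rather than rejected.
 */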
static uint64_t
iflib_if_get_counter(if_t ifp, ift_counter cnt)
{
	if_ctx_t ctx = if_getsoftc(ifp);

	return (IFDI_GET_COUNTER(ctx, cnt));
}

/*********************************************************************
 *
 *  OTHER FUNCTIONS EXPORTED TO THE STACK
 *
 **********************************************************************/

static void
iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
{
	if_ctx_t ctx = if_getsoftc(ifp);

	if ((void *)ctx != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))
		return;

	CTX_LOCK(ctx);
	IFDI_VLAN_REGISTER(ctx, vtag);
	/* Re-init to load the changes */
	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
		iflib_if_init_locked(ctx);
	CTX_UNLOCK(ctx);
}

static void
iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
{
	if_ctx_t ctx = if_getsoftc(ifp);

	if ((void *)ctx != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))
		return;

	CTX_LOCK(ctx);
	IFDI_VLAN_UNREGISTER(ctx, vtag);
	/* Re-init to load the changes */
	if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)
		iflib_if_init_locked(ctx);
	CTX_UNLOCK(ctx);
}

static void
iflib_led_func(void *arg, int onoff)
{
	if_ctx_t ctx = arg;

	CTX_LOCK(ctx);
	IFDI_LED_FUNC(ctx, onoff);
	CTX_UNLOCK(ctx);
}

/*********************************************************************
 *
 *  BUS FUNCTION DEFINITIONS
 *
 **********************************************************************/

int
iflib_device_probe(device_t dev)
{
	pci_vendor_info_t *ent;

	uint16_t	pci_vendor_id, pci_device_id;
	uint16_t	pci_subvendor_id, pci_subdevice_id;
	uint16_t	pci_rev_id;
	if_shared_ctx_t sctx;

	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
		return (ENOTSUP);

	pci_vendor_id = pci_get_vendor(dev);
	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);
	pci_rev_id = pci_get_revid(dev);
	if (sctx->isc_parse_devinfo != NULL)
		sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id);

	ent = sctx->isc_vendor_info;
	while (ent->pvi_vendor_id != 0) {
		if (pci_vendor_id != ent->pvi_vendor_id) {
			ent++;
			continue;
		}
		if ((pci_device_id == ent->pvi_device_id) &&
		    ((pci_subvendor_id == ent->pvi_subvendor_id) ||
		     (ent->pvi_subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->pvi_subdevice_id) ||
		     (ent->pvi_subdevice_id == 0)) &&
		    ((pci_rev_id == ent->pvi_rev_id) ||
		     (ent->pvi_rev_id == 0))) {

			device_set_desc_copy(dev, ent->pvi_name);
			/* this needs to be changed to zero if the bus probing code
			 * ever stops re-probing on best match because the sctx
			 * may have its values overwritten by register calls
			 * in subsequent probes
			 */
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
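/*
 * Illustrative vendor table (device IDs invented): the matching loop above
 * walks a driver-supplied array such as
 *
 *	static pci_vendor_info_t fict_vendor_info_array[] = {
 *		PVID(0x8086, 0x1234, "Fictional Gigabit Ethernet"),
 *		PVID_END
 *	};
 *
 * where zero subvendor/subdevice/revision fields act as wildcards, which
 * is why the comparisons treat 0 as "match any".
 */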
static void
iflib_reset_qvalues(if_ctx_t ctx)
{
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	device_t dev = ctx->ifc_dev;
	int i;

	scctx->isc_txrx_budget_bytes_max = IFLIB_MAX_TX_BYTES;
	scctx->isc_tx_qdepth = IFLIB_DEFAULT_TX_QDEPTH;
	/*
	 * XXX sanity check that ntxd & nrxd are a power of 2
	 */
	if (ctx->ifc_sysctl_ntxqs != 0)
		scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs;
	if (ctx->ifc_sysctl_nrxqs != 0)
		scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs;

	for (i = 0; i < sctx->isc_ntxqs; i++) {
		if (ctx->ifc_sysctl_ntxds[i] != 0)
			scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i];
		else
			scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i];
	}

	for (i = 0; i < sctx->isc_nrxqs; i++) {
		if (ctx->ifc_sysctl_nrxds[i] != 0)
			scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i];
		else
			scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i];
	}

	for (i = 0; i < sctx->isc_nrxqs; i++) {
		if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) {
			device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n",
			    i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]);
			scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i];
		}
		if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) {
			device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n",
			    i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]);
			scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i];
		}
	}

	for (i = 0; i < sctx->isc_ntxqs; i++) {
		if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) {
			device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n",
			    i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]);
			scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i];
		}
		if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) {
			device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n",
			    i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]);
			scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
		}
	}
}
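/*
 * Tuning sketch (assuming the standard iflib per-device sysctl names):
 * the ifc_sysctl_* overrides consumed above come from tunables such as
 *
 *	dev.<driver>.<unit>.iflib.override_ntxqs=4
 *	dev.<driver>.<unit>.iflib.override_ntxds=512
 *
 * set in loader.conf before the device attaches; a value of 0 (the
 * default) means "use the driver's isc_*_default".
 */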
int
iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp)
{
	int err, rid, msix;
	if_ctx_t ctx;
	if_t ifp;
	if_softc_ctx_t scctx;
	int i;
	uint16_t main_txq;
	uint16_t main_rxq;


	ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);

	if (sc == NULL) {
		sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
		device_set_softc(dev, ctx);
		ctx->ifc_flags |= IFC_SC_ALLOCATED;
	}

	ctx->ifc_sctx = sctx;
	ctx->ifc_dev = dev;
	ctx->ifc_softc = sc;

	if ((err = iflib_register(ctx)) != 0) {
		if (ctx->ifc_flags & IFC_SC_ALLOCATED)
			free(sc, M_IFLIB);
		free(ctx, M_IFLIB);
		device_printf(dev, "iflib_register failed %d\n", err);
		return (err);
	}
	iflib_add_device_sysctl_pre(ctx);

	scctx = &ctx->ifc_softc_ctx;
	ifp = ctx->ifc_ifp;

	iflib_reset_qvalues(ctx);
	CTX_LOCK(ctx);
	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
		CTX_UNLOCK(ctx);
		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
		return (err);
	}
	_iflib_pre_assert(scctx);
	ctx->ifc_txrx = *scctx->isc_txrx;

#ifdef INVARIANTS
	MPASS(scctx->isc_capabilities);
	if (scctx->isc_capabilities & IFCAP_TXCSUM)
		MPASS(scctx->isc_tx_csum_flags);
#endif

	if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS);
	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS);

	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;

#ifdef ACPI_DMAR
	if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL)
		ctx->ifc_flags |= IFC_DMAR;
#elif !(defined(__i386__) || defined(__amd64__))
	/* set unconditionally for !x86 */
	ctx->ifc_flags |= IFC_DMAR;
#endif

	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;

	/* XXX change for per-queue sizes */
	device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
	    scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
	for (i = 0; i < sctx->isc_nrxqs; i++) {
		if (!powerof2(scctx->isc_nrxd[i])) {
			/* round down instead? */
			device_printf(dev, "# rx descriptors must be a power of 2\n");
			err = EINVAL;
			goto fail;
		}
	}
	for (i = 0; i < sctx->isc_ntxqs; i++) {
		if (!powerof2(scctx->isc_ntxd[i])) {
			device_printf(dev,
			    "# tx descriptors must be a power of 2\n");
			err = EINVAL;
			goto fail;
		}
	}

	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
	    MAX_SINGLE_PACKET_FRACTION)
		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
		    MAX_SINGLE_PACKET_FRACTION);
	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
	    MAX_SINGLE_PACKET_FRACTION)
		scctx->isc_tx_tso_segments_max = max(1,
		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);

	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
	if (if_getcapabilities(ifp) & IFCAP_TSO) {
		/*
		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
		 * but some MACs do.
		 */
		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
		    IP_MAXPACKET));
		/*
		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
		 * into account.  In the worst case, each of these calls will
		 * add another mbuf and, thus, the requirement for another DMA
		 * segment.  So for best performance, it doesn't make sense to
		 * advertise a maximum of TSO segments that typically will
		 * require defragmentation in iflib_encap().
		 */
		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
	}
	if (scctx->isc_rss_table_size == 0)
		scctx->isc_rss_table_size = 64;
	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;

	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
	/* XXX format name */
	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");

	/* Set up cpu set.  If it fails, use the set of all CPUs. */
	if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) {
		device_printf(dev, "Unable to fetch CPU list\n");
		CPU_COPY(&all_cpus, &ctx->ifc_cpus);
	}
	MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0);

	/*
	** Now set up MSI or MSI-X, should
	** return us the number of supported
	** vectors. (Will be 1 for MSI)
	*/
	if (sctx->isc_flags & IFLIB_SKIP_MSIX) {
		msix = scctx->isc_vectors;
	} else if (scctx->isc_msix_bar != 0)
		/*
		 * The simple fact that isc_msix_bar is not 0 does not mean we
		 * have a good value there that is known to work.
		 */
		msix = iflib_msix_init(ctx);
	else {
		scctx->isc_vectors = 1;
		scctx->isc_ntxqsets = 1;
		scctx->isc_nrxqsets = 1;
		scctx->isc_intr = IFLIB_INTR_LEGACY;
		msix = 0;
	}
	/* Get memory for the station queues */
	if ((err = iflib_queues_alloc(ctx))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		goto fail;
	}

	if ((err = iflib_qset_structures_setup(ctx)))
		goto fail_queues;

	/*
	 * Group taskqueues aren't properly set up until SMP is started,
	 * so we disable interrupts until we can handle them post
	 * SI_SUB_SMP.
	 *
	 * XXX: disabling interrupts doesn't actually work, at least for
	 * the non-MSI case.  When they occur before SI_SUB_SMP completes,
	 * we do null handling and depend on this not causing too large an
	 * interrupt storm.
	 */
	IFDI_INTR_DISABLE(ctx);
	if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) {
		device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err);
		goto fail_intr_free;
	}
	if (msix <= 1) {
		rid = 0;
		if (scctx->isc_intr == IFLIB_INTR_MSI) {
			MPASS(msix == 1);
			rid = 1;
		}
		if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) {
			device_printf(dev, "iflib_legacy_setup failed %d\n", err);
			goto fail_intr_free;
		}
	}

	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);

	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
		goto fail_detach;
	}

	/*
	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
	 * This must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets if_hdrlen to the default value.
	 */
	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	if ((err = iflib_netmap_attach(ctx))) {
		device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err);
		goto fail_detach;
	}
	*ctxp = ctx;

	NETDUMP_SET(ctx->ifc_ifp, iflib);

	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
	iflib_add_device_sysctl_post(ctx);
	ctx->ifc_flags |= IFC_INIT_DONE;
	CTX_UNLOCK(ctx);
	return (0);
fail_detach:
	ether_ifdetach(ctx->ifc_ifp);
fail_intr_free:
	if (scctx->isc_intr == IFLIB_INTR_MSIX || scctx->isc_intr == IFLIB_INTR_MSI)
		pci_release_msi(ctx->ifc_dev);
fail_queues:
	iflib_tx_structures_free(ctx);
	iflib_rx_structures_free(ctx);
fail:
	IFDI_DETACH(ctx);
	CTX_UNLOCK(ctx);
	return (err);
}
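/*
 * Attach-flow sketch: a PCI iflib driver's device_attach routine normally
 * reduces to iflib_device_attach() (below), so the whole sequence is
 *
 *	DEVICE_REGISTER() -> iflib_device_register()
 *	    -> IFDI_ATTACH_PRE() -> queue + interrupt setup
 *	    -> ether_ifattach() -> IFDI_ATTACH_POST()
 *
 * with the error paths above unwinding in reverse order.
 */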
int
iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp,
    struct iflib_cloneattach_ctx *clctx)
{
	int err;
	if_ctx_t ctx;
	if_t ifp;
	if_softc_ctx_t scctx;
	int i;
	void *sc;
	uint16_t main_txq;
	uint16_t main_rxq;

	ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO);
	sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO);
	ctx->ifc_flags |= IFC_SC_ALLOCATED;
	if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL))
		ctx->ifc_flags |= IFC_PSEUDO;

	ctx->ifc_sctx = sctx;
	ctx->ifc_softc = sc;
	ctx->ifc_dev = dev;

	if ((err = iflib_register(ctx)) != 0) {
		device_printf(dev, "%s: iflib_register failed %d\n", __func__, err);
		free(sc, M_IFLIB);
		free(ctx, M_IFLIB);
		return (err);
	}
	iflib_add_device_sysctl_pre(ctx);

	scctx = &ctx->ifc_softc_ctx;
	ifp = ctx->ifc_ifp;

	/*
	 * XXX sanity check that ntxd & nrxd are a power of 2
	 */
	iflib_reset_qvalues(ctx);

	if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
		device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
		return (err);
	}
	if (sctx->isc_flags & IFLIB_GEN_MAC)
		iflib_gen_mac(ctx);
	if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name,
	    clctx->cc_params)) != 0) {
		device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err);
		return (err);
	}
	ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
	ifmedia_add(&ctx->ifc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&ctx->ifc_media, IFM_ETHER | IFM_AUTO);

#ifdef INVARIANTS
	MPASS(scctx->isc_capabilities);
	if (scctx->isc_capabilities & IFCAP_TXCSUM)
		MPASS(scctx->isc_tx_csum_flags);
#endif

	if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_LINKSTATE);
	if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE);

	ifp->if_flags |= IFF_NOGROUP;
	if (sctx->isc_flags & IFLIB_PSEUDO) {
		ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);

		if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
			device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
			goto fail_detach;
		}
		*ctxp = ctx;

		/*
		 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
		 * This must appear after the call to ether_ifattach() because
		 * ether_ifattach() sets if_hdrlen to the default value.
		 */
		if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
			if_setifheaderlen(ifp,
			    sizeof(struct ether_vlan_header));

		if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
		iflib_add_device_sysctl_post(ctx);
		ctx->ifc_flags |= IFC_INIT_DONE;
		return (0);
	}
	_iflib_pre_assert(scctx);
	ctx->ifc_txrx = *scctx->isc_txrx;

	if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets))
		scctx->isc_ntxqsets = scctx->isc_ntxqsets_max;
	if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets))
		scctx->isc_nrxqsets = scctx->isc_nrxqsets_max;

	main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0;
	main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0;

	/* XXX change for per-queue sizes */
	device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
	    scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]);
	for (i = 0; i < sctx->isc_nrxqs; i++) {
		if (!powerof2(scctx->isc_nrxd[i])) {
			/* round down instead? */
			device_printf(dev, "# rx descriptors must be a power of 2\n");
			err = EINVAL;
			goto fail;
		}
	}
	for (i = 0; i < sctx->isc_ntxqs; i++) {
		if (!powerof2(scctx->isc_ntxd[i])) {
			device_printf(dev,
			    "# tx descriptors must be a power of 2\n");
			err = EINVAL;
			goto fail;
		}
	}

	if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] /
	    MAX_SINGLE_PACKET_FRACTION)
		scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] /
		    MAX_SINGLE_PACKET_FRACTION);
	if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] /
	    MAX_SINGLE_PACKET_FRACTION)
		scctx->isc_tx_tso_segments_max = max(1,
		    scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION);

	/* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */
	if (if_getcapabilities(ifp) & IFCAP_TSO) {
		/*
		 * The stack can't handle a TSO size larger than IP_MAXPACKET,
		 * but some MACs do.
		 */
		if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max,
		    IP_MAXPACKET));
		/*
		 * Take maximum number of m_pullup(9)'s in iflib_parse_header()
		 * into account.  In the worst case, each of these calls will
		 * add another mbuf and, thus, the requirement for another DMA
		 * segment.  So for best performance, it doesn't make sense to
		 * advertise a maximum of TSO segments that typically will
		 * require defragmentation in iflib_encap().
		 */
		if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3);
		if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max);
	}
	if (scctx->isc_rss_table_size == 0)
		scctx->isc_rss_table_size = 64;
	scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1;

	GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx);
	/* XXX format name */
	taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin");

	/* XXX --- can support > 1 -- but keep it simple for now */
	scctx->isc_intr = IFLIB_INTR_LEGACY;

	/* Get memory for the station queues */
	if ((err = iflib_queues_alloc(ctx))) {
		device_printf(dev, "Unable to allocate queue memory\n");
		goto fail;
	}

	if ((err = iflib_qset_structures_setup(ctx))) {
		device_printf(dev, "qset structure setup failed %d\n", err);
		goto fail_queues;
	}

	/*
	 * XXX What if anything do we want to do about interrupts?
	 */
	ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
	if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
		device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
		goto fail_detach;
	}

	/*
	 * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported.
	 * This must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets if_hdrlen to the default value.
	 */
	if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU)
		if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/* XXX handle more than one queue */
	for (i = 0; i < scctx->isc_nrxqsets; i++)
		IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl);

	*ctxp = ctx;

	if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter);
	iflib_add_device_sysctl_post(ctx);
	ctx->ifc_flags |= IFC_INIT_DONE;
	return (0);
fail_detach:
	ether_ifdetach(ctx->ifc_ifp);
fail_queues:
	iflib_tx_structures_free(ctx);
	iflib_rx_structures_free(ctx);
fail:
	IFDI_DETACH(ctx);
	return (err);
}

int
iflib_pseudo_deregister(if_ctx_t ctx)
{
	if_t ifp = ctx->ifc_ifp;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	int i, j;
	struct taskqgroup *tqg;
	iflib_fl_t fl;

	/* Unregister VLAN events */
	if (ctx->ifc_vlan_attach_event != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
	if (ctx->ifc_vlan_detach_event != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);

	ether_ifdetach(ifp);
	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
	CTX_LOCK_DESTROY(ctx);
	/* XXX drain any dependent tasks */
	tqg = qgroup_if_io_tqg;
	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
		callout_drain(&txq->ift_timer);
		if (txq->ift_task.gt_uniq != NULL)
			taskqgroup_detach(tqg, &txq->ift_task);
	}
	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
		if (rxq->ifr_task.gt_uniq != NULL)
			taskqgroup_detach(tqg, &rxq->ifr_task);

		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
			free(fl->ifl_rx_bitmap, M_IFLIB);
	}
	tqg = qgroup_if_config_tqg;
	if (ctx->ifc_admin_task.gt_uniq != NULL)
		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
	if (ctx->ifc_vflr_task.gt_uniq != NULL)
		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);

	if_free(ifp);

	iflib_tx_structures_free(ctx);
	iflib_rx_structures_free(ctx);
	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
		free(ctx->ifc_softc, M_IFLIB);
	free(ctx, M_IFLIB);
	return (0);
}

int
iflib_device_attach(device_t dev)
{
	if_ctx_t ctx;
	if_shared_ctx_t sctx;

	if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC)
		return (ENOTSUP);

	pci_enable_busmaster(dev);

	return (iflib_device_register(dev, NULL, sctx, &ctx));
}

int
iflib_device_deregister(if_ctx_t ctx)
{
	if_t ifp = ctx->ifc_ifp;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	device_t dev = ctx->ifc_dev;
	int i, j;
	struct taskqgroup *tqg;
	iflib_fl_t fl;

	/* Make sure VLANs are not using driver */
	if (if_vlantrunkinuse(ifp)) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

	CTX_LOCK(ctx);
	ctx->ifc_in_detach = 1;
	iflib_stop(ctx);
	CTX_UNLOCK(ctx);

	/* Unregister VLAN events */
	if (ctx->ifc_vlan_attach_event != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event);
	if (ctx->ifc_vlan_detach_event != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event);

	iflib_netmap_detach(ifp);
	ether_ifdetach(ifp);
	if (ctx->ifc_led_dev != NULL)
		led_destroy(ctx->ifc_led_dev);
	/* XXX drain any dependent tasks */
	tqg = qgroup_if_io_tqg;
	for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
		callout_drain(&txq->ift_timer);
		if (txq->ift_task.gt_uniq != NULL)
			taskqgroup_detach(tqg, &txq->ift_task);
	}
	for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) {
		if (rxq->ifr_task.gt_uniq != NULL)
			taskqgroup_detach(tqg, &rxq->ifr_task);

		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
			free(fl->ifl_rx_bitmap, M_IFLIB);

	}
	tqg = qgroup_if_config_tqg;
	if (ctx->ifc_admin_task.gt_uniq != NULL)
		taskqgroup_detach(tqg, &ctx->ifc_admin_task);
	if (ctx->ifc_vflr_task.gt_uniq != NULL)
		taskqgroup_detach(tqg, &ctx->ifc_vflr_task);
	CTX_LOCK(ctx);
	IFDI_DETACH(ctx);
	CTX_UNLOCK(ctx);

	/* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */
	CTX_LOCK_DESTROY(ctx);
	device_set_softc(ctx->ifc_dev, NULL);
	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) {
		pci_release_msi(dev);
	}
	if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) {
		iflib_irq_free(ctx, &ctx->ifc_legacy_irq);
	}
	if (ctx->ifc_msix_mem != NULL) {
		bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY,
		    ctx->ifc_softc_ctx.isc_msix_bar, ctx->ifc_msix_mem);
		ctx->ifc_msix_mem = NULL;
	}

	bus_generic_detach(dev);
	if_free(ifp);

	iflib_tx_structures_free(ctx);
	iflib_rx_structures_free(ctx);
	if (ctx->ifc_flags & IFC_SC_ALLOCATED)
		free(ctx->ifc_softc, M_IFLIB);
	free(ctx, M_IFLIB);
	return (0);
}


int
iflib_device_detach(device_t dev)
{
	if_ctx_t ctx = device_get_softc(dev);

	return (iflib_device_deregister(ctx));
}

int
iflib_device_suspend(device_t dev)
{
	if_ctx_t ctx = device_get_softc(dev);

	CTX_LOCK(ctx);
	IFDI_SUSPEND(ctx);
	CTX_UNLOCK(ctx);

	return (bus_generic_suspend(dev));
}

int
iflib_device_shutdown(device_t dev)
{
	if_ctx_t ctx = device_get_softc(dev);

	CTX_LOCK(ctx);
	IFDI_SHUTDOWN(ctx);
	CTX_UNLOCK(ctx);

	return (bus_generic_suspend(dev));
}


int
iflib_device_resume(device_t dev)
{
	if_ctx_t ctx = device_get_softc(dev);
	iflib_txq_t txq = ctx->ifc_txqs;

	CTX_LOCK(ctx);
	IFDI_RESUME(ctx);
	iflib_init_locked(ctx);
	CTX_UNLOCK(ctx);
	for (int i = 0; i < NTXQSETS(ctx); i++, txq++)
		iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);

	return (bus_generic_resume(dev));
}

int
iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
{
	int error;
	if_ctx_t ctx = device_get_softc(dev);

	CTX_LOCK(ctx);
	error = IFDI_IOV_INIT(ctx, num_vfs, params);
	CTX_UNLOCK(ctx);

	return (error);
}

void
iflib_device_iov_uninit(device_t dev)
{
	if_ctx_t ctx = device_get_softc(dev);

	CTX_LOCK(ctx);
	IFDI_IOV_UNINIT(ctx);
	CTX_UNLOCK(ctx);
}

int
iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	int error;
	if_ctx_t ctx = device_get_softc(dev);

	CTX_LOCK(ctx);
	error = IFDI_IOV_VF_ADD(ctx, vfnum, params);
	CTX_UNLOCK(ctx);

	return (error);
}

/*********************************************************************
 *
 *  MODULE FUNCTION DEFINITIONS
 *
 **********************************************************************/

/*
 * - Start a fast taskqueue thread for each core
 * - Start a taskqueue for control operations
 */
static int
iflib_module_init(void)
{
	return (0);
}

static int
iflib_module_event_handler(module_t mod, int what, void *arg)
{
	int err;

	switch (what) {
	case MOD_LOAD:
		if ((err = iflib_module_init()) != 0)
			return (err);
		break;
	case MOD_UNLOAD:
		return (EBUSY);
	default:
		return (EOPNOTSUPP);
	}

	return (0);
}
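/*
 * Hookup sketch (mirroring the usual kernel module boilerplate; the actual
 * declaration lives elsewhere in this file): the handler above is wired
 * into the module system via a moduledata_t, e.g.
 *
 *	static moduledata_t iflib_moduledata = {
 *		"iflib", iflib_module_event_handler, NULL
 *	};
 *	DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
 *
 * so returning EBUSY on MOD_UNLOAD above is what makes iflib effectively
 * unloadable once registered.
 */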
/*********************************************************************
 *
 *  PUBLIC FUNCTION DEFINITIONS
 *     ordered as in iflib.h
 *
 **********************************************************************/


static void
_iflib_assert(if_shared_ctx_t sctx)
{
	MPASS(sctx->isc_tx_maxsize);
	MPASS(sctx->isc_tx_maxsegsize);

	MPASS(sctx->isc_rx_maxsize);
	MPASS(sctx->isc_rx_nsegments);
	MPASS(sctx->isc_rx_maxsegsize);

	MPASS(sctx->isc_nrxd_min[0]);
	MPASS(sctx->isc_nrxd_max[0]);
	MPASS(sctx->isc_nrxd_default[0]);
	MPASS(sctx->isc_ntxd_min[0]);
	MPASS(sctx->isc_ntxd_max[0]);
	MPASS(sctx->isc_ntxd_default[0]);
}

static void
_iflib_pre_assert(if_softc_ctx_t scctx)
{

	MPASS(scctx->isc_txrx->ift_txd_encap);
	MPASS(scctx->isc_txrx->ift_txd_flush);
	MPASS(scctx->isc_txrx->ift_txd_credits_update);
	MPASS(scctx->isc_txrx->ift_rxd_available);
	MPASS(scctx->isc_txrx->ift_rxd_pkt_get);
	MPASS(scctx->isc_txrx->ift_rxd_refill);
	MPASS(scctx->isc_txrx->ift_rxd_flush);
}
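/*
 * Illustrative driver-side counterpart (names hypothetical): the methods
 * checked above come from the if_txrx vector a driver publishes in its
 * softc ctx before IFDI_ATTACH_PRE() returns, e.g.
 *
 *	static struct if_txrx fict_txrx = {
 *		.ift_txd_encap = fict_txd_encap,
 *		.ift_txd_flush = fict_txd_flush,
 *		.ift_txd_credits_update = fict_txd_credits_update,
 *		.ift_rxd_available = fict_rxd_available,
 *		.ift_rxd_pkt_get = fict_rxd_pkt_get,
 *		.ift_rxd_refill = fict_rxd_refill,
 *		.ift_rxd_flush = fict_rxd_flush,
 *		.ift_legacy_intr = fict_intr
 *	};
 *	...
 *	scctx->isc_txrx = &fict_txrx;
 */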
static int
iflib_register(if_ctx_t ctx)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	driver_t *driver = sctx->isc_driver;
	device_t dev = ctx->ifc_dev;
	if_t ifp;

	_iflib_assert(sctx);

	CTX_LOCK_INIT(ctx);
	STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev));
	ifp = ctx->ifc_ifp = if_gethandle(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		return (ENOMEM);
	}

	/*
	 * Initialize our context's device specific methods
	 */
	kobj_init((kobj_t) ctx, (kobj_class_t) driver);
	kobj_class_compile((kobj_class_t) driver);
	driver->refs++;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setsoftc(ifp, ctx);
	if_setdev(ifp, dev);
	if_setinitfn(ifp, iflib_if_init);
	if_setioctlfn(ifp, iflib_if_ioctl);
	if_settransmitfn(ifp, iflib_if_transmit);
	if_setqflushfn(ifp, iflib_if_qflush);
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);

	ctx->ifc_vlan_attach_event =
		EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx,
		    EVENTHANDLER_PRI_FIRST);
	ctx->ifc_vlan_detach_event =
		EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx,
		    EVENTHANDLER_PRI_FIRST);

	ifmedia_init(&ctx->ifc_media, IFM_IMASK,
	    iflib_media_change, iflib_media_status);

	return (0);
}


static int
iflib_queues_alloc(if_ctx_t ctx)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	int nrxqsets = scctx->isc_nrxqsets;
	int ntxqsets = scctx->isc_ntxqsets;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	iflib_fl_t fl = NULL;
	int i, j, cpu, err, txconf, rxconf;
	iflib_dma_info_t ifdip;
	uint32_t *rxqsizes = scctx->isc_rxqsizes;
	uint32_t *txqsizes = scctx->isc_txqsizes;
	uint8_t nrxqs = sctx->isc_nrxqs;
	uint8_t ntxqs = sctx->isc_ntxqs;
	int nfree_lists = sctx->isc_nfl ? sctx->isc_nfl : 1;
	caddr_t *vaddrs;
	uint64_t *paddrs;

	KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1"));
	KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1"));

	/* Allocate the TX ring struct memory */
	if (!(ctx->ifc_txqs =
	    (iflib_txq_t) malloc(sizeof(struct iflib_txq) *
	    ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate TX ring memory\n");
		err = ENOMEM;
		goto fail;
	}

	/* Now allocate the RX */
	if (!(ctx->ifc_rxqs =
	    (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) *
	    nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate RX ring memory\n");
		err = ENOMEM;
		goto rx_fail;
	}

	txq = ctx->ifc_txqs;
	rxq = ctx->ifc_rxqs;

	/*
	 * XXX handle allocation failure
	 */
	for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
		/* Set up some basics */

		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
			device_printf(dev, "failed to allocate iflib_dma_info\n");
			err = ENOMEM;
			goto err_tx_desc;
		}
		txq->ift_ifdi = ifdip;
		for (j = 0; j < ntxqs; j++, ifdip++) {
			if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
				device_printf(dev, "Unable to allocate Descriptor memory\n");
				err = ENOMEM;
				goto err_tx_desc;
			}
			txq->ift_txd_size[j] = scctx->isc_txd_size[j];
			bzero((void *)ifdip->idi_vaddr, txqsizes[j]);
		}
		txq->ift_ctx = ctx;
		txq->ift_id = i;
		if (sctx->isc_flags & IFLIB_HAS_TXCQ) {
			txq->ift_br_offset = 1;
		} else {
			txq->ift_br_offset = 0;
		}
		/* XXX fix this */
		txq->ift_timer.c_cpu = cpu;

		if (iflib_txsd_alloc(txq)) {
			device_printf(dev, "Critical Failure setting up TX buffers\n");
			err = ENOMEM;
			goto err_tx_desc;
		}

		/* Initialize the TX lock */
		snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout",
		    device_get_nameunit(dev), txq->ift_id);
		mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
		callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);

		snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db",
		    device_get_nameunit(dev), txq->ift_id);

		err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
		    iflib_txq_can_drain, M_IFLIB, M_WAITOK);
		if (err) {
			/* XXX free any allocated rings */
			device_printf(dev, "Unable to allocate buf_ring\n");
			goto err_tx_desc;
		}
	}

	for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) {
		/* Set up some basics */

		if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) {
			device_printf(dev, "failed to allocate iflib_dma_info\n");
			err = ENOMEM;
			goto err_tx_desc;
		}

		rxq->ifr_ifdi = ifdip;
		/* XXX this needs to be changed if #rx queues != #tx queues */
		rxq->ifr_ntxqirq = 1;
		rxq->ifr_txqid[0] = i;
		for (j = 0; j < nrxqs; j++, ifdip++) {
			if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) {
				device_printf(dev, "Unable to allocate Descriptor memory\n");
				err = ENOMEM;
				goto err_tx_desc;
			}
			bzero((void *)ifdip->idi_vaddr, rxqsizes[j]);
		}
		rxq->ifr_ctx = ctx;
		rxq->ifr_id = i;
		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
			rxq->ifr_fl_offset = 1;
		} else {
			rxq->ifr_fl_offset = 0;
		}
		rxq->ifr_nfl = nfree_lists;
		if (!(fl =
		    (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate free list memory\n");
			err = ENOMEM;
			goto err_tx_desc;
		}
		rxq->ifr_fl = fl;
		for (j = 0; j < nfree_lists; j++) {
			fl[j].ifl_rxq = rxq;
			fl[j].ifl_id = j;
			fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset];
			fl[j].ifl_rxd_size = scctx->isc_rxd_size[j];
		}
		/* Allocate receive buffers for the ring */
		if (iflib_rxsd_alloc(rxq)) {
			device_printf(dev,
			    "Critical Failure setting up receive buffers\n");
			err = ENOMEM;
			goto err_rx_desc;
		}

		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++)
			fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK|M_ZERO);
	}

	/* TXQs */
	vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
	paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK);
	for (i = 0; i < ntxqsets; i++) {
		iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi;

		for (j = 0; j < ntxqs; j++, di++) {
			vaddrs[i*ntxqs + j] = di->idi_vaddr;
			paddrs[i*ntxqs + j] = di->idi_paddr;
		}
	}
	if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) {
		device_printf(ctx->ifc_dev, "device queue allocation failed\n");
		iflib_tx_structures_free(ctx);
		free(vaddrs, M_IFLIB);
		free(paddrs, M_IFLIB);
		goto err_rx_desc;
	}
	free(vaddrs, M_IFLIB);
	free(paddrs, M_IFLIB);

	/* RXQs */
	vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
	paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK);
	for (i = 0; i < nrxqsets; i++) {
		iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi;

		for (j = 0; j < nrxqs; j++, di++) {
			vaddrs[i*nrxqs + j] = di->idi_vaddr;
			paddrs[i*nrxqs + j] = di->idi_paddr;
		}
	}
	if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) {
		device_printf(ctx->ifc_dev, "device queue allocation failed\n");
		iflib_tx_structures_free(ctx);
		free(vaddrs, M_IFLIB);
		free(paddrs, M_IFLIB);
		goto err_rx_desc;
	}
	free(vaddrs, M_IFLIB);
	free(paddrs, M_IFLIB);

	return (0);

	/* XXX handle allocation failure changes */
err_rx_desc:
err_tx_desc:
rx_fail:
	if (ctx->ifc_rxqs != NULL)
		free(ctx->ifc_rxqs, M_IFLIB);
	ctx->ifc_rxqs = NULL;
	if (ctx->ifc_txqs != NULL)
		free(ctx->ifc_txqs, M_IFLIB);
	ctx->ifc_txqs = NULL;
fail:
	return (err);
}
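/*
 * Layout note for the arrays built above: vaddrs/paddrs are indexed
 * [qset * nqs + q], i.e. all rings belonging to qset 0 come first.  For
 * example, with ntxqs == 2 (descriptor ring plus completion ring), the
 * completion ring of txqset 1 lands at index 1*2 + 1 == 3.
 */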
static int
iflib_tx_structures_setup(if_ctx_t ctx)
{
	iflib_txq_t txq = ctx->ifc_txqs;
	int i;

	for (i = 0; i < NTXQSETS(ctx); i++, txq++)
		iflib_txq_setup(txq);

	return (0);
}

static void
iflib_tx_structures_free(if_ctx_t ctx)
{
	iflib_txq_t txq = ctx->ifc_txqs;
	int i, j;

	for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
		iflib_txq_destroy(txq);
		for (j = 0; j < ctx->ifc_nhwtxqs; j++)
			iflib_dma_free(&txq->ift_ifdi[j]);
	}
	free(ctx->ifc_txqs, M_IFLIB);
	ctx->ifc_txqs = NULL;
	IFDI_QUEUES_FREE(ctx);
}

/*********************************************************************
 *
 *  Initialize all receive rings.
 *
 **********************************************************************/
static int
iflib_rx_structures_setup(if_ctx_t ctx)
{
	iflib_rxq_t rxq = ctx->ifc_rxqs;
	int q;
#if defined(INET6) || defined(INET)
	int i, err;
#endif

	for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) {
#if defined(INET6) || defined(INET)
		tcp_lro_free(&rxq->ifr_lc);
		if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp,
		    TCP_LRO_ENTRIES, min(1024,
		    ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) {
			device_printf(ctx->ifc_dev, "LRO Initialization failed!\n");
			goto fail;
		}
		rxq->ifr_lro_enabled = TRUE;
#endif
		IFDI_RXQ_SETUP(ctx, rxq->ifr_id);
	}
	return (0);
#if defined(INET6) || defined(INET)
fail:
	/*
	 * Free RX software descriptors allocated so far, we will only handle
	 * the rings that completed, the failing case will have
	 * cleaned up for itself.  'q' failed, so it's the terminus.
	 */
	rxq = ctx->ifc_rxqs;
	for (i = 0; i < q; ++i, rxq++) {
		iflib_rx_sds_free(rxq);
		rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
	}
	return (err);
#endif
}

/*********************************************************************
 *
 *  Free all receive rings.
 *
 **********************************************************************/
static void
iflib_rx_structures_free(if_ctx_t ctx)
{
	iflib_rxq_t rxq = ctx->ifc_rxqs;

	for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) {
		iflib_rx_sds_free(rxq);
	}
}

static int
iflib_qset_structures_setup(if_ctx_t ctx)
{
	int err;

	/*
	 * It is expected that the caller takes care of freeing queues if this
	 * fails.
	 */
	if ((err = iflib_tx_structures_setup(ctx)) != 0) {
		device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err);
		return (err);
	}

	if ((err = iflib_rx_structures_setup(ctx)) != 0)
		device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err);

	return (err);
}

int
iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
    driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name)
{

	return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name));
}

#ifdef SMP
static int
find_nth(if_ctx_t ctx, int qid)
{
	cpuset_t cpus;
	int i, cpuid, eqid, count;

	CPU_COPY(&ctx->ifc_cpus, &cpus);
	count = CPU_COUNT(&cpus);
	eqid = qid % count;
	/* clear up to the qid'th bit */
	for (i = 0; i < eqid; i++) {
		cpuid = CPU_FFS(&cpus);
		MPASS(cpuid != 0);
		CPU_CLR(cpuid-1, &cpus);
	}
	cpuid = CPU_FFS(&cpus);
	MPASS(cpuid != 0);
	return (cpuid-1);
}

#ifdef SCHED_ULE
extern struct cpu_group *cpu_top;              /* CPU topology */

static int
find_child_with_core(int cpu, struct cpu_group *grp)
{
	int i;

	if (grp->cg_children == 0)
		return -1;

	MPASS(grp->cg_child);
	for (i = 0; i < grp->cg_children; i++) {
		if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask))
			return i;
	}

	return -1;
}

/*
 * Find the nth "close" core to the specified core.
 * "Close" is defined as the deepest level that shares
 * at least an L2 cache.  With threads, this will be
 * threads on the same core.  If the shared cache is L3
 * or higher, simply return the same core.
 */
static int
find_close_core(int cpu, int core_offset)
{
	struct cpu_group *grp;
	int i;
	int fcpu;
	cpuset_t cs;

	grp = cpu_top;
	if (grp == NULL)
		return cpu;
	i = 0;
	while ((i = find_child_with_core(cpu, grp)) != -1) {
		/* If the child only has one cpu, don't descend */
		if (grp->cg_child[i].cg_count <= 1)
			break;
		grp = &grp->cg_child[i];
	}

	/* If they don't share at least an L2 cache, use the same CPU */
	if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE)
		return cpu;

	/* Now pick one */
	CPU_COPY(&grp->cg_mask, &cs);

	/* Add the selected CPU offset to core offset. */
	for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) {
		if (fcpu - 1 == cpu)
			break;
		CPU_CLR(fcpu - 1, &cs);
	}
	MPASS(fcpu);

	core_offset += i;

	CPU_COPY(&grp->cg_mask, &cs);
	for (i = core_offset % grp->cg_count; i > 0; i--) {
		MPASS(CPU_FFS(&cs));
		CPU_CLR(CPU_FFS(&cs) - 1, &cs);
	}
	MPASS(CPU_FFS(&cs));
	return CPU_FFS(&cs) - 1;
}
#else
static int
find_close_core(int cpu, int core_offset __unused)
{
	return cpu;
}
#endif

static int
get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid)
{
	switch (type) {
	case IFLIB_INTR_TX:
		/* TX queues get cores which share at least an L2 cache with the corresponding RX queue */
		/* XXX handle multiple RX threads per core and more than two cores per L2 group */
		return qid / CPU_COUNT(&ctx->ifc_cpus) + 1;
	case IFLIB_INTR_RX:
	case IFLIB_INTR_RXTX:
		/* RX queues get the specified core */
		return qid / CPU_COUNT(&ctx->ifc_cpus);
	default:
		return -1;
	}
}
#else
#define get_core_offset(ctx, type, qid)	CPU_FIRST()
#define find_close_core(cpuid, tid)	CPU_FIRST()
#define find_nth(ctx, gid)		CPU_FIRST()
#endif
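/*
 * Worked example (hypothetical topology): with 8 CPUs in ifc_cpus and SMT
 * siblings sharing an L2 cache, RX queue 2 resolves to find_nth(ctx, 2) ==
 * CPU 2 with core offset 0, while TX queue 2 gets core offset 1 and
 * find_close_core() then steers it to CPU 2's sibling thread, so each
 * TX/RX queue pair shares at least an L2 cache.
 */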
/* Just to avoid copy/paste */
static inline int
iflib_irq_set_affinity(if_ctx_t ctx, int irq, iflib_intr_type_t type, int qid,
    struct grouptask *gtask, struct taskqgroup *tqg, void *uniq, const char *name)
{
	int cpuid;
	int err, tid;

	cpuid = find_nth(ctx, qid);
	tid = get_core_offset(ctx, type, qid);
	MPASS(tid >= 0);
	cpuid = find_close_core(cpuid, tid);
	err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, irq, name);
	if (err) {
		device_printf(ctx->ifc_dev, "taskqgroup_attach_cpu failed %d\n", err);
		return (err);
	}
#ifdef notyet
	if (cpuid > ctx->ifc_cpuid_highest)
		ctx->ifc_cpuid_highest = cpuid;
#endif
	return 0;
}

int
iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid,
    iflib_intr_type_t type, driver_filter_t *filter,
    void *filter_arg, int qid, const char *name)
{
	struct grouptask *gtask;
	struct taskqgroup *tqg;
	iflib_filter_info_t info;
	gtask_fn_t *fn;
	int tqrid, err;
	driver_filter_t *intr_fast;
	void *q;

	info = &ctx->ifc_filter_info;
	tqrid = rid;

	switch (type) {
	/* XXX merge tx/rx for netmap? */
	case IFLIB_INTR_TX:
		q = &ctx->ifc_txqs[qid];
		info = &ctx->ifc_txqs[qid].ift_filter_info;
		gtask = &ctx->ifc_txqs[qid].ift_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_tx;
		intr_fast = iflib_fast_intr;
		GROUPTASK_INIT(gtask, 0, fn, q);
		ctx->ifc_flags |= IFC_NETMAP_TX_IRQ;
		break;
	case IFLIB_INTR_RX:
		q = &ctx->ifc_rxqs[qid];
		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
		gtask = &ctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		intr_fast = iflib_fast_intr;
		GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	case IFLIB_INTR_RXTX:
		q = &ctx->ifc_rxqs[qid];
		info = &ctx->ifc_rxqs[qid].ifr_filter_info;
		gtask = &ctx->ifc_rxqs[qid].ifr_task;
		tqg = qgroup_if_io_tqg;
		fn = _task_fn_rx;
		intr_fast = iflib_fast_intr_rxtx;
		GROUPTASK_INIT(gtask, 0, fn, q);
		break;
	case IFLIB_INTR_ADMIN:
		q = ctx;
		tqrid = -1;
		info = &ctx->ifc_filter_info;
		gtask = &ctx->ifc_admin_task;
		tqg = qgroup_if_config_tqg;
		fn = _task_fn_admin;
		intr_fast = iflib_fast_intr_ctx;
		break;
	default:
		panic("unknown net intr type");
	}

	info->ifi_filter = filter;
	info->ifi_filter_arg = filter_arg;
	info->ifi_task = gtask;
	info->ifi_ctx = q;

	err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name);
	if (err != 0) {
		device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err);
		return (err);
	}
	if (type == IFLIB_INTR_ADMIN)
		return (0);

	if (tqrid != -1) {
		err = iflib_irq_set_affinity(ctx, rman_get_start(irq->ii_res), type, qid, gtask, tqg, q, name);
		if (err)
			return (err);
	} else {
		taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);
	}

	return (0);
}
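/*
 * Usage sketch (hypothetical driver): a typical IFDI_MSIX_INTR_ASSIGN
 * implementation calls this once per queue vector, e.g.
 *
 *	snprintf(buf, sizeof(buf), "rxq%d", i);
 *	err = iflib_irq_alloc_generic(ctx, &rxq->fict_irq, i + 1,
 *	    IFLIB_INTR_RX, fict_msix_que, rxq, i, buf);
 *
 * where the rid of i + 1 assumes vector 0 is reserved for the admin/link
 * interrupt and fict_msix_que is the driver's queue filter routine.
 */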
*/ 5675 case IFLIB_INTR_TX: 5676 q = &ctx->ifc_txqs[qid]; 5677 info = &ctx->ifc_txqs[qid].ift_filter_info; 5678 gtask = &ctx->ifc_txqs[qid].ift_task; 5679 tqg = qgroup_if_io_tqg; 5680 fn = _task_fn_tx; 5681 intr_fast = iflib_fast_intr; 5682 GROUPTASK_INIT(gtask, 0, fn, q); 5683 ctx->ifc_flags |= IFC_NETMAP_TX_IRQ; 5684 break; 5685 case IFLIB_INTR_RX: 5686 q = &ctx->ifc_rxqs[qid]; 5687 info = &ctx->ifc_rxqs[qid].ifr_filter_info; 5688 gtask = &ctx->ifc_rxqs[qid].ifr_task; 5689 tqg = qgroup_if_io_tqg; 5690 fn = _task_fn_rx; 5691 intr_fast = iflib_fast_intr; 5692 GROUPTASK_INIT(gtask, 0, fn, q); 5693 break; 5694 case IFLIB_INTR_RXTX: 5695 q = &ctx->ifc_rxqs[qid]; 5696 info = &ctx->ifc_rxqs[qid].ifr_filter_info; 5697 gtask = &ctx->ifc_rxqs[qid].ifr_task; 5698 tqg = qgroup_if_io_tqg; 5699 fn = _task_fn_rx; 5700 intr_fast = iflib_fast_intr_rxtx; 5701 GROUPTASK_INIT(gtask, 0, fn, q); 5702 break; 5703 case IFLIB_INTR_ADMIN: 5704 q = ctx; 5705 tqrid = -1; 5706 info = &ctx->ifc_filter_info; 5707 gtask = &ctx->ifc_admin_task; 5708 tqg = qgroup_if_config_tqg; 5709 fn = _task_fn_admin; 5710 intr_fast = iflib_fast_intr_ctx; 5711 break; 5712 default: 5713 panic("unknown net intr type"); 5714 } 5715 5716 info->ifi_filter = filter; 5717 info->ifi_filter_arg = filter_arg; 5718 info->ifi_task = gtask; 5719 info->ifi_ctx = q; 5720 5721 err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name); 5722 if (err != 0) { 5723 device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err); 5724 return (err); 5725 } 5726 if (type == IFLIB_INTR_ADMIN) 5727 return (0); 5728 5729 if (tqrid != -1) { 5730 err = iflib_irq_set_affinity(ctx, rman_get_start(irq->ii_res), type, qid, gtask, tqg, q, name); 5731 if (err) 5732 return (err); 5733 } else { 5734 taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name); 5735 } 5736 5737 return (0); 5738 } 5739 5740 void 5741 iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, const char *name) 5742 { 5743 struct grouptask *gtask; 5744 struct taskqgroup *tqg; 5745 gtask_fn_t *fn; 5746 void *q; 5747 int irq_num = -1; 5748 int err; 5749 5750 switch (type) { 5751 case IFLIB_INTR_TX: 5752 q = &ctx->ifc_txqs[qid]; 5753 gtask = &ctx->ifc_txqs[qid].ift_task; 5754 tqg = qgroup_if_io_tqg; 5755 fn = _task_fn_tx; 5756 if (irq != NULL) 5757 irq_num = rman_get_start(irq->ii_res); 5758 break; 5759 case IFLIB_INTR_RX: 5760 q = &ctx->ifc_rxqs[qid]; 5761 gtask = &ctx->ifc_rxqs[qid].ifr_task; 5762 tqg = qgroup_if_io_tqg; 5763 fn = _task_fn_rx; 5764 if (irq != NULL) 5765 irq_num = rman_get_start(irq->ii_res); 5766 break; 5767 case IFLIB_INTR_IOV: 5768 q = ctx; 5769 gtask = &ctx->ifc_vflr_task; 5770 tqg = qgroup_if_config_tqg; 5771 fn = _task_fn_iov; 5772 break; 5773 default: 5774 panic("unknown net intr type"); 5775 } 5776 GROUPTASK_INIT(gtask, 0, fn, q); 5777 if (irq_num != -1) { 5778 err = iflib_irq_set_affinity(ctx, irq_num, type, qid, gtask, tqg, q, name); 5779 if (err) 5780 taskqgroup_attach(tqg, gtask, q, irq_num, name); 5781 } 5782 else { 5783 taskqgroup_attach(tqg, gtask, q, irq_num, name); 5784 } 5785 } 5786 5787 void 5788 iflib_irq_free(if_ctx_t ctx, if_irq_t irq) 5789 { 5790 if (irq->ii_tag) 5791 bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag); 5792 5793 if (irq->ii_res) 5794 bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, irq->ii_rid, irq->ii_res); 5795 } 5796 5797 static int 5798 iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name) 5799 { 5800 iflib_txq_t txq = 
static int
iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg,
    int *rid, const char *name)
{
	iflib_txq_t txq = ctx->ifc_txqs;
	iflib_rxq_t rxq = ctx->ifc_rxqs;
	if_irq_t irq = &ctx->ifc_legacy_irq;
	iflib_filter_info_t info;
	struct grouptask *gtask;
	struct taskqgroup *tqg;
	gtask_fn_t *fn;
	int tqrid;
	void *q;
	int err;

	q = &ctx->ifc_rxqs[0];
	info = &rxq[0].ifr_filter_info;
	gtask = &rxq[0].ifr_task;
	tqg = qgroup_if_io_tqg;
	tqrid = irq->ii_rid = *rid;
	fn = _task_fn_rx;

	ctx->ifc_flags |= IFC_LEGACY;
	info->ifi_filter = filter;
	info->ifi_filter_arg = filter_arg;
	info->ifi_task = gtask;
	info->ifi_ctx = ctx;

	/* We allocate a single interrupt resource */
	if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx,
	    NULL, info, name)) != 0)
		return (err);
	GROUPTASK_INIT(gtask, 0, fn, q);
	taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name);

	GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
	taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq,
	    rman_get_start(irq->ii_res), "tx");
	return (0);
}

void
iflib_led_create(if_ctx_t ctx)
{

	ctx->ifc_led_dev = led_create(iflib_led_func, ctx,
	    device_get_nameunit(ctx->ifc_dev));
}

void
iflib_tx_intr_deferred(if_ctx_t ctx, int txqid)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
}

void
iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task);
}

void
iflib_admin_intr_deferred(if_ctx_t ctx)
{
#ifdef INVARIANTS
	struct grouptask *gtask;

	gtask = &ctx->ifc_admin_task;
	MPASS(gtask != NULL && gtask->gt_taskqueue != NULL);
#endif

	GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
}

void
iflib_iov_intr_deferred(if_ctx_t ctx)
{

	GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task);
}

void
iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name)
{

	taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name);
}

void
iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn,
    const char *name)
{

	GROUPTASK_INIT(gtask, 0, fn, ctx);
	taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name);
}

void
iflib_config_gtask_deinit(struct grouptask *gtask)
{

	taskqgroup_detach(qgroup_if_config_tqg, gtask);
}

void
iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate)
{
	if_t ifp = ctx->ifc_ifp;
	iflib_txq_t txq = ctx->ifc_txqs;

	if_setbaudrate(ifp, baudrate);
	if (baudrate >= IF_Gbps(10)) {
		STATE_LOCK(ctx);
		ctx->ifc_flags |= IFC_PREFETCH;
		STATE_UNLOCK(ctx);
	}
	/* If link down, disable watchdog */
	if ((ctx->ifc_link_state == LINK_STATE_UP) &&
	    (link_state == LINK_STATE_DOWN)) {
		for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
			txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	}
	ctx->ifc_link_state = link_state;
	if_link_state_change(ifp, link_state);
}

static int
iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
{
	int credits;
#ifdef INVARIANTS
	int credits_pre = txq->ift_cidx_processed;
#endif

	if (ctx->isc_txd_credits_update == NULL)
		return (0);

	if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc,
	    txq->ift_id, true)) == 0)
		return (0);

	txq->ift_processed += credits;
	txq->ift_cidx_processed += credits;

	MPASS(credits_pre + credits == txq->ift_cidx_processed);
	if (txq->ift_cidx_processed >= txq->ift_size)
		txq->ift_cidx_processed -= txq->ift_size;
	return (credits);
}
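/*
 * Illustrative sketch (excluded from the build): the consumer-index update
 * in iflib_tx_credits_update() above is a ring wraparound.  A minimal
 * user-space model, assuming the ring size need not be a power of two
 * (hence subtraction rather than masking, matching the code above):
 */
#if 0
#include <assert.h>

static unsigned
ring_advance(unsigned cidx, unsigned credits, unsigned ring_size)
{
	cidx += credits;
	if (cidx >= ring_size)
		cidx -= ring_size;	/* credits never exceed ring_size */
	return (cidx);
}

int
main(void)
{
	assert(ring_advance(1020, 8, 1024) == 4);
	assert(ring_advance(0, 1023, 1024) == 1023);
	return (0);
}
#endif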
static int
iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
{

	return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
	    budget));
}

void
iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
    const char *description, if_int_delay_info_t info,
    int offset, int value)
{
	info->iidi_ctx = ctx;
	info->iidi_offset = offset;
	info->iidi_value = value;
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
	    info, 0, iflib_sysctl_int_delay, "I", description);
}

struct sx *
iflib_ctx_lock_get(if_ctx_t ctx)
{

	return (&ctx->ifc_ctx_sx);
}
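/*
 * Illustrative sketch (excluded from the build): a driver registering an
 * interrupt-moderation knob with iflib_add_int_delay_sysctl() above.  The
 * register offset MY_RDTR, default MY_RDTR_DEFAULT, and the callback name
 * are hypothetical.
 */
#if 0
static struct if_int_delay_info my_rx_int_delay;

static void
my_if_attach_post(if_ctx_t ctx)
{
	/* Exposes dev.<unit>.rx_int_delay backed by the device register. */
	iflib_add_int_delay_sysctl(ctx, "rx_int_delay",
	    "receive interrupt delay in usecs", &my_rx_int_delay,
	    MY_RDTR, MY_RDTR_DEFAULT);
}
#endif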
static int
iflib_msix_init(if_ctx_t ctx)
{
	device_t dev = ctx->ifc_dev;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs;
	int iflib_num_tx_queues, iflib_num_rx_queues;
	int err, admincnt, bar;

	iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs;
	iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs;

	device_printf(dev, "msix_init qsets capped at %d\n",
	    imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets));

	bar = ctx->ifc_softc_ctx.isc_msix_bar;
	admincnt = sctx->isc_admin_intrcnt;
	/* Override by tunable */
	if (scctx->isc_disable_msix)
		goto msi;

	/*
	 * bar == -1 => "trust me I know what I'm doing"
	 * Some drivers are for hardware that is so shoddily
	 * documented that no one knows which BARs are which,
	 * so the developer has to map all of them.  This hack
	 * allows such hardware to use MSI-X in this framework.
	 */
	if (bar != -1) {
		ctx->ifc_msix_mem = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &bar, RF_ACTIVE);
		if (ctx->ifc_msix_mem == NULL) {
			/* May not be enabled */
			device_printf(dev, "Unable to map MSIX table\n");
			goto msi;
		}
	}
	/* First try MSI-X */
	if ((msgs = pci_msix_count(dev)) == 0) { /* system has MSI-X disabled */
		device_printf(dev, "System has MSIX disabled\n");
		bus_release_resource(dev, SYS_RES_MEMORY,
		    bar, ctx->ifc_msix_mem);
		ctx->ifc_msix_mem = NULL;
		goto msi;
	}
#if IFLIB_DEBUG
	/* use only 1 qset in debug mode */
	queuemsgs = min(msgs - admincnt, 1);
#else
	queuemsgs = msgs - admincnt;
#endif
#ifdef RSS
	queues = imin(queuemsgs, rss_getnumbuckets());
#else
	queues = queuemsgs;
#endif
	queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues);
	device_printf(dev, "pxm cpus: %d queue msgs: %d admincnt: %d\n",
	    CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt);
#ifdef RSS
	/* If we're doing RSS, clamp at the number of RSS buckets */
	if (queues > rss_getnumbuckets())
		queues = rss_getnumbuckets();
#endif
	/* The admin vectors are already accounted for in queuemsgs. */
	if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs)
		rx_queues = iflib_num_rx_queues;
	else
		rx_queues = queues;

	if (rx_queues > scctx->isc_nrxqsets)
		rx_queues = scctx->isc_nrxqsets;

	/*
	 * We want this to be all logical CPUs by default
	 */
	if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues)
		tx_queues = iflib_num_tx_queues;
	else
		tx_queues = mp_ncpus;

	if (tx_queues > scctx->isc_ntxqsets)
		tx_queues = scctx->isc_ntxqsets;

	if (ctx->ifc_sysctl_qs_eq_override == 0) {
#ifdef INVARIANTS
		if (tx_queues != rx_queues)
			device_printf(dev,
			    "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n",
			    min(rx_queues, tx_queues), min(rx_queues, tx_queues));
#endif
		tx_queues = min(rx_queues, tx_queues);
		rx_queues = min(rx_queues, tx_queues);
	}

	device_printf(dev, "using %d rx queues %d tx queues\n",
	    rx_queues, tx_queues);

	/* TX queues share the RX vectors, so only RX queues and admin count. */
	vectors = rx_queues + admincnt;
	if ((err = pci_alloc_msix(dev, &vectors)) == 0) {
		device_printf(dev,
		    "Using MSIX interrupts with %d vectors\n", vectors);
		scctx->isc_vectors = vectors;
		scctx->isc_nrxqsets = rx_queues;
		scctx->isc_ntxqsets = tx_queues;
		scctx->isc_intr = IFLIB_INTR_MSIX;

		return (vectors);
	} else {
		device_printf(dev,
		    "failed to allocate %d msix vectors, err: %d - using MSI\n",
		    vectors, err);
		bus_release_resource(dev, SYS_RES_MEMORY, bar,
		    ctx->ifc_msix_mem);
		ctx->ifc_msix_mem = NULL;
	}
msi:
	vectors = pci_msi_count(dev);
	scctx->isc_nrxqsets = 1;
	scctx->isc_ntxqsets = 1;
	scctx->isc_vectors = vectors;
	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) {
		device_printf(dev, "Using an MSI interrupt\n");
		scctx->isc_intr = IFLIB_INTR_MSI;
	} else {
		scctx->isc_vectors = 1;
		device_printf(dev, "Using a Legacy interrupt\n");
		scctx->isc_intr = IFLIB_INTR_LEGACY;
	}

	return (vectors);
}
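/*
 * Illustrative sketch (excluded from the build): the vector negotiation in
 * iflib_msix_init() above in miniature.  Given the MSI-X message count
 * reported by the device, the number of usable CPUs, and the admin
 * interrupt count, this reproduces the "queues = min(available messages,
 * CPUs)" sizing; size_rx_vectors is an invented name.
 */
#if 0
#include <stdio.h>

static int
size_rx_vectors(int msix_messages, int ncpus, int admincnt)
{
	int queuemsgs = msix_messages - admincnt;
	int queues = queuemsgs < ncpus ? queuemsgs : ncpus;

	return (queues + admincnt);	/* what we'd pass to pci_alloc_msix() */
}

int
main(void)
{
	/* e.g. a device advertising 64 messages on an 8-CPU box, 1 admin */
	printf("request %d vectors\n", size_rx_vectors(64, 8, 1));	/* 9 */
	return (0);
}
#endif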
static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" };

static int
mp_ring_state_handler(SYSCTL_HANDLER_ARGS)
{
	int rc;
	uint16_t *state = ((uint16_t *)oidp->oid_arg1);
	struct sbuf *sb;
	const char *ring_state = "UNKNOWN";

	/* XXX needed ? */
	rc = sysctl_wire_old_buffer(req, 0);
	MPASS(rc == 0);
	if (rc != 0)
		return (rc);
	sb = sbuf_new_for_sysctl(NULL, NULL, 80, req);
	MPASS(sb != NULL);
	if (sb == NULL)
		return (ENOMEM);
	if (state[3] <= 3)
		ring_state = ring_states[state[3]];

	sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
	    state[0], state[1], state[2], ring_state);
	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}

enum iflib_ndesc_handler {
	IFLIB_NTXD_HANDLER,
	IFLIB_NRXD_HANDLER,
};

static int
mp_ndesc_handler(SYSCTL_HANDLER_ARGS)
{
	if_ctx_t ctx = (void *)arg1;
	enum iflib_ndesc_handler type = arg2;
	char buf[256] = {0};
	qidx_t *ndesc;
	char *p, *next;
	int nqs, rc, i;

	MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER);

	nqs = 8;
	switch (type) {
	case IFLIB_NTXD_HANDLER:
		ndesc = ctx->ifc_sysctl_ntxds;
		if (ctx->ifc_sctx)
			nqs = ctx->ifc_sctx->isc_ntxqs;
		break;
	case IFLIB_NRXD_HANDLER:
		ndesc = ctx->ifc_sysctl_nrxds;
		if (ctx->ifc_sctx)
			nqs = ctx->ifc_sctx->isc_nrxqs;
		break;
	default:
		panic("unhandled type");
	}
	if (nqs == 0)
		nqs = 8;

	/* Export the current values as a comma-separated list. */
	for (i = 0; i < 8 && i < nqs; i++) {
		if (i)
			strcat(buf, ",");
		sprintf(strchr(buf, 0), "%d", ndesc[i]);
	}

	rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (rc || req->newptr == NULL)
		return (rc);

	/* Parse the new list back into the per-queue descriptor counts. */
	for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p;
	    i++, p = strsep(&next, " ,")) {
		ndesc[i] = strtoul(p, NULL, 10);
	}

	return (rc);
}
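/*
 * Illustrative sketch (excluded from the build): the strsep() idiom used by
 * mp_ndesc_handler() above, in a standalone user-space form.  strsep()
 * consumes the buffer in place, which is why the handler only parses the
 * string after sysctl_handle_string() has copied the new value in.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	char buf[] = "1024,2048,4096";
	char *next = buf, *p;
	unsigned ndesc[8];
	int i;

	for (i = 0, p = strsep(&next, " ,"); i < 8 && p != NULL;
	    i++, p = strsep(&next, " ,")) {
		ndesc[i] = strtoul(p, NULL, 10);
		printf("queue %d: %u descriptors\n", i, ndesc[i]);
	}
	return (0);
}
#endif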
#define NAME_BUFLEN 32
static void
iflib_add_device_sysctl_pre(if_ctx_t ctx)
{
	device_t dev = iflib_get_dev(ctx);
	struct sysctl_oid_list *child, *oid_list;
	struct sysctl_ctx_list *ctx_list;
	struct sysctl_oid *node;

	ctx_list = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child,
	    OID_AUTO, "iflib", CTLFLAG_RD, NULL, "IFLIB fields");
	oid_list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
	    CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
	    "driver version");

	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
	    "# of txqs to use, 0 => use default #");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
	    "# of rxqs to use, 0 => use default #");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
	    "permit #txq != #rxq");
	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
	    CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
	    "disable MSIX (default 0)");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
	    "set the rx budget");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0,
	    "cause tx to abdicate instead of running to completion");

	/* XXX change for per-queue sizes */
	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
	    CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
	    mp_ndesc_handler, "A",
	    "list of # of tx descriptors to use, 0 = use default #");
	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
	    CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
	    mp_ndesc_handler, "A",
	    "list of # of rx descriptors to use, 0 = use default #");
}
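/*
 * Illustrative sketch (excluded from the build): the "0 => use default"
 * convention shared by the override_* knobs above.  The effective value is
 * chosen per queue as follows; resolve_ndesc is an invented name.
 */
#if 0
static unsigned
resolve_ndesc(unsigned override, unsigned driver_default)
{
	/* An unset (zero) tunable defers to the driver's default. */
	return (override != 0 ? override : driver_default);
}
#endif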
static void
iflib_add_device_sysctl_post(if_ctx_t ctx)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = iflib_get_dev(ctx);
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx_list;
	iflib_fl_t fl;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	int i, j;
	char namebuf[NAME_BUFLEN];
	char *qfmt;
	struct sysctl_oid *queue_node, *fl_node, *node;
	struct sysctl_oid_list *queue_list, *fl_list;

	ctx_list = device_get_sysctl_ctx(dev);
	node = ctx->ifc_sysctl_node;
	child = SYSCTL_CHILDREN(node);

	if (scctx->isc_ntxqsets > 100)
		qfmt = "txq%03d";
	else if (scctx->isc_ntxqsets > 10)
		qfmt = "txq%02d";
	else
		qfmt = "txq%d";
	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
#if MEMORY_LOGGING
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
		    CTLFLAG_RD,
		    &txq->ift_dequeued, "total mbufs freed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
		    CTLFLAG_RD,
		    &txq->ift_enqueued, "total mbufs enqueued");
#endif
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
		    CTLFLAG_RD,
		    &txq->ift_mbuf_defrag, "# of times m_defrag was called");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
		    CTLFLAG_RD,
		    &txq->ift_pullups, "# of times m_pullup was called");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
		    CTLFLAG_RD,
		    &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD,
		    &txq->ift_no_desc_avail, "# of times no descriptors were available");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
		    CTLFLAG_RD,
		    &txq->ift_map_failed, "# of times dma map failed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
		    CTLFLAG_RD,
		    &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
		    CTLFLAG_RD,
		    &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
		    CTLFLAG_RD,
		    &txq->ift_pidx, 1, "Producer Index");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
		    CTLFLAG_RD,
		    &txq->ift_cidx, 1, "Consumer Index");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
		    CTLFLAG_RD,
		    &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
		    CTLFLAG_RD,
		    &txq->ift_in_use, 1, "descriptors in use");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
		    CTLFLAG_RD,
		    &txq->ift_processed, "descriptors processed for clean");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
		    CTLFLAG_RD,
		    &txq->ift_cleaned, "total cleaned");
		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
		    CTLTYPE_STRING | CTLFLAG_RD,
		    __DEVOLATILE(uint64_t *, &txq->ift_br->state),
		    0, mp_ring_state_handler, "A", "soft ring state");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
		    CTLFLAG_RD, &txq->ift_br->enqueues,
		    "# of enqueues to the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
		    CTLFLAG_RD, &txq->ift_br->drops,
		    "# of drops in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
		    CTLFLAG_RD, &txq->ift_br->starts,
		    "# of normal consumer starts in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
		    CTLFLAG_RD, &txq->ift_br->stalls,
		    "# of consumer stalls in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
		    CTLFLAG_RD, &txq->ift_br->restarts,
		    "# of consumer restarts in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
		    CTLFLAG_RD, &txq->ift_br->abdications,
		    "# of consumer abdications in the mp_ring for this queue");
	}

	if (scctx->isc_nrxqsets > 100)
		qfmt = "rxq%03d";
	else if (scctx->isc_nrxqsets > 10)
		qfmt = "rxq%02d";
	else
		qfmt = "rxq%d";
	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx",
			    CTLFLAG_RD,
			    &rxq->ifr_cq_pidx, 1, "Producer Index");
			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
			    CTLFLAG_RD,
			    &rxq->ifr_cq_cidx, 1, "Consumer Index");
		}

		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf,
			    CTLFLAG_RD, NULL, "freelist Name");
			fl_list = SYSCTL_CHILDREN(fl_node);
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
			    CTLFLAG_RD,
			    &fl->ifl_pidx, 1, "Producer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
			    CTLFLAG_RD,
			    &fl->ifl_cidx, 1, "Consumer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
			    CTLFLAG_RD,
			    &fl->ifl_credits, 1, "credits available");
#if MEMORY_LOGGING
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
			    CTLFLAG_RD,
			    &fl->ifl_m_enqueued, "mbufs allocated");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
			    CTLFLAG_RD,
			    &fl->ifl_m_dequeued, "mbufs freed");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
			    CTLFLAG_RD,
			    &fl->ifl_cl_enqueued, "clusters allocated");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
			    CTLFLAG_RD,
			    &fl->ifl_cl_dequeued, "clusters freed");
#endif
		}
	}
}
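/*
 * Illustrative sketch (excluded from the build): iflib_fixup_rx() below
 * exists because strict-alignment architectures fault on misaligned 32-bit
 * loads.  The 14-byte Ethernet header leaves the IP header only 2-byte
 * aligned in a 4-byte-aligned buffer, so the frame is shifted forward by
 * another ETHER_HDR_LEN; this user-space model shows the offset arithmetic
 * only.
 */
#if 0
#include <stdio.h>

#define ETHER_HDR_LEN	14

int
main(void)
{
	/* Receive buffers start 4-byte aligned... */
	unsigned long buf_addr = 0x1000;

	/* ...so the IP header lands at a 2-byte-aligned offset. */
	printf("ip header alignment before fixup: %lu\n",
	    (buf_addr + ETHER_HDR_LEN) % 4);			/* 2 */
	/* Shifting the whole frame by ETHER_HDR_LEN re-aligns it. */
	printf("ip header alignment after fixup: %lu\n",
	    (buf_addr + ETHER_HDR_LEN + ETHER_HDR_LEN) % 4);	/* 0 */
	return (0);
}
#endif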
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
iflib_fixup_rx(struct mbuf *m)
{
	struct mbuf *n;

	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/* Room in the cluster: shift the frame up to align it. */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		/* No room to shift: carry the header in a prepended mbuf. */
		MGETHDR(n, M_NOWAIT, MT_DATA);
		if (n == NULL) {
			m_freem(m);
			return (NULL);
		}
		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
		m->m_data += ETHER_HDR_LEN;
		m->m_len -= ETHER_HDR_LEN;
		n->m_len = ETHER_HDR_LEN;
		M_MOVE_PKTHDR(n, m);
		n->m_next = m;
	}
	return (n);
}
#endif

#ifdef NETDUMP
static void
iflib_netdump_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
{
	if_ctx_t ctx;

	ctx = if_getsoftc(ifp);
	CTX_LOCK(ctx);
	*nrxr = NRXQSETS(ctx);
	*ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size;
	*clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size;
	CTX_UNLOCK(ctx);
}

static void
iflib_netdump_event(struct ifnet *ifp, enum netdump_ev event)
{
	if_ctx_t ctx;
	if_softc_ctx_t scctx;
	iflib_fl_t fl;
	iflib_rxq_t rxq;
	int i, j;

	ctx = if_getsoftc(ifp);
	scctx = &ctx->ifc_softc_ctx;

	switch (event) {
	case NETDUMP_START:
		for (i = 0; i < scctx->isc_nrxqsets; i++) {
			rxq = &ctx->ifc_rxqs[i];
			for (j = 0; j < rxq->ifr_nfl; j++) {
				/* Index each free list, not just the first. */
				fl = &rxq->ifr_fl[j];
				fl->ifl_zone = m_getzone(fl->ifl_buf_size);
			}
		}
		iflib_no_tx_batch = 1;
		break;
	default:
		break;
	}
}

static int
iflib_netdump_transmit(struct ifnet *ifp, struct mbuf *m)
{
	if_ctx_t ctx;
	iflib_txq_t txq;
	int error;

	ctx = if_getsoftc(ifp);
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	txq = &ctx->ifc_txqs[0];
	error = iflib_encap(txq, &m);
	if (error == 0)
		(void)iflib_txd_db_check(ctx, txq, true, txq->ift_in_use);
	return (error);
}

static int
iflib_netdump_poll(struct ifnet *ifp, int count)
{
	if_ctx_t ctx;
	if_softc_ctx_t scctx;
	iflib_txq_t txq;
	int i;

	ctx = if_getsoftc(ifp);
	scctx = &ctx->ifc_softc_ctx;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return (EBUSY);

	txq = &ctx->ifc_txqs[0];
	(void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));

	for (i = 0; i < scctx->isc_nrxqsets; i++)
		(void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */);
	return (0);
}
#endif /* NETDUMP */
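/*
 * Illustrative sketch (excluded from the build): the drv-flags test used by
 * the netdump entry points above.  The interface must be RUNNING and must
 * not be OACTIVE; masking the pair and comparing against RUNNING checks
 * both conditions in one comparison.  The flag values here are stand-ins,
 * not the definitions from if_var.h.
 */
#if 0
#include <assert.h>

#define IFF_DRV_RUNNING	0x40	/* illustrative values only */
#define IFF_DRV_OACTIVE	0x02

static int
iface_usable(unsigned flags)
{
	return ((flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) ==
	    IFF_DRV_RUNNING);
}

int
main(void)
{
	assert(iface_usable(IFF_DRV_RUNNING));
	assert(!iface_usable(IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	assert(!iface_usable(0));
	return (0);
}
#endif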