/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef __T4_ADAPTER_H__
#define __T4_ADAPTER_H__

#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <vm/uma.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <machine/bus.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <netinet/in.h>
#include <netinet/tcp_lro.h>

#include "offload.h"
#include "t4_ioctl.h"
#include "common/t4_msg.h"
#include "firmware/t4fw_interface.h"

#define KTR_CXGBE   KTR_SPARE3
MALLOC_DECLARE(M_CXGBE);
#define CXGBE_UNIMPLEMENTED(s) \
    panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__)

#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
    __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}
#else
#define prefetch(x)
#endif

#ifndef SYSCTL_ADD_UQUAD
#define SYSCTL_ADD_UQUAD    SYSCTL_ADD_QUAD
#define sysctl_handle_64    sysctl_handle_quad
#define CTLTYPE_U64         CTLTYPE_QUAD
#endif

#if (__FreeBSD_version >= 900030) || \
    ((__FreeBSD_version >= 802507) && (__FreeBSD_version < 900000))
#define SBUF_DRAIN 1
#endif

#ifdef __amd64__
/* XXX: need systemwide bus_space_read_8/bus_space_write_8 */
static __inline uint64_t
t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{
    KASSERT(tag == X86_BUS_SPACE_MEM,
        ("%s: can only handle mem space", __func__));

    return (*(volatile uint64_t *)(handle + offset));
}

static __inline void
t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
    bus_size_t offset, uint64_t value)
{
    KASSERT(tag == X86_BUS_SPACE_MEM,
        ("%s: can only handle mem space", __func__));

    *(volatile uint64_t *)(bsh + offset) = value;
}
#else
static __inline uint64_t
t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{
    return (uint64_t)bus_space_read_4(tag, handle, offset) +
        ((uint64_t)bus_space_read_4(tag, handle, offset + 4) << 32);
}

static __inline void
t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
    bus_size_t offset, uint64_t value)
{
    bus_space_write_4(tag, bsh, offset, value);
    bus_space_write_4(tag, bsh, offset + 4, value >> 32);
}
#endif
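
/*
 * Note that the fallback above splits each 64-bit access into two 32-bit
 * accesses (low half first), so unlike the amd64 version it is not atomic
 * with respect to the hardware.
 */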

struct adapter;
typedef struct adapter adapter_t;

enum {
    /*
     * All ingress queues use this entry size.  Note that the firmware event
     * queue and any iq expecting CPL_RX_PKT in the descriptor need this to
     * be at least 64.
     */
    IQ_ESIZE = 64,

    /* Default queue sizes for all kinds of ingress queues */
    FW_IQ_QSIZE = 256,
    RX_IQ_QSIZE = 1024,

    /* All egress queues use this entry size */
    EQ_ESIZE = 64,

    /* Default queue sizes for all kinds of egress queues */
    CTRL_EQ_QSIZE = 128,
    TX_EQ_QSIZE = 1024,

#if MJUMPAGESIZE != MCLBYTES
    SW_ZONE_SIZES = 4,      /* cluster, jumbop, jumbo9k, jumbo16k */
#else
    SW_ZONE_SIZES = 3,      /* cluster, jumbo9k, jumbo16k */
#endif
    CL_METADATA_SIZE = CACHE_LINE_SIZE,

    SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / EQ_ESIZE, /* max WR size in desc */
    TX_SGL_SEGS = 39,
    TX_SGL_SEGS_TSO = 38,
    TX_WR_FLITS = SGE_MAX_WR_LEN / 8
};

enum {
    /* adapter intr_type */
    INTR_INTX = (1 << 0),
    INTR_MSI = (1 << 1),
    INTR_MSIX = (1 << 2)
};

enum {
    XGMAC_MTU = (1 << 0),
    XGMAC_PROMISC = (1 << 1),
    XGMAC_ALLMULTI = (1 << 2),
    XGMAC_VLANEX = (1 << 3),
    XGMAC_UCADDR = (1 << 4),
    XGMAC_MCADDRS = (1 << 5),

    XGMAC_ALL = 0xffff
};

enum {
    /* flags understood by begin_synchronized_op */
    HOLD_LOCK = (1 << 0),
    SLEEP_OK = (1 << 1),
    INTR_OK = (1 << 2),

    /* flags understood by end_synchronized_op */
    LOCK_HELD = HOLD_LOCK,
};

enum {
    /* adapter flags */
    FULL_INIT_DONE = (1 << 0),
    FW_OK = (1 << 1),
    /* INTR_DIRECT = (1 << 2), No longer used. */
    MASTER_PF = (1 << 3),
    ADAP_SYSCTL_CTX = (1 << 4),
    /* TOM_INIT_DONE = (1 << 5), No longer used. */
    BUF_PACKING_OK = (1 << 6),

    CXGBE_BUSY = (1 << 9),

    /* port flags */
    HAS_TRACEQ = (1 << 3),

    /* VI flags */
    DOOMED = (1 << 0),
    VI_INIT_DONE = (1 << 1),
    VI_SYSCTL_CTX = (1 << 2),
    INTR_RXQ = (1 << 4),        /* All NIC rxq's take interrupts */
    INTR_OFLD_RXQ = (1 << 5),   /* All TOE rxq's take interrupts */
    INTR_ALL = (INTR_RXQ | INTR_OFLD_RXQ),

    /* adapter debug_flags */
    DF_DUMP_MBOX = (1 << 0),
};

#define IS_DOOMED(vi)   ((vi)->flags & DOOMED)
#define SET_DOOMED(vi)  do {(vi)->flags |= DOOMED;} while (0)
#define IS_BUSY(sc)     ((sc)->flags & CXGBE_BUSY)
#define SET_BUSY(sc)    do {(sc)->flags |= CXGBE_BUSY;} while (0)
#define CLR_BUSY(sc)    do {(sc)->flags &= ~CXGBE_BUSY;} while (0)

struct vi_info {
    device_t dev;
    struct port_info *pi;

    struct ifnet *ifp;
    struct ifmedia media;

    unsigned long flags;
    int if_flags;

    uint16_t *rss, *nm_rss;
    uint16_t viid;
    int16_t xact_addr_filt;     /* index of exact MAC address filter */
    uint16_t rss_size;          /* size of VI's RSS table slice */
    uint16_t rss_base;          /* start of VI's RSS table slice */

    eventhandler_tag vlan_c;

    int nintr;
    int first_intr;

    /* These need to be int as they are used in sysctl */
    int ntxq;                   /* # of tx queues */
    int first_txq;              /* index of first tx queue */
    int rsrv_noflowq;           /* Reserve queue 0 for non-flowid packets */
    int nrxq;                   /* # of rx queues */
    int first_rxq;              /* index of first rx queue */
    int nofldtxq;               /* # of offload tx queues */
    int first_ofld_txq;         /* index of first offload tx queue */
    int nofldrxq;               /* # of offload rx queues */
    int first_ofld_rxq;         /* index of first offload rx queue */
    int nnmtxq;
    int first_nm_txq;
    int nnmrxq;
    int first_nm_rxq;
    int tmr_idx;
    int pktc_idx;
    int qsize_rxq;
    int qsize_txq;

    struct timeval last_refreshed;
    struct fw_vi_stats_vf stats;

    struct callout tick;
    struct sysctl_ctx_list ctx; /* from ifconfig up to driver detach */

    uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
};

enum {
    /* tx_sched_class flags */
    TX_SC_OK = (1 << 0),        /* Set up in hardware, active. */
};

struct tx_sched_class {
    int refcount;
    int flags;
    struct t4_sched_class_params params;
};

struct port_info {
    device_t dev;
    struct adapter *adapter;

    struct vi_info *vi;
    int nvi;
    int up_vis;
    int uld_vis;

    struct tx_sched_class *tc;  /* traffic classes for this channel */

    struct mtx pi_lock;
    char lockname[16];
    unsigned long flags;

    uint8_t lport;              /* associated offload logical port */
    int8_t mdio_addr;
    uint8_t port_type;
    uint8_t mod_type;
    uint8_t port_id;
    uint8_t tx_chan;
    uint8_t rx_chan_map;        /* rx MPS channel bitmap */

    int linkdnrc;
    struct link_config link_cfg;

    struct timeval last_refreshed;
    struct port_stats stats;
    u_int tnl_cong_drops;
    u_int tx_parse_error;

    struct callout tick;
};

#define IS_MAIN_VI(vi)  ((vi) == &((vi)->pi->vi[0]))

/* Where the cluster came from, how it has been carved up. */
struct cluster_layout {
    int8_t zidx;
    int8_t hwidx;
    uint16_t region1;           /* mbufs laid out within this region */
    /* region2 is the DMA region */
    uint16_t region3;           /* cluster_metadata within this region */
};

struct cluster_metadata {
    u_int refcount;
    struct fl_sdesc *sd;        /* For debug only.  Could easily be stale */
};

struct fl_sdesc {
    caddr_t cl;
    uint16_t nmbuf;     /* # of driver originated mbufs with ref on cluster */
    struct cluster_layout cll;
};

struct tx_desc {
    __be64 flit[8];
};

struct tx_sdesc {
    struct mbuf *m;             /* m_nextpkt linked chain of frames */
    uint8_t desc_used;          /* # of hardware descriptors used by the WR */
};
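
/*
 * An ingress descriptor always carries the RSS header up front and the
 * rsp_ctrl status at the tail; the CPL message occupies the IQ_PAD bytes in
 * between.  The CTASSERT below keeps the three pieces adding up to exactly
 * IQ_ESIZE.
 */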
#define IQ_PAD (IQ_ESIZE - sizeof(struct rsp_ctrl) - sizeof(struct rss_header))
struct iq_desc {
    struct rss_header rss;
    uint8_t cpl[IQ_PAD];
    struct rsp_ctrl rsp;
};
#undef IQ_PAD
CTASSERT(sizeof(struct iq_desc) == IQ_ESIZE);

enum {
    /* iq flags */
    IQ_ALLOCATED = (1 << 0),    /* firmware resources allocated */
    IQ_HAS_FL = (1 << 1),       /* iq associated with a freelist */
    IQ_INTR = (1 << 2),         /* iq takes direct interrupt */
    IQ_LRO_ENABLED = (1 << 3),  /* iq is an eth rxq with LRO enabled */

    /* iq state */
    IQS_DISABLED = 0,
    IQS_BUSY = 1,
    IQS_IDLE = 2,

    /* netmap related flags */
    NM_OFF = 0,
    NM_ON = 1,
    NM_BUSY = 2,
};

/*
 * Ingress Queue: T4 is producer, driver is consumer.
 */
struct sge_iq {
    uint32_t flags;
    volatile int state;
    struct adapter *adapter;
    struct iq_desc *desc;       /* KVA of descriptor ring */
    int8_t intr_pktc_idx;       /* packet count threshold index */
    uint8_t gen;                /* generation bit */
    uint8_t intr_params;        /* interrupt holdoff parameters */
    uint8_t intr_next;          /* XXX: holdoff for next interrupt */
    uint16_t qsize;             /* size (# of entries) of the queue */
    uint16_t sidx;              /* index of the entry with the status page */
    uint16_t cidx;              /* consumer index */
    uint16_t cntxt_id;          /* SGE context id for the iq */
    uint16_t abs_id;            /* absolute SGE id for the iq */

    STAILQ_ENTRY(sge_iq) link;

    bus_dma_tag_t desc_tag;
    bus_dmamap_t desc_map;
    bus_addr_t ba;              /* bus address of descriptor ring */
};

enum {
    EQ_CTRL = 1,
    EQ_ETH = 2,
    EQ_OFLD = 3,

    /* eq flags */
    EQ_TYPEMASK = 0x3,          /* 2 lsbits hold the type (see above) */
    EQ_ALLOCATED = (1 << 2),    /* firmware resources allocated */
    EQ_ENABLED = (1 << 3),      /* open for business */
};

/* Listed in order of preference.  Update t4_sysctls too if you change these */
enum {DOORBELL_UDB, DOORBELL_WCWR, DOORBELL_UDBWC, DOORBELL_KDB};

/*
 * Egress Queue: driver is producer, T4 is consumer.
 *
 * Note: A free list is an egress queue (driver produces the buffers and T4
 * consumes them) but it's special enough to have its own struct (see sge_fl).
 */
struct sge_eq {
    unsigned int flags;         /* MUST be first */
    unsigned int cntxt_id;      /* SGE context id for the eq */
    struct mtx eq_lock;

    struct tx_desc *desc;       /* KVA of descriptor ring */
    uint16_t doorbells;
    volatile uint32_t *udb;     /* KVA of doorbell (lies within BAR2) */
    u_int udb_qid;              /* relative qid within the doorbell page */
    uint16_t sidx;              /* index of the entry with the status page */
    uint16_t cidx;              /* consumer idx (desc idx) */
    uint16_t pidx;              /* producer idx (desc idx) */
    uint16_t equeqidx;          /* EQUEQ last requested at this pidx */
    uint16_t dbidx;             /* pidx of the most recent doorbell */
    uint16_t iqid;              /* iq that gets egr_update for the eq */
    uint8_t tx_chan;            /* tx channel used by the eq */
    volatile u_int equiq;       /* EQUIQ outstanding */

    bus_dma_tag_t desc_tag;
    bus_dmamap_t desc_map;
    bus_addr_t ba;              /* bus address of descriptor ring */
    char lockname[16];
};

struct sw_zone_info {
    uma_zone_t zone;            /* zone that this cluster comes from */
    int size;                   /* size of cluster: 2K, 4K, 9K, 16K, etc. */
    int type;                   /* EXT_xxx type of the cluster */
    int8_t head_hwidx;
    int8_t tail_hwidx;
};

struct hw_buf_info {
    int8_t zidx;                /* backpointer to zone; -ve means unused */
    int8_t next;                /* next hwidx for this zone; -1 means no more */
    int size;
};

enum {
    NUM_MEMWIN = 3,

    MEMWIN0_APERTURE = 2048,
    MEMWIN0_BASE = 0x1b800,

    MEMWIN1_APERTURE = 32768,
    MEMWIN1_BASE = 0x28000,

    MEMWIN2_APERTURE_T4 = 65536,
    MEMWIN2_BASE_T4 = 0x30000,

    MEMWIN2_APERTURE_T5 = 128 * 1024,
    MEMWIN2_BASE_T5 = 0x60000,
};

struct memwin {
    struct rwlock mw_lock __aligned(CACHE_LINE_SIZE);
    uint32_t mw_base;           /* constant after setup_memwin */
    uint32_t mw_aperture;       /* ditto */
    uint32_t mw_curpos;         /* protected by mw_lock */
};

enum {
    FL_STARVING = (1 << 0),     /* on the adapter's list of starving fl's */
    FL_DOOMED = (1 << 1),       /* about to be destroyed */
    FL_BUF_PACKING = (1 << 2),  /* buffer packing enabled */
    FL_BUF_RESUME = (1 << 3),   /* resume from the middle of the frame */
};

#define FL_RUNNING_LOW(fl) \
    (IDXDIFF(fl->dbidx * 8, fl->cidx, fl->sidx * 8) <= fl->lowat)
#define FL_NOT_RUNNING_LOW(fl) \
    (IDXDIFF(fl->dbidx * 8, fl->cidx, fl->sidx * 8) >= 2 * fl->lowat)
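
/*
 * The 8x scaling above converts dbidx and sidx, which count hardware
 * descriptors (8 buffer pointers each; see struct sge_fl below), into buffer
 * units so they can be compared with cidx, a buffer index.  For example,
 * with sidx * 8 = 1024 buffers, dbidx = 100, and cidx = 790, the hardware
 * still has IDXDIFF(800, 790, 1024) = 10 buffers to work with, so the fl is
 * running low whenever lowat >= 10.
 */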

struct sge_fl {
    struct mtx fl_lock;
    __be64 *desc;               /* KVA of descriptor ring, ptr to addresses */
    struct fl_sdesc *sdesc;     /* KVA of software descriptor ring */
    struct cluster_layout cll_def; /* default refill zone, layout */
    uint16_t lowat;             /* # of buffers <= this means fl needs help */
    int flags;
    uint16_t buf_boundary;

    /* The 16b idx all deal with hw descriptors */
    uint16_t dbidx;             /* hw pidx after last doorbell */
    uint16_t sidx;              /* index of status page */
    volatile uint16_t hw_cidx;

    /* The 32b idx are all buffer idx, not hardware descriptor idx */
    uint32_t cidx;              /* consumer index */
    uint32_t pidx;              /* producer index */

    uint32_t dbval;
    u_int rx_offset;            /* offset in fl buf (when buffer packing) */
    volatile uint32_t *udb;

    uint64_t mbuf_allocated;    /* # of mbuf allocated from zone_mbuf */
    uint64_t mbuf_inlined;      /* # of mbuf created within clusters */
    uint64_t cl_allocated;      /* # of clusters allocated */
    uint64_t cl_recycled;       /* # of clusters recycled */
    uint64_t cl_fast_recycled;  /* # of clusters recycled (fast) */

    /* These 3 are valid when FL_BUF_RESUME is set, stale otherwise. */
    struct mbuf *m0;
    struct mbuf **pnext;
    u_int remaining;

    uint16_t qsize;             /* # of hw descriptors (status page included) */
    uint16_t cntxt_id;          /* SGE context id for the freelist */
    TAILQ_ENTRY(sge_fl) link;   /* All starving freelists */
    bus_dma_tag_t desc_tag;
    bus_dmamap_t desc_map;
    char lockname[16];
    bus_addr_t ba;              /* bus address of descriptor ring */
    struct cluster_layout cll_alt; /* alternate refill zone, layout */
};

struct mp_ring;

/* txq: SGE egress queue + what's needed for Ethernet NIC */
struct sge_txq {
    struct sge_eq eq;           /* MUST be first */

    struct ifnet *ifp;          /* the interface this txq belongs to */
    struct mp_ring *r;          /* tx software ring */
    struct tx_sdesc *sdesc;     /* KVA of software descriptor ring */
    struct sglist *gl;
    __be32 cpl_ctrl0;           /* for convenience */
    int tc_idx;                 /* traffic class */

    struct task tx_reclaim_task;
    /* stats for common events first */

    uint64_t txcsum;            /* # of times hardware assisted with checksum */
    uint64_t tso_wrs;           /* # of TSO work requests */
    uint64_t vlan_insertion;    /* # of times VLAN tag was inserted */
    uint64_t imm_wrs;           /* # of work requests with immediate data */
    uint64_t sgl_wrs;           /* # of work requests with direct SGL */
    uint64_t txpkt_wrs;         /* # of txpkt work requests (not coalesced) */
    uint64_t txpkts0_wrs;       /* # of type0 coalesced tx work requests */
    uint64_t txpkts1_wrs;       /* # of type1 coalesced tx work requests */
    uint64_t txpkts0_pkts;      /* # of frames in type0 coalesced tx WRs */
    uint64_t txpkts1_pkts;      /* # of frames in type1 coalesced tx WRs */

    /* stats for not-that-common events */
} __aligned(CACHE_LINE_SIZE);

/* rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_rxq {
    struct sge_iq iq;           /* MUST be first */
    struct sge_fl fl;           /* MUST follow iq */

    struct ifnet *ifp;          /* the interface this rxq belongs to */
#if defined(INET) || defined(INET6)
    struct lro_ctrl lro;        /* LRO state */
#endif

    /* stats for common events first */

    uint64_t rxcsum;            /* # of times hardware assisted with checksum */
    uint64_t vlan_extraction;   /* # of times VLAN tag was extracted */

    /* stats for not-that-common events */

} __aligned(CACHE_LINE_SIZE);

static inline struct sge_rxq *
iq_to_rxq(struct sge_iq *iq)
{

    return (__containerof(iq, struct sge_rxq, iq));
}

/* ofld_rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_ofld_rxq {
    struct sge_iq iq;           /* MUST be first */
    struct sge_fl fl;           /* MUST follow iq */
} __aligned(CACHE_LINE_SIZE);

static inline struct sge_ofld_rxq *
iq_to_ofld_rxq(struct sge_iq *iq)
{

    return (__containerof(iq, struct sge_ofld_rxq, iq));
}

struct wrqe {
    STAILQ_ENTRY(wrqe) link;
    struct sge_wrq *wrq;
    int wr_len;
    char wr[] __aligned(16);
};

struct wrq_cookie {
    TAILQ_ENTRY(wrq_cookie) link;
    int ndesc;
    int pidx;
};

/*
 * wrq: SGE egress queue that is given prebuilt work requests.  Both the
 * control and offload tx queues are of this type.
 */
struct sge_wrq {
    struct sge_eq eq;           /* MUST be first */

    struct adapter *adapter;
    struct task wrq_tx_task;

    /* Tx desc reserved but WR not "committed" yet. */
    TAILQ_HEAD(wrq_incomplete_wrs, wrq_cookie) incomplete_wrs;

    /* List of WRs ready to go out as soon as descriptors are available. */
    STAILQ_HEAD(, wrqe) wr_list;
    u_int nwr_pending;
    u_int ndesc_needed;

    /* stats for common events first */

    uint64_t tx_wrs_direct;     /* # of WRs written directly to desc ring. */
    uint64_t tx_wrs_ss;         /* # of WRs copied from scratch space. */
    uint64_t tx_wrs_copied;     /* # of WRs queued and copied to desc ring. */

    /* stats for not-that-common events */

    /*
     * Scratch space for work requests that wrap around after reaching the
     * status page, and some information about the last WR that used it.
     */
    uint16_t ss_pidx;
    uint16_t ss_len;
    uint8_t ss[SGE_MAX_WR_LEN];

} __aligned(CACHE_LINE_SIZE);

struct sge_nm_rxq {
    struct vi_info *vi;

    struct iq_desc *iq_desc;
    uint16_t iq_abs_id;
    uint16_t iq_cntxt_id;
    uint16_t iq_cidx;
    uint16_t iq_sidx;
    uint8_t iq_gen;

    __be64 *fl_desc;
    uint16_t fl_cntxt_id;
    uint32_t fl_cidx;
    uint32_t fl_pidx;
    uint32_t fl_sidx;
    uint32_t fl_db_val;
    u_int fl_hwidx:4;

    u_int nid;                  /* netmap ring # for this queue */

    /* infrequently used items after this */

    bus_dma_tag_t iq_desc_tag;
    bus_dmamap_t iq_desc_map;
    bus_addr_t iq_ba;
    int intr_idx;

    bus_dma_tag_t fl_desc_tag;
    bus_dmamap_t fl_desc_map;
    bus_addr_t fl_ba;
} __aligned(CACHE_LINE_SIZE);

struct sge_nm_txq {
    struct tx_desc *desc;
    uint16_t cidx;
    uint16_t pidx;
    uint16_t sidx;
    uint16_t equiqidx;          /* EQUIQ last requested at this pidx */
    uint16_t equeqidx;          /* EQUEQ last requested at this pidx */
    uint16_t dbidx;             /* pidx of the most recent doorbell */
    uint16_t doorbells;
    volatile uint32_t *udb;
    u_int udb_qid;
    u_int cntxt_id;
    __be32 cpl_ctrl0;           /* for convenience */
    u_int nid;                  /* netmap ring # for this queue */

    /* infrequently used items after this */

    bus_dma_tag_t desc_tag;
    bus_dmamap_t desc_map;
    bus_addr_t ba;
    int iqidx;
} __aligned(CACHE_LINE_SIZE);

struct sge {
    int nrxq;                   /* total # of Ethernet rx queues */
    int ntxq;                   /* total # of Ethernet tx queues */
    int nofldrxq;               /* total # of TOE rx queues */
    int nofldtxq;               /* total # of TOE tx queues */
    int nnmrxq;                 /* total # of netmap rx queues */
    int nnmtxq;                 /* total # of netmap tx queues */
    int niq;                    /* total # of ingress queues */
    int neq;                    /* total # of egress queues */

    struct sge_iq fwq;          /* Firmware event queue */
    struct sge_wrq mgmtq;       /* Management queue (control queue) */
    struct sge_wrq *ctrlq;      /* Control queues */
    struct sge_txq *txq;        /* NIC tx queues */
    struct sge_rxq *rxq;        /* NIC rx queues */
    struct sge_wrq *ofld_txq;   /* TOE tx queues */
    struct sge_ofld_rxq *ofld_rxq; /* TOE rx queues */
    struct sge_nm_txq *nm_txq;  /* netmap tx queues */
    struct sge_nm_rxq *nm_rxq;  /* netmap rx queues */

    uint16_t iq_start;
    int eq_start;
    struct sge_iq **iqmap;      /* iq->cntxt_id to iq mapping */
    struct sge_eq **eqmap;      /* eq->cntxt_id to eq mapping */

    int8_t safe_hwidx1;         /* may not have room for metadata */
    int8_t safe_hwidx2;         /* with room for metadata and maybe more */
    struct sw_zone_info sw_zone_info[SW_ZONE_SIZES];
    struct hw_buf_info hw_buf_info[SGE_FLBUF_SIZES];
};

struct rss_header;
typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
typedef int (*an_handler_t)(struct sge_iq *, const struct rsp_ctrl *);
typedef int (*fw_msg_handler_t)(struct adapter *, const __be64 *);

struct adapter {
    SLIST_ENTRY(adapter) link;
    device_t dev;
    struct cdev *cdev;

    /* PCIe register resources */
    int regs_rid;
    struct resource *regs_res;
    int msix_rid;
    struct resource *msix_res;
    bus_space_handle_t bh;
    bus_space_tag_t bt;
    bus_size_t mmio_len;
    int udbs_rid;
    struct resource *udbs_res;
    volatile uint8_t *udbs_base;

    unsigned int pf;
    unsigned int mbox;
    unsigned int vpd_busy;
    unsigned int vpd_flag;

    /* Interrupt information */
    int intr_type;
    int intr_count;
    struct irq {
        struct resource *res;
        int rid;
        volatile int nm_state;  /* NM_OFF, NM_ON, or NM_BUSY */
        void *tag;
        struct sge_rxq *rxq;
        struct sge_nm_rxq *nm_rxq;
    } __aligned(CACHE_LINE_SIZE) *irq;

    bus_dma_tag_t dmat;         /* Parent DMA tag */

    struct sge sge;
    int lro_timeout;

    struct taskqueue *tq[MAX_NCHAN]; /* General purpose taskqueues */
    struct port_info *port[MAX_NPORTS];
    uint8_t chan_map[MAX_NCHAN];

    void *tom_softc;            /* (struct tom_data *) */
    struct tom_tunables tt;
    void *iwarp_softc;          /* (struct c4iw_dev *) */
    void *iscsi_ulp_softc;      /* (struct cxgbei_data *) */
    struct l2t_data *l2t;       /* L2 table */
    struct tid_info tids;

    uint16_t doorbells;
    int offload_map;            /* ports with IFCAP_TOE enabled */
    int active_ulds;            /* ULDs activated on this adapter */
    int flags;
    int debug_flags;

    char ifp_lockname[16];
    struct mtx ifp_lock;
    struct ifnet *ifp;          /* tracer ifp */
    struct ifmedia media;
    int traceq;                 /* iq used by all tracers, -1 if none */
    int tracer_valid;           /* bitmap of valid tracers */
    int tracer_enabled;         /* bitmap of enabled tracers */

    char fw_version[16];
    char tp_version[16];
    char exprom_version[16];
    char cfg_file[32];
    u_int cfcsum;
    struct adapter_params params;
    const struct chip_params *chip_params;
    struct t4_virt_res vres;

    uint16_t nbmcaps;
    uint16_t linkcaps;
    uint16_t switchcaps;
    uint16_t niccaps;
    uint16_t toecaps;
    uint16_t rdmacaps;
    uint16_t tlscaps;
    uint16_t iscsicaps;
    uint16_t fcoecaps;

    struct sysctl_ctx_list ctx; /* from adapter_full_init to full_uninit */

    struct mtx sc_lock;
    char lockname[16];

    /* Starving free lists */
    struct mtx sfl_lock;        /* same cache-line as sc_lock?  but that's ok */
    TAILQ_HEAD(, sge_fl) sfl;
    struct callout sfl_callout;

    struct mtx reg_lock;        /* for indirect register access */

    struct memwin memwin[NUM_MEMWIN]; /* memory windows */

    an_handler_t an_handler __aligned(CACHE_LINE_SIZE);
    fw_msg_handler_t fw_msg_handler[7]; /* NUM_FW6_TYPES */
    cpl_handler_t cpl_handler[0xef];    /* NUM_CPL_CMDS */

    const char *last_op;
    const void *last_op_thr;
    int last_op_flags;

    int sc_do_rxcopy;
};

#define ADAPTER_LOCK(sc)                mtx_lock(&(sc)->sc_lock)
#define ADAPTER_UNLOCK(sc)              mtx_unlock(&(sc)->sc_lock)
#define ADAPTER_LOCK_ASSERT_OWNED(sc)   mtx_assert(&(sc)->sc_lock, MA_OWNED)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)

#define ASSERT_SYNCHRONIZED_OP(sc) \
    KASSERT(IS_BUSY(sc) && \
        (mtx_owned(&(sc)->sc_lock) || sc->last_op_thr == curthread), \
        ("%s: operation not synchronized.", __func__))

#define PORT_LOCK(pi)                   mtx_lock(&(pi)->pi_lock)
#define PORT_UNLOCK(pi)                 mtx_unlock(&(pi)->pi_lock)
#define PORT_LOCK_ASSERT_OWNED(pi)      mtx_assert(&(pi)->pi_lock, MA_OWNED)
#define PORT_LOCK_ASSERT_NOTOWNED(pi)   mtx_assert(&(pi)->pi_lock, MA_NOTOWNED)

#define FL_LOCK(fl)                     mtx_lock(&(fl)->fl_lock)
#define FL_TRYLOCK(fl)                  mtx_trylock(&(fl)->fl_lock)
#define FL_UNLOCK(fl)                   mtx_unlock(&(fl)->fl_lock)
#define FL_LOCK_ASSERT_OWNED(fl)        mtx_assert(&(fl)->fl_lock, MA_OWNED)
#define FL_LOCK_ASSERT_NOTOWNED(fl)     mtx_assert(&(fl)->fl_lock, MA_NOTOWNED)

#define RXQ_FL_LOCK(rxq)                FL_LOCK(&(rxq)->fl)
#define RXQ_FL_UNLOCK(rxq)              FL_UNLOCK(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_OWNED(rxq)   FL_LOCK_ASSERT_OWNED(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_NOTOWNED(rxq) FL_LOCK_ASSERT_NOTOWNED(&(rxq)->fl)

#define EQ_LOCK(eq)                     mtx_lock(&(eq)->eq_lock)
#define EQ_TRYLOCK(eq)                  mtx_trylock(&(eq)->eq_lock)
#define EQ_UNLOCK(eq)                   mtx_unlock(&(eq)->eq_lock)
#define EQ_LOCK_ASSERT_OWNED(eq)        mtx_assert(&(eq)->eq_lock, MA_OWNED)
#define EQ_LOCK_ASSERT_NOTOWNED(eq)     mtx_assert(&(eq)->eq_lock, MA_NOTOWNED)

#define TXQ_LOCK(txq)                   EQ_LOCK(&(txq)->eq)
#define TXQ_TRYLOCK(txq)                EQ_TRYLOCK(&(txq)->eq)
#define TXQ_UNLOCK(txq)                 EQ_UNLOCK(&(txq)->eq)
#define TXQ_LOCK_ASSERT_OWNED(txq)      EQ_LOCK_ASSERT_OWNED(&(txq)->eq)
#define TXQ_LOCK_ASSERT_NOTOWNED(txq)   EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq)

#define CH_DUMP_MBOX(sc, mbox, data_reg) \
    do { \
        if (sc->debug_flags & DF_DUMP_MBOX) { \
            log(LOG_NOTICE, \
                "%s mbox %u: %016llx %016llx %016llx %016llx " \
                "%016llx %016llx %016llx %016llx\n", \
                device_get_nameunit(sc->dev), mbox, \
                (unsigned long long)t4_read_reg64(sc, data_reg), \
                (unsigned long long)t4_read_reg64(sc, data_reg + 8), \
                (unsigned long long)t4_read_reg64(sc, data_reg + 16), \
                (unsigned long long)t4_read_reg64(sc, data_reg + 24), \
                (unsigned long long)t4_read_reg64(sc, data_reg + 32), \
                (unsigned long long)t4_read_reg64(sc, data_reg + 40), \
                (unsigned long long)t4_read_reg64(sc, data_reg + 48), \
                (unsigned long long)t4_read_reg64(sc, data_reg + 56)); \
        } \
    } while (0)

#define for_each_txq(vi, iter, q) \
    for (q = &vi->pi->adapter->sge.txq[vi->first_txq], iter = 0; \
        iter < vi->ntxq; ++iter, ++q)
#define for_each_rxq(vi, iter, q) \
    for (q = &vi->pi->adapter->sge.rxq[vi->first_rxq], iter = 0; \
        iter < vi->nrxq; ++iter, ++q)
#define for_each_ofld_txq(vi, iter, q) \
    for (q = &vi->pi->adapter->sge.ofld_txq[vi->first_ofld_txq], iter = 0; \
        iter < vi->nofldtxq; ++iter, ++q)
#define for_each_ofld_rxq(vi, iter, q) \
    for (q = &vi->pi->adapter->sge.ofld_rxq[vi->first_ofld_rxq], iter = 0; \
        iter < vi->nofldrxq; ++iter, ++q)
#define for_each_nm_txq(vi, iter, q) \
    for (q = &vi->pi->adapter->sge.nm_txq[vi->first_nm_txq], iter = 0; \
        iter < vi->nnmtxq; ++iter, ++q)
#define for_each_nm_rxq(vi, iter, q) \
    for (q = &vi->pi->adapter->sge.nm_rxq[vi->first_nm_rxq], iter = 0; \
        iter < vi->nnmrxq; ++iter, ++q)
#define for_each_vi(_pi, _iter, _vi) \
    for ((_vi) = (_pi)->vi, (_iter) = 0; (_iter) < (_pi)->nvi; \
        ++(_iter), ++(_vi))
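
/*
 * Typical iteration over a VI's queues, e.g. the NIC tx queues (i and txq
 * are caller-provided):
 *
 *	struct sge_txq *txq;
 *	int i;
 *
 *	for_each_txq(vi, i, txq) {
 *		(use *txq here)
 *	}
 */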

#define IDXINCR(idx, incr, wrap) do { \
    idx = wrap - idx > incr ? idx + incr : incr - (wrap - idx); \
} while (0)
#define IDXDIFF(head, tail, wrap) \
    ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
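
/*
 * Example of the modular index arithmetic: with wrap = 1024,
 * IDXDIFF(2, 1020, 1024) = 1024 - 1020 + 2 = 6, and IDXINCR(idx, 6, 1024)
 * applied to idx = 1020 wraps it around to 2.
 */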

/* One for errors, one for firmware events */
#define T4_EXTRA_INTR 2

static inline uint32_t
t4_read_reg(struct adapter *sc, uint32_t reg)
{

    return bus_space_read_4(sc->bt, sc->bh, reg);
}

static inline void
t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
{

    bus_space_write_4(sc->bt, sc->bh, reg, val);
}

static inline uint64_t
t4_read_reg64(struct adapter *sc, uint32_t reg)
{

    return t4_bus_space_read_8(sc->bt, sc->bh, reg);
}

static inline void
t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
{

    t4_bus_space_write_8(sc->bt, sc->bh, reg, val);
}

static inline void
t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)
{

    *val = pci_read_config(sc->dev, reg, 1);
}

static inline void
t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)
{

    pci_write_config(sc->dev, reg, val, 1);
}

static inline void
t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
{

    *val = pci_read_config(sc->dev, reg, 2);
}

static inline void
t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)
{

    pci_write_config(sc->dev, reg, val, 2);
}

static inline void
t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)
{

    *val = pci_read_config(sc->dev, reg, 4);
}

static inline void
t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)
{

    pci_write_config(sc->dev, reg, val, 4);
}

static inline struct port_info *
adap2pinfo(struct adapter *sc, int idx)
{

    return (sc->port[idx]);
}

static inline void
t4_os_set_hw_addr(struct adapter *sc, int idx, uint8_t hw_addr[])
{

    bcopy(hw_addr, sc->port[idx]->vi[0].hw_addr, ETHER_ADDR_LEN);
}

static inline bool
is_10G_port(const struct port_info *pi)
{

    return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) != 0);
}

static inline bool
is_40G_port(const struct port_info *pi)
{

    return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) != 0);
}

static inline int
port_top_speed(const struct port_info *pi)
{

    if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
        return (100);
    if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
        return (40);
    if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
        return (10);
    if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
        return (1);

    return (0);
}

static inline int
tx_resume_threshold(struct sge_eq *eq)
{

    /* not quite the same as qsize / 4, but this will do. */
    return (eq->sidx / 4);
}

static inline int
t4_use_ldst(struct adapter *sc)
{

#ifdef notyet
    return (sc->flags & FW_OK || !sc->use_bd);
#else
    return (0);
#endif
}

/* t4_main.c */
int t4_os_find_pci_capability(struct adapter *, int);
int t4_os_pci_save_state(struct adapter *);
int t4_os_pci_restore_state(struct adapter *);
void t4_os_portmod_changed(const struct adapter *, int);
void t4_os_link_changed(struct adapter *, int, int, int);
void t4_iterate(void (*)(struct adapter *, void *), void *);
int t4_register_cpl_handler(struct adapter *, int, cpl_handler_t);
int t4_register_an_handler(struct adapter *, an_handler_t);
int t4_register_fw_msg_handler(struct adapter *, int, fw_msg_handler_t);
int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
int begin_synchronized_op(struct adapter *, struct vi_info *, int, char *);
void doom_vi(struct adapter *, struct vi_info *);
void end_synchronized_op(struct adapter *, int);
int update_mac_settings(struct ifnet *, int);
int adapter_full_init(struct adapter *);
int adapter_full_uninit(struct adapter *);
uint64_t cxgbe_get_counter(struct ifnet *, ift_counter);
int vi_full_init(struct vi_info *);
int vi_full_uninit(struct vi_info *);
void vi_sysctls(struct vi_info *);
void vi_tick(void *);

#ifdef DEV_NETMAP
/* t4_netmap.c */
void cxgbe_nm_attach(struct vi_info *);
void cxgbe_nm_detach(struct vi_info *);
void t4_nm_intr(void *);
#endif

/* t4_sge.c */
void t4_sge_modload(void);
void t4_sge_modunload(void);
uint64_t t4_sge_extfree_refs(void);
void t4_init_sge_cpl_handlers(struct adapter *);
void t4_tweak_chip_settings(struct adapter *);
int t4_read_chip_settings(struct adapter *);
int t4_create_dma_tag(struct adapter *);
void t4_sge_sysctls(struct adapter *, struct sysctl_ctx_list *,
    struct sysctl_oid_list *);
int t4_destroy_dma_tag(struct adapter *);
int t4_setup_adapter_queues(struct adapter *);
int t4_teardown_adapter_queues(struct adapter *);
int t4_setup_vi_queues(struct vi_info *);
int t4_teardown_vi_queues(struct vi_info *);
void t4_intr_all(void *);
void t4_intr(void *);
void t4_vi_intr(void *);
void t4_intr_err(void *);
void t4_intr_evt(void *);
void t4_wrq_tx_locked(struct adapter *, struct sge_wrq *, struct wrqe *);
void t4_update_fl_bufsize(struct ifnet *);
int parse_pkt(struct mbuf **);
void *start_wrq_wr(struct sge_wrq *, int, struct wrq_cookie *);
void commit_wrq_wr(struct sge_wrq *, void *, struct wrq_cookie *);
int tnl_cong(struct port_info *, int);

/* t4_tracer.c */
struct t4_tracer;
void t4_tracer_modload(void);
void t4_tracer_modunload(void);
void t4_tracer_port_detach(struct adapter *);
int t4_get_tracer(struct adapter *, struct t4_tracer *);
int t4_set_tracer(struct adapter *, struct t4_tracer *);
int t4_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *);
int t5_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *);
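
/*
 * Typical life cycle of a prebuilt work request (fw_foo_cmd stands in for a
 * real firmware command structure):
 *
 *	struct wrqe *wr;
 *	struct fw_foo_cmd *cmd;
 *
 *	wr = alloc_wrqe(sizeof(*cmd), &sc->sge.mgmtq);
 *	if (wr == NULL)
 *		return (ENOMEM);	(allocation is M_NOWAIT, can fail)
 *	cmd = wrtod(wr);
 *	(fill in *cmd here)
 *	t4_wrq_tx(sc, wr);
 *
 * Ownership of wr passes to the driver at t4_wrq_tx(); free_wrqe() is only
 * for requests that were never handed off.
 */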
static inline struct wrqe *
alloc_wrqe(int wr_len, struct sge_wrq *wrq)
{
    int len = offsetof(struct wrqe, wr) + wr_len;
    struct wrqe *wr;

    wr = malloc(len, M_CXGBE, M_NOWAIT);
    if (__predict_false(wr == NULL))
        return (NULL);
    wr->wr_len = wr_len;
    wr->wrq = wrq;
    return (wr);
}

static inline void *
wrtod(struct wrqe *wr)
{
    return (&wr->wr[0]);
}

static inline void
free_wrqe(struct wrqe *wr)
{
    free(wr, M_CXGBE);
}

static inline void
t4_wrq_tx(struct adapter *sc, struct wrqe *wr)
{
    struct sge_wrq *wrq = wr->wrq;

    TXQ_LOCK(wrq);
    t4_wrq_tx_locked(sc, wrq, wr);
    TXQ_UNLOCK(wrq);
}

#endif