/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifndef __T4_ADAPTER_H__
#define __T4_ADAPTER_H__

#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/counter.h>
#include <sys/rman.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/seqc.h>
#include <sys/sx.h>
#include <sys/vmem.h>
#include <vm/uma.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <machine/bus.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/pfil.h>
#include <netinet/in.h>
#include <netinet/tcp_lro.h>

#include "offload.h"
#include "t4_ioctl.h"
#include "common/t4_msg.h"
#include "firmware/t4fw_interface.h"

#define KTR_CXGBE	KTR_SPARE3
MALLOC_DECLARE(M_CXGBE);
#define CXGBE_UNIMPLEMENTED(s) \
	panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__)

/*
 * Same as LIST_HEAD from queue.h.  This is to avoid conflict with LinuxKPI's
 * LIST_HEAD when building iw_cxgbe.
 */
#define CXGBE_LIST_HEAD(name, type)					\
struct name {								\
	struct type *lh_first;	/* first element */			\
}
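
/*
 * Illustrative sketch (not part of the driver): a CXGBE_LIST_HEAD-declared
 * head has the same layout as queue.h's LIST_HEAD, so the usual LIST_*
 * macros work on it.  "struct foo" and "foo_head" are hypothetical names.
 */
#if 0
CXGBE_LIST_HEAD(foo_head, foo);		/* same as LIST_HEAD(foo_head, foo) */
static CXGBE_LIST_HEAD(, foo) foos;	/* anonymous head, as with clip_table */
/* LIST_INIT(&foos); LIST_INSERT_HEAD(&foos, f, link); etc. */
#endif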

#ifndef SYSCTL_ADD_UQUAD
#define SYSCTL_ADD_UQUAD	SYSCTL_ADD_QUAD
#define sysctl_handle_64	sysctl_handle_quad
#define CTLTYPE_U64		CTLTYPE_QUAD
#endif

SYSCTL_DECL(_hw_cxgbe);

struct adapter;
typedef struct adapter adapter_t;

enum {
	/*
	 * All ingress queues use this entry size.  Note that the firmware
	 * event queue and any iq expecting CPL_RX_PKT in the descriptor
	 * need this to be at least 64.
	 */
	IQ_ESIZE = 64,

	/* Default queue sizes for all kinds of ingress queues */
	FW_IQ_QSIZE = 256,
	RX_IQ_QSIZE = 1024,

	/* All egress queues use this entry size */
	EQ_ESIZE = 64,

	/* Default queue sizes for all kinds of egress queues */
	CTRL_EQ_QSIZE = 1024,
	TX_EQ_QSIZE = 1024,

	SW_ZONE_SIZES = 4,	/* cluster, jumbop, jumbo9k, jumbo16k */
	CL_METADATA_SIZE = CACHE_LINE_SIZE,

	SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / EQ_ESIZE, /* max WR size in desc */
	TX_SGL_SEGS = 39,
	TX_SGL_SEGS_TSO = 38,
	TX_SGL_SEGS_VM = 38,
	TX_SGL_SEGS_VM_TSO = 37,
	TX_SGL_SEGS_EO_TSO = 30,	/* XXX: lower for IPv6. */
	TX_SGL_SEGS_VXLAN_TSO = 37,
	TX_WR_FLITS = SGE_MAX_WR_LEN / 8
};

enum {
	/* adapter intr_type */
	INTR_INTX	= (1 << 0),
	INTR_MSI	= (1 << 1),
	INTR_MSIX	= (1 << 2)
};

enum {
	XGMAC_MTU	= (1 << 0),
	XGMAC_PROMISC	= (1 << 1),
	XGMAC_ALLMULTI	= (1 << 2),
	XGMAC_VLANEX	= (1 << 3),
	XGMAC_UCADDR	= (1 << 4),
	XGMAC_MCADDRS	= (1 << 5),

	XGMAC_ALL	= 0xffff
};

enum {
	/* flags understood by begin_synchronized_op */
	HOLD_LOCK	= (1 << 0),
	SLEEP_OK	= (1 << 1),
	INTR_OK		= (1 << 2),

	/* flags understood by end_synchronized_op */
	LOCK_HELD	= HOLD_LOCK,
};
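
/*
 * Illustrative sketch (not part of the driver): the typical
 * begin_synchronized_op/end_synchronized_op bracket around an operation that
 * must not race with detach or reset.  "t4xmpl" is a made-up wchan string and
 * t4_example_op is a hypothetical caller; both functions are declared later
 * in this file.
 */
#if 0
static int
t4_example_op(struct adapter *sc, struct vi_info *vi)
{
	int rc;

	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4xmpl");
	if (rc != 0)
		return (rc);	/* interrupted or adapter going away */
	/* ... talk to the hardware/firmware here ... */
	end_synchronized_op(sc, 0);	/* pass LOCK_HELD if HOLD_LOCK was used */
	return (0);
}
#endif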

enum {
	/* adapter flags.  synch_op or adapter_lock. */
	FULL_INIT_DONE	= (1 << 0),
	FW_OK		= (1 << 1),
	CHK_MBOX_ACCESS	= (1 << 2),
	MASTER_PF	= (1 << 3),
	BUF_PACKING_OK	= (1 << 6),
	IS_VF		= (1 << 7),
	KERN_TLS_ON	= (1 << 8),	/* HW is configured for KERN_TLS */
	CXGBE_BUSY	= (1 << 9),

	/* adapter error_flags.  reg_lock for HW_OFF_LIMITS, atomics for the rest. */
	ADAP_STOPPED	= (1 << 0),	/* Adapter has been stopped. */
	ADAP_FATAL_ERR	= (1 << 1),	/* Encountered a fatal error. */
	HW_OFF_LIMITS	= (1 << 2),	/* off limits to all except reset_thread */
	ADAP_CIM_ERR	= (1 << 3),	/* Error was related to FW/CIM. */

	/* port flags */
	HAS_TRACEQ	= (1 << 3),
	FIXED_IFMEDIA	= (1 << 4),	/* ifmedia list doesn't change. */

	/* VI flags */
	VI_DETACHING	= (1 << 0),
	VI_INIT_DONE	= (1 << 1),
	/* 1 << 2 is unused, was VI_SYSCTL_CTX */
	TX_USES_VM_WR	= (1 << 3),
	VI_SKIP_STATS	= (1 << 4),

	/* adapter debug_flags */
	DF_DUMP_MBOX		= (1 << 0),	/* Log all mbox cmd/rpl. */
	DF_LOAD_FW_ANYTIME	= (1 << 1),	/* Allow LOAD_FW after init */
	DF_DISABLE_TCB_CACHE	= (1 << 2),	/* Disable TCB cache (T6+) */
	DF_DISABLE_CFG_RETRY	= (1 << 3),	/* Disable fallback config */
	DF_VERBOSE_SLOWINTR	= (1 << 4),	/* Chatty slow intr handler */
};

#define IS_DETACHING(vi)	((vi)->flags & VI_DETACHING)
#define SET_DETACHING(vi)	do {(vi)->flags |= VI_DETACHING;} while (0)
#define CLR_DETACHING(vi)	do {(vi)->flags &= ~VI_DETACHING;} while (0)
#define IS_BUSY(sc)	((sc)->flags & CXGBE_BUSY)
#define SET_BUSY(sc)	do {(sc)->flags |= CXGBE_BUSY;} while (0)
#define CLR_BUSY(sc)	do {(sc)->flags &= ~CXGBE_BUSY;} while (0)

struct vi_info {
	device_t dev;
	struct port_info *pi;
	struct adapter *adapter;

	if_t ifp;
	struct pfil_head *pfil;

	unsigned long flags;
	int if_flags;

	uint16_t *rss, *nm_rss;
	uint16_t viid;		/* opaque VI identifier */
	uint16_t smt_idx;
	uint16_t vin;
	uint8_t vfvld;
	int16_t xact_addr_filt;	/* index of exact MAC address filter */
	uint16_t rss_size;	/* size of VI's RSS table slice */
	uint16_t rss_base;	/* start of VI's RSS table slice */
	int hashen;

	int nintr;
	int first_intr;

	/* These need to be int as they are used in sysctl */
	int ntxq;		/* # of tx queues */
	int first_txq;		/* index of first tx queue */
	int rsrv_noflowq;	/* Reserve queue 0 for non-flowid packets */
	int nrxq;		/* # of rx queues */
	int first_rxq;		/* index of first rx queue */
	int nofldtxq;		/* # of offload tx queues */
	int first_ofld_txq;	/* index of first offload tx queue */
	int nofldrxq;		/* # of offload rx queues */
	int first_ofld_rxq;	/* index of first offload rx queue */
	int nnmtxq;
	int first_nm_txq;
	int nnmrxq;
	int first_nm_rxq;
	int tmr_idx;
	int ofld_tmr_idx;
	int pktc_idx;
	int ofld_pktc_idx;
	int qsize_rxq;
	int qsize_txq;

	struct timeval last_refreshed;
	struct fw_vi_stats_vf stats;
	struct mtx tick_mtx;
	struct callout tick;

	struct sysctl_ctx_list ctx;
	struct sysctl_oid *rxq_oid;
	struct sysctl_oid *txq_oid;
	struct sysctl_oid *nm_rxq_oid;
	struct sysctl_oid *nm_txq_oid;
	struct sysctl_oid *ofld_rxq_oid;
	struct sysctl_oid *ofld_txq_oid;

	uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
	u_int txq_rr;
	u_int rxq_rr;
};

struct tx_ch_rl_params {
	enum fw_sched_params_rate ratemode;	/* %port (REL) or kbps (ABS) */
	uint32_t maxrate;
};

/* CLRL state */
enum clrl_state {
	CS_UNINITIALIZED = 0,
	CS_PARAMS_SET,			/* sw parameters have been set. */
	CS_HW_UPDATE_REQUESTED,		/* async HW update requested. */
	CS_HW_UPDATE_IN_PROGRESS,	/* sync hw update in progress. */
	CS_HW_CONFIGURED		/* configured in the hardware. */
};

/* CLRL flags */
enum {
	CF_USER = (1 << 0),	/* was configured by driver ioctl. */
};

struct tx_cl_rl_params {
	enum clrl_state state;
	int refcount;
	uint8_t flags;
	enum fw_sched_params_rate ratemode;	/* %port REL or ABS value */
	enum fw_sched_params_unit rateunit;	/* kbps or pps (when ABS) */
	enum fw_sched_params_mode mode;		/* aggr or per-flow */
	uint32_t maxrate;
	uint16_t pktsize;
	uint16_t burstsize;
};

/* Tx scheduler parameters for a channel/port */
struct tx_sched_params {
	/* Channel Rate Limiter */
	struct tx_ch_rl_params ch_rl;

	/* Class WRR */
	/* XXX */

	/* Class Rate Limiter (including the default pktsize and burstsize). */
	int pktsize;
	int burstsize;
	struct tx_cl_rl_params cl_rl[];
};

struct port_info {
	device_t dev;
	struct adapter *adapter;

	struct vi_info *vi;
	int nvi;
	int up_vis;
	int uld_vis;
	bool vxlan_tcam_entry;

	struct tx_sched_params *sched_params;

	struct mtx pi_lock;
	char lockname[16];
	unsigned long flags;

	uint8_t lport;		/* associated offload logical port */
	int8_t mdio_addr;
	uint8_t port_type;
	uint8_t mod_type;
	uint8_t port_id;
	uint8_t tx_chan;	/* tx TP c-channel */
	uint8_t rx_chan;	/* rx TP c-channel */
	uint8_t mps_bg_map;	/* rx MPS buffer group bitmap */
	uint8_t rx_e_chan_map;	/* rx TP e-channel bitmap */

	struct link_config link_cfg;
	struct ifmedia media;

	struct port_stats stats;
	u_int tnl_cong_drops;
	u_int tx_parse_error;
	int fcs_reg;
	uint64_t fcs_base;

	struct sysctl_ctx_list ctx;
};

#define IS_MAIN_VI(vi)		((vi) == &((vi)->pi->vi[0]))

struct cluster_metadata {
	uma_zone_t zone;
	caddr_t cl;
	u_int refcount;
};

struct fl_sdesc {
	caddr_t cl;
	uint16_t nmbuf;	/* # of driver originated mbufs with ref on cluster */
	int16_t moff;	/* offset of metadata from cl */
	uint8_t zidx;
};

struct tx_desc {
	__be64 flit[8];
};

struct tx_sdesc {
	struct mbuf *m;		/* m_nextpkt linked chain of frames */
	uint8_t desc_used;	/* # of hardware descriptors used by the WR */
};

#define IQ_PAD (IQ_ESIZE - sizeof(struct rsp_ctrl) - sizeof(struct rss_header))
struct iq_desc {
	struct rss_header rss;
	uint8_t cpl[IQ_PAD];
	struct rsp_ctrl rsp;
};
#undef IQ_PAD
CTASSERT(sizeof(struct iq_desc) == IQ_ESIZE);

enum {
	/* iq type */
	IQ_OTHER	= FW_IQ_IQTYPE_OTHER,
	IQ_ETH		= FW_IQ_IQTYPE_NIC,
	IQ_OFLD		= FW_IQ_IQTYPE_OFLD,

	/* iq flags */
	IQ_SW_ALLOCATED	= (1 << 0),	/* sw resources allocated */
	IQ_HAS_FL	= (1 << 1),	/* iq associated with a freelist */
	IQ_RX_TIMESTAMP	= (1 << 2),	/* provide the SGE rx timestamp */
	IQ_LRO_ENABLED	= (1 << 3),	/* iq is an eth rxq with LRO enabled */
	IQ_ADJ_CREDIT	= (1 << 4),	/* hw is off by 1 credit for this iq */
	IQ_HW_ALLOCATED	= (1 << 5),	/* fw/hw resources allocated */

	/* iq state */
	IQS_DISABLED	= 0,
	IQS_BUSY	= 1,
	IQS_IDLE	= 2,

	/* netmap related flags */
	NM_OFF	= 0,
	NM_ON	= 1,
	NM_BUSY	= 2,
};

enum {
	CPL_COOKIE_RESERVED = 0,
	CPL_COOKIE_FILTER,
	CPL_COOKIE_DDP0,
	CPL_COOKIE_DDP1,
	CPL_COOKIE_TOM,
	CPL_COOKIE_HASHFILTER,
	CPL_COOKIE_ETHOFLD,
	CPL_COOKIE_KERN_TLS,

	NUM_CPL_COOKIES = 8	/* Limited by M_COOKIE.  Do not increase. */
};

struct sge_iq;
struct rss_header;
typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
typedef int (*an_handler_t)(struct sge_iq *, const struct rsp_ctrl *);
typedef int (*fw_msg_handler_t)(struct adapter *, const __be64 *);
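
/*
 * Illustrative sketch (not part of the driver): a minimal cpl_handler_t and
 * its registration.  t4_register_cpl_handler and the shared (per-cookie)
 * variant are declared later in this file; t4_example_rpl and the choice of
 * CPL_SET_TCB_RPL/CPL_COOKIE_TOM here are hypothetical.
 */
#if 0
static int
t4_example_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);

	/* ... act on the reply; return 0 on success ... */
	(void)cpl;
	return (0);
}

/* During module load: */
t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, t4_example_rpl, CPL_COOKIE_TOM);
#endif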

/*
 * Ingress Queue: T4 is producer, driver is consumer.
 */
struct sge_iq {
	uint16_t flags;
	uint8_t qtype;
	volatile int state;
	struct adapter *adapter;
	struct iq_desc *desc;	/* KVA of descriptor ring */
	int8_t intr_pktc_idx;	/* packet count threshold index */
	uint8_t gen;		/* generation bit */
	uint8_t intr_params;	/* interrupt holdoff parameters */
	int8_t cong_drop;	/* congestion drop settings for the queue */
	uint16_t qsize;		/* size (# of entries) of the queue */
	uint16_t sidx;		/* index of the entry with the status page */
	uint16_t cidx;		/* consumer index */
	uint16_t cntxt_id;	/* SGE context id for the iq */
	uint16_t abs_id;	/* absolute SGE id for the iq */
	int16_t intr_idx;	/* interrupt used by the queue */

	STAILQ_ENTRY(sge_iq) link;

	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_addr_t ba;		/* bus address of descriptor ring */
};

enum {
	/* eq type */
	EQ_CTRL		= 1,
	EQ_ETH		= 2,
	EQ_OFLD		= 3,

	/* eq flags */
	EQ_SW_ALLOCATED	= (1 << 0),	/* sw resources allocated */
	EQ_HW_ALLOCATED	= (1 << 1),	/* hw/fw resources allocated */
	EQ_ENABLED	= (1 << 3),	/* open for business */
	EQ_QFLUSH	= (1 << 4),	/* if_qflush in progress */
};

/* Listed in order of preference.  Update t4_sysctls too if you change these */
enum {DOORBELL_UDB, DOORBELL_WCWR, DOORBELL_UDBWC, DOORBELL_KDB};

/*
 * Egress Queue: driver is producer, T4 is consumer.
 *
 * Note: A free list is an egress queue (driver produces the buffers and T4
 * consumes them) but it's special enough to have its own struct (see sge_fl).
 */
struct sge_eq {
	unsigned int flags;	/* MUST be first */
	unsigned int cntxt_id;	/* SGE context id for the eq */
	unsigned int abs_id;	/* absolute SGE id for the eq */
	uint8_t type;		/* EQ_CTRL/EQ_ETH/EQ_OFLD */
	uint8_t doorbells;
	uint8_t port_id;	/* port_id of the port associated with the eq */
	uint8_t tx_chan;	/* tx channel used by the eq */
	struct mtx eq_lock;

	struct tx_desc *desc;	/* KVA of descriptor ring */
	volatile uint32_t *udb;	/* KVA of doorbell (lies within BAR2) */
	u_int udb_qid;		/* relative qid within the doorbell page */
	uint16_t sidx;		/* index of the entry with the status page */
	uint16_t cidx;		/* consumer idx (desc idx) */
	uint16_t pidx;		/* producer idx (desc idx) */
	uint16_t equeqidx;	/* EQUEQ last requested at this pidx */
	uint16_t dbidx;		/* pidx of the most recent doorbell */
	uint16_t iqid;		/* cached iq->cntxt_id (see iq below) */
	volatile u_int equiq;	/* EQUIQ outstanding */
	struct sge_iq *iq;	/* iq that receives egr_update for the eq */

	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_addr_t ba;		/* bus address of descriptor ring */
	char lockname[16];
};

struct rx_buf_info {
	uma_zone_t zone;	/* zone that this cluster comes from */
	uint16_t size1;		/* same as size of cluster: 2K/4K/9K/16K.
				 * hwsize[hwidx1] = size1.  No spare. */
	uint16_t size2;		/* hwsize[hwidx2] = size2.
				 * spare in cluster = size1 - size2. */
	int8_t hwidx1;		/* SGE bufsize idx for size1 */
	int8_t hwidx2;		/* SGE bufsize idx for size2 */
	uint8_t type;		/* EXT_xxx type of the cluster */
};

enum {
	NUM_MEMWIN = 3,

	MEMWIN0_APERTURE = 2048,
	MEMWIN0_BASE     = 0x1b800,

	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE     = 0x28000,

	MEMWIN2_APERTURE_T4 = 65536,
	MEMWIN2_BASE_T4     = 0x30000,

	MEMWIN2_APERTURE_T5 = 128 * 1024,
	MEMWIN2_BASE_T5     = 0x60000,
};

struct memwin {
	struct rwlock mw_lock __aligned(CACHE_LINE_SIZE);
	uint32_t mw_base;	/* constant after setup_memwin */
	uint32_t mw_aperture;	/* ditto */
	uint32_t mw_curpos;	/* protected by mw_lock */
};

enum {
	FL_STARVING	= (1 << 0), /* on the adapter's list of starving fl's */
	FL_DOOMED	= (1 << 1), /* about to be destroyed */
	FL_BUF_PACKING	= (1 << 2), /* buffer packing enabled */
	FL_BUF_RESUME	= (1 << 3), /* resume from the middle of the frame */
};

#define FL_RUNNING_LOW(fl) \
	(IDXDIFF(fl->dbidx * 8, fl->cidx, fl->sidx * 8) <= fl->lowat)
#define FL_NOT_RUNNING_LOW(fl) \
	(IDXDIFF(fl->dbidx * 8, fl->cidx, fl->sidx * 8) >= 2 * fl->lowat)
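
/*
 * Editorial note on the arithmetic above (an inference from the sge_fl index
 * comments below, not from the source): each 64-byte fl hardware descriptor
 * holds 8 buffer addresses, so dbidx and sidx (hw descriptor indices) are
 * scaled by 8 before being compared against cidx, which counts individual
 * buffers.
 */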

struct sge_fl {
	struct mtx fl_lock;
	__be64 *desc;		/* KVA of descriptor ring, ptr to addresses */
	struct fl_sdesc *sdesc;	/* KVA of software descriptor ring */
	uint16_t zidx;		/* refill zone idx */
	uint16_t safe_zidx;
	uint16_t lowat;		/* # of buffers <= this means fl needs help */
	int flags;
	uint16_t buf_boundary;

	/* The 16b idx all deal with hw descriptors */
	uint16_t dbidx;		/* hw pidx after last doorbell */
	uint16_t sidx;		/* index of status page */
	volatile uint16_t hw_cidx;

	/* The 32b idx are all buffer idx, not hardware descriptor idx */
	uint32_t cidx;		/* consumer index */
	uint32_t pidx;		/* producer index */

	uint32_t dbval;
	u_int rx_offset;	/* offset in fl buf (when buffer packing) */
	volatile uint32_t *udb;

	uint64_t cl_allocated;	/* # of clusters allocated */
	uint64_t cl_recycled;	/* # of clusters recycled */
	uint64_t cl_fast_recycled; /* # of clusters recycled (fast) */

	/* These 3 are valid when FL_BUF_RESUME is set, stale otherwise. */
	struct mbuf *m0;
	struct mbuf **pnext;
	u_int remaining;

	uint16_t qsize;		/* # of hw descriptors (status page included) */
	uint16_t cntxt_id;	/* SGE context id for the freelist */
	TAILQ_ENTRY(sge_fl) link; /* All starving freelists */
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	char lockname[16];
	bus_addr_t ba;		/* bus address of descriptor ring */
};

struct mp_ring;

struct txpkts {
	uint8_t wr_type;	/* type 0 or type 1 */
	uint8_t npkt;		/* # of packets in this work request */
	uint8_t len16;		/* # of 16B pieces used by this work request */
	uint8_t score;
	uint8_t max_npkt;	/* maximum number of packets allowed */
	uint16_t plen;		/* total payload (sum of all packets) */

	/* straight from fw_eth_tx_pkts_vm_wr. */
	__u8 ethmacdst[6];
	__u8 ethmacsrc[6];
	__be16 ethtype;
	__be16 vlantci;

	struct mbuf *mb[15];
};

/* txq: SGE egress queue + what's needed for Ethernet NIC */
struct sge_txq {
	struct sge_eq eq;	/* MUST be first */

	if_t ifp;		/* the interface this txq belongs to */
	struct mp_ring *r;	/* tx software ring */
	struct tx_sdesc *sdesc;	/* KVA of software descriptor ring */
	struct sglist *gl;
	__be32 cpl_ctrl0;	/* for convenience */
	int tc_idx;		/* traffic class */
	uint64_t last_tx;	/* cycle count when eth_tx was last called */
	struct txpkts txp;

	struct task tx_reclaim_task;
	/* stats for common events first */

	uint64_t txcsum;	/* # of times hardware assisted with checksum */
	uint64_t tso_wrs;	/* # of TSO work requests */
	uint64_t vlan_insertion; /* # of times VLAN tag was inserted */
	uint64_t imm_wrs;	/* # of work requests with immediate data */
	uint64_t sgl_wrs;	/* # of work requests with direct SGL */
	uint64_t txpkt_wrs;	/* # of txpkt work requests (not coalesced) */
	uint64_t txpkts0_wrs;	/* # of type0 coalesced tx work requests */
	uint64_t txpkts1_wrs;	/* # of type1 coalesced tx work requests */
	uint64_t txpkts0_pkts;	/* # of frames in type0 coalesced tx WRs */
	uint64_t txpkts1_pkts;	/* # of frames in type1 coalesced tx WRs */
	uint64_t txpkts_flush;	/* # of times txp had to be sent by tx_update */
	uint64_t raw_wrs;	/* # of raw work requests (alloc_wr_mbuf) */
	uint64_t vxlan_tso_wrs;	/* # of VXLAN TSO work requests */
	uint64_t vxlan_txcsum;

	uint64_t kern_tls_records;
	uint64_t kern_tls_short;
	uint64_t kern_tls_partial;
	uint64_t kern_tls_full;
	uint64_t kern_tls_octets;
	uint64_t kern_tls_waste;
	uint64_t kern_tls_options;
	uint64_t kern_tls_header;
	uint64_t kern_tls_fin;
	uint64_t kern_tls_fin_short;
	uint64_t kern_tls_cbc;
	uint64_t kern_tls_gcm;

	/* stats for not-that-common events */

	/* Optional scratch space for constructing work requests. */
	uint8_t ss[SGE_MAX_WR_LEN] __aligned(16);
} __aligned(CACHE_LINE_SIZE);

/* rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_rxq {
	struct sge_iq iq;	/* MUST be first */
	struct sge_fl fl;	/* MUST follow iq */

	if_t ifp;		/* the interface this rxq belongs to */
	struct lro_ctrl lro;	/* LRO state */

	/* stats for common events first */

	uint64_t rxcsum;	/* # of times hardware assisted with checksum */
	uint64_t vlan_extraction; /* # of times VLAN tag was extracted */
	uint64_t vxlan_rxcsum;

	/* stats for not-that-common events */

} __aligned(CACHE_LINE_SIZE);

static inline struct sge_rxq *
iq_to_rxq(struct sge_iq *iq)
{

	return (__containerof(iq, struct sge_rxq, iq));
}

/* ofld_rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_ofld_rxq {
	struct sge_iq iq;	/* MUST be first */
	struct sge_fl fl;	/* MUST follow iq */
	counter_u64_t rx_iscsi_ddp_setup_ok;
	counter_u64_t rx_iscsi_ddp_setup_error;
	uint64_t rx_iscsi_ddp_pdus;
	uint64_t rx_iscsi_ddp_octets;
	uint64_t rx_iscsi_fl_pdus;
	uint64_t rx_iscsi_fl_octets;
	uint64_t rx_iscsi_padding_errors;
	uint64_t rx_iscsi_header_digest_errors;
	uint64_t rx_iscsi_data_digest_errors;
	uint64_t rx_aio_ddp_jobs;
	uint64_t rx_aio_ddp_octets;
	u_long rx_toe_tls_records;
	u_long rx_toe_tls_octets;
	u_long rx_toe_ddp_octets;
	counter_u64_t ddp_buffer_alloc;
	counter_u64_t ddp_buffer_reuse;
	counter_u64_t ddp_buffer_free;
} __aligned(CACHE_LINE_SIZE);

static inline struct sge_ofld_rxq *
iq_to_ofld_rxq(struct sge_iq *iq)
{

	return (__containerof(iq, struct sge_ofld_rxq, iq));
}

struct wrqe {
	STAILQ_ENTRY(wrqe) link;
	struct sge_wrq *wrq;
	int wr_len;
	char wr[] __aligned(16);
};

struct wrq_cookie {
	TAILQ_ENTRY(wrq_cookie) link;
	int ndesc;
	int pidx;
};

/*
 * wrq: SGE egress queue that is given prebuilt work requests.  Control queues
 * are of this type.
 */
struct sge_wrq {
	struct sge_eq eq;	/* MUST be first */

	struct adapter *adapter;
	struct task wrq_tx_task;

	/* Tx desc reserved but WR not "committed" yet. */
	TAILQ_HEAD(wrq_incomplete_wrs, wrq_cookie) incomplete_wrs;

	/* List of WRs ready to go out as soon as descriptors are available. */
	STAILQ_HEAD(, wrqe) wr_list;
	u_int nwr_pending;
	u_int ndesc_needed;

	/* stats for common events first */

	uint64_t tx_wrs_direct;	/* # of WRs written directly to desc ring. */
	uint64_t tx_wrs_ss;	/* # of WRs copied from scratch space. */
	uint64_t tx_wrs_copied;	/* # of WRs queued and copied to desc ring. */

	/* stats for not-that-common events */

	/*
	 * Scratch space for work requests that wrap around after reaching the
	 * status page, and some information about the last WR that used it.
	 */
	uint16_t ss_pidx;
	uint16_t ss_len;
	uint8_t ss[SGE_MAX_WR_LEN];

} __aligned(CACHE_LINE_SIZE);
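
/*
 * Illustrative sketch (not part of the driver): writing a work request
 * directly into a wrq's descriptor ring via start_wrq_wr/commit_wrq_wr,
 * declared later in this file.  The WR layout and t4_example_send are
 * hypothetical; the real functions' eq locking rules still apply.
 */
#if 0
static int
t4_example_send(struct adapter *sc, struct sge_wrq *wrq)
{
	struct wrq_cookie cookie;
	struct fw_flowc_wr *flowc;	/* any WR layout from t4fw_interface.h */
	const int len = sizeof(*flowc);

	flowc = start_wrq_wr(wrq, howmany(len, 16), &cookie);
	if (flowc == NULL)
		return (ENOMEM);
	/* ... fill in *flowc ... */
	commit_wrq_wr(wrq, flowc, &cookie);
	return (0);
}
#endif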

/* ofld_txq: SGE egress queue + miscellaneous items */
struct sge_ofld_txq {
	struct sge_wrq wrq;
	counter_u64_t tx_iscsi_pdus;
	counter_u64_t tx_iscsi_octets;
	counter_u64_t tx_iscsi_iso_wrs;
	counter_u64_t tx_aio_jobs;
	counter_u64_t tx_aio_octets;
	counter_u64_t tx_toe_tls_records;
	counter_u64_t tx_toe_tls_octets;
} __aligned(CACHE_LINE_SIZE);

#define INVALID_NM_RXQ_CNTXT_ID ((uint16_t)(-1))
struct sge_nm_rxq {
	/* Items used by the driver rx ithread are in this cacheline. */
	volatile int nm_state __aligned(CACHE_LINE_SIZE); /* NM_OFF, NM_ON, or NM_BUSY */
	u_int nid;		/* netmap ring # for this queue */
	struct vi_info *vi;

	struct iq_desc *iq_desc;
	uint16_t iq_abs_id;
	uint16_t iq_cntxt_id;
	uint16_t iq_cidx;
	uint16_t iq_sidx;
	uint8_t iq_gen;
	uint32_t fl_sidx;

	/* Items used by netmap rxsync are in this cacheline. */
	__be64 *fl_desc __aligned(CACHE_LINE_SIZE);
	uint16_t fl_cntxt_id;
	uint32_t fl_pidx;
	uint32_t fl_sidx2;	/* copy of fl_sidx */
	uint32_t fl_db_val;
	u_int fl_db_saved;
	u_int fl_db_threshold;	/* in descriptors */
	u_int fl_hwidx:4;

	/*
	 * fl_cidx is used by both the ithread and rxsync, the rest are not
	 * used in the rx fast path.
	 */
	uint32_t fl_cidx __aligned(CACHE_LINE_SIZE);

	bus_dma_tag_t iq_desc_tag;
	bus_dmamap_t iq_desc_map;
	bus_addr_t iq_ba;
	int intr_idx;

	bus_dma_tag_t fl_desc_tag;
	bus_dmamap_t fl_desc_map;
	bus_addr_t fl_ba;
};

#define INVALID_NM_TXQ_CNTXT_ID ((u_int)(-1))
struct sge_nm_txq {
	struct tx_desc *desc;
	uint16_t cidx;
	uint16_t pidx;
	uint16_t sidx;
	uint16_t equiqidx;	/* EQUIQ last requested at this pidx */
	uint16_t equeqidx;	/* EQUEQ last requested at this pidx */
	uint16_t dbidx;		/* pidx of the most recent doorbell */
	uint8_t doorbells;
	volatile uint32_t *udb;
	u_int udb_qid;
	u_int cntxt_id;
	__be32 cpl_ctrl0;	/* for convenience */
	__be32 op_pkd;		/* ditto */
	u_int nid;		/* netmap ring # for this queue */

	/* infrequently used items after this */

	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_addr_t ba;
	int iqidx;
} __aligned(CACHE_LINE_SIZE);

struct sge {
	int nrxq;	/* total # of Ethernet rx queues */
	int ntxq;	/* total # of Ethernet tx queues */
	int nofldrxq;	/* total # of TOE rx queues */
	int nofldtxq;	/* total # of TOE tx queues */
	int nnmrxq;	/* total # of netmap rx queues */
	int nnmtxq;	/* total # of netmap tx queues */
	int niq;	/* total # of ingress queues */
	int neq;	/* total # of egress queues */

	struct sge_iq fwq;		/* Firmware event queue */
	struct sge_wrq *ctrlq;		/* Control queues */
	struct sge_txq *txq;		/* NIC tx queues */
	struct sge_rxq *rxq;		/* NIC rx queues */
	struct sge_ofld_txq *ofld_txq;	/* TOE tx queues */
	struct sge_ofld_rxq *ofld_rxq;	/* TOE rx queues */
	struct sge_nm_txq *nm_txq;	/* netmap tx queues */
	struct sge_nm_rxq *nm_rxq;	/* netmap rx queues */

	uint16_t iq_start;	/* first cntxt_id */
	uint16_t iq_base;	/* first abs_id */
	int eq_start;		/* first cntxt_id */
	int eq_base;		/* first abs_id */
	int iqmap_sz;
	int eqmap_sz;
	struct sge_iq **iqmap;	/* iq->cntxt_id to iq mapping */
	struct sge_eq **eqmap;	/* eq->cntxt_id to eq mapping */

	int8_t safe_zidx;
	struct rx_buf_info rx_buf_info[SW_ZONE_SIZES];
};
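
/*
 * Illustrative sketch (an inference from the iqmap/eqmap comments above, not
 * copied from the driver): mapping a context id delivered by the hardware
 * back to its queue.  cntxt_id_to_iq is a hypothetical name.
 */
#if 0
static inline struct sge_iq *
cntxt_id_to_iq(struct adapter *sc, u_int cntxt_id)
{
	struct sge *s = &sc->sge;

	KASSERT(cntxt_id - s->iq_start < s->iqmap_sz,
	    ("%s: bad cntxt_id %u", __func__, cntxt_id));
	return (s->iqmap[cntxt_id - s->iq_start]);
}
#endif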

struct devnames {
	const char *nexus_name;
	const char *ifnet_name;
	const char *vi_ifnet_name;
	const char *pf03_drv_name;
	const char *vf_nexus_name;
	const char *vf_ifnet_name;
};

struct clip_entry;

#define CNT_CAL_INFO 3
struct clock_sync {
	uint64_t hw_cur;
	uint64_t hw_prev;
	sbintime_t sbt_cur;
	sbintime_t sbt_prev;
	seqc_t gen;
};

struct adapter {
	SLIST_ENTRY(adapter) link;
	device_t dev;
	struct cdev *cdev;
	const struct devnames *names;

	/* PCIe register resources */
	int regs_rid;
	struct resource *regs_res;
	int msix_rid;
	struct resource *msix_res;
	bus_space_handle_t bh;
	bus_space_tag_t bt;
	bus_size_t mmio_len;
	int udbs_rid;
	struct resource *udbs_res;
	volatile uint8_t *udbs_base;

	unsigned int pf;
	unsigned int mbox;
	unsigned int vpd_busy;
	unsigned int vpd_flag;

	/* Interrupt information */
	int intr_type;
	int intr_count;
	struct irq {
		struct resource *res;
		int rid;
		void *tag;
		struct sge_rxq *rxq;
		struct sge_nm_rxq *nm_rxq;
	} __aligned(CACHE_LINE_SIZE) *irq;
	int sge_gts_reg;
	int sge_kdoorbell_reg;

	bus_dma_tag_t dmat;	/* Parent DMA tag */

	struct sge sge;
	int lro_timeout;
	int sc_do_rxcopy;

	int vxlan_port;
	u_int vxlan_refcount;
	int rawf_base;
	int nrawf;

	struct taskqueue *tq[MAX_NPORTS];	/* General purpose taskqueues */
	struct port_info *port[MAX_NPORTS];
	uint8_t chan_map[MAX_NCHAN];		/* channel -> port */

	CXGBE_LIST_HEAD(, clip_entry) *clip_table;
	TAILQ_HEAD(, clip_entry) clip_pending;	/* these need hw update. */
	u_long clip_mask;
	int clip_gen;
	struct timeout_task clip_task;

	void *tom_softc;	/* (struct tom_data *) */
	struct tom_tunables tt;
	struct t4_offload_policy *policy;
	struct rwlock policy_lock;

	void *iwarp_softc;	/* (struct c4iw_dev *) */
	struct iw_tunables iwt;
	void *iscsi_ulp_softc;	/* (struct cxgbei_data *) */
	struct l2t_data *l2t;	/* L2 table */
	struct smt_data *smt;	/* Source MAC Table */
	struct tid_info tids;
	vmem_t *key_map;
	struct tls_tunables tlst;

	uint8_t doorbells;
	int offload_map;	/* port_id's with IFCAP_TOE enabled */
	int bt_map;		/* tx_chan's with BASE-T */
	int active_ulds;	/* ULDs activated on this adapter */
	int flags;
	int debug_flags;
	int error_flags;	/* Used by error handler and live reset. */

	char ifp_lockname[16];
	struct mtx ifp_lock;
	if_t ifp;		/* tracer ifp */
	struct ifmedia media;
	int traceq;		/* iq used by all tracers, -1 if none */
	int tracer_valid;	/* bitmap of valid tracers */
	int tracer_enabled;	/* bitmap of enabled tracers */

	char fw_version[16];
	char tp_version[16];
	char er_version[16];
	char bs_version[16];
	char cfg_file[32];
	u_int cfcsum;
	struct adapter_params params;
	const struct chip_params *chip_params;
	struct t4_virt_res vres;

	uint16_t nbmcaps;
	uint16_t linkcaps;
	uint16_t switchcaps;
	uint16_t niccaps;
	uint16_t toecaps;
	uint16_t rdmacaps;
	uint16_t cryptocaps;
	uint16_t iscsicaps;
	uint16_t fcoecaps;

	struct sysctl_ctx_list ctx;
	struct sysctl_oid *ctrlq_oid;
	struct sysctl_oid *fwq_oid;

	struct mtx sc_lock;
	char lockname[16];

	/* Starving free lists */
	struct mtx sfl_lock;	/* same cache-line as sc_lock? but that's ok */
	TAILQ_HEAD(, sge_fl) sfl;
	struct callout sfl_callout;
	struct callout cal_callout;
	struct clock_sync cal_info[CNT_CAL_INFO];
	int cal_current;
	int cal_count;
	uint32_t cal_gen;

	/*
	 * Driver code that can run when the adapter is suspended must use this
	 * lock or a synchronized_op and check for HW_OFF_LIMITS before
	 * accessing hardware.
	 *
	 * XXX: could be changed to rwlock.  wlock in suspend/resume and for
	 * indirect register access, rlock everywhere else.
	 */
	struct mtx reg_lock;

	struct memwin memwin[NUM_MEMWIN];	/* memory windows */

	struct mtx tc_lock;
	struct task tc_task;

	struct task fatal_error_task;
	struct task reset_task;
	const void *reset_thread;
	int num_resets;
	int incarnation;

	const char *last_op;
	const void *last_op_thr;
	int last_op_flags;

	int swintr;
	int sensor_resets;

	struct callout ktls_tick;
};

#define ADAPTER_LOCK(sc)		mtx_lock(&(sc)->sc_lock)
#define ADAPTER_UNLOCK(sc)		mtx_unlock(&(sc)->sc_lock)
#define ADAPTER_LOCK_ASSERT_OWNED(sc)	mtx_assert(&(sc)->sc_lock, MA_OWNED)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)

#define ASSERT_SYNCHRONIZED_OP(sc)	\
    KASSERT(IS_BUSY(sc) && \
	(mtx_owned(&(sc)->sc_lock) || sc->last_op_thr == curthread), \
	("%s: operation not synchronized.", __func__))

#define PORT_LOCK(pi)			mtx_lock(&(pi)->pi_lock)
#define PORT_UNLOCK(pi)			mtx_unlock(&(pi)->pi_lock)
#define PORT_LOCK_ASSERT_OWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_OWNED)
#define PORT_LOCK_ASSERT_NOTOWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_NOTOWNED)

#define FL_LOCK(fl)			mtx_lock(&(fl)->fl_lock)
#define FL_TRYLOCK(fl)			mtx_trylock(&(fl)->fl_lock)
#define FL_UNLOCK(fl)			mtx_unlock(&(fl)->fl_lock)
#define FL_LOCK_ASSERT_OWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_OWNED)
#define FL_LOCK_ASSERT_NOTOWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_NOTOWNED)

#define RXQ_FL_LOCK(rxq)		FL_LOCK(&(rxq)->fl)
#define RXQ_FL_UNLOCK(rxq)		FL_UNLOCK(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_OWNED(rxq)	FL_LOCK_ASSERT_OWNED(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_NOTOWNED(rxq) FL_LOCK_ASSERT_NOTOWNED(&(rxq)->fl)

#define EQ_LOCK(eq)			mtx_lock(&(eq)->eq_lock)
#define EQ_TRYLOCK(eq)			mtx_trylock(&(eq)->eq_lock)
#define EQ_UNLOCK(eq)			mtx_unlock(&(eq)->eq_lock)
#define EQ_LOCK_ASSERT_OWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_OWNED)
#define EQ_LOCK_ASSERT_NOTOWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_NOTOWNED)

#define TXQ_LOCK(txq)			EQ_LOCK(&(txq)->eq)
#define TXQ_TRYLOCK(txq)		EQ_TRYLOCK(&(txq)->eq)
#define TXQ_UNLOCK(txq)			EQ_UNLOCK(&(txq)->eq)
#define TXQ_LOCK_ASSERT_OWNED(txq)	EQ_LOCK_ASSERT_OWNED(&(txq)->eq)
#define TXQ_LOCK_ASSERT_NOTOWNED(txq)	EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq)

#define for_each_txq(vi, iter, q) \
	for (q = &vi->adapter->sge.txq[vi->first_txq], iter = 0; \
	    iter < vi->ntxq; ++iter, ++q)
#define for_each_rxq(vi, iter, q) \
	for (q = &vi->adapter->sge.rxq[vi->first_rxq], iter = 0; \
	    iter < vi->nrxq; ++iter, ++q)
#define for_each_ofld_txq(vi, iter, q) \
	for (q = &vi->adapter->sge.ofld_txq[vi->first_ofld_txq], iter = 0; \
	    iter < vi->nofldtxq; ++iter, ++q)
#define for_each_ofld_rxq(vi, iter, q) \
	for (q = &vi->adapter->sge.ofld_rxq[vi->first_ofld_rxq], iter = 0; \
	    iter < vi->nofldrxq; ++iter, ++q)
#define for_each_nm_txq(vi, iter, q) \
	for (q = &vi->adapter->sge.nm_txq[vi->first_nm_txq], iter = 0; \
	    iter < vi->nnmtxq; ++iter, ++q)
#define for_each_nm_rxq(vi, iter, q) \
	for (q = &vi->adapter->sge.nm_rxq[vi->first_nm_rxq], iter = 0; \
	    iter < vi->nnmrxq; ++iter, ++q)
#define for_each_vi(_pi, _iter, _vi) \
	for ((_vi) = (_pi)->vi, (_iter) = 0; (_iter) < (_pi)->nvi; \
	    ++(_iter), ++(_vi))

#define IDXINCR(idx, incr, wrap) do { \
	idx = wrap - idx > incr ? idx + incr : incr - (wrap - idx); \
} while (0)
#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
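
/*
 * Editorial example (not from the source): with wrap = 1024, head = 10 and
 * tail = 1000, IDXDIFF(10, 1000, 1024) = 1024 - 1000 + 10 = 34, i.e. the
 * ring distance from tail to head after head wrapped past the end.
 * IDXINCR(idx, incr, wrap) is the matching wrap-aware "idx += incr".
 */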

/* One for errors, one for firmware events */
#define T4_EXTRA_INTR 2

/* One for firmware events */
#define T4VF_EXTRA_INTR 1

static inline int
forwarding_intr_to_fwq(struct adapter *sc)
{

	return (sc->intr_count == 1);
}

/* Works reliably inside a sync_op or with reg_lock held. */
static inline bool
hw_off_limits(struct adapter *sc)
{
	int off_limits = atomic_load_int(&sc->error_flags) & HW_OFF_LIMITS;

	return (__predict_false(off_limits != 0));
}

static inline int
mbuf_nsegs(struct mbuf *m)
{
	M_ASSERTPKTHDR(m);
	KASSERT(m->m_pkthdr.inner_l5hlen > 0,
	    ("%s: mbuf %p missing information on # of segments.", __func__, m));

	return (m->m_pkthdr.inner_l5hlen);
}

static inline void
set_mbuf_nsegs(struct mbuf *m, uint8_t nsegs)
{
	M_ASSERTPKTHDR(m);
	m->m_pkthdr.inner_l5hlen = nsegs;
}

/* Internal mbuf flags stored in PH_loc.eight[4]. */
#define MC_NOMAP	0x01
#define MC_RAW_WR	0x02
#define MC_TLS		0x04

static inline int
mbuf_cflags(struct mbuf *m)
{
	M_ASSERTPKTHDR(m);
	return (m->m_pkthdr.PH_loc.eight[4]);
}

static inline void
set_mbuf_cflags(struct mbuf *m, uint8_t flags)
{
	M_ASSERTPKTHDR(m);
	m->m_pkthdr.PH_loc.eight[4] = flags;
}

static inline int
mbuf_len16(struct mbuf *m)
{
	int n;

	M_ASSERTPKTHDR(m);
	n = m->m_pkthdr.PH_loc.eight[0];
	if (!(mbuf_cflags(m) & MC_TLS))
		MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16);

	return (n);
}

static inline void
set_mbuf_len16(struct mbuf *m, uint8_t len16)
{
	M_ASSERTPKTHDR(m);
	if (!(mbuf_cflags(m) & MC_TLS))
		MPASS(len16 > 0 && len16 <= SGE_MAX_WR_LEN / 16);
	m->m_pkthdr.PH_loc.eight[0] = len16;
}
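
/*
 * Illustrative fragment (not part of the driver): how a tx path might stash
 * WR metadata in the pkthdr before handing the mbuf to the eq.  The values
 * shown are made up.
 */
#if 0
	set_mbuf_nsegs(m, 3);			/* DMA segments in the SGL */
	set_mbuf_len16(m, howmany(wr_len, 16));	/* WR length in 16B units */
	set_mbuf_cflags(m, MC_RAW_WR);		/* e.g. a raw work request */
	MPASS(mbuf_nsegs(m) == 3);
#endif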

static inline uint32_t
t4_read_reg(struct adapter *sc, uint32_t reg)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
	return bus_space_read_4(sc->bt, sc->bh, reg);
}

static inline void
t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
	bus_space_write_4(sc->bt, sc->bh, reg, val);
}

static inline uint64_t
t4_read_reg64(struct adapter *sc, uint32_t reg)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
#ifdef __LP64__
	return bus_space_read_8(sc->bt, sc->bh, reg);
#else
	return (uint64_t)bus_space_read_4(sc->bt, sc->bh, reg) +
	    ((uint64_t)bus_space_read_4(sc->bt, sc->bh, reg + 4) << 32);
#endif
}

static inline void
t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
#ifdef __LP64__
	bus_space_write_8(sc->bt, sc->bh, reg, val);
#else
	bus_space_write_4(sc->bt, sc->bh, reg, val);
	bus_space_write_4(sc->bt, sc->bh, reg + 4, val >> 32);
#endif
}

static inline void
t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
	*val = pci_read_config(sc->dev, reg, 1);
}

static inline void
t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
	pci_write_config(sc->dev, reg, val, 1);
}

static inline void
t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
	*val = pci_read_config(sc->dev, reg, 2);
}

static inline void
t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
	pci_write_config(sc->dev, reg, val, 2);
}

static inline void
t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
	*val = pci_read_config(sc->dev, reg, 4);
}

static inline void
t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
	pci_write_config(sc->dev, reg, val, 4);
}
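
/*
 * Illustrative fragment (not from the source): register access from code
 * that may run while the adapter is suspended, following the reg_lock rule
 * documented on struct adapter.  The register offset is a placeholder.
 */
#if 0
	uint32_t v = 0;

	mtx_lock(&sc->reg_lock);
	if (!hw_off_limits(sc))
		v = t4_read_reg(sc, 0x1e4);	/* hypothetical offset */
	mtx_unlock(&sc->reg_lock);
#endif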

static inline struct port_info *
adap2pinfo(struct adapter *sc, int idx)
{

	return (sc->port[idx]);
}

static inline void
t4_os_set_hw_addr(struct port_info *pi, uint8_t hw_addr[])
{

	bcopy(hw_addr, pi->vi[0].hw_addr, ETHER_ADDR_LEN);
}

static inline int
tx_resume_threshold(struct sge_eq *eq)
{

	/* not quite the same as qsize / 4, but this will do. */
	return (eq->sidx / 4);
}

static inline int
t4_use_ldst(struct adapter *sc)
{

#ifdef notyet
	return (sc->flags & FW_OK || !sc->use_bd);
#else
	return (0);
#endif
}

static inline void
CH_DUMP_MBOX(struct adapter *sc, int mbox, const int reg,
    const char *msg, const __be64 *const p, const bool err)
{

	if (!(sc->debug_flags & DF_DUMP_MBOX) && !err)
		return;
	if (p != NULL) {
		log(err ? LOG_ERR : LOG_DEBUG,
		    "%s: mbox %u %s %016llx %016llx %016llx %016llx "
		    "%016llx %016llx %016llx %016llx\n",
		    device_get_nameunit(sc->dev), mbox, msg,
		    (long long)be64_to_cpu(p[0]), (long long)be64_to_cpu(p[1]),
		    (long long)be64_to_cpu(p[2]), (long long)be64_to_cpu(p[3]),
		    (long long)be64_to_cpu(p[4]), (long long)be64_to_cpu(p[5]),
		    (long long)be64_to_cpu(p[6]), (long long)be64_to_cpu(p[7]));
	} else {
		log(err ? LOG_ERR : LOG_DEBUG,
		    "%s: mbox %u %s %016llx %016llx %016llx %016llx "
		    "%016llx %016llx %016llx %016llx\n",
		    device_get_nameunit(sc->dev), mbox, msg,
		    (long long)t4_read_reg64(sc, reg),
		    (long long)t4_read_reg64(sc, reg + 8),
		    (long long)t4_read_reg64(sc, reg + 16),
		    (long long)t4_read_reg64(sc, reg + 24),
		    (long long)t4_read_reg64(sc, reg + 32),
		    (long long)t4_read_reg64(sc, reg + 40),
		    (long long)t4_read_reg64(sc, reg + 48),
		    (long long)t4_read_reg64(sc, reg + 56));
	}
}

/* t4_main.c */
extern int t4_ntxq;
extern int t4_nrxq;
extern int t4_intr_types;
extern int t4_tmr_idx;
extern int t4_pktc_idx;
extern unsigned int t4_qsize_rxq;
extern unsigned int t4_qsize_txq;
extern int t4_ddp_rcvbuf_len;
extern unsigned int t4_ddp_rcvbuf_cache;
extern device_method_t cxgbe_methods[];

int t4_os_find_pci_capability(struct adapter *, int);
int t4_os_pci_save_state(struct adapter *);
int t4_os_pci_restore_state(struct adapter *);
void t4_os_portmod_changed(struct port_info *);
void t4_os_link_changed(struct port_info *);
void t4_iterate(void (*)(struct adapter *, void *), void *);
void t4_init_devnames(struct adapter *);
void t4_add_adapter(struct adapter *);
int t4_detach_common(device_t);
int t4_map_bars_0_and_4(struct adapter *);
int t4_map_bar_2(struct adapter *);
int t4_setup_intr_handlers(struct adapter *);
void t4_sysctls(struct adapter *);
int begin_synchronized_op(struct adapter *, struct vi_info *, int, char *);
void end_synchronized_op(struct adapter *, int);
void begin_vi_detach(struct adapter *, struct vi_info *);
void end_vi_detach(struct adapter *, struct vi_info *);
int update_mac_settings(if_t, int);
int adapter_init(struct adapter *);
int vi_init(struct vi_info *);
void vi_sysctls(struct vi_info *);
int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int);
int alloc_atid(struct adapter *, void *);
void *lookup_atid(struct adapter *, int);
void free_atid(struct adapter *, int);
void release_tid(struct adapter *, int, struct sge_wrq *);
int cxgbe_media_change(if_t);
void cxgbe_media_status(if_t, struct ifmediareq *);
void t4_os_cim_err(struct adapter *);

#ifdef KERN_TLS
/* t6_kern_tls.c */
int t6_tls_tag_alloc(if_t, union if_snd_tag_alloc_params *,
    struct m_snd_tag **);
void t6_ktls_modload(void);
void t6_ktls_modunload(void);
int t6_ktls_try(if_t, struct socket *, struct ktls_session *);
int t6_ktls_parse_pkt(struct mbuf *);
int t6_ktls_write_wr(struct sge_txq *, void *, struct mbuf *, u_int);
#endif

/* t4_keyctx.c */
struct auth_hash;
union authctx;
#ifdef KERN_TLS
struct ktls_session;
struct tls_key_req;
struct tls_keyctx;
#endif

void t4_aes_getdeckey(void *, const void *, unsigned int);
void t4_copy_partial_hash(int, union authctx *, void *);
void t4_init_gmac_hash(const char *, int, char *);
void t4_init_hmac_digest(const struct auth_hash *, u_int, const char *, int,
    char *);
#ifdef KERN_TLS
u_int t4_tls_key_info_size(const struct ktls_session *);
int t4_tls_proto_ver(const struct ktls_session *);
int t4_tls_cipher_mode(const struct ktls_session *);
int t4_tls_auth_mode(const struct ktls_session *);
int t4_tls_hmac_ctrl(const struct ktls_session *);
void t4_tls_key_ctx(const struct ktls_session *, int, struct tls_keyctx *);
int t4_alloc_tls_keyid(struct adapter *);
void t4_free_tls_keyid(struct adapter *, int);
void t4_write_tlskey_wr(const struct ktls_session *, int, int, int, int,
    struct tls_key_req *);
#endif

#ifdef DEV_NETMAP
/* t4_netmap.c */
struct sge_nm_rxq;
void cxgbe_nm_attach(struct vi_info *);
void cxgbe_nm_detach(struct vi_info *);
void service_nm_rxq(struct sge_nm_rxq *);
int alloc_nm_rxq(struct vi_info *, struct sge_nm_rxq *, int, int);
int free_nm_rxq(struct vi_info *, struct sge_nm_rxq *);
int alloc_nm_txq(struct vi_info *, struct sge_nm_txq *, int, int);
int free_nm_txq(struct vi_info *, struct sge_nm_txq *);
#endif

/* t4_sge.c */
void t4_sge_modload(void);
void t4_sge_modunload(void);
uint64_t t4_sge_extfree_refs(void);
void t4_tweak_chip_settings(struct adapter *);
int t4_verify_chip_settings(struct adapter *);
void t4_init_rx_buf_info(struct adapter *);
int t4_create_dma_tag(struct adapter *);
void t4_sge_sysctls(struct adapter *, struct sysctl_ctx_list *,
    struct sysctl_oid_list *);
int t4_destroy_dma_tag(struct adapter *);
int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
    bus_addr_t *, void **);
int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
    void *);
void free_fl_buffers(struct adapter *, struct sge_fl *);
int t4_setup_adapter_queues(struct adapter *);
int t4_teardown_adapter_queues(struct adapter *);
int t4_setup_vi_queues(struct vi_info *);
int t4_teardown_vi_queues(struct vi_info *);
void t4_intr_all(void *);
void t4_intr(void *);
#ifdef DEV_NETMAP
void t4_nm_intr(void *);
void t4_vi_intr(void *);
#endif
void t4_intr_err(void *);
void t4_intr_evt(void *);
void t4_wrq_tx_locked(struct adapter *, struct sge_wrq *, struct wrqe *);
void t4_update_fl_bufsize(if_t);
struct mbuf *alloc_wr_mbuf(int, int);
int parse_pkt(struct mbuf **, bool);
void *start_wrq_wr(struct sge_wrq *, int, struct wrq_cookie *);
void commit_wrq_wr(struct sge_wrq *, void *, struct wrq_cookie *);
int t4_sge_set_conm_context(struct adapter *, int, int, int);
void t4_register_an_handler(an_handler_t);
void t4_register_fw_msg_handler(int, fw_msg_handler_t);
void t4_register_cpl_handler(int, cpl_handler_t);
void t4_register_shared_cpl_handler(int, cpl_handler_t, int);
#ifdef RATELIMIT
void send_etid_flush_wr(struct cxgbe_rate_tag *);
#endif

/* t4_tracer.c */
struct t4_tracer;
void t4_tracer_modload(void);
void t4_tracer_modunload(void);
void t4_tracer_port_detach(struct adapter *);
int t4_get_tracer(struct adapter *, struct t4_tracer *);
int t4_set_tracer(struct adapter *, struct t4_tracer *);
int t4_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *);
int t5_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *);

/* t4_sched.c */
int t4_set_sched_class(struct adapter *, struct t4_sched_params *);
int t4_set_sched_queue(struct adapter *, struct t4_sched_queue *);
int t4_init_tx_sched(struct adapter *);
int t4_free_tx_sched(struct adapter *);
void t4_update_tx_sched(struct adapter *);
int t4_reserve_cl_rl_kbps(struct adapter *, int, u_int, int *);
void t4_release_cl_rl(struct adapter *, int, int);
int sysctl_tc(SYSCTL_HANDLER_ARGS);
int sysctl_tc_params(SYSCTL_HANDLER_ARGS);
#ifdef RATELIMIT
void t4_init_etid_table(struct adapter *);
void t4_free_etid_table(struct adapter *);
struct cxgbe_rate_tag *lookup_etid(struct adapter *, int);
int cxgbe_rate_tag_alloc(if_t, union if_snd_tag_alloc_params *,
    struct m_snd_tag **);
void cxgbe_rate_tag_free_locked(struct cxgbe_rate_tag *);
void cxgbe_ratelimit_query(if_t, struct if_ratelimit_query_results *);
#endif

/* t4_filter.c */
int get_filter_mode(struct adapter *, uint32_t *);
int set_filter_mode(struct adapter *, uint32_t);
int set_filter_mask(struct adapter *, uint32_t);
int get_filter(struct adapter *, struct t4_filter *);
int set_filter(struct adapter *, struct t4_filter *);
int del_filter(struct adapter *, struct t4_filter *);
int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
int t4_hashfilter_ao_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
int t4_hashfilter_tcb_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
int t4_del_hashfilter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
void free_hftid_hash(struct tid_info *);

static inline struct wrqe *
alloc_wrqe(int wr_len, struct sge_wrq *wrq)
{
	int len = offsetof(struct wrqe, wr) + wr_len;
	struct wrqe *wr;

	wr = malloc(len, M_CXGBE, M_NOWAIT);
	if (__predict_false(wr == NULL))
		return (NULL);
	wr->wr_len = wr_len;
	wr->wrq = wrq;
	return (wr);
}

static inline void *
wrtod(struct wrqe *wr)
{
	return (&wr->wr[0]);
}

static inline void
free_wrqe(struct wrqe *wr)
{
	free(wr, M_CXGBE);
}

static inline void
t4_wrq_tx(struct adapter *sc, struct wrqe *wr)
{
	struct sge_wrq *wrq = wr->wrq;

	TXQ_LOCK(wrq);
	t4_wrq_tx_locked(sc, wrq, wr);
	TXQ_UNLOCK(wrq);
}
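
/*
 * Illustrative sketch (not part of the driver): allocating a prebuilt work
 * request and queuing it on a wrq.  The WR contents and t4_example_ctrl_wr
 * are hypothetical; on this path the wrqe is consumed by the wrq machinery
 * once transmitted.
 */
#if 0
static int
t4_example_ctrl_wr(struct adapter *sc, struct sge_wrq *wrq, int wr_len)
{
	struct wrqe *wr;

	wr = alloc_wrqe(wr_len, wrq);
	if (wr == NULL)
		return (ENOMEM);
	memset(wrtod(wr), 0, wr_len);	/* ... then fill in the WR ... */
	t4_wrq_tx(sc, wr);
	return (0);
}
#endif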

static inline int
read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
    int len)
{

	return (rw_via_memwin(sc, idx, addr, val, len, 0));
}

static inline int
write_via_memwin(struct adapter *sc, int idx, uint32_t addr,
    const uint32_t *val, int len)
{

	return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1));
}

/* Number of len16 -> number of descriptors */
static inline int
tx_len16_to_desc(int len16)
{

	return (howmany(len16, EQ_ESIZE / 16));
}
#endif