/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef __T4_ADAPTER_H__
#define __T4_ADAPTER_H__

#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/counter.h>
#include <sys/rman.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/vmem.h>
#include <vm/uma.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <machine/bus.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/pfil.h>
#include <netinet/in.h>
#include <netinet/tcp_lro.h>

#include "offload.h"
#include "t4_ioctl.h"
#include "common/t4_msg.h"
#include "firmware/t4fw_interface.h"

#define KTR_CXGBE	KTR_SPARE3
MALLOC_DECLARE(M_CXGBE);
#define CXGBE_UNIMPLEMENTED(s) \
	panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__)

/*
 * Same as LIST_HEAD from queue.h. This is to avoid conflict with LinuxKPI's
 * LIST_HEAD when building iw_cxgbe.
 */
#define CXGBE_LIST_HEAD(name, type) \
struct name { \
	struct type *lh_first;	/* first element */ \
}

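/*
 * Usage follows queue.h's LIST_HEAD. A sketch (the list name here is
 * hypothetical; the clip_table member of struct adapter below uses the
 * anonymous form):
 *
 *	CXGBE_LIST_HEAD(clip_head, clip_entry);
 */
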
#ifndef SYSCTL_ADD_UQUAD
#define SYSCTL_ADD_UQUAD	SYSCTL_ADD_QUAD
#define sysctl_handle_64	sysctl_handle_quad
#define CTLTYPE_U64		CTLTYPE_QUAD
#endif

SYSCTL_DECL(_hw_cxgbe);

struct adapter;
typedef struct adapter adapter_t;

enum {
	/*
	 * All ingress queues use this entry size. Note that the firmware
	 * event queue and any iq expecting CPL_RX_PKT in the descriptor
	 * need this to be at least 64.
	 */
	IQ_ESIZE = 64,

	/* Default queue sizes for all kinds of ingress queues */
	FW_IQ_QSIZE = 256,
	RX_IQ_QSIZE = 1024,

	/* All egress queues use this entry size */
	EQ_ESIZE = 64,

	/* Default queue sizes for all kinds of egress queues */
	CTRL_EQ_QSIZE = 1024,
	TX_EQ_QSIZE = 1024,

	SW_ZONE_SIZES = 4,	/* cluster, jumbop, jumbo9k, jumbo16k */
	CL_METADATA_SIZE = CACHE_LINE_SIZE,

	SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / EQ_ESIZE, /* max WR size in desc */
	TX_SGL_SEGS = 39,
	TX_SGL_SEGS_TSO = 38,
	TX_SGL_SEGS_VM = 38,
	TX_SGL_SEGS_VM_TSO = 37,
	TX_SGL_SEGS_EO_TSO = 30,	/* XXX: lower for IPv6. */
	TX_SGL_SEGS_VXLAN_TSO = 37,
	TX_WR_FLITS = SGE_MAX_WR_LEN / 8
};

enum {
	/* adapter intr_type */
	INTR_INTX = (1 << 0),
	INTR_MSI = (1 << 1),
	INTR_MSIX = (1 << 2)
};

enum {
	XGMAC_MTU = (1 << 0),
	XGMAC_PROMISC = (1 << 1),
	XGMAC_ALLMULTI = (1 << 2),
	XGMAC_VLANEX = (1 << 3),
	XGMAC_UCADDR = (1 << 4),
	XGMAC_MCADDRS = (1 << 5),

	XGMAC_ALL = 0xffff
};

enum {
	/* flags understood by begin_synchronized_op */
	HOLD_LOCK = (1 << 0),
	SLEEP_OK = (1 << 1),
	INTR_OK = (1 << 2),

	/* flags understood by end_synchronized_op */
	LOCK_HELD = HOLD_LOCK,
};

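/*
 * A minimal sketch of how these flags are meant to be used with
 * begin_synchronized_op/end_synchronized_op (declared later in this file).
 * The "t4xyz" wait-message tag is hypothetical:
 *
 *	int rc;
 *
 *	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4xyz");
 *	if (rc != 0)
 *		return (rc);
 *	... operate on the adapter/VI ...
 *	end_synchronized_op(sc, 0);
 */
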
enum {
	/* adapter flags. synch_op or adapter_lock. */
	FULL_INIT_DONE = (1 << 0),
	FW_OK = (1 << 1),
	CHK_MBOX_ACCESS = (1 << 2),
	MASTER_PF = (1 << 3),
	BUF_PACKING_OK = (1 << 6),
	IS_VF = (1 << 7),
	KERN_TLS_ON = (1 << 8),	/* HW is configured for KERN_TLS */
	CXGBE_BUSY = (1 << 9),

	/* adapter error_flags. reg_lock for HW_OFF_LIMITS, atomics for the rest. */
	ADAP_STOPPED = (1 << 0),	/* Adapter has been stopped. */
	ADAP_FATAL_ERR = (1 << 1),	/* Encountered a fatal error. */
	HW_OFF_LIMITS = (1 << 2),	/* off limits to all except reset_thread */
	ADAP_CIM_ERR = (1 << 3),	/* Error was related to FW/CIM. */

	/* port flags */
	HAS_TRACEQ = (1 << 3),
	FIXED_IFMEDIA = (1 << 4),	/* ifmedia list doesn't change. */

	/* VI flags */
	DOOMED = (1 << 0),
	VI_INIT_DONE = (1 << 1),
	/* 1 << 2 is unused, was VI_SYSCTL_CTX */
	TX_USES_VM_WR = (1 << 3),
	VI_SKIP_STATS = (1 << 4),

	/* adapter debug_flags */
	DF_DUMP_MBOX = (1 << 0),	/* Log all mbox cmd/rpl. */
	DF_LOAD_FW_ANYTIME = (1 << 1),	/* Allow LOAD_FW after init */
	DF_DISABLE_TCB_CACHE = (1 << 2),	/* Disable TCB cache (T6+) */
	DF_DISABLE_CFG_RETRY = (1 << 3),	/* Disable fallback config */
	DF_VERBOSE_SLOWINTR = (1 << 4),	/* Chatty slow intr handler */
};

#define IS_DOOMED(vi)	((vi)->flags & DOOMED)
#define SET_DOOMED(vi)	do {(vi)->flags |= DOOMED;} while (0)
#define IS_BUSY(sc)	((sc)->flags & CXGBE_BUSY)
#define SET_BUSY(sc)	do {(sc)->flags |= CXGBE_BUSY;} while (0)
#define CLR_BUSY(sc)	do {(sc)->flags &= ~CXGBE_BUSY;} while (0)

struct vi_info {
	device_t dev;
	struct port_info *pi;
	struct adapter *adapter;

	struct ifnet *ifp;
	struct pfil_head *pfil;

	unsigned long flags;
	int if_flags;

	uint16_t *rss, *nm_rss;
	uint16_t viid;		/* opaque VI identifier */
	uint16_t smt_idx;
	uint16_t vin;
	uint8_t vfvld;
	int16_t xact_addr_filt;	/* index of exact MAC address filter */
	uint16_t rss_size;	/* size of VI's RSS table slice */
	uint16_t rss_base;	/* start of VI's RSS table slice */
	int hashen;

	int nintr;
	int first_intr;

	/* These need to be int as they are used in sysctl */
	int ntxq;		/* # of tx queues */
	int first_txq;		/* index of first tx queue */
	int rsrv_noflowq;	/* Reserve queue 0 for non-flowid packets */
	int nrxq;		/* # of rx queues */
	int first_rxq;		/* index of first rx queue */
	int nofldtxq;		/* # of offload tx queues */
	int first_ofld_txq;	/* index of first offload tx queue */
	int nofldrxq;		/* # of offload rx queues */
	int first_ofld_rxq;	/* index of first offload rx queue */
	int nnmtxq;
	int first_nm_txq;
	int nnmrxq;
	int first_nm_rxq;
	int tmr_idx;
	int ofld_tmr_idx;
	int pktc_idx;
	int ofld_pktc_idx;
	int qsize_rxq;
	int qsize_txq;

	struct timeval last_refreshed;
	struct fw_vi_stats_vf stats;
	struct mtx tick_mtx;
	struct callout tick;

	struct sysctl_ctx_list ctx;
	struct sysctl_oid *rxq_oid;
	struct sysctl_oid *txq_oid;
	struct sysctl_oid *nm_rxq_oid;
	struct sysctl_oid *nm_txq_oid;
	struct sysctl_oid *ofld_rxq_oid;
	struct sysctl_oid *ofld_txq_oid;

	uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
	u_int txq_rr;
	u_int rxq_rr;
};

struct tx_ch_rl_params {
	enum fw_sched_params_rate ratemode;	/* %port (REL) or kbps (ABS) */
	uint32_t maxrate;
};

/* CLRL state */
enum clrl_state {
	CS_UNINITIALIZED = 0,
	CS_PARAMS_SET,			/* sw parameters have been set. */
	CS_HW_UPDATE_REQUESTED,		/* async HW update requested. */
	CS_HW_UPDATE_IN_PROGRESS,	/* sync hw update in progress. */
	CS_HW_CONFIGURED		/* configured in the hardware. */
};

/* CLRL flags */
enum {
	CF_USER = (1 << 0),	/* was configured by driver ioctl. */
};

struct tx_cl_rl_params {
	enum clrl_state state;
	int refcount;
	uint8_t flags;
	enum fw_sched_params_rate ratemode;	/* %port REL or ABS value */
	enum fw_sched_params_unit rateunit;	/* kbps or pps (when ABS) */
	enum fw_sched_params_mode mode;		/* aggr or per-flow */
	uint32_t maxrate;
	uint16_t pktsize;
	uint16_t burstsize;
};

/* Tx scheduler parameters for a channel/port */
struct tx_sched_params {
	/* Channel Rate Limiter */
	struct tx_ch_rl_params ch_rl;

	/* Class WRR */
	/* XXX */

	/* Class Rate Limiter (including the default pktsize and burstsize). */
	int pktsize;
	int burstsize;
	struct tx_cl_rl_params cl_rl[];
};

struct port_info {
	device_t dev;
	struct adapter *adapter;

	struct vi_info *vi;
	int nvi;
	int up_vis;
	int uld_vis;
	bool vxlan_tcam_entry;

	struct tx_sched_params *sched_params;

	struct mtx pi_lock;
	char lockname[16];
	unsigned long flags;

	uint8_t lport;		/* associated offload logical port */
	int8_t mdio_addr;
	uint8_t port_type;
	uint8_t mod_type;
	uint8_t port_id;
	uint8_t tx_chan;
	uint8_t mps_bg_map;	/* rx MPS buffer group bitmap */
	uint8_t rx_e_chan_map;	/* rx TP e-channel bitmap */
	uint8_t rx_c_chan;	/* rx TP c-channel */

	struct link_config link_cfg;
	struct ifmedia media;

	struct port_stats stats;
	u_int tnl_cong_drops;
	u_int tx_parse_error;
	int fcs_reg;
	uint64_t fcs_base;

	struct sysctl_ctx_list ctx;
};

#define IS_MAIN_VI(vi)	((vi) == &((vi)->pi->vi[0]))

struct cluster_metadata {
	uma_zone_t zone;
	caddr_t cl;
	u_int refcount;
};

struct fl_sdesc {
	caddr_t cl;
	uint16_t nmbuf;	/* # of driver originated mbufs with ref on cluster */
	int16_t moff;	/* offset of metadata from cl */
	uint8_t zidx;
};

struct tx_desc {
	__be64 flit[8];
};

struct tx_sdesc {
	struct mbuf *m;		/* m_nextpkt linked chain of frames */
	uint8_t desc_used;	/* # of hardware descriptors used by the WR */
};

#define IQ_PAD (IQ_ESIZE - sizeof(struct rsp_ctrl) - sizeof(struct rss_header))
struct iq_desc {
	struct rss_header rss;
	uint8_t cpl[IQ_PAD];
	struct rsp_ctrl rsp;
};
#undef IQ_PAD
CTASSERT(sizeof(struct iq_desc) == IQ_ESIZE);

enum {
	/* iq type */
	IQ_OTHER = FW_IQ_IQTYPE_OTHER,
	IQ_ETH = FW_IQ_IQTYPE_NIC,
	IQ_OFLD = FW_IQ_IQTYPE_OFLD,

	/* iq flags */
	IQ_SW_ALLOCATED = (1 << 0),	/* sw resources allocated */
	IQ_HAS_FL = (1 << 1),		/* iq associated with a freelist */
	IQ_RX_TIMESTAMP = (1 << 2),	/* provide the SGE rx timestamp */
	IQ_LRO_ENABLED = (1 << 3),	/* iq is an eth rxq with LRO enabled */
	IQ_ADJ_CREDIT = (1 << 4),	/* hw is off by 1 credit for this iq */
	IQ_HW_ALLOCATED = (1 << 5),	/* fw/hw resources allocated */

	/* iq state */
	IQS_DISABLED = 0,
	IQS_BUSY = 1,
	IQS_IDLE = 2,

	/* netmap related flags */
	NM_OFF = 0,
	NM_ON = 1,
	NM_BUSY = 2,
};

enum {
	CPL_COOKIE_RESERVED = 0,
	CPL_COOKIE_FILTER,
	CPL_COOKIE_DDP0,
	CPL_COOKIE_DDP1,
	CPL_COOKIE_TOM,
	CPL_COOKIE_HASHFILTER,
	CPL_COOKIE_ETHOFLD,
	CPL_COOKIE_KERN_TLS,

	NUM_CPL_COOKIES = 8	/* Limited by M_COOKIE. Do not increase. */
};

struct sge_iq;
struct rss_header;
typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
typedef int (*an_handler_t)(struct sge_iq *, const struct rsp_ctrl *);
typedef int (*fw_msg_handler_t)(struct adapter *, const __be64 *);

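/*
 * A minimal sketch of a CPL message handler with the cpl_handler_t signature
 * above and how it would be registered (t4_register_cpl_handler is declared
 * later in this file). The handler name and opcode are hypothetical:
 *
 *	static int
 *	my_cpl_handler(struct sge_iq *iq, const struct rss_header *rss,
 *	    struct mbuf *m)
 *	{
 *		... decode the CPL that follows the rss_header ...
 *		return (0);
 *	}
 *
 *	t4_register_cpl_handler(CPL_SOME_OPCODE, my_cpl_handler);
 */
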
/*
 * Ingress Queue: T4 is producer, driver is consumer.
 */
struct sge_iq {
	uint16_t flags;
	uint8_t qtype;
	volatile int state;
	struct adapter *adapter;
	struct iq_desc *desc;	/* KVA of descriptor ring */
	int8_t intr_pktc_idx;	/* packet count threshold index */
	uint8_t gen;		/* generation bit */
	uint8_t intr_params;	/* interrupt holdoff parameters */
	int8_t cong_drop;	/* congestion drop settings for the queue */
	uint16_t qsize;		/* size (# of entries) of the queue */
	uint16_t sidx;		/* index of the entry with the status page */
	uint16_t cidx;		/* consumer index */
	uint16_t cntxt_id;	/* SGE context id for the iq */
	uint16_t abs_id;	/* absolute SGE id for the iq */
	int16_t intr_idx;	/* interrupt used by the queue */

	STAILQ_ENTRY(sge_iq) link;

	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_addr_t ba;		/* bus address of descriptor ring */
};

enum {
	/* eq type */
	EQ_CTRL = 1,
	EQ_ETH = 2,
	EQ_OFLD = 3,

	/* eq flags */
	EQ_SW_ALLOCATED = (1 << 0),	/* sw resources allocated */
	EQ_HW_ALLOCATED = (1 << 1),	/* hw/fw resources allocated */
	EQ_ENABLED = (1 << 3),		/* open for business */
	EQ_QFLUSH = (1 << 4),		/* if_qflush in progress */
};

/* Listed in order of preference. Update t4_sysctls too if you change these */
enum {DOORBELL_UDB, DOORBELL_WCWR, DOORBELL_UDBWC, DOORBELL_KDB};

/*
 * Egress Queue: driver is producer, T4 is consumer.
 *
 * Note: A free list is an egress queue (driver produces the buffers and T4
 * consumes them) but it's special enough to have its own struct (see sge_fl).
 */
struct sge_eq {
	unsigned int flags;	/* MUST be first */
	unsigned int cntxt_id;	/* SGE context id for the eq */
	unsigned int abs_id;	/* absolute SGE id for the eq */
	uint8_t type;		/* EQ_CTRL/EQ_ETH/EQ_OFLD */
	uint8_t doorbells;
	uint8_t tx_chan;	/* tx channel used by the eq */
	struct mtx eq_lock;

	struct tx_desc *desc;	/* KVA of descriptor ring */
	volatile uint32_t *udb;	/* KVA of doorbell (lies within BAR2) */
	u_int udb_qid;		/* relative qid within the doorbell page */
	uint16_t sidx;		/* index of the entry with the status page */
	uint16_t cidx;		/* consumer idx (desc idx) */
	uint16_t pidx;		/* producer idx (desc idx) */
	uint16_t equeqidx;	/* EQUEQ last requested at this pidx */
	uint16_t dbidx;		/* pidx of the most recent doorbell */
	uint16_t iqid;		/* cached iq->cntxt_id (see iq below) */
	volatile u_int equiq;	/* EQUIQ outstanding */
	struct sge_iq *iq;	/* iq that receives egr_update for the eq */

	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_addr_t ba;		/* bus address of descriptor ring */
	char lockname[16];
};

struct rx_buf_info {
	uma_zone_t zone;	/* zone that this cluster comes from */
	uint16_t size1;		/* same as size of cluster: 2K/4K/9K/16K.
				 * hwsize[hwidx1] = size1. No spare. */
	uint16_t size2;		/* hwsize[hwidx2] = size2.
				 * spare in cluster = size1 - size2. */
	int8_t hwidx1;		/* SGE bufsize idx for size1 */
	int8_t hwidx2;		/* SGE bufsize idx for size2 */
	uint8_t type;		/* EXT_xxx type of the cluster */
};

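/*
 * A worked example with assumed values, for illustration only: for a 4K
 * cluster zone, size1 would be 4096 and size2 might be
 * 4096 - CL_METADATA_SIZE, leaving a spare at the end of the cluster with
 * room for the driver's per-cluster state (see struct cluster_metadata and
 * fl_sdesc's moff above).
 */
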
enum {
	NUM_MEMWIN = 3,

	MEMWIN0_APERTURE = 2048,
	MEMWIN0_BASE = 0x1b800,

	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE = 0x28000,

	MEMWIN2_APERTURE_T4 = 65536,
	MEMWIN2_BASE_T4 = 0x30000,

	MEMWIN2_APERTURE_T5 = 128 * 1024,
	MEMWIN2_BASE_T5 = 0x60000,
};

struct memwin {
	struct rwlock mw_lock __aligned(CACHE_LINE_SIZE);
	uint32_t mw_base;	/* constant after setup_memwin */
	uint32_t mw_aperture;	/* ditto */
	uint32_t mw_curpos;	/* protected by mw_lock */
};

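/*
 * A minimal sketch of reading adapter memory through one of these windows
 * with read_via_memwin (defined near the end of this file). The adapter
 * address is hypothetical; the first argument after sc is the window index
 * (0 .. NUM_MEMWIN - 1) and len is in bytes:
 *
 *	uint32_t v;
 *
 *	if (read_via_memwin(sc, 0, 0x1000, &v, 4) == 0)
 *		... v holds the 32 bits at adapter address 0x1000 ...
 */
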
enum {
	FL_STARVING = (1 << 0),	/* on the adapter's list of starving fl's */
	FL_DOOMED = (1 << 1),	/* about to be destroyed */
	FL_BUF_PACKING = (1 << 2),	/* buffer packing enabled */
	FL_BUF_RESUME = (1 << 3),	/* resume from the middle of the frame */
};

/*
 * dbidx and sidx count hw descriptors while cidx counts buffers; each hw
 * descriptor covers 8 buffers, hence the * 8 below.
 */
#define FL_RUNNING_LOW(fl) \
	(IDXDIFF(fl->dbidx * 8, fl->cidx, fl->sidx * 8) <= fl->lowat)
#define FL_NOT_RUNNING_LOW(fl) \
	(IDXDIFF(fl->dbidx * 8, fl->cidx, fl->sidx * 8) >= 2 * fl->lowat)

struct sge_fl {
	struct mtx fl_lock;
	__be64 *desc;		/* KVA of descriptor ring, ptr to addresses */
	struct fl_sdesc *sdesc;	/* KVA of software descriptor ring */
	uint16_t zidx;		/* refill zone idx */
	uint16_t safe_zidx;
	uint16_t lowat;		/* # of buffers <= this means fl needs help */
	int flags;
	uint16_t buf_boundary;

	/* The 16b idx all deal with hw descriptors */
	uint16_t dbidx;		/* hw pidx after last doorbell */
	uint16_t sidx;		/* index of status page */
	volatile uint16_t hw_cidx;

	/* The 32b idx are all buffer idx, not hardware descriptor idx */
	uint32_t cidx;		/* consumer index */
	uint32_t pidx;		/* producer index */

	uint32_t dbval;
	u_int rx_offset;	/* offset in fl buf (when buffer packing) */
	volatile uint32_t *udb;

	uint64_t cl_allocated;	/* # of clusters allocated */
	uint64_t cl_recycled;	/* # of clusters recycled */
	uint64_t cl_fast_recycled; /* # of clusters recycled (fast) */

	/* These 3 are valid when FL_BUF_RESUME is set, stale otherwise. */
	struct mbuf *m0;
	struct mbuf **pnext;
	u_int remaining;

	uint16_t qsize;		/* # of hw descriptors (status page included) */
	uint16_t cntxt_id;	/* SGE context id for the freelist */
	TAILQ_ENTRY(sge_fl) link; /* All starving freelists */
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	char lockname[16];
	bus_addr_t ba;		/* bus address of descriptor ring */
};

struct mp_ring;

struct txpkts {
	uint8_t wr_type;	/* type 0 or type 1 */
	uint8_t npkt;		/* # of packets in this work request */
	uint8_t len16;		/* # of 16B pieces used by this work request */
	uint8_t score;
	uint8_t max_npkt;	/* maximum number of packets allowed */
	uint16_t plen;		/* total payload (sum of all packets) */

	/* straight from fw_eth_tx_pkts_vm_wr. */
	__u8 ethmacdst[6];
	__u8 ethmacsrc[6];
	__be16 ethtype;
	__be16 vlantci;

	struct mbuf *mb[15];
};

/* txq: SGE egress queue + what's needed for Ethernet NIC */
struct sge_txq {
	struct sge_eq eq;	/* MUST be first */

	struct ifnet *ifp;	/* the interface this txq belongs to */
	struct mp_ring *r;	/* tx software ring */
	struct tx_sdesc *sdesc;	/* KVA of software descriptor ring */
	struct sglist *gl;
	__be32 cpl_ctrl0;	/* for convenience */
	int tc_idx;		/* traffic class */
	uint64_t last_tx;	/* cycle count when eth_tx was last called */
	struct txpkts txp;

	struct task tx_reclaim_task;
	/* stats for common events first */

	uint64_t txcsum;	/* # of times hardware assisted with checksum */
	uint64_t tso_wrs;	/* # of TSO work requests */
	uint64_t vlan_insertion;/* # of times VLAN tag was inserted */
	uint64_t imm_wrs;	/* # of work requests with immediate data */
	uint64_t sgl_wrs;	/* # of work requests with direct SGL */
	uint64_t txpkt_wrs;	/* # of txpkt work requests (not coalesced) */
	uint64_t txpkts0_wrs;	/* # of type0 coalesced tx work requests */
	uint64_t txpkts1_wrs;	/* # of type1 coalesced tx work requests */
	uint64_t txpkts0_pkts;	/* # of frames in type0 coalesced tx WRs */
	uint64_t txpkts1_pkts;	/* # of frames in type1 coalesced tx WRs */
	uint64_t txpkts_flush;	/* # of times txp had to be sent by tx_update */
	uint64_t raw_wrs;	/* # of raw work requests (alloc_wr_mbuf) */
	uint64_t vxlan_tso_wrs;	/* # of VXLAN TSO work requests */
	uint64_t vxlan_txcsum;

	uint64_t kern_tls_records;
	uint64_t kern_tls_short;
	uint64_t kern_tls_partial;
	uint64_t kern_tls_full;
	uint64_t kern_tls_octets;
	uint64_t kern_tls_waste;
	uint64_t kern_tls_options;
	uint64_t kern_tls_header;
	uint64_t kern_tls_fin;
	uint64_t kern_tls_fin_short;
	uint64_t kern_tls_cbc;
	uint64_t kern_tls_gcm;

	/* stats for not-that-common events */

	/* Optional scratch space for constructing work requests. */
	uint8_t ss[SGE_MAX_WR_LEN] __aligned(16);
} __aligned(CACHE_LINE_SIZE);

/* rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_rxq {
	struct sge_iq iq;	/* MUST be first */
	struct sge_fl fl;	/* MUST follow iq */

	struct ifnet *ifp;	/* the interface this rxq belongs to */
	struct lro_ctrl lro;	/* LRO state */

	/* stats for common events first */

	uint64_t rxcsum;	/* # of times hardware assisted with checksum */
	uint64_t vlan_extraction;/* # of times VLAN tag was extracted */
	uint64_t vxlan_rxcsum;

	/* stats for not-that-common events */

} __aligned(CACHE_LINE_SIZE);

static inline struct sge_rxq *
iq_to_rxq(struct sge_iq *iq)
{

	return (__containerof(iq, struct sge_rxq, iq));
}

/* ofld_rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_ofld_rxq {
	struct sge_iq iq;	/* MUST be first */
	struct sge_fl fl;	/* MUST follow iq */
	counter_u64_t rx_iscsi_ddp_setup_ok;
	counter_u64_t rx_iscsi_ddp_setup_error;
	uint64_t rx_iscsi_ddp_pdus;
	uint64_t rx_iscsi_ddp_octets;
	uint64_t rx_iscsi_fl_pdus;
	uint64_t rx_iscsi_fl_octets;
	uint64_t rx_iscsi_padding_errors;
	uint64_t rx_iscsi_header_digest_errors;
	uint64_t rx_iscsi_data_digest_errors;
	u_long rx_toe_tls_records;
	u_long rx_toe_tls_octets;
} __aligned(CACHE_LINE_SIZE);

static inline struct sge_ofld_rxq *
iq_to_ofld_rxq(struct sge_iq *iq)
{

	return (__containerof(iq, struct sge_ofld_rxq, iq));
}

struct wrqe {
	STAILQ_ENTRY(wrqe) link;
	struct sge_wrq *wrq;
	int wr_len;
	char wr[] __aligned(16);
};

struct wrq_cookie {
	TAILQ_ENTRY(wrq_cookie) link;
	int ndesc;
	int pidx;
};

/*
 * wrq: SGE egress queue that is given prebuilt work requests. Control queues
 * are of this type.
 */
struct sge_wrq {
	struct sge_eq eq;	/* MUST be first */

	struct adapter *adapter;
	struct task wrq_tx_task;

	/* Tx desc reserved but WR not "committed" yet. */
	TAILQ_HEAD(wrq_incomplete_wrs, wrq_cookie) incomplete_wrs;

	/* List of WRs ready to go out as soon as descriptors are available. */
	STAILQ_HEAD(, wrqe) wr_list;
	u_int nwr_pending;
	u_int ndesc_needed;

	/* stats for common events first */

	uint64_t tx_wrs_direct;	/* # of WRs written directly to desc ring. */
	uint64_t tx_wrs_ss;	/* # of WRs copied from scratch space. */
	uint64_t tx_wrs_copied;	/* # of WRs queued and copied to desc ring. */

	/* stats for not-that-common events */

	/*
	 * Scratch space for work requests that wrap around after reaching the
	 * status page, and some information about the last WR that used it.
	 */
	uint16_t ss_pidx;
	uint16_t ss_len;
	uint8_t ss[SGE_MAX_WR_LEN];

} __aligned(CACHE_LINE_SIZE);

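/*
 * A minimal sketch of reserving descriptors in a wrq, building a WR in
 * place, and committing it with start_wrq_wr/commit_wrq_wr (declared later
 * in this file). This assumes the second argument is the WR size in 16-byte
 * units; my_wr_len16 is hypothetical:
 *
 *	struct wrq_cookie cookie;
 *	void *w;
 *
 *	w = start_wrq_wr(wrq, my_wr_len16, &cookie);
 *	if (w == NULL)
 *		... no room right now, try again later ...
 *	... build the work request at w ...
 *	commit_wrq_wr(wrq, w, &cookie);
 */
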
/* ofld_txq: SGE egress queue + miscellaneous items */
struct sge_ofld_txq {
	struct sge_wrq wrq;
	counter_u64_t tx_iscsi_pdus;
	counter_u64_t tx_iscsi_octets;
	counter_u64_t tx_iscsi_iso_wrs;
	counter_u64_t tx_toe_tls_records;
	counter_u64_t tx_toe_tls_octets;
} __aligned(CACHE_LINE_SIZE);

#define INVALID_NM_RXQ_CNTXT_ID ((uint16_t)(-1))
struct sge_nm_rxq {
	/* Items used by the driver rx ithread are in this cacheline. */
	volatile int nm_state __aligned(CACHE_LINE_SIZE); /* NM_OFF, NM_ON, or NM_BUSY */
	u_int nid;		/* netmap ring # for this queue */
	struct vi_info *vi;

	struct iq_desc *iq_desc;
	uint16_t iq_abs_id;
	uint16_t iq_cntxt_id;
	uint16_t iq_cidx;
	uint16_t iq_sidx;
	uint8_t iq_gen;
	uint32_t fl_sidx;

	/* Items used by netmap rxsync are in this cacheline. */
	__be64 *fl_desc __aligned(CACHE_LINE_SIZE);
	uint16_t fl_cntxt_id;
	uint32_t fl_pidx;
	uint32_t fl_sidx2;	/* copy of fl_sidx */
	uint32_t fl_db_val;
	u_int fl_db_saved;
	u_int fl_db_threshold;	/* in descriptors */
	u_int fl_hwidx:4;

	/*
	 * fl_cidx is used by both the ithread and rxsync; the rest are not
	 * used in the rx fast path.
	 */
	uint32_t fl_cidx __aligned(CACHE_LINE_SIZE);

	bus_dma_tag_t iq_desc_tag;
	bus_dmamap_t iq_desc_map;
	bus_addr_t iq_ba;
	int intr_idx;

	bus_dma_tag_t fl_desc_tag;
	bus_dmamap_t fl_desc_map;
	bus_addr_t fl_ba;
};

#define INVALID_NM_TXQ_CNTXT_ID ((u_int)(-1))
struct sge_nm_txq {
	struct tx_desc *desc;
	uint16_t cidx;
	uint16_t pidx;
	uint16_t sidx;
	uint16_t equiqidx;	/* EQUIQ last requested at this pidx */
	uint16_t equeqidx;	/* EQUEQ last requested at this pidx */
	uint16_t dbidx;		/* pidx of the most recent doorbell */
	uint8_t doorbells;
	volatile uint32_t *udb;
	u_int udb_qid;
	u_int cntxt_id;
	__be32 cpl_ctrl0;	/* for convenience */
	__be32 op_pkd;		/* ditto */
	u_int nid;		/* netmap ring # for this queue */

	/* infrequently used items after this */

	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_addr_t ba;
	int iqidx;
} __aligned(CACHE_LINE_SIZE);

struct sge {
	int nrxq;	/* total # of Ethernet rx queues */
	int ntxq;	/* total # of Ethernet tx queues */
	int nofldrxq;	/* total # of TOE rx queues */
	int nofldtxq;	/* total # of TOE tx queues */
	int nnmrxq;	/* total # of netmap rx queues */
	int nnmtxq;	/* total # of netmap tx queues */
	int niq;	/* total # of ingress queues */
	int neq;	/* total # of egress queues */

	struct sge_iq fwq;	/* Firmware event queue */
	struct sge_wrq *ctrlq;	/* Control queues */
	struct sge_txq *txq;	/* NIC tx queues */
	struct sge_rxq *rxq;	/* NIC rx queues */
	struct sge_ofld_txq *ofld_txq;	/* TOE tx queues */
	struct sge_ofld_rxq *ofld_rxq;	/* TOE rx queues */
	struct sge_nm_txq *nm_txq;	/* netmap tx queues */
	struct sge_nm_rxq *nm_rxq;	/* netmap rx queues */

	uint16_t iq_start;	/* first cntxt_id */
	uint16_t iq_base;	/* first abs_id */
	int eq_start;		/* first cntxt_id */
	int eq_base;		/* first abs_id */
	int iqmap_sz;
	int eqmap_sz;
	struct sge_iq **iqmap;	/* iq->cntxt_id to iq mapping */
	struct sge_eq **eqmap;	/* eq->cntxt_id to eq mapping */

	int8_t safe_zidx;
	struct rx_buf_info rx_buf_info[SW_ZONE_SIZES];
};

struct devnames {
	const char *nexus_name;
	const char *ifnet_name;
	const char *vi_ifnet_name;
	const char *pf03_drv_name;
	const char *vf_nexus_name;
	const char *vf_ifnet_name;
};

struct clip_entry;

struct adapter {
	SLIST_ENTRY(adapter) link;
	device_t dev;
	struct cdev *cdev;
	const struct devnames *names;

	/* PCIe register resources */
	int regs_rid;
	struct resource *regs_res;
	int msix_rid;
	struct resource *msix_res;
	bus_space_handle_t bh;
	bus_space_tag_t bt;
	bus_size_t mmio_len;
	int udbs_rid;
	struct resource *udbs_res;
	volatile uint8_t *udbs_base;

	unsigned int pf;
	unsigned int mbox;
	unsigned int vpd_busy;
	unsigned int vpd_flag;

	/* Interrupt information */
	int intr_type;
	int intr_count;
	struct irq {
		struct resource *res;
		int rid;
		void *tag;
		struct sge_rxq *rxq;
		struct sge_nm_rxq *nm_rxq;
	} __aligned(CACHE_LINE_SIZE) *irq;
	int sge_gts_reg;
	int sge_kdoorbell_reg;

	bus_dma_tag_t dmat;	/* Parent DMA tag */

	struct sge sge;
	int lro_timeout;
	int sc_do_rxcopy;

	int vxlan_port;
	u_int vxlan_refcount;
	int rawf_base;
	int nrawf;

	struct taskqueue *tq[MAX_NCHAN];	/* General purpose taskqueues */
	struct port_info *port[MAX_NPORTS];
	uint8_t chan_map[MAX_NCHAN];		/* channel -> port */

	CXGBE_LIST_HEAD(, clip_entry) *clip_table;
	TAILQ_HEAD(, clip_entry) clip_pending;	/* these need hw update. */
	u_long clip_mask;
	int clip_gen;
	struct timeout_task clip_task;

	void *tom_softc;	/* (struct tom_data *) */
	struct tom_tunables tt;
	struct t4_offload_policy *policy;
	struct rwlock policy_lock;

	void *iwarp_softc;	/* (struct c4iw_dev *) */
	struct iw_tunables iwt;
	void *iscsi_ulp_softc;	/* (struct cxgbei_data *) */
	void *ccr_softc;	/* (struct ccr_softc *) */
	struct l2t_data *l2t;	/* L2 table */
	struct smt_data *smt;	/* Source MAC Table */
	struct tid_info tids;
	vmem_t *key_map;
	struct tls_tunables tlst;

	uint8_t doorbells;
	int offload_map;	/* port_id's with IFCAP_TOE enabled */
	int bt_map;		/* tx_chan's with BASE-T */
	int active_ulds;	/* ULDs activated on this adapter */
	int flags;
	int debug_flags;
	int error_flags;	/* Used by error handler and live reset. */

	char ifp_lockname[16];
	struct mtx ifp_lock;
	struct ifnet *ifp;	/* tracer ifp */
	struct ifmedia media;
	int traceq;		/* iq used by all tracers, -1 if none */
	int tracer_valid;	/* bitmap of valid tracers */
	int tracer_enabled;	/* bitmap of enabled tracers */

	char fw_version[16];
	char tp_version[16];
	char er_version[16];
	char bs_version[16];
	char cfg_file[32];
	u_int cfcsum;
	struct adapter_params params;
	const struct chip_params *chip_params;
	struct t4_virt_res vres;

	uint16_t nbmcaps;
	uint16_t linkcaps;
	uint16_t switchcaps;
	uint16_t niccaps;
	uint16_t toecaps;
	uint16_t rdmacaps;
	uint16_t cryptocaps;
	uint16_t iscsicaps;
	uint16_t fcoecaps;

	struct sysctl_ctx_list ctx;
	struct sysctl_oid *ctrlq_oid;
	struct sysctl_oid *fwq_oid;

	struct mtx sc_lock;
	char lockname[16];

	/* Starving free lists */
	struct mtx sfl_lock;	/* same cache-line as sc_lock? but that's ok */
	TAILQ_HEAD(, sge_fl) sfl;
	struct callout sfl_callout;

	/*
	 * Driver code that can run when the adapter is suspended must use this
	 * lock or a synchronized_op and check for HW_OFF_LIMITS before
	 * accessing hardware.
	 *
	 * XXX: could be changed to rwlock. wlock in suspend/resume and for
	 * indirect register access, rlock everywhere else.
	 */
	struct mtx reg_lock;

	struct memwin memwin[NUM_MEMWIN];	/* memory windows */

	struct mtx tc_lock;
	struct task tc_task;

	struct task fatal_error_task;
	struct task reset_task;
	const void *reset_thread;
	int num_resets;
	int incarnation;

	const char *last_op;
	const void *last_op_thr;
	int last_op_flags;

	int swintr;
	int sensor_resets;

	struct callout ktls_tick;
};

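/*
 * A minimal sketch of the access discipline described for reg_lock above,
 * using hw_off_limits and t4_read_reg (both defined later in this file).
 * The register offset some_reg is hypothetical:
 *
 *	uint32_t v = 0;
 *
 *	mtx_lock(&sc->reg_lock);
 *	if (!hw_off_limits(sc))
 *		v = t4_read_reg(sc, some_reg);
 *	mtx_unlock(&sc->reg_lock);
 */
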
#define ADAPTER_LOCK(sc)		mtx_lock(&(sc)->sc_lock)
#define ADAPTER_UNLOCK(sc)		mtx_unlock(&(sc)->sc_lock)
#define ADAPTER_LOCK_ASSERT_OWNED(sc)	mtx_assert(&(sc)->sc_lock, MA_OWNED)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)

#define ASSERT_SYNCHRONIZED_OP(sc) \
	KASSERT(IS_BUSY(sc) && \
	    (mtx_owned(&(sc)->sc_lock) || sc->last_op_thr == curthread), \
	    ("%s: operation not synchronized.", __func__))

#define PORT_LOCK(pi)			mtx_lock(&(pi)->pi_lock)
#define PORT_UNLOCK(pi)			mtx_unlock(&(pi)->pi_lock)
#define PORT_LOCK_ASSERT_OWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_OWNED)
#define PORT_LOCK_ASSERT_NOTOWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_NOTOWNED)

#define FL_LOCK(fl)			mtx_lock(&(fl)->fl_lock)
#define FL_TRYLOCK(fl)			mtx_trylock(&(fl)->fl_lock)
#define FL_UNLOCK(fl)			mtx_unlock(&(fl)->fl_lock)
#define FL_LOCK_ASSERT_OWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_OWNED)
#define FL_LOCK_ASSERT_NOTOWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_NOTOWNED)

#define RXQ_FL_LOCK(rxq)		FL_LOCK(&(rxq)->fl)
#define RXQ_FL_UNLOCK(rxq)		FL_UNLOCK(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_OWNED(rxq)	FL_LOCK_ASSERT_OWNED(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_NOTOWNED(rxq) FL_LOCK_ASSERT_NOTOWNED(&(rxq)->fl)

#define EQ_LOCK(eq)			mtx_lock(&(eq)->eq_lock)
#define EQ_TRYLOCK(eq)			mtx_trylock(&(eq)->eq_lock)
#define EQ_UNLOCK(eq)			mtx_unlock(&(eq)->eq_lock)
#define EQ_LOCK_ASSERT_OWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_OWNED)
#define EQ_LOCK_ASSERT_NOTOWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_NOTOWNED)

#define TXQ_LOCK(txq)			EQ_LOCK(&(txq)->eq)
#define TXQ_TRYLOCK(txq)		EQ_TRYLOCK(&(txq)->eq)
#define TXQ_UNLOCK(txq)			EQ_UNLOCK(&(txq)->eq)
#define TXQ_LOCK_ASSERT_OWNED(txq)	EQ_LOCK_ASSERT_OWNED(&(txq)->eq)
#define TXQ_LOCK_ASSERT_NOTOWNED(txq)	EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq)

#define for_each_txq(vi, iter, q) \
	for (q = &vi->adapter->sge.txq[vi->first_txq], iter = 0; \
	    iter < vi->ntxq; ++iter, ++q)
#define for_each_rxq(vi, iter, q) \
	for (q = &vi->adapter->sge.rxq[vi->first_rxq], iter = 0; \
	    iter < vi->nrxq; ++iter, ++q)
#define for_each_ofld_txq(vi, iter, q) \
	for (q = &vi->adapter->sge.ofld_txq[vi->first_ofld_txq], iter = 0; \
	    iter < vi->nofldtxq; ++iter, ++q)
#define for_each_ofld_rxq(vi, iter, q) \
	for (q = &vi->adapter->sge.ofld_rxq[vi->first_ofld_rxq], iter = 0; \
	    iter < vi->nofldrxq; ++iter, ++q)
#define for_each_nm_txq(vi, iter, q) \
	for (q = &vi->adapter->sge.nm_txq[vi->first_nm_txq], iter = 0; \
	    iter < vi->nnmtxq; ++iter, ++q)
#define for_each_nm_rxq(vi, iter, q) \
	for (q = &vi->adapter->sge.nm_rxq[vi->first_nm_rxq], iter = 0; \
	    iter < vi->nnmrxq; ++iter, ++q)
#define for_each_vi(_pi, _iter, _vi) \
	for ((_vi) = (_pi)->vi, (_iter) = 0; (_iter) < (_pi)->nvi; \
	    ++(_iter), ++(_vi))

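/*
 * A minimal sketch of iterating over a VI's queues with the macros above;
 * the other for_each_* macros are used the same way:
 *
 *	struct sge_txq *txq;
 *	int i;
 *
 *	for_each_txq(vi, i, txq) {
 *		... txq is the i'th tx queue of this VI ...
 *	}
 */
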
#define IDXINCR(idx, incr, wrap) do { \
	idx = wrap - idx > incr ? idx + incr : incr - (wrap - idx); \
} while (0)
#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))

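/*
 * Worked example of the modular index arithmetic above, with wrap = 8:
 * IDXINCR(idx, 3, 8) advances idx from 6 to 1 (6 + 3 wraps past 8), and
 * IDXDIFF(1, 6, 8) then recovers the distance 3.  Callers presumably keep
 * incr and the head/tail distance below wrap.
 */
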
/* One for errors, one for firmware events */
#define T4_EXTRA_INTR 2

/* One for firmware events */
#define T4VF_EXTRA_INTR 1

static inline int
forwarding_intr_to_fwq(struct adapter *sc)
{

	return (sc->intr_count == 1);
}

/* Works reliably inside a sync_op or with reg_lock held. */
static inline bool
hw_off_limits(struct adapter *sc)
{
	int off_limits = atomic_load_int(&sc->error_flags) & HW_OFF_LIMITS;

	return (__predict_false(off_limits != 0));
}

static inline uint32_t
t4_read_reg(struct adapter *sc, uint32_t reg)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
	return bus_space_read_4(sc->bt, sc->bh, reg);
}

static inline void
t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
	bus_space_write_4(sc->bt, sc->bh, reg, val);
}

static inline uint64_t
t4_read_reg64(struct adapter *sc, uint32_t reg)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
#ifdef __LP64__
	return bus_space_read_8(sc->bt, sc->bh, reg);
#else
	return (uint64_t)bus_space_read_4(sc->bt, sc->bh, reg) +
	    ((uint64_t)bus_space_read_4(sc->bt, sc->bh, reg + 4) << 32);
#endif
}

static inline void
t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
#ifdef __LP64__
	bus_space_write_8(sc->bt, sc->bh, reg, val);
#else
	bus_space_write_4(sc->bt, sc->bh, reg, val);
	bus_space_write_4(sc->bt, sc->bh, reg + 4, val >> 32);
#endif
}

static inline void
t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
	*val = pci_read_config(sc->dev, reg, 1);
}

static inline void
t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
	pci_write_config(sc->dev, reg, val, 1);
}

static inline void
t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
{

	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
	*val = pci_read_config(sc->dev, reg, 2);
}

static inline void
t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
	pci_write_config(sc->dev, reg, val, 2);
}

static inline void
t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
	*val = pci_read_config(sc->dev, reg, 4);
}

static inline void
t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)
{
	if (hw_off_limits(sc))
		MPASS(curthread == sc->reset_thread);
	pci_write_config(sc->dev, reg, val, 4);
}

static inline struct port_info *
adap2pinfo(struct adapter *sc, int idx)
{

	return (sc->port[idx]);
}

static inline void
t4_os_set_hw_addr(struct port_info *pi, uint8_t hw_addr[])
{

	bcopy(hw_addr, pi->vi[0].hw_addr, ETHER_ADDR_LEN);
}

static inline int
tx_resume_threshold(struct sge_eq *eq)
{

	/* not quite the same as qsize / 4, but this will do. */
	return (eq->sidx / 4);
}

static inline int
t4_use_ldst(struct adapter *sc)
{

#ifdef notyet
	return (sc->flags & FW_OK || !sc->use_bd);
#else
	return (0);
#endif
}

static inline void
CH_DUMP_MBOX(struct adapter *sc, int mbox, const int reg,
    const char *msg, const __be64 *const p, const bool err)
{

	if (!(sc->debug_flags & DF_DUMP_MBOX) && !err)
		return;
	if (p != NULL) {
		log(err ? LOG_ERR : LOG_DEBUG,
		    "%s: mbox %u %s %016llx %016llx %016llx %016llx "
		    "%016llx %016llx %016llx %016llx\n",
		    device_get_nameunit(sc->dev), mbox, msg,
		    (long long)be64_to_cpu(p[0]), (long long)be64_to_cpu(p[1]),
		    (long long)be64_to_cpu(p[2]), (long long)be64_to_cpu(p[3]),
		    (long long)be64_to_cpu(p[4]), (long long)be64_to_cpu(p[5]),
		    (long long)be64_to_cpu(p[6]), (long long)be64_to_cpu(p[7]));
	} else {
		log(err ? LOG_ERR : LOG_DEBUG,
		    "%s: mbox %u %s %016llx %016llx %016llx %016llx "
		    "%016llx %016llx %016llx %016llx\n",
		    device_get_nameunit(sc->dev), mbox, msg,
		    (long long)t4_read_reg64(sc, reg),
		    (long long)t4_read_reg64(sc, reg + 8),
		    (long long)t4_read_reg64(sc, reg + 16),
		    (long long)t4_read_reg64(sc, reg + 24),
		    (long long)t4_read_reg64(sc, reg + 32),
		    (long long)t4_read_reg64(sc, reg + 40),
		    (long long)t4_read_reg64(sc, reg + 48),
		    (long long)t4_read_reg64(sc, reg + 56));
	}
}

/* t4_main.c */
extern int t4_ntxq;
extern int t4_nrxq;
extern int t4_intr_types;
extern int t4_tmr_idx;
extern int t4_pktc_idx;
extern unsigned int t4_qsize_rxq;
extern unsigned int t4_qsize_txq;
extern device_method_t cxgbe_methods[];

int t4_os_find_pci_capability(struct adapter *, int);
int t4_os_pci_save_state(struct adapter *);
int t4_os_pci_restore_state(struct adapter *);
void t4_os_portmod_changed(struct port_info *);
void t4_os_link_changed(struct port_info *);
void t4_iterate(void (*)(struct adapter *, void *), void *);
void t4_init_devnames(struct adapter *);
void t4_add_adapter(struct adapter *);
int t4_detach_common(device_t);
int t4_map_bars_0_and_4(struct adapter *);
int t4_map_bar_2(struct adapter *);
int t4_setup_intr_handlers(struct adapter *);
void t4_sysctls(struct adapter *);
int begin_synchronized_op(struct adapter *, struct vi_info *, int, char *);
void doom_vi(struct adapter *, struct vi_info *);
void end_synchronized_op(struct adapter *, int);
int update_mac_settings(struct ifnet *, int);
int adapter_init(struct adapter *);
int vi_init(struct vi_info *);
void vi_sysctls(struct vi_info *);
int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int);
int alloc_atid(struct adapter *, void *);
void *lookup_atid(struct adapter *, int);
void free_atid(struct adapter *, int);
void release_tid(struct adapter *, int, struct sge_wrq *);
int cxgbe_media_change(struct ifnet *);
void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
void t4_os_cim_err(struct adapter *);

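/*
 * A minimal sketch of t4_iterate (declared above), which, judging by its
 * signature and t4_add_adapter, invokes the callback for each attached
 * adapter. The callback and counter here are hypothetical:
 *
 *	static void
 *	count_one(struct adapter *sc, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *	t4_iterate(count_one, &n);
 */
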
#ifdef KERN_TLS
/* t6_kern_tls.c */
int t6_tls_tag_alloc(struct ifnet *, union if_snd_tag_alloc_params *,
    struct m_snd_tag **);
void t6_ktls_modload(void);
void t6_ktls_modunload(void);
int t6_ktls_try(struct ifnet *, struct socket *, struct ktls_session *);
int t6_ktls_parse_pkt(struct mbuf *, int *, int *);
int t6_ktls_write_wr(struct sge_txq *, void *, struct mbuf *, u_int, u_int);
#endif

/* t4_keyctx.c */
struct auth_hash;
union authctx;
#ifdef KERN_TLS
struct ktls_session;
struct tls_key_req;
struct tls_keyctx;
#endif

void t4_aes_getdeckey(void *, const void *, unsigned int);
void t4_copy_partial_hash(int, union authctx *, void *);
void t4_init_gmac_hash(const char *, int, char *);
void t4_init_hmac_digest(const struct auth_hash *, u_int, const char *, int,
    char *);
#ifdef KERN_TLS
u_int t4_tls_key_info_size(const struct ktls_session *);
int t4_tls_proto_ver(const struct ktls_session *);
int t4_tls_cipher_mode(const struct ktls_session *);
int t4_tls_auth_mode(const struct ktls_session *);
int t4_tls_hmac_ctrl(const struct ktls_session *);
void t4_tls_key_ctx(const struct ktls_session *, int, struct tls_keyctx *);
int t4_alloc_tls_keyid(struct adapter *);
void t4_free_tls_keyid(struct adapter *, int);
void t4_write_tlskey_wr(const struct ktls_session *, int, int, int, int,
    struct tls_key_req *);
#endif

#ifdef DEV_NETMAP
/* t4_netmap.c */
struct sge_nm_rxq;
void cxgbe_nm_attach(struct vi_info *);
void cxgbe_nm_detach(struct vi_info *);
void service_nm_rxq(struct sge_nm_rxq *);
int alloc_nm_rxq(struct vi_info *, struct sge_nm_rxq *, int, int);
int free_nm_rxq(struct vi_info *, struct sge_nm_rxq *);
int alloc_nm_txq(struct vi_info *, struct sge_nm_txq *, int, int);
int free_nm_txq(struct vi_info *, struct sge_nm_txq *);
#endif

/* t4_sge.c */
void t4_sge_modload(void);
void t4_sge_modunload(void);
uint64_t t4_sge_extfree_refs(void);
void t4_tweak_chip_settings(struct adapter *);
int t4_verify_chip_settings(struct adapter *);
void t4_init_rx_buf_info(struct adapter *);
int t4_create_dma_tag(struct adapter *);
void t4_sge_sysctls(struct adapter *, struct sysctl_ctx_list *,
    struct sysctl_oid_list *);
int t4_destroy_dma_tag(struct adapter *);
int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
    bus_addr_t *, void **);
int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
    void *);
void free_fl_buffers(struct adapter *, struct sge_fl *);
int t4_setup_adapter_queues(struct adapter *);
int t4_teardown_adapter_queues(struct adapter *);
int t4_setup_vi_queues(struct vi_info *);
int t4_teardown_vi_queues(struct vi_info *);
void t4_intr_all(void *);
void t4_intr(void *);
#ifdef DEV_NETMAP
void t4_nm_intr(void *);
void t4_vi_intr(void *);
#endif
void t4_intr_err(void *);
void t4_intr_evt(void *);
void t4_wrq_tx_locked(struct adapter *, struct sge_wrq *, struct wrqe *);
void t4_update_fl_bufsize(struct ifnet *);
struct mbuf *alloc_wr_mbuf(int, int);
int parse_pkt(struct mbuf **, bool);
void *start_wrq_wr(struct sge_wrq *, int, struct wrq_cookie *);
void commit_wrq_wr(struct sge_wrq *, void *, struct wrq_cookie *);
int t4_sge_set_conm_context(struct adapter *, int, int, int);
void t4_register_an_handler(an_handler_t);
void t4_register_fw_msg_handler(int, fw_msg_handler_t);
void t4_register_cpl_handler(int, cpl_handler_t);
void t4_register_shared_cpl_handler(int, cpl_handler_t, int);
#ifdef RATELIMIT
int ethofld_transmit(struct ifnet *, struct mbuf *);
void send_etid_flush_wr(struct cxgbe_rate_tag *);
#endif

/* t4_tracer.c */
struct t4_tracer;
void t4_tracer_modload(void);
void t4_tracer_modunload(void);
void t4_tracer_port_detach(struct adapter *);
int t4_get_tracer(struct adapter *, struct t4_tracer *);
int t4_set_tracer(struct adapter *, struct t4_tracer *);
int t4_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *);
int t5_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *);

/* t4_sched.c */
int t4_set_sched_class(struct adapter *, struct t4_sched_params *);
int t4_set_sched_queue(struct adapter *, struct t4_sched_queue *);
int t4_init_tx_sched(struct adapter *);
int t4_free_tx_sched(struct adapter *);
void t4_update_tx_sched(struct adapter *);
int t4_reserve_cl_rl_kbps(struct adapter *, int, u_int, int *);
void t4_release_cl_rl(struct adapter *, int, int);
int sysctl_tc(SYSCTL_HANDLER_ARGS);
int sysctl_tc_params(SYSCTL_HANDLER_ARGS);
#ifdef RATELIMIT
void t4_init_etid_table(struct adapter *);
void t4_free_etid_table(struct adapter *);
struct cxgbe_rate_tag *lookup_etid(struct adapter *, int);
int cxgbe_rate_tag_alloc(struct ifnet *, union if_snd_tag_alloc_params *,
    struct m_snd_tag **);
void cxgbe_rate_tag_free_locked(struct cxgbe_rate_tag *);
void cxgbe_ratelimit_query(struct ifnet *, struct if_ratelimit_query_results *);
#endif

/* t4_filter.c */
int get_filter_mode(struct adapter *, uint32_t *);
int set_filter_mode(struct adapter *, uint32_t);
int set_filter_mask(struct adapter *, uint32_t);
int get_filter(struct adapter *, struct t4_filter *);
int set_filter(struct adapter *, struct t4_filter *);
int del_filter(struct adapter *, struct t4_filter *);
int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
int t4_hashfilter_ao_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
int t4_hashfilter_tcb_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
int t4_del_hashfilter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
void free_hftid_hash(struct tid_info *);

static inline struct wrqe *
alloc_wrqe(int wr_len, struct sge_wrq *wrq)
{
	int len = offsetof(struct wrqe, wr) + wr_len;
	struct wrqe *wr;

	wr = malloc(len, M_CXGBE, M_NOWAIT);
	if (__predict_false(wr == NULL))
		return (NULL);
	wr->wr_len = wr_len;
	wr->wrq = wrq;
	return (wr);
}

static inline void *
wrtod(struct wrqe *wr)
{
	return (&wr->wr[0]);
}

static inline void
free_wrqe(struct wrqe *wr)
{
	free(wr, M_CXGBE);
}

static inline void
t4_wrq_tx(struct adapter *sc, struct wrqe *wr)
{
	struct sge_wrq *wrq = wr->wrq;

	TXQ_LOCK(wrq);
	t4_wrq_tx_locked(sc, wrq, wr);
	TXQ_UNLOCK(wrq);
}

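/*
 * A minimal sketch of the wrqe pattern above: allocate a buffer for a work
 * request, build the WR at wrtod(wr), and hand it to the wrq. The wrqe is
 * consumed by t4_wrq_tx; my_wr_size and the WR layout are hypothetical:
 *
 *	struct wrqe *wr;
 *
 *	wr = alloc_wrqe(my_wr_size, wrq);
 *	if (wr == NULL)
 *		return (ENOMEM);
 *	... fill in the work request at wrtod(wr) ...
 *	t4_wrq_tx(sc, wr);
 */
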
static inline int
read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
    int len)
{

	return (rw_via_memwin(sc, idx, addr, val, len, 0));
}

static inline int
write_via_memwin(struct adapter *sc, int idx, uint32_t addr,
    const uint32_t *val, int len)
{

	return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1));
}

/*
 * Number of len16 -> number of descriptors. For example, a WR with len16 = 5
 * (80 bytes) occupies howmany(5, 4) = 2 of the 64B hardware descriptors.
 */
static inline int
tx_len16_to_desc(int len16)
{

	return (howmany(len16, EQ_ESIZE / 16));
}
#endif