/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.


$FreeBSD$

***************************************************************************/


#ifndef _CXGB_ADAPTER_H_
#define _CXGB_ADAPTER_H_

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/rman.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/condvar.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/bus_dma.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#ifdef CONFIG_DEFINED
#include <cxgb_osdep.h>
#include <t3cdev.h>
#include <ulp/toecore/cxgb_toedev.h>
#include <sys/mbufq.h>
#else
#include <dev/cxgb/cxgb_osdep.h>
#include <dev/cxgb/t3cdev.h>
#include <dev/cxgb/sys/mbufq.h>
#include <dev/cxgb/ulp/toecore/cxgb_toedev.h>
#endif

#define USE_SX

struct adapter;
struct sge_qset;
extern int cxgb_debug;

#ifdef DEBUG_LOCKING
#define MTX_INIT(lock, lockname, class, flags) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		mtx_init((lock), lockname, class, flags); \
	} while (0)

#define MTX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		mtx_destroy((lock)); \
	} while (0)

#define SX_INIT(lock, lockname) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		sx_init((lock), lockname); \
	} while (0)

#define SX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		sx_destroy((lock)); \
	} while (0)
#else
#define MTX_INIT	mtx_init
#define MTX_DESTROY	mtx_destroy
#define SX_INIT		sx_init
#define SX_DESTROY	sx_destroy
#endif
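
/*
 * Illustrative use of the locking wrappers above (hypothetical caller, not
 * part of this header):
 *
 *	snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock");
 *	MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF);
 *	...
 *	MTX_DESTROY(&sc->mdio_lock);
 *
 * With DEBUG_LOCKING defined, each init/destroy additionally logs the lock
 * name and the file/line of the call site; otherwise the wrappers collapse
 * to plain mtx_init()/mtx_destroy() (and sx_init()/sx_destroy()).
 */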

struct port_info {
	struct adapter		*adapter;
	struct ifnet		*ifp;
	int			if_flags;
	const struct port_type_info *port_type;
	struct cphy		phy;
	struct cmac		mac;
	struct link_config	link_config;
	struct ifmedia		media;
#ifdef USE_SX
	struct sx		lock;
#else
	struct mtx		lock;
#endif
	uint8_t			port_id;
	uint8_t			tx_chan;
	uint8_t			txpkt_intf;
	uint8_t			nqsets;
	uint8_t			first_qset;

	uint8_t			hw_addr[ETHER_ADDR_LEN];
	struct taskqueue	*tq;
	struct task		start_task;
	struct task		timer_reclaim_task;
	struct cdev		*port_cdev;

#define PORT_LOCK_NAME_LEN 32
#define TASKQ_NAME_LEN 32
#define PORT_NAME_LEN 32
	char			lockbuf[PORT_LOCK_NAME_LEN];
	char			taskqbuf[TASKQ_NAME_LEN];
	char			namebuf[PORT_NAME_LEN];
};

enum {				/* adapter flags */
	FULL_INIT_DONE	= (1 << 0),
	USING_MSI	= (1 << 1),
	USING_MSIX	= (1 << 2),
	QUEUES_BOUND	= (1 << 3),
	FW_UPTODATE	= (1 << 4),
	TPS_UPTODATE	= (1 << 5),
};

#define FL_Q_SIZE	4096
#define JUMBO_Q_SIZE	1024
#define RSPQ_Q_SIZE	1024
#define TX_ETH_Q_SIZE	1024

enum { TXQ_ETH = 0,
       TXQ_OFLD = 1,
       TXQ_CTRL = 2, };


/* careful, the following are set on priv_flags and must not collide with
 * IFF_ flags!
 */
enum {
	LRO_ACTIVE = (1 << 8),
};

/* Max concurrent LRO sessions per queue set */
#define MAX_LRO_SES 8

struct t3_lro_session {
	struct mbuf	*head;
	struct mbuf	*tail;
	uint32_t	seq;
	uint16_t	ip_len;
	uint16_t	mss;
	uint16_t	vtag;
	uint8_t		npkts;
};

struct lro_state {
	unsigned short	enabled;
	unsigned short	active_idx;
	unsigned int	nactive;
	struct t3_lro_session sess[MAX_LRO_SES];
};

#define RX_BUNDLE_SIZE 8

struct rsp_desc;

struct sge_rspq {
	uint32_t	credits;
	uint32_t	size;
	uint32_t	cidx;
	uint32_t	gen;
	uint32_t	polling;
	uint32_t	holdoff_tmr;
	uint32_t	next_holdoff;
	uint32_t	imm_data;
	struct rsp_desc	*desc;
	uint32_t	cntxt_id;
	struct mtx	lock;
	struct mbuf	*rx_head;	/* offload packet receive queue head */
	struct mbuf	*rx_tail;	/* offload packet receive queue tail */

	uint32_t	offload_pkts;
	uint32_t	offload_bundles;
	uint32_t	pure_rsps;
	uint32_t	unhandled_irqs;

	bus_addr_t	phys_addr;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;

	struct t3_mbuf_hdr rspq_mh;
#define RSPQ_NAME_LEN 32
	char		lockbuf[RSPQ_NAME_LEN];
};

#ifndef DISABLE_MBUF_IOVEC
#define rspq_mbuf rspq_mh.mh_head
#endif

struct rx_desc;
struct rx_sw_desc;

struct sge_fl {
	uint32_t	buf_size;
	uint32_t	credits;
	uint32_t	size;
	uint32_t	cidx;
	uint32_t	pidx;
	uint32_t	gen;
	struct rx_desc	*desc;
	struct rx_sw_desc *sdesc;
	bus_addr_t	phys_addr;
	uint32_t	cntxt_id;
	uint64_t	empty;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;
	bus_dma_tag_t	entry_tag;
	uma_zone_t	zone;
	int		type;
};

struct tx_desc;
struct tx_sw_desc;

#define TXQ_TRANSMITTING 0x1
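
/*
 * Note: each queue set (struct sge_qset below) carries SGE_TXQ_PER_SET of
 * the following Tx queues, indexed by the TXQ_ETH/TXQ_OFLD/TXQ_CTRL
 * constants above; e.g. qs->txq[TXQ_ETH] would be the Ethernet Tx queue of
 * a qset (illustrative expression only, the accessors live in the .c files).
 */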

struct sge_txq {
	uint64_t	flags;
	uint32_t	in_use;
	uint32_t	size;
	uint32_t	processed;
	uint32_t	cleaned;
	uint32_t	stop_thres;
	uint32_t	cidx;
	uint32_t	pidx;
	uint32_t	gen;
	uint32_t	unacked;
	struct tx_desc	*desc;
	struct tx_sw_desc *sdesc;
	uint32_t	token;
	bus_addr_t	phys_addr;
	struct task	qresume_task;
	struct task	qreclaim_task;
	struct port_info *port;
	uint32_t	cntxt_id;
	uint64_t	stops;
	uint64_t	restarts;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;
	bus_dma_tag_t	entry_tag;
	struct mbuf_head sendq;
	/*
	 * cleanq should really be a buf_ring to avoid extra
	 * mbuf touches
	 */
	struct mbuf_head cleanq;
	struct buf_ring	txq_mr;
	struct mbuf	*immpkt;
	uint32_t	txq_drops;
	uint32_t	txq_skipped;
	uint32_t	txq_coalesced;
	uint32_t	txq_enqueued;
	unsigned long	txq_frees;
	struct mtx	lock;
	struct sg_ent	txq_sgl[TX_MAX_SEGS / 2 + 1];
	bus_dma_segment_t txq_segs[TX_MAX_SEGS];
	struct mbuf	*txq_m_vec[TX_WR_COUNT_MAX];
#define TXQ_NAME_LEN 32
	char		lockbuf[TXQ_NAME_LEN];
};


enum {
	SGE_PSTAT_TSO,			/* # of TSO requests */
	SGE_PSTAT_RX_CSUM_GOOD,		/* # of successful RX csum offloads */
	SGE_PSTAT_TX_CSUM,		/* # of TX checksum offloads */
	SGE_PSTAT_VLANEX,		/* # of VLAN tag extractions */
	SGE_PSTAT_VLANINS,		/* # of VLAN tag insertions */
	SGE_PSTATS_LRO_QUEUED,		/* # of LRO appended packets */
	SGE_PSTATS_LRO_FLUSHED,		/* # of LRO flushed packets */
	SGE_PSTATS_LRO_X_STREAMS,	/* # of exceeded LRO contexts */
};

#define SGE_PSTAT_MAX (SGE_PSTATS_LRO_X_STREAMS+1)

#define QS_EXITING	0x1
#define QS_RUNNING	0x2
#define QS_BOUND	0x4

struct sge_qset {
	struct sge_rspq		rspq;
	struct sge_fl		fl[SGE_RXQ_PER_SET];
	struct lro_state	lro;
	struct sge_txq		txq[SGE_TXQ_PER_SET];
	uint32_t		txq_stopped;	/* which Tx queues are stopped */
	uint64_t		port_stats[SGE_PSTAT_MAX];
	struct port_info	*port;
	int			idx;		/* qset # */
	int			qs_cpuid;
	int			qs_flags;
	struct cv		qs_cv;
	struct mtx		qs_mtx;
#define QS_NAME_LEN 32
	char			namebuf[QS_NAME_LEN];
};

struct sge {
	struct sge_qset		qs[SGE_QSETS];
	struct mtx		reg_lock;
};

struct filter_info;
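
/*
 * Per-device softc: PCI register, DMA, and interrupt resources, the SGE
 * queue sets declared above, the MC5/MC7 hardware blocks, and one
 * port_info per external port.
 */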
struct adapter {
	device_t		dev;
	int			flags;
	TAILQ_ENTRY(adapter)	adapter_entry;

	/* PCI register resources */
	int			regs_rid;
	struct resource		*regs_res;
	bus_space_handle_t	bh;
	bus_space_tag_t		bt;
	bus_size_t		mmio_len;
	uint32_t		link_width;

	/* DMA resources */
	bus_dma_tag_t		parent_dmat;
	bus_dma_tag_t		rx_dmat;
	bus_dma_tag_t		rx_jumbo_dmat;
	bus_dma_tag_t		tx_dmat;

	/* Interrupt resources */
	struct resource		*irq_res;
	int			irq_rid;
	void			*intr_tag;

	uint32_t		msix_regs_rid;
	struct resource		*msix_regs_res;

	struct resource		*msix_irq_res[SGE_QSETS];
	int			msix_irq_rid[SGE_QSETS];
	void			*msix_intr_tag[SGE_QSETS];
	uint8_t			rxpkt_map[8];	/* maps RX_PKT interface values to port ids */
	uint8_t			rrss_map[SGE_QSETS];	/* reverse RSS map table */
	uint16_t		rspq_map[RSS_TABLE_SIZE];	/* maps 7-bit cookie to qidx */
	union {
		uint8_t		fill[SGE_QSETS];
		uint64_t	coalesce;
	} u;

#define tunq_fill u.fill
#define tunq_coalesce u.coalesce

	struct filter_info	*filters;

	/* Tasks */
	struct task		ext_intr_task;
	struct task		slow_intr_task;
	struct task		tick_task;
	struct task		process_responses_task;
	struct taskqueue	*tq;
	struct callout		cxgb_tick_ch;
	struct callout		sge_timer_ch;

	/* Register lock for use by the hardware layer */
	struct mtx		mdio_lock;
	struct mtx		elmer_lock;

	/* Bookkeeping for the hardware layer */
	struct adapter_params	params;
	unsigned int		slow_intr_mask;
	unsigned long		irq_stats[IRQ_NUM_STATS];

	struct sge		sge;
	struct mc7		pmrx;
	struct mc7		pmtx;
	struct mc7		cm;
	struct mc5		mc5;

	struct port_info	port[MAX_NPORTS];
	device_t		portdev[MAX_NPORTS];
	struct t3cdev		tdev;
	char			fw_version[64];
	uint32_t		open_device_map;
	uint32_t		registered_device_map;
#ifdef USE_SX
	struct sx		lock;
#else
	struct mtx		lock;
#endif
	driver_intr_t		*cxgb_intr;
	int			msi_count;

#define ADAPTER_LOCK_NAME_LEN	32
	char			lockbuf[ADAPTER_LOCK_NAME_LEN];
	char			reglockbuf[ADAPTER_LOCK_NAME_LEN];
	char			mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
	char			elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
};

struct t3_rx_mode {
	uint32_t		idx;
	struct port_info	*port;
};


#define MDIO_LOCK(adapter)	mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter)	mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter)	mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter)	mtx_unlock(&(adapter)->elmer_lock)


#ifdef USE_SX
#define PORT_LOCK(port)			sx_xlock(&(port)->lock);
#define PORT_UNLOCK(port)		sx_xunlock(&(port)->lock);
#define PORT_LOCK_INIT(port, name)	SX_INIT(&(port)->lock, name)
#define PORT_LOCK_DEINIT(port)		SX_DESTROY(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port)	sx_assert(&(port)->lock, SA_LOCKED)

#define ADAPTER_LOCK(adap)		sx_xlock(&(adap)->lock);
#define ADAPTER_UNLOCK(adap)		sx_xunlock(&(adap)->lock);
#define ADAPTER_LOCK_INIT(adap, name)	SX_INIT(&(adap)->lock, name)
#define ADAPTER_LOCK_DEINIT(adap)	SX_DESTROY(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap)	sx_assert(&(adap)->lock, SA_UNLOCKED)
#else
#define PORT_LOCK(port)			mtx_lock(&(port)->lock);
#define PORT_UNLOCK(port)		mtx_unlock(&(port)->lock);
#define PORT_LOCK_INIT(port, name)	mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port)		mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port)	mtx_assert(&(port)->lock, MA_OWNED)

#define ADAPTER_LOCK(adap)		mtx_lock(&(adap)->lock);
#define ADAPTER_UNLOCK(adap)		mtx_unlock(&(adap)->lock);
#define ADAPTER_LOCK_INIT(adap, name)	mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap)	mtx_destroy(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap)	mtx_assert(&(adap)->lock, MA_NOTOWNED)
#endif


static __inline uint32_t
t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
{
	return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
}

static __inline void
t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
{
	bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
}

static __inline void
t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 4);
}

static __inline void
t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
{
	pci_write_config(adapter->dev, reg, val, 4);
}

static __inline void
t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 2);
}

static __inline void
t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
{
	pci_write_config(adapter->dev, reg, val, 2);
}

static __inline uint8_t *
t3_get_next_mcaddr(struct t3_rx_mode *rm)
{
	uint8_t *macaddr = NULL;

	if (rm->idx == 0)
		macaddr = (uint8_t *)rm->port->hw_addr;

	rm->idx++;
	return (macaddr);
}

static __inline void
t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
{
	rm->idx = 0;
	rm->port = port;
}
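
/*
 * The two helpers above form a small iterator over the addresses used for
 * Rx-mode programming.  A (hypothetical) caller would do:
 *
 *	struct t3_rx_mode rm;
 *	uint8_t *ea;
 *
 *	t3_init_rx_mode(&rm, pi);
 *	while ((ea = t3_get_next_mcaddr(&rm)) != NULL)
 *		... program ea into the MAC address filter ...
 *
 * As written, only the port's hw_addr is returned (on the first call);
 * subsequent calls yield NULL.
 */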

static __inline struct port_info *
adap2pinfo(struct adapter *adap, int idx)
{
	return &adap->port[idx];
}

int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
			int speed, int duplex, int fc);
void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_offload_tx(struct t3cdev *, struct mbuf *);
void t3_os_ext_intr_handler(adapter_t *adapter);
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);


int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
    int, struct port_info *);
void t3_free_sge_resources(adapter_t *);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);
void t3b_intr(void *data);
void t3_intr_msi(void *data);
void t3_intr_msix(void *data);
int t3_encap(struct sge_qset *, struct mbuf **, int);

int t3_sge_init_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_sge_deinit_sw(adapter_t *);
void t3_free_tx_desc(struct sge_txq *q, int n);
void t3_free_tx_desc_all(struct sge_txq *q);

void t3_rx_eth_lro(adapter_t *adap, struct sge_rspq *rq, struct mbuf *m,
    int ethpad, uint32_t rss_hash, uint32_t rss_csum, int lro);
void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);
void t3_lro_flush(adapter_t *adap, struct sge_qset *qs, struct lro_state *state);

void t3_add_attach_sysctls(adapter_t *sc);
void t3_add_configured_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
    unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
/*
 * XXX figure out how we can return this to being private to sge
 */
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))

#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))

static __inline struct sge_qset *
fl_to_qset(struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static __inline struct sge_qset *
rspq_to_qset(struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static __inline struct sge_qset *
txq_to_qset(struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

static __inline struct adapter *
tdev2adap(struct t3cdev *d)
{
	return container_of(d, struct adapter, tdev);
}

#undef container_of
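
/*
 * The container_of() helpers above recover the enclosing qset (or adapter)
 * from a pointer to one of its embedded members.  For example,
 * rspq_to_qset(q) evaluates to
 *
 *	(struct sge_qset *)((uint8_t *)q - offsetof(struct sge_qset, rspq))
 *
 * i.e. it steps back from the embedded rspq to the start of the sge_qset
 * that contains it.  The macro is #undef'd here to keep it private to this
 * header.
 */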

#define OFFLOAD_DEVMAP_BIT 15
static inline int offload_running(adapter_t *adapter)
{
	return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
}

#ifdef IFNET_MULTIQUEUE
int cxgb_pcpu_enqueue_packet(struct ifnet *ifp, struct mbuf *m);
int cxgb_pcpu_start(struct ifnet *ifp, struct mbuf *m);
int32_t cxgb_pcpu_get_cookie(struct ifnet *ifp, struct in6_addr *lip, uint16_t lport,
    struct in6_addr *rip, uint16_t rport, int ipv6);
void cxgb_pcpu_shutdown_threads(struct adapter *sc);
void cxgb_pcpu_startup_threads(struct adapter *sc);
#endif

int process_responses(adapter_t *adap, struct sge_qset *qs, int budget);
int cxgb_tx_common(struct ifnet *ifp, struct sge_qset *qs, uint32_t txmax);
void t3_free_qset(adapter_t *sc, struct sge_qset *q);
int cxgb_dequeue_packet(struct ifnet *, struct sge_txq *, struct mbuf **);
void cxgb_start(struct ifnet *ifp);
void refill_fl_service(adapter_t *adap, struct sge_fl *fl);

#endif