/**************************************************************************

Copyright (c) 2007-2009, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

$FreeBSD$

***************************************************************************/


#ifndef _CXGB_ADAPTER_H_
#define _CXGB_ADAPTER_H_

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/condvar.h>
#include <sys/buf_ring.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_dl.h>
#include <netinet/tcp_lro.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/bus_dma.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <cxgb_osdep.h>
#include <t3cdev.h>
#include <sys/mbufq.h>

struct adapter;
struct sge_qset;
extern int cxgb_debug;

#ifdef DEBUG_LOCKING
#define MTX_INIT(lock, lockname, class, flags) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		mtx_init((lock), lockname, class, flags); \
	} while (0)

#define MTX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		mtx_destroy((lock)); \
	} while (0)

#else
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#endif

enum {
	LF_NO = 0,
	LF_MAYBE,
	LF_YES
};

struct port_info {
	struct adapter	*adapter;
	struct ifnet	*ifp;
	int		if_flags;
	int		flags;
	const struct port_type_info *port_type;
	struct cphy	phy;
	struct cmac	mac;
	struct link_config link_config;
	struct ifmedia	media;
	struct mtx	lock;
	uint32_t	port_id;
	uint32_t	tx_chan;
	uint32_t	txpkt_intf;
	uint32_t	first_qset;
	uint32_t	nqsets;
	int		link_fault;

	uint8_t		hw_addr[ETHER_ADDR_LEN];
	struct callout	link_check_ch;
	struct task	link_check_task;
	struct task	timer_reclaim_task;
	struct cdev	*port_cdev;

#define PORT_LOCK_NAME_LEN 32
#define PORT_NAME_LEN 32
	char		lockbuf[PORT_LOCK_NAME_LEN];
	char		namebuf[PORT_NAME_LEN];
} __aligned(L1_CACHE_BYTES);

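/*
 * Illustrative sketch only (not part of this header's API): the attach
 * path is expected to format a unique name into pi->lockbuf and hand it
 * to PORT_LOCK_INIT(), defined later in this file.  The exact format
 * string below is an assumption.
 *
 *	snprintf(pi->lockbuf, PORT_LOCK_NAME_LEN, "cxgb port lock %d",
 *	    pi->port_id);
 *	PORT_LOCK_INIT(pi, pi->lockbuf);
 */
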
enum {
	/* adapter flags */
	FULL_INIT_DONE	= (1 << 0),
	USING_MSI	= (1 << 1),
	USING_MSIX	= (1 << 2),
	QUEUES_BOUND	= (1 << 3),
	FW_UPTODATE	= (1 << 4),
	TPS_UPTODATE	= (1 << 5),
	CXGB_SHUTDOWN	= (1 << 6),
	CXGB_OFLD_INIT	= (1 << 7),
	TP_PARITY_INIT	= (1 << 8),
	CXGB_BUSY	= (1 << 9),

	/* port flags */
	DOOMED		= (1 << 0),
};
#define IS_DOOMED(p)	(p->flags & DOOMED)
#define SET_DOOMED(p)	do {p->flags |= DOOMED;} while (0)
#define IS_BUSY(sc)	(sc->flags & CXGB_BUSY)
#define SET_BUSY(sc)	do {sc->flags |= CXGB_BUSY;} while (0)
#define CLR_BUSY(sc)	do {sc->flags &= ~CXGB_BUSY;} while (0)

#define FL_Q_SIZE	4096
#define JUMBO_Q_SIZE	1024
#define RSPQ_Q_SIZE	2048
#define TX_ETH_Q_SIZE	1024
#define TX_OFLD_Q_SIZE	1024
#define TX_CTRL_Q_SIZE	256

enum { TXQ_ETH = 0,
       TXQ_OFLD = 1,
       TXQ_CTRL = 2, };


/*
 * work request size in bytes
 */
#define WR_LEN (WR_FLITS * 8)
#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt_lso))

struct lro_state {
	unsigned short enabled;
	struct lro_ctrl ctrl;
};

#define RX_BUNDLE_SIZE 8

struct rsp_desc;

struct sge_rspq {
	uint32_t	credits;
	uint32_t	size;
	uint32_t	cidx;
	uint32_t	gen;
	uint32_t	polling;
	uint32_t	holdoff_tmr;
	uint32_t	next_holdoff;
	uint32_t	imm_data;
	uint32_t	async_notif;
	uint32_t	cntxt_id;
	uint32_t	offload_pkts;
	uint32_t	offload_bundles;
	uint32_t	pure_rsps;
	uint32_t	unhandled_irqs;
	uint32_t	starved;

	bus_addr_t	phys_addr;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;

	struct t3_mbuf_hdr rspq_mh;
	struct rsp_desc	*desc;
	struct mtx	lock;
#define RSPQ_NAME_LEN 32
	char		lockbuf[RSPQ_NAME_LEN];
	uint32_t	rspq_dump_start;
	uint32_t	rspq_dump_count;
};

struct rx_desc;
struct rx_sw_desc;

struct sge_fl {
	uint32_t	buf_size;
	uint32_t	credits;
	uint32_t	size;
	uint32_t	cidx;
	uint32_t	pidx;
	uint32_t	gen;
	uint32_t	db_pending;
	bus_addr_t	phys_addr;
	uint32_t	cntxt_id;
	uint32_t	empty;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;
	bus_dma_tag_t	entry_tag;
	uma_zone_t	zone;
	struct rx_desc	*desc;
	struct rx_sw_desc *sdesc;
	int		type;
};

struct tx_desc;
struct tx_sw_desc;

#define TXQ_TRANSMITTING 0x1

struct sge_txq {
	uint64_t	flags;
	uint32_t	in_use;
	uint32_t	size;
	uint32_t	processed;
	uint32_t	cleaned;
	uint32_t	stop_thres;
	uint32_t	cidx;
	uint32_t	pidx;
	uint32_t	gen;
	uint32_t	unacked;
	uint32_t	db_pending;
	struct tx_desc	*desc;
	struct tx_sw_desc *sdesc;
	uint32_t	token;
	bus_addr_t	phys_addr;
	struct task	qresume_task;
	struct task	qreclaim_task;
	uint32_t	cntxt_id;
	uint64_t	stops;
	uint64_t	restarts;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;
	bus_dma_tag_t	entry_tag;
	struct mbuf_head sendq;

	struct buf_ring	*txq_mr;
	struct ifaltq	*txq_ifq;
	struct callout	txq_timer;
	struct callout	txq_watchdog;
	uint64_t	txq_coalesced;
	uint32_t	txq_skipped;
	uint32_t	txq_enqueued;
	uint32_t	txq_dump_start;
	uint32_t	txq_dump_count;
	uint64_t	txq_direct_packets;
	uint64_t	txq_direct_bytes;
	uint64_t	txq_frees;
	struct sg_ent	txq_sgl[TX_MAX_SEGS / 2 + 1];
};

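/*
 * Illustrative only: a queue set (struct sge_qset, below) carries one
 * struct sge_txq per traffic type, indexed by the TXQ_* constants defined
 * earlier.  A minimal sketch of the indexing:
 *
 *	struct sge_txq *ethq = &qs->txq[TXQ_ETH];
 *	struct sge_txq *ctrlq = &qs->txq[TXQ_CTRL];
 */
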
enum {
	SGE_PSTAT_TSO,			/* # of TSO requests */
	SGE_PSTAT_RX_CSUM_GOOD,		/* # of successful RX csum offloads */
	SGE_PSTAT_TX_CSUM,		/* # of TX checksum offloads */
	SGE_PSTAT_VLANEX,		/* # of VLAN tag extractions */
	SGE_PSTAT_VLANINS,		/* # of VLAN tag insertions */
};

#define SGE_PSTAT_MAX (SGE_PSTAT_VLANINS+1)

#define QS_EXITING	0x1
#define QS_RUNNING	0x2
#define QS_BOUND	0x4
#define QS_FLUSHING	0x8
#define QS_TIMEOUT	0x10

struct sge_qset {
	struct sge_rspq		rspq;
	struct sge_fl		fl[SGE_RXQ_PER_SET];
	struct lro_state	lro;
	struct sge_txq		txq[SGE_TXQ_PER_SET];
	uint32_t		txq_stopped;	/* which Tx queues are stopped */
	uint64_t		port_stats[SGE_PSTAT_MAX];
	struct port_info	*port;
	int			idx;		/* qset # */
	int			qs_flags;
	int			coalescing;
	struct cv		qs_cv;
	struct mtx		lock;
#define QS_NAME_LEN 32
	char			namebuf[QS_NAME_LEN];
};

struct sge {
	struct sge_qset		qs[SGE_QSETS];
	struct mtx		reg_lock;
};

struct filter_info;

struct adapter {
	device_t		dev;
	int			flags;
	TAILQ_ENTRY(adapter)	adapter_entry;

	/* PCI register resources */
	int			regs_rid;
	struct resource		*regs_res;
	int			udbs_rid;
	struct resource		*udbs_res;
	bus_space_handle_t	bh;
	bus_space_tag_t		bt;
	bus_size_t		mmio_len;
	uint32_t		link_width;

	/* DMA resources */
	bus_dma_tag_t		parent_dmat;
	bus_dma_tag_t		rx_dmat;
	bus_dma_tag_t		rx_jumbo_dmat;
	bus_dma_tag_t		tx_dmat;

	/* Interrupt resources */
	struct resource		*irq_res;
	int			irq_rid;
	void			*intr_tag;

	uint32_t		msix_regs_rid;
	struct resource		*msix_regs_res;

	struct resource		*msix_irq_res[SGE_QSETS];
	int			msix_irq_rid[SGE_QSETS];
	void			*msix_intr_tag[SGE_QSETS];
	uint8_t			rxpkt_map[8];		/* maps RX_PKT interface values to port ids */
	uint8_t			rrss_map[SGE_QSETS];	/* reverse RSS map table */
	uint16_t		rspq_map[RSS_TABLE_SIZE]; /* maps 7-bit cookie to qidx */
	union {
		uint8_t		fill[SGE_QSETS];
		uint64_t	coalesce;
	} u;

#define tunq_fill u.fill
#define tunq_coalesce u.coalesce

	struct filter_info	*filters;

	/* Tasks */
	struct task		slow_intr_task;
	struct task		tick_task;
	struct taskqueue	*tq;
	struct callout		cxgb_tick_ch;
	struct callout		sge_timer_ch;

	/* Register lock for use by the hardware layer */
	struct mtx		mdio_lock;
	struct mtx		elmer_lock;

	/* Bookkeeping for the hardware layer */
	struct adapter_params	params;
	unsigned int		slow_intr_mask;
	unsigned long		irq_stats[IRQ_NUM_STATS];

	struct sge		sge;
	struct mc7		pmrx;
	struct mc7		pmtx;
	struct mc7		cm;
	struct mc5		mc5;

	struct port_info	port[MAX_NPORTS];
	device_t		portdev[MAX_NPORTS];
	struct t3cdev		tdev;
	char			fw_version[64];
	char			port_types[MAX_NPORTS + 1];
	uint32_t		open_device_map;
	uint32_t		registered_device_map;
	struct mtx		lock;
	driver_intr_t		*cxgb_intr;
	int			msi_count;

#define ADAPTER_LOCK_NAME_LEN	32
	char			lockbuf[ADAPTER_LOCK_NAME_LEN];
	char			reglockbuf[ADAPTER_LOCK_NAME_LEN];
	char			mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
	char			elmerlockbuf[ADAPTER_LOCK_NAME_LEN];

	int			timestamp;
};

struct t3_rx_mode {
	uint32_t		idx;
	struct port_info	*port;
};

#define MDIO_LOCK(adapter)	mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter)	mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter)	mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter)	mtx_unlock(&(adapter)->elmer_lock)

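/*
 * Minimal usage sketch (an assumption, not lifted from the driver): MDIO
 * accesses to the PHYs are expected to be serialized with the adapter's
 * mdio_lock via the wrappers above, e.g.
 *
 *	MDIO_LOCK(sc);
 *	error = do_mdio_read(sc, phy_addr, mmd, reg, &val);
 *	MDIO_UNLOCK(sc);
 *
 * where do_mdio_read() is a hypothetical stand-in for the hardware
 * layer's MDIO routine.
 */
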
#define PORT_LOCK(port)			mtx_lock(&(port)->lock);
#define PORT_UNLOCK(port)		mtx_unlock(&(port)->lock);
#define PORT_LOCK_INIT(port, name)	mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port)		mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_NOTOWNED(port)	mtx_assert(&(port)->lock, MA_NOTOWNED)
#define PORT_LOCK_ASSERT_OWNED(port)	mtx_assert(&(port)->lock, MA_OWNED)

#define ADAPTER_LOCK(adap)		mtx_lock(&(adap)->lock);
#define ADAPTER_UNLOCK(adap)		mtx_unlock(&(adap)->lock);
#define ADAPTER_LOCK_INIT(adap, name)	mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap)	mtx_destroy(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) mtx_assert(&(adap)->lock, MA_NOTOWNED)
#define ADAPTER_LOCK_ASSERT_OWNED(adap)	mtx_assert(&(adap)->lock, MA_OWNED)


static __inline uint32_t
t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
{
	return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
}

static __inline void
t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
{
	bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
}

static __inline void
t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 4);
}

static __inline void
t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
{
	pci_write_config(adapter->dev, reg, val, 4);
}

static __inline void
t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 2);
}

static __inline void
t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
{
	pci_write_config(adapter->dev, reg, val, 2);
}

static __inline uint8_t *
t3_get_next_mcaddr(struct t3_rx_mode *rm)
{
	uint8_t *macaddr = NULL;
	struct ifnet *ifp = rm->port->ifp;
	struct ifmultiaddr *ifma;
	int i = 0;

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (i == rm->idx) {
			macaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			break;
		}
		i++;
	}
	if_maddr_runlock(ifp);

	rm->idx++;
	return (macaddr);
}

static __inline void
t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
{
	rm->idx = 0;
	rm->port = port;
}

static __inline struct port_info *
adap2pinfo(struct adapter *adap, int idx)
{
	return &adap->port[idx];
}

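/*
 * A sketch of the intended multicast walk using t3_init_rx_mode() and
 * t3_get_next_mcaddr() above (illustrative only; the actual consumer lives
 * in the hardware layer's MAC setup code):
 *
 *	struct t3_rx_mode rm;
 *	uint8_t *ea;
 *
 *	t3_init_rx_mode(&rm, pi);
 *	while ((ea = t3_get_next_mcaddr(&rm)) != NULL) {
 *		... program ea into the MAC's multicast filter ...
 *	}
 */
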
int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);
void t3_os_link_intr(struct port_info *);
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
			int speed, int duplex, int fc, int mac_was_reset);
void t3_os_phymod_changed(struct adapter *adap, int port_id);
void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_offload_tx(struct t3cdev *, struct mbuf *);
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);


int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
    int, struct port_info *);
void t3_free_sge_resources(adapter_t *, int);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);
void t3b_intr(void *data);
void t3_intr_msi(void *data);
void t3_intr_msix(void *data);

int t3_sge_init_adapter(adapter_t *);
int t3_sge_reset_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_free_tx_desc(struct sge_qset *qs, int n, int qid);

void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);

void t3_add_attach_sysctls(adapter_t *sc);
void t3_add_configured_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
    unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);

/*
 * XXX figure out how we can return this to being private to sge
 */
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))

#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))

static __inline struct sge_qset *
fl_to_qset(struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static __inline struct sge_qset *
rspq_to_qset(struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static __inline struct sge_qset *
txq_to_qset(struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

static __inline struct adapter *
tdev2adap(struct t3cdev *d)
{
	return container_of(d, struct adapter, tdev);
}

#undef container_of

#define OFFLOAD_DEVMAP_BIT 15
static inline int offload_running(adapter_t *adapter)
{
	return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
}

void cxgb_tx_watchdog(void *arg);
int cxgb_transmit(struct ifnet *ifp, struct mbuf *m);
void cxgb_qflush(struct ifnet *ifp);
void cxgb_start(struct ifnet *ifp);
#endif