/**************************************************************************

Copyright (c) 2007-2009, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

$FreeBSD$

***************************************************************************/


#ifndef _CXGB_ADAPTER_H_
#define _CXGB_ADAPTER_H_

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/condvar.h>
#include <sys/buf_ring.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_dl.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/bus_dma.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <cxgb_osdep.h>
#include <t3cdev.h>
#include <sys/mbufq.h>

#ifdef LRO_SUPPORTED
#include <netinet/tcp_lro.h>
#endif

struct adapter;
struct sge_qset;
extern int cxgb_debug;

#ifdef DEBUG_LOCKING
#define MTX_INIT(lock, lockname, class, flags) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		mtx_init((lock), lockname, class, flags); \
	} while (0)

#define MTX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		mtx_destroy((lock)); \
	} while (0)

#else
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#endif

enum {
	LF_NO = 0,
	LF_MAYBE,
	LF_YES
};

struct port_info {
	struct adapter	*adapter;
	struct ifnet	*ifp;
	int		if_flags;
	int		flags;
	const struct port_type_info *port_type;
	struct cphy	phy;
	struct cmac	mac;
	struct link_config link_config;
	struct ifmedia	media;
	struct mtx	lock;
	uint32_t	port_id;
	uint32_t	tx_chan;
	uint32_t	txpkt_intf;
	uint32_t	first_qset;
	uint32_t	nqsets;
	int		link_fault;

	uint8_t		hw_addr[ETHER_ADDR_LEN];
	struct task	timer_reclaim_task;
	struct cdev	*port_cdev;

#define PORT_LOCK_NAME_LEN 32
#define PORT_NAME_LEN 32
	char		lockbuf[PORT_LOCK_NAME_LEN];
	char		namebuf[PORT_NAME_LEN];
} __aligned(L1_CACHE_BYTES);

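/*
 * Adapter-wide and per-port state flags.  Adapter flags live in
 * adapter->flags and port flags in port_info->flags; the IS/SET/CLR
 * helper macros below test and change them, normally with the
 * corresponding lock held.
 */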
enum {
	/* adapter flags */
	FULL_INIT_DONE	= (1 << 0),
	USING_MSI	= (1 << 1),
	USING_MSIX	= (1 << 2),
	QUEUES_BOUND	= (1 << 3),
	FW_UPTODATE	= (1 << 4),
	TPS_UPTODATE	= (1 << 5),
	CXGB_SHUTDOWN	= (1 << 6),
	CXGB_OFLD_INIT	= (1 << 7),
	TP_PARITY_INIT	= (1 << 8),
	CXGB_BUSY	= (1 << 9),

	/* port flags */
	DOOMED		= (1 << 0),
};
#define IS_DOOMED(p)	(p->flags & DOOMED)
#define SET_DOOMED(p)	do {p->flags |= DOOMED;} while (0)
#define DOOMED(p)	(p->flags & DOOMED)
#define IS_BUSY(sc)	(sc->flags & CXGB_BUSY)
#define SET_BUSY(sc)	do {sc->flags |= CXGB_BUSY;} while (0)
#define CLR_BUSY(sc)	do {sc->flags &= ~CXGB_BUSY;} while (0)

#define FL_Q_SIZE	4096
#define JUMBO_Q_SIZE	1024
#define RSPQ_Q_SIZE	1024
#define TX_ETH_Q_SIZE	1024

enum { TXQ_ETH = 0,
       TXQ_OFLD = 1,
       TXQ_CTRL = 2, };


/*
 * work request size in bytes
 */
#define WR_LEN (WR_FLITS * 8)
#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt_lso))

#ifdef LRO_SUPPORTED
struct lro_state {
	unsigned short enabled;
	struct lro_ctrl ctrl;
};
#endif

#define RX_BUNDLE_SIZE 8

struct rsp_desc;

struct sge_rspq {
	uint32_t	credits;
	uint32_t	size;
	uint32_t	cidx;
	uint32_t	gen;
	uint32_t	polling;
	uint32_t	holdoff_tmr;
	uint32_t	next_holdoff;
	uint32_t	imm_data;
	uint32_t	async_notif;
	uint32_t	cntxt_id;
	uint32_t	offload_pkts;
	uint32_t	offload_bundles;
	uint32_t	pure_rsps;
	uint32_t	unhandled_irqs;

	bus_addr_t	phys_addr;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;

	struct t3_mbuf_hdr rspq_mh;
	struct rsp_desc	*desc;
	struct mtx	lock;
#define RSPQ_NAME_LEN 32
	char		lockbuf[RSPQ_NAME_LEN];
	uint32_t	rspq_dump_start;
	uint32_t	rspq_dump_count;
};

struct rx_desc;
struct rx_sw_desc;

struct sge_fl {
	uint32_t	buf_size;
	uint32_t	credits;
	uint32_t	size;
	uint32_t	cidx;
	uint32_t	pidx;
	uint32_t	gen;
	bus_addr_t	phys_addr;
	uint32_t	cntxt_id;
	uint32_t	empty;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;
	bus_dma_tag_t	entry_tag;
	uma_zone_t	zone;
	struct rx_desc	*desc;
	struct rx_sw_desc *sdesc;
	int		type;
};

struct tx_desc;
struct tx_sw_desc;

#define TXQ_TRANSMITTING	0x1

struct sge_txq {
	uint64_t	flags;
	uint32_t	in_use;
	uint32_t	size;
	uint32_t	processed;
	uint32_t	cleaned;
	uint32_t	stop_thres;
	uint32_t	cidx;
	uint32_t	pidx;
	uint32_t	gen;
	uint32_t	unacked;
	struct tx_desc	*desc;
	struct tx_sw_desc *sdesc;
	uint32_t	token;
	bus_addr_t	phys_addr;
	struct task	qresume_task;
	struct task	qreclaim_task;
	uint32_t	cntxt_id;
	uint64_t	stops;
	uint64_t	restarts;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;
	bus_dma_tag_t	entry_tag;
	struct mbuf_head sendq;

	struct buf_ring	*txq_mr;
	struct ifaltq	*txq_ifq;
	struct callout	txq_timer;
	struct callout	txq_watchdog;
	uint64_t	txq_coalesced;
	uint32_t	txq_drops;
	uint32_t	txq_skipped;
	uint32_t	txq_enqueued;
	uint32_t	txq_dump_start;
	uint32_t	txq_dump_count;
	uint64_t	txq_direct_packets;
	uint64_t	txq_direct_bytes;
	uint64_t	txq_frees;
	struct sg_ent	txq_sgl[TX_MAX_SEGS / 2 + 1];
};


enum {
	SGE_PSTAT_TSO,			/* # of TSO requests */
	SGE_PSTAT_RX_CSUM_GOOD,		/* # of successful RX csum offloads */
	SGE_PSTAT_TX_CSUM,		/* # of TX checksum offloads */
	SGE_PSTAT_VLANEX,		/* # of VLAN tag extractions */
	SGE_PSTAT_VLANINS,		/* # of VLAN tag insertions */
};

#define SGE_PSTAT_MAX (SGE_PSTAT_VLANINS+1)

#define QS_EXITING	0x1
#define QS_RUNNING	0x2
#define QS_BOUND	0x4
#define QS_FLUSHING	0x8
#define QS_TIMEOUT	0x10

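/*
 * A queue set bundles the per-interrupt SGE state: one response queue,
 * SGE_RXQ_PER_SET free lists (normal and jumbo receive buffers, sized by
 * FL_Q_SIZE and JUMBO_Q_SIZE), and SGE_TXQ_PER_SET transmit queues, one
 * for each of the TXQ_ETH, TXQ_OFLD and TXQ_CTRL types.
 */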
struct sge_qset {
	struct sge_rspq		rspq;
	struct sge_fl		fl[SGE_RXQ_PER_SET];
#ifdef LRO_SUPPORTED
	struct lro_state	lro;
#endif
	struct sge_txq		txq[SGE_TXQ_PER_SET];
	uint32_t		txq_stopped;	/* which Tx queues are stopped */
	uint64_t		port_stats[SGE_PSTAT_MAX];
	struct port_info	*port;
	int			idx;		/* qset # */
	int			qs_flags;
	int			coalescing;
	struct cv		qs_cv;
	struct mtx		lock;
#define QS_NAME_LEN 32
	char			namebuf[QS_NAME_LEN];
};

struct sge {
	struct sge_qset		qs[SGE_QSETS];
	struct mtx		reg_lock;
};

struct filter_info;

struct adapter {
	device_t		dev;
	int			flags;
	TAILQ_ENTRY(adapter)	adapter_entry;

	/* PCI register resources */
	int			regs_rid;
	struct resource		*regs_res;
	int			udbs_rid;
	struct resource		*udbs_res;
	bus_space_handle_t	bh;
	bus_space_tag_t		bt;
	bus_size_t		mmio_len;
	uint32_t		link_width;

	/* DMA resources */
	bus_dma_tag_t		parent_dmat;
	bus_dma_tag_t		rx_dmat;
	bus_dma_tag_t		rx_jumbo_dmat;
	bus_dma_tag_t		tx_dmat;

	/* Interrupt resources */
	struct resource		*irq_res;
	int			irq_rid;
	void			*intr_tag;

	uint32_t		msix_regs_rid;
	struct resource		*msix_regs_res;

	struct resource		*msix_irq_res[SGE_QSETS];
	int			msix_irq_rid[SGE_QSETS];
	void			*msix_intr_tag[SGE_QSETS];
	uint8_t			rxpkt_map[8];	/* maps RX_PKT interface values to port ids */
	uint8_t			rrss_map[SGE_QSETS];	/* reverse RSS map table */
	uint16_t		rspq_map[RSS_TABLE_SIZE];	/* maps 7-bit cookie to qidx */
	union {
		uint8_t		fill[SGE_QSETS];
		uint64_t	coalesce;
	} u;

#define tunq_fill u.fill
#define tunq_coalesce u.coalesce

	struct filter_info	*filters;

	/* Tasks */
	struct task		ext_intr_task;
	struct task		slow_intr_task;
	struct task		tick_task;
	struct taskqueue	*tq;
	struct callout		cxgb_tick_ch;
	struct callout		sge_timer_ch;

	/* Register lock for use by the hardware layer */
	struct mtx		mdio_lock;
	struct mtx		elmer_lock;

	/* Bookkeeping for the hardware layer */
	struct adapter_params	params;
	unsigned int		slow_intr_mask;
	unsigned long		irq_stats[IRQ_NUM_STATS];

	struct sge		sge;
	struct mc7		pmrx;
	struct mc7		pmtx;
	struct mc7		cm;
	struct mc5		mc5;

	struct port_info	port[MAX_NPORTS];
	device_t		portdev[MAX_NPORTS];
	struct t3cdev		tdev;
	char			fw_version[64];
	char			port_types[MAX_NPORTS + 1];
	uint32_t		open_device_map;
	uint32_t		registered_device_map;
	struct mtx		lock;
	driver_intr_t		*cxgb_intr;
	int			msi_count;

#define ADAPTER_LOCK_NAME_LEN	32
	char			lockbuf[ADAPTER_LOCK_NAME_LEN];
	char			reglockbuf[ADAPTER_LOCK_NAME_LEN];
	char			mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
	char			elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
};

struct t3_rx_mode {
	uint32_t		idx;
	struct port_info	*port;
};

#define MDIO_LOCK(adapter)	mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter)	mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter)	mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter)	mtx_unlock(&(adapter)->elmer_lock)


#define PORT_LOCK(port)			mtx_lock(&(port)->lock);
#define PORT_UNLOCK(port)		mtx_unlock(&(port)->lock);
#define PORT_LOCK_INIT(port, name)	mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port)		mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_NOTOWNED(port)	mtx_assert(&(port)->lock, MA_NOTOWNED)
#define PORT_LOCK_ASSERT_OWNED(port)	mtx_assert(&(port)->lock, MA_OWNED)

#define ADAPTER_LOCK(adap)		mtx_lock(&(adap)->lock);
#define ADAPTER_UNLOCK(adap)		mtx_unlock(&(adap)->lock);
#define ADAPTER_LOCK_INIT(adap, name)	mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap)	mtx_destroy(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap)	mtx_assert(&(adap)->lock, MA_NOTOWNED)
#define ADAPTER_LOCK_ASSERT_OWNED(adap)		mtx_assert(&(adap)->lock, MA_OWNED)

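/*
 * One plausible usage pattern for the adapter lock and busy flag
 * (illustrative sketch only, not lifted from the driver): take the
 * adapter lock, wait for any in-flight operation, mark the adapter
 * busy, then clear the flag and wake waiters when done.
 *
 *	ADAPTER_LOCK(sc);
 *	while (IS_BUSY(sc))
 *		mtx_sleep(sc, &sc->lock, 0, "cxgbbusy", 0);
 *	SET_BUSY(sc);
 *	ADAPTER_UNLOCK(sc);
 *	... perform the operation ...
 *	ADAPTER_LOCK(sc);
 *	CLR_BUSY(sc);
 *	wakeup(sc);
 *	ADAPTER_UNLOCK(sc);
 */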
static __inline uint32_t
t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
{
	return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
}

static __inline void
t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
{
	bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
}

static __inline void
t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 4);
}

static __inline void
t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
{
	pci_write_config(adapter->dev, reg, val, 4);
}

static __inline void
t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 2);
}

static __inline void
t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
{
	pci_write_config(adapter->dev, reg, val, 2);
}

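/*
 * Multicast address iteration: t3_init_rx_mode() resets the cursor and
 * t3_get_next_mcaddr() returns each link-level multicast address on the
 * port's interface in turn, or NULL once the list is exhausted.  A
 * hypothetical caller would look like:
 *
 *	struct t3_rx_mode rm;
 *	uint8_t *ea;
 *
 *	t3_init_rx_mode(&rm, pi);
 *	while ((ea = t3_get_next_mcaddr(&rm)) != NULL)
 *		program 'ea' into the MAC's multicast filter;
 */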
static __inline uint8_t *
t3_get_next_mcaddr(struct t3_rx_mode *rm)
{
	uint8_t *macaddr = NULL;
	struct ifnet *ifp = rm->port->ifp;
	struct ifmultiaddr *ifma;
	int i = 0;

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (i == rm->idx) {
			macaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			break;
		}
		i++;
	}
	if_maddr_runlock(ifp);

	rm->idx++;
	return (macaddr);
}

static __inline void
t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
{
	rm->idx = 0;
	rm->port = port;
}

static __inline struct port_info *
adap2pinfo(struct adapter *adap, int idx)
{
	return &adap->port[idx];
}

int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
			int speed, int duplex, int fc, int mac_was_reset);
void t3_os_phymod_changed(struct adapter *adap, int port_id);
void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_offload_tx(struct t3cdev *, struct mbuf *);
void t3_os_ext_intr_handler(adapter_t *adapter);
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);


int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
    int, struct port_info *);
void t3_free_sge_resources(adapter_t *);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);
void t3b_intr(void *data);
void t3_intr_msi(void *data);
void t3_intr_msix(void *data);

int t3_sge_init_adapter(adapter_t *);
int t3_sge_reset_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_free_tx_desc(struct sge_qset *qs, int n, int qid);

void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);

void t3_add_attach_sysctls(adapter_t *sc);
void t3_add_configured_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
    unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);

#define CXGB_TICKS(a) ((a)->params.linkpoll_period ? \
    (hz * (a)->params.linkpoll_period) / 10 : \
    (a)->params.stats_update_period * hz)

/*
 * XXX figure out how we can return this to being private to sge
 */
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))

#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))

static __inline struct sge_qset *
fl_to_qset(struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static __inline struct sge_qset *
rspq_to_qset(struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static __inline struct sge_qset *
txq_to_qset(struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

static __inline struct adapter *
tdev2adap(struct t3cdev *d)
{
	return container_of(d, struct adapter, tdev);
}

#undef container_of

#define OFFLOAD_DEVMAP_BIT 15
static inline int offload_running(adapter_t *adapter)
{
	return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
}

void cxgb_tx_watchdog(void *arg);
int cxgb_transmit(struct ifnet *ifp, struct mbuf *m);
void cxgb_qflush(struct ifnet *ifp);
void cxgb_start(struct ifnet *ifp);
#endif