/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

$FreeBSD$

***************************************************************************/

#ifndef _CXGB_ADAPTER_H_
#define _CXGB_ADAPTER_H_

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/rman.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#ifdef CONFIG_DEFINED
#include <cxgb_osdep.h>
#include <ulp/toecore/toedev.h>
#include <sys/mbufq.h>
#else
#include <dev/cxgb/cxgb_osdep.h>
#include <dev/cxgb/sys/mbufq.h>
#include <dev/cxgb/ulp/toecore/toedev.h>
#endif

#define USE_SX

struct adapter;
struct sge_qset;
extern int cxgb_debug;

#ifdef DEBUG_LOCKING
#define MTX_INIT(lock, lockname, class, flags) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		mtx_init((lock), lockname, class, flags); \
	} while (0)

#define MTX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		mtx_destroy((lock)); \
	} while (0)

#define SX_INIT(lock, lockname) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		sx_init((lock), lockname); \
	} while (0)

#define SX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		sx_destroy((lock)); \
	} while (0)
#else
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#define SX_INIT sx_init
#define SX_DESTROY sx_destroy
#endif
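/*
 * Usage sketch (illustrative only, not taken from the driver code): the
 * wrappers above are drop-in replacements for mtx_init()/mtx_destroy() and
 * sx_init()/sx_destroy().  A caller typically builds a unique lock name in
 * a per-object buffer and passes it through unchanged, e.g.:
 *
 *	snprintf(sc->lockbuf, sizeof(sc->lockbuf), "cxgb adapter %d lock",
 *	    device_get_unit(sc->dev));
 *	SX_INIT(&sc->lock, sc->lockbuf);
 *	...
 *	SX_DESTROY(&sc->lock);
 *
 * With DEBUG_LOCKING defined, each init/destroy additionally logs the file
 * and line where it happened.
 */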
struct port_info {
	struct adapter *adapter;
	struct ifnet *ifp;
	int if_flags;
	const struct port_type_info *port_type;
	struct cphy phy;
	struct cmac mac;
	struct link_config link_config;
	struct ifmedia media;
#ifdef USE_SX
	struct sx lock;
#else
	struct mtx lock;
#endif
	uint8_t port_id;
	uint8_t tx_chan;
	uint8_t txpkt_intf;
	uint8_t nqsets;
	uint8_t first_qset;

	uint8_t hw_addr[ETHER_ADDR_LEN];
	struct taskqueue *tq;
	struct task start_task;
	struct task timer_reclaim_task;
	struct cdev *port_cdev;

#define PORT_NAME_LEN 32
#define TASKQ_NAME_LEN 32
	char lockbuf[PORT_NAME_LEN];
	char taskqbuf[TASKQ_NAME_LEN];
};

enum {				/* adapter flags */
	FULL_INIT_DONE	= (1 << 0),
	USING_MSI	= (1 << 1),
	USING_MSIX	= (1 << 2),
	QUEUES_BOUND	= (1 << 3),
	FW_UPTODATE	= (1 << 4),
	TPS_UPTODATE	= (1 << 5),
};

#define FL_Q_SIZE	4096
#define JUMBO_Q_SIZE	512
#define RSPQ_Q_SIZE	1024
#define TX_ETH_Q_SIZE	1024

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 * XXX TOE is not implemented yet, so the extra queues are just placeholders.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/*
 * Careful, the following are set on priv_flags and must not collide with
 * IFF_ flags!
 */
enum {
	LRO_ACTIVE = (1 << 8),
};

/* Max concurrent LRO sessions per queue set */
#define MAX_LRO_SES 8

struct t3_lro_session {
	struct mbuf *head;
	struct mbuf *tail;
	uint32_t seq;
	uint16_t ip_len;
	uint16_t mss;
	uint16_t vtag;
	uint8_t npkts;
};

struct lro_state {
	unsigned short enabled;
	unsigned short active_idx;
	unsigned int nactive;
	struct t3_lro_session sess[MAX_LRO_SES];
};
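/*
 * Sketch of the intended bookkeeping (an assumption; the actual logic lives
 * in the LRO path of the sge code): a received TCP segment extends session
 * s when its sequence number equals s->seq and its VLAN tag matches
 * s->vtag; the mbuf is then appended at s->tail and seq, ip_len and npkts
 * are advanced.  Otherwise a free slot in lro_state.sess[] is claimed
 * (bumping nactive) or, if all MAX_LRO_SES slots are busy, an existing
 * session is flushed first.
 */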
#define RX_BUNDLE_SIZE 8

struct rsp_desc;

struct sge_rspq {
	uint32_t credits;
	uint32_t size;
	uint32_t cidx;
	uint32_t gen;
	uint32_t polling;
	uint32_t holdoff_tmr;
	uint32_t next_holdoff;
	uint32_t imm_data;
	struct rsp_desc *desc;
	uint32_t cntxt_id;
	struct mtx lock;
	struct mbuf *rx_head;	/* offload packet receive queue head */
	struct mbuf *rx_tail;	/* offload packet receive queue tail */

	uint32_t offload_pkts;
	uint32_t offload_bundles;
	uint32_t pure_rsps;
	uint32_t unhandled_irqs;

	bus_addr_t phys_addr;
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;

	struct t3_mbuf_hdr rspq_mh;
#define RSPQ_NAME_LEN 32
	char lockbuf[RSPQ_NAME_LEN];
};

#ifndef DISABLE_MBUF_IOVEC
#define rspq_mbuf rspq_mh.mh_head
#endif

struct rx_desc;
struct rx_sw_desc;

struct sge_fl {
	uint32_t buf_size;
	uint32_t credits;
	uint32_t size;
	uint32_t cidx;
	uint32_t pidx;
	uint32_t gen;
	struct rx_desc *desc;
	struct rx_sw_desc *sdesc;
	bus_addr_t phys_addr;
	uint32_t cntxt_id;
	uint64_t empty;
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_dma_tag_t entry_tag;
	uma_zone_t zone;
	int type;
};

struct tx_desc;
struct tx_sw_desc;

#define TXQ_TRANSMITTING 0x1

struct sge_txq {
	uint64_t flags;
	uint32_t in_use;
	uint32_t size;
	uint32_t processed;
	uint32_t cleaned;
	uint32_t stop_thres;
	uint32_t cidx;
	uint32_t pidx;
	uint32_t gen;
	uint32_t unacked;
	struct tx_desc *desc;
	struct tx_sw_desc *sdesc;
	uint32_t token;
	bus_addr_t phys_addr;
	struct task qresume_task;
	struct task qreclaim_task;
	struct port_info *port;
	uint32_t cntxt_id;
	uint64_t stops;
	uint64_t restarts;
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_dma_tag_t entry_tag;
	struct mbuf_head sendq;
	struct mtx lock;
#define TXQ_NAME_LEN 32
	char lockbuf[TXQ_NAME_LEN];
};

enum {
	SGE_PSTAT_TSO,			/* # of TSO requests */
	SGE_PSTAT_RX_CSUM_GOOD,		/* # of successful RX csum offloads */
	SGE_PSTAT_TX_CSUM,		/* # of TX checksum offloads */
	SGE_PSTAT_VLANEX,		/* # of VLAN tag extractions */
	SGE_PSTAT_VLANINS,		/* # of VLAN tag insertions */
	SGE_PSTATS_LRO_QUEUED,		/* # of LRO appended packets */
	SGE_PSTATS_LRO_FLUSHED,		/* # of LRO flushed packets */
	SGE_PSTATS_LRO_X_STREAMS,	/* # of times the LRO session limit was exceeded */
};

#define SGE_PSTAT_MAX	(SGE_PSTATS_LRO_X_STREAMS + 1)

struct sge_qset {
	struct sge_rspq rspq;
	struct sge_fl fl[SGE_RXQ_PER_SET];
	struct lro_state lro;
	struct sge_txq txq[SGE_TXQ_PER_SET];
	uint32_t txq_stopped;		/* which Tx queues are stopped */
	uint64_t port_stats[SGE_PSTAT_MAX];
	struct port_info *port;
	int idx;			/* qset # */
};

struct sge {
	struct sge_qset qs[SGE_QSETS];
	struct mtx reg_lock;
};

struct filter_info;

struct adapter {
	device_t dev;
	int flags;
	TAILQ_ENTRY(adapter) adapter_entry;

	/* PCI register resources */
	int regs_rid;
	struct resource *regs_res;
	bus_space_handle_t bh;
	bus_space_tag_t bt;
	bus_size_t mmio_len;
	uint32_t link_width;

	/* DMA resources */
	bus_dma_tag_t parent_dmat;
	bus_dma_tag_t rx_dmat;
	bus_dma_tag_t rx_jumbo_dmat;
	bus_dma_tag_t tx_dmat;

	/* Interrupt resources */
	struct resource *irq_res;
	int irq_rid;
	void *intr_tag;

	uint32_t msix_regs_rid;
	struct resource *msix_regs_res;

	struct resource *msix_irq_res[SGE_QSETS];
	int msix_irq_rid[SGE_QSETS];
	void *msix_intr_tag[SGE_QSETS];
	uint8_t rxpkt_map[8];		/* maps RX_PKT interface values to port ids */
	uint8_t rrss_map[SGE_QSETS];	/* reverse RSS map table */

	struct filter_info *filters;

	/* Tasks */
	struct task ext_intr_task;
	struct task slow_intr_task;
	struct task tick_task;
	struct task process_responses_task;
	struct taskqueue *tq;
	struct callout cxgb_tick_ch;
	struct callout sge_timer_ch;

	/* Register locks for use by the hardware layer */
	struct mtx mdio_lock;
	struct mtx elmer_lock;

	/* Bookkeeping for the hardware layer */
	struct adapter_params params;
	unsigned int slow_intr_mask;
	unsigned long irq_stats[IRQ_NUM_STATS];

	struct sge sge;
	struct mc7 pmrx;
	struct mc7 pmtx;
	struct mc7 cm;
	struct mc5 mc5;

	struct port_info port[MAX_NPORTS];
	device_t portdev[MAX_NPORTS];
	struct toedev tdev;
	char fw_version[64];
	uint32_t open_device_map;
	uint32_t registered_device_map;
#ifdef USE_SX
	struct sx lock;
#else
	struct mtx lock;
#endif
	driver_intr_t *cxgb_intr;
	int msi_count;

#define ADAPTER_LOCK_NAME_LEN 32
	char lockbuf[ADAPTER_LOCK_NAME_LEN];
	char reglockbuf[ADAPTER_LOCK_NAME_LEN];
	char mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
	char elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
};

struct t3_rx_mode {
	uint32_t idx;
	struct port_info *port;
};

#define MDIO_LOCK(adapter)	mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter)	mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter)	mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter)	mtx_unlock(&(adapter)->elmer_lock)

#ifdef USE_SX
#define PORT_LOCK(port)			sx_xlock(&(port)->lock)
#define PORT_UNLOCK(port)		sx_xunlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name)	SX_INIT(&(port)->lock, name)
#define PORT_LOCK_DEINIT(port)		SX_DESTROY(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port)	sx_assert(&(port)->lock, SA_LOCKED)

#define ADAPTER_LOCK(adap)		sx_xlock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap)		sx_xunlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name)	SX_INIT(&(adap)->lock, name)
#define ADAPTER_LOCK_DEINIT(adap)	SX_DESTROY(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) sx_assert(&(adap)->lock, SA_UNLOCKED)
#else
#define PORT_LOCK(port)			mtx_lock(&(port)->lock)
#define PORT_UNLOCK(port)		mtx_unlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name)	mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port)		mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port)	mtx_assert(&(port)->lock, MA_OWNED)

#define ADAPTER_LOCK(adap)		mtx_lock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap)		mtx_unlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name)	mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap)	mtx_destroy(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) mtx_assert(&(adap)->lock, MA_NOTOWNED)
#endif
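/*
 * Usage sketch (illustrative, not taken from the driver): callers bracket
 * configuration changes with the macros above so that a single build-time
 * switch (USE_SX) selects between sx(9) and mutex(9) locking, e.g.:
 *
 *	PORT_LOCK(pi);
 *	err = reconfigure_port(pi);	(hypothetical helper)
 *	PORT_UNLOCK(pi);
 *
 * Note that acquiring an sx lock may sleep, so with USE_SX defined
 * PORT_LOCK/ADAPTER_LOCK must not be taken from contexts that cannot sleep.
 */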
static __inline uint32_t
t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
{
	return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
}

static __inline void
t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
{
	bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
}

static __inline void
t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 4);
}

static __inline void
t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
{
	pci_write_config(adapter->dev, reg, val, 4);
}

static __inline void
t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 2);
}

static __inline void
t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
{
	pci_write_config(adapter->dev, reg, val, 2);
}

static __inline uint8_t *
t3_get_next_mcaddr(struct t3_rx_mode *rm)
{
	uint8_t *macaddr = NULL;

	if (rm->idx == 0)
		macaddr = rm->port->hw_addr;

	rm->idx++;
	return (macaddr);
}

static __inline void
t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
{
	rm->idx = 0;
	rm->port = port;
}
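/*
 * Example (a sketch, not from the driver): the two helpers above form a
 * tiny iterator over the addresses to program into the MAC.  A caller
 * walks it until NULL is returned; with the implementation above only the
 * port's own hardware address is ever yielded:
 *
 *	struct t3_rx_mode rm;
 *	uint8_t *ea;
 *
 *	t3_init_rx_mode(&rm, pi);
 *	while ((ea = t3_get_next_mcaddr(&rm)) != NULL)
 *		program_exact_match_entry(ea);	(hypothetical helper)
 */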
static __inline struct port_info *
adap2pinfo(struct adapter *adap, int idx)
{
	return (&adap->port[idx]);
}

int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
    int speed, int duplex, int fc);
void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_offload_tx(struct toedev *, struct mbuf *);
void t3_os_ext_intr_handler(adapter_t *adapter);
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);

int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int,
    const struct qset_params *, int, struct port_info *);
void t3_free_sge_resources(adapter_t *);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);
void t3b_intr(void *data);
void t3_intr_msi(void *data);
void t3_intr_msix(void *data);
int t3_encap(struct port_info *, struct mbuf **, int *free);

int t3_sge_init_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_sge_deinit_sw(adapter_t *);

void t3_rx_eth_lro(adapter_t *adap, struct sge_rspq *rq, struct mbuf *m,
    int ethpad, uint32_t rss_hash, uint32_t rss_csum, int lro);
void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m,
    int ethpad);
void t3_lro_flush(adapter_t *adap, struct sge_qset *qs, struct lro_state *state);

void t3_add_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
    unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);

/*
 * XXX figure out how we can return this to being private to sge
 */
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))

#define container_of(p, stype, field) \
	((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))

static __inline struct sge_qset *
fl_to_qset(struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static __inline struct sge_qset *
rspq_to_qset(struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static __inline struct sge_qset *
txq_to_qset(struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

static __inline struct adapter *
tdev2adap(struct toedev *d)
{
	return container_of(d, struct adapter, tdev);
}

#undef container_of

#define OFFLOAD_DEVMAP_BIT 15
static __inline int
offload_running(adapter_t *adapter)
{
	return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
}

#endif