/**************************************************************************
SPDX-License-Identifier: BSD-2-Clause

Copyright (c) 2007-2009, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#ifndef _CXGB_ADAPTER_H_
#define _CXGB_ADAPTER_H_

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/condvar.h>
#include <sys/buf_ring.h>
#include <sys/taskqueue.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/if_dl.h>
#include <netinet/in.h>
#include <netinet/tcp_lro.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <cxgb_osdep.h>

struct adapter;
struct sge_qset;
extern int cxgb_debug;

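/*
 * With DEBUG_LOCKING defined, mutex creation and destruction are traced to
 * the console; otherwise these wrappers collapse to plain mtx_init() and
 * mtx_destroy().
 */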
#ifdef DEBUG_LOCKING
#define MTX_INIT(lock, lockname, class, flags) \
        do { \
                printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
                mtx_init((lock), lockname, class, flags); \
        } while (0)

#define MTX_DESTROY(lock) \
        do { \
                printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
                mtx_destroy((lock)); \
        } while (0)

#else
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#endif

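/* Link fault state, tracked per port in port_info::link_fault. */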
enum {
        LF_NO = 0,
        LF_MAYBE,
        LF_YES
};

struct port_info {
        struct adapter  *adapter;
        if_t            ifp;
        int             if_flags;
        int             flags;
        const struct port_type_info *port_type;
        struct cphy     phy;
        struct cmac     mac;
        struct timeval  last_refreshed;
        struct link_config link_config;
        struct ifmedia  media;
        struct mtx      lock;
        uint32_t        port_id;
        uint32_t        tx_chan;
        uint32_t        txpkt_intf;
        uint32_t        first_qset;
        uint32_t        nqsets;
        int             link_fault;

        uint8_t         hw_addr[ETHER_ADDR_LEN];
        struct callout  link_check_ch;
        struct task     link_check_task;
        struct task     timer_reclaim_task;
        struct cdev     *port_cdev;

#define PORT_LOCK_NAME_LEN 32
#define PORT_NAME_LEN 32
        char            lockbuf[PORT_LOCK_NAME_LEN];
        char            namebuf[PORT_NAME_LEN];
} __aligned(CACHE_LINE_SIZE);

enum {
        /* adapter flags */
        FULL_INIT_DONE  = (1 << 0),
        USING_MSI       = (1 << 1),
        USING_MSIX      = (1 << 2),
        QUEUES_BOUND    = (1 << 3),
        FW_UPTODATE     = (1 << 4),
        TPS_UPTODATE    = (1 << 5),
        CXGB_SHUTDOWN   = (1 << 6),
        CXGB_OFLD_INIT  = (1 << 7),
        TP_PARITY_INIT  = (1 << 8),
        CXGB_BUSY       = (1 << 9),
        TOM_INIT_DONE   = (1 << 10),

        /* port flags */
        DOOMED          = (1 << 0),
};
#define IS_DOOMED(p)    ((p)->flags & DOOMED)
#define SET_DOOMED(p)   do { (p)->flags |= DOOMED; } while (0)
#define IS_BUSY(sc)     ((sc)->flags & CXGB_BUSY)
#define SET_BUSY(sc)    do { (sc)->flags |= CXGB_BUSY; } while (0)
#define CLR_BUSY(sc)    do { (sc)->flags &= ~CXGB_BUSY; } while (0)

#define FL_Q_SIZE       4096
#define JUMBO_Q_SIZE    1024
#define RSPQ_Q_SIZE     2048
#define TX_ETH_Q_SIZE   1024
#define TX_OFLD_Q_SIZE  1024
#define TX_CTRL_Q_SIZE  256

enum {
        TXQ_ETH  = 0,
        TXQ_OFLD = 1,
        TXQ_CTRL = 2,
};

/*
 * Work request size in bytes.
 */
#define WR_LEN  (WR_FLITS * 8)
#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt_lso))
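/*
 * PIO_LEN is what remains of a work request once the cpl_tx_pkt_lso header
 * has been accounted for; packets no larger than this can be copied inline
 * into the work request by the transmit path instead of being DMA-mapped.
 */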

struct lro_state {
        unsigned short enabled;
        struct lro_ctrl ctrl;
};

#define RX_BUNDLE_SIZE 8

struct rsp_desc;

struct sge_rspq {
        uint32_t        credits;
        uint32_t        size;
        uint32_t        cidx;
        uint32_t        gen;
        uint32_t        polling;
        uint32_t        holdoff_tmr;
        uint32_t        next_holdoff;
        uint32_t        imm_data;
        uint32_t        async_notif;
        uint32_t        cntxt_id;
        uint32_t        offload_pkts;
        uint32_t        pure_rsps;
        uint32_t        unhandled_irqs;
        uint32_t        starved;

        bus_addr_t      phys_addr;
        bus_dma_tag_t   desc_tag;
        bus_dmamap_t    desc_map;

        struct t3_mbuf_hdr rspq_mh;
        struct rsp_desc *desc;
        struct mtx      lock;
#define RSPQ_NAME_LEN 32
        char            lockbuf[RSPQ_NAME_LEN];
        uint32_t        rspq_dump_start;
        uint32_t        rspq_dump_count;
};

struct rx_desc;
struct rx_sw_desc;

struct sge_fl {
        uint32_t        buf_size;
        uint32_t        credits;
        uint32_t        size;
        uint32_t        cidx;
        uint32_t        pidx;
        uint32_t        gen;
        uint32_t        db_pending;
        bus_addr_t      phys_addr;
        uint32_t        cntxt_id;
        uint32_t        empty;
        bus_dma_tag_t   desc_tag;
        bus_dmamap_t    desc_map;
        bus_dma_tag_t   entry_tag;
        uma_zone_t      zone;
        struct rx_desc  *desc;
        struct rx_sw_desc *sdesc;
        int             type;
};

struct tx_desc;
struct tx_sw_desc;

#define TXQ_TRANSMITTING 0x1

struct sge_txq {
        uint64_t        flags;
        uint32_t        in_use;
        uint32_t        size;
        uint32_t        processed;
        uint32_t        cleaned;
        uint32_t        stop_thres;
        uint32_t        cidx;
        uint32_t        pidx;
        uint32_t        gen;
        uint32_t        unacked;
        uint32_t        db_pending;
        struct tx_desc  *desc;
        struct tx_sw_desc *sdesc;
        uint32_t        token;
        bus_addr_t      phys_addr;
        struct task     qresume_task;
        struct task     qreclaim_task;
        uint32_t        cntxt_id;
        uint64_t        stops;
        uint64_t        restarts;
        bus_dma_tag_t   desc_tag;
        bus_dmamap_t    desc_map;
        bus_dma_tag_t   entry_tag;
        struct mbufq    sendq;

        struct buf_ring *txq_mr;
        struct ifaltq   *txq_ifq;
        struct callout  txq_timer;
        struct callout  txq_watchdog;
        uint64_t        txq_coalesced;
        uint32_t        txq_skipped;
        uint32_t        txq_enqueued;
        uint32_t        txq_dump_start;
        uint32_t        txq_dump_count;
        uint64_t        txq_direct_packets;
        uint64_t        txq_direct_bytes;
        uint64_t        txq_frees;
        struct sg_ent   txq_sgl[TX_MAX_SEGS / 2 + 1];
};

#define SGE_PSTAT_MAX (SGE_PSTAT_VLANINS + 1)

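/* Values for sge_qset::qs_flags. */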
#define QS_EXITING      0x1
#define QS_RUNNING      0x2
#define QS_BOUND        0x4
#define QS_FLUSHING     0x8
#define QS_TIMEOUT      0x10

struct sge_qset {
        struct sge_rspq rspq;
        struct sge_fl   fl[SGE_RXQ_PER_SET];
        struct lro_state lro;
        struct sge_txq  txq[SGE_TXQ_PER_SET];
        uint32_t        txq_stopped;    /* which Tx queues are stopped */
        struct port_info *port;
        struct adapter  *adap;
        int             idx;            /* qset # */
        int             qs_flags;
        int             coalescing;
        struct cv       qs_cv;
        struct mtx      lock;
#define QS_NAME_LEN 32
        char            namebuf[QS_NAME_LEN];
};

struct sge {
        struct sge_qset qs[SGE_QSETS];
        struct mtx      reg_lock;
};

struct filter_info;

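/*
 * A CPL handler consumes one CPL message from a response queue; handlers
 * are registered per opcode with t3_register_cpl_handler().
 */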
typedef int (*cpl_handler_t)(struct sge_qset *, struct rsp_desc *,
    struct mbuf *);

struct adapter {
        SLIST_ENTRY(adapter) link;
        device_t        dev;
        int             flags;

        /* PCI register resources */
        int             regs_rid;
        struct resource *regs_res;
        int             udbs_rid;
        struct resource *udbs_res;
        bus_space_handle_t bh;
        bus_space_tag_t bt;
        bus_size_t      mmio_len;
        uint32_t        link_width;

        /* DMA resources */
        bus_dma_tag_t   parent_dmat;
        bus_dma_tag_t   rx_dmat;
        bus_dma_tag_t   rx_jumbo_dmat;
        bus_dma_tag_t   tx_dmat;

        /* Interrupt resources */
        struct resource *irq_res;
        int             irq_rid;
        void            *intr_tag;

        uint32_t        msix_regs_rid;
        struct resource *msix_regs_res;

        struct resource *msix_irq_res[SGE_QSETS];
        int             msix_irq_rid[SGE_QSETS];
        void            *msix_intr_tag[SGE_QSETS];
        uint8_t         rxpkt_map[8];   /* maps RX_PKT interface values to port ids */
        uint8_t         rrss_map[SGE_QSETS];    /* reverse RSS map table */
        uint16_t        rspq_map[RSS_TABLE_SIZE];       /* maps 7-bit cookie to qidx */
        union {
                uint8_t fill[SGE_QSETS];
                uint64_t coalesce;
        } u;

#define tunq_fill u.fill
#define tunq_coalesce u.coalesce

        struct filter_info *filters;

        /* Tasks */
        struct task     slow_intr_task;
        struct task     tick_task;
        struct taskqueue *tq;
        struct callout  cxgb_tick_ch;
        struct callout  sge_timer_ch;

        /* Register lock for use by the hardware layer */
        struct mtx      mdio_lock;
        struct mtx      elmer_lock;

        /* Bookkeeping for the hardware layer */
        struct adapter_params params;
        unsigned int    slow_intr_mask;
        unsigned long   irq_stats[IRQ_NUM_STATS];

        unsigned        nqsets;
        struct sge      sge;
        struct mc7      pmrx;
        struct mc7      pmtx;
        struct mc7      cm;
        struct mc5      mc5;

        struct port_info port[MAX_NPORTS];
        device_t        portdev[MAX_NPORTS];
#ifdef TCP_OFFLOAD
        void            *tom_softc;
        void            *iwarp_softc;
#endif
        char            fw_version[64];
        char            port_types[MAX_NPORTS + 1];
        uint32_t        open_device_map;
#ifdef TCP_OFFLOAD
        int             offload_map;
#endif
        struct mtx      lock;
        driver_intr_t   *cxgb_intr;
        int             msi_count;

#define ADAPTER_LOCK_NAME_LEN 32
        char            lockbuf[ADAPTER_LOCK_NAME_LEN];
        char            reglockbuf[ADAPTER_LOCK_NAME_LEN];
        char            mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
        char            elmerlockbuf[ADAPTER_LOCK_NAME_LEN];

        int             timestamp;

#ifdef TCP_OFFLOAD
#define NUM_CPL_HANDLERS 0xa7
        cpl_handler_t cpl_handler[NUM_CPL_HANDLERS] __aligned(CACHE_LINE_SIZE);
#endif
};

struct t3_rx_mode {
        uint32_t        idx;
        struct port_info *port;
};

#define MDIO_LOCK(adapter)      mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter)    mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter)      mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter)    mtx_unlock(&(adapter)->elmer_lock)

#define PORT_LOCK(port)                 mtx_lock(&(port)->lock)
#define PORT_UNLOCK(port)               mtx_unlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name)      mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port)          mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_NOTOWNED(port) mtx_assert(&(port)->lock, MA_NOTOWNED)
#define PORT_LOCK_ASSERT_OWNED(port)    mtx_assert(&(port)->lock, MA_OWNED)

#define ADAPTER_LOCK(adap)                  mtx_lock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap)                mtx_unlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name)       mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap)           mtx_destroy(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap)  mtx_assert(&(adap)->lock, MA_NOTOWNED)
#define ADAPTER_LOCK_ASSERT_OWNED(adap)     mtx_assert(&(adap)->lock, MA_OWNED)

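/*
 * Register and PCI config space accessors used by the shared hardware code.
 * MMIO goes through the adapter's bus_space tag/handle; PCI config access
 * goes through the device_t.
 */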
static __inline uint32_t
t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
{
        return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
}

static __inline void
t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
{
        bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
}

static __inline void
t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
{
        *val = pci_read_config(adapter->dev, reg, 4);
}

static __inline void
t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
{
        pci_write_config(adapter->dev, reg, val, 4);
}

static __inline void
t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
{
        *val = pci_read_config(adapter->dev, reg, 2);
}

static __inline void
t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
{
        pci_write_config(adapter->dev, reg, val, 2);
}

static __inline void
t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
{
        rm->idx = 0;
        rm->port = port;
}

static __inline struct port_info *
adap2pinfo(struct adapter *adap, int idx)
{
        return &adap->port[idx];
}

int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);
void t3_os_link_intr(struct port_info *);
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
    int speed, int duplex, int fc, int mac_was_reset);
void t3_os_phymod_changed(struct adapter *adap, int port_id);
void t3_sge_err_intr_handler(adapter_t *adapter);
#ifdef TCP_OFFLOAD
int t3_offload_tx(struct adapter *, struct mbuf *);
#endif
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);
int t3_register_cpl_handler(struct adapter *, int, cpl_handler_t);

int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
    int, struct port_info *);
void t3_free_sge_resources(adapter_t *, int);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);
void t3b_intr(void *data);
void t3_intr_msi(void *data);
void t3_intr_msix(void *data);

int t3_sge_init_adapter(adapter_t *);
int t3_sge_reset_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_free_tx_desc(struct sge_qset *qs, int n, int qid);

void t3_rx_eth(struct adapter *adap, struct mbuf *m, int ethpad);

void t3_add_attach_sysctls(adapter_t *sc);
void t3_add_configured_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
    unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);

/*
 * XXX figure out how we can return this to being private to sge
 */
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))

#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))

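/*
 * Recover the enclosing sge_qset from a pointer to one of its embedded
 * queues, e.g. rspq_to_qset(&qs->rspq) == qs.
 */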
static __inline struct sge_qset *
fl_to_qset(struct sge_fl *q, int qidx)
{
        return container_of(q, struct sge_qset, fl[qidx]);
}

static __inline struct sge_qset *
rspq_to_qset(struct sge_rspq *q)
{
        return container_of(q, struct sge_qset, rspq);
}

static __inline struct sge_qset *
txq_to_qset(struct sge_txq *q, int qidx)
{
        return container_of(q, struct sge_qset, txq[qidx]);
}

#undef container_of

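/* The offload device is tracked in open_device_map alongside the ports. */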
#define OFFLOAD_DEVMAP_BIT (1 << MAX_NPORTS)
static inline int offload_running(adapter_t *adapter)
{
        return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
}

void cxgb_tx_watchdog(void *arg);
int cxgb_transmit(if_t ifp, struct mbuf *m);
void cxgb_qflush(if_t ifp);
void t3_iterate(void (*)(struct adapter *, void *), void *);
void cxgb_refresh_stats(struct port_info *);

#ifdef DEBUGNET
int cxgb_debugnet_encap(struct sge_qset *qs, struct mbuf **m);
int cxgb_debugnet_poll_rx(adapter_t *adap, struct sge_qset *qs);
int cxgb_debugnet_poll_tx(struct sge_qset *qs);
#endif

#endif