/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.


$FreeBSD$

***************************************************************************/



#ifndef _CXGB_ADAPTER_H_
#define _CXGB_ADAPTER_H_

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/rman.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#ifdef CONFIG_DEFINED
#include <cxgb_osdep.h>
#include <t3cdev.h>
#include <sys/mbufq.h>
#include <ulp/toecore/cxgb_toedev.h>
#else
#include <dev/cxgb/cxgb_osdep.h>
#include <dev/cxgb/t3cdev.h>
#include <dev/cxgb/sys/mbufq.h>
#include <dev/cxgb/ulp/toecore/cxgb_toedev.h>
#endif

#define USE_SX

struct adapter;
struct sge_qset;
extern int cxgb_debug;

#ifdef DEBUG_LOCKING
#define MTX_INIT(lock, lockname, class, flags) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		mtx_init((lock), lockname, class, flags);		\
	} while (0)

#define MTX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		mtx_destroy((lock));					\
	} while (0)

#define SX_INIT(lock, lockname) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		sx_init((lock), lockname);		\
	} while (0)

#define SX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		sx_destroy((lock));					\
	} while (0)
#else
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#define SX_INIT sx_init
#define SX_DESTROY sx_destroy
#endif

struct port_info {
	struct adapter	*adapter;
	struct ifnet	*ifp;
	int		if_flags;
	const struct port_type_info *port_type;
	struct cphy	phy;
	struct cmac	mac;
	struct link_config link_config;
	struct ifmedia	media;
#ifdef USE_SX
	struct sx	lock;
#else
	struct mtx	lock;
#endif
	uint8_t		port_id;
	uint8_t		tx_chan;
	uint8_t		txpkt_intf;
	uint8_t		nqsets;
	uint8_t         first_qset;

	uint8_t		hw_addr[ETHER_ADDR_LEN];
	struct taskqueue *tq;
	struct task     start_task;
	struct task	timer_reclaim_task;
	struct cdev     *port_cdev;

#define PORT_NAME_LEN 32
#define TASKQ_NAME_LEN 32
	char            lockbuf[PORT_NAME_LEN];
	char            taskqbuf[TASKQ_NAME_LEN];
};

enum {				/* adapter flags */
	FULL_INIT_DONE	= (1 << 0),
	USING_MSI	= (1 << 1),
	USING_MSIX	= (1 << 2),
	QUEUES_BOUND	= (1 << 3),
	FW_UPTODATE     = (1 << 4),
	TPS_UPTODATE    = (1 << 5),
};
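
/*
 * Illustrative only (not from the original source): the flags above live in
 * the adapter softc's "flags" field and are manipulated with plain bit
 * operations, e.g.
 *
 *	if (sc->flags & USING_MSIX)
 *		(install one interrupt handler per queue set)
 *	sc->flags |= FULL_INIT_DONE;
 *
 * where "sc" is a hypothetical struct adapter pointer.
 */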


#define FL_Q_SIZE	4096
#define JUMBO_Q_SIZE	512
#define RSPQ_Q_SIZE	1024
#define TX_ETH_Q_SIZE	1024



/*
 * Types of Tx queues in each queue set.  Order here matters; do not change it.
 * XXX TOE is not implemented yet, so the extra queues are just placeholders.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
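
/*
 * Illustrative only: the enum above provides the index into a queue set's
 * txq[] array, which is why the ordering must not change, e.g.
 *
 *	struct sge_txq *ethq = &qs->txq[TXQ_ETH];
 *	struct sge_txq *ctrlq = &qs->txq[TXQ_CTRL];
 *
 * where "qs" is a hypothetical struct sge_qset pointer.
 */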


/* Careful: the following are set on priv_flags and must not collide
 * with IFF_ flags!
 */
enum {
	LRO_ACTIVE = (1 << 8),
};

/* Max concurrent LRO sessions per queue set */
#define MAX_LRO_SES 8

struct t3_lro_session {
	struct mbuf *head;
	struct mbuf *tail;
	uint32_t seq;
	uint16_t ip_len;
	uint16_t mss;
	uint16_t vtag;
	uint8_t npkts;
};

struct lro_state {
	unsigned short enabled;
	unsigned short active_idx;
	unsigned int nactive;
	struct t3_lro_session sess[MAX_LRO_SES];
};

#define RX_BUNDLE_SIZE 8

struct rsp_desc;

struct sge_rspq {
	uint32_t	credits;
	uint32_t	size;
	uint32_t	cidx;
	uint32_t	gen;
	uint32_t	polling;
	uint32_t	holdoff_tmr;
	uint32_t	next_holdoff;
	uint32_t        imm_data;
	struct rsp_desc	*desc;
	uint32_t	cntxt_id;
	struct mtx      lock;
	struct mbuf     *rx_head;    /* offload packet receive queue head */
	struct mbuf     *rx_tail;    /* offload packet receive queue tail */

	uint32_t        offload_pkts;
	uint32_t        offload_bundles;
	uint32_t        pure_rsps;
	uint32_t        unhandled_irqs;

	bus_addr_t	phys_addr;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;

	struct t3_mbuf_hdr rspq_mh;
#define RSPQ_NAME_LEN  32
	char            lockbuf[RSPQ_NAME_LEN];

};

#ifndef DISABLE_MBUF_IOVEC
#define rspq_mbuf rspq_mh.mh_head
#endif

struct rx_desc;
struct rx_sw_desc;

struct sge_fl {
	uint32_t	buf_size;
	uint32_t	credits;
	uint32_t	size;
	uint32_t	cidx;
	uint32_t	pidx;
	uint32_t	gen;
	struct rx_desc	*desc;
	struct rx_sw_desc *sdesc;
	bus_addr_t	phys_addr;
	uint32_t	cntxt_id;
	uint64_t	empty;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;
	bus_dma_tag_t   entry_tag;
	uma_zone_t      zone;
	int             type;
};

struct tx_desc;
struct tx_sw_desc;

#define TXQ_TRANSMITTING    0x1

struct sge_txq {
	uint64_t	flags;
	uint32_t	in_use;
	uint32_t	size;
	uint32_t	processed;
	uint32_t	cleaned;
	uint32_t	stop_thres;
	uint32_t	cidx;
	uint32_t	pidx;
	uint32_t	gen;
	uint32_t	unacked;
	struct tx_desc	*desc;
	struct tx_sw_desc *sdesc;
	uint32_t	token;
	bus_addr_t	phys_addr;
	struct task     qresume_task;
	struct task     qreclaim_task;
	struct port_info *port;
	uint32_t	cntxt_id;
	uint64_t	stops;
	uint64_t	restarts;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;
	bus_dma_tag_t   entry_tag;
	struct mbuf_head sendq;
	struct mtx      lock;
#define TXQ_NAME_LEN  32
	char            lockbuf[TXQ_NAME_LEN];
};


enum {
	SGE_PSTAT_TSO,              /* # of TSO requests */
	SGE_PSTAT_RX_CSUM_GOOD,     /* # of successful RX csum offloads */
	SGE_PSTAT_TX_CSUM,          /* # of TX checksum offloads */
	SGE_PSTAT_VLANEX,           /* # of VLAN tag extractions */
	SGE_PSTAT_VLANINS,          /* # of VLAN tag insertions */
	SGE_PSTATS_LRO_QUEUED,	    /* # of LRO appended packets */
	SGE_PSTATS_LRO_FLUSHED,	    /* # of LRO flushed packets */
	SGE_PSTATS_LRO_X_STREAMS,   /* # of exceeded LRO contexts */
};

#define SGE_PSTAT_MAX (SGE_PSTATS_LRO_X_STREAMS+1)
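
/*
 * Illustrative only: these counters are kept per queue set in
 * sge_qset.port_stats[] (defined below) and are typically bumped in the
 * Tx/Rx paths, e.g.
 *
 *	qs->port_stats[SGE_PSTAT_TSO]++;
 */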

struct sge_qset {
	struct sge_rspq		rspq;
	struct sge_fl		fl[SGE_RXQ_PER_SET];
	struct lro_state        lro;
	struct sge_txq		txq[SGE_TXQ_PER_SET];
	uint32_t                txq_stopped;       /* which Tx queues are stopped */
	uint64_t                port_stats[SGE_PSTAT_MAX];
	struct port_info        *port;
	int                     idx; /* qset # */
};

struct sge {
	struct sge_qset	        qs[SGE_QSETS];
	struct mtx              reg_lock;
};

struct filter_info;

struct adapter {
	device_t		dev;
	int			flags;
	TAILQ_ENTRY(adapter)    adapter_entry;

	/* PCI register resources */
	int			regs_rid;
	struct resource		*regs_res;
	bus_space_handle_t	bh;
	bus_space_tag_t		bt;
	bus_size_t              mmio_len;
	uint32_t                link_width;

	/* DMA resources */
	bus_dma_tag_t		parent_dmat;
	bus_dma_tag_t		rx_dmat;
	bus_dma_tag_t		rx_jumbo_dmat;
	bus_dma_tag_t		tx_dmat;

	/* Interrupt resources */
	struct resource		*irq_res;
	int			irq_rid;
	void			*intr_tag;

	uint32_t		msix_regs_rid;
	struct resource		*msix_regs_res;

	struct resource		*msix_irq_res[SGE_QSETS];
	int			msix_irq_rid[SGE_QSETS];
	void			*msix_intr_tag[SGE_QSETS];
	uint8_t                 rxpkt_map[8]; /* maps RX_PKT interface values to port ids */
	uint8_t                 rrss_map[SGE_QSETS]; /* reverse RSS map table */

	struct filter_info      *filters;

	/* Tasks */
	struct task		ext_intr_task;
	struct task		slow_intr_task;
	struct task		tick_task;
	struct task		process_responses_task;
	struct taskqueue	*tq;
	struct callout		cxgb_tick_ch;
	struct callout		sge_timer_ch;

	/* Register lock for use by the hardware layer */
	struct mtx		mdio_lock;
	struct mtx		elmer_lock;

	/* Bookkeeping for the hardware layer */
	struct adapter_params  params;
	unsigned int slow_intr_mask;
	unsigned long irq_stats[IRQ_NUM_STATS];

	struct sge              sge;
	struct mc7              pmrx;
	struct mc7              pmtx;
	struct mc7              cm;
	struct mc5              mc5;

	struct port_info	port[MAX_NPORTS];
	device_t		portdev[MAX_NPORTS];
	struct t3cdev           tdev;
	char                    fw_version[64];
	uint32_t                open_device_map;
	uint32_t                registered_device_map;
#ifdef USE_SX
	struct sx               lock;
#else
	struct mtx              lock;
#endif
	driver_intr_t           *cxgb_intr;
	int                     msi_count;

#define ADAPTER_LOCK_NAME_LEN	32
	char                    lockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    reglockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
};

struct t3_rx_mode {

	uint32_t                idx;
	struct port_info        *port;
};


#define MDIO_LOCK(adapter)	mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter)	mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter)	mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter)	mtx_unlock(&(adapter)->elmer_lock)


#ifdef USE_SX
#define PORT_LOCK(port)		     sx_xlock(&(port)->lock);
#define PORT_UNLOCK(port)	     sx_xunlock(&(port)->lock);
#define PORT_LOCK_INIT(port, name)   SX_INIT(&(port)->lock, name)
#define PORT_LOCK_DEINIT(port)       SX_DESTROY(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) sx_assert(&(port)->lock, SA_LOCKED)

#define ADAPTER_LOCK(adap)	           sx_xlock(&(adap)->lock);
#define ADAPTER_UNLOCK(adap)	           sx_xunlock(&(adap)->lock);
#define ADAPTER_LOCK_INIT(adap, name)      SX_INIT(&(adap)->lock, name)
#define ADAPTER_LOCK_DEINIT(adap)          SX_DESTROY(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) sx_assert(&(adap)->lock, SA_UNLOCKED)
#else
#define PORT_LOCK(port)		     mtx_lock(&(port)->lock);
#define PORT_UNLOCK(port)	     mtx_unlock(&(port)->lock);
#define PORT_LOCK_INIT(port, name)   mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port)       mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) mtx_assert(&(port)->lock, MA_OWNED)

#define ADAPTER_LOCK(adap)	mtx_lock(&(adap)->lock);
#define ADAPTER_UNLOCK(adap)	mtx_unlock(&(adap)->lock);
#define ADAPTER_LOCK_INIT(adap, name) mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap) mtx_destroy(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) mtx_assert(&(adap)->lock, MA_NOTOWNED)
#endif
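
/*
 * Typical usage of the wrappers above (a sketch, not taken verbatim from the
 * driver): the macros hide whether an sx lock or a mutex backs the object,
 * depending on USE_SX.
 *
 *	PORT_LOCK(p);
 *	(reconfigure the interface)
 *	PORT_UNLOCK(p);
 *
 *	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 *
 * "p" and "sc" are hypothetical port_info and adapter pointers.
 */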


static __inline uint32_t
t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
{
	return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
}

static __inline void
t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
{
	bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
}
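
/*
 * Illustrative only (register and field names hypothetical): a 32-bit
 * read-modify-write built from the two accessors above.
 *
 *	uint32_t v = t3_read_reg(adapter, A_SOME_REG);
 *	t3_write_reg(adapter, A_SOME_REG, v | F_SOME_BIT);
 */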

static __inline void
t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 4);
}

static __inline void
t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
{
	pci_write_config(adapter->dev, reg, val, 4);
}

static __inline void
t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 2);
}

static __inline void
t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
{
	pci_write_config(adapter->dev, reg, val, 2);
}
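
/*
 * Illustrative only: these helpers wrap pci_read_config()/pci_write_config()
 * so the shared hardware layer need not call the FreeBSD API directly, e.g.
 *
 *	uint16_t cmd;
 *
 *	t3_os_pci_read_config_2(adapter, PCIR_COMMAND, &cmd);
 *	t3_os_pci_write_config_2(adapter, PCIR_COMMAND, cmd | PCIM_CMD_BUSMASTEREN);
 */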

static __inline uint8_t *
t3_get_next_mcaddr(struct t3_rx_mode *rm)
{
	uint8_t *macaddr = NULL;

	if (rm->idx == 0)
		macaddr = rm->port->hw_addr;

	rm->idx++;
	return (macaddr);
}

static __inline void
t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
{
	rm->idx = 0;
	rm->port = port;
}
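
/*
 * Illustrative use of the rx-mode iterator above: initialize it for a port,
 * then walk the addresses until NULL is returned (currently only the port's
 * own hardware address is reported).
 *
 *	struct t3_rx_mode rm;
 *	uint8_t *ea;
 *
 *	t3_init_rx_mode(&rm, port);
 *	while ((ea = t3_get_next_mcaddr(&rm)) != NULL)
 *		(program ea into the MAC's address filter)
 */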

static __inline struct port_info *
adap2pinfo(struct adapter *adap, int idx)
{
	return &adap->port[idx];
}

int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
			int speed, int duplex, int fc);
void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_offload_tx(struct t3cdev *, struct mbuf *);
void t3_os_ext_intr_handler(adapter_t *adapter);
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);


int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
    int, struct port_info *);
void t3_free_sge_resources(adapter_t *);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);
void t3b_intr(void *data);
void t3_intr_msi(void *data);
void t3_intr_msix(void *data);
int t3_encap(struct port_info *, struct mbuf **, int *free);

int t3_sge_init_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_sge_deinit_sw(adapter_t *);

void t3_rx_eth_lro(adapter_t *adap, struct sge_rspq *rq, struct mbuf *m,
    int ethpad, uint32_t rss_hash, uint32_t rss_csum, int lro);
void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);
void t3_lro_flush(adapter_t *adap, struct sge_qset *qs, struct lro_state *state);

void t3_add_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
    unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
/*
 * XXX figure out how we can return this to being private to sge
 */
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))

#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))

static __inline struct sge_qset *
fl_to_qset(struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static __inline struct sge_qset *
rspq_to_qset(struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static __inline struct sge_qset *
txq_to_qset(struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

static __inline struct adapter *
tdev2adap(struct t3cdev *d)
{
	return container_of(d, struct adapter, tdev);
}

#undef container_of
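
/*
 * Illustrative only: the converters above recover the enclosing queue set
 * (or adapter) from a pointer to one of its embedded members, e.g. in an
 * interrupt or reclaim path:
 *
 *	struct sge_qset *qs = rspq_to_qset(rq);
 *	struct adapter *sc = tdev2adap(tdev);
 *
 * "rq" and "tdev" are hypothetical sge_rspq and t3cdev pointers.
 */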

#define OFFLOAD_DEVMAP_BIT 15
static inline int offload_running(adapter_t *adapter)
{
        return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
}


#endif