xref: /freebsd/sys/dev/cxgb/cxgb_adapter.h (revision 2be1a816b9ff69588e55be0a84cbe2a31efc0f2f)
1 /**************************************************************************
2 
3 Copyright (c) 2007, Chelsio Inc.
4 All rights reserved.
5 
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8 
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11 
12  2. Neither the name of the Chelsio Corporation nor the names of its
13     contributors may be used to endorse or promote products derived from
14     this software without specific prior written permission.
15 
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
27 
28 
29 $FreeBSD$
30 
31 ***************************************************************************/
32 
33 
34 #ifndef _CXGB_ADAPTER_H_
35 #define _CXGB_ADAPTER_H_
36 
37 #include <sys/lock.h>
38 #include <sys/mutex.h>
39 #include <sys/sx.h>
40 #include <sys/rman.h>
41 #include <sys/mbuf.h>
42 #include <sys/socket.h>
43 #include <sys/sockio.h>
44 #include <sys/condvar.h>
45 
46 #include <net/ethernet.h>
47 #include <net/if.h>
48 #include <net/if_media.h>
49 #include <net/if_dl.h>
50 
51 #include <machine/bus.h>
52 #include <machine/resource.h>
53 
54 #include <sys/bus_dma.h>
55 #include <dev/pci/pcireg.h>
56 #include <dev/pci/pcivar.h>
57 
58 
59 #ifdef CONFIG_DEFINED
60 #include <cxgb_osdep.h>
61 #include <t3cdev.h>
62 #include <ulp/toecore/cxgb_toedev.h>
63 #include <sys/mbufq.h>
64 #else
65 #include <dev/cxgb/cxgb_osdep.h>
66 #include <dev/cxgb/t3cdev.h>
67 #include <dev/cxgb/sys/mbufq.h>
68 #include <dev/cxgb/ulp/toecore/cxgb_toedev.h>
69 #endif
70 
/*
 * USE_SX selects sx(9) shared/exclusive locks for the per-port and
 * per-adapter locks (see the PORT_LOCK/ADAPTER_LOCK macro families
 * below); without it those locks are mtx(9) mutexes.
 */
#define USE_SX

struct adapter;
struct sge_qset;
extern int cxgb_debug;

/*
 * Lock construction/destruction wrappers.  With DEBUG_LOCKING defined,
 * every init/destroy is logged to the console together with the call
 * site's file and line; otherwise the wrappers alias the raw
 * mtx(9)/sx(9) primitives.
 */
#ifdef DEBUG_LOCKING
#define MTX_INIT(lock, lockname, class, flags) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		mtx_init((lock), lockname, class, flags);		\
	} while (0)

#define MTX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		mtx_destroy((lock));					\
	} while (0)

#define SX_INIT(lock, lockname) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		sx_init((lock), lockname);		\
	} while (0)

#define SX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		sx_destroy((lock));					\
	} while (0)
#else
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#define SX_INIT sx_init
#define SX_DESTROY sx_destroy
#endif
107 
/*
 * Per-port (network interface) state; struct adapter below embeds
 * MAX_NPORTS of these.
 */
struct port_info {
	struct adapter	*adapter;	/* back pointer to owning adapter */
	struct ifnet	*ifp;		/* network interface for this port */
	int		if_flags;	/* cached copy of ifp flags — TODO confirm use */
	const struct port_type_info *port_type;	/* static port-type description */
	struct cphy	phy;		/* PHY state (hardware layer) */
	struct cmac	mac;		/* MAC state (hardware layer) */
	struct link_config link_config;	/* advertised/negotiated link settings */
	struct ifmedia	media;		/* ifmedia selection state */
#ifdef USE_SX
	struct sx	lock;		/* port lock; see PORT_LOCK macros below */
#else
	struct mtx	lock;
#endif
	uint8_t		port_id;	/* index of this port on the adapter */
	uint8_t		tx_chan;	/* TX channel for this port — TODO confirm */
	uint8_t		txpkt_intf;	/* interface value used in TX work requests — TODO confirm */
	uint8_t         first_qset;	/* index of this port's first queue set */
	uint32_t	nqsets;		/* number of queue sets owned by this port */

	uint8_t		hw_addr[ETHER_ADDR_LEN];	/* Ethernet MAC address */
	struct taskqueue *tq;		/* per-port task queue */
	struct task     start_task;	/* deferred transmit start — TODO confirm */
	struct task	timer_reclaim_task;	/* deferred descriptor reclaim — TODO confirm */
	struct cdev     *port_cdev;	/* character device node for this port */

/* Sizes of the name-storage buffers below. */
#define PORT_LOCK_NAME_LEN 32
#define TASKQ_NAME_LEN 32
#define PORT_NAME_LEN 32
	char            lockbuf[PORT_LOCK_NAME_LEN];	/* lock-name storage */
	char            taskqbuf[TASKQ_NAME_LEN];	/* taskqueue-name storage */
	char            namebuf[PORT_NAME_LEN];		/* port-name storage */
};
141 
enum {				/* adapter flags (struct adapter::flags) */
	FULL_INIT_DONE	= (1 << 0),	/* full initialization completed */
	USING_MSI	= (1 << 1),	/* interrupts delivered via MSI */
	USING_MSIX	= (1 << 2),	/* interrupts delivered via MSI-X */
	QUEUES_BOUND	= (1 << 3),	/* queue sets bound — presumably to CPUs/vectors, TODO confirm */
	FW_UPTODATE     = (1 << 4),	/* firmware image is current */
	TPS_UPTODATE    = (1 << 5),	/* TP microcode is current — TODO confirm */
	CXGB_SHUTDOWN	= (1 << 6),	/* adapter is shutting down */
	CXGB_OFLD_INIT	= (1 << 7),	/* offload support initialized */
	TP_PARITY_INIT  = (1 << 8),	/* TP parity initialization done — TODO confirm */
};

/* Free-list ring size, in descriptors. */
#define FL_Q_SIZE	4096

/* Jumbo free-list ring size; smaller on i386 (presumably to limit memory/KVA use). */
#ifdef __i386__
#define JUMBO_Q_SIZE	256
#else
#define JUMBO_Q_SIZE	1024
#endif
/* Response-queue and Ethernet TX-queue ring sizes, in descriptors. */
#define RSPQ_Q_SIZE	1024
#define TX_ETH_Q_SIZE	1024

/* Indices of the three TX queues within a queue set's txq[] array. */
enum { TXQ_ETH = 0,
       TXQ_OFLD = 1,
       TXQ_CTRL = 2, };
167 
168 
/*
 * work request size in bytes
 */
#define WR_LEN (WR_FLITS * 8)
/* Payload bytes that fit in a work request alongside a cpl_tx_pkt header. */
#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt))


/* careful, the following are set on priv_flags and must not collide with
 * IFF_ flags!
 */
enum {
	LRO_ACTIVE = (1 << 8),	/* large receive offload enabled on the interface */
};

/* Max concurrent LRO sessions per queue set */
#define MAX_LRO_SES 8

/*
 * One in-progress LRO session: the mbuf chain being accumulated plus
 * the flow fields used to match and extend it.  Field roles inferred
 * from names — confirm against the LRO code.
 */
struct t3_lro_session {
	struct mbuf *head;	/* first mbuf of the aggregated chain */
	struct mbuf *tail;	/* last mbuf, for cheap append */
	uint32_t seq;		/* expected next TCP sequence — TODO confirm */
	uint16_t ip_len;	/* current IP datagram length — TODO confirm */
	uint16_t mss;		/* segment size for this flow */
	uint16_t vtag;		/* VLAN tag, if any */
	uint8_t npkts;		/* number of packets merged so far */
};

/* Per-queue-set LRO bookkeeping (up to MAX_LRO_SES concurrent flows). */
struct lro_state {
	unsigned short enabled;		/* LRO on/off for this queue set */
	unsigned short active_idx;	/* most recently used session index — TODO confirm */
	unsigned int nactive;		/* number of sessions in use */
	struct t3_lro_session sess[MAX_LRO_SES];
};
202 
/* Max packets per offload bundle — TODO confirm against offload path */
#define RX_BUNDLE_SIZE 8

struct rsp_desc;	/* SGE response-queue hardware descriptor (defined in SGE code) */

/*
 * SGE response (completion) queue, one per queue set.  NOTE(review):
 * per-field meanings below are inferred from names where this header
 * does not show them — confirm against the SGE implementation.
 */
struct sge_rspq {
	uint32_t	credits;	/* unreturned response credits */
	uint32_t	size;		/* ring size in descriptors */
	uint32_t	cidx;		/* consumer index */
	uint32_t	gen;		/* current generation bit */
	uint32_t	polling;	/* nonzero when in polling mode */
	uint32_t	holdoff_tmr;	/* interrupt holdoff timer value */
	uint32_t	next_holdoff;	/* holdoff value for the next update */
	uint32_t        imm_data;	/* stat: responses carrying immediate data */
	uint32_t        async_notif;	/* stat: async notification responses */
	uint32_t	cntxt_id;	/* hardware context id of this queue */
	uint32_t        offload_pkts;	/* stat: packets handed to the offload layer */
	uint32_t        offload_bundles; /* stat: offload bundles delivered */
	uint32_t        pure_rsps;	/* stat: responses with no data */
	uint32_t        unhandled_irqs;	/* stat: interrupts with nothing to do */

	bus_addr_t	phys_addr;	/* bus address of the descriptor ring */
	bus_dma_tag_t	desc_tag;	/* DMA tag for the ring */
	bus_dmamap_t	desc_map;	/* DMA map for the ring */

	struct t3_mbuf_hdr rspq_mh;	/* mbuf chain being assembled from responses */
	struct rsp_desc	*desc;		/* the descriptor ring itself */
	struct mtx      lock;		/* serializes response processing */
#define RSPQ_NAME_LEN  32
	char            lockbuf[RSPQ_NAME_LEN];	/* lock-name storage */
	uint32_t	rspq_dump_start;	/* sysctl ring-dump window start */
	uint32_t	rspq_dump_count;	/* sysctl ring-dump window length */
};

#ifndef DISABLE_MBUF_IOVEC
/* Alias for the head of the response queue's pending mbuf chain. */
#define rspq_mbuf rspq_mh.mh_head
#endif
239 
struct rx_desc;		/* SGE free-list hardware descriptor */
struct rx_sw_desc;	/* software state paired with each rx_desc */

/*
 * SGE free list: a ring of receive buffers posted to the hardware.
 * Field roles inferred from names — confirm against the SGE code.
 */
struct sge_fl {
	uint32_t	buf_size;	/* size of each buffer on this list */
	uint32_t	credits;	/* buffers currently posted */
	uint32_t	size;		/* ring size in descriptors */
	uint32_t	cidx;		/* consumer index */
	uint32_t	pidx;		/* producer index */
	uint32_t	gen;		/* current generation bit */
	bus_addr_t	phys_addr;	/* bus address of the ring */
	uint32_t	cntxt_id;	/* hardware context id */
	uint64_t	empty;		/* stat: times the list ran empty */
	bus_dma_tag_t	desc_tag;	/* DMA tag/map for the ring itself */
	bus_dmamap_t	desc_map;
	bus_dma_tag_t   entry_tag;	/* DMA tag for individual buffers */
	uma_zone_t      zone;		/* UMA zone the buffers come from */
	struct rx_desc	*desc;		/* hardware descriptor ring */
	struct rx_sw_desc *sdesc;	/* parallel software descriptors */
	int             type;		/* free-list type — TODO confirm semantics */
};
261 
struct tx_desc;		/* SGE TX hardware descriptor */
struct tx_sw_desc;	/* software state paired with each tx_desc */

/* flags bit: a thread is currently transmitting on this queue */
#define TXQ_TRANSMITTING    0x1

/*
 * SGE transmit queue (Ethernet, offload or control; see the TXQ_*
 * indices above).  Field roles inferred from names — confirm against
 * the SGE code.
 */
struct sge_txq {
	uint64_t	flags;		/* TXQ_TRANSMITTING etc. */
	uint32_t	in_use;		/* descriptors currently in use */
	uint32_t	size;		/* ring size in descriptors */
	uint32_t	processed;	/* total descriptors produced */
	uint32_t	cleaned;	/* total descriptors reclaimed */
	uint32_t	stop_thres;	/* stop the queue below this many free — TODO confirm */
	uint32_t	cidx;		/* consumer index */
	uint32_t	pidx;		/* producer index */
	uint32_t	gen;		/* current generation bit */
	uint32_t	unacked;	/* work sent since last completion request — TODO confirm */
	struct tx_desc	*desc;		/* hardware descriptor ring */
	struct tx_sw_desc *sdesc;	/* parallel software descriptors */
	uint32_t	token;
	bus_addr_t	phys_addr;	/* bus address of the ring */
	struct task     qresume_task;	/* restarts a stopped queue */
	struct task     qreclaim_task;	/* reclaims completed descriptors */
	struct port_info *port;		/* owning port */
	uint32_t	cntxt_id;	/* hardware context id */
	uint64_t	stops;		/* stat: times the queue was stopped */
	uint64_t	restarts;	/* stat: times the queue was restarted */
	bus_dma_tag_t	desc_tag;	/* DMA tag/map for the ring */
	bus_dmamap_t	desc_map;
	bus_dma_tag_t   entry_tag;	/* DMA tag for packet buffers */
	struct mbuf_head sendq;		/* mbufs queued for transmission */
	/*
	 * cleanq should really be an buf_ring to avoid extra
	 * mbuf touches
	 */
	struct mbuf_head cleanq;	/* mbufs awaiting free */
	struct buf_ring txq_mr;		/* multi-producer TX ring */
	struct mbuf     *immpkt;	/* packet for immediate transmit — TODO confirm */
	uint32_t        txq_drops;	/* stat: enqueue drops */
	uint32_t        txq_skipped;
	uint32_t        txq_coalesced;
	uint32_t        txq_enqueued;
	uint32_t	txq_dump_start;	/* sysctl ring-dump window start */
	uint32_t	txq_dump_count;	/* sysctl ring-dump window length */
	unsigned long   txq_frees;
	struct mtx      lock;		/* serializes access to this queue */
	struct sg_ent  txq_sgl[TX_MAX_SEGS / 2 + 1];	/* scratch scatter/gather list */
	#define TXQ_NAME_LEN  32
	char            lockbuf[TXQ_NAME_LEN];	/* lock-name storage */
};
311 
312 
/* Indices into a queue set's port_stats[] counter array. */
enum {
	SGE_PSTAT_TSO,              /* # of TSO requests */
	SGE_PSTAT_RX_CSUM_GOOD,     /* # of successful RX csum offloads */
	SGE_PSTAT_TX_CSUM,          /* # of TX checksum offloads */
	SGE_PSTAT_VLANEX,           /* # of VLAN tag extractions */
	SGE_PSTAT_VLANINS,          /* # of VLAN tag insertions */
	SGE_PSTATS_LRO_QUEUED,	    /* # of LRO appended packets */
	SGE_PSTATS_LRO_FLUSHED,	    /* # of LRO flushed packets */
	SGE_PSTATS_LRO_X_STREAMS,   /* # of exceeded LRO contexts */
};

/* Number of entries in port_stats[]. */
#define SGE_PSTAT_MAX (SGE_PSTATS_LRO_X_STREAMS+1)

/* qs_flags bits (struct sge_qset). */
#define QS_EXITING              0x1	/* queue-set thread should exit — TODO confirm */
#define QS_RUNNING              0x2	/* queue-set thread is running — TODO confirm */
#define QS_BOUND                0x4	/* bound, presumably to a CPU — TODO confirm */
329 
/*
 * A queue set: one response queue, SGE_RXQ_PER_SET free lists and
 * SGE_TXQ_PER_SET transmit queues, plus LRO state and statistics.
 */
struct sge_qset {
	struct sge_rspq		rspq;	/* completion/response queue */
	struct sge_fl		fl[SGE_RXQ_PER_SET];	/* receive free lists */
	struct lro_state        lro;	/* LRO sessions for this qset */
	struct sge_txq		txq[SGE_TXQ_PER_SET];	/* TXQ_ETH/OFLD/CTRL queues */
	uint32_t                txq_stopped;       /* which Tx queues are stopped */
	uint64_t                port_stats[SGE_PSTAT_MAX];	/* SGE_PSTAT_* counters */
	struct port_info        *port;	/* owning port */
	int                     idx; /* qset # */
	int                     qs_cpuid;	/* CPU this qset is associated with — TODO confirm */
	int                     qs_flags;	/* QS_EXITING / QS_RUNNING / QS_BOUND */
	struct cv		qs_cv;		/* condvar used with qs_mtx for wakeups */
	struct mtx		qs_mtx;		/* protects qs_flags/qs_cv — TODO confirm */
#define QS_NAME_LEN 32
	char                    namebuf[QS_NAME_LEN];	/* name storage */
};

/* All queue sets plus a lock for SGE register access. */
struct sge {
	struct sge_qset	        qs[SGE_QSETS];
	struct mtx              reg_lock;
};
351 
struct filter_info;	/* packet-filter table entry — defined elsewhere */

/*
 * Per-device softc for a T3 adapter: PCI/DMA/interrupt resources,
 * hardware-layer state (SGE, memory controllers, params) and the
 * per-port structures.
 */
struct adapter {
	device_t		dev;	/* newbus device handle */
	int			flags;	/* FULL_INIT_DONE etc. (enum above) */
	TAILQ_ENTRY(adapter)    adapter_entry;	/* linkage on a global adapter list */

	/* PCI register resources */
	int			regs_rid;
	struct resource		*regs_res;	/* register BAR */
	int			udbs_rid;
	struct resource		*udbs_res;	/* user doorbell BAR — TODO confirm */
	bus_space_handle_t	bh;	/* bus_space handle/tag used by t3_read_reg/t3_write_reg */
	bus_space_tag_t		bt;
	bus_size_t              mmio_len;	/* length of the mapped register region */
	uint32_t                link_width;	/* PCIe link width */

	/* DMA resources */
	bus_dma_tag_t		parent_dmat;	/* parent tag the others derive from */
	bus_dma_tag_t		rx_dmat;
	bus_dma_tag_t		rx_jumbo_dmat;
	bus_dma_tag_t		tx_dmat;

	/* Interrupt resources */
	struct resource		*irq_res;	/* INTx/MSI interrupt resource */
	int			irq_rid;
	void			*intr_tag;	/* cookie from bus_setup_intr */

	uint32_t		msix_regs_rid;
	struct resource		*msix_regs_res;	/* MSI-X table BAR */

	struct resource		*msix_irq_res[SGE_QSETS];	/* one MSI-X vector per qset */
	int			msix_irq_rid[SGE_QSETS];
	void			*msix_intr_tag[SGE_QSETS];
	uint8_t                 rxpkt_map[8]; /* maps RX_PKT interface values to port ids */
	uint8_t                 rrss_map[SGE_QSETS]; /* revers RSS map table */
	uint16_t                rspq_map[RSS_TABLE_SIZE];     /* maps 7-bit cookie to qidx */
	union {
		uint8_t                 fill[SGE_QSETS];
		uint64_t                coalesce;
	} u;

/* Accessors for the per-qset fill / coalesce union above. */
#define tunq_fill u.fill
#define tunq_coalesce u.coalesce

	struct filter_info      *filters;	/* filter table, if configured */

	/* Tasks */
	struct task		ext_intr_task;	/* deferred external-interrupt work */
	struct task		slow_intr_task;	/* deferred slow-path interrupt work */
	struct task		tick_task;	/* periodic housekeeping */
	struct task		process_responses_task;
	struct taskqueue	*tq;		/* task queue running the above */
	struct callout		cxgb_tick_ch;	/* periodic tick callout */
	struct callout		sge_timer_ch;	/* SGE timer callout */

	/* Register lock for use by the hardware layer */
	struct mtx		mdio_lock;	/* serializes MDIO (PHY) access; see MDIO_LOCK */
	struct mtx		elmer_lock;	/* serializes ELMER register access; see ELMR_LOCK */

	/* Bookkeeping for the hardware layer */
	struct adapter_params  params;	/* hardware configuration/capabilities */
	unsigned int slow_intr_mask;	/* enabled slow-path interrupt causes */
	unsigned long irq_stats[IRQ_NUM_STATS];	/* per-cause interrupt counters */

	struct sge              sge;	/* all queue sets */
	struct mc7              pmrx;	/* MC7 memory controllers — roles TODO confirm */
	struct mc7              pmtx;
	struct mc7              cm;
	struct mc5              mc5;	/* MC5 lookup engine — TODO confirm */

	struct port_info	port[MAX_NPORTS];	/* per-port state */
	device_t		portdev[MAX_NPORTS];	/* child devices, one per port */
	struct t3cdev           tdev;	/* offload (TOE) device */
	char                    fw_version[64];	/* firmware version string */
	uint32_t                open_device_map;	/* bitmap of open devices; see OFFLOAD_DEVMAP_BIT */
	uint32_t                registered_device_map;
#ifdef USE_SX
	struct sx               lock;	/* adapter-wide lock; see ADAPTER_LOCK macros */
#else
	struct mtx              lock;
#endif
	driver_intr_t           *cxgb_intr;	/* interrupt handler in use */
	int                     msi_count;	/* number of MSI/MSI-X messages allocated */

#define ADAPTER_LOCK_NAME_LEN	32
	char                    lockbuf[ADAPTER_LOCK_NAME_LEN];		/* lock-name storage */
	char                    reglockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
};

/*
 * Cursor used by t3_get_next_mcaddr() to walk a port's multicast
 * link-level address list.
 */
struct t3_rx_mode {

	uint32_t                idx;	/* next multicast entry to return */
	struct port_info        *port;	/* port whose list is being walked */
};
449 
450 
/* Serialize MDIO and ELMER register access via the adapter mutexes. */
#define MDIO_LOCK(adapter)	mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter)	mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter)	mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter)	mtx_unlock(&(adapter)->elmer_lock)
455 
456 
#ifdef USE_SX
/*
 * sx(9) variants of the port/adapter locks.  The trailing semicolon is
 * supplied by the caller, not embedded in the expansion, so these are
 * safe inside unbraced if/else bodies.
 */
#define PORT_LOCK(port)		     sx_xlock(&(port)->lock)
#define PORT_UNLOCK(port)	     sx_xunlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name)   SX_INIT(&(port)->lock, name)
#define PORT_LOCK_DEINIT(port)       SX_DESTROY(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) sx_assert(&(port)->lock, SA_LOCKED)

#define ADAPTER_LOCK(adap)	           sx_xlock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap)	           sx_xunlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name)      SX_INIT(&(adap)->lock, name)
#define ADAPTER_LOCK_DEINIT(adap)          SX_DESTROY(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) sx_assert(&(adap)->lock, SA_UNLOCKED)
#else
/* mtx(9) variants, compiled when USE_SX is not defined. */
#define PORT_LOCK(port)		     mtx_lock(&(port)->lock)
#define PORT_UNLOCK(port)	     mtx_unlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name)   mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port)       mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) mtx_assert(&(port)->lock, MA_OWNED)

#define ADAPTER_LOCK(adap)	mtx_lock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap)	mtx_unlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name) mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap) mtx_destroy(&(adap)->lock)
/* MA_NOTOWNED is the mtx_assert(9) flag; MO_NOTOWNED does not exist. */
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) mtx_assert(&(adap)->lock, MA_NOTOWNED)
#endif
482 
483 
/* Read a 32-bit adapter register via the adapter's bus_space handle. */
static __inline uint32_t
t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
{
	return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
}

/* Write a 32-bit adapter register via the adapter's bus_space handle. */
static __inline void
t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
{
	bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
}

/* Read a 32-bit PCI config-space register; result stored through 'val'. */
static __inline void
t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 4);
}

/* Write a 32-bit PCI config-space register. */
static __inline void
t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
{
	pci_write_config(adapter->dev, reg, val, 4);
}

/* Read a 16-bit PCI config-space register; result stored through 'val'. */
static __inline void
t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 2);
}

/* Write a 16-bit PCI config-space register. */
static __inline void
t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
{
	pci_write_config(adapter->dev, reg, val, 2);
}
519 
520 static __inline uint8_t *
521 t3_get_next_mcaddr(struct t3_rx_mode *rm)
522 {
523 	uint8_t *macaddr = NULL;
524 	struct ifnet *ifp = rm->port->ifp;
525 	struct ifmultiaddr *ifma;
526 	int i = 0;
527 
528 	IF_ADDR_LOCK(ifp);
529 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
530 		if (ifma->ifma_addr->sa_family != AF_LINK)
531 			continue;
532 		if (i == rm->idx) {
533 			macaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
534 			break;
535 		}
536 		i++;
537 	}
538 	IF_ADDR_UNLOCK(ifp);
539 
540 
541 	rm->idx++;
542 	return (macaddr);
543 }
544 
545 static __inline void
546 t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
547 {
548 	rm->idx = 0;
549 	rm->port = port;
550 }
551 
552 static __inline struct port_info *
553 adap2pinfo(struct adapter *adap, int idx)
554 {
555 	return &adap->port[idx];
556 }
557 
/* OS-layer PCI helpers and hardware-layer callbacks. */
int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
			int speed, int duplex, int fc);
void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_offload_tx(struct t3cdev *, struct mbuf *);
void t3_os_ext_intr_handler(adapter_t *adapter);
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);


/* SGE setup/teardown, interrupt handlers, and TX encapsulation. */
int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
    int, struct port_info *);
void t3_free_sge_resources(adapter_t *);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);
void t3b_intr(void *data);
void t3_intr_msi(void *data);
void t3_intr_msix(void *data);
int t3_encap(struct sge_qset *, struct mbuf **, int);

int t3_sge_init_adapter(adapter_t *);
int t3_sge_reset_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_sge_deinit_sw(adapter_t *);
void t3_free_tx_desc(struct sge_txq *q, int n);
void t3_free_tx_desc_all(struct sge_txq *q);

/* RX delivery and LRO entry points. */
void t3_rx_eth_lro(adapter_t *adap, struct sge_rspq *rq, struct mbuf *m,
    int ethpad, uint32_t rss_hash, uint32_t rss_csum, int lro);
void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);
void t3_lro_flush(adapter_t *adap, struct sge_qset *qs, struct lro_state *state);

/* sysctl plumbing and descriptor introspection. */
void t3_add_attach_sysctls(adapter_t *sc);
void t3_add_configured_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
    unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
/*
 * XXX figure out how we can return this to being private to sge
 */
/* Number of TX descriptors that may be reclaimed from queue q. */
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))

/* Recover a pointer to the structure containing 'field' at address p. */
#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))
605 
/* Upcast from an embedded free list to its enclosing queue set. */
static __inline struct sge_qset *
fl_to_qset(struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

/* Upcast from an embedded response queue to its enclosing queue set. */
static __inline struct sge_qset *
rspq_to_qset(struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

/* Upcast from an embedded TX queue to its enclosing queue set. */
static __inline struct sge_qset *
txq_to_qset(struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/* Upcast from the embedded t3cdev to its enclosing adapter. */
static __inline struct adapter *
tdev2adap(struct t3cdev *d)
{
	return container_of(d, struct adapter, tdev);
}

/* Keep the container_of macro private to this header. */
#undef container_of
631 
/* Bit in adapter->open_device_map marking the offload device as open. */
#define OFFLOAD_DEVMAP_BIT 15
/* Nonzero when the offload device is open on this adapter. */
static inline int offload_running(adapter_t *adapter)
{
        return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
}
637 
/* Per-CPU transmit path interface. */
int cxgb_pcpu_enqueue_packet(struct ifnet *ifp, struct mbuf *m);
int cxgb_pcpu_start(struct ifnet *ifp, struct mbuf *m);
void cxgb_pcpu_shutdown_threads(struct adapter *sc);
void cxgb_pcpu_startup_threads(struct adapter *sc);

/* Response processing, queue-set teardown, and start/refill entry points. */
int process_responses(adapter_t *adap, struct sge_qset *qs, int budget);
void t3_free_qset(adapter_t *sc, struct sge_qset *q);
void cxgb_start(struct ifnet *ifp);
void refill_fl_service(adapter_t *adap, struct sge_fl *fl);
#endif
648