/**************************************************************************

Copyright (c) 2007-2009, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

$FreeBSD$

***************************************************************************/

#ifndef _CXGB_ADAPTER_H_
#define _CXGB_ADAPTER_H_

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/rman.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/condvar.h>
#include <sys/buf_ring.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_dl.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/bus_dma.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <cxgb_osdep.h>
#include <t3cdev.h>
#include <sys/mbufq.h>

#ifdef LRO_SUPPORTED
#include <netinet/tcp_lro.h>
#endif

#define USE_SX

struct adapter;
struct sge_qset;
extern int cxgb_debug;

#ifdef DEBUG_LOCKING
#define MTX_INIT(lock, lockname, class, flags) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		mtx_init((lock), lockname, class, flags);		\
	} while (0)

#define MTX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		mtx_destroy((lock));					\
	} while (0)

#define SX_INIT(lock, lockname) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		sx_init((lock), lockname);		\
	} while (0)

#define SX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		sx_destroy((lock));					\
	} while (0)
#else
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#define SX_INIT sx_init
#define SX_DESTROY sx_destroy
#endif
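
/*
 * Illustrative sketch (not part of the driver): with DEBUG_LOCKING defined,
 * MTX_INIT/MTX_DESTROY behave like mtx_init/mtx_destroy but also log where
 * each lock is created and torn down, which helps trace lock lifetimes.
 * The structure and names below are hypothetical.
 *
 *	struct example_softc {
 *		struct mtx ex_lock;
 *	} sc;
 *
 *	MTX_INIT(&sc.ex_lock, "example lock", NULL, MTX_DEF);
 *	mtx_lock(&sc.ex_lock);
 *	mtx_unlock(&sc.ex_lock);
 *	MTX_DESTROY(&sc.ex_lock);
 */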

enum {
	LF_NO = 0,
	LF_MAYBE,
	LF_YES
};

struct port_info {
	struct adapter	*adapter;
	struct ifnet	*ifp;
	int		if_flags;
	const struct port_type_info *port_type;
	struct cphy	phy;
	struct cmac	mac;
	struct link_config link_config;
	struct ifmedia	media;
#ifdef USE_SX
	struct sx	lock;
#else
	struct mtx	lock;
#endif
	uint8_t		port_id;
	uint8_t		tx_chan;
	uint8_t		txpkt_intf;
	uint8_t         first_qset;
	uint32_t	nqsets;
	int		link_fault;

	uint8_t		hw_addr[ETHER_ADDR_LEN];
	struct task	timer_reclaim_task;
	struct cdev     *port_cdev;

#define PORT_LOCK_NAME_LEN 32
#define PORT_NAME_LEN 32
	char            lockbuf[PORT_LOCK_NAME_LEN];
	char            namebuf[PORT_NAME_LEN];
};

enum {				/* adapter flags */
	FULL_INIT_DONE	= (1 << 0),
	USING_MSI	= (1 << 1),
	USING_MSIX	= (1 << 2),
	QUEUES_BOUND	= (1 << 3),
	FW_UPTODATE     = (1 << 4),
	TPS_UPTODATE    = (1 << 5),
	CXGB_SHUTDOWN	= (1 << 6),
	CXGB_OFLD_INIT	= (1 << 7),
	TP_PARITY_INIT  = (1 << 8),
};

#define FL_Q_SIZE	4096
#define JUMBO_Q_SIZE	1024
#define RSPQ_Q_SIZE	1024
#define TX_ETH_Q_SIZE	1024

enum { TXQ_ETH = 0,
       TXQ_OFLD = 1,
       TXQ_CTRL = 2, };

/*
 * work request size in bytes
 */
#define WR_LEN (WR_FLITS * 8)
#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt_lso))

#ifdef LRO_SUPPORTED
struct lro_state {
	unsigned short enabled;
	struct lro_ctrl ctrl;
};
#endif

#define RX_BUNDLE_SIZE 8

struct rsp_desc;

struct sge_rspq {
	uint32_t	credits;
	uint32_t	size;
	uint32_t	cidx;
	uint32_t	gen;
	uint32_t	polling;
	uint32_t	holdoff_tmr;
	uint32_t	next_holdoff;
	uint32_t        imm_data;
	uint32_t        async_notif;
	uint32_t	cntxt_id;
	uint32_t        offload_pkts;
	uint32_t        offload_bundles;
	uint32_t        pure_rsps;
	uint32_t        unhandled_irqs;

	bus_addr_t	phys_addr;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;

	struct t3_mbuf_hdr rspq_mh;
	struct rsp_desc	*desc;
	struct mtx      lock;
#define RSPQ_NAME_LEN  32
	char            lockbuf[RSPQ_NAME_LEN];
	uint32_t	rspq_dump_start;
	uint32_t	rspq_dump_count;
};

#ifndef DISABLE_MBUF_IOVEC
#define rspq_mbuf rspq_mh.mh_head
#endif

struct rx_desc;
struct rx_sw_desc;

struct sge_fl {
	uint32_t	buf_size;
	uint32_t	credits;
	uint32_t	size;
	uint32_t	cidx;
	uint32_t	pidx;
	uint32_t	gen;
	bus_addr_t	phys_addr;
	uint32_t	cntxt_id;
	uint32_t	empty;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;
	bus_dma_tag_t   entry_tag;
	uma_zone_t      zone;
	struct rx_desc	*desc;
	struct rx_sw_desc *sdesc;
	int             type;
};

struct tx_desc;
struct tx_sw_desc;

#define TXQ_TRANSMITTING    0x1

struct sge_txq {
	uint64_t	flags;
	uint32_t	in_use;
	uint32_t	size;
	uint32_t	processed;
	uint32_t	cleaned;
	uint32_t	stop_thres;
	uint32_t	cidx;
	uint32_t	pidx;
	uint32_t	gen;
	uint32_t	unacked;
	struct tx_desc	*desc;
	struct tx_sw_desc *sdesc;
	uint32_t	token;
	bus_addr_t	phys_addr;
	struct task     qresume_task;
	struct task     qreclaim_task;
	struct port_info *port;
	uint32_t	cntxt_id;
	uint64_t	stops;
	uint64_t	restarts;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;
	bus_dma_tag_t   entry_tag;
	struct mbuf_head sendq;
	/*
	 * cleanq should really be a buf_ring to avoid extra
	 * mbuf touches
	 */
	struct mbuf_head cleanq;
	struct buf_ring *txq_mr;
	struct ifaltq	*txq_ifq;
	struct mbuf     *immpkt;

	uint32_t        txq_drops;
	uint32_t        txq_skipped;
	uint32_t        txq_coalesced;
	uint32_t        txq_enqueued;
	uint32_t	txq_dump_start;
	uint32_t	txq_dump_count;
	unsigned long   txq_frees;
	struct mtx      lock;
	struct sg_ent   txq_sgl[TX_MAX_SEGS / 2 + 1];
#define TXQ_NAME_LEN  32
	char            lockbuf[TXQ_NAME_LEN];
};

enum {
	SGE_PSTAT_TSO,              /* # of TSO requests */
	SGE_PSTAT_RX_CSUM_GOOD,     /* # of successful RX csum offloads */
	SGE_PSTAT_TX_CSUM,          /* # of TX checksum offloads */
	SGE_PSTAT_VLANEX,           /* # of VLAN tag extractions */
	SGE_PSTAT_VLANINS,          /* # of VLAN tag insertions */
};

#define SGE_PSTAT_MAX (SGE_PSTAT_VLANINS+1)

#define QS_EXITING              0x1
#define QS_RUNNING              0x2
#define QS_BOUND                0x4

struct sge_qset {
	struct sge_rspq		rspq;
	struct sge_fl		fl[SGE_RXQ_PER_SET];
#ifdef LRO_SUPPORTED
	struct lro_state        lro;
#endif
	struct sge_txq		txq[SGE_TXQ_PER_SET];
	uint32_t                txq_stopped;       /* which Tx queues are stopped */
	uint64_t                port_stats[SGE_PSTAT_MAX];
	struct port_info        *port;
	int                     idx; /* qset # */
	int                     qs_cpuid;
	int                     qs_flags;
	struct cv		qs_cv;
	struct mtx		qs_mtx;
#define QS_NAME_LEN 32
	char                    namebuf[QS_NAME_LEN];
};

struct sge {
	struct sge_qset	        qs[SGE_QSETS];
	struct mtx              reg_lock;
};

struct filter_info;

struct adapter {
	device_t		dev;
	int			flags;
	TAILQ_ENTRY(adapter)    adapter_entry;

	/* PCI register resources */
	int			regs_rid;
	struct resource		*regs_res;
	int			udbs_rid;
	struct resource		*udbs_res;
	bus_space_handle_t	bh;
	bus_space_tag_t		bt;
	bus_size_t              mmio_len;
	uint32_t                link_width;

	/* DMA resources */
	bus_dma_tag_t		parent_dmat;
	bus_dma_tag_t		rx_dmat;
	bus_dma_tag_t		rx_jumbo_dmat;
	bus_dma_tag_t		tx_dmat;

	/* Interrupt resources */
	struct resource		*irq_res;
	int			irq_rid;
	void			*intr_tag;

	uint32_t		msix_regs_rid;
	struct resource		*msix_regs_res;

	struct resource		*msix_irq_res[SGE_QSETS];
	int			msix_irq_rid[SGE_QSETS];
	void			*msix_intr_tag[SGE_QSETS];
	uint8_t                 rxpkt_map[8]; /* maps RX_PKT interface values to port ids */
	uint8_t                 rrss_map[SGE_QSETS]; /* reverse RSS map table */
	uint16_t                rspq_map[RSS_TABLE_SIZE];     /* maps 7-bit cookie to qidx */
	union {
		uint8_t                 fill[SGE_QSETS];
		uint64_t                coalesce;
	} u;

#define tunq_fill u.fill
#define tunq_coalesce u.coalesce
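	/*
	 * tunq_fill and tunq_coalesce overlay the same storage: assuming
	 * SGE_QSETS is no larger than 8, every per-qset fill[] byte can be
	 * examined in a single access through the 64-bit coalesce word.
	 */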

	struct filter_info      *filters;

	/* Tasks */
	struct task		ext_intr_task;
	struct task		slow_intr_task;
	struct task		tick_task;
	struct taskqueue	*tq;
	struct callout		cxgb_tick_ch;
	struct callout		sge_timer_ch;

	unsigned int		check_task_cnt;

	/* Register locks for use by the hardware layer */
	struct mtx		mdio_lock;
	struct mtx		elmer_lock;

	/* Bookkeeping for the hardware layer */
	struct adapter_params  params;
	unsigned int slow_intr_mask;
	unsigned long irq_stats[IRQ_NUM_STATS];

	struct sge              sge;
	struct mc7              pmrx;
	struct mc7              pmtx;
	struct mc7              cm;
	struct mc5              mc5;

	struct port_info	port[MAX_NPORTS];
	device_t		portdev[MAX_NPORTS];
	struct t3cdev           tdev;
	char                    fw_version[64];
	char                    port_types[MAX_NPORTS + 1];
	uint32_t                open_device_map;
	uint32_t                registered_device_map;
#ifdef USE_SX
	struct sx               lock;
#else
	struct mtx              lock;
#endif
	driver_intr_t           *cxgb_intr;
	int                     msi_count;

#define ADAPTER_LOCK_NAME_LEN	32
	char                    lockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    reglockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
};

struct t3_rx_mode {
	uint32_t                idx;
	struct port_info        *port;
};

#define MDIO_LOCK(adapter)	mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter)	mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter)	mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter)	mtx_unlock(&(adapter)->elmer_lock)

#ifdef USE_SX
#define PORT_LOCK(port)		     sx_xlock(&(port)->lock)
#define PORT_UNLOCK(port)	     sx_xunlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name)   SX_INIT(&(port)->lock, name)
#define PORT_LOCK_DEINIT(port)       SX_DESTROY(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) sx_assert(&(port)->lock, SA_LOCKED)

#define ADAPTER_LOCK(adap)	           sx_xlock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap)	           sx_xunlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name)      SX_INIT(&(adap)->lock, name)
#define ADAPTER_LOCK_DEINIT(adap)          SX_DESTROY(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) sx_assert(&(adap)->lock, SA_UNLOCKED)
#define ADAPTER_LOCK_ASSERT_OWNED(adap)    sx_assert(&(adap)->lock, SA_LOCKED)
#else
#define PORT_LOCK(port)		     mtx_lock(&(port)->lock)
#define PORT_UNLOCK(port)	     mtx_unlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name)   mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port)       mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) mtx_assert(&(port)->lock, MA_OWNED)

#define ADAPTER_LOCK(adap)	mtx_lock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap)	mtx_unlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name) mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap) mtx_destroy(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) mtx_assert(&(adap)->lock, MA_NOTOWNED)
#define ADAPTER_LOCK_ASSERT_OWNED(adap) mtx_assert(&(adap)->lock, MA_OWNED)
#endif
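
/*
 * Illustrative sketch (not part of the driver): the port and adapter locks
 * are always taken through the macros above, so the same call sites work
 * whether USE_SX is defined or not.  The helper below is hypothetical.
 *
 *	static void
 *	example_set_flags(struct port_info *pi, int new_if_flags)
 *	{
 *		PORT_LOCK(pi);
 *		pi->if_flags = new_if_flags;
 *		PORT_UNLOCK(pi);
 *	}
 */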

static __inline uint32_t
t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
{
	return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
}

static __inline void
t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
{
	bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
}
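
/*
 * Illustrative sketch (not part of the driver): t3_read_reg/t3_write_reg go
 * through the bus_space tag and handle mapped over the register BAR, so a
 * read-modify-write looks like the hypothetical helper below (A_EXAMPLE_REG
 * is a made-up name, not a real T3 register definition).
 *
 *	static __inline void
 *	example_set_bits(adapter_t *sc, uint32_t mask)
 *	{
 *		uint32_t v;
 *
 *		v = t3_read_reg(sc, A_EXAMPLE_REG);
 *		t3_write_reg(sc, A_EXAMPLE_REG, v | mask);
 *	}
 */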

static __inline void
t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 4);
}

static __inline void
t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
{
	pci_write_config(adapter->dev, reg, val, 4);
}

static __inline void
t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 2);
}

static __inline void
t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
{
	pci_write_config(adapter->dev, reg, val, 2);
}

static __inline uint8_t *
t3_get_next_mcaddr(struct t3_rx_mode *rm)
{
	uint8_t *macaddr = NULL;
	struct ifnet *ifp = rm->port->ifp;
	struct ifmultiaddr *ifma;
	int i = 0;

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (i == rm->idx) {
			macaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			break;
		}
		i++;
	}
	IF_ADDR_UNLOCK(ifp);

	rm->idx++;
	return (macaddr);
}

static __inline void
t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
{
	rm->idx = 0;
	rm->port = port;
}
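
/*
 * Illustrative sketch (not part of the driver): a t3_rx_mode is initialized
 * once and then walked with t3_get_next_mcaddr() until it returns NULL, one
 * link-level multicast address per call.  The loop body is hypothetical.
 *
 *	struct t3_rx_mode rm;
 *	uint8_t *mcaddr;
 *
 *	t3_init_rx_mode(&rm, pi);
 *	while ((mcaddr = t3_get_next_mcaddr(&rm)) != NULL) {
 *		// program mcaddr into the MAC's multicast filters
 *	}
 */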

static __inline struct port_info *
adap2pinfo(struct adapter *adap, int idx)
{
	return (&adap->port[idx]);
}

int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
			int speed, int duplex, int fc);
void t3_os_phymod_changed(struct adapter *adap, int port_id);
void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_offload_tx(struct t3cdev *, struct mbuf *);
void t3_os_ext_intr_handler(adapter_t *adapter);
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);

int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
    int, struct port_info *);
void t3_free_sge_resources(adapter_t *);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);
void t3b_intr(void *data);
void t3_intr_msi(void *data);
void t3_intr_msix(void *data);
int t3_encap(struct sge_qset *, struct mbuf **, int);

int t3_sge_init_adapter(adapter_t *);
int t3_sge_reset_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_sge_deinit_sw(adapter_t *);
void t3_free_tx_desc(struct sge_txq *q, int n);
void t3_free_tx_desc_all(struct sge_txq *q);

void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);

void t3_add_attach_sysctls(adapter_t *sc);
void t3_add_configured_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
    unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);

#define CXGB_TICKS(a) ((a)->params.linkpoll_period ? \
    (hz * (a)->params.linkpoll_period) / 10 : \
    (a)->params.stats_update_period * hz)
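
/*
 * Worked example, assuming HZ = 1000: linkpoll_period is scaled by 1/10, so
 * it appears to be expressed in tenths of a second; linkpoll_period = 5
 * gives CXGB_TICKS(sc) = (1000 * 5) / 10 = 500 ticks, i.e. half a second.
 * With linkpoll_period = 0 the stats_update_period (in seconds) is used
 * instead, so stats_update_period = 2 yields 2000 ticks.
 */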

/*
 * XXX figure out how we can return this to being private to sge
 */
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))

#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))

static __inline struct sge_qset *
fl_to_qset(struct sge_fl *q, int qidx)
{
	return (container_of(q, struct sge_qset, fl[qidx]));
}

static __inline struct sge_qset *
rspq_to_qset(struct sge_rspq *q)
{
	return (container_of(q, struct sge_qset, rspq));
}

static __inline struct sge_qset *
txq_to_qset(struct sge_txq *q, int qidx)
{
	return (container_of(q, struct sge_qset, txq[qidx]));
}

static __inline struct adapter *
tdev2adap(struct t3cdev *d)
{
	return (container_of(d, struct adapter, tdev));
}

#undef container_of
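
/*
 * Illustrative sketch (not part of the driver): container_of recovers the
 * enclosing structure from a pointer to one of its members by subtracting
 * the member's offset.  Given the sge_qset layout above, for example:
 *
 *	struct sge_qset qs;
 *	struct sge_rspq *rspq = &qs.rspq;
 *
 *	// rspq_to_qset(rspq) == &qs, because rspq sits at offset
 *	// offsetof(struct sge_qset, rspq) inside the qset.
 */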

#define OFFLOAD_DEVMAP_BIT 15
static __inline int
offload_running(adapter_t *adapter)
{
	return (isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT));
}

int cxgb_pcpu_enqueue_packet(struct ifnet *ifp, struct mbuf *m);
int cxgb_pcpu_transmit(struct ifnet *ifp, struct mbuf *m);
void cxgb_pcpu_shutdown_threads(struct adapter *sc);
void cxgb_pcpu_startup_threads(struct adapter *sc);

int process_responses(adapter_t *adap, struct sge_qset *qs, int budget);
void t3_free_qset(adapter_t *sc, struct sge_qset *q);
void cxgb_start(struct ifnet *ifp);
void refill_fl_service(adapter_t *adap, struct sge_fl *fl);
#endif /* _CXGB_ADAPTER_H_ */
628