/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.


$FreeBSD$

***************************************************************************/


#ifndef _CXGB_ADAPTER_H_
#define _CXGB_ADAPTER_H_

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/rman.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/condvar.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_dl.h>
#include <netinet/tcp_lro.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/bus_dma.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>


#ifdef CONFIG_DEFINED
#include <cxgb_osdep.h>
#include <t3cdev.h>
#include <ulp/toecore/cxgb_toedev.h>
#include <sys/mbufq.h>
#else
#include <dev/cxgb/cxgb_osdep.h>
#include <dev/cxgb/t3cdev.h>
#include <dev/cxgb/sys/mbufq.h>
#include <dev/cxgb/ulp/toecore/cxgb_toedev.h>
#endif

#define USE_SX

struct adapter;
struct sge_qset;
extern int cxgb_debug;

#ifdef DEBUG_LOCKING
#define MTX_INIT(lock, lockname, class, flags) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		mtx_init((lock), lockname, class, flags);		\
	} while (0)

#define MTX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		mtx_destroy((lock));					\
	} while (0)

#define SX_INIT(lock, lockname) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		sx_init((lock), lockname);		\
	} while (0)

#define SX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		sx_destroy((lock));					\
	} while (0)
#else
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#define SX_INIT sx_init
#define SX_DESTROY sx_destroy
#endif

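/*
 * Per-port software state: back pointers to the adapter and ifnet, PHY/MAC
 * and link configuration, the range of queue sets assigned to the port, and
 * the port's lock, task queue, and character device.
 */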
struct port_info {
	struct adapter	*adapter;
	struct ifnet	*ifp;
	int		if_flags;
	const struct port_type_info *port_type;
	struct cphy	phy;
	struct cmac	mac;
	struct link_config link_config;
	struct ifmedia	media;
#ifdef USE_SX
	struct sx	lock;
#else
	struct mtx	lock;
#endif
	uint8_t		port_id;
	uint8_t		tx_chan;
	uint8_t		txpkt_intf;
	uint8_t         first_qset;
	uint32_t	nqsets;

	uint8_t		hw_addr[ETHER_ADDR_LEN];
	struct taskqueue *tq;
	struct task     start_task;
	struct task	timer_reclaim_task;
	struct cdev     *port_cdev;

#define PORT_LOCK_NAME_LEN 32
#define TASKQ_NAME_LEN 32
#define PORT_NAME_LEN 32
	char            lockbuf[PORT_LOCK_NAME_LEN];
	char            taskqbuf[TASKQ_NAME_LEN];
	char            namebuf[PORT_NAME_LEN];
};

enum {				/* adapter flags */
	FULL_INIT_DONE	= (1 << 0),
	USING_MSI	= (1 << 1),
	USING_MSIX	= (1 << 2),
	QUEUES_BOUND	= (1 << 3),
	FW_UPTODATE     = (1 << 4),
	TPS_UPTODATE    = (1 << 5),
	CXGB_SHUTDOWN	= (1 << 6),
	CXGB_OFLD_INIT	= (1 << 7),
	TP_PARITY_INIT  = (1 << 8),
};

#define FL_Q_SIZE	4096

#ifdef __i386__
#define JUMBO_Q_SIZE	256
#else
#define JUMBO_Q_SIZE	1024
#endif
#define RSPQ_Q_SIZE	1024
#define TX_ETH_Q_SIZE	1024

enum { TXQ_ETH = 0,
       TXQ_OFLD = 1,
       TXQ_CTRL = 2, };


/*
 * work request size in bytes
 */
#define WR_LEN (WR_FLITS * 8)
#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt_lso))

struct lro_state {
	unsigned short enabled;
	struct lro_ctrl ctrl;
};

#define RX_BUNDLE_SIZE 8

struct rsp_desc;

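/*
 * SGE response queue state: consumer index and generation bit, interrupt
 * holdoff settings, event counters, and the DMA resources backing the
 * descriptor ring.
 */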
struct sge_rspq {
	uint32_t	credits;
	uint32_t	size;
	uint32_t	cidx;
	uint32_t	gen;
	uint32_t	polling;
	uint32_t	holdoff_tmr;
	uint32_t	next_holdoff;
	uint32_t        imm_data;
	uint32_t        async_notif;
	uint32_t	cntxt_id;
	uint32_t        offload_pkts;
	uint32_t        offload_bundles;
	uint32_t        pure_rsps;
	uint32_t        unhandled_irqs;

	bus_addr_t	phys_addr;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;

	struct t3_mbuf_hdr rspq_mh;
	struct rsp_desc	*desc;
	struct mtx      lock;
#define RSPQ_NAME_LEN  32
	char            lockbuf[RSPQ_NAME_LEN];
	uint32_t	rspq_dump_start;
	uint32_t	rspq_dump_count;
};

#ifndef DISABLE_MBUF_IOVEC
#define rspq_mbuf rspq_mh.mh_head
#endif

struct rx_desc;
struct rx_sw_desc;

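/*
 * SGE free-buffer list state: ring indices, the buffer size and UMA zone
 * used to refill the list, and the DMA tags and maps for the descriptor
 * ring and its entries.
 */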
struct sge_fl {
	uint32_t	buf_size;
	uint32_t	credits;
	uint32_t	size;
	uint32_t	cidx;
	uint32_t	pidx;
	uint32_t	gen;
	bus_addr_t	phys_addr;
	uint32_t	cntxt_id;
	uint64_t	empty;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;
	bus_dma_tag_t   entry_tag;
	uma_zone_t      zone;
	struct rx_desc	*desc;
	struct rx_sw_desc *sdesc;
	int             type;
};

struct tx_desc;
struct tx_sw_desc;

#define TXQ_TRANSMITTING    0x1

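/*
 * SGE transmit queue state: ring indices and reclaim bookkeeping, the
 * software send/clean queues, per-queue statistics, and the tasks used to
 * resume and reclaim the queue.
 */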
struct sge_txq {
	uint64_t	flags;
	uint32_t	in_use;
	uint32_t	size;
	uint32_t	processed;
	uint32_t	cleaned;
	uint32_t	stop_thres;
	uint32_t	cidx;
	uint32_t	pidx;
	uint32_t	gen;
	uint32_t	unacked;
	struct tx_desc	*desc;
	struct tx_sw_desc *sdesc;
	uint32_t	token;
	bus_addr_t	phys_addr;
	struct task     qresume_task;
	struct task     qreclaim_task;
	struct port_info *port;
	uint32_t	cntxt_id;
	uint64_t	stops;
	uint64_t	restarts;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;
	bus_dma_tag_t   entry_tag;
	struct mbuf_head sendq;
	/*
	 * cleanq should really be a buf_ring to avoid extra
	 * mbuf touches
	 */
	struct mbuf_head cleanq;
	struct buf_ring txq_mr;
	struct mbuf     *immpkt;
	uint32_t        txq_drops;
	uint32_t        txq_skipped;
	uint32_t        txq_coalesced;
	uint32_t        txq_enqueued;
	uint32_t	txq_dump_start;
	uint32_t	txq_dump_count;
	unsigned long   txq_frees;
	struct mtx      lock;
	struct sg_ent  txq_sgl[TX_MAX_SEGS / 2 + 1];
	#define TXQ_NAME_LEN  32
	char            lockbuf[TXQ_NAME_LEN];
};


enum {
	SGE_PSTAT_TSO,              /* # of TSO requests */
	SGE_PSTAT_RX_CSUM_GOOD,     /* # of successful RX csum offloads */
	SGE_PSTAT_TX_CSUM,          /* # of TX checksum offloads */
	SGE_PSTAT_VLANEX,           /* # of VLAN tag extractions */
	SGE_PSTAT_VLANINS,          /* # of VLAN tag insertions */
};

#define SGE_PSTAT_MAX (SGE_PSTAT_VLANINS+1)

#define QS_EXITING              0x1
#define QS_RUNNING              0x2
#define QS_BOUND                0x4

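/*
 * A queue set bundles one response queue with its free-buffer lists and
 * transmit queues, along with LRO state, per-port statistics, and the
 * flags, mutex, and condition variable used while servicing the set.
 */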
struct sge_qset {
	struct sge_rspq		rspq;
	struct sge_fl		fl[SGE_RXQ_PER_SET];
	struct lro_state        lro;
	struct sge_txq		txq[SGE_TXQ_PER_SET];
	uint32_t                txq_stopped;       /* which Tx queues are stopped */
	uint64_t                port_stats[SGE_PSTAT_MAX];
	struct port_info        *port;
	int                     idx; /* qset # */
	int                     qs_cpuid;
	int                     qs_flags;
	struct cv		qs_cv;
	struct mtx		qs_mtx;
#define QS_NAME_LEN 32
	char                    namebuf[QS_NAME_LEN];
};

struct sge {
	struct sge_qset	        qs[SGE_QSETS];
	struct mtx              reg_lock;
};

struct filter_info;

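/*
 * Per-adapter softc: PCI, DMA, and interrupt resources, the SGE queue sets,
 * MC7/MC5 and parameter state shared with the common hardware code, the
 * per-port structures, and the locks protecting register access.
 */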
struct adapter {
	device_t		dev;
	int			flags;
	TAILQ_ENTRY(adapter)    adapter_entry;

	/* PCI register resources */
	int			regs_rid;
	struct resource		*regs_res;
	int			udbs_rid;
	struct resource		*udbs_res;
	bus_space_handle_t	bh;
	bus_space_tag_t		bt;
	bus_size_t              mmio_len;
	uint32_t                link_width;

	/* DMA resources */
	bus_dma_tag_t		parent_dmat;
	bus_dma_tag_t		rx_dmat;
	bus_dma_tag_t		rx_jumbo_dmat;
	bus_dma_tag_t		tx_dmat;

	/* Interrupt resources */
	struct resource		*irq_res;
	int			irq_rid;
	void			*intr_tag;

	uint32_t		msix_regs_rid;
	struct resource		*msix_regs_res;

	struct resource		*msix_irq_res[SGE_QSETS];
	int			msix_irq_rid[SGE_QSETS];
	void			*msix_intr_tag[SGE_QSETS];
	uint8_t                 rxpkt_map[8]; /* maps RX_PKT interface values to port ids */
	uint8_t                 rrss_map[SGE_QSETS]; /* reverse RSS map table */
	uint16_t                rspq_map[RSS_TABLE_SIZE];     /* maps 7-bit cookie to qidx */
	union {
		uint8_t                 fill[SGE_QSETS];
		uint64_t                coalesce;
	} u;

#define tunq_fill u.fill
#define tunq_coalesce u.coalesce

	struct filter_info      *filters;

	/* Tasks */
	struct task		ext_intr_task;
	struct task		slow_intr_task;
	struct task		tick_task;
	struct task		process_responses_task;
	struct taskqueue	*tq;
	struct callout		cxgb_tick_ch;
	struct callout		sge_timer_ch;

	unsigned int		check_task_cnt;

	/* Register locks for use by the hardware layer */
	struct mtx		mdio_lock;
	struct mtx		elmer_lock;

	/* Bookkeeping for the hardware layer */
	struct adapter_params  params;
	unsigned int slow_intr_mask;
	unsigned long irq_stats[IRQ_NUM_STATS];

	struct sge              sge;
	struct mc7              pmrx;
	struct mc7              pmtx;
	struct mc7              cm;
	struct mc5              mc5;

	struct port_info	port[MAX_NPORTS];
	device_t		portdev[MAX_NPORTS];
	struct t3cdev           tdev;
	char                    fw_version[64];
	uint32_t                open_device_map;
	uint32_t                registered_device_map;
#ifdef USE_SX
	struct sx               lock;
#else
	struct mtx              lock;
#endif
	driver_intr_t           *cxgb_intr;
	int                     msi_count;

#define ADAPTER_LOCK_NAME_LEN	32
	char                    lockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    reglockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
};

struct t3_rx_mode {
	uint32_t                idx;
	struct port_info        *port;
};


#define MDIO_LOCK(adapter)	mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter)	mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter)	mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter)	mtx_unlock(&(adapter)->elmer_lock)


#ifdef USE_SX
#define PORT_LOCK(port)		     sx_xlock(&(port)->lock);
#define PORT_UNLOCK(port)	     sx_xunlock(&(port)->lock);
#define PORT_LOCK_INIT(port, name)   SX_INIT(&(port)->lock, name)
#define PORT_LOCK_DEINIT(port)       SX_DESTROY(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) sx_assert(&(port)->lock, SA_LOCKED)

#define ADAPTER_LOCK(adap)	           sx_xlock(&(adap)->lock);
#define ADAPTER_UNLOCK(adap)	           sx_xunlock(&(adap)->lock);
#define ADAPTER_LOCK_INIT(adap, name)      SX_INIT(&(adap)->lock, name)
#define ADAPTER_LOCK_DEINIT(adap)          SX_DESTROY(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) sx_assert(&(adap)->lock, SA_UNLOCKED)
#else
#define PORT_LOCK(port)		     mtx_lock(&(port)->lock);
#define PORT_UNLOCK(port)	     mtx_unlock(&(port)->lock);
#define PORT_LOCK_INIT(port, name)   mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port)       mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) mtx_assert(&(port)->lock, MA_OWNED)

#define ADAPTER_LOCK(adap)	mtx_lock(&(adap)->lock);
#define ADAPTER_UNLOCK(adap)	mtx_unlock(&(adap)->lock);
#define ADAPTER_LOCK_INIT(adap, name) mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap) mtx_destroy(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) mtx_assert(&(adap)->lock, MA_NOTOWNED)
#endif


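/*
 * Register and PCI config space accessors; register reads and writes go
 * through the bus_space tag/handle set up for the adapter's register BAR.
 */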
static __inline uint32_t
t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
{
	return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
}

static __inline void
t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
{
	bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
}

static __inline void
t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 4);
}

static __inline void
t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
{
	pci_write_config(adapter->dev, reg, val, 4);
}

static __inline void
t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 2);
}

static __inline void
t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
{
	pci_write_config(adapter->dev, reg, val, 2);
}

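/*
 * Iterator used with t3_init_rx_mode(): return the rm->idx'th link-level
 * multicast address on the port's interface, or NULL once the list is
 * exhausted, advancing the index on each call.
 */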
static __inline uint8_t *
t3_get_next_mcaddr(struct t3_rx_mode *rm)
{
	uint8_t *macaddr = NULL;
	struct ifnet *ifp = rm->port->ifp;
	struct ifmultiaddr *ifma;
	int i = 0;

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (i == rm->idx) {
			macaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			break;
		}
		i++;
	}
	IF_ADDR_UNLOCK(ifp);

	rm->idx++;
	return (macaddr);
}

static __inline void
t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
{
	rm->idx = 0;
	rm->port = port;
}

static __inline struct port_info *
adap2pinfo(struct adapter *adap, int idx)
{
	return &adap->port[idx];
}

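/* Prototypes for routines shared across the driver's OS, interrupt, and SGE code. */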
int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
			int speed, int duplex, int fc);
void t3_os_phymod_changed(struct adapter *adap, int port_id);
void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_offload_tx(struct t3cdev *, struct mbuf *);
void t3_os_ext_intr_handler(adapter_t *adapter);
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);


int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
    int, struct port_info *);
void t3_free_sge_resources(adapter_t *);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);
void t3b_intr(void *data);
void t3_intr_msi(void *data);
void t3_intr_msix(void *data);
int t3_encap(struct sge_qset *, struct mbuf **, int);

int t3_sge_init_adapter(adapter_t *);
int t3_sge_reset_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_sge_deinit_sw(adapter_t *);
void t3_free_tx_desc(struct sge_txq *q, int n);
void t3_free_tx_desc_all(struct sge_txq *q);

void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);

void t3_add_attach_sysctls(adapter_t *sc);
void t3_add_configured_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
    unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);

#define CXGB_TICKS(a) ((a)->params.linkpoll_period ? \
    (hz * (a)->params.linkpoll_period) / 10 : \
    (a)->params.stats_update_period * hz)

/*
 * XXX figure out how we can return this to being private to sge
 */
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))

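/*
 * Local container_of(): map a pointer to an embedded member back to the
 * structure that contains it; #undef'd again after the conversion helpers
 * below that use it.
 */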
#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))

static __inline struct sge_qset *
fl_to_qset(struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static __inline struct sge_qset *
rspq_to_qset(struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static __inline struct sge_qset *
txq_to_qset(struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

static __inline struct adapter *
tdev2adap(struct t3cdev *d)
{
	return container_of(d, struct adapter, tdev);
}

#undef container_of

#define OFFLOAD_DEVMAP_BIT 15
static inline int offload_running(adapter_t *adapter)
{
        return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
}

int cxgb_pcpu_enqueue_packet(struct ifnet *ifp, struct mbuf *m);
int cxgb_pcpu_start(struct ifnet *ifp, struct mbuf *m);
void cxgb_pcpu_shutdown_threads(struct adapter *sc);
void cxgb_pcpu_startup_threads(struct adapter *sc);

int process_responses(adapter_t *adap, struct sge_qset *qs, int budget);
void t3_free_qset(adapter_t *sc, struct sge_qset *q);
void cxgb_start(struct ifnet *ifp);
void refill_fl_service(adapter_t *adap, struct sge_fl *fl);
#endif