/**************************************************************************

Copyright (c) 2007, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.


$FreeBSD$

***************************************************************************/


#ifndef _CXGB_ADAPTER_H_
#define _CXGB_ADAPTER_H_

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/rman.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/condvar.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/bus_dma.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#ifdef CONFIG_DEFINED
#include <cxgb_osdep.h>
#include <t3cdev.h>
#include <ulp/toecore/cxgb_toedev.h>
#include <sys/mbufq.h>
#else
#include <dev/cxgb/cxgb_osdep.h>
#include <dev/cxgb/t3cdev.h>
#include <dev/cxgb/sys/mbufq.h>
#include <dev/cxgb/ulp/toecore/cxgb_toedev.h>
#endif

#define USE_SX

struct adapter;
struct sge_qset;
extern int cxgb_debug;

#ifdef DEBUG_LOCKING
#define MTX_INIT(lock, lockname, class, flags) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		mtx_init((lock), lockname, class, flags);		\
	} while (0)

#define MTX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		mtx_destroy((lock));					\
	} while (0)

#define SX_INIT(lock, lockname) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		sx_init((lock), lockname);		\
	} while (0)

#define SX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		sx_destroy((lock));					\
	} while (0)
#else
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#define SX_INIT sx_init
#define SX_DESTROY sx_destroy
#endif
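
/*
 * Usage sketch (hypothetical helper, for illustration): routing lock setup
 * through the wrappers above means a DEBUG_LOCKING build logs every lock's
 * creation and destruction along with the file and line where it happened.
 */
static __inline void
cxgb_example_named_lock_init(struct mtx *m, const char *name)
{
	/* Expands to a bare mtx_init() unless DEBUG_LOCKING is defined. */
	MTX_INIT(m, name, NULL, MTX_DEF);
}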

struct port_info {
	struct adapter	*adapter;
	struct ifnet	*ifp;
	int		if_flags;
	const struct port_type_info *port_type;
	struct cphy	phy;
	struct cmac	mac;
	struct link_config link_config;
	struct ifmedia	media;
#ifdef USE_SX
	struct sx	lock;
#else
	struct mtx	lock;
#endif
	uint8_t		port_id;
	uint8_t		tx_chan;
	uint8_t		txpkt_intf;
	uint8_t         first_qset;
	uint32_t	nqsets;

	uint8_t		hw_addr[ETHER_ADDR_LEN];
	struct taskqueue *tq;
	struct task     start_task;
	struct task	timer_reclaim_task;
	struct cdev     *port_cdev;

#define PORT_LOCK_NAME_LEN 32
#define TASKQ_NAME_LEN 32
#define PORT_NAME_LEN 32
	char            lockbuf[PORT_LOCK_NAME_LEN];
	char            taskqbuf[TASKQ_NAME_LEN];
	char            namebuf[PORT_NAME_LEN];
};

enum {				/* adapter flags */
	FULL_INIT_DONE	= (1 << 0),
	USING_MSI	= (1 << 1),
	USING_MSIX	= (1 << 2),
	QUEUES_BOUND	= (1 << 3),
	FW_UPTODATE     = (1 << 4),
	TPS_UPTODATE    = (1 << 5),
};

#define FL_Q_SIZE	4096
#define JUMBO_Q_SIZE	1024
#define RSPQ_Q_SIZE	1024
#define TX_ETH_Q_SIZE	1024

enum { TXQ_ETH = 0,
       TXQ_OFLD = 1,
       TXQ_CTRL = 2, };


/*
 * work request size in bytes
 */
#define WR_LEN (WR_FLITS * 8)
#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt))
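
/*
 * A flit is a 64-bit word, so a work request of WR_FLITS flits occupies
 * WR_FLITS * 8 bytes.  PIO_LEN is the room left in one work request for
 * inline packet data once the cpl_tx_pkt header is accounted for.
 */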


/* careful, the following are set on priv_flags and must not collide with
 * IFF_ flags!
 */
enum {
	LRO_ACTIVE = (1 << 8),
};

/* Max concurrent LRO sessions per queue set */
#define MAX_LRO_SES 8

struct t3_lro_session {
	struct mbuf *head;
	struct mbuf *tail;
	uint32_t seq;
	uint16_t ip_len;
	uint16_t mss;
	uint16_t vtag;
	uint8_t npkts;
};

struct lro_state {
	unsigned short enabled;
	unsigned short active_idx;
	unsigned int nactive;
	struct t3_lro_session sess[MAX_LRO_SES];
};

#define RX_BUNDLE_SIZE 8

struct rsp_desc;

struct sge_rspq {
	uint32_t	credits;
	uint32_t	size;
	uint32_t	cidx;
	uint32_t	gen;
	uint32_t	polling;
	uint32_t	holdoff_tmr;
	uint32_t	next_holdoff;
	uint32_t        imm_data;
	uint32_t	cntxt_id;
	uint32_t        offload_pkts;
	uint32_t        offload_bundles;
	uint32_t        pure_rsps;
	uint32_t        unhandled_irqs;

	bus_addr_t	phys_addr;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;

	struct t3_mbuf_hdr rspq_mh;
	struct rsp_desc	*desc;
	struct mtx      lock;
#define RSPQ_NAME_LEN  32
	char            lockbuf[RSPQ_NAME_LEN];
	uint32_t	rspq_dump_start;
	uint32_t	rspq_dump_count;
};

#ifndef DISABLE_MBUF_IOVEC
#define rspq_mbuf rspq_mh.mh_head
#endif

struct rx_desc;
struct rx_sw_desc;

struct sge_fl {
	uint32_t	buf_size;
	uint32_t	credits;
	uint32_t	size;
	uint32_t	cidx;
	uint32_t	pidx;
	uint32_t	gen;
	bus_addr_t	phys_addr;
	uint32_t	cntxt_id;
	uint64_t	empty;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;
	bus_dma_tag_t   entry_tag;
	uma_zone_t      zone;
	struct rx_desc	*desc;
	struct rx_sw_desc *sdesc;
	int             type;
};

struct tx_desc;
struct tx_sw_desc;

#define TXQ_TRANSMITTING    0x1

struct sge_txq {
	uint64_t	flags;
	uint32_t	in_use;
	uint32_t	size;
	uint32_t	processed;
	uint32_t	cleaned;
	uint32_t	stop_thres;
	uint32_t	cidx;
	uint32_t	pidx;
	uint32_t	gen;
	uint32_t	unacked;
	struct tx_desc	*desc;
	struct tx_sw_desc *sdesc;
	uint32_t	token;
	bus_addr_t	phys_addr;
	struct task     qresume_task;
	struct task     qreclaim_task;
	struct port_info *port;
	uint32_t	cntxt_id;
	uint64_t	stops;
	uint64_t	restarts;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;
	bus_dma_tag_t   entry_tag;
	struct mbuf_head sendq;
	/*
	 * cleanq should really be a buf_ring to avoid extra
	 * mbuf touches
	 */
	struct mbuf_head cleanq;
	struct buf_ring txq_mr;
	struct mbuf     *immpkt;
	uint32_t        txq_drops;
	uint32_t        txq_skipped;
	uint32_t        txq_coalesced;
	uint32_t        txq_enqueued;
	uint32_t	txq_dump_start;
	uint32_t	txq_dump_count;
	unsigned long   txq_frees;
	struct mtx      lock;
	struct sg_ent  txq_sgl[TX_MAX_SEGS / 2 + 1];
	#define TXQ_NAME_LEN  32
	char            lockbuf[TXQ_NAME_LEN];
};


enum {
	SGE_PSTAT_TSO,              /* # of TSO requests */
	SGE_PSTAT_RX_CSUM_GOOD,     /* # of successful RX csum offloads */
	SGE_PSTAT_TX_CSUM,          /* # of TX checksum offloads */
	SGE_PSTAT_VLANEX,           /* # of VLAN tag extractions */
	SGE_PSTAT_VLANINS,          /* # of VLAN tag insertions */
	SGE_PSTATS_LRO_QUEUED,	    /* # of LRO appended packets */
	SGE_PSTATS_LRO_FLUSHED,	    /* # of LRO flushed packets */
	SGE_PSTATS_LRO_X_STREAMS,   /* # of exceeded LRO contexts */
};

#define SGE_PSTAT_MAX (SGE_PSTATS_LRO_X_STREAMS+1)

#define QS_EXITING              0x1
#define QS_RUNNING              0x2
#define QS_BOUND                0x4

struct sge_qset {
	struct sge_rspq		rspq;
	struct sge_fl		fl[SGE_RXQ_PER_SET];
	struct lro_state        lro;
	struct sge_txq		txq[SGE_TXQ_PER_SET];
	uint32_t                txq_stopped;       /* which Tx queues are stopped */
	uint64_t                port_stats[SGE_PSTAT_MAX];
	struct port_info        *port;
	int                     idx; /* qset # */
	int                     qs_cpuid;
	int                     qs_flags;
	struct cv		qs_cv;
	struct mtx		qs_mtx;
#define QS_NAME_LEN 32
	char                    namebuf[QS_NAME_LEN];
};

struct sge {
	struct sge_qset	        qs[SGE_QSETS];
	struct mtx              reg_lock;
};

struct filter_info;

struct adapter {
	device_t		dev;
	int			flags;
	TAILQ_ENTRY(adapter)    adapter_entry;

	/* PCI register resources */
	int			regs_rid;
	struct resource		*regs_res;
	bus_space_handle_t	bh;
	bus_space_tag_t		bt;
	bus_size_t              mmio_len;
	uint32_t                link_width;

	/* DMA resources */
	bus_dma_tag_t		parent_dmat;
	bus_dma_tag_t		rx_dmat;
	bus_dma_tag_t		rx_jumbo_dmat;
	bus_dma_tag_t		tx_dmat;

	/* Interrupt resources */
	struct resource		*irq_res;
	int			irq_rid;
	void			*intr_tag;

	uint32_t		msix_regs_rid;
	struct resource		*msix_regs_res;

	struct resource		*msix_irq_res[SGE_QSETS];
	int			msix_irq_rid[SGE_QSETS];
	void			*msix_intr_tag[SGE_QSETS];
	uint8_t                 rxpkt_map[8]; /* maps RX_PKT interface values to port ids */
	uint8_t                 rrss_map[SGE_QSETS]; /* reverse RSS map table */
	uint16_t                rspq_map[RSS_TABLE_SIZE];     /* maps 7-bit cookie to qidx */
	union {
		uint8_t                 fill[SGE_QSETS];
		uint64_t                coalesce;
	} u;

#define tunq_fill u.fill
#define tunq_coalesce u.coalesce

	struct filter_info      *filters;

	/* Tasks */
	struct task		ext_intr_task;
	struct task		slow_intr_task;
	struct task		tick_task;
	struct task		process_responses_task;
	struct taskqueue	*tq;
	struct callout		cxgb_tick_ch;
	struct callout		sge_timer_ch;

	/* Register lock for use by the hardware layer */
	struct mtx		mdio_lock;
	struct mtx		elmer_lock;

	/* Bookkeeping for the hardware layer */
	struct adapter_params  params;
	unsigned int slow_intr_mask;
	unsigned long irq_stats[IRQ_NUM_STATS];

	struct sge              sge;
	struct mc7              pmrx;
	struct mc7              pmtx;
	struct mc7              cm;
	struct mc5              mc5;

	struct port_info	port[MAX_NPORTS];
	device_t		portdev[MAX_NPORTS];
	struct t3cdev           tdev;
	char                    fw_version[64];
	uint32_t                open_device_map;
	uint32_t                registered_device_map;
#ifdef USE_SX
	struct sx               lock;
#else
	struct mtx              lock;
#endif
	driver_intr_t           *cxgb_intr;
	int                     msi_count;

#define ADAPTER_LOCK_NAME_LEN	32
	char                    lockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    reglockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
};

struct t3_rx_mode {
	uint32_t                idx;
	struct port_info        *port;
};


#define MDIO_LOCK(adapter)	mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter)	mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter)	mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter)	mtx_unlock(&(adapter)->elmer_lock)


#ifdef USE_SX
#define PORT_LOCK(port)		     sx_xlock(&(port)->lock);
#define PORT_UNLOCK(port)	     sx_xunlock(&(port)->lock);
#define PORT_LOCK_INIT(port, name)   SX_INIT(&(port)->lock, name)
#define PORT_LOCK_DEINIT(port)       SX_DESTROY(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) sx_assert(&(port)->lock, SA_LOCKED)

#define ADAPTER_LOCK(adap)	           sx_xlock(&(adap)->lock);
#define ADAPTER_UNLOCK(adap)	           sx_xunlock(&(adap)->lock);
#define ADAPTER_LOCK_INIT(adap, name)      SX_INIT(&(adap)->lock, name)
#define ADAPTER_LOCK_DEINIT(adap)          SX_DESTROY(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) sx_assert(&(adap)->lock, SA_UNLOCKED)
#else
#define PORT_LOCK(port)		     mtx_lock(&(port)->lock);
#define PORT_UNLOCK(port)	     mtx_unlock(&(port)->lock);
#define PORT_LOCK_INIT(port, name)   mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port)       mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) mtx_assert(&(port)->lock, MA_OWNED)

#define ADAPTER_LOCK(adap)	mtx_lock(&(adap)->lock);
#define ADAPTER_UNLOCK(adap)	mtx_unlock(&(adap)->lock);
#define ADAPTER_LOCK_INIT(adap, name) mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap) mtx_destroy(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) mtx_assert(&(adap)->lock, MA_NOTOWNED)
#endif
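
/*
 * Locking sketch (hypothetical helper): per-port fields such as if_flags are
 * meant to be modified with the port lock held, so an update is bracketed by
 * PORT_LOCK()/PORT_UNLOCK().
 */
static __inline void
cxgb_example_update_if_flags(struct port_info *p, int flags)
{
	PORT_LOCK(p);
	p->if_flags = flags;
	PORT_UNLOCK(p);
}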


static __inline uint32_t
t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
{
	return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
}

static __inline void
t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
{
	bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
}
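
/*
 * Sketch (hypothetical helper): the two accessors above compose into the
 * usual read-modify-write idiom for any T3 register offset.
 */
static __inline void
t3_example_set_reg_bits(adapter_t *adapter, uint32_t reg_addr, uint32_t bits)
{
	t3_write_reg(adapter, reg_addr, t3_read_reg(adapter, reg_addr) | bits);
}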

static __inline void
t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 4);
}

static __inline void
t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
{
	pci_write_config(adapter->dev, reg, val, 4);
}

static __inline void
t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 2);
}

static __inline void
t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
{
	pci_write_config(adapter->dev, reg, val, 2);
}

static __inline uint8_t *
t3_get_next_mcaddr(struct t3_rx_mode *rm)
{
	uint8_t *macaddr = NULL;

	if (rm->idx == 0)
		macaddr = (uint8_t *)rm->port->hw_addr;

	rm->idx++;
	return (macaddr);
}

static __inline void
t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
{
	rm->idx = 0;
	rm->port = port;
}
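
/*
 * Usage sketch (hypothetical helper): callers walk the receive-address list
 * by initializing a t3_rx_mode and calling t3_get_next_mcaddr() until it
 * returns NULL; with the implementation above only the port's own hw_addr
 * is reported.
 */
static __inline int
cxgb_example_count_rx_addrs(struct port_info *port)
{
	struct t3_rx_mode rm;
	int count = 0;

	t3_init_rx_mode(&rm, port);
	while (t3_get_next_mcaddr(&rm) != NULL)
		count++;
	return (count);
}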

static __inline struct port_info *
adap2pinfo(struct adapter *adap, int idx)
{
	return &adap->port[idx];
}

int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
			int speed, int duplex, int fc);
void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_offload_tx(struct t3cdev *, struct mbuf *);
void t3_os_ext_intr_handler(adapter_t *adapter);
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);


int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
    int, struct port_info *);
void t3_free_sge_resources(adapter_t *);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);
void t3b_intr(void *data);
void t3_intr_msi(void *data);
void t3_intr_msix(void *data);
int t3_encap(struct sge_qset *, struct mbuf **, int);

int t3_sge_init_adapter(adapter_t *);
int t3_sge_reset_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_sge_deinit_sw(adapter_t *);
void t3_free_tx_desc(struct sge_txq *q, int n);
void t3_free_tx_desc_all(struct sge_txq *q);

void t3_rx_eth_lro(adapter_t *adap, struct sge_rspq *rq, struct mbuf *m,
    int ethpad, uint32_t rss_hash, uint32_t rss_csum, int lro);
void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);
void t3_lro_flush(adapter_t *adap, struct sge_qset *qs, struct lro_state *state);

void t3_add_attach_sysctls(adapter_t *sc);
void t3_add_configured_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
    unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
/*
 * XXX figure out how we can return this to being private to sge
 */
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))
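
/*
 * Sketch (hypothetical helper) of how a transmit cleanup path consumes
 * desc_reclaimable(): free the completed descriptors and keep the queue
 * bookkeeping in step with what was released.
 */
static __inline void
cxgb_example_reclaim_completed_tx(struct sge_txq *q)
{
	int reclaim = desc_reclaimable(q);

	if (reclaim > 0) {
		t3_free_tx_desc(q, reclaim);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
}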

#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))

static __inline struct sge_qset *
fl_to_qset(struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static __inline struct sge_qset *
rspq_to_qset(struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static __inline struct sge_qset *
txq_to_qset(struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}
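
/*
 * Sketch (hypothetical helper): the container_of-based accessors let queue
 * handling code recover the owning queue set, and from there the port, given
 * only a queue pointer.
 */
static __inline struct port_info *
cxgb_example_rspq_to_port(struct sge_rspq *q)
{
	return (rspq_to_qset(q)->port);
}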

static __inline struct adapter *
tdev2adap(struct t3cdev *d)
{
	return container_of(d, struct adapter, tdev);
}

#undef container_of

#define OFFLOAD_DEVMAP_BIT 15
static inline int offload_running(adapter_t *adapter)
{
	return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
}

int cxgb_pcpu_enqueue_packet(struct ifnet *ifp, struct mbuf *m);
int cxgb_pcpu_start(struct ifnet *ifp, struct mbuf *m);
void cxgb_pcpu_shutdown_threads(struct adapter *sc);
void cxgb_pcpu_startup_threads(struct adapter *sc);

int process_responses(adapter_t *adap, struct sge_qset *qs, int budget);
void t3_free_qset(adapter_t *sc, struct sge_qset *q);
void cxgb_start(struct ifnet *ifp);
void refill_fl_service(adapter_t *adap, struct sge_fl *fl);
#endif