/**************************************************************************

Copyright (c) 2007-2008, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

$FreeBSD$

***************************************************************************/


#ifndef _CXGB_ADAPTER_H_
#define _CXGB_ADAPTER_H_

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/rman.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/condvar.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_dl.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/bus_dma.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <cxgb_osdep.h>
#include <t3cdev.h>
#include <sys/mbufq.h>

#ifdef LRO_SUPPORTED
#include <netinet/tcp_lro.h>
#endif

#define USE_SX

struct adapter;
struct sge_qset;
extern int cxgb_debug;

#ifdef DEBUG_LOCKING
#define MTX_INIT(lock, lockname, class, flags) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		mtx_init((lock), lockname, class, flags);		\
	} while (0)

#define MTX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		mtx_destroy((lock));					\
	} while (0)

#define SX_INIT(lock, lockname) \
	do { \
		printf("initializing %s at %s:%d\n", lockname, __FILE__, __LINE__); \
		sx_init((lock), lockname);		\
	} while (0)

#define SX_DESTROY(lock) \
	do { \
		printf("destroying %s at %s:%d\n", (lock)->lock_object.lo_name, __FILE__, __LINE__); \
		sx_destroy((lock));					\
	} while (0)
#else
#define MTX_INIT mtx_init
#define MTX_DESTROY mtx_destroy
#define SX_INIT sx_init
#define SX_DESTROY sx_destroy
#endif
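
/*
 * Usage sketch (illustrative only): the wrappers take the same arguments as
 * the underlying mtx(9)/sx(9) primitives, so callers can toggle
 * DEBUG_LOCKING without source changes.  The lock name below is hypothetical.
 *
 *	struct mtx example_lock;
 *
 *	MTX_INIT(&example_lock, "example", NULL, MTX_DEF);
 *	mtx_lock(&example_lock);
 *	mtx_unlock(&example_lock);
 *	MTX_DESTROY(&example_lock);
 */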

struct port_info {
	struct adapter	*adapter;
	struct ifnet	*ifp;
	int		if_flags;
	const struct port_type_info *port_type;
	struct cphy	phy;
	struct cmac	mac;
	struct link_config link_config;
	struct ifmedia	media;
#ifdef USE_SX
	struct sx	lock;
#else
	struct mtx	lock;
#endif
	uint8_t		port_id;
	uint8_t		tx_chan;
	uint8_t		txpkt_intf;
	uint8_t         first_qset;
	uint32_t	nqsets;

	uint8_t		hw_addr[ETHER_ADDR_LEN];
	struct task	timer_reclaim_task;
	struct cdev     *port_cdev;

#define PORT_LOCK_NAME_LEN 32
#define PORT_NAME_LEN 32
	char            lockbuf[PORT_LOCK_NAME_LEN];
	char            namebuf[PORT_NAME_LEN];
};

enum {				/* adapter flags */
	FULL_INIT_DONE	= (1 << 0),
	USING_MSI	= (1 << 1),
	USING_MSIX	= (1 << 2),
	QUEUES_BOUND	= (1 << 3),
	FW_UPTODATE     = (1 << 4),
	TPS_UPTODATE    = (1 << 5),
	CXGB_SHUTDOWN	= (1 << 6),
	CXGB_OFLD_INIT	= (1 << 7),
	TP_PARITY_INIT  = (1 << 8),
};

#define FL_Q_SIZE	4096
#define JUMBO_Q_SIZE	1024
#define RSPQ_Q_SIZE	1024
#define TX_ETH_Q_SIZE	1024

enum { TXQ_ETH = 0,
       TXQ_OFLD = 1,
       TXQ_CTRL = 2, };


/*
 * work request size in bytes
 */
#define WR_LEN (WR_FLITS * 8)
#define PIO_LEN (WR_LEN - sizeof(struct cpl_tx_pkt_lso))

#ifdef LRO_SUPPORTED
struct lro_state {
	unsigned short enabled;
	struct lro_ctrl ctrl;
};
#endif

#define RX_BUNDLE_SIZE 8

struct rsp_desc;

struct sge_rspq {
	uint32_t	credits;
	uint32_t	size;
	uint32_t	cidx;
	uint32_t	gen;
	uint32_t	polling;
	uint32_t	holdoff_tmr;
	uint32_t	next_holdoff;
	uint32_t        imm_data;
	uint32_t        async_notif;
	uint32_t	cntxt_id;
	uint32_t        offload_pkts;
	uint32_t        offload_bundles;
	uint32_t        pure_rsps;
	uint32_t        unhandled_irqs;

	bus_addr_t	phys_addr;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;

	struct t3_mbuf_hdr rspq_mh;
	struct rsp_desc	*desc;
	struct mtx      lock;
#define RSPQ_NAME_LEN  32
	char            lockbuf[RSPQ_NAME_LEN];
	uint32_t	rspq_dump_start;
	uint32_t	rspq_dump_count;
};

#ifndef DISABLE_MBUF_IOVEC
#define rspq_mbuf rspq_mh.mh_head
#endif

struct rx_desc;
struct rx_sw_desc;

struct sge_fl {
	uint32_t	buf_size;
	uint32_t	credits;
	uint32_t	size;
	uint32_t	cidx;
	uint32_t	pidx;
	uint32_t	gen;
	bus_addr_t	phys_addr;
	uint32_t	cntxt_id;
	uint64_t	empty;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;
	bus_dma_tag_t   entry_tag;
	uma_zone_t      zone;
	struct rx_desc	*desc;
	struct rx_sw_desc *sdesc;
	int             type;
};

struct tx_desc;
struct tx_sw_desc;

#define TXQ_TRANSMITTING    0x1

struct sge_txq {
	uint64_t	flags;
	uint32_t	in_use;
	uint32_t	size;
	uint32_t	processed;
	uint32_t	cleaned;
	uint32_t	stop_thres;
	uint32_t	cidx;
	uint32_t	pidx;
	uint32_t	gen;
	uint32_t	unacked;
	struct tx_desc	*desc;
	struct tx_sw_desc *sdesc;
	uint32_t	token;
	bus_addr_t	phys_addr;
	struct task     qresume_task;
	struct task     qreclaim_task;
	struct port_info *port;
	uint32_t	cntxt_id;
	uint64_t	stops;
	uint64_t	restarts;
	bus_dma_tag_t	desc_tag;
	bus_dmamap_t	desc_map;
	bus_dma_tag_t   entry_tag;
	struct mbuf_head sendq;
	/*
	 * cleanq should really be a buf_ring to avoid extra
	 * mbuf touches
	 */
	struct mbuf_head cleanq;
	struct buf_ring txq_mr;
	struct mbuf     *immpkt;
	uint32_t        txq_drops;
	uint32_t        txq_skipped;
	uint32_t        txq_coalesced;
	uint32_t        txq_enqueued;
	uint32_t	txq_dump_start;
	uint32_t	txq_dump_count;
	unsigned long   txq_frees;
	struct mtx      lock;
	struct sg_ent  txq_sgl[TX_MAX_SEGS / 2 + 1];
	#define TXQ_NAME_LEN  32
	char            lockbuf[TXQ_NAME_LEN];
};


enum {
	SGE_PSTAT_TSO,              /* # of TSO requests */
	SGE_PSTAT_RX_CSUM_GOOD,     /* # of successful RX csum offloads */
	SGE_PSTAT_TX_CSUM,          /* # of TX checksum offloads */
	SGE_PSTAT_VLANEX,           /* # of VLAN tag extractions */
	SGE_PSTAT_VLANINS,          /* # of VLAN tag insertions */
};

#define SGE_PSTAT_MAX (SGE_PSTAT_VLANINS+1)

#define QS_EXITING              0x1
#define QS_RUNNING              0x2
#define QS_BOUND                0x4

struct sge_qset {
	struct sge_rspq		rspq;
	struct sge_fl		fl[SGE_RXQ_PER_SET];
#ifdef LRO_SUPPORTED
	struct lro_state        lro;
#endif
	struct sge_txq		txq[SGE_TXQ_PER_SET];
	uint32_t                txq_stopped;       /* which Tx queues are stopped */
	uint64_t                port_stats[SGE_PSTAT_MAX];
	struct port_info        *port;
	int                     idx; /* qset # */
	int                     qs_cpuid;
	int                     qs_flags;
	struct cv		qs_cv;
	struct mtx		qs_mtx;
#define QS_NAME_LEN 32
	char                    namebuf[QS_NAME_LEN];
};

struct sge {
	struct sge_qset	        qs[SGE_QSETS];
	struct mtx              reg_lock;
};

struct filter_info;

struct adapter {
	device_t		dev;
	int			flags;
	TAILQ_ENTRY(adapter)    adapter_entry;

	/* PCI register resources */
	int			regs_rid;
	struct resource		*regs_res;
	int			udbs_rid;
	struct resource		*udbs_res;
	bus_space_handle_t	bh;
	bus_space_tag_t		bt;
	bus_size_t              mmio_len;
	uint32_t                link_width;

	/* DMA resources */
	bus_dma_tag_t		parent_dmat;
	bus_dma_tag_t		rx_dmat;
	bus_dma_tag_t		rx_jumbo_dmat;
	bus_dma_tag_t		tx_dmat;

	/* Interrupt resources */
	struct resource		*irq_res;
	int			irq_rid;
	void			*intr_tag;

	uint32_t		msix_regs_rid;
	struct resource		*msix_regs_res;

	struct resource		*msix_irq_res[SGE_QSETS];
	int			msix_irq_rid[SGE_QSETS];
	void			*msix_intr_tag[SGE_QSETS];
	uint8_t                 rxpkt_map[8]; /* maps RX_PKT interface values to port ids */
	uint8_t                 rrss_map[SGE_QSETS]; /* reverse RSS map table */
	uint16_t                rspq_map[RSS_TABLE_SIZE];     /* maps 7-bit cookie to qidx */
	union {
		uint8_t                 fill[SGE_QSETS];
		uint64_t                coalesce;
	} u;

#define tunq_fill u.fill
#define tunq_coalesce u.coalesce

	struct filter_info      *filters;

	/* Tasks */
	struct task		ext_intr_task;
	struct task		slow_intr_task;
	struct task		tick_task;
	struct taskqueue	*tq;
	struct callout		cxgb_tick_ch;
	struct callout		sge_timer_ch;

	unsigned int		check_task_cnt;

	/* Register lock for use by the hardware layer */
	struct mtx		mdio_lock;
	struct mtx		elmer_lock;

	/* Bookkeeping for the hardware layer */
	struct adapter_params  params;
	unsigned int slow_intr_mask;
	unsigned long irq_stats[IRQ_NUM_STATS];

	struct sge              sge;
	struct mc7              pmrx;
	struct mc7              pmtx;
	struct mc7              cm;
	struct mc5              mc5;

	struct port_info	port[MAX_NPORTS];
	device_t		portdev[MAX_NPORTS];
	struct t3cdev           tdev;
	char                    fw_version[64];
	uint32_t                open_device_map;
	uint32_t                registered_device_map;
#ifdef USE_SX
	struct sx               lock;
#else
	struct mtx              lock;
#endif
	driver_intr_t           *cxgb_intr;
	int                     msi_count;

#define ADAPTER_LOCK_NAME_LEN	32
	char                    lockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    reglockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    mdiolockbuf[ADAPTER_LOCK_NAME_LEN];
	char                    elmerlockbuf[ADAPTER_LOCK_NAME_LEN];
};

struct t3_rx_mode {
	uint32_t                idx;
	struct port_info        *port;
};


#define MDIO_LOCK(adapter)	mtx_lock(&(adapter)->mdio_lock)
#define MDIO_UNLOCK(adapter)	mtx_unlock(&(adapter)->mdio_lock)
#define ELMR_LOCK(adapter)	mtx_lock(&(adapter)->elmer_lock)
#define ELMR_UNLOCK(adapter)	mtx_unlock(&(adapter)->elmer_lock)


#ifdef USE_SX
#define PORT_LOCK(port)		     sx_xlock(&(port)->lock)
#define PORT_UNLOCK(port)	     sx_xunlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name)   SX_INIT(&(port)->lock, name)
#define PORT_LOCK_DEINIT(port)       SX_DESTROY(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) sx_assert(&(port)->lock, SA_LOCKED)

#define ADAPTER_LOCK(adap)	           sx_xlock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap)	           sx_xunlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name)      SX_INIT(&(adap)->lock, name)
#define ADAPTER_LOCK_DEINIT(adap)          SX_DESTROY(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) sx_assert(&(adap)->lock, SA_UNLOCKED)
#else
#define PORT_LOCK(port)		     mtx_lock(&(port)->lock)
#define PORT_UNLOCK(port)	     mtx_unlock(&(port)->lock)
#define PORT_LOCK_INIT(port, name)   mtx_init(&(port)->lock, name, 0, MTX_DEF)
#define PORT_LOCK_DEINIT(port)       mtx_destroy(&(port)->lock)
#define PORT_LOCK_ASSERT_OWNED(port) mtx_assert(&(port)->lock, MA_OWNED)

#define ADAPTER_LOCK(adap)	mtx_lock(&(adap)->lock)
#define ADAPTER_UNLOCK(adap)	mtx_unlock(&(adap)->lock)
#define ADAPTER_LOCK_INIT(adap, name) mtx_init(&(adap)->lock, name, 0, MTX_DEF)
#define ADAPTER_LOCK_DEINIT(adap) mtx_destroy(&(adap)->lock)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(adap) mtx_assert(&(adap)->lock, MA_NOTOWNED)
#endif
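
/*
 * Illustrative sketch only: the usual pattern for the macros above is to
 * take the adapter lock around adapter-wide state transitions and the
 * per-port lock around ifnet/ioctl work.  The function below is
 * hypothetical and not part of the driver:
 *
 *	static void
 *	example_port_up(struct port_info *p)
 *	{
 *		ADAPTER_LOCK_ASSERT_NOTOWNED(p->adapter);
 *		PORT_LOCK(p);
 *		p->if_flags |= IFF_UP;
 *		PORT_UNLOCK(p);
 *	}
 *
 * PORT_LOCK_INIT()/PORT_LOCK_DEINIT() are expected to bracket the port's
 * lifetime, typically using port_info::lockbuf for the lock name.
 */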


static __inline uint32_t
t3_read_reg(adapter_t *adapter, uint32_t reg_addr)
{
	return (bus_space_read_4(adapter->bt, adapter->bh, reg_addr));
}

static __inline void
t3_write_reg(adapter_t *adapter, uint32_t reg_addr, uint32_t val)
{
	bus_space_write_4(adapter->bt, adapter->bh, reg_addr, val);
}
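
/*
 * Example (not taken from the driver): a read-modify-write of an adapter
 * register using the accessors above.  A_EXAMPLE_REG and F_EXAMPLE_BIT are
 * placeholder names, not real T3 register definitions.
 *
 *	uint32_t v = t3_read_reg(sc, A_EXAMPLE_REG);
 *	t3_write_reg(sc, A_EXAMPLE_REG, v | F_EXAMPLE_BIT);
 */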

static __inline void
t3_os_pci_read_config_4(adapter_t *adapter, int reg, uint32_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 4);
}

static __inline void
t3_os_pci_write_config_4(adapter_t *adapter, int reg, uint32_t val)
{
	pci_write_config(adapter->dev, reg, val, 4);
}

static __inline void
t3_os_pci_read_config_2(adapter_t *adapter, int reg, uint16_t *val)
{
	*val = pci_read_config(adapter->dev, reg, 2);
}

static __inline void
t3_os_pci_write_config_2(adapter_t *adapter, int reg, uint16_t val)
{
	pci_write_config(adapter->dev, reg, val, 2);
}
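
/*
 * These are thin wrappers around pci_read_config(9)/pci_write_config(9).
 * Illustrative use, e.g. enabling bus mastering via the command register:
 *
 *	uint16_t cmd;
 *
 *	t3_os_pci_read_config_2(sc, PCIR_COMMAND, &cmd);
 *	t3_os_pci_write_config_2(sc, PCIR_COMMAND, cmd | PCIM_CMD_BUSMASTEREN);
 */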

static __inline uint8_t *
t3_get_next_mcaddr(struct t3_rx_mode *rm)
{
	uint8_t *macaddr = NULL;
	struct ifnet *ifp = rm->port->ifp;
	struct ifmultiaddr *ifma;
	int i = 0;

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (i == rm->idx) {
			macaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			break;
		}
		i++;
	}
	IF_ADDR_UNLOCK(ifp);

	rm->idx++;
	return (macaddr);
}

static __inline void
t3_init_rx_mode(struct t3_rx_mode *rm, struct port_info *port)
{
	rm->idx = 0;
	rm->port = port;
}
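
/*
 * The rx_mode helpers form a simple cursor over the interface's link-layer
 * multicast list; t3_get_next_mcaddr() returns NULL once the list is
 * exhausted.  A hedged usage sketch, where program_mcaddr() is a
 * hypothetical consumer that would program the address into the MAC:
 *
 *	struct t3_rx_mode rm;
 *	uint8_t *ea;
 *
 *	t3_init_rx_mode(&rm, pi);
 *	while ((ea = t3_get_next_mcaddr(&rm)) != NULL)
 *		program_mcaddr(ea);
 */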

static __inline struct port_info *
adap2pinfo(struct adapter *adap, int idx)
{
	return &adap->port[idx];
}

int t3_os_find_pci_capability(adapter_t *adapter, int cap);
int t3_os_pci_save_state(struct adapter *adapter);
int t3_os_pci_restore_state(struct adapter *adapter);
void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status,
			int speed, int duplex, int fc);
void t3_os_phymod_changed(struct adapter *adap, int port_id);
void t3_sge_err_intr_handler(adapter_t *adapter);
int t3_offload_tx(struct t3cdev *, struct mbuf *);
void t3_os_ext_intr_handler(adapter_t *adapter);
void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]);
int t3_mgmt_tx(adapter_t *adap, struct mbuf *m);


int t3_sge_alloc(struct adapter *);
int t3_sge_free(struct adapter *);
int t3_sge_alloc_qset(adapter_t *, uint32_t, int, int, const struct qset_params *,
    int, struct port_info *);
void t3_free_sge_resources(adapter_t *);
void t3_sge_start(adapter_t *);
void t3_sge_stop(adapter_t *);
void t3b_intr(void *data);
void t3_intr_msi(void *data);
void t3_intr_msix(void *data);
int t3_encap(struct sge_qset *, struct mbuf **, int);

int t3_sge_init_adapter(adapter_t *);
int t3_sge_reset_adapter(adapter_t *);
int t3_sge_init_port(struct port_info *);
void t3_sge_deinit_sw(adapter_t *);
void t3_free_tx_desc(struct sge_txq *q, int n);
void t3_free_tx_desc_all(struct sge_txq *q);

void t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad);

void t3_add_attach_sysctls(adapter_t *sc);
void t3_add_configured_sysctls(adapter_t *sc);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
    unsigned char *data);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);

#define CXGB_TICKS(a) ((a)->params.linkpoll_period ? \
    (hz * (a)->params.linkpoll_period) / 10 : \
    (a)->params.stats_update_period * hz)
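
/*
 * Worked example, assuming linkpoll_period is expressed in tenths of a
 * second and stats_update_period in whole seconds (units inferred from the
 * formula above): with hz = 1000 and linkpoll_period = 10, CXGB_TICKS(sc)
 * evaluates to (1000 * 10) / 10 = 1000 ticks, i.e. a one-second callout.
 */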

/*
 * XXX figure out how we can return this to being private to sge
 */
#define desc_reclaimable(q) ((int)((q)->processed - (q)->cleaned - TX_MAX_DESC))

#define container_of(p, stype, field) ((stype *)(((uint8_t *)(p)) - offsetof(stype, field)))

static __inline struct sge_qset *
fl_to_qset(struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static __inline struct sge_qset *
rspq_to_qset(struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static __inline struct sge_qset *
txq_to_qset(struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

static __inline struct adapter *
tdev2adap(struct t3cdev *d)
{
	return container_of(d, struct adapter, tdev);
}

#undef container_of
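
/*
 * The converters above recover the enclosing queue set from a pointer to one
 * of its embedded queues; the qset in turn links back to its port and
 * adapter.  A minimal sketch (variable names are illustrative):
 *
 *	struct sge_qset *qs = rspq_to_qset(rq);
 *	struct port_info *pi = qs->port;
 *	struct adapter *sc = pi->adapter;
 */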

#define OFFLOAD_DEVMAP_BIT 15
static inline int offload_running(adapter_t *adapter)
{
        return isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT);
}

int cxgb_pcpu_enqueue_packet(struct ifnet *ifp, struct mbuf *m);
int cxgb_pcpu_start(struct ifnet *ifp, struct mbuf *m);
void cxgb_pcpu_shutdown_threads(struct adapter *sc);
void cxgb_pcpu_startup_threads(struct adapter *sc);

int process_responses(adapter_t *adap, struct sge_qset *qs, int budget);
void t3_free_qset(adapter_t *sc, struct sge_qset *q);
void cxgb_start(struct ifnet *ifp);
void refill_fl_service(adapter_t *adap, struct sge_fl *fl);
#endif