/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_IXGBE_SW_H
#define	_IXGBE_SW_H

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/stropts.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strlog.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/kstat.h>
#include <sys/modctl.h>
#include <sys/errno.h>
#include <sys/dlpi.h>
#include <sys/mac_provider.h>
#include <sys/mac_ether.h>
#include <sys/vlan.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/pci.h>
#include <sys/pcie.h>
#include <sys/sdt.h>
#include <sys/ethernet.h>
#include <sys/pattr.h>
#include <sys/strsubr.h>
#include <sys/netlb.h>
#include <sys/random.h>
#include <inet/common.h>
#include <inet/tcp.h>
#include <inet/ip.h>
#include <inet/mi.h>
#include <inet/nd.h>
#include <sys/bitmap.h>
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>
#include "ixgbe_api.h"

#define	MODULE_NAME			"ixgbe"	/* module name */

#define	IXGBE_FAILURE			DDI_FAILURE

#define	IXGBE_UNKNOWN			0x00
#define	IXGBE_INITIALIZED		0x01
#define	IXGBE_STARTED			0x02
#define	IXGBE_SUSPENDED			0x04

#define	MAX_NUM_UNICAST_ADDRESSES	0x10
#define	MAX_NUM_MULTICAST_ADDRESSES	0x1000
#define	IXGBE_INTR_NONE			0
#define	IXGBE_INTR_MSIX			1
#define	IXGBE_INTR_MSI			2
#define	IXGBE_INTR_LEGACY		3

#define	IXGBE_POLL_NULL			-1

#define	MAX_COOKIE			18
#define	MIN_NUM_TX_DESC			2

#define	IXGBE_TX_DESC_LIMIT		32	/* tx desc limitation */

#define	IXGBE_ADAPTER_REGSET		1	/* map adapter registers */

/*
 * MAX_xx_QUEUE_NUM and MAX_INTR_VECTOR values need to be the maximum of all
 * supported silicon types.
 */
#define	MAX_TX_QUEUE_NUM		128
#define	MAX_RX_QUEUE_NUM		128
#define	MAX_INTR_VECTOR			64

/*
 * Maximum values for user configurable parameters
 */
#define	MAX_RX_GROUP_NUM		1
#define	MAX_TX_RING_SIZE		4096
#define	MAX_RX_RING_SIZE		4096

#define	MAX_MTU				16366
#define	MAX_RX_LIMIT_PER_INTR		4096
#define	MAX_INTR_THROTTLING_82598	65535
#define	MAX_INTR_THROTTLING_82599	0x7FC

#define	MAX_RX_COPY_THRESHOLD		9216
#define	MAX_TX_COPY_THRESHOLD		9216
#define	MAX_TX_RECYCLE_THRESHOLD	DEFAULT_TX_RING_SIZE
#define	MAX_TX_OVERLOAD_THRESHOLD	DEFAULT_TX_RING_SIZE
#define	MAX_TX_RESCHED_THRESHOLD	DEFAULT_TX_RING_SIZE

/*
 * Minimum values for user configurable parameters
 */
#define	MIN_RX_GROUP_NUM		1
#define	MIN_TX_RING_SIZE		64
#define	MIN_RX_RING_SIZE		64

#define	MIN_MTU				ETHERMIN
#define	MIN_RX_LIMIT_PER_INTR		16
#define	MIN_INTR_THROTTLING		0
#define	MIN_TX_COPY_THRESHOLD		0
#define	MIN_RX_COPY_THRESHOLD		0
#define	MIN_TX_RECYCLE_THRESHOLD	MIN_NUM_TX_DESC
#define	MIN_TX_OVERLOAD_THRESHOLD	MIN_NUM_TX_DESC
#define	MIN_TX_RESCHED_THRESHOLD	MIN_NUM_TX_DESC

/*
 * Default values for user configurable parameters
 */
#define	DEFAULT_RX_GROUP_NUM		1
#define	DEFAULT_TX_RING_SIZE		1024
#define	DEFAULT_RX_RING_SIZE		1024

#define	DEFAULT_MTU			ETHERMTU
#define	DEFAULT_RX_LIMIT_PER_INTR	256
#define	DEFAULT_INTR_THROTTLING_82598	200	/* In unit of 256 nsec */
#define	DEFAULT_INTR_THROTTLING_82599	26	/* In unit of 2 usec */
#define	DEFAULT_RX_COPY_THRESHOLD	128
#define	DEFAULT_TX_COPY_THRESHOLD	512
#define	DEFAULT_TX_RECYCLE_THRESHOLD	(MAX_COOKIE + 1)
#define	DEFAULT_TX_OVERLOAD_THRESHOLD	MIN_NUM_TX_DESC
#define	DEFAULT_TX_RESCHED_THRESHOLD	128
#define	DEFAULT_FCRTH			0x20000
#define	DEFAULT_FCRTL			0x10000
#define	DEFAULT_FCPAUSE			0xFFFF

#define	DEFAULT_TX_HCKSUM_ENABLE	B_TRUE
#define	DEFAULT_RX_HCKSUM_ENABLE	B_TRUE
#define	DEFAULT_LSO_ENABLE		B_TRUE
#define	DEFAULT_MR_ENABLE		B_TRUE
#define	DEFAULT_TX_HEAD_WB_ENABLE	B_TRUE

#define	IXGBE_LSO_MAXLEN		65535

#define	TX_DRAIN_TIME			200
#define	RX_DRAIN_TIME			200

#define	STALL_WATCHDOG_TIMEOUT		8	/* 8 seconds */
#define	MAX_LINK_DOWN_TIMEOUT		8	/* 8 seconds */

/*
 * Extra register bit masks for 82598
 */
#define	IXGBE_PCS1GANA_FDC	0x20
#define	IXGBE_PCS1GANLP_LPFD	0x20
#define	IXGBE_PCS1GANLP_LPHD	0x40

/*
 * Defined for IP header alignment.
 */
#define	IPHDR_ALIGN_ROOM		2

/*
 * Bit flags for attach_progress
 */
#define	ATTACH_PROGRESS_PCI_CONFIG	0x0001	/* PCI config setup */
#define	ATTACH_PROGRESS_REGS_MAP	0x0002	/* Registers mapped */
#define	ATTACH_PROGRESS_PROPS		0x0004	/* Properties initialized */
#define	ATTACH_PROGRESS_ALLOC_INTR	0x0008	/* Interrupts allocated */
#define	ATTACH_PROGRESS_ALLOC_RINGS	0x0010	/* Rings allocated */
#define	ATTACH_PROGRESS_ADD_INTR	0x0020	/* Intr handlers added */
#define	ATTACH_PROGRESS_LOCKS		0x0040	/* Locks initialized */
#define	ATTACH_PROGRESS_INIT		0x0080	/* Device initialized */
#define	ATTACH_PROGRESS_INIT_RINGS	0x0100	/* Rings initialized */
#define	ATTACH_PROGRESS_STATS		0x0200	/* Kstats created */
#define	ATTACH_PROGRESS_NDD		0x0400	/* NDD initialized */
#define	ATTACH_PROGRESS_MAC		0x0800	/* MAC registered */
#define	ATTACH_PROGRESS_ENABLE_INTR	0x1000	/* DDI interrupts enabled */
#define	ATTACH_PROGRESS_FM_INIT		0x2000	/* FMA initialized */
#define	ATTACH_PROGRESS_LSC_TASKQ	0x4000	/* LSC taskq created */
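
/*
 * Illustrative sketch, not part of the driver interface: attach_progress
 * is used as a plain bit mask so that a failed or torn-down attach can
 * unwind only the steps that actually completed, e.g.
 *
 *	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
 *	...
 *	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS)
 *		ixgbe_destroy_locks(ixgbe);
 *
 * (ixgbe_destroy_locks() is named here for illustration only.)
 */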

#define	PROP_DEFAULT_MTU		"default_mtu"
#define	PROP_FLOW_CONTROL		"flow_control"
#define	PROP_TX_QUEUE_NUM		"tx_queue_number"
#define	PROP_TX_RING_SIZE		"tx_ring_size"
#define	PROP_RX_QUEUE_NUM		"rx_queue_number"
#define	PROP_RX_RING_SIZE		"rx_ring_size"
#define	PROP_RX_GROUP_NUM		"rx_group_number"

#define	PROP_INTR_FORCE			"intr_force"
#define	PROP_TX_HCKSUM_ENABLE		"tx_hcksum_enable"
#define	PROP_RX_HCKSUM_ENABLE		"rx_hcksum_enable"
#define	PROP_LSO_ENABLE			"lso_enable"
#define	PROP_MR_ENABLE			"mr_enable"
#define	PROP_TX_HEAD_WB_ENABLE		"tx_head_wb_enable"
#define	PROP_TX_COPY_THRESHOLD		"tx_copy_threshold"
#define	PROP_TX_RECYCLE_THRESHOLD	"tx_recycle_threshold"
#define	PROP_TX_OVERLOAD_THRESHOLD	"tx_overload_threshold"
#define	PROP_TX_RESCHED_THRESHOLD	"tx_resched_threshold"
#define	PROP_RX_COPY_THRESHOLD		"rx_copy_threshold"
#define	PROP_RX_LIMIT_PER_INTR		"rx_limit_per_intr"
#define	PROP_INTR_THROTTLING		"intr_throttling"
#define	PROP_FM_CAPABLE			"fm_capable"

#define	IXGBE_LB_NONE			0
#define	IXGBE_LB_EXTERNAL		1
#define	IXGBE_LB_INTERNAL_MAC		2
#define	IXGBE_LB_INTERNAL_PHY		3
#define	IXGBE_LB_INTERNAL_SERDES	4

/*
 * Capability/feature flags
 * Flags named _CAPABLE are set when the NIC hardware is capable of the
 * feature; flags named _ENABLED are set when the feature is actually
 * enabled.
 */
#define	IXGBE_FLAG_DCA_ENABLED		(u32)(1)
#define	IXGBE_FLAG_DCA_CAPABLE		(u32)(1 << 1)
#define	IXGBE_FLAG_DCB_ENABLED		(u32)(1 << 2)
#define	IXGBE_FLAG_DCB_CAPABLE		(u32)(1 << 3)
#define	IXGBE_FLAG_RSS_ENABLED		(u32)(1 << 4)
#define	IXGBE_FLAG_RSS_CAPABLE		(u32)(1 << 5)
#define	IXGBE_FLAG_VMDQ_CAPABLE		(u32)(1 << 6)
#define	IXGBE_FLAG_VMDQ_ENABLED		(u32)(1 << 7)
#define	IXGBE_FLAG_FAN_FAIL_CAPABLE	(u32)(1 << 8)
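
/*
 * Illustrative sketch, not part of the driver interface: the usual
 * pattern is to enable a feature only when the matching _CAPABLE bit
 * is present, e.g.
 *
 *	if (ixgbe->capab->flags & IXGBE_FLAG_RSS_CAPABLE)
 *		ixgbe->capab->flags |= IXGBE_FLAG_RSS_ENABLED;
 *
 * Where (and whether) each check is made is up to the .c files; only
 * the flag names come from this header.
 */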

/* adapter-specific info for each supported device type */
typedef struct adapter_info {
	uint32_t	max_rx_que_num;	/* maximum number of rx queues */
	uint32_t	min_rx_que_num;	/* minimum number of rx queues */
	uint32_t	def_rx_que_num;	/* default number of rx queues */
	uint32_t	max_tx_que_num;	/* maximum number of tx queues */
	uint32_t	min_tx_que_num;	/* minimum number of tx queues */
	uint32_t	def_tx_que_num;	/* default number of tx queues */
	uint32_t	max_msix_vect;	/* maximum total msix vectors */
	uint32_t	max_ring_vect;	/* maximum number of ring vectors */
	uint32_t	max_other_vect;	/* maximum number of other vectors */
	uint32_t	other_intr;	/* "other" interrupt types handled */
	uint32_t	flags;		/* capability flags */
} adapter_info_t;

/* bits representing all interrupt types other than tx & rx */
#define	IXGBE_OTHER_INTR	0x3ff00000
#define	IXGBE_82599_OTHER_INTR	0x86100000

/*
 * Shorthand for the NDD parameters
 */
#define	param_autoneg_cap	nd_params[PARAM_AUTONEG_CAP].val
#define	param_pause_cap		nd_params[PARAM_PAUSE_CAP].val
#define	param_asym_pause_cap	nd_params[PARAM_ASYM_PAUSE_CAP].val
#define	param_10000fdx_cap	nd_params[PARAM_10000FDX_CAP].val
#define	param_1000fdx_cap	nd_params[PARAM_1000FDX_CAP].val
#define	param_100fdx_cap	nd_params[PARAM_100FDX_CAP].val
#define	param_rem_fault		nd_params[PARAM_REM_FAULT].val

#define	param_adv_autoneg_cap	nd_params[PARAM_ADV_AUTONEG_CAP].val
#define	param_adv_pause_cap	nd_params[PARAM_ADV_PAUSE_CAP].val
#define	param_adv_asym_pause_cap nd_params[PARAM_ADV_ASYM_PAUSE_CAP].val
#define	param_adv_10000fdx_cap	nd_params[PARAM_ADV_10000FDX_CAP].val
#define	param_adv_1000fdx_cap	nd_params[PARAM_ADV_1000FDX_CAP].val
#define	param_adv_100fdx_cap	nd_params[PARAM_ADV_100FDX_CAP].val
#define	param_adv_rem_fault	nd_params[PARAM_ADV_REM_FAULT].val

#define	param_lp_autoneg_cap	nd_params[PARAM_LP_AUTONEG_CAP].val
#define	param_lp_pause_cap	nd_params[PARAM_LP_PAUSE_CAP].val
#define	param_lp_asym_pause_cap	nd_params[PARAM_LP_ASYM_PAUSE_CAP].val
#define	param_lp_10000fdx_cap	nd_params[PARAM_LP_10000FDX_CAP].val
#define	param_lp_1000fdx_cap	nd_params[PARAM_LP_1000FDX_CAP].val
#define	param_lp_100fdx_cap	nd_params[PARAM_LP_100FDX_CAP].val
#define	param_lp_rem_fault	nd_params[PARAM_LP_REM_FAULT].val

enum ioc_reply {
	IOC_INVAL = -1,	/* bad, NAK with EINVAL */
	IOC_DONE,	/* OK, reply sent */
	IOC_ACK,	/* OK, just send ACK */
	IOC_REPLY	/* OK, just send reply */
};

#define	DMA_SYNC(area, flag)	((void) ddi_dma_sync((area)->dma_handle, \
				    0, 0, (flag)))
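
/*
 * Illustrative sketch, not part of the driver interface: DMA_SYNC()
 * calls ddi_dma_sync(9F) with offset 0 and length 0, i.e. it syncs the
 * whole object behind the handle. A typical call site would be
 *
 *	DMA_SYNC(&rx_ring->rbd_area, DDI_DMA_SYNC_FORKERNEL);
 *
 * before the CPU reads the descriptors, or DDI_DMA_SYNC_FORDEV before
 * handing buffers back to the hardware.
 */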

/*
 * Ring index operations; callers must ensure that
 * ASSERT(index < limit)
 * ASSERT(step < limit)
 * ASSERT(index1 < limit)
 * ASSERT(index2 < limit)
 * hold for the arguments passed in.
 */
#define	NEXT_INDEX(index, step, limit)	(((index) + (step)) < (limit) ? \
	(index) + (step) : (index) + (step) - (limit))
#define	PREV_INDEX(index, step, limit)	((index) >= (step) ? \
	(index) - (step) : (index) + (limit) - (step))
#define	OFFSET(index1, index2, limit)	((index1) <= (index2) ? \
	(index2) - (index1) : (index2) + (limit) - (index1))
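
/*
 * Worked example (illustrative only), with limit == 1024:
 *
 *	NEXT_INDEX(1022, 4, 1024) == 2		wraps past the end
 *	PREV_INDEX(1, 2, 1024) == 1023		wraps past the start
 *	OFFSET(1020, 2, 1024) == 6		forward distance, with wrap
 */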

#define	LINK_LIST_INIT(_LH)	\
	(_LH)->head = (_LH)->tail = NULL

#define	LIST_GET_HEAD(_LH)	((single_link_t *)((_LH)->head))

#define	LIST_POP_HEAD(_LH)	\
	(single_link_t *)(_LH)->head; \
	{ \
		if ((_LH)->head != NULL) { \
			(_LH)->head = (_LH)->head->link; \
			if ((_LH)->head == NULL) \
				(_LH)->tail = NULL; \
		} \
	}

#define	LIST_GET_TAIL(_LH)	((single_link_t *)((_LH)->tail))

#define	LIST_PUSH_TAIL(_LH, _E)	\
	if ((_LH)->tail != NULL) { \
		(_LH)->tail->link = (single_link_t *)(_E); \
		(_LH)->tail = (single_link_t *)(_E); \
	} else { \
		(_LH)->head = (_LH)->tail = (single_link_t *)(_E); \
	} \
	(_E)->link = NULL;

#define	LIST_GET_NEXT(_LH, _E)		\
	(((_LH)->tail == (single_link_t *)(_E)) ? \
	NULL : ((single_link_t *)(_E))->link)


typedef struct single_link {
	struct single_link	*link;
} single_link_t;

typedef struct link_list {
	single_link_t		*head;
	single_link_t		*tail;
} link_list_t;
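
/*
 * Illustrative sketch, not part of the driver interface: the list
 * macros work on any structure whose first member is a single_link_t,
 * such as the Tx control blocks declared below. With a hypothetical
 * tx_control_block_t pointer "tcb":
 *
 *	link_list_t pending;
 *
 *	LINK_LIST_INIT(&pending);
 *	LIST_PUSH_TAIL(&pending, tcb);
 *	tcb = (tx_control_block_t *)LIST_POP_HEAD(&pending);
 *
 * LIST_POP_HEAD() yields the old head and then advances the list, so
 * it is meant to be used exactly as in the assignment above.
 */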

/*
 * Property lookups
 */
#define	IXGBE_PROP_EXISTS(d, n)	ddi_prop_exists(DDI_DEV_T_ANY, (d), \
				    DDI_PROP_DONTPASS, (n))
#define	IXGBE_PROP_GET_INT(d, n)	ddi_prop_get_int(DDI_DEV_T_ANY, (d), \
				    DDI_PROP_DONTPASS, (n), -1)
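
/*
 * Illustrative sketch, not part of the driver interface: a driver.conf
 * property is typically read and then clamped against the MIN/MAX/
 * DEFAULT values defined above, e.g.
 *
 *	int val = IXGBE_PROP_GET_INT(ixgbe->dip, PROP_TX_RING_SIZE);
 *	if (val < MIN_TX_RING_SIZE || val > MAX_TX_RING_SIZE)
 *		val = DEFAULT_TX_RING_SIZE;
 *
 * IXGBE_PROP_GET_INT() returns -1 when the property is absent, so the
 * range check also covers the unconfigured case.
 */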


/*
 * Named Data (ND) Parameter Management Structure
 */
typedef struct {
	struct ixgbe *private;
	uint32_t info;
	uint32_t min;
	uint32_t max;
	uint32_t val;
	char *name;
} nd_param_t;

/*
 * NDD parameter indexes, divided into:
 *
 *	read-only parameters describing the hardware's capabilities
 *	read-write parameters controlling the advertised capabilities
 *	read-only parameters describing the partner's capabilities
 *	read-write parameters controlling the force speed and duplex
 *	read-only parameters describing the link state
 *	read-only parameters describing the driver properties
 *	read-write parameters controlling the driver properties
 */
enum {
	PARAM_AUTONEG_CAP,
	PARAM_PAUSE_CAP,
	PARAM_ASYM_PAUSE_CAP,
	PARAM_10000FDX_CAP,
	PARAM_1000FDX_CAP,
	PARAM_100FDX_CAP,
	PARAM_REM_FAULT,

	PARAM_ADV_AUTONEG_CAP,
	PARAM_ADV_PAUSE_CAP,
	PARAM_ADV_ASYM_PAUSE_CAP,
	PARAM_ADV_10000FDX_CAP,
	PARAM_ADV_1000FDX_CAP,
	PARAM_ADV_100FDX_CAP,
	PARAM_ADV_REM_FAULT,

	PARAM_LP_AUTONEG_CAP,
	PARAM_LP_PAUSE_CAP,
	PARAM_LP_ASYM_PAUSE_CAP,
	PARAM_LP_10000FDX_CAP,
	PARAM_LP_1000FDX_CAP,
	PARAM_LP_100FDX_CAP,
	PARAM_LP_REM_FAULT,

	PARAM_LINK_STATUS,
	PARAM_LINK_SPEED,
	PARAM_LINK_DUPLEX,

	PARAM_COUNT
};

typedef union ixgbe_ether_addr {
	struct {
		uint32_t	high;
		uint32_t	low;
	} reg;
	struct {
		uint8_t		set;
		uint8_t		redundant;
		uint8_t		addr[ETHERADDRL];
	} mac;
} ixgbe_ether_addr_t;

typedef enum {
	USE_NONE,
	USE_COPY,
	USE_DMA
} tx_type_t;

typedef enum {
	RCB_FREE,
	RCB_SENDUP
} rcb_state_t;

typedef struct ixgbe_tx_context {
	uint32_t		hcksum_flags;
	uint32_t		ip_hdr_len;
	uint32_t		mac_hdr_len;
	uint32_t		l4_proto;
	uint32_t		mss;
	uint32_t		l4_hdr_len;
	boolean_t		lso_flag;
} ixgbe_tx_context_t;

/*
 * Hold address/length of each DMA segment
 */
typedef struct sw_desc {
	uint64_t		address;
	size_t			length;
} sw_desc_t;

/*
 * Handles and addresses of DMA buffer
 */
typedef struct dma_buffer {
	caddr_t			address;	/* Virtual address */
	uint64_t		dma_address;	/* DMA (Hardware) address */
	ddi_acc_handle_t	acc_handle;	/* Data access handle */
	ddi_dma_handle_t	dma_handle;	/* DMA handle */
	size_t			size;		/* Buffer size */
	size_t			len;		/* Data length in the buffer */
} dma_buffer_t;

/*
 * Tx Control Block
 */
typedef struct tx_control_block {
	single_link_t		link;
	uint32_t		last_index; /* last descriptor of the pkt */
	uint32_t		frag_num;
	uint32_t		desc_num;
	mblk_t			*mp;
	tx_type_t		tx_type;
	ddi_dma_handle_t	tx_dma_handle;
	dma_buffer_t		tx_buf;
	sw_desc_t		desc[MAX_COOKIE];
} tx_control_block_t;

/*
 * RX Control Block
 */
typedef struct rx_control_block {
	mblk_t			*mp;
	rcb_state_t		state;
	dma_buffer_t		rx_buf;
	frtn_t			free_rtn;
	struct ixgbe_rx_ring	*rx_ring;
} rx_control_block_t;

/*
 * Software Data Structure for Tx Ring
 */
typedef struct ixgbe_tx_ring {
	uint32_t		index;	/* Ring index */
	uint32_t		intr_vector;	/* Interrupt vector index */
	uint32_t		vect_bit;	/* vector's bit in register */

	/*
	 * Mutexes
	 */
	kmutex_t		tx_lock;
	kmutex_t		recycle_lock;
	kmutex_t		tcb_head_lock;
	kmutex_t		tcb_tail_lock;

	/*
	 * Tx descriptor ring definitions
	 */
	dma_buffer_t		tbd_area;
	union ixgbe_adv_tx_desc	*tbd_ring;
	uint32_t		tbd_head; /* Index of next tbd to recycle */
	uint32_t		tbd_tail; /* Index of next tbd to transmit */
	uint32_t		tbd_free; /* Number of free tbd */

	/*
	 * Tx control block list definitions
	 */
	tx_control_block_t	*tcb_area;
	tx_control_block_t	**work_list;
	tx_control_block_t	**free_list;
	uint32_t		tcb_head; /* Head index of free list */
	uint32_t		tcb_tail; /* Tail index of free list */
	uint32_t		tcb_free; /* Number of free tcb in free list */

	uint32_t		*tbd_head_wb; /* Head write-back */
	uint32_t		(*tx_recycle)(struct ixgbe_tx_ring *);

	/*
	 * s/w context structure for TCP/UDP checksum offload
	 * and LSO.
	 */
	ixgbe_tx_context_t	tx_context;

	/*
	 * Tx ring settings and status
	 */
	uint32_t		ring_size; /* Tx descriptor ring size */
	uint32_t		free_list_size;	/* Tx free list size */
	uint32_t		copy_thresh;
	uint32_t		recycle_thresh;
	uint32_t		overload_thresh;
	uint32_t		resched_thresh;

	boolean_t		reschedule;
	uint32_t		recycle_fail;
	uint32_t		stall_watchdog;

#ifdef IXGBE_DEBUG
	/*
	 * Debug statistics
	 */
	uint32_t		stat_overload;
	uint32_t		stat_fail_no_tbd;
	uint32_t		stat_fail_no_tcb;
	uint32_t		stat_fail_dma_bind;
	uint32_t		stat_reschedule;
	uint32_t		stat_break_tbd_limit;
	uint32_t		stat_lso_header_fail;
#endif

	mac_ring_handle_t	ring_handle;

	/*
	 * Pointer to the ixgbe struct
	 */
	struct ixgbe		*ixgbe;
} ixgbe_tx_ring_t;

/*
 * Software Receive Ring
 */
typedef struct ixgbe_rx_ring {
	uint32_t		index;		/* Ring index */
	uint32_t		intr_vector;	/* Interrupt vector index */
	uint32_t		vect_bit;	/* vector's bit in register */

	/*
	 * Mutexes
	 */
	kmutex_t		rx_lock;	/* Rx access lock */
	kmutex_t		recycle_lock;	/* Recycle lock, for rcb_tail */

	/*
	 * Rx descriptor ring definitions
	 */
	dma_buffer_t		rbd_area;	/* DMA buffer of rx desc ring */
	union ixgbe_adv_rx_desc	*rbd_ring;	/* Rx desc ring */
	uint32_t		rbd_next;	/* Index of next rx desc */

	/*
	 * Rx control block list definitions
	 */
	rx_control_block_t	*rcb_area;
	rx_control_block_t	**work_list;	/* Work list of rcbs */
	rx_control_block_t	**free_list;	/* Free list of rcbs */
	uint32_t		rcb_head;	/* Index of next free rcb */
	uint32_t		rcb_tail;	/* Index to put recycled rcb */
	uint32_t		rcb_free;	/* Number of free rcbs */

	/*
	 * Rx ring settings and status
	 */
	uint32_t		ring_size;	/* Rx descriptor ring size */
	uint32_t		free_list_size;	/* Rx free list size */
	uint32_t		limit_per_intr;	/* Max packets per interrupt */
	uint32_t		copy_thresh;

#ifdef IXGBE_DEBUG
	/*
	 * Debug statistics
	 */
	uint32_t		stat_frame_error;
	uint32_t		stat_cksum_error;
	uint32_t		stat_exceed_pkt;
#endif

	mac_ring_handle_t	ring_handle;
	uint64_t		ring_gen_num;

	struct ixgbe		*ixgbe;		/* Pointer to ixgbe struct */
} ixgbe_rx_ring_t;

/*
 * Software Receive Ring Group
 */
typedef struct ixgbe_rx_group {
	uint32_t		index;		/* Group index */
	mac_group_handle_t	group_handle;	/* call back group handle */
	struct ixgbe		*ixgbe;		/* Pointer to ixgbe struct */
} ixgbe_rx_group_t;

/*
 * structure to map interrupt cleanup to msi-x vector
 */
typedef struct ixgbe_intr_vector {
	struct ixgbe *ixgbe;	/* point to my adapter */
	ulong_t rx_map[BT_BITOUL(MAX_RX_QUEUE_NUM)];	/* bitmap of rx rings */
	int	rxr_cnt;	/* count rx rings */
	ulong_t tx_map[BT_BITOUL(MAX_TX_QUEUE_NUM)];	/* bitmap of tx rings */
	int	txr_cnt;	/* count tx rings */
	ulong_t other_map[BT_BITOUL(2)];		/* bitmap of other */
	int	other_cnt;	/* count other interrupt */
} ixgbe_intr_vector_t;
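
/*
 * Illustrative sketch, not part of the driver interface: mapping ring
 * "r" of a ring type to MSI-X vector "v" is a bitmap update on the
 * corresponding map (r and v are hypothetical indices; BT_SET() comes
 * from <sys/bitmap.h>):
 *
 *	ixgbe_intr_vector_t *vect = &ixgbe->vect_map[v];
 *
 *	BT_SET(vect->rx_map, r);
 *	vect->rxr_cnt++;
 */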

/*
 * Software adapter state
 */
typedef struct ixgbe {
	int			instance;
	mac_handle_t		mac_hdl;
	dev_info_t		*dip;
	struct ixgbe_hw		hw;
	struct ixgbe_osdep	osdep;

	adapter_info_t		*capab;	/* adapter hardware capabilities */
	ddi_taskq_t		*lsc_taskq;	/* link-status-change taskq */
	uint32_t		eims;		/* interrupt mask setting */
	uint32_t		eimc;		/* interrupt mask clear */
	uint32_t		eicr;		/* interrupt cause reg */

	uint32_t		ixgbe_state;
	link_state_t		link_state;
	uint32_t		link_speed;
	uint32_t		link_duplex;
	uint32_t		link_down_timeout;

	uint32_t		reset_count;
	uint32_t		attach_progress;
	uint32_t		loopback_mode;
	uint32_t		default_mtu;
	uint32_t		max_frame_size;

	/*
	 * Each msi-x vector: map vector to interrupt cleanup
	 */
	ixgbe_intr_vector_t	vect_map[MAX_INTR_VECTOR];

	/*
	 * Receive Rings
	 */
	ixgbe_rx_ring_t		*rx_rings;	/* Array of rx rings */
	uint32_t		num_rx_rings;	/* Number of rx rings in use */
	uint32_t		rx_ring_size;	/* Rx descriptor ring size */
	uint32_t		rx_buf_size;	/* Rx buffer size */

	/*
	 * Receive Groups
	 */
	ixgbe_rx_group_t	*rx_groups;	/* Array of rx groups */
	uint32_t		num_rx_groups;	/* Number of rx groups in use */

	/*
	 * Transmit Rings
	 */
	ixgbe_tx_ring_t		*tx_rings;	/* Array of tx rings */
	uint32_t		num_tx_rings;	/* Number of tx rings in use */
	uint32_t		tx_ring_size;	/* Tx descriptor ring size */
	uint32_t		tx_buf_size;	/* Tx buffer size */

	boolean_t		tx_head_wb_enable; /* Tx head write-back */
	boolean_t		tx_hcksum_enable; /* Tx h/w cksum offload */
	boolean_t		lso_enable;	/* Large Segment Offload */
	boolean_t		mr_enable;	/* Multiple Tx and Rx Ring */
	uint32_t		tx_copy_thresh;	/* Tx copy threshold */
	uint32_t		tx_recycle_thresh; /* Tx recycle threshold */
	uint32_t		tx_overload_thresh; /* Tx overload threshold */
	uint32_t		tx_resched_thresh; /* Tx reschedule threshold */
	boolean_t		rx_hcksum_enable; /* Rx h/w cksum offload */
	uint32_t		rx_copy_thresh; /* Rx copy threshold */
	uint32_t		rx_limit_per_intr; /* Rx pkts per interrupt */
	uint32_t		intr_throttling[MAX_INTR_VECTOR];
	uint32_t		intr_force;
	int			fm_capabilities; /* FMA capabilities */

	int			intr_type;
	int			intr_cnt;
	int			intr_cap;
	size_t			intr_size;
	uint_t			intr_pri;
	ddi_intr_handle_t	*htable;
	uint32_t		eims_mask;

	kmutex_t		gen_lock; /* General lock for device access */
	kmutex_t		watchdog_lock;

	boolean_t		watchdog_enable;
	boolean_t		watchdog_start;
	timeout_id_t		watchdog_tid;

	boolean_t		unicst_init;
	uint32_t		unicst_avail;
	uint32_t		unicst_total;
	ixgbe_ether_addr_t	unicst_addr[MAX_NUM_UNICAST_ADDRESSES];
	uint32_t		mcast_count;
	struct ether_addr	mcast_table[MAX_NUM_MULTICAST_ADDRESSES];

	ulong_t			sys_page_size;

	/*
	 * Kstat definitions
	 */
	kstat_t			*ixgbe_ks;

	/*
	 * NDD definitions
	 */
	caddr_t			nd_data;
	nd_param_t		nd_params[PARAM_COUNT];
} ixgbe_t;

typedef struct ixgbe_stat {
	kstat_named_t link_speed;	/* Link Speed */

	kstat_named_t reset_count;	/* Reset Count */

	kstat_named_t rx_frame_error;	/* Rx Error in Packet */
	kstat_named_t rx_cksum_error;	/* Rx Checksum Error */
	kstat_named_t rx_exceed_pkt;	/* Rx Exceed Max Pkt Count */

	kstat_named_t tx_overload;	/* Tx Desc Ring Overload */
	kstat_named_t tx_fail_no_tcb;	/* Tx Fail Freelist Empty */
	kstat_named_t tx_fail_no_tbd;	/* Tx Fail Desc Ring Empty */
	kstat_named_t tx_fail_dma_bind;	/* Tx Fail DMA bind */
	kstat_named_t tx_reschedule;	/* Tx Reschedule */

	kstat_named_t gprc;	/* Good Packets Received Count */
	kstat_named_t gptc;	/* Good Packets Xmitted Count */
	kstat_named_t gor;	/* Good Octets Received Count */
	kstat_named_t got;	/* Good Octets Xmitd Count */
	kstat_named_t prc64;	/* Packets Received - 64b */
	kstat_named_t prc127;	/* Packets Received - 65-127b */
	kstat_named_t prc255;	/* Packets Received - 128-255b */
	kstat_named_t prc511;	/* Packets Received - 256-511b */
	kstat_named_t prc1023;	/* Packets Received - 512-1023b */
	kstat_named_t prc1522;	/* Packets Received - 1024-1522b */
	kstat_named_t ptc64;	/* Packets Xmitted (64b) */
	kstat_named_t ptc127;	/* Packets Xmitted (65-127b) */
	kstat_named_t ptc255;	/* Packets Xmitted (128-255b) */
	kstat_named_t ptc511;	/* Packets Xmitted (256-511b) */
	kstat_named_t ptc1023;	/* Packets Xmitted (512-1023b) */
	kstat_named_t ptc1522;	/* Packets Xmitted (1024-1522b) */
	kstat_named_t qprc[16];	/* Queue Packets Received Count */
	kstat_named_t qptc[16];	/* Queue Packets Transmitted Count */
	kstat_named_t qbrc[16];	/* Queue Bytes Received Count */
	kstat_named_t qbtc[16];	/* Queue Bytes Transmitted Count */

	kstat_named_t crcerrs;	/* CRC Error Count */
	kstat_named_t illerrc;	/* Illegal Byte Error Count */
	kstat_named_t errbc;	/* Error Byte Count */
	kstat_named_t mspdc;	/* MAC Short Packet Discard Count */
	kstat_named_t mpc;	/* Missed Packets Count */
	kstat_named_t mlfc;	/* MAC Local Fault Count */
	kstat_named_t mrfc;	/* MAC Remote Fault Count */
	kstat_named_t rlec;	/* Receive Length Error Count */
	kstat_named_t lxontxc;	/* Link XON Transmitted Count */
	kstat_named_t lxonrxc;	/* Link XON Received Count */
	kstat_named_t lxofftxc;	/* Link XOFF Transmitted Count */
	kstat_named_t lxoffrxc;	/* Link XOFF Received Count */
	kstat_named_t bprc;	/* Broadcast Pkts Received Count */
	kstat_named_t mprc;	/* Multicast Pkts Received Count */
	kstat_named_t rnbc;	/* Receive No Buffers Count */
	kstat_named_t ruc;	/* Receive Undersize Count */
	kstat_named_t rfc;	/* Receive Frag Count */
	kstat_named_t roc;	/* Receive Oversize Count */
	kstat_named_t rjc;	/* Receive Jabber Count */
	kstat_named_t tor;	/* Total Octets Recvd Count */
	kstat_named_t tot;	/* Total Octets Xmitted Count */
	kstat_named_t tpr;	/* Total Packets Received */
	kstat_named_t tpt;	/* Total Packets Xmitted */
	kstat_named_t mptc;	/* Multicast Packets Xmitted Count */
	kstat_named_t bptc;	/* Broadcast Packets Xmitted Count */
} ixgbe_stat_t;

/*
 * Function prototypes in ixgbe_buf.c
 */
int ixgbe_alloc_dma(ixgbe_t *);
void ixgbe_free_dma(ixgbe_t *);
void ixgbe_set_fma_flags(int, int);

/*
 * Function prototypes in ixgbe_main.c
 */
int ixgbe_start(ixgbe_t *);
void ixgbe_stop(ixgbe_t *);
int ixgbe_driver_setup_link(ixgbe_t *, boolean_t);
int ixgbe_multicst_add(ixgbe_t *, const uint8_t *);
int ixgbe_multicst_remove(ixgbe_t *, const uint8_t *);
enum ioc_reply ixgbe_loopback_ioctl(ixgbe_t *, struct iocblk *, mblk_t *);

void ixgbe_enable_watchdog_timer(ixgbe_t *);
void ixgbe_disable_watchdog_timer(ixgbe_t *);
int ixgbe_atomic_reserve(uint32_t *, uint32_t);

int ixgbe_check_acc_handle(ddi_acc_handle_t handle);
int ixgbe_check_dma_handle(ddi_dma_handle_t handle);
void ixgbe_fm_ereport(ixgbe_t *, char *);

void ixgbe_fill_ring(void *, mac_ring_type_t, const int, const int,
    mac_ring_info_t *, mac_ring_handle_t);
void ixgbe_fill_group(void *arg, mac_ring_type_t, const int,
    mac_group_info_t *, mac_group_handle_t);
int ixgbe_rx_ring_intr_enable(mac_intr_handle_t);
int ixgbe_rx_ring_intr_disable(mac_intr_handle_t);

/*
 * Function prototypes in ixgbe_gld.c
 */
int ixgbe_m_start(void *);
void ixgbe_m_stop(void *);
int ixgbe_m_promisc(void *, boolean_t);
int ixgbe_m_multicst(void *, boolean_t, const uint8_t *);
int ixgbe_m_stat(void *, uint_t, uint64_t *);
void ixgbe_m_resources(void *);
void ixgbe_m_ioctl(void *, queue_t *, mblk_t *);
boolean_t ixgbe_m_getcapab(void *, mac_capab_t, void *);

/*
 * Function prototypes in ixgbe_rx.c
 */
mblk_t *ixgbe_ring_rx(ixgbe_rx_ring_t *, int);
void ixgbe_rx_recycle(caddr_t arg);
mblk_t *ixgbe_ring_rx_poll(void *, int);

/*
 * Function prototypes in ixgbe_tx.c
 */
mblk_t *ixgbe_ring_tx(void *, mblk_t *);
void ixgbe_free_tcb(tx_control_block_t *);
void ixgbe_put_free_list(ixgbe_tx_ring_t *, link_list_t *);
uint32_t ixgbe_tx_recycle_legacy(ixgbe_tx_ring_t *);
uint32_t ixgbe_tx_recycle_head_wb(ixgbe_tx_ring_t *);

/*
 * Function prototypes in ixgbe_log.c
 */
void ixgbe_notice(void *, const char *, ...);
void ixgbe_log(void *, const char *, ...);
void ixgbe_error(void *, const char *, ...);

/*
 * Function prototypes in ixgbe_ndd.c
 */
int ixgbe_nd_init(ixgbe_t *);
void ixgbe_nd_cleanup(ixgbe_t *);
enum ioc_reply ixgbe_nd_ioctl(ixgbe_t *, queue_t *, mblk_t *, struct iocblk *);

/*
 * Function prototypes in ixgbe_stat.c
 */
int ixgbe_init_stats(ixgbe_t *);

#ifdef __cplusplus
}
#endif

#endif /* _IXGBE_SW_H */