xref: /illumos-gate/usr/src/uts/common/io/ixgbe/ixgbe_main.c (revision 6a634c9dca3093f3922e4b7ab826d7bdf17bf78e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24  */
25 
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  */
29 
30 #include "ixgbe_sw.h"
31 
32 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
33 static char ixgbe_version[] = "ixgbe 1.1.7";
34 
35 /*
36  * Local function prototypes
37  */
38 static int ixgbe_register_mac(ixgbe_t *);
39 static int ixgbe_identify_hardware(ixgbe_t *);
40 static int ixgbe_regs_map(ixgbe_t *);
41 static void ixgbe_init_properties(ixgbe_t *);
42 static int ixgbe_init_driver_settings(ixgbe_t *);
43 static void ixgbe_init_locks(ixgbe_t *);
44 static void ixgbe_destroy_locks(ixgbe_t *);
45 static int ixgbe_init(ixgbe_t *);
46 static int ixgbe_chip_start(ixgbe_t *);
47 static void ixgbe_chip_stop(ixgbe_t *);
48 static int ixgbe_reset(ixgbe_t *);
49 static void ixgbe_tx_clean(ixgbe_t *);
50 static boolean_t ixgbe_tx_drain(ixgbe_t *);
51 static boolean_t ixgbe_rx_drain(ixgbe_t *);
52 static int ixgbe_alloc_rings(ixgbe_t *);
53 static void ixgbe_free_rings(ixgbe_t *);
54 static int ixgbe_alloc_rx_data(ixgbe_t *);
55 static void ixgbe_free_rx_data(ixgbe_t *);
56 static void ixgbe_setup_rings(ixgbe_t *);
57 static void ixgbe_setup_rx(ixgbe_t *);
58 static void ixgbe_setup_tx(ixgbe_t *);
59 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
60 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
61 static void ixgbe_setup_rss(ixgbe_t *);
62 static void ixgbe_setup_vmdq(ixgbe_t *);
63 static void ixgbe_setup_vmdq_rss(ixgbe_t *);
64 static void ixgbe_init_unicst(ixgbe_t *);
65 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
66 static void ixgbe_setup_multicst(ixgbe_t *);
67 static void ixgbe_get_hw_state(ixgbe_t *);
68 static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
69 static void ixgbe_get_conf(ixgbe_t *);
70 static void ixgbe_init_params(ixgbe_t *);
71 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
72 static void ixgbe_driver_link_check(ixgbe_t *);
73 static void ixgbe_sfp_check(void *);
74 static void ixgbe_overtemp_check(void *);
75 static void ixgbe_link_timer(void *);
76 static void ixgbe_local_timer(void *);
77 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
78 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
79 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
80 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
81 static boolean_t is_valid_mac_addr(uint8_t *);
82 static boolean_t ixgbe_stall_check(ixgbe_t *);
83 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
84 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
85 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
86 static int ixgbe_alloc_intrs(ixgbe_t *);
87 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
88 static int ixgbe_add_intr_handlers(ixgbe_t *);
89 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
90 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
91 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
92 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
93 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
94 static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
95 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
96 static void ixgbe_setup_adapter_vector(ixgbe_t *);
97 static void ixgbe_rem_intr_handlers(ixgbe_t *);
98 static void ixgbe_rem_intrs(ixgbe_t *);
99 static int ixgbe_enable_intrs(ixgbe_t *);
100 static int ixgbe_disable_intrs(ixgbe_t *);
101 static uint_t ixgbe_intr_legacy(void *, void *);
102 static uint_t ixgbe_intr_msi(void *, void *);
103 static uint_t ixgbe_intr_msix(void *, void *);
104 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
105 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
106 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
107 static void ixgbe_get_driver_control(struct ixgbe_hw *);
108 static int ixgbe_addmac(void *, const uint8_t *);
109 static int ixgbe_remmac(void *, const uint8_t *);
110 static void ixgbe_release_driver_control(struct ixgbe_hw *);
111 
112 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
113 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
114 static int ixgbe_resume(dev_info_t *);
115 static int ixgbe_suspend(dev_info_t *);
116 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
117 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
118 static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
119 static int ixgbe_intr_cb_register(ixgbe_t *);
120 static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);
121 
122 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
123     const void *impl_data);
124 static void ixgbe_fm_init(ixgbe_t *);
125 static void ixgbe_fm_fini(ixgbe_t *);
126 
127 char *ixgbe_priv_props[] = {
128 	"_tx_copy_thresh",
129 	"_tx_recycle_thresh",
130 	"_tx_overload_thresh",
131 	"_tx_resched_thresh",
132 	"_rx_copy_thresh",
133 	"_rx_limit_per_intr",
134 	"_intr_throttling",
135 	"_adv_pause_cap",
136 	"_adv_asym_pause_cap",
137 	NULL
138 };
139 
140 #define	IXGBE_MAX_PRIV_PROPS \
141 	(sizeof (ixgbe_priv_props) / sizeof (ixgbe_priv_props[0]))
142 
143 static struct cb_ops ixgbe_cb_ops = {
144 	nulldev,		/* cb_open */
145 	nulldev,		/* cb_close */
146 	nodev,			/* cb_strategy */
147 	nodev,			/* cb_print */
148 	nodev,			/* cb_dump */
149 	nodev,			/* cb_read */
150 	nodev,			/* cb_write */
151 	nodev,			/* cb_ioctl */
152 	nodev,			/* cb_devmap */
153 	nodev,			/* cb_mmap */
154 	nodev,			/* cb_segmap */
155 	nochpoll,		/* cb_chpoll */
156 	ddi_prop_op,		/* cb_prop_op */
157 	NULL,			/* cb_stream */
158 	D_MP | D_HOTPLUG,	/* cb_flag */
159 	CB_REV,			/* cb_rev */
160 	nodev,			/* cb_aread */
161 	nodev			/* cb_awrite */
162 };
163 
164 static struct dev_ops ixgbe_dev_ops = {
165 	DEVO_REV,		/* devo_rev */
166 	0,			/* devo_refcnt */
167 	NULL,			/* devo_getinfo */
168 	nulldev,		/* devo_identify */
169 	nulldev,		/* devo_probe */
170 	ixgbe_attach,		/* devo_attach */
171 	ixgbe_detach,		/* devo_detach */
172 	nodev,			/* devo_reset */
173 	&ixgbe_cb_ops,		/* devo_cb_ops */
174 	NULL,			/* devo_bus_ops */
175 	ddi_power,		/* devo_power */
176 	ddi_quiesce_not_supported,	/* devo_quiesce */
177 };
178 
179 static struct modldrv ixgbe_modldrv = {
180 	&mod_driverops,		/* Type of module.  This one is a driver */
181 	ixgbe_ident,		/* Description string */
182 	&ixgbe_dev_ops		/* driver ops */
183 };
184 
185 static struct modlinkage ixgbe_modlinkage = {
186 	MODREV_1, &ixgbe_modldrv, NULL
187 };
188 
189 /*
190  * Access attributes for register mapping
191  */
192 ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
193 	DDI_DEVICE_ATTR_V1,
194 	DDI_STRUCTURE_LE_ACC,
195 	DDI_STRICTORDER_ACC,
196 	DDI_FLAGERR_ACC
197 };
198 
199 /*
200  * Loopback property
201  */
202 static lb_property_t lb_normal = {
203 	normal,	"normal", IXGBE_LB_NONE
204 };
205 
206 static lb_property_t lb_mac = {
207 	internal, "MAC", IXGBE_LB_INTERNAL_MAC
208 };
209 
210 static lb_property_t lb_external = {
211 	external, "External", IXGBE_LB_EXTERNAL
212 };
213 
214 #define	IXGBE_M_CALLBACK_FLAGS \
215 	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
216 
217 static mac_callbacks_t ixgbe_m_callbacks = {
218 	IXGBE_M_CALLBACK_FLAGS,
219 	ixgbe_m_stat,
220 	ixgbe_m_start,
221 	ixgbe_m_stop,
222 	ixgbe_m_promisc,
223 	ixgbe_m_multicst,
224 	NULL,
225 	NULL,
226 	NULL,
227 	ixgbe_m_ioctl,
228 	ixgbe_m_getcapab,
229 	NULL,
230 	NULL,
231 	ixgbe_m_setprop,
232 	ixgbe_m_getprop,
233 	ixgbe_m_propinfo
234 };
235 
236 /*
237  * Initialize capabilities of each supported adapter type
238  */
239 static adapter_info_t ixgbe_82598eb_cap = {
240 	64,		/* maximum number of rx queues */
241 	1,		/* minimum number of rx queues */
242 	64,		/* default number of rx queues */
243 	16,		/* maximum number of rx groups */
244 	1,		/* minimum number of rx groups */
245 	1,		/* default number of rx groups */
246 	32,		/* maximum number of tx queues */
247 	1,		/* minimum number of tx queues */
248 	8,		/* default number of tx queues */
249 	16366,		/* maximum MTU size */
250 	0xFFFF,		/* maximum interrupt throttle rate */
251 	0,		/* minimum interrupt throttle rate */
252 	200,		/* default interrupt throttle rate */
253 	18,		/* maximum total msix vectors */
254 	16,		/* maximum number of ring vectors */
255 	2,		/* maximum number of other vectors */
256 	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
257 	0,		/* "other" interrupt types enable mask */
258 	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
259 	| IXGBE_FLAG_RSS_CAPABLE
260 	| IXGBE_FLAG_VMDQ_CAPABLE)
261 };
262 
263 static adapter_info_t ixgbe_82599eb_cap = {
264 	128,		/* maximum number of rx queues */
265 	1,		/* minimum number of rx queues */
266 	128,		/* default number of rx queues */
267 	64,		/* maximum number of rx groups */
268 	1,		/* minimum number of rx groups */
269 	1,		/* default number of rx groups */
270 	128,		/* maximum number of tx queues */
271 	1,		/* minimum number of tx queues */
272 	8,		/* default number of tx queues */
273 	15500,		/* maximum MTU size */
274 	0xFF8,		/* maximum interrupt throttle rate */
275 	0,		/* minimum interrupt throttle rate */
276 	200,		/* default interrupt throttle rate */
277 	64,		/* maximum total msix vectors */
278 	16,		/* maximum number of ring vectors */
279 	2,		/* maximum number of other vectors */
280 	(IXGBE_EICR_LSC
281 	| IXGBE_EICR_GPI_SDP1
282 	| IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
283 
284 	(IXGBE_SDP1_GPIEN
285 	| IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
286 
287 	(IXGBE_FLAG_DCA_CAPABLE
288 	| IXGBE_FLAG_RSS_CAPABLE
289 	| IXGBE_FLAG_VMDQ_CAPABLE
290 	| IXGBE_FLAG_RSC_CAPABLE
291 	| IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
292 };
293 
294 /*
295  * Module Initialization Functions.
296  */
297 
298 int
299 _init(void)
300 {
301 	int status;
302 
303 	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
304 
305 	status = mod_install(&ixgbe_modlinkage);
306 
307 	if (status != DDI_SUCCESS) {
308 		mac_fini_ops(&ixgbe_dev_ops);
309 	}
310 
311 	return (status);
312 }
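
/*
 * A note on the pairing above (a reading of the code, not new behavior):
 * mac_init_ops() is undone with mac_fini_ops() both when mod_install()
 * fails in _init() and after a successful mod_remove() in _fini().
 * mac_init_ops() also lets the GLDv3 framework fill in parts of
 * ixgbe_dev_ops (such as devo_getinfo and several cb_ops entry points)
 * on the driver's behalf, which is why those slots can be left as stub
 * entries in the static definitions above.
 */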
313 
314 int
315 _fini(void)
316 {
317 	int status;
318 
319 	status = mod_remove(&ixgbe_modlinkage);
320 
321 	if (status == DDI_SUCCESS) {
322 		mac_fini_ops(&ixgbe_dev_ops);
323 	}
324 
325 	return (status);
326 }
327 
328 int
329 _info(struct modinfo *modinfop)
330 {
331 	int status;
332 
333 	status = mod_info(&ixgbe_modlinkage, modinfop);
334 
335 	return (status);
336 }
337 
338 /*
339  * ixgbe_attach - Driver attach.
340  *
341  * This function is the device-specific initialization entry
342  * point. This entry point is required and must be written.
343  * The DDI_ATTACH command must be provided in the attach entry
344  * point. When attach() is called with cmd set to DDI_ATTACH,
345  * all normal kernel services (such as kmem_alloc(9F)) are
346  * available for use by the driver.
347  *
348  * The attach() function will be called once for each instance
349  * of  the  device  on  the  system with cmd set to DDI_ATTACH.
350  * Until attach() succeeds, the only driver entry points which
351  * may be called are open(9E) and getinfo(9E).
352  */
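/*
 * A note on the error-handling pattern below: each setup step that
 * completes successfully sets an ATTACH_PROGRESS_* bit in
 * ixgbe->attach_progress, and every failure jumps to attach_fail, where
 * ixgbe_unconfigure() consults those bits and tears down only the steps
 * that actually completed.
 */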
353 static int
354 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
355 {
356 	ixgbe_t *ixgbe;
357 	struct ixgbe_osdep *osdep;
358 	struct ixgbe_hw *hw;
359 	int instance;
360 	char taskqname[32];
361 
362 	/*
363 	 * Check the command and perform corresponding operations
364 	 */
365 	switch (cmd) {
366 	default:
367 		return (DDI_FAILURE);
368 
369 	case DDI_RESUME:
370 		return (ixgbe_resume(devinfo));
371 
372 	case DDI_ATTACH:
373 		break;
374 	}
375 
376 	/* Get the device instance */
377 	instance = ddi_get_instance(devinfo);
378 
379 	/* Allocate memory for the instance data structure */
380 	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
381 
382 	ixgbe->dip = devinfo;
383 	ixgbe->instance = instance;
384 
385 	hw = &ixgbe->hw;
386 	osdep = &ixgbe->osdep;
387 	hw->back = osdep;
388 	osdep->ixgbe = ixgbe;
389 
390 	/* Attach the instance pointer to the dev_info data structure */
391 	ddi_set_driver_private(devinfo, ixgbe);
392 
393 	/*
394 	 * Initialize for fma support
395 	 */
396 	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
397 	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
398 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
399 	ixgbe_fm_init(ixgbe);
400 	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
401 
402 	/*
403 	 * Map PCI config space registers
404 	 */
405 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
406 		ixgbe_error(ixgbe, "Failed to map PCI configurations");
407 		goto attach_fail;
408 	}
409 	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
410 
411 	/*
412 	 * Identify the chipset family
413 	 */
414 	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
415 		ixgbe_error(ixgbe, "Failed to identify hardware");
416 		goto attach_fail;
417 	}
418 
419 	/*
420 	 * Map device registers
421 	 */
422 	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
423 		ixgbe_error(ixgbe, "Failed to map device registers");
424 		goto attach_fail;
425 	}
426 	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
427 
428 	/*
429 	 * Initialize driver parameters
430 	 */
431 	ixgbe_init_properties(ixgbe);
432 	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
433 
434 	/*
435 	 * Register interrupt callback
436 	 */
437 	if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
438 		ixgbe_error(ixgbe, "Failed to register interrupt callback");
439 		goto attach_fail;
440 	}
441 
442 	/*
443 	 * Allocate interrupts
444 	 */
445 	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
446 		ixgbe_error(ixgbe, "Failed to allocate interrupts");
447 		goto attach_fail;
448 	}
449 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
450 
451 	/*
452 	 * Allocate rx/tx rings based on the ring numbers.
453 	 * The actual numbers of rx/tx rings are decided by the number of
454 	 * allocated interrupt vectors, so we should allocate the rings after
455 	 * interrupts are allocated.
456 	 */
457 	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
458 		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
459 		goto attach_fail;
460 	}
461 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
462 
463 	/*
464 	 * Map rings to interrupt vectors
465 	 */
466 	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
467 		ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
468 		goto attach_fail;
469 	}
470 
471 	/*
472 	 * Add interrupt handlers
473 	 */
474 	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
475 		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
476 		goto attach_fail;
477 	}
478 	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
479 
480 	/*
481 	 * Create a taskq for sfp-change
482 	 */
483 	(void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
484 	if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
485 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
486 		ixgbe_error(ixgbe, "sfp_taskq create failed");
487 		goto attach_fail;
488 	}
489 	ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;
490 
491 	/*
492 	 * Create a taskq for over-temp
493 	 */
494 	(void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
495 	if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
496 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
497 		ixgbe_error(ixgbe, "overtemp_taskq create failed");
498 		goto attach_fail;
499 	}
500 	ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;
501 
502 	/*
503 	 * Initialize driver parameters
504 	 */
505 	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
506 		ixgbe_error(ixgbe, "Failed to initialize driver settings");
507 		goto attach_fail;
508 	}
509 
510 	/*
511 	 * Initialize mutexes for this device.
512 	 * Do this before enabling the interrupt handler and
513 	 * registering the softint, to avoid the condition where the
514 	 * interrupt handler can try to use an uninitialized mutex.
515 	 */
516 	ixgbe_init_locks(ixgbe);
517 	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
518 
519 	/*
520 	 * Initialize chipset hardware
521 	 */
522 	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
523 		ixgbe_error(ixgbe, "Failed to initialize adapter");
524 		goto attach_fail;
525 	}
526 	ixgbe->link_check_complete = B_FALSE;
527 	ixgbe->link_check_hrtime = gethrtime() +
528 	    (IXGBE_LINK_UP_TIME * 100000000ULL);
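	/*
	 * gethrtime() returns nanoseconds and 100000000ULL is 0.1 second,
	 * so the deadline above implies IXGBE_LINK_UP_TIME is a count of
	 * tenths of a second (an inference from the multiplier; the
	 * constant itself is defined in the driver headers).
	 */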
529 	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
530 
531 	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
532 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
533 		goto attach_fail;
534 	}
535 
536 	/*
537 	 * Initialize statistics
538 	 */
539 	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
540 		ixgbe_error(ixgbe, "Failed to initialize statistics");
541 		goto attach_fail;
542 	}
543 	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
544 
545 	/*
546 	 * Register the driver to the MAC
547 	 */
548 	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
549 		ixgbe_error(ixgbe, "Failed to register MAC");
550 		goto attach_fail;
551 	}
552 	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
553 	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
554 
555 	ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
556 	    IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
557 	if (ixgbe->periodic_id == 0) {
558 		ixgbe_error(ixgbe, "Failed to add the link check timer");
559 		goto attach_fail;
560 	}
561 	ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;
562 
563 	/*
564 	 * Now that mutex locks are initialized, and the chip is also
565 	 * initialized, enable interrupts.
566 	 */
567 	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
568 		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
569 		goto attach_fail;
570 	}
571 	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
572 
573 	ixgbe_log(ixgbe, "%s, %s", ixgbe_ident, ixgbe_version);
574 	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);
575 
576 	return (DDI_SUCCESS);
577 
578 attach_fail:
579 	ixgbe_unconfigure(devinfo, ixgbe);
580 	return (DDI_FAILURE);
581 }
582 
583 /*
584  * ixgbe_detach - Driver detach.
585  *
586  * The detach() function is the complement of the attach routine.
587  * If cmd is set to DDI_DETACH, detach() is used to remove  the
588  * state  associated  with  a  given  instance of a device node
589  * prior to the removal of that instance from the system.
590  *
591  * The detach() function will be called once for each  instance
592  * of the device for which there has been a successful attach()
593  * once there are no longer  any  opens  on  the  device.
594  *
595  * Interrupt routines are disabled and all memory allocated by this
596  * driver is freed.
597  */
598 static int
599 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
600 {
601 	ixgbe_t *ixgbe;
602 
603 	/*
604 	 * Check detach command
605 	 */
606 	switch (cmd) {
607 	default:
608 		return (DDI_FAILURE);
609 
610 	case DDI_SUSPEND:
611 		return (ixgbe_suspend(devinfo));
612 
613 	case DDI_DETACH:
614 		break;
615 	}
616 
617 	/*
618 	 * Get the pointer to the driver private data structure
619 	 */
620 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
621 	if (ixgbe == NULL)
622 		return (DDI_FAILURE);
623 
624 	/*
625 	 * If the device is still running, it needs to be stopped first.
626 	 * This check is necessary because under some specific circumstances,
627 	 * the detach routine can be called without stopping the interface
628 	 * first.
629 	 */
630 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
631 		atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
632 		mutex_enter(&ixgbe->gen_lock);
633 		ixgbe_stop(ixgbe, B_TRUE);
634 		mutex_exit(&ixgbe->gen_lock);
635 		/* Disable and stop the watchdog timer */
636 		ixgbe_disable_watchdog_timer(ixgbe);
637 	}
638 
639 	/*
640 	 * Check if there are still rx buffers held by the upper layer.
641 	 * If so, fail the detach.
642 	 */
643 	if (!ixgbe_rx_drain(ixgbe))
644 		return (DDI_FAILURE);
645 
646 	/*
647 	 * Do the remaining unconfigure routines
648 	 */
649 	ixgbe_unconfigure(devinfo, ixgbe);
650 
651 	return (DDI_SUCCESS);
652 }
653 
654 static void
655 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
656 {
657 	/*
658 	 * Disable interrupt
659 	 */
660 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
661 		(void) ixgbe_disable_intrs(ixgbe);
662 	}
663 
664 	/*
665 	 * remove the link check timer
666 	 */
667 	if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
668 		if (ixgbe->periodic_id != NULL) {
669 			ddi_periodic_delete(ixgbe->periodic_id);
670 			ixgbe->periodic_id = NULL;
671 		}
672 	}
673 
674 	/*
675 	 * Unregister MAC
676 	 */
677 	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
678 		(void) mac_unregister(ixgbe->mac_hdl);
679 	}
680 
681 	/*
682 	 * Free statistics
683 	 */
684 	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
685 		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
686 	}
687 
688 	/*
689 	 * Remove interrupt handlers
690 	 */
691 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
692 		ixgbe_rem_intr_handlers(ixgbe);
693 	}
694 
695 	/*
696 	 * Remove taskq for sfp-status-change
697 	 */
698 	if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
699 		ddi_taskq_destroy(ixgbe->sfp_taskq);
700 	}
701 
702 	/*
703 	 * Remove taskq for over-temp
704 	 */
705 	if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
706 		ddi_taskq_destroy(ixgbe->overtemp_taskq);
707 	}
708 
709 	/*
710 	 * Remove interrupts
711 	 */
712 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
713 		ixgbe_rem_intrs(ixgbe);
714 	}
715 
716 	/*
717 	 * Unregister interrupt callback handler
718 	 */
719 	if (ixgbe->cb_hdl != NULL)
720 		(void) ddi_cb_unregister(ixgbe->cb_hdl);
720 
721 	/*
722 	 * Remove driver properties
723 	 */
724 	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
725 		(void) ddi_prop_remove_all(devinfo);
726 	}
727 
728 	/*
729 	 * Stop the chipset
730 	 */
731 	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
732 		mutex_enter(&ixgbe->gen_lock);
733 		ixgbe_chip_stop(ixgbe);
734 		mutex_exit(&ixgbe->gen_lock);
735 	}
736 
737 	/*
738 	 * Free register handle
739 	 */
740 	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
741 		if (ixgbe->osdep.reg_handle != NULL)
742 			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
743 	}
744 
745 	/*
746 	 * Free PCI config handle
747 	 */
748 	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
749 		if (ixgbe->osdep.cfg_handle != NULL)
750 			pci_config_teardown(&ixgbe->osdep.cfg_handle);
751 	}
752 
753 	/*
754 	 * Free locks
755 	 */
756 	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
757 		ixgbe_destroy_locks(ixgbe);
758 	}
759 
760 	/*
761 	 * Free the rx/tx rings
762 	 */
763 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
764 		ixgbe_free_rings(ixgbe);
765 	}
766 
767 	/*
768 	 * Unregister FMA capabilities
769 	 */
770 	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
771 		ixgbe_fm_fini(ixgbe);
772 	}
773 
774 	/*
775 	 * Free the driver data structure
776 	 */
777 	kmem_free(ixgbe, sizeof (ixgbe_t));
778 
779 	ddi_set_driver_private(devinfo, NULL);
780 }
781 
782 /*
783  * ixgbe_register_mac - Register the driver and its function pointers with
784  * the GLD interface.
785  */
786 static int
787 ixgbe_register_mac(ixgbe_t *ixgbe)
788 {
789 	struct ixgbe_hw *hw = &ixgbe->hw;
790 	mac_register_t *mac;
791 	int status;
792 
793 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
794 		return (IXGBE_FAILURE);
795 
796 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
797 	mac->m_driver = ixgbe;
798 	mac->m_dip = ixgbe->dip;
799 	mac->m_src_addr = hw->mac.addr;
800 	mac->m_callbacks = &ixgbe_m_callbacks;
801 	mac->m_min_sdu = 0;
802 	mac->m_max_sdu = ixgbe->default_mtu;
803 	mac->m_margin = VLAN_TAGSZ;
804 	mac->m_priv_props = ixgbe_priv_props;
805 	mac->m_v12n = MAC_VIRT_LEVEL1;
806 
807 	status = mac_register(mac, &ixgbe->mac_hdl);
808 
809 	mac_free(mac);
810 
811 	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
812 }
813 
814 /*
815  * ixgbe_identify_hardware - Identify the type of the chipset.
816  */
817 static int
818 ixgbe_identify_hardware(ixgbe_t *ixgbe)
819 {
820 	struct ixgbe_hw *hw = &ixgbe->hw;
821 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
822 
823 	/*
824 	 * Get the device id
825 	 */
826 	hw->vendor_id =
827 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
828 	hw->device_id =
829 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
830 	hw->revision_id =
831 	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
832 	hw->subsystem_device_id =
833 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
834 	hw->subsystem_vendor_id =
835 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
836 
837 	/*
838 	 * Set the mac type of the adapter based on the device id
839 	 */
840 	if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
841 		return (IXGBE_FAILURE);
842 	}
843 
844 	/*
845 	 * Install adapter capabilities
846 	 */
847 	switch (hw->mac.type) {
848 	case ixgbe_mac_82598EB:
849 		IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
850 		ixgbe->capab = &ixgbe_82598eb_cap;
851 
852 		if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
853 			ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
854 			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
855 			ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
856 		}
857 		break;
858 
859 	case ixgbe_mac_82599EB:
860 		IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
861 		ixgbe->capab = &ixgbe_82599eb_cap;
862 
863 		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
864 			ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
865 			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
866 			ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
867 		}
868 		break;
869 
870 	default:
871 		IXGBE_DEBUGLOG_1(ixgbe,
872 		    "adapter not supported in ixgbe_identify_hardware(): %d\n",
873 		    hw->mac.type);
874 		return (IXGBE_FAILURE);
875 	}
876 
877 	return (IXGBE_SUCCESS);
878 }
879 
880 /*
881  * ixgbe_regs_map - Map the device registers.
882  */
884 static int
885 ixgbe_regs_map(ixgbe_t *ixgbe)
886 {
887 	dev_info_t *devinfo = ixgbe->dip;
888 	struct ixgbe_hw *hw = &ixgbe->hw;
889 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
890 	off_t mem_size;
891 
892 	/*
893 	 * First get the size of device registers to be mapped.
894 	 */
895 	if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
896 	    != DDI_SUCCESS) {
897 		return (IXGBE_FAILURE);
898 	}
899 
900 	/*
901 	 * Call ddi_regs_map_setup() to map registers
902 	 */
903 	if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
904 	    (caddr_t *)&hw->hw_addr, 0,
905 	    mem_size, &ixgbe_regs_acc_attr,
906 	    &osdep->reg_handle)) != DDI_SUCCESS) {
907 		return (IXGBE_FAILURE);
908 	}
909 
910 	return (IXGBE_SUCCESS);
911 }
912 
913 /*
914  * ixgbe_init_properties - Initialize driver properties.
915  */
916 static void
917 ixgbe_init_properties(ixgbe_t *ixgbe)
918 {
919 	/*
920 	 * Get conf file properties, including link settings
921 	 * jumbo frames, ring number, descriptor number, etc.
922 	 */
923 	ixgbe_get_conf(ixgbe);
924 
925 	ixgbe_init_params(ixgbe);
926 }
927 
928 /*
929  * ixgbe_init_driver_settings - Initialize driver settings.
930  *
931  * The settings include hardware function pointers, bus information,
932  * rx/tx rings settings, link state, and any other parameters that
933  * need to be setup during driver initialization.
934  */
935 static int
936 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
937 {
938 	struct ixgbe_hw *hw = &ixgbe->hw;
939 	dev_info_t *devinfo = ixgbe->dip;
940 	ixgbe_rx_ring_t *rx_ring;
941 	ixgbe_rx_group_t *rx_group;
942 	ixgbe_tx_ring_t *tx_ring;
943 	uint32_t rx_size;
944 	uint32_t tx_size;
945 	uint32_t ring_per_group;
946 	int i;
947 
948 	/*
949 	 * Initialize chipset specific hardware function pointers
950 	 */
951 	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
952 		return (IXGBE_FAILURE);
953 	}
954 
955 	/*
956 	 * Get the system page size
957 	 */
958 	ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);
959 
960 	/*
961 	 * Set rx buffer size
962 	 *
963 	 * The IP header alignment room is counted in the calculation.
964 	 * The rx buffer size is rounded up to a multiple of 1K, as
965 	 * required by the chipset hardware.
966 	 */
967 	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
968 	ixgbe->rx_buf_size = ((rx_size >> 10) +
969 	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
970 
971 	/*
972 	 * Set tx buffer size
973 	 */
974 	tx_size = ixgbe->max_frame_size;
975 	ixgbe->tx_buf_size = ((tx_size >> 10) +
976 	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
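	/*
	 * A worked example of the rounding above (assuming the default
	 * 1500-byte MTU, i.e. a max_frame_size of 1518): rx_size >> 10 is 1
	 * with a nonzero remainder, so rx_buf_size rounds up to
	 * 2 << 10 = 2048 bytes.  Each expression is equivalent to
	 * P2ROUNDUP(size, 1 << 10) from <sys/sysmacros.h>.
	 */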
977 
978 	/*
979 	 * Initialize rx/tx rings/groups parameters
980 	 */
981 	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
982 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
983 		rx_ring = &ixgbe->rx_rings[i];
984 		rx_ring->index = i;
985 		rx_ring->ixgbe = ixgbe;
986 		rx_ring->group_index = i / ring_per_group;
987 		rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
988 	}
989 
990 	for (i = 0; i < ixgbe->num_rx_groups; i++) {
991 		rx_group = &ixgbe->rx_groups[i];
992 		rx_group->index = i;
993 		rx_group->ixgbe = ixgbe;
994 	}
995 
996 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
997 		tx_ring = &ixgbe->tx_rings[i];
998 		tx_ring->index = i;
999 		tx_ring->ixgbe = ixgbe;
1000 		if (ixgbe->tx_head_wb_enable)
1001 			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
1002 		else
1003 			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
1004 
1005 		tx_ring->ring_size = ixgbe->tx_ring_size;
1006 		tx_ring->free_list_size = ixgbe->tx_ring_size +
1007 		    (ixgbe->tx_ring_size >> 1);
1008 	}
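	/*
	 * The free list above is sized at 1.5x the descriptor ring so that
	 * free tx control blocks remain available even while many packets
	 * are still awaiting recycling; the 1.5 ratio is a driver tuning
	 * choice rather than a hardware requirement.
	 */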
1009 
1010 	/*
1011 	 * Initialize values of interrupt throttling rate
1012 	 */
1013 	for (i = 1; i < MAX_INTR_VECTOR; i++)
1014 		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
1015 
1016 	/*
1017 	 * The initial link state should be "unknown"
1018 	 */
1019 	ixgbe->link_state = LINK_STATE_UNKNOWN;
1020 
1021 	return (IXGBE_SUCCESS);
1022 }
1023 
1024 /*
1025  * ixgbe_init_locks - Initialize locks.
1026  */
1027 static void
1028 ixgbe_init_locks(ixgbe_t *ixgbe)
1029 {
1030 	ixgbe_rx_ring_t *rx_ring;
1031 	ixgbe_tx_ring_t *tx_ring;
1032 	int i;
1033 
1034 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1035 		rx_ring = &ixgbe->rx_rings[i];
1036 		mutex_init(&rx_ring->rx_lock, NULL,
1037 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1038 	}
1039 
1040 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1041 		tx_ring = &ixgbe->tx_rings[i];
1042 		mutex_init(&tx_ring->tx_lock, NULL,
1043 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1044 		mutex_init(&tx_ring->recycle_lock, NULL,
1045 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1046 		mutex_init(&tx_ring->tcb_head_lock, NULL,
1047 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1048 		mutex_init(&tx_ring->tcb_tail_lock, NULL,
1049 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1050 	}
1051 
1052 	mutex_init(&ixgbe->gen_lock, NULL,
1053 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1054 
1055 	mutex_init(&ixgbe->watchdog_lock, NULL,
1056 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1057 }
1058 
1059 /*
1060  * ixgbe_destroy_locks - Destroy locks.
1061  */
1062 static void
1063 ixgbe_destroy_locks(ixgbe_t *ixgbe)
1064 {
1065 	ixgbe_rx_ring_t *rx_ring;
1066 	ixgbe_tx_ring_t *tx_ring;
1067 	int i;
1068 
1069 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1070 		rx_ring = &ixgbe->rx_rings[i];
1071 		mutex_destroy(&rx_ring->rx_lock);
1072 	}
1073 
1074 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1075 		tx_ring = &ixgbe->tx_rings[i];
1076 		mutex_destroy(&tx_ring->tx_lock);
1077 		mutex_destroy(&tx_ring->recycle_lock);
1078 		mutex_destroy(&tx_ring->tcb_head_lock);
1079 		mutex_destroy(&tx_ring->tcb_tail_lock);
1080 	}
1081 
1082 	mutex_destroy(&ixgbe->gen_lock);
1083 	mutex_destroy(&ixgbe->watchdog_lock);
1084 }
1085 
1086 static int
1087 ixgbe_resume(dev_info_t *devinfo)
1088 {
1089 	ixgbe_t *ixgbe;
1090 	int i;
1091 
1092 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1093 	if (ixgbe == NULL)
1094 		return (DDI_FAILURE);
1095 
1096 	mutex_enter(&ixgbe->gen_lock);
1097 
1098 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1099 		if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1100 			mutex_exit(&ixgbe->gen_lock);
1101 			return (DDI_FAILURE);
1102 		}
1103 
1104 		/*
1105 		 * Enable and start the watchdog timer
1106 		 */
1107 		ixgbe_enable_watchdog_timer(ixgbe);
1108 	}
1109 
1110 	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);
1111 
1112 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1113 		for (i = 0; i < ixgbe->num_tx_rings; i++) {
1114 			mac_tx_ring_update(ixgbe->mac_hdl,
1115 			    ixgbe->tx_rings[i].ring_handle);
1116 		}
1117 	}
1118 
1119 	mutex_exit(&ixgbe->gen_lock);
1120 
1121 	return (DDI_SUCCESS);
1122 }
1123 
1124 static int
1125 ixgbe_suspend(dev_info_t *devinfo)
1126 {
1127 	ixgbe_t *ixgbe;
1128 
1129 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1130 	if (ixgbe == NULL)
1131 		return (DDI_FAILURE);
1132 
1133 	mutex_enter(&ixgbe->gen_lock);
1134 
1135 	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
1136 	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1137 		mutex_exit(&ixgbe->gen_lock);
1138 		return (DDI_SUCCESS);
1139 	}
1140 	ixgbe_stop(ixgbe, B_FALSE);
1141 
1142 	mutex_exit(&ixgbe->gen_lock);
1143 
1144 	/*
1145 	 * Disable and stop the watchdog timer
1146 	 */
1147 	ixgbe_disable_watchdog_timer(ixgbe);
1148 
1149 	return (DDI_SUCCESS);
1150 }
1151 
1152 /*
1153  * ixgbe_init - Initialize the device.
1154  */
1155 static int
1156 ixgbe_init(ixgbe_t *ixgbe)
1157 {
1158 	struct ixgbe_hw *hw = &ixgbe->hw;
1159 
1160 	mutex_enter(&ixgbe->gen_lock);
1161 
1162 	/*
1163 	 * Reset chipset to put the hardware in a known state
1164 	 * before we try to do anything with the eeprom.
1165 	 */
1166 	if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
1167 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1168 		goto init_fail;
1169 	}
1170 
1171 	/*
1172 	 * Need to init eeprom before validating the checksum.
1173 	 */
1174 	if (ixgbe_init_eeprom_params(hw) < 0) {
1175 		ixgbe_error(ixgbe,
1176 		    "Unable to intitialize the eeprom interface.");
1177 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1178 		goto init_fail;
1179 	}
1180 
1181 	/*
1182 	 * NVM validation
1183 	 */
1184 	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1185 		/*
1186 		 * Some PCI-E parts fail the first check due to
1187 		 * the link being in sleep state.  Call it again,
1188 		 * if it fails a second time it's a real issue.
1189 		 */
1190 		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1191 			ixgbe_error(ixgbe,
1192 			    "Invalid NVM checksum. Please contact "
1193 			    "the vendor to update the NVM.");
1194 			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1195 			goto init_fail;
1196 		}
1197 	}
1198 
1199 	/*
1200 	 * Set up default flow control thresholds; whether flow control is
1201 	 * enabled, and its type, are controlled by ixgbe.conf.
1202 	 */
1203 	hw->fc.high_water = DEFAULT_FCRTH;
1204 	hw->fc.low_water = DEFAULT_FCRTL;
1205 	hw->fc.pause_time = DEFAULT_FCPAUSE;
1206 	hw->fc.send_xon = B_TRUE;
1207 
1208 	/*
1209 	 * Initialize link settings
1210 	 */
1211 	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1212 
1213 	/*
1214 	 * Initialize the chipset hardware
1215 	 */
1216 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1217 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1218 		goto init_fail;
1219 	}
1220 
1221 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1222 		goto init_fail;
1223 	}
1224 
1225 	mutex_exit(&ixgbe->gen_lock);
1226 	return (IXGBE_SUCCESS);
1227 
1228 init_fail:
1229 	/*
1230 	 * Reset PHY
1231 	 */
1232 	(void) ixgbe_reset_phy(hw);
1233 
1234 	mutex_exit(&ixgbe->gen_lock);
1235 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1236 	return (IXGBE_FAILURE);
1237 }
1238 
1239 /*
1240  * ixgbe_chip_start - Initialize and start the chipset hardware.
1241  */
1242 static int
1243 ixgbe_chip_start(ixgbe_t *ixgbe)
1244 {
1245 	struct ixgbe_hw *hw = &ixgbe->hw;
1246 	int ret_val, i;
1247 
1248 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1249 
1250 	/*
1251 	 * Get the mac address.
1252 	 * This function should handle the SPARC case correctly.
1253 	 */
1254 	if (!ixgbe_find_mac_address(ixgbe)) {
1255 		ixgbe_error(ixgbe, "Failed to get the mac address");
1256 		return (IXGBE_FAILURE);
1257 	}
1258 
1259 	/*
1260 	 * Validate the mac address
1261 	 */
1262 	(void) ixgbe_init_rx_addrs(hw);
1263 	if (!is_valid_mac_addr(hw->mac.addr)) {
1264 		ixgbe_error(ixgbe, "Invalid mac address");
1265 		return (IXGBE_FAILURE);
1266 	}
1267 
1268 	/*
1269 	 * Configure/Initialize hardware
1270 	 */
1271 	ret_val = ixgbe_init_hw(hw);
1272 	if (ret_val != IXGBE_SUCCESS) {
1273 		if (ret_val == IXGBE_ERR_EEPROM_VERSION) {
1274 			ixgbe_error(ixgbe,
1275 			    "This 82599 device is pre-release and contains"
1276 			    " outdated firmware, please contact your hardware"
1277 			    " vendor for a replacement.");
1278 		} else {
1279 			ixgbe_error(ixgbe, "Failed to initialize hardware");
1280 			return (IXGBE_FAILURE);
1281 		}
1282 	}
1283 
1284 	/*
1285 	 * Re-enable relaxed ordering for performance.  It is disabled
1286 	 * by default in the hardware init.
1287 	 */
1288 	if (ixgbe->relax_order_enable == B_TRUE)
1289 		ixgbe_enable_relaxed_ordering(hw);
1290 
1291 	/*
1292 	 * Setup adapter interrupt vectors
1293 	 */
1294 	ixgbe_setup_adapter_vector(ixgbe);
1295 
1296 	/*
1297 	 * Initialize unicast addresses.
1298 	 */
1299 	ixgbe_init_unicst(ixgbe);
1300 
1301 	/*
1302 	 * Setup and initialize the mctable structures.
1303 	 */
1304 	ixgbe_setup_multicst(ixgbe);
1305 
1306 	/*
1307 	 * Set interrupt throttling rate
1308 	 */
1309 	for (i = 0; i < ixgbe->intr_cnt; i++) {
1310 		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
1311 	}
1312 
1313 	/*
1314 	 * Save the state of the phy
1315 	 */
1316 	ixgbe_get_hw_state(ixgbe);
1317 
1318 	/*
1319 	 * Make sure driver has control
1320 	 */
1321 	ixgbe_get_driver_control(hw);
1322 
1323 	return (IXGBE_SUCCESS);
1324 }
1325 
1326 /*
1327  * ixgbe_chip_stop - Stop the chipset hardware
1328  */
1329 static void
1330 ixgbe_chip_stop(ixgbe_t *ixgbe)
1331 {
1332 	struct ixgbe_hw *hw = &ixgbe->hw;
1333 
1334 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1335 
1336 	/*
1337 	 * Tell firmware driver is no longer in control
1338 	 */
1339 	ixgbe_release_driver_control(hw);
1340 
1341 	/*
1342 	 * Reset the chipset
1343 	 */
1344 	(void) ixgbe_reset_hw(hw);
1345 
1346 	/*
1347 	 * Reset PHY
1348 	 */
1349 	(void) ixgbe_reset_phy(hw);
1350 }
1351 
1352 /*
1353  * ixgbe_reset - Reset the chipset and re-start the driver.
1354  *
1355  * It involves stopping and re-starting the chipset,
1356  * and re-configuring the rx/tx rings.
1357  */
1358 static int
1359 ixgbe_reset(ixgbe_t *ixgbe)
1360 {
1361 	int i;
1362 
1363 	/*
1364 	 * Disable and stop the watchdog timer
1365 	 */
1366 	ixgbe_disable_watchdog_timer(ixgbe);
1367 
1368 	mutex_enter(&ixgbe->gen_lock);
1369 
1370 	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
1371 	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
1372 
1373 	ixgbe_stop(ixgbe, B_FALSE);
1374 
1375 	if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1376 		mutex_exit(&ixgbe->gen_lock);
1377 		return (IXGBE_FAILURE);
1378 	}
1379 
1380 	/*
1381 	 * After resetting, need to recheck the link status.
1382 	 */
1383 	ixgbe->link_check_complete = B_FALSE;
1384 	ixgbe->link_check_hrtime = gethrtime() +
1385 	    (IXGBE_LINK_UP_TIME * 100000000ULL);
1386 
1387 	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);
1388 
1389 	if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
1390 		for (i = 0; i < ixgbe->num_tx_rings; i++) {
1391 			mac_tx_ring_update(ixgbe->mac_hdl,
1392 			    ixgbe->tx_rings[i].ring_handle);
1393 		}
1394 	}
1395 
1396 	mutex_exit(&ixgbe->gen_lock);
1397 
1398 	/*
1399 	 * Enable and start the watchdog timer
1400 	 */
1401 	ixgbe_enable_watchdog_timer(ixgbe);
1402 
1403 	return (IXGBE_SUCCESS);
1404 }
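
/*
 * Note that ixgbe_reset() passes B_FALSE to both ixgbe_stop() and
 * ixgbe_start() above, so the reset path reuses the existing rx/tx DMA
 * buffers rather than freeing and reallocating them; only the chipset
 * state and the ring setup are rebuilt.
 */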
1405 
1406 /*
1407  * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
1408  */
1409 static void
1410 ixgbe_tx_clean(ixgbe_t *ixgbe)
1411 {
1412 	ixgbe_tx_ring_t *tx_ring;
1413 	tx_control_block_t *tcb;
1414 	link_list_t pending_list;
1415 	uint32_t desc_num;
1416 	int i, j;
1417 
1418 	LINK_LIST_INIT(&pending_list);
1419 
1420 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1421 		tx_ring = &ixgbe->tx_rings[i];
1422 
1423 		mutex_enter(&tx_ring->recycle_lock);
1424 
1425 		/*
1426 		 * Clean the pending tx data - the pending packets in the
1427 		 * work_list that have no chance of being transmitted again.
1428 		 *
1429 		 * We must ensure the chipset is stopped or the link is down
1430 		 * before cleaning the transmit packets.
1431 		 */
1432 		desc_num = 0;
1433 		for (j = 0; j < tx_ring->ring_size; j++) {
1434 			tcb = tx_ring->work_list[j];
1435 			if (tcb != NULL) {
1436 				desc_num += tcb->desc_num;
1437 
1438 				tx_ring->work_list[j] = NULL;
1439 
1440 				ixgbe_free_tcb(tcb);
1441 
1442 				LIST_PUSH_TAIL(&pending_list, &tcb->link);
1443 			}
1444 		}
1445 
1446 		if (desc_num > 0) {
1447 			atomic_add_32(&tx_ring->tbd_free, desc_num);
1448 			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
1449 
1450 			/*
1451 			 * Reset the head and tail pointers of the tbd ring;
1452 			 * Reset the writeback head if it's enable.
1453 			 */
1454 			tx_ring->tbd_head = 0;
1455 			tx_ring->tbd_tail = 0;
1456 			if (ixgbe->tx_head_wb_enable)
1457 				*tx_ring->tbd_head_wb = 0;
1458 
1459 			IXGBE_WRITE_REG(&ixgbe->hw,
1460 			    IXGBE_TDH(tx_ring->index), 0);
1461 			IXGBE_WRITE_REG(&ixgbe->hw,
1462 			    IXGBE_TDT(tx_ring->index), 0);
1463 		}
1464 
1465 		mutex_exit(&tx_ring->recycle_lock);
1466 
1467 		/*
1468 		 * Add the tx control blocks in the pending list to
1469 		 * the free list.
1470 		 */
1471 		ixgbe_put_free_list(tx_ring, &pending_list);
1472 	}
1473 }
1474 
1475 /*
1476  * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1477  * transmitted.
1478  */
1479 static boolean_t
1480 ixgbe_tx_drain(ixgbe_t *ixgbe)
1481 {
1482 	ixgbe_tx_ring_t *tx_ring;
1483 	boolean_t done;
1484 	int i, j;
1485 
1486 	/*
1487 	 * Wait up to TX_DRAIN_TIME milliseconds (one millisecond per
1488 	 * iteration) for pending tx packets to be transmitted.
1489 	 *
1490 	 * Check the counter tbd_free to see if transmission is done.
1491 	 * No lock protection is needed here.
1492 	 *
1493 	 * Return B_TRUE if all pending packets have been transmitted;
1494 	 * Otherwise return B_FALSE;
1495 	 */
1496 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1497 
1498 		done = B_TRUE;
1499 		for (j = 0; j < ixgbe->num_tx_rings; j++) {
1500 			tx_ring = &ixgbe->tx_rings[j];
1501 			done = done &&
1502 			    (tx_ring->tbd_free == tx_ring->ring_size);
1503 		}
1504 
1505 		if (done)
1506 			break;
1507 
1508 		msec_delay(1);
1509 	}
1510 
1511 	return (done);
1512 }
1513 
1514 /*
1515  * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1516  */
1517 static boolean_t
1518 ixgbe_rx_drain(ixgbe_t *ixgbe)
1519 {
1520 	boolean_t done = B_TRUE;
1521 	int i;
1522 
1523 	/*
1524 	 * Poll for up to RX_DRAIN_TIME milliseconds, checking whether the
1525 	 * rx buffers held by the upper layer have been released.
1526 	 *
1527 	 * Check the counter rcb_pending to see if all pending buffers have
1528 	 * been released. No lock protection is needed here.
1529 	 *
1530 	 * Return B_TRUE if all pending buffers have been released;
1531 	 * Otherwise return B_FALSE;
1532 	 */
1533 	for (i = 0; i < RX_DRAIN_TIME; i++) {
1534 		done = (ixgbe->rcb_pending == 0);
1535 
1536 		if (done)
1537 			break;
1538 
1539 		msec_delay(1);
1540 	}
1541 
1542 	return (done);
1543 }
1544 
1545 /*
1546  * ixgbe_start - Start the driver/chipset.
1547  */
1548 int
1549 ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
1550 {
1551 	int i;
1552 
1553 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1554 
1555 	if (alloc_buffer) {
1556 		if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
1557 			ixgbe_error(ixgbe,
1558 			    "Failed to allocate software receive rings");
1559 			return (IXGBE_FAILURE);
1560 		}
1561 
1562 		/* Allocate buffers for all the rx/tx rings */
1563 		if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
1564 			ixgbe_error(ixgbe, "Failed to allocate DMA resource");
1565 			return (IXGBE_FAILURE);
1566 		}
1567 
1568 		ixgbe->tx_ring_init = B_TRUE;
1569 	} else {
1570 		ixgbe->tx_ring_init = B_FALSE;
1571 	}
1572 
1573 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1574 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1575 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1576 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1577 
1578 	/*
1579 	 * Start the chipset hardware
1580 	 */
1581 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1582 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1583 		goto start_failure;
1584 	}
1585 
1586 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1587 		goto start_failure;
1588 	}
1589 
1590 	/*
1591 	 * Setup the rx/tx rings
1592 	 */
1593 	ixgbe_setup_rings(ixgbe);
1594 
1595 	/*
1596 	 * ixgbe_start() will be called when resetting, however if reset
1597 	 * happens, we need to clear the ERROR, STALL and OVERTEMP flags
1598 	 * before enabling the interrupts.
1599 	 */
1600 	atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
1601 	    | IXGBE_STALL | IXGBE_OVERTEMP));
1602 
1603 	/*
1604 	 * Enable adapter interrupts
1605 	 * The interrupts must be enabled after the driver state is START
1606 	 */
1607 	ixgbe_enable_adapter_interrupts(ixgbe);
1608 
1609 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1610 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1611 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1612 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1613 
1614 	return (IXGBE_SUCCESS);
1615 
1616 start_failure:
1617 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1618 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1619 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1620 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1621 
1622 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1623 
1624 	return (IXGBE_FAILURE);
1625 }
1626 
1627 /*
1628  * ixgbe_stop - Stop the driver/chipset.
1629  */
1630 void
1631 ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
1632 {
1633 	int i;
1634 
1635 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1636 
1637 	/*
1638 	 * Disable the adapter interrupts
1639 	 */
1640 	ixgbe_disable_adapter_interrupts(ixgbe);
1641 
1642 	/*
1643 	 * Drain the pending tx packets
1644 	 */
1645 	(void) ixgbe_tx_drain(ixgbe);
1646 
1647 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1648 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1649 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1650 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1651 
1652 	/*
1653 	 * Stop the chipset hardware
1654 	 */
1655 	ixgbe_chip_stop(ixgbe);
1656 
1657 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1658 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1659 	}
1660 
1661 	/*
1662 	 * Clean the pending tx data/resources
1663 	 */
1664 	ixgbe_tx_clean(ixgbe);
1665 
1666 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1667 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1668 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1669 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1670 
1671 	if (ixgbe->link_state == LINK_STATE_UP) {
1672 		ixgbe->link_state = LINK_STATE_UNKNOWN;
1673 		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
1674 	}
1675 
1676 	if (free_buffer) {
1677 		/*
1678 		 * Release the DMA/memory resources of rx/tx rings
1679 		 */
1680 		ixgbe_free_dma(ixgbe);
1681 		ixgbe_free_rx_data(ixgbe);
1682 	}
1683 }
1684 
1685 /*
1686  * ixgbe_cbfunc - Driver interface for generic DDI callbacks
1687  */
1688 /* ARGSUSED */
1689 static int
1690 ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
1691     void *arg1, void *arg2)
1692 {
1693 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
1694 
1695 	int count;		/* IRM callback: vectors to add/remove */
1696 
1697 	switch (cbaction) {
1698 	case DDI_CB_INTR_ADD:
1699 	case DDI_CB_INTR_REMOVE:
1700 		count = (int)(uintptr_t)cbarg;
1701 		ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
1702 		DTRACE_PROBE2(ixgbe__irm__callback, int, count,
1703 		    int, ixgbe->intr_cnt);
1704 		if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
1705 		    DDI_SUCCESS) {
1706 			ixgbe_error(ixgbe,
1707 			    "IRM CB: Failed to adjust interrupts");
1708 			goto cb_fail;
1709 		}
1710 		break;
1711 	default:
1712 		IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
1713 		    cbaction);
1714 		return (DDI_ENOTSUP);
1715 	}
1716 	return (DDI_SUCCESS);
1717 cb_fail:
1718 	return (DDI_FAILURE);
1719 }
1720 
1721 /*
1722  * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request.
1723  */
1724 static int
1725 ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
1726 {
1727 	int i, rc, actual;
1728 
1729 	if (count == 0)
1730 		return (DDI_SUCCESS);
1731 
1732 	if ((cbaction == DDI_CB_INTR_ADD &&
1733 	    ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
1734 	    (cbaction == DDI_CB_INTR_REMOVE &&
1735 	    ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
1736 		return (DDI_FAILURE);
1737 
1738 	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1739 		return (DDI_FAILURE);
1740 	}
1741 
1742 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1743 		mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
1744 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1745 		mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);
1746 
1747 	mutex_enter(&ixgbe->gen_lock);
1748 	ixgbe->ixgbe_state &= ~IXGBE_STARTED;
1749 	ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
1750 	ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
1751 	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
1752 
1753 	ixgbe_stop(ixgbe, B_FALSE);
1754 	/*
1755 	 * Disable interrupts
1756 	 */
1757 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1758 		rc = ixgbe_disable_intrs(ixgbe);
1759 		ASSERT(rc == IXGBE_SUCCESS);
1760 	}
1761 	ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;
1762 
1763 	/*
1764 	 * Remove interrupt handlers
1765 	 */
1766 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1767 		ixgbe_rem_intr_handlers(ixgbe);
1768 	}
1769 	ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;
1770 
1771 	/*
1772 	 * Clear vect_map
1773 	 */
1774 	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
1775 	switch (cbaction) {
1776 	case DDI_CB_INTR_ADD:
1777 		rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
1778 		    DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
1779 		    DDI_INTR_ALLOC_NORMAL);
1780 		if (rc != DDI_SUCCESS || actual != count) {
1781 			ixgbe_log(ixgbe, "Adjust interrupts failed. "
1782 			    "return: %d, irm cb size: %d, actual: %d",
1783 			    rc, count, actual);
1784 			goto intr_adjust_fail;
1785 		}
1786 		ixgbe->intr_cnt += count;
1787 		break;
1788 
1789 	case DDI_CB_INTR_REMOVE:
1790 		for (i = ixgbe->intr_cnt - count;
1791 		    i < ixgbe->intr_cnt; i ++) {
1792 			rc = ddi_intr_free(ixgbe->htable[i]);
1793 			ixgbe->htable[i] = NULL;
1794 			if (rc != DDI_SUCCESS) {
1795 				ixgbe_log(ixgbe, "Adjust interrupts failed. "
1796 				    "return: %d, irm cb size: %d",
1797 				    rc, count);
1798 				goto intr_adjust_fail;
1799 			}
1800 		}
1801 		ixgbe->intr_cnt -= count;
1802 		break;
1803 	}
1804 
1805 	/*
1806 	 * Get priority for first vector, assume remaining are all the same
1807 	 */
1808 	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
1809 	if (rc != DDI_SUCCESS) {
1810 		ixgbe_log(ixgbe,
1811 		    "Get interrupt priority failed: %d", rc);
1812 		goto intr_adjust_fail;
1813 	}
1814 	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
1815 	if (rc != DDI_SUCCESS) {
1816 		ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc);
1817 		goto intr_adjust_fail;
1818 	}
1819 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
1820 
1821 	/*
1822 	 * Map rings to interrupt vectors
1823 	 */
1824 	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
1825 		ixgbe_error(ixgbe,
1826 		    "IRM CB: Failed to map interrupts to vectors");
1827 		goto intr_adjust_fail;
1828 	}
1829 
1830 	/*
1831 	 * Add interrupt handlers
1832 	 */
1833 	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
1834 		ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers");
1835 		goto intr_adjust_fail;
1836 	}
1837 	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
1838 
1839 	/*
1840 	 * Now that mutex locks are initialized, and the chip is also
1841 	 * initialized, enable interrupts.
1842 	 */
1843 	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
1844 		ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts");
1845 		goto intr_adjust_fail;
1846 	}
1847 	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
1848 	if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1849 		ixgbe_error(ixgbe, "IRM CB: Failed to start");
1850 		goto intr_adjust_fail;
1851 	}
1852 	ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST;
1853 	ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
1854 	ixgbe->ixgbe_state |= IXGBE_STARTED;
1855 	mutex_exit(&ixgbe->gen_lock);
1856 
1857 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1858 		mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle,
1859 		    ixgbe->htable[ixgbe->rx_rings[i].intr_vector]);
1860 	}
1861 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1862 		mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle,
1863 		    ixgbe->htable[ixgbe->tx_rings[i].intr_vector]);
1864 	}
1865 
1866 	/* Wakeup all Tx rings */
1867 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1868 		mac_tx_ring_update(ixgbe->mac_hdl,
1869 		    ixgbe->tx_rings[i].ring_handle);
1870 	}
1871 
1872 	IXGBE_DEBUGLOG_3(ixgbe,
1873 	    "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).",
1874 	    ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max);
1875 	return (DDI_SUCCESS);
1876 
1877 intr_adjust_fail:
1878 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1879 	mutex_exit(&ixgbe->gen_lock);
1880 	return (DDI_FAILURE);
1881 }
1882 
1883 /*
1884  * ixgbe_intr_cb_register - Register interrupt callback function.
1885  */
1886 static int
1887 ixgbe_intr_cb_register(ixgbe_t *ixgbe)
1888 {
1889 	if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc,
1890 	    ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) {
1891 		return (IXGBE_FAILURE);
1892 	}
1893 	IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered.");
1894 	return (IXGBE_SUCCESS);
1895 }
1896 
1897 /*
1898  * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
1899  */
1900 static int
1901 ixgbe_alloc_rings(ixgbe_t *ixgbe)
1902 {
1903 	/*
1904 	 * Allocate memory space for rx rings
1905 	 */
1906 	ixgbe->rx_rings = kmem_zalloc(
1907 	    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
1908 	    KM_NOSLEEP);
1909 
1910 	if (ixgbe->rx_rings == NULL) {
1911 		return (IXGBE_FAILURE);
1912 	}
1913 
1914 	/*
1915 	 * Allocate memory space for tx rings
1916 	 */
1917 	ixgbe->tx_rings = kmem_zalloc(
1918 	    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
1919 	    KM_NOSLEEP);
1920 
1921 	if (ixgbe->tx_rings == NULL) {
1922 		kmem_free(ixgbe->rx_rings,
1923 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1924 		ixgbe->rx_rings = NULL;
1925 		return (IXGBE_FAILURE);
1926 	}
1927 
1928 	/*
1929 	 * Allocate memory space for rx ring groups
1930 	 */
1931 	ixgbe->rx_groups = kmem_zalloc(
1932 	    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
1933 	    KM_NOSLEEP);
1934 
1935 	if (ixgbe->rx_groups == NULL) {
1936 		kmem_free(ixgbe->rx_rings,
1937 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1938 		kmem_free(ixgbe->tx_rings,
1939 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1940 		ixgbe->rx_rings = NULL;
1941 		ixgbe->tx_rings = NULL;
1942 		return (IXGBE_FAILURE);
1943 	}
1944 
1945 	return (IXGBE_SUCCESS);
1946 }
1947 
1948 /*
1949  * ixgbe_free_rings - Free the memory space of rx/tx rings.
1950  */
1951 static void
1952 ixgbe_free_rings(ixgbe_t *ixgbe)
1953 {
1954 	if (ixgbe->rx_rings != NULL) {
1955 		kmem_free(ixgbe->rx_rings,
1956 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1957 		ixgbe->rx_rings = NULL;
1958 	}
1959 
1960 	if (ixgbe->tx_rings != NULL) {
1961 		kmem_free(ixgbe->tx_rings,
1962 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1963 		ixgbe->tx_rings = NULL;
1964 	}
1965 
1966 	if (ixgbe->rx_groups != NULL) {
1967 		kmem_free(ixgbe->rx_groups,
1968 		    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
1969 		ixgbe->rx_groups = NULL;
1970 	}
1971 }
1972 
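/*
 * ixgbe_alloc_rx_data - Allocate software receive data for all rx rings.
 */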
1973 static int
1974 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
1975 {
1976 	ixgbe_rx_ring_t *rx_ring;
1977 	int i;
1978 
1979 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1980 		rx_ring = &ixgbe->rx_rings[i];
1981 		if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
1982 			goto alloc_rx_rings_failure;
1983 	}
1984 	return (IXGBE_SUCCESS);
1985 
1986 alloc_rx_rings_failure:
1987 	ixgbe_free_rx_data(ixgbe);
1988 	return (IXGBE_FAILURE);
1989 }
1990 
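/*
 * ixgbe_free_rx_data - Free the software receive data of all rx rings.
 */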
1991 static void
1992 ixgbe_free_rx_data(ixgbe_t *ixgbe)
1993 {
1994 	ixgbe_rx_ring_t *rx_ring;
1995 	ixgbe_rx_data_t *rx_data;
1996 	int i;
1997 
1998 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1999 		rx_ring = &ixgbe->rx_rings[i];
2000 
2001 		mutex_enter(&ixgbe->rx_pending_lock);
2002 		rx_data = rx_ring->rx_data;
2003 
2004 		if (rx_data != NULL) {
2005 			rx_data->flag |= IXGBE_RX_STOPPED;
2006 
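			/*
			 * If any rx control blocks are still held by the
			 * upper layer (rcb_pending != 0), only mark the
			 * ring stopped here; the data is freed when the
			 * last loaned buffer is returned.
			 */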
2007 			if (rx_data->rcb_pending == 0) {
2008 				ixgbe_free_rx_ring_data(rx_data);
2009 				rx_ring->rx_data = NULL;
2010 			}
2011 		}
2012 
2013 		mutex_exit(&ixgbe->rx_pending_lock);
2014 	}
2015 }
2016 
2017 /*
2018  * ixgbe_setup_rings - Setup rx/tx rings.
2019  */
2020 static void
2021 ixgbe_setup_rings(ixgbe_t *ixgbe)
2022 {
2023 	/*
2024 	 * Setup the rx/tx rings, including the following:
2025 	 *
2026 	 * 1. Setup the descriptor ring and the control block buffers;
2027 	 * 2. Initialize necessary registers for receive/transmit;
2028 	 * 3. Initialize software pointers/parameters for receive/transmit;
2029 	 */
2030 	ixgbe_setup_rx(ixgbe);
2031 
2032 	ixgbe_setup_tx(ixgbe);
2033 }
2034 
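/*
 * ixgbe_setup_rx_ring - Setup the descriptors and registers of an rx ring.
 */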
2035 static void
2036 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
2037 {
2038 	ixgbe_t *ixgbe = rx_ring->ixgbe;
2039 	ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
2040 	struct ixgbe_hw *hw = &ixgbe->hw;
2041 	rx_control_block_t *rcb;
2042 	union ixgbe_adv_rx_desc	*rbd;
2043 	uint32_t size;
2044 	uint32_t buf_low;
2045 	uint32_t buf_high;
2046 	uint32_t reg_val;
2047 	int i;
2048 
2049 	ASSERT(mutex_owned(&rx_ring->rx_lock));
2050 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2051 
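	/*
	 * Point each receive descriptor at the DMA address of its
	 * pre-allocated receive buffer.
	 */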
2052 	for (i = 0; i < ixgbe->rx_ring_size; i++) {
2053 		rcb = rx_data->work_list[i];
2054 		rbd = &rx_data->rbd_ring[i];
2055 
2056 		rbd->read.pkt_addr = rcb->rx_buf.dma_address;
2057 		rbd->read.hdr_addr = 0;
2058 	}
2059 
2060 	/*
2061 	 * Initialize the length register
2062 	 */
2063 	size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
2064 	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size);
2065 
2066 	/*
2067 	 * Initialize the base address registers
2068 	 */
2069 	buf_low = (uint32_t)rx_data->rbd_area.dma_address;
2070 	buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
2071 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high);
2072 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low);
2073 
2074 	/*
2075 	 * Setup head & tail pointers
2076 	 */
2077 	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index),
2078 	    rx_data->ring_size - 1);
2079 	IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2080 
2081 	rx_data->rbd_next = 0;
2082 	rx_data->lro_first = 0;
2083 
2084 	/*
2085 	 * Setup the Receive Descriptor Control Register (RXDCTL)
2086 	 * PTHRESH=32 descriptors (half the internal cache)
2087 	 * HTHRESH=0 descriptors (to minimize latency on fetch)
2088 	 * WTHRESH defaults to 1 (writeback each descriptor)
2089 	 */
2090 	reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2091 	reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */
2092 
2093 	/* Not a valid value for 82599 */
2094 	if (hw->mac.type < ixgbe_mac_82599EB) {
2095 		reg_val |= 0x0020;	/* pthresh */
2096 	}
2097 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2098 
2099 	if (hw->mac.type == ixgbe_mac_82599EB) {
2100 		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2101 		reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2102 		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2103 	}
2104 
2105 	/*
2106 	 * Setup the Split and Replication Receive Control Register.
2107 	 * Set the rx buffer size and the advanced descriptor type.
2108 	 */
2109 	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2110 	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
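	/*
	 * DROP_EN lets the hardware drop incoming packets when this ring
	 * has no free descriptors, presumably so that one full queue
	 * cannot back up the shared packet buffer.
	 */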
2111 	reg_val |= IXGBE_SRRCTL_DROP_EN;
2112 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2113 }
2114 
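/*
 * ixgbe_setup_rx - Setup the global receive registers and all rx rings.
 */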
2115 static void
2116 ixgbe_setup_rx(ixgbe_t *ixgbe)
2117 {
2118 	ixgbe_rx_ring_t *rx_ring;
2119 	struct ixgbe_hw *hw = &ixgbe->hw;
2120 	uint32_t reg_val;
2121 	uint32_t ring_mapping;
2122 	uint32_t i, index;
2123 	uint32_t psrtype_rss_bit;
2124 
2125 	/* PSRTYPE must be configured for 82599 */
2126 	if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
2127 	    ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
2128 		reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2129 		    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2130 		reg_val |= IXGBE_PSRTYPE_L2HDR;
2131 		reg_val |= 0x80000000;
2132 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
2133 	} else {
2134 		if (ixgbe->num_rx_groups > 32) {
2135 			psrtype_rss_bit = 0x20000000;
2136 		} else {
2137 			psrtype_rss_bit = 0x40000000;
2138 		}
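		/*
		 * Bits 30:29 of PSRTYPE appear to encode the number of RSS
		 * queues per pool (an assumption from the 82599 datasheet):
		 * with more than 32 pools only two RSS queues per pool are
		 * available, otherwise four.
		 */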
2139 		for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) {
2140 			reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2141 			    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2142 			reg_val |= IXGBE_PSRTYPE_L2HDR;
2143 			reg_val |= psrtype_rss_bit;
2144 			IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val);
2145 		}
2146 	}
2147 
2148 	/*
2149 	 * Set filter control in FCTRL to accept broadcast packets and do
2150 	 * not pass pause frames to host.  Flow control settings are already
2151 	 * in this register, so preserve them.
2152 	 */
2153 	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2154 	reg_val |= IXGBE_FCTRL_BAM;	/* broadcast accept mode */
2155 	reg_val |= IXGBE_FCTRL_DPF;	/* discard pause frames */
2156 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
2157 
2158 	/*
2159 	 * Hardware checksum settings
2160 	 */
2161 	if (ixgbe->rx_hcksum_enable) {
2162 		reg_val = IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
2163 		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
2164 	}
2165 
2166 	/*
2167 	 * Setup VMDq and RSS for multiple receive queues
2168 	 */
2169 	switch (ixgbe->classify_mode) {
2170 	case IXGBE_CLASSIFY_RSS:
2171 		/*
2172 		 * One group, only RSS is needed when more than
2173 		 * one ring enabled.
2174 		 */
2175 		ixgbe_setup_rss(ixgbe);
2176 		break;
2177 
2178 	case IXGBE_CLASSIFY_VMDQ:
2179 		/*
2180 		 * Multiple groups, each group has one ring,
2181 		 * only VMDq is needed.
2182 		 */
2183 		ixgbe_setup_vmdq(ixgbe);
2184 		break;
2185 
2186 	case IXGBE_CLASSIFY_VMDQ_RSS:
2187 		/*
2188 		 * Multiple groups and multiple rings, both
2189 		 * VMDq and RSS are needed.
2190 		 */
2191 		ixgbe_setup_vmdq_rss(ixgbe);
2192 		break;
2193 
2194 	default:
2195 		break;
2196 	}
2197 
2198 	/*
2199 	 * Enable the receive unit.  This must be done after filter
2200 	 * control is set in FCTRL.
2201 	 */
2202 	reg_val = (IXGBE_RXCTRL_RXEN	/* Enable Receive Unit */
2203 	    | IXGBE_RXCTRL_DMBYPS);	/* descriptor monitor bypass */
2204 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
2205 
2206 	/*
2207 	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
2208 	 */
2209 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
2210 		rx_ring = &ixgbe->rx_rings[i];
2211 		ixgbe_setup_rx_ring(rx_ring);
2212 	}
2213 
2214 	/*
2215 	 * Setup the per-ring statistics mapping.
2216 	 */
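	/*
	 * Each 32-bit RQSMR register maps four rx queues, one byte per
	 * queue: hw_index selects both the register (index >> 2) and the
	 * byte lane within it (index & 0x3).
	 */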
2217 	ring_mapping = 0;
2218 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
2219 		index = ixgbe->rx_rings[i].hw_index;
2220 		ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2));
2221 		ring_mapping |= (i & 0xF) << (8 * (index & 0x3));
2222 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping);
2223 	}
2224 
2225 	/*
2226 	 * The Max Frame Size in MHADD/MAXFRS will be internally increased
2227 	 * by four bytes if the packet has a VLAN field, so it includes the
2228 	 * MTU, ethernet header and frame check sequence.
2229 	 * The register is named MAXFRS on the 82599.
2230 	 */
2231 	reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
2232 	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
2233 	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
2234 
2235 	/*
2236 	 * Setup Jumbo Frame enable bit
2237 	 */
2238 	if (ixgbe->default_mtu > ETHERMTU) {
2239 		reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2240 		reg_val |= IXGBE_HLREG0_JUMBOEN;
2241 		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2242 	}
2243 
2244 	/*
2245 	 * Setup RSC for multiple receive queues.
2246 	 */
2247 	if (ixgbe->lro_enable) {
2248 		for (i = 0; i < ixgbe->num_rx_rings; i++) {
2249 			/*
2250 			 * Make sure rx_buf_size * MAXDESC is not greater
2251 			 * than 65535.
2252 			 * Intel recommends 4 as the MAXDESC field value.
2253 			 */
2254 			reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
2255 			reg_val |= IXGBE_RSCCTL_RSCEN;
2256 			if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
2257 				reg_val |= IXGBE_RSCCTL_MAXDESC_1;
2258 			else
2259 				reg_val |= IXGBE_RSCCTL_MAXDESC_4;
2260 			IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val);
2261 		}
2262 
2263 		reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
2264 		reg_val |= IXGBE_RSCDBU_RSCACKDIS;
2265 		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);
2266 
2267 		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2268 		reg_val |= IXGBE_RDRXCTL_RSCACKC;
2269 		reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
2270 		reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2271 
2272 		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2273 	}
2274 }
2275 
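/*
 * ixgbe_setup_tx_ring - Setup the descriptors and registers of a tx ring.
 */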
2276 static void
2277 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
2278 {
2279 	ixgbe_t *ixgbe = tx_ring->ixgbe;
2280 	struct ixgbe_hw *hw = &ixgbe->hw;
2281 	uint32_t size;
2282 	uint32_t buf_low;
2283 	uint32_t buf_high;
2284 	uint32_t reg_val;
2285 
2286 	ASSERT(mutex_owned(&tx_ring->tx_lock));
2287 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2288 
2289 	/*
2290 	 * Initialize the length register
2291 	 */
2292 	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
2293 	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);
2294 
2295 	/*
2296 	 * Initialize the base address registers
2297 	 */
2298 	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
2299 	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
2300 	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
2301 	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);
2302 
2303 	/*
2304 	 * Setup head & tail pointers
2305 	 */
2306 	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
2307 	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);
2308 
2309 	/*
2310 	 * Setup head write-back
2311 	 */
2312 	if (ixgbe->tx_head_wb_enable) {
2313 		/*
2314 		 * The memory of the head write-back is allocated using
2315 		 * the extra tbd beyond the tail of the tbd ring.
2316 		 */
2317 		tx_ring->tbd_head_wb = (uint32_t *)
2318 		    ((uintptr_t)tx_ring->tbd_area.address + size);
2319 		*tx_ring->tbd_head_wb = 0;
2320 
2321 		buf_low = (uint32_t)
2322 		    (tx_ring->tbd_area.dma_address + size);
2323 		buf_high = (uint32_t)
2324 		    ((tx_ring->tbd_area.dma_address + size) >> 32);
2325 
2326 		/* Set the head write-back enable bit */
2327 		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2328 
2329 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2330 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2331 
2332 		/*
2333 		 * Turn off relaxed ordering for head write back or it will
2334 		 * cause problems with the tx recycling
2335 		 */
2336 		reg_val = IXGBE_READ_REG(hw,
2337 		    IXGBE_DCA_TXCTRL(tx_ring->index));
2338 		reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2339 		IXGBE_WRITE_REG(hw,
2340 		    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2341 	} else {
2342 		tx_ring->tbd_head_wb = NULL;
2343 	}
2344 
2345 	tx_ring->tbd_head = 0;
2346 	tx_ring->tbd_tail = 0;
2347 	tx_ring->tbd_free = tx_ring->ring_size;
2348 
2349 	if (ixgbe->tx_ring_init == B_TRUE) {
2350 		tx_ring->tcb_head = 0;
2351 		tx_ring->tcb_tail = 0;
2352 		tx_ring->tcb_free = tx_ring->free_list_size;
2353 	}
2354 
2355 	/*
2356 	 * Initialize the s/w context structure
2357 	 */
2358 	bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2359 }
2360 
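/*
 * ixgbe_setup_tx - Setup the global transmit registers and all tx rings.
 */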
2361 static void
2362 ixgbe_setup_tx(ixgbe_t *ixgbe)
2363 {
2364 	struct ixgbe_hw *hw = &ixgbe->hw;
2365 	ixgbe_tx_ring_t *tx_ring;
2366 	uint32_t reg_val;
2367 	uint32_t ring_mapping;
2368 	int i;
2369 
2370 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2371 		tx_ring = &ixgbe->tx_rings[i];
2372 		ixgbe_setup_tx_ring(tx_ring);
2373 	}
2374 
2375 	/*
2376 	 * Setup the per-ring statistics mapping.
2377 	 */
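	/*
	 * As on the rx side, four tx queues share each mapping register,
	 * so a register is written out once every fourth ring; the check
	 * after the loop flushes a final, partially filled register.
	 */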
2378 	ring_mapping = 0;
2379 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2380 		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2381 		if ((i & 0x3) == 0x3) {
2382 			switch (hw->mac.type) {
2383 			case ixgbe_mac_82598EB:
2384 				IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2385 				    ring_mapping);
2386 				break;
2387 
2388 			case ixgbe_mac_82599EB:
2389 				IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2390 				    ring_mapping);
2391 				break;
2392 
2393 			default:
2394 				break;
2395 			}
2396 
2397 			ring_mapping = 0;
2398 		}
2399 	}
2400 	if (i & 0x3) {
2401 		switch (hw->mac.type) {
2402 		case ixgbe_mac_82598EB:
2403 			IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2404 			break;
2405 
2406 		case ixgbe_mac_82599EB:
2407 			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2408 			break;
2409 
2410 		default:
2411 			break;
2412 		}
2413 	}
2414 
2415 	/*
2416 	 * Enable CRC appending and TX padding (for short tx frames)
2417 	 */
2418 	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2419 	reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2420 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2421 
2422 	/*
2423 	 * enable DMA for 82599 parts
2424 	 */
2425 	if (hw->mac.type == ixgbe_mac_82599EB) {
2426 		/* DMATXCTL.TE must be set after all Tx config is complete */
2427 		reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2428 		reg_val |= IXGBE_DMATXCTL_TE;
2429 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2430 	}
2431 
2432 	/*
2433 	 * Enable tx queues.
2434 	 * For 82599, this must be done after DMATXCTL.TE is set.
2435 	 */
2436 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2437 		tx_ring = &ixgbe->tx_rings[i];
2438 		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2439 		reg_val |= IXGBE_TXDCTL_ENABLE;
2440 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2441 	}
2442 }
2443 
2444 /*
2445  * ixgbe_setup_rss - Setup receive-side scaling feature.
2446  */
2447 static void
2448 ixgbe_setup_rss(ixgbe_t *ixgbe)
2449 {
2450 	struct ixgbe_hw *hw = &ixgbe->hw;
2451 	uint32_t i, mrqc, rxcsum;
2452 	uint32_t random;
2453 	uint32_t reta;
2454 	uint32_t ring_per_group;
2455 
2456 	/*
2457 	 * Fill out redirection table
2458 	 */
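	/*
	 * The redirection table has 128 one-byte entries, each holding a
	 * queue index within the group; four entries are accumulated in
	 * reta and written out on every fourth iteration.
	 */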
2459 	reta = 0;
2460 	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2461 
2462 	for (i = 0; i < 128; i++) {
2463 		reta = (reta << 8) | (i % ring_per_group) |
2464 		    ((i % ring_per_group) << 4);
2465 		if ((i & 3) == 3)
2466 			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2467 	}
2468 
2469 	/*
2470 	 * Fill out hash function seeds with a random constant
2471 	 */
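	/*
	 * The 40-byte RSS hash key is spread across ten 32-bit RSSRK
	 * registers.
	 */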
2472 	for (i = 0; i < 10; i++) {
2473 		(void) random_get_pseudo_bytes((uint8_t *)&random,
2474 		    sizeof (uint32_t));
2475 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2476 	}
2477 
2478 	/*
2479 	 * Enable RSS & perform hash on these packet types
2480 	 */
2481 	mrqc = IXGBE_MRQC_RSSEN |
2482 	    IXGBE_MRQC_RSS_FIELD_IPV4 |
2483 	    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2484 	    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2485 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2486 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2487 	    IXGBE_MRQC_RSS_FIELD_IPV6 |
2488 	    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2489 	    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2490 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2491 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2492 
2493 	/*
2494 	 * Disable Packet Checksum to enable RSS for multiple receive queues.
2495 	 * It is an adapter hardware limitation that Packet Checksum is
2496 	 * mutually exclusive with RSS.
2497 	 */
2498 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2499 	rxcsum |= IXGBE_RXCSUM_PCSD;
2500 	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2501 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2502 }
2503 
2504 /*
2505  * ixgbe_setup_vmdq - Setup MAC classification feature
2506  */
2507 static void
2508 ixgbe_setup_vmdq(ixgbe_t *ixgbe)
2509 {
2510 	struct ixgbe_hw *hw = &ixgbe->hw;
2511 	uint32_t vmdctl, i, vtctl;
2512 
2513 	/*
2514 	 * Setup the VMDq Control register, enable VMDq based on
2515 	 * packet destination MAC address:
2516 	 */
2517 	switch (hw->mac.type) {
2518 	case ixgbe_mac_82598EB:
2519 		/*
2520 		 * VMDq Enable = 1;
2521 		 * VMDq Filter = 0; MAC filtering
2522 		 * Default VMDq output index = 0;
2523 		 */
2524 		vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2525 		IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2526 		break;
2527 
2528 	case ixgbe_mac_82599EB:
2529 		/*
2530 		 * Enable VMDq-only.
2531 		 */
2532 		vmdctl = IXGBE_MRQC_VMDQEN;
2533 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2534 
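		/*
		 * Clear the MAC pool select arrays for every RAR entry;
		 * the address-to-pool associations are re-established when
		 * the unicast addresses are programmed.
		 */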
2535 		for (i = 0; i < hw->mac.num_rar_entries; i++) {
2536 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2537 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2538 		}
2539 
2540 		/*
2541 		 * Enable Virtualization and Replication.
2542 		 */
2543 		vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2544 		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2545 
2546 		/*
2547 		 * Enable receiving packets to all VFs
2548 		 */
2549 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2550 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2551 		break;
2552 
2553 	default:
2554 		break;
2555 	}
2556 }
2557 
2558 /*
2559  * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature.
2560  */
2561 static void
2562 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
2563 {
2564 	struct ixgbe_hw *hw = &ixgbe->hw;
2565 	uint32_t i, mrqc, rxcsum;
2566 	uint32_t random;
2567 	uint32_t reta;
2568 	uint32_t ring_per_group;
2569 	uint32_t vmdctl, vtctl;
2570 
2571 	/*
2572 	 * Fill out redirection table
2573 	 */
2574 	reta = 0;
2575 	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2576 	for (i = 0; i < 128; i++) {
2577 		reta = (reta << 8) | (i % ring_per_group) |
2578 		    ((i % ring_per_group) << 4);
2579 		if ((i & 3) == 3)
2580 			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2581 	}
2582 
2583 	/*
2584 	 * Fill out hash function seeds with a random constant
2585 	 */
2586 	for (i = 0; i < 10; i++) {
2587 		(void) random_get_pseudo_bytes((uint8_t *)&random,
2588 		    sizeof (uint32_t));
2589 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2590 	}
2591 
2592 	/*
2593 	 * Enable and setup RSS and VMDq
2594 	 */
2595 	switch (hw->mac.type) {
2596 	case ixgbe_mac_82598EB:
2597 		/*
2598 		 * Enable RSS & Setup RSS Hash functions
2599 		 */
2600 		mrqc = IXGBE_MRQC_RSSEN |
2601 		    IXGBE_MRQC_RSS_FIELD_IPV4 |
2602 		    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2603 		    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2604 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2605 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2606 		    IXGBE_MRQC_RSS_FIELD_IPV6 |
2607 		    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2608 		    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2609 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2610 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2611 
2612 		/*
2613 		 * Enable and Setup VMDq
2614 		 * VMDq Filter = 0; MAC filtering
2615 		 * Default VMDq output index = 0;
2616 		 */
2617 		vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2618 		IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2619 		break;
2620 
2621 	case ixgbe_mac_82599EB:
2622 		/*
2623 		 * Enable RSS & Setup RSS Hash functions
2624 		 */
2625 		mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2626 		    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2627 		    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2628 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2629 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2630 		    IXGBE_MRQC_RSS_FIELD_IPV6 |
2631 		    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2632 		    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2633 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2634 
2635 		/*
2636 		 * Enable VMDq+RSS.
2637 		 */
2638 		if (ixgbe->num_rx_groups > 32)  {
2639 			mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2640 		} else {
2641 			mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2642 		}
2643 
2644 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2645 
2646 		for (i = 0; i < hw->mac.num_rar_entries; i++) {
2647 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2648 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2649 		}
2650 		break;
2651 
2652 	default:
2653 		break;
2655 	}
2656 
2657 	/*
2658 	 * Disable Packet Checksum to enable RSS for multiple receive queues.
2659 	 * It is an adapter hardware limitation that Packet Checksum is
2660 	 * mutually exclusive with RSS.
2661 	 */
2662 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2663 	rxcsum |= IXGBE_RXCSUM_PCSD;
2664 	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2665 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2666 
2667 	if (hw->mac.type == ixgbe_mac_82599EB) {
2668 		/*
2669 		 * Enable Virtualization and Replication.
2670 		 */
2671 		vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2672 		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2673 
2674 		/*
2675 		 * Enable receiving packets to all VFs
2676 		 */
2677 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2678 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2679 	}
2680 }
2681 
2682 /*
2683  * ixgbe_init_unicst - Initialize the unicast addresses.
2684  */
2685 static void
2686 ixgbe_init_unicst(ixgbe_t *ixgbe)
2687 {
2688 	struct ixgbe_hw *hw = &ixgbe->hw;
2689 	uint8_t *mac_addr;
2690 	int slot;
2691 	/*
2692 	 * Here we should consider two situations:
2693 	 *
2694 	 * 1. Chipset is initialized for the first time:
2695 	 *    Clear all the multiple unicast addresses.
2696 	 *
2697 	 * 2. Chipset is reset:
2698 	 *    Recover the multiple unicast addresses from the
2699 	 *    software data structure to the RAR registers.
2700 	 */
2701 	if (!ixgbe->unicst_init) {
2702 		/*
2703 		 * Initialize the multiple unicast addresses
2704 		 */
2705 		ixgbe->unicst_total = hw->mac.num_rar_entries;
2706 		ixgbe->unicst_avail = ixgbe->unicst_total;
2707 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2708 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2709 			bzero(mac_addr, ETHERADDRL);
2710 			(void) ixgbe_set_rar(hw, slot, mac_addr, 0, 0);
2711 			ixgbe->unicst_addr[slot].mac.set = 0;
2712 		}
2713 		ixgbe->unicst_init = B_TRUE;
2714 	} else {
2715 		/* Re-configure the RAR registers */
2716 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2717 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2718 			if (ixgbe->unicst_addr[slot].mac.set == 1) {
2719 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2720 				    ixgbe->unicst_addr[slot].mac.group_index,
2721 				    IXGBE_RAH_AV);
2722 			} else {
2723 				bzero(mac_addr, ETHERADDRL);
2724 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2725 				    0, 0);
2726 			}
2727 		}
2728 	}
2729 }
2730 
2731 /*
2732  * ixgbe_unicst_find - Find the slot for the specified unicast address
2733  */
2734 int
2735 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
2736 {
2737 	int slot;
2738 
2739 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2740 
2741 	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2742 		if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
2743 		    mac_addr, ETHERADDRL) == 0)
2744 			return (slot);
2745 	}
2746 
2747 	return (-1);
2748 }
2749 
2750 /*
2751  * ixgbe_multicst_add - Add a multicst address.
2752  */
2753 int
2754 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2755 {
2756 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2757 
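	/*
	 * A multicast address must have the group bit (the low-order bit
	 * of the first byte) set.
	 */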
2758 	if ((multiaddr[0] & 01) == 0) {
2759 		return (EINVAL);
2760 	}
2761 
2762 	if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2763 		return (ENOENT);
2764 	}
2765 
2766 	bcopy(multiaddr,
2767 	    &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2768 	ixgbe->mcast_count++;
2769 
2770 	/*
2771 	 * Update the multicast table in the hardware
2772 	 */
2773 	ixgbe_setup_multicst(ixgbe);
2774 
2775 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2776 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2777 		return (EIO);
2778 	}
2779 
2780 	return (0);
2781 }
2782 
2783 /*
2784  * ixgbe_multicst_remove - Remove a multicst address.
2785  */
2786 int
2787 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2788 {
2789 	int i;
2790 
2791 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2792 
2793 	for (i = 0; i < ixgbe->mcast_count; i++) {
2794 		if (bcmp(multiaddr, &ixgbe->mcast_table[i],
2795 		    ETHERADDRL) == 0) {
2796 			for (i++; i < ixgbe->mcast_count; i++) {
2797 				ixgbe->mcast_table[i - 1] =
2798 				    ixgbe->mcast_table[i];
2799 			}
2800 			ixgbe->mcast_count--;
2801 			break;
2802 		}
2803 	}
2804 
2805 	/*
2806 	 * Update the multicast table in the hardware
2807 	 */
2808 	ixgbe_setup_multicst(ixgbe);
2809 
2810 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2811 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2812 		return (EIO);
2813 	}
2814 
2815 	return (0);
2816 }
2817 
2818 /*
2819  * ixgbe_setup_multicst - Setup multicast data structures.
2820  *
2821  * This routine initializes all of the multicast related structures
2822  * and saves them in the hardware registers.
2823  */
2824 static void
2825 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2826 {
2827 	uint8_t *mc_addr_list;
2828 	uint32_t mc_addr_count;
2829 	struct ixgbe_hw *hw = &ixgbe->hw;
2830 
2831 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2832 
2833 	ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2834 
2835 	mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2836 	mc_addr_count = ixgbe->mcast_count;
2837 
2838 	/*
2839 	 * Update the multicast addresses to the MTA registers
2840 	 */
2841 	(void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2842 	    ixgbe_mc_table_itr);
2843 }
2844 
2845 /*
2846  * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
2847  *
2848  * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
2849  * Different chipsets may have different allowed configurations of vmdq and rss.
2850  */
2851 static void
2852 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
2853 {
2854 	struct ixgbe_hw *hw = &ixgbe->hw;
2855 	uint32_t ring_per_group;
2856 
2857 	switch (hw->mac.type) {
2858 	case ixgbe_mac_82598EB:
2859 		/*
2860 		 * 82598 supports the following combination:
2861 		 * vmdq no. x rss no.
2862 		 * [5..16]  x 1
2863 		 * [1..4]   x [1..16]
2864 		 * However, 8 rss queues per pool (vmdq) are sufficient for
2865 		 * most cases.
2866 		 */
2867 		ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2868 		if (ixgbe->num_rx_groups > 4) {
2869 			ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2870 		} else {
2871 			ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2872 			    min(8, ring_per_group);
2873 		}
2874 
2875 		break;
2876 
2877 	case ixgbe_mac_82599EB:
2878 		/*
2879 		 * 82599 supports the following combination:
2880 		 * vmdq no. x rss no.
2881 		 * [33..64] x [1..2]
2882 		 * [2..32]  x [1..4]
2883 		 * 1 x [1..16]
2884 		 * However, 8 rss queues per pool (vmdq) are sufficient for
2885 		 * most cases.
2886 		 */
2887 		ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2888 		if (ixgbe->num_rx_groups == 1) {
2889 			ixgbe->num_rx_rings = min(8, ring_per_group);
2890 		} else if (ixgbe->num_rx_groups <= 32) {
2891 			ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2892 			    min(4, ring_per_group);
2893 		} else if (ixgbe->num_rx_groups <= 64) {
2894 			ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2895 			    min(2, ring_per_group);
2896 		}
2897 		break;
2898 
2899 	default:
2900 		break;
2901 	}
2902 
2903 	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2904 
2905 	if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
2906 		ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
2907 	} else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) {
2908 		ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ;
2909 	} else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) {
2910 		ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS;
2911 	} else {
2912 		ixgbe->classify_mode = IXGBE_CLASSIFY_RSS;
2913 	}
2914 
2915 	IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d",
2916 	    ixgbe->num_rx_groups, ixgbe->num_rx_rings);
2917 }
2918 
2919 /*
2920  * ixgbe_get_conf - Get driver configurations set in driver.conf.
2921  *
2922  * This routine gets user-configured values out of the configuration
2923  * file ixgbe.conf.
2924  *
2925  * For each configurable value, there is a minimum, a maximum, and a
2926  * default.
2927  * If user does not configure a value, use the default.
2928 	 * If user configures below the minimum, use the minimum.
2929 	 * If user configures above the maximum, use the maximum.
2930  */
2931 static void
2932 ixgbe_get_conf(ixgbe_t *ixgbe)
2933 {
2934 	struct ixgbe_hw *hw = &ixgbe->hw;
2935 	uint32_t flow_control;
2936 
2937 	/*
2938 	 * ixgbe driver supports the following user configurations:
2939 	 *
2940 	 * Jumbo frame configuration:
2941 	 *    default_mtu
2942 	 *
2943 	 * Ethernet flow control configuration:
2944 	 *    flow_control
2945 	 *
2946 	 * Multiple rings configurations:
2947 	 *    tx_queue_number
2948 	 *    tx_ring_size
2949 	 *    rx_queue_number
2950 	 *    rx_ring_size
2951 	 *
2952 	 * Call ixgbe_get_prop() to get the value for a specific
2953 	 * configuration parameter.
2954 	 */
2955 
2956 	/*
2957 	 * Jumbo frame configuration - max_frame_size controls host buffer
2958 	 * allocation, so includes MTU, ethernet header, vlan tag and
2959 	 * frame check sequence.
2960 	 */
2961 	ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
2962 	    MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);
2963 
2964 	ixgbe->max_frame_size = ixgbe->default_mtu +
2965 	    sizeof (struct ether_vlan_header) + ETHERFCSL;
2966 
2967 	/*
2968 	 * Ethernet flow control configuration
2969 	 */
2970 	flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
2971 	    ixgbe_fc_none, 3, ixgbe_fc_none);
2972 	if (flow_control == 3)
2973 		flow_control = ixgbe_fc_default;
2974 
2975 	/*
2976 	 * fc.requested_mode is what the user requests.  After autoneg,
2977 	 * fc.current_mode will be the flow_control mode that was negotiated.
2978 	 */
2979 	hw->fc.requested_mode = flow_control;
2980 
2981 	/*
2982 	 * Multiple rings configurations
2983 	 */
2984 	ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
2985 	    ixgbe->capab->min_tx_que_num,
2986 	    ixgbe->capab->max_tx_que_num,
2987 	    ixgbe->capab->def_tx_que_num);
2988 	ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
2989 	    MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
2990 
2991 	ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
2992 	    ixgbe->capab->min_rx_que_num,
2993 	    ixgbe->capab->max_rx_que_num,
2994 	    ixgbe->capab->def_rx_que_num);
2995 	ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
2996 	    MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
2997 
2998 	/*
2999 	 * Multiple groups configuration
3000 	 */
3001 	ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
3002 	    ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num,
3003 	    ixgbe->capab->def_rx_grp_num);
3004 
3005 	ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
3006 	    0, 1, DEFAULT_MR_ENABLE);
3007 
3008 	if (ixgbe->mr_enable == B_FALSE) {
3009 		ixgbe->num_tx_rings = 1;
3010 		ixgbe->num_rx_rings = 1;
3011 		ixgbe->num_rx_groups = 1;
3012 		ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3013 	} else {
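		/*
		 * Round the ring count so that every group gets the same
		 * number of rings, with at least one ring per group.
		 */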
3014 		ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3015 		    max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1);
3016 		/*
3017 		 * The combination of num_rx_rings and num_rx_groups
3018 		 * may not be supported by h/w. We need to adjust
3019 		 * them to appropriate values.
3020 		 */
3021 		ixgbe_setup_vmdq_rss_conf(ixgbe);
3022 	}
3023 
3024 	/*
3025 	 * Tunable used to force an interrupt type. The only use is
3026 	 * for testing of the lesser interrupt types.
3027 	 * 0 = don't force interrupt type
3028 	 * 1 = force interrupt type MSI-X
3029 	 * 2 = force interrupt type MSI
3030 	 * 3 = force interrupt type Legacy
3031 	 */
3032 	ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3033 	    IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3034 
3035 	ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3036 	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3037 	ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3038 	    0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3039 	ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3040 	    0, 1, DEFAULT_LSO_ENABLE);
3041 	ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3042 	    0, 1, DEFAULT_LRO_ENABLE);
3043 	ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3044 	    0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3045 	ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3046 	    PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3047 
3048 	/* Head Write Back not recommended for 82599 */
3049 	if (hw->mac.type >= ixgbe_mac_82599EB) {
3050 		ixgbe->tx_head_wb_enable = B_FALSE;
3051 	}
3052 
3053 	/*
3054 	 * ixgbe LSO needs the tx h/w checksum support.
3055 	 * LSO will be disabled if tx h/w checksum is not
3056 	 * enabled.
3057 	 */
3058 	if (ixgbe->tx_hcksum_enable == B_FALSE) {
3059 		ixgbe->lso_enable = B_FALSE;
3060 	}
3061 
3062 	/*
3063 	 * ixgbe LRO needs the rx h/w checksum support.
3064 	 * LRO will be disabled if rx h/w checksum is not
3065 	 * enabled.
3066 	 */
3067 	if (ixgbe->rx_hcksum_enable == B_FALSE) {
3068 		ixgbe->lro_enable = B_FALSE;
3069 	}
3070 
3071 	/*
3072 	 * ixgbe LRO is currently only supported on the 82599
3073 	 */
3074 	if (hw->mac.type != ixgbe_mac_82599EB) {
3075 		ixgbe->lro_enable = B_FALSE;
3076 	}
3077 	ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3078 	    MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3079 	    DEFAULT_TX_COPY_THRESHOLD);
3080 	ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3081 	    PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3082 	    MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3083 	ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3084 	    PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3085 	    MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3086 	ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3087 	    PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3088 	    MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3089 
3090 	ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3091 	    MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3092 	    DEFAULT_RX_COPY_THRESHOLD);
3093 	ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3094 	    MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3095 	    DEFAULT_RX_LIMIT_PER_INTR);
3096 
3097 	ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3098 	    ixgbe->capab->min_intr_throttle,
3099 	    ixgbe->capab->max_intr_throttle,
3100 	    ixgbe->capab->def_intr_throttle);
3101 	/*
3102 	 * 82599 requires that the interrupt throttling rate be
3103 	 * a multiple of 8. This is enforced by the register
3104 	 * definition.
3105 	 */
3106 	if (hw->mac.type == ixgbe_mac_82599EB)
3107 		ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3108 }
3109 
3110 static void
3111 ixgbe_init_params(ixgbe_t *ixgbe)
3112 {
3113 	ixgbe->param_en_10000fdx_cap = 1;
3114 	ixgbe->param_en_1000fdx_cap = 1;
3115 	ixgbe->param_en_100fdx_cap = 1;
3116 	ixgbe->param_adv_10000fdx_cap = 1;
3117 	ixgbe->param_adv_1000fdx_cap = 1;
3118 	ixgbe->param_adv_100fdx_cap = 1;
3119 
3120 	ixgbe->param_pause_cap = 1;
3121 	ixgbe->param_asym_pause_cap = 1;
3122 	ixgbe->param_rem_fault = 0;
3123 
3124 	ixgbe->param_adv_autoneg_cap = 1;
3125 	ixgbe->param_adv_pause_cap = 1;
3126 	ixgbe->param_adv_asym_pause_cap = 1;
3127 	ixgbe->param_adv_rem_fault = 0;
3128 
3129 	ixgbe->param_lp_10000fdx_cap = 0;
3130 	ixgbe->param_lp_1000fdx_cap = 0;
3131 	ixgbe->param_lp_100fdx_cap = 0;
3132 	ixgbe->param_lp_autoneg_cap = 0;
3133 	ixgbe->param_lp_pause_cap = 0;
3134 	ixgbe->param_lp_asym_pause_cap = 0;
3135 	ixgbe->param_lp_rem_fault = 0;
3136 }
3137 
3138 /*
3139  * ixgbe_get_prop - Get a property value out of the configuration file
3140  * ixgbe.conf.
3141  *
3142  * Caller provides the name of the property, a minimum value, a maximum
3143  * value, and a default value.
3144  *
3145  * Return configured value of the property, with default, minimum and
3146  * maximum properly applied.
3147  */
3148 static int
3149 ixgbe_get_prop(ixgbe_t *ixgbe,
3150     char *propname,	/* name of the property */
3151     int minval,		/* minimum acceptable value */
3152     int maxval,		/* maximum acceptable value */
3153     int defval)		/* default value */
3154 {
3155 	int value;
3156 
3157 	/*
3158 	 * Call ddi_prop_get_int() to read the conf settings
3159 	 */
3160 	value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
3161 	    DDI_PROP_DONTPASS, propname, defval);
3162 	if (value > maxval)
3163 		value = maxval;
3164 
3165 	if (value < minval)
3166 		value = minval;
3167 
3168 	return (value);
3169 }
3170 
3171 /*
3172  * ixgbe_driver_setup_link - Use the link properties to setup the link.
3173  */
3174 int
3175 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
3176 {
3177 	u32 autoneg_advertised = 0;
3178 
3179 	/*
3180 	 * No half duplex support with 10Gb parts
3181 	 */
3182 	if (ixgbe->param_adv_10000fdx_cap == 1)
3183 		autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3184 
3185 	if (ixgbe->param_adv_1000fdx_cap == 1)
3186 		autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3187 
3188 	if (ixgbe->param_adv_100fdx_cap == 1)
3189 		autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
3190 
3191 	if (ixgbe->param_adv_autoneg_cap == 1 && autoneg_advertised == 0) {
3192 		ixgbe_notice(ixgbe, "Invalid link settings. Setup link "
3193 		    "to autonegotiation with full link capabilities.");
3194 
3195 		autoneg_advertised = IXGBE_LINK_SPEED_10GB_FULL |
3196 		    IXGBE_LINK_SPEED_1GB_FULL |
3197 		    IXGBE_LINK_SPEED_100_FULL;
3198 	}
3199 
3200 	if (setup_hw) {
3201 		if (ixgbe_setup_link(&ixgbe->hw, autoneg_advertised,
3202 		    ixgbe->param_adv_autoneg_cap, B_TRUE) != IXGBE_SUCCESS) {
3203 			ixgbe_notice(ixgbe, "Setup link failed on this "
3204 			    "device.");
3205 			return (IXGBE_FAILURE);
3206 		}
3207 	}
3208 
3209 	return (IXGBE_SUCCESS);
3210 }
3211 
3212 /*
3213  * ixgbe_driver_link_check - Link status processing.
3214  *
3215  * This function can be called in both kernel context and interrupt context
3216  */
3217 static void
3218 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3219 {
3220 	struct ixgbe_hw *hw = &ixgbe->hw;
3221 	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3222 	boolean_t link_up = B_FALSE;
3223 	boolean_t link_changed = B_FALSE;
3224 
3225 	ASSERT(mutex_owned(&ixgbe->gen_lock));
3226 
3227 	(void) ixgbe_check_link(hw, &speed, &link_up, false);
3228 	if (link_up) {
3229 		ixgbe->link_check_complete = B_TRUE;
3230 
3231 		/* Link is up, enable flow control settings */
3232 		(void) ixgbe_fc_enable(hw, 0);
3233 
3234 		/*
3235 		 * The link is up; check whether it was marked as down earlier
3236 		 */
3237 		if (ixgbe->link_state != LINK_STATE_UP) {
3238 			switch (speed) {
3239 			case IXGBE_LINK_SPEED_10GB_FULL:
3240 				ixgbe->link_speed = SPEED_10GB;
3241 				break;
3242 			case IXGBE_LINK_SPEED_1GB_FULL:
3243 				ixgbe->link_speed = SPEED_1GB;
3244 				break;
3245 			case IXGBE_LINK_SPEED_100_FULL:
3246 				ixgbe->link_speed = SPEED_100;
3247 			}
3248 			ixgbe->link_duplex = LINK_DUPLEX_FULL;
3249 			ixgbe->link_state = LINK_STATE_UP;
3250 			link_changed = B_TRUE;
3251 		}
3252 	} else {
3253 		if (ixgbe->link_check_complete == B_TRUE ||
3254 		    (ixgbe->link_check_complete == B_FALSE &&
3255 		    gethrtime() >= ixgbe->link_check_hrtime)) {
3256 			/*
3257 			 * The link is really down
3258 			 */
3259 			ixgbe->link_check_complete = B_TRUE;
3260 
3261 			if (ixgbe->link_state != LINK_STATE_DOWN) {
3262 				ixgbe->link_speed = 0;
3263 				ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN;
3264 				ixgbe->link_state = LINK_STATE_DOWN;
3265 				link_changed = B_TRUE;
3266 			}
3267 		}
3268 	}
3269 
3270 	/*
3271 	 * If we are in an interrupt context, we need to re-enable the
3272 	 * interrupt, which was automasked
3273 	 */
3274 	if (servicing_interrupt() != 0) {
3275 		ixgbe->eims |= IXGBE_EICR_LSC;
3276 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3277 	}
3278 
3279 	if (link_changed) {
3280 		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
3281 	}
3282 }
3283 
3284 /*
3285  * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
3286  */
3287 static void
3288 ixgbe_sfp_check(void *arg)
3289 {
3290 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
3291 	uint32_t eicr = ixgbe->eicr;
3292 	struct ixgbe_hw *hw = &ixgbe->hw;
3293 
3294 	mutex_enter(&ixgbe->gen_lock);
3295 	if (eicr & IXGBE_EICR_GPI_SDP1) {
3296 		/* clear the interrupt */
3297 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
3298 
3299 		/* if link up, do multispeed fiber setup */
3300 		(void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3301 		    B_TRUE, B_TRUE);
3302 		ixgbe_driver_link_check(ixgbe);
3303 		ixgbe_get_hw_state(ixgbe);
3304 	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
3305 		/* clear the interrupt */
3306 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
3307 
3308 		/* if link up, do sfp module setup */
3309 		(void) hw->mac.ops.setup_sfp(hw);
3310 
3311 		/* do multispeed fiber setup */
3312 		(void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3313 		    B_TRUE, B_TRUE);
3314 		ixgbe_driver_link_check(ixgbe);
3315 		ixgbe_get_hw_state(ixgbe);
3316 	}
3317 	mutex_exit(&ixgbe->gen_lock);
3318 
3319 	/*
3320 	 * We need to fully re-check the link later.
3321 	 */
3322 	ixgbe->link_check_complete = B_FALSE;
3323 	ixgbe->link_check_hrtime = gethrtime() +
3324 	    (IXGBE_LINK_UP_TIME * 100000000ULL);
3325 }
3326 
3327 /*
3328  * ixgbe_overtemp_check - overtemp module processing done in taskq
3329  *
3330  * This routine will only be called on adapters with a temperature sensor.
3331  * The indication of over-temperature can be either SDP0 interrupt or the link
3332  * status change interrupt.
3333  */
3334 static void
3335 ixgbe_overtemp_check(void *arg)
3336 {
3337 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
3338 	struct ixgbe_hw *hw = &ixgbe->hw;
3339 	uint32_t eicr = ixgbe->eicr;
3340 	ixgbe_link_speed speed;
3341 	boolean_t link_up;
3342 
3343 	mutex_enter(&ixgbe->gen_lock);
3344 
3345 	/* make sure we know current state of link */
3346 	(void) ixgbe_check_link(hw, &speed, &link_up, false);
3347 
3348 	/* check over-temp condition */
3349 	if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
3350 	    (eicr & IXGBE_EICR_LSC)) {
3351 		if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) {
3352 			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3353 
3354 			/*
3355 			 * Disable the adapter interrupts
3356 			 */
3357 			ixgbe_disable_adapter_interrupts(ixgbe);
3358 
3359 			/*
3360 			 * Disable Rx/Tx units
3361 			 */
3362 			(void) ixgbe_stop_adapter(hw);
3363 
3364 			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3365 			ixgbe_error(ixgbe,
3366 			    "Problem: Network adapter has been stopped "
3367 			    "because it has overheated");
3368 			ixgbe_error(ixgbe,
3369 			    "Action: Restart the computer. "
3370 			    "If the problem persists, power off the system "
3371 			    "and replace the adapter");
3372 		}
3373 	}
3374 
3375 	/* write to clear the interrupt */
3376 	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3377 
3378 	mutex_exit(&ixgbe->gen_lock);
3379 }
3380 
3381 /*
3382  * ixgbe_link_timer - timer for link status detection
3383  */
3384 static void
3385 ixgbe_link_timer(void *arg)
3386 {
3387 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
3388 
3389 	mutex_enter(&ixgbe->gen_lock);
3390 	ixgbe_driver_link_check(ixgbe);
3391 	mutex_exit(&ixgbe->gen_lock);
3392 }
3393 
3394 /*
3395  * ixgbe_local_timer - Driver watchdog function.
3396  *
3397  * This function will handle the transmit stall check and other routines.
3398  */
3399 static void
3400 ixgbe_local_timer(void *arg)
3401 {
3402 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
3403 
3404 	if (ixgbe->ixgbe_state & IXGBE_OVERTEMP)
3405 		goto out;
3406 
3407 	if (ixgbe->ixgbe_state & IXGBE_ERROR) {
3408 		ixgbe->reset_count++;
3409 		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3410 			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3411 		goto out;
3412 	}
3413 
3414 	if (ixgbe_stall_check(ixgbe)) {
3415 		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
3416 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3417 
3418 		ixgbe->reset_count++;
3419 		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3420 			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3421 	}
3422 
3423 out:
3424 	ixgbe_restart_watchdog_timer(ixgbe);
3425 }
3426 
3427 /*
3428  * ixgbe_stall_check - Check for transmit stall.
3429  *
3430  * This function checks if the adapter is stalled (in transmit).
3431  *
3432  * It is called each time the watchdog timeout is invoked.
3433  * If the transmit descriptor reclaim continuously fails,
3434  * the watchdog value will increment by 1. If the watchdog
3435  * value exceeds the threshold, the ixgbe is assumed to
3436 	 * have stalled and needs to be reset.
3437  */
3438 static boolean_t
3439 ixgbe_stall_check(ixgbe_t *ixgbe)
3440 {
3441 	ixgbe_tx_ring_t *tx_ring;
3442 	boolean_t result;
3443 	int i;
3444 
3445 	if (ixgbe->link_state != LINK_STATE_UP)
3446 		return (B_FALSE);
3447 
3448 	/*
3449 	 * If any tx ring is stalled, we'll reset the chipset
3450 	 */
3451 	result = B_FALSE;
3452 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
3453 		tx_ring = &ixgbe->tx_rings[i];
3454 		if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
3455 			tx_ring->tx_recycle(tx_ring);
3456 		}
3457 
3458 		if (tx_ring->recycle_fail > 0)
3459 			tx_ring->stall_watchdog++;
3460 		else
3461 			tx_ring->stall_watchdog = 0;
3462 
3463 		if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
3464 			result = B_TRUE;
3465 			break;
3466 		}
3467 	}
3468 
3469 	if (result) {
3470 		tx_ring->stall_watchdog = 0;
3471 		tx_ring->recycle_fail = 0;
3472 	}
3473 
3474 	return (result);
3475 }
3476 
3478 /*
3479  * is_valid_mac_addr - Check if the mac address is valid.
3480  */
3481 static boolean_t
3482 is_valid_mac_addr(uint8_t *mac_addr)
3483 {
3484 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
3485 	const uint8_t addr_test2[6] =
3486 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3487 
3488 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
3489 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
3490 		return (B_FALSE);
3491 
3492 	return (B_TRUE);
3493 }
3494 
3495 static boolean_t
3496 ixgbe_find_mac_address(ixgbe_t *ixgbe)
3497 {
3498 #ifdef __sparc
3499 	struct ixgbe_hw *hw = &ixgbe->hw;
3500 	uchar_t *bytes;
3501 	struct ether_addr sysaddr;
3502 	uint_t nelts;
3503 	int err;
3504 	boolean_t found = B_FALSE;
3505 
3506 	/*
3507 	 * The "vendor's factory-set address" may already have
3508 	 * been extracted from the chip, but if the property
3509 	 * "local-mac-address" is set we use that instead.
3510 	 *
3511 	 * We check whether it looks like an array of 6
3512 	 * bytes (which it should, if OBP set it).  If we can't
3513 	 * make sense of it this way, we'll ignore it.
3514 	 */
3515 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3516 	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
3517 	if (err == DDI_PROP_SUCCESS) {
3518 		if (nelts == ETHERADDRL) {
3519 			while (nelts--)
3520 				hw->mac.addr[nelts] = bytes[nelts];
3521 			found = B_TRUE;
3522 		}
3523 		ddi_prop_free(bytes);
3524 	}
3525 
3526 	/*
3527 	 * Look up the OBP property "local-mac-address?". If the user has set
3528 	 * 'local-mac-address? = false', use "the system address" instead.
3529 	 */
3530 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
3531 	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
3532 		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
3533 			if (localetheraddr(NULL, &sysaddr) != 0) {
3534 				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
3535 				found = B_TRUE;
3536 			}
3537 		}
3538 		ddi_prop_free(bytes);
3539 	}
3540 
3541 	/*
3542 	 * Finally(!), if there's a valid "mac-address" property (created
3543 	 * if we netbooted from this interface), we must use this instead
3544 	 * of any of the above to ensure that the NFS/install server doesn't
3545 	 * get confused by the address changing as Solaris takes over!
3546 	 */
3547 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3548 	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
3549 	if (err == DDI_PROP_SUCCESS) {
3550 		if (nelts == ETHERADDRL) {
3551 			while (nelts--)
3552 				hw->mac.addr[nelts] = bytes[nelts];
3553 			found = B_TRUE;
3554 		}
3555 		ddi_prop_free(bytes);
3556 	}
3557 
3558 	if (found) {
3559 		bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
3560 		return (B_TRUE);
3561 	}
3562 #else
3563 	_NOTE(ARGUNUSED(ixgbe));
3564 #endif
3565 
3566 	return (B_TRUE);
3567 }
3568 
3569 #pragma inline(ixgbe_arm_watchdog_timer)
3570 static void
3571 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
3572 {
3573 	/*
3574 	 * Fire a one-second watchdog timer
3575 	 */
3576 	ixgbe->watchdog_tid =
3577 	    timeout(ixgbe_local_timer,
3578 	    (void *)ixgbe, 1 * drv_usectohz(1000000));
3580 }
3581 
3582 /*
3583  * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
3584  */
3585 void
3586 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
3587 {
3588 	mutex_enter(&ixgbe->watchdog_lock);
3589 
3590 	if (!ixgbe->watchdog_enable) {
3591 		ixgbe->watchdog_enable = B_TRUE;
3592 		ixgbe->watchdog_start = B_TRUE;
3593 		ixgbe_arm_watchdog_timer(ixgbe);
3594 	}
3595 
3596 	mutex_exit(&ixgbe->watchdog_lock);
3597 }
3598 
3599 /*
3600  * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
3601  */
3602 void
3603 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
3604 {
3605 	timeout_id_t tid;
3606 
3607 	mutex_enter(&ixgbe->watchdog_lock);
3608 
3609 	ixgbe->watchdog_enable = B_FALSE;
3610 	ixgbe->watchdog_start = B_FALSE;
3611 	tid = ixgbe->watchdog_tid;
3612 	ixgbe->watchdog_tid = 0;
3613 
3614 	mutex_exit(&ixgbe->watchdog_lock);
3615 
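	/*
	 * untimeout() is called after watchdog_lock is dropped: it waits
	 * for an in-flight ixgbe_local_timer() to complete, and that
	 * handler may itself need the lock to rearm the timer.
	 */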
3616 	if (tid != 0)
3617 		(void) untimeout(tid);
3618 }
3619 
3620 /*
3621  * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
3622  */
3623 void
3624 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
3625 {
3626 	mutex_enter(&ixgbe->watchdog_lock);
3627 
3628 	if (ixgbe->watchdog_enable) {
3629 		if (!ixgbe->watchdog_start) {
3630 			ixgbe->watchdog_start = B_TRUE;
3631 			ixgbe_arm_watchdog_timer(ixgbe);
3632 		}
3633 	}
3634 
3635 	mutex_exit(&ixgbe->watchdog_lock);
3636 }
3637 
3638 /*
3639  * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
3640  */
3641 static void
3642 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
3643 {
3644 	mutex_enter(&ixgbe->watchdog_lock);
3645 
3646 	if (ixgbe->watchdog_start)
3647 		ixgbe_arm_watchdog_timer(ixgbe);
3648 
3649 	mutex_exit(&ixgbe->watchdog_lock);
3650 }
3651 
3652 /*
3653  * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
3654  */
3655 void
3656 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
3657 {
3658 	timeout_id_t tid;
3659 
3660 	mutex_enter(&ixgbe->watchdog_lock);
3661 
3662 	ixgbe->watchdog_start = B_FALSE;
3663 	tid = ixgbe->watchdog_tid;
3664 	ixgbe->watchdog_tid = 0;
3665 
3666 	mutex_exit(&ixgbe->watchdog_lock);
3667 
3668 	if (tid != 0)
3669 		(void) untimeout(tid);
3670 }
3671 
3672 /*
3673  * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
3674  */
3675 static void
3676 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
3677 {
3678 	struct ixgbe_hw *hw = &ixgbe->hw;
3679 
3680 	/*
3681 	 * mask all interrupts off
3682 	 */
3683 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
3684 
3685 	/*
3686 	 * for MSI-X, also disable autoclear
3687 	 */
3688 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3689 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
3690 	}
3691 
3692 	IXGBE_WRITE_FLUSH(hw);
3693 }
3694 
3695 /*
3696  * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
3697  */
3698 static void
3699 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
3700 {
3701 	struct ixgbe_hw *hw = &ixgbe->hw;
3702 	uint32_t eiac, eiam;
3703 	uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3704 
3705 	/* interrupt types to enable */
3706 	ixgbe->eims = IXGBE_EIMS_ENABLE_MASK;	/* shared code default */
3707 	ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER;	/* minus tcp timer */
3708 	ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */
3709 
3710 	/* enable automask on "other" causes that this adapter can generate */
3711 	eiam = ixgbe->capab->other_intr;
3712 
3713 	/*
3714 	 * msi-x mode
3715 	 */
3716 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3717 		/* enable autoclear but not on bits 29:20 */
3718 		eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
3719 
3720 		/* general purpose interrupt enable */
3721 		gpie |= (IXGBE_GPIE_MSIX_MODE
3722 		    | IXGBE_GPIE_PBA_SUPPORT
3723 		    | IXGBE_GPIE_OCD
3724 		    | IXGBE_GPIE_EIAME);
3725 	/*
3726 	 * non-msi-x mode
3727 	 */
3728 	} else {
3729 
3730 		/* disable autoclear, leave gpie at default */
3731 		eiac = 0;
3732 
3733 		/*
3734 		 * General purpose interrupt enable.
3735 		 * For 82599, extended interrupt automask enable
3736 		 * only in MSI or MSI-X mode
3737 		 */
3738 		if ((hw->mac.type < ixgbe_mac_82599EB) ||
3739 		    (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3740 			gpie |= IXGBE_GPIE_EIAME;
3741 		}
3742 	}
3743 
3744 	/* Enable specific "other" interrupt types */
3745 	switch (hw->mac.type) {
3746 	case ixgbe_mac_82598EB:
3747 		gpie |= ixgbe->capab->other_gpie;
3748 		break;
3749 
3750 	case ixgbe_mac_82599EB:
3751 		gpie |= ixgbe->capab->other_gpie;
3752 
3753 		/* Enable RSC Delay 8us when LRO enabled  */
3754 		if (ixgbe->lro_enable) {
3755 			gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3756 		}
3757 		break;
3758 
3759 	default:
3760 		break;
3761 	}
3762 
3763 	/* write to interrupt control registers */
3764 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3765 	IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3766 	IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3767 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3768 	IXGBE_WRITE_FLUSH(hw);
3769 }
3770 
3771 /*
3772  * ixgbe_loopback_ioctl - Loopback support.
3773  */
3774 enum ioc_reply
3775 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
3776 {
3777 	lb_info_sz_t *lbsp;
3778 	lb_property_t *lbpp;
3779 	uint32_t *lbmp;
3780 	uint32_t size;
3781 	uint32_t value;
3782 
3783 	if (mp->b_cont == NULL)
3784 		return (IOC_INVAL);
3785 
3786 	switch (iocp->ioc_cmd) {
3787 	default:
3788 		return (IOC_INVAL);
3789 
3790 	case LB_GET_INFO_SIZE:
3791 		size = sizeof (lb_info_sz_t);
3792 		if (iocp->ioc_count != size)
3793 			return (IOC_INVAL);
3794 
3795 		value = sizeof (lb_normal);
3796 		value += sizeof (lb_mac);
3797 		value += sizeof (lb_external);
3798 
3799 		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
3800 		*lbsp = value;
3801 		break;
3802 
3803 	case LB_GET_INFO:
3804 		value = sizeof (lb_normal);
3805 		value += sizeof (lb_mac);
3806 		value += sizeof (lb_external);
3807 
3808 		size = value;
3809 		if (iocp->ioc_count != size)
3810 			return (IOC_INVAL);
3811 
3812 		value = 0;
3813 		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
3814 
3815 		lbpp[value++] = lb_normal;
3816 		lbpp[value++] = lb_mac;
3817 		lbpp[value++] = lb_external;
3818 		break;
3819 
3820 	case LB_GET_MODE:
3821 		size = sizeof (uint32_t);
3822 		if (iocp->ioc_count != size)
3823 			return (IOC_INVAL);
3824 
3825 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3826 		*lbmp = ixgbe->loopback_mode;
3827 		break;
3828 
3829 	case LB_SET_MODE:
3830 		size = 0;
3831 		if (iocp->ioc_count != sizeof (uint32_t))
3832 			return (IOC_INVAL);
3833 
3834 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3835 		if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
3836 			return (IOC_INVAL);
3837 		break;
3838 	}
3839 
3840 	iocp->ioc_count = size;
3841 	iocp->ioc_error = 0;
3842 
3843 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3844 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3845 		return (IOC_INVAL);
3846 	}
3847 
3848 	return (IOC_REPLY);
3849 }
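
/*
 * For reference, assuming the standard <sys/netlb.h> definitions, the
 * M_IOCTL payloads handled above are laid out as:
 *
 *	LB_GET_INFO_SIZE: one lb_info_sz_t, set to 3 * sizeof (lb_property_t)
 *	LB_GET_INFO:	  an array of three lb_property_t entries
 *			  (lb_normal, lb_mac, lb_external)
 *	LB_GET_MODE:	  one uint32_t, the current loopback mode
 *	LB_SET_MODE:	  one uint32_t, the requested loopback mode
 */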
3850 
3851 /*
3852  * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
3853  */
3854 static boolean_t
3855 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
3856 {
3857 	if (mode == ixgbe->loopback_mode)
3858 		return (B_TRUE);
3859 
3860 	ixgbe->loopback_mode = mode;
3861 
3862 	if (mode == IXGBE_LB_NONE) {
3863 		/*
3864 		 * Reset the chip
3865 		 */
3866 		(void) ixgbe_reset(ixgbe);
3867 		return (B_TRUE);
3868 	}
3869 
3870 	mutex_enter(&ixgbe->gen_lock);
3871 
3872 	switch (mode) {
3873 	default:
3874 		mutex_exit(&ixgbe->gen_lock);
3875 		return (B_FALSE);
3876 
3877 	case IXGBE_LB_EXTERNAL:
3878 		break;
3879 
3880 	case IXGBE_LB_INTERNAL_MAC:
3881 		ixgbe_set_internal_mac_loopback(ixgbe);
3882 		break;
3883 	}
3884 
3885 	mutex_exit(&ixgbe->gen_lock);
3886 
3887 	return (B_TRUE);
3888 }
3889 
3890 /*
3891  * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
3892  */
3893 static void
3894 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
3895 {
3896 	struct ixgbe_hw *hw;
3897 	uint32_t reg;
3898 	uint8_t atlas;
3899 
3900 	hw = &ixgbe->hw;
3901 
3902 	/*
3903 	 * Setup MAC loopback
3904 	 */
3905 	reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3906 	reg |= IXGBE_HLREG0_LPBK;
3907 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
3908 
3909 	reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3910 	reg &= ~IXGBE_AUTOC_LMS_MASK;
3911 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
3912 
3913 	/*
3914 	 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
3915 	 */
3916 	switch (hw->mac.type) {
3917 	case ixgbe_mac_82598EB:
3918 		(void) ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
3919 		    &atlas);
3920 		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
3921 		(void) ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
3922 		    atlas);
3923 
3924 		(void) ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
3925 		    &atlas);
3926 		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
3927 		(void) ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
3928 		    atlas);
3929 
3930 		(void) ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
3931 		    &atlas);
3932 		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
3933 		(void) ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
3934 		    atlas);
3935 
3936 		(void) ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
3937 		    &atlas);
3938 		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
3939 		(void) ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
3940 		    atlas);
3941 		break;
3942 
3943 	case ixgbe_mac_82599EB:
3944 		reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3945 		reg |= (IXGBE_AUTOC_FLU |
3946 		    IXGBE_AUTOC_10G_KX4);
3947 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
3948 
3949 		(void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_10GB_FULL,
3950 		    B_FALSE, B_TRUE);
3951 		break;
3952 
3953 	default:
3954 		break;
3955 	}
3956 }
3957 
3958 #pragma inline(ixgbe_intr_rx_work)
3959 /*
3960  * ixgbe_intr_rx_work - RX processing of ISR.
3961  */
3962 static void
3963 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
3964 {
3965 	mblk_t *mp;
3966 
3967 	mutex_enter(&rx_ring->rx_lock);
3968 
3969 	mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
3970 	mutex_exit(&rx_ring->rx_lock);
3971 
3972 	if (mp != NULL)
3973 		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
3974 		    rx_ring->ring_gen_num);
3975 }
3976 
3977 #pragma inline(ixgbe_intr_tx_work)
3978 /*
3979  * ixgbe_intr_tx_work - TX processing of ISR.
3980  */
3981 static void
3982 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
3983 {
3984 	ixgbe_t *ixgbe = tx_ring->ixgbe;
3985 
3986 	/*
3987 	 * Recycle the tx descriptors
3988 	 */
3989 	tx_ring->tx_recycle(tx_ring);
3990 
3991 	/*
3992 	 * Schedule the re-transmit
3993 	 */
3994 	if (tx_ring->reschedule &&
3995 	    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
3996 		tx_ring->reschedule = B_FALSE;
3997 		mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
3998 		    tx_ring->ring_handle);
3999 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
4000 	}
4001 }
4002 
4003 #pragma inline(ixgbe_intr_other_work)
4004 /*
4005  * ixgbe_intr_other_work - Process interrupt types other than tx/rx
4006  */
4007 static void
4008 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
4009 {
4010 	ASSERT(mutex_owned(&ixgbe->gen_lock));
4011 
4012 	/*
4013 	 * handle link status change
4014 	 */
4015 	if (eicr & IXGBE_EICR_LSC) {
4016 		ixgbe_driver_link_check(ixgbe);
4017 		ixgbe_get_hw_state(ixgbe);
4018 	}
4019 
4020 	/*
4021 	 * check for fan failure on adapters with fans
4022 	 */
4023 	if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
4024 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
4025 		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
4026 
4027 		/*
4028 		 * Disable the adapter interrupts
4029 		 */
4030 		ixgbe_disable_adapter_interrupts(ixgbe);
4031 
4032 		/*
4033 		 * Disable Rx/Tx units
4034 		 */
4035 		(void) ixgbe_stop_adapter(&ixgbe->hw);
4036 
4037 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
4038 		ixgbe_error(ixgbe,
4039 		    "Problem: Network adapter has been stopped "
4040 		    "because the fan has stopped.\n");
4041 		ixgbe_error(ixgbe,
4042 		    "Action: Replace the adapter.\n");
4043 
4044 		/* re-enable the interrupt, which was automasked */
4045 		ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
4046 	}
4047 
4048 	/*
4049 	 * Do SFP check for adapters with hot-plug capability
4050 	 */
4051 	if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) &&
4052 	    ((eicr & IXGBE_EICR_GPI_SDP1) || (eicr & IXGBE_EICR_GPI_SDP2))) {
4053 		ixgbe->eicr = eicr;
4054 		if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
4055 		    ixgbe_sfp_check, (void *)ixgbe,
4056 		    DDI_NOSLEEP)) != DDI_SUCCESS) {
4057 			ixgbe_log(ixgbe, "No memory available to dispatch "
4058 			    "taskq for SFP check");
4059 		}
4060 	}
4061 
4062 	/*
4063 	 * Do over-temperature check for adapters with temp sensor
4064 	 */
4065 	if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) &&
4066 	    ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
4067 		ixgbe->eicr = eicr;
4068 		if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq,
4069 		    ixgbe_overtemp_check, (void *)ixgbe,
4070 		    DDI_NOSLEEP)) != DDI_SUCCESS) {
4071 			ixgbe_log(ixgbe, "No memory available to dispatch "
4072 			    "taskq for overtemp check");
4073 		}
4074 	}
4075 }
4076 
4077 /*
4078  * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
4079  */
4080 static uint_t
4081 ixgbe_intr_legacy(void *arg1, void *arg2)
4082 {
4083 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4084 	struct ixgbe_hw *hw = &ixgbe->hw;
4085 	ixgbe_tx_ring_t *tx_ring;
4086 	ixgbe_rx_ring_t *rx_ring;
4087 	uint32_t eicr;
4088 	mblk_t *mp;
4089 	boolean_t tx_reschedule;
4090 	uint_t result;
4091 
4092 	_NOTE(ARGUNUSED(arg2));
4093 
4094 	mutex_enter(&ixgbe->gen_lock);
4095 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4096 		mutex_exit(&ixgbe->gen_lock);
4097 		return (DDI_INTR_UNCLAIMED);
4098 	}
4099 
4100 	mp = NULL;
4101 	tx_reschedule = B_FALSE;
4102 
4103 	/*
4104 	 * Any bit set in eicr: claim this interrupt
4105 	 */
4106 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4107 
4108 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4109 		mutex_exit(&ixgbe->gen_lock);
4110 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4111 		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4112 		return (DDI_INTR_CLAIMED);
4113 	}
4114 
4115 	if (eicr) {
4116 		/*
4117 		 * For a legacy interrupt there is only one vector, so
4118 		 * only one rx ring and one tx ring are enabled.
4119 		 */
4120 		ASSERT(ixgbe->num_rx_rings == 1);
4121 		ASSERT(ixgbe->num_tx_rings == 1);
4122 
4123 		/*
4124 		 * For legacy interrupt, rx rings[0] will use RTxQ[0].
4125 		 */
4126 		if (eicr & 0x1) {
4127 			ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
4128 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4129 			ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4130 			/*
4131 			 * Clean the rx descriptors
4132 			 */
4133 			rx_ring = &ixgbe->rx_rings[0];
4134 			mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
4135 		}
4136 
4137 		/*
4138 		 * For legacy interrupt, tx rings[0] will use RTxQ[1].
4139 		 */
4140 		if (eicr & 0x2) {
4141 			/*
4142 			 * Recycle the tx descriptors
4143 			 */
4144 			tx_ring = &ixgbe->tx_rings[0];
4145 			tx_ring->tx_recycle(tx_ring);
4146 
4147 			/*
4148 			 * Schedule the re-transmit
4149 			 */
4150 			tx_reschedule = (tx_ring->reschedule &&
4151 			    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4152 		}
4153 
4154 		/* any interrupt type other than tx/rx */
4155 		if (eicr & ixgbe->capab->other_intr) {
4156 			switch (hw->mac.type) {
4157 			case ixgbe_mac_82598EB:
4158 				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4159 				break;
4160 
4161 			case ixgbe_mac_82599EB:
4162 				ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4163 				IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4164 				break;
4165 
4166 			default:
4167 				break;
4168 			}
4169 			ixgbe_intr_other_work(ixgbe, eicr);
4170 			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4171 		}
4172 
4173 		mutex_exit(&ixgbe->gen_lock);
4174 
4175 		result = DDI_INTR_CLAIMED;
4176 	} else {
4177 		mutex_exit(&ixgbe->gen_lock);
4178 
4179 		/*
4180 		 * No interrupt cause bits set: don't claim this interrupt.
4181 		 */
4182 		result = DDI_INTR_UNCLAIMED;
4183 	}
4184 
4185 	/* re-enable the interrupts which were automasked */
4186 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4187 
4188 	/*
4189 	 * Do the following work outside of the gen_lock
4190 	 */
4191 	if (mp != NULL) {
4192 		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
4193 		    rx_ring->ring_gen_num);
4194 	}
4195 
4196 	if (tx_reschedule)  {
4197 		tx_ring->reschedule = B_FALSE;
4198 		mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
4199 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
4200 	}
4201 
4202 	return (result);
4203 }
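
/*
 * Note on the EICR read above: on this hardware EICR is read-to-clear,
 * so the read both returns and acknowledges the pending cause bits.  A
 * zero value therefore reliably means the interrupt was not ours and
 * can be left unclaimed for another handler sharing the line.
 */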
4204 
4205 /*
4206  * ixgbe_intr_msi - Interrupt handler for MSI.
4207  */
4208 static uint_t
4209 ixgbe_intr_msi(void *arg1, void *arg2)
4210 {
4211 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4212 	struct ixgbe_hw *hw = &ixgbe->hw;
4213 	uint32_t eicr;
4214 
4215 	_NOTE(ARGUNUSED(arg2));
4216 
4217 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4218 
4219 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4220 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4221 		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4222 		return (DDI_INTR_CLAIMED);
4223 	}
4224 
4225 	/*
4226 	 * For an MSI interrupt there is only one vector, so only
4227 	 * one rx ring and one tx ring are enabled.
4228 	 */
4229 	ASSERT(ixgbe->num_rx_rings == 1);
4230 	ASSERT(ixgbe->num_tx_rings == 1);
4231 
4232 	/*
4233 	 * For MSI interrupt, rx rings[0] will use RTxQ[0].
4234 	 */
4235 	if (eicr & 0x1) {
4236 		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4237 	}
4238 
4239 	/*
4240 	 * For MSI interrupt, tx rings[0] will use RTxQ[1].
4241 	 */
4242 	if (eicr & 0x2) {
4243 		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4244 	}
4245 
4246 	/* any interrupt type other than tx/rx */
4247 	if (eicr & ixgbe->capab->other_intr) {
4248 		mutex_enter(&ixgbe->gen_lock);
4249 		switch (hw->mac.type) {
4250 		case ixgbe_mac_82598EB:
4251 			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4252 			break;
4253 
4254 		case ixgbe_mac_82599EB:
4255 			ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4256 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4257 			break;
4258 
4259 		default:
4260 			break;
4261 		}
4262 		ixgbe_intr_other_work(ixgbe, eicr);
4263 		ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4264 		mutex_exit(&ixgbe->gen_lock);
4265 	}
4266 
4267 	/* re-enable the interrupts which were automasked */
4268 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4269 
4270 	return (DDI_INTR_CLAIMED);
4271 }
4272 
4273 /*
4274  * ixgbe_intr_msix - Interrupt handler for MSI-X.
4275  */
4276 static uint_t
4277 ixgbe_intr_msix(void *arg1, void *arg2)
4278 {
4279 	ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
4280 	ixgbe_t *ixgbe = vect->ixgbe;
4281 	struct ixgbe_hw *hw = &ixgbe->hw;
4282 	uint32_t eicr;
4283 	int r_idx = 0;
4284 
4285 	_NOTE(ARGUNUSED(arg2));
4286 
4287 	/*
4288 	 * Clean each rx ring that has its bit set in the map
4289 	 */
4290 	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
4291 	while (r_idx >= 0) {
4292 		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
4293 		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
4294 		    (ixgbe->num_rx_rings - 1));
4295 	}
4296 
4297 	/*
4298 	 * Clean each tx ring that has its bit set in the map
4299 	 */
4300 	r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
4301 	while (r_idx >= 0) {
4302 		ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
4303 		r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
4304 		    (ixgbe->num_tx_rings - 1));
4305 	}
4306 
4308 	/*
4309 	 * Clean other interrupt (link change) that has its bit set in the map
4310 	 */
4311 	if (BT_TEST(vect->other_map, 0) == 1) {
4312 		eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4313 
4314 		if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4315 		    DDI_FM_OK) {
4316 			ddi_fm_service_impact(ixgbe->dip,
4317 			    DDI_SERVICE_DEGRADED);
4318 			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4319 			return (DDI_INTR_CLAIMED);
4320 		}
4321 
4322 		/*
4323 		 * Check "other" cause bits: any interrupt type other than tx/rx
4324 		 */
4325 		if (eicr & ixgbe->capab->other_intr) {
4326 			mutex_enter(&ixgbe->gen_lock);
4327 			switch (hw->mac.type) {
4328 			case ixgbe_mac_82598EB:
4329 				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4330 				ixgbe_intr_other_work(ixgbe, eicr);
4331 				break;
4332 
4333 			case ixgbe_mac_82599EB:
4334 				ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4335 				ixgbe_intr_other_work(ixgbe, eicr);
4336 				break;
4337 
4338 			default:
4339 				break;
4340 			}
4341 			mutex_exit(&ixgbe->gen_lock);
4342 		}
4343 
4344 		/* re-enable the interrupts which were automasked */
4345 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4346 	}
4347 
4348 	return (DDI_INTR_CLAIMED);
4349 }
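
/*
 * A minimal sketch of the bitmap walk used above: bt_getlowbit(map,
 * low, high) returns the index of the lowest bit set within [low,
 * high], or -1 if none.  With rx_map = 0b0101 and four rx rings, the
 * loop visits rings 0 and 2:
 *
 *	r_idx = bt_getlowbit(rx_map, 0, 3);	returns 0
 *	r_idx = bt_getlowbit(rx_map, 1, 3);	returns 2
 *	r_idx = bt_getlowbit(rx_map, 3, 3);	returns -1, loop ends
 */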
4350 
4351 /*
4352  * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4353  *
4354  * Normal sequence is to try MSI-X; if not successful, try MSI;
4355  * if not successful, try Legacy.
4356  * ixgbe->intr_force can be used to force sequence to start with
4357  * any of the 3 types.
4358  * If MSI-X is not used, number of tx/rx rings is forced to 1.
4359  */
4360 static int
4361 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
4362 {
4363 	dev_info_t *devinfo;
4364 	int intr_types;
4365 	int rc;
4366 
4367 	devinfo = ixgbe->dip;
4368 
4369 	/*
4370 	 * Get supported interrupt types
4371 	 */
4372 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
4373 
4374 	if (rc != DDI_SUCCESS) {
4375 		ixgbe_log(ixgbe,
4376 		    "Get supported interrupt types failed: %d", rc);
4377 		return (IXGBE_FAILURE);
4378 	}
4379 	IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
4380 
4381 	ixgbe->intr_type = 0;
4382 
4383 	/*
4384 	 * Install MSI-X interrupts
4385 	 */
4386 	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
4387 	    (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
4388 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
4389 		if (rc == IXGBE_SUCCESS)
4390 			return (IXGBE_SUCCESS);
4391 
4392 		ixgbe_log(ixgbe,
4393 		    "Allocate MSI-X failed, trying MSI interrupts...");
4394 	}
4395 
4396 	/*
4397 	 * MSI-X not used, force rings and groups to 1
4398 	 */
4399 	ixgbe->num_rx_rings = 1;
4400 	ixgbe->num_rx_groups = 1;
4401 	ixgbe->num_tx_rings = 1;
4402 	ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
4403 	ixgbe_log(ixgbe,
4404 	    "MSI-X not used, force rings and groups number to 1");
4405 
4406 	/*
4407 	 * Install MSI interrupts
4408 	 */
4409 	if ((intr_types & DDI_INTR_TYPE_MSI) &&
4410 	    (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
4411 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
4412 		if (rc == IXGBE_SUCCESS)
4413 			return (IXGBE_SUCCESS);
4414 
4415 		ixgbe_log(ixgbe,
4416 		    "Allocate MSI failed, trying Legacy interrupts...");
4417 	}
4418 
4419 	/*
4420 	 * Install legacy interrupts
4421 	 */
4422 	if (intr_types & DDI_INTR_TYPE_FIXED) {
4423 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
4424 		if (rc == IXGBE_SUCCESS)
4425 			return (IXGBE_SUCCESS);
4426 
4427 		ixgbe_log(ixgbe,
4428 		    "Allocate Legacy interrupts failed");
4429 	}
4430 
4431 	/*
4432 	 * If none of the 3 types succeeded, return failure
4433 	 */
4434 	return (IXGBE_FAILURE);
4435 }
4436 
4437 /*
4438  * ixgbe_alloc_intr_handles - Allocate interrupt handles.
4439  *
4440  * For legacy and MSI, only 1 handle is needed.  For MSI-X,
4441  * if fewer than 2 handles are available, return failure.
4442  * Upon success, this maps the vectors to rx and tx rings for
4443  * interrupts.
4444  */
4445 static int
4446 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
4447 {
4448 	dev_info_t *devinfo;
4449 	int request, count, actual;
4450 	int minimum;
4451 	int rc;
4452 	uint32_t ring_per_group;
4453 
4454 	devinfo = ixgbe->dip;
4455 
4456 	switch (intr_type) {
4457 	case DDI_INTR_TYPE_FIXED:
4458 		request = 1;	/* Request 1 legacy interrupt handle */
4459 		minimum = 1;
4460 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
4461 		break;
4462 
4463 	case DDI_INTR_TYPE_MSI:
4464 		request = 1;	/* Request 1 MSI interrupt handle */
4465 		minimum = 1;
4466 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
4467 		break;
4468 
4469 	case DDI_INTR_TYPE_MSIX:
4470 		/*
4471 		 * The best number of vectors for the adapter is
4472 		 * (# rx rings + # tx rings); however, the request is
4473 		 * capped at 16 and at the adapter's max_ring_vect limit.
4474 		 */
4475 		request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings);
4476 		if (request > ixgbe->capab->max_ring_vect)
4477 			request = ixgbe->capab->max_ring_vect;
4478 		minimum = 1;
4479 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
4480 		break;
4481 
4482 	default:
4483 		ixgbe_log(ixgbe,
4484 		    "invalid call to ixgbe_alloc_intr_handles(): %d\n",
4485 		    intr_type);
4486 		return (IXGBE_FAILURE);
4487 	}
4488 	IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d  minimum: %d",
4489 	    request, minimum);
4490 
4491 	/*
4492 	 * Get number of supported interrupts
4493 	 */
4494 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
4495 	if ((rc != DDI_SUCCESS) || (count < minimum)) {
4496 		ixgbe_log(ixgbe,
4497 		    "Get interrupt number failed. Return: %d, count: %d",
4498 		    rc, count);
4499 		return (IXGBE_FAILURE);
4500 	}
4501 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
4502 
4503 	actual = 0;
4504 	ixgbe->intr_cnt = 0;
4505 	ixgbe->intr_cnt_max = 0;
4506 	ixgbe->intr_cnt_min = 0;
4507 
4508 	/*
4509 	 * Allocate an array of interrupt handles
4510 	 */
4511 	ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
4512 	ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
4513 
4514 	rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
4515 	    request, &actual, DDI_INTR_ALLOC_NORMAL);
4516 	if (rc != DDI_SUCCESS) {
4517 		ixgbe_log(ixgbe, "Allocate interrupts failed. "
4518 		    "return: %d, request: %d, actual: %d",
4519 		    rc, request, actual);
4520 		goto alloc_handle_fail;
4521 	}
4522 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
4523 
4524 	/*
4525 	 * upper/lower limit of interrupts
4526 	 */
4527 	ixgbe->intr_cnt = actual;
4528 	ixgbe->intr_cnt_max = request;
4529 	ixgbe->intr_cnt_min = minimum;
4530 
4531 	/*
4532 	 * The number of RSS rings per group must not exceed the number
4533 	 * of rx interrupt vectors; otherwise the rx ring count is reduced.
4534 	 */
4535 	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4536 	ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0);
4537 	if (actual < ring_per_group) {
4538 		ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual;
4539 		ixgbe_setup_vmdq_rss_conf(ixgbe);
4540 	}
4541 
4542 	/*
4543 	 * Now we know the actual number of vectors.  Here we map the
4544 	 * vectors to the "other" cause and to the rx and tx rings.
4545 	 */
4546 	if (actual < minimum) {
4547 		ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
4548 		    actual);
4549 		goto alloc_handle_fail;
4550 	}
4551 
4552 	/*
4553 	 * Get priority for first vector, assume remaining are all the same
4554 	 */
4555 	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
4556 	if (rc != DDI_SUCCESS) {
4557 		ixgbe_log(ixgbe,
4558 		    "Get interrupt priority failed: %d", rc);
4559 		goto alloc_handle_fail;
4560 	}
4561 
4562 	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
4563 	if (rc != DDI_SUCCESS) {
4564 		ixgbe_log(ixgbe,
4565 		    "Get interrupt cap failed: %d", rc);
4566 		goto alloc_handle_fail;
4567 	}
4568 
4569 	ixgbe->intr_type = intr_type;
4570 
4571 	return (IXGBE_SUCCESS);
4572 
4573 alloc_handle_fail:
4574 	ixgbe_rem_intrs(ixgbe);
4575 
4576 	return (IXGBE_FAILURE);
4577 }
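
/*
 * Worked example with illustrative numbers: with 8 rx rings in one
 * group, 8 tx rings and capab->max_ring_vect = 16, the MSI-X case
 * requests min(16, 8 + 8) = 16 vectors.  If ddi_intr_alloc() then
 * delivers only actual = 4, ring_per_group = 8 exceeds that, so the
 * rx ring count is reduced to num_rx_groups * actual = 4 before the
 * rings are mapped to vectors.
 */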
4578 
4579 /*
4580  * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
4581  *
4582  * Before adding the interrupt handlers, the interrupt vectors have
4583  * been allocated, and the rx/tx rings have also been allocated.
4584  */
4585 static int
4586 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
4587 {
4588 	int vector = 0;
4589 	int rc;
4590 
4591 	switch (ixgbe->intr_type) {
4592 	case DDI_INTR_TYPE_MSIX:
4593 		/*
4594 		 * Add interrupt handler for all vectors
4595 		 */
4596 		for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
4597 			/*
4598 			 * install pointer to vect_map[vector]
4599 			 */
4600 			rc = ddi_intr_add_handler(ixgbe->htable[vector],
4601 			    (ddi_intr_handler_t *)ixgbe_intr_msix,
4602 			    (void *)&ixgbe->vect_map[vector], NULL);
4603 
4604 			if (rc != DDI_SUCCESS) {
4605 				ixgbe_log(ixgbe,
4606 				    "Add interrupt handler failed. "
4607 				    "return: %d, vector: %d", rc, vector);
4608 				for (vector--; vector >= 0; vector--) {
4609 					(void) ddi_intr_remove_handler(
4610 					    ixgbe->htable[vector]);
4611 				}
4612 				return (IXGBE_FAILURE);
4613 			}
4614 		}
4615 
4616 		break;
4617 
4618 	case DDI_INTR_TYPE_MSI:
4619 		/*
4620 		 * Add the interrupt handler for the only vector
4621 		 */
4622 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
4623 		    (ddi_intr_handler_t *)ixgbe_intr_msi,
4624 		    (void *)ixgbe, NULL);
4625 
4626 		if (rc != DDI_SUCCESS) {
4627 			ixgbe_log(ixgbe,
4628 			    "Add MSI interrupt handler failed: %d", rc);
4629 			return (IXGBE_FAILURE);
4630 		}
4631 
4632 		break;
4633 
4634 	case DDI_INTR_TYPE_FIXED:
4635 		/*
4636 		 * Add the interrupt handler for the only vector
4637 		 */
4638 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
4639 		    (ddi_intr_handler_t *)ixgbe_intr_legacy,
4640 		    (void *)ixgbe, NULL);
4641 
4642 		if (rc != DDI_SUCCESS) {
4643 			ixgbe_log(ixgbe,
4644 			    "Add legacy interrupt handler failed: %d", rc);
4645 			return (IXGBE_FAILURE);
4646 		}
4647 
4648 		break;
4649 
4650 	default:
4651 		return (IXGBE_FAILURE);
4652 	}
4653 
4654 	return (IXGBE_SUCCESS);
4655 }
4656 
4657 #pragma inline(ixgbe_map_rxring_to_vector)
4658 /*
4659  * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
4660  */
4661 static void
4662 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
4663 {
4664 	/*
4665 	 * Set bit in map
4666 	 */
4667 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
4668 
4669 	/*
4670 	 * Count bits set
4671 	 */
4672 	ixgbe->vect_map[v_idx].rxr_cnt++;
4673 
4674 	/*
4675 	 * Remember bit position
4676 	 */
4677 	ixgbe->rx_rings[r_idx].intr_vector = v_idx;
4678 	ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
4679 }
4680 
4681 #pragma inline(ixgbe_map_txring_to_vector)
4682 /*
4683  * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
4684  */
4685 static void
4686 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
4687 {
4688 	/*
4689 	 * Set bit in map
4690 	 */
4691 	BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
4692 
4693 	/*
4694 	 * Count bits set
4695 	 */
4696 	ixgbe->vect_map[v_idx].txr_cnt++;
4697 
4698 	/*
4699 	 * Remember bit position
4700 	 */
4701 	ixgbe->tx_rings[t_idx].intr_vector = v_idx;
4702 	ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
4703 }
4704 
4705 /*
4706  * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
4707  * allocation register (IVAR).
4708  * cause:
4709  *   -1 : other cause
4710  *    0 : rx
4711  *    1 : tx
4712  */
4713 static void
4714 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
4715     int8_t cause)
4716 {
4717 	struct ixgbe_hw *hw = &ixgbe->hw;
4718 	u32 ivar, index;
4719 
4720 	switch (hw->mac.type) {
4721 	case ixgbe_mac_82598EB:
4722 		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4723 		if (cause == -1) {
4724 			cause = 0;
4725 		}
4726 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4727 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4728 		ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4729 		ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4730 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4731 		break;
4732 
4733 	case ixgbe_mac_82599EB:
4734 		if (cause == -1) {
4735 			/* other causes */
4736 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4737 			index = (intr_alloc_entry & 1) * 8;
4738 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4739 			ivar &= ~(0xFF << index);
4740 			ivar |= (msix_vector << index);
4741 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4742 		} else {
4743 			/* tx or rx causes */
4744 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4745 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4746 			ivar = IXGBE_READ_REG(hw,
4747 			    IXGBE_IVAR(intr_alloc_entry >> 1));
4748 			ivar &= ~(0xFF << index);
4749 			ivar |= (msix_vector << index);
4750 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4751 			    ivar);
4752 		}
4753 		break;
4754 
4755 	default:
4756 		break;
4757 	}
4758 }
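
/*
 * IVAR arithmetic illustration for the 82598 branch above (derived
 * from the index math, not quoted from the datasheet): each 32-bit
 * IVAR register holds four 8-bit entries.  Mapping rx queue 5
 * (cause = 0) to MSI-X vector 2:
 *
 *	index = (((0 * 64) + 5) >> 2) & 0x1F = 1	use IVAR(1)
 *	lane  = 5 & 0x3 = 1				bits 15:8
 *	entry = 2 | IXGBE_IVAR_ALLOC_VAL
 */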
4759 
4760 /*
4761  * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
4762  * given interrupt vector allocation register (IVAR).
4763  * cause:
4764  *   -1 : other cause
4765  *    0 : rx
4766  *    1 : tx
4767  */
4768 static void
4769 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4770 {
4771 	struct ixgbe_hw *hw = &ixgbe->hw;
4772 	u32 ivar, index;
4773 
4774 	switch (hw->mac.type) {
4775 	case ixgbe_mac_82598EB:
4776 		if (cause == -1) {
4777 			cause = 0;
4778 		}
4779 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4780 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4781 		ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4782 		    (intr_alloc_entry & 0x3)));
4783 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4784 		break;
4785 
4786 	case ixgbe_mac_82599EB:
4787 		if (cause == -1) {
4788 			/* other causes */
4789 			index = (intr_alloc_entry & 1) * 8;
4790 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4791 			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4792 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4793 		} else {
4794 			/* tx or rx causes */
4795 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4796 			ivar = IXGBE_READ_REG(hw,
4797 			    IXGBE_IVAR(intr_alloc_entry >> 1));
4798 			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4799 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4800 			    ivar);
4801 		}
4802 		break;
4803 
4804 	default:
4805 		break;
4806 	}
4807 }
4808 
4809 /*
4810  * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
4811  * given interrupt vector allocation register (IVAR).
4812  * cause:
4813  *   -1 : other cause
4814  *    0 : rx
4815  *    1 : tx
4816  */
4817 static void
4818 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4819 {
4820 	struct ixgbe_hw *hw = &ixgbe->hw;
4821 	u32 ivar, index;
4822 
4823 	switch (hw->mac.type) {
4824 	case ixgbe_mac_82598EB:
4825 		if (cause == -1) {
4826 			cause = 0;
4827 		}
4828 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4829 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4830 		ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
4831 		    (intr_alloc_entry & 0x3)));
4832 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4833 		break;
4834 
4835 	case ixgbe_mac_82599EB:
4836 		if (cause == -1) {
4837 			/* other causes */
4838 			index = (intr_alloc_entry & 1) * 8;
4839 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4840 			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4841 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4842 		} else {
4843 			/* tx or rx causes */
4844 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4845 			ivar = IXGBE_READ_REG(hw,
4846 			    IXGBE_IVAR(intr_alloc_entry >> 1));
4847 			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4848 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4849 			    ivar);
4850 		}
4851 		break;
4852 
4853 	default:
4854 		break;
4855 	}
4856 }
4857 
4858 /*
4859  * Convert the driver-maintained rx ring index to the corresponding
4860  * rx ring index in hardware.
4861  */
4862 static uint32_t
4863 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
4864 {
4866 	struct ixgbe_hw *hw = &ixgbe->hw;
4867 	uint32_t rx_ring_per_group, hw_rx_index;
4868 
4869 	if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
4870 	    ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
4871 		return (sw_rx_index);
4872 	} else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
4873 		switch (hw->mac.type) {
4874 		case ixgbe_mac_82598EB:
4875 			return (sw_rx_index);
4876 
4877 		case ixgbe_mac_82599EB:
4878 			return (sw_rx_index * 2);
4879 
4880 		default:
4881 			break;
4882 		}
4883 	} else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
4884 		rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4885 
4886 		switch (hw->mac.type) {
4887 		case ixgbe_mac_82598EB:
4888 			hw_rx_index = (sw_rx_index / rx_ring_per_group) *
4889 			    16 + (sw_rx_index % rx_ring_per_group);
4890 			return (hw_rx_index);
4891 
4892 		case ixgbe_mac_82599EB:
4893 			if (ixgbe->num_rx_groups > 32) {
4894 				hw_rx_index = (sw_rx_index /
4895 				    rx_ring_per_group) * 2 +
4896 				    (sw_rx_index % rx_ring_per_group);
4897 			} else {
4898 				hw_rx_index = (sw_rx_index /
4899 				    rx_ring_per_group) * 4 +
4900 				    (sw_rx_index % rx_ring_per_group);
4901 			}
4902 			return (hw_rx_index);
4903 
4904 		default:
4905 			break;
4906 		}
4907 	}
4908 
4909 	/*
4910 	 * Should never be reached; returned just to satisfy the compiler.
4911 	 */
4912 	return (sw_rx_index);
4913 }
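
/*
 * Worked example for the VMDq+RSS case above: on 82599 with
 * num_rx_groups = 8 and 16 rx rings (2 rings per group), each VMDq
 * pool spans 4 hardware queues of which only 2 are used, so software
 * ring 5 maps to hardware queue (5 / 2) * 4 + (5 % 2) = 9.
 */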
4914 
4915 /*
4916  * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
4917  *
4918  * For MSI-X, this maps the rx, tx and other interrupts to
4919  * vector[0 .. (intr_cnt - 1)].
4920  */
4921 static int
4922 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
4923 {
4924 	int i, vector = 0;
4925 
4926 	/* initialize vector map */
4927 	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
4928 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4929 		ixgbe->vect_map[i].ixgbe = ixgbe;
4930 	}
4931 
4932 	/*
4933 	 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
4934 	 * tx rings[0] on RTxQ[1].
4935 	 */
4936 	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
4937 		ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
4938 		ixgbe_map_txring_to_vector(ixgbe, 0, 1);
4939 		return (IXGBE_SUCCESS);
4940 	}
4941 
4942 	/*
4943 	 * Interrupts/vectors mapping for MSI-X
4944 	 */
4945 
4946 	/*
4947 	 * Map other interrupt to vector 0,
4948 	 * Set bit in map and count the bits set.
4949 	 */
4950 	BT_SET(ixgbe->vect_map[vector].other_map, 0);
4951 	ixgbe->vect_map[vector].other_cnt++;
4952 
4953 	/*
4954 	 * Map rx ring interrupts to vectors
4955 	 */
4956 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
4957 		ixgbe_map_rxring_to_vector(ixgbe, i, vector);
4958 		vector = (vector + 1) % ixgbe->intr_cnt;
4959 	}
4960 
4961 	/*
4962 	 * Map tx ring interrupts to vectors
4963 	 */
4964 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
4965 		ixgbe_map_txring_to_vector(ixgbe, i, vector);
4966 		vector = (vector + 1) % ixgbe->intr_cnt;
4967 	}
4968 
4969 	return (IXGBE_SUCCESS);
4970 }
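
/*
 * Mapping illustration: with intr_cnt = 3, 4 rx rings and 2 tx rings,
 * the round-robin above yields
 *
 *	vector 0: other cause, rx ring 0, rx ring 3
 *	vector 1: rx ring 1, tx ring 0
 *	vector 2: rx ring 2, tx ring 1
 */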
4971 
4972 /*
4973  * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
4974  *
4975  * This relies on ring/vector mapping already set up in the
4976  * vect_map[] structures.
4977  */
4978 static void
4979 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
4980 {
4981 	struct ixgbe_hw *hw = &ixgbe->hw;
4982 	ixgbe_intr_vector_t *vect;	/* vector bitmap */
4983 	int r_idx;	/* ring index */
4984 	int v_idx;	/* vector index */
4985 	uint32_t hw_index;
4986 
4987 	/*
4988 	 * Clear any previous entries
4989 	 */
4990 	switch (hw->mac.type) {
4991 	case ixgbe_mac_82598EB:
4992 		for (v_idx = 0; v_idx < 25; v_idx++)
4993 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4994 		break;
4995 
4996 	case ixgbe_mac_82599EB:
4997 		for (v_idx = 0; v_idx < 64; v_idx++)
4998 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4999 		IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5000 		break;
5001 
5002 	default:
5003 		break;
5004 	}
5005 
5006 	/*
5007 	 * For a non-MSI-X interrupt, rx rings[0] will use RTxQ[0], and
5008 	 * tx rings[0] will use RTxQ[1].
5009 	 */
5010 	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5011 		ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5012 		ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5013 		return;
5014 	}
5015 
5016 	/*
5017 	 * For MSI-X interrupt, "Other" is always on vector[0].
5018 	 */
5019 	ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);
5020 
5021 	/*
5022 	 * For each interrupt vector, populate the IVAR table
5023 	 */
5024 	for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
5025 		vect = &ixgbe->vect_map[v_idx];
5026 
5027 		/*
5028 		 * For each rx ring bit set
5029 		 */
5030 		r_idx = bt_getlowbit(vect->rx_map, 0,
5031 		    (ixgbe->num_rx_rings - 1));
5032 
5033 		while (r_idx >= 0) {
5034 			hw_index = ixgbe->rx_rings[r_idx].hw_index;
5035 			ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0);
5036 			r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
5037 			    (ixgbe->num_rx_rings - 1));
5038 		}
5039 
5040 		/*
5041 		 * For each tx ring bit set
5042 		 */
5043 		r_idx = bt_getlowbit(vect->tx_map, 0,
5044 		    (ixgbe->num_tx_rings - 1));
5045 
5046 		while (r_idx >= 0) {
5047 			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
5048 			r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
5049 			    (ixgbe->num_tx_rings - 1));
5050 		}
5051 	}
5052 }
5053 
5054 /*
5055  * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
5056  */
5057 static void
5058 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
5059 {
5060 	int i;
5061 	int rc;
5062 
5063 	for (i = 0; i < ixgbe->intr_cnt; i++) {
5064 		rc = ddi_intr_remove_handler(ixgbe->htable[i]);
5065 		if (rc != DDI_SUCCESS) {
5066 			IXGBE_DEBUGLOG_1(ixgbe,
5067 			    "Remove intr handler failed: %d", rc);
5068 		}
5069 	}
5070 }
5071 
5072 /*
5073  * ixgbe_rem_intrs - Remove the allocated interrupts.
5074  */
5075 static void
5076 ixgbe_rem_intrs(ixgbe_t *ixgbe)
5077 {
5078 	int i;
5079 	int rc;
5080 
5081 	for (i = 0; i < ixgbe->intr_cnt; i++) {
5082 		rc = ddi_intr_free(ixgbe->htable[i]);
5083 		if (rc != DDI_SUCCESS) {
5084 			IXGBE_DEBUGLOG_1(ixgbe,
5085 			    "Free intr failed: %d", rc);
5086 		}
5087 	}
5088 
5089 	kmem_free(ixgbe->htable, ixgbe->intr_size);
5090 	ixgbe->htable = NULL;
5091 }
5092 
5093 /*
5094  * ixgbe_enable_intrs - Enable all the ddi interrupts.
5095  */
5096 static int
5097 ixgbe_enable_intrs(ixgbe_t *ixgbe)
5098 {
5099 	int i;
5100 	int rc;
5101 
5102 	/*
5103 	 * Enable interrupts
5104 	 */
5105 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5106 		/*
5107 		 * Call ddi_intr_block_enable() for MSI/MSI-X
5108 		 */
5109 		rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
5110 		if (rc != DDI_SUCCESS) {
5111 			ixgbe_log(ixgbe,
5112 			    "Enable block intr failed: %d", rc);
5113 			return (IXGBE_FAILURE);
5114 		}
5115 	} else {
5116 		/*
5117 		 * Call ddi_intr_enable() for Legacy/MSI non block enable
5118 		 */
5119 		for (i = 0; i < ixgbe->intr_cnt; i++) {
5120 			rc = ddi_intr_enable(ixgbe->htable[i]);
5121 			if (rc != DDI_SUCCESS) {
5122 				ixgbe_log(ixgbe,
5123 				    "Enable intr failed: %d", rc);
5124 				return (IXGBE_FAILURE);
5125 			}
5126 		}
5127 	}
5128 
5129 	return (IXGBE_SUCCESS);
5130 }
5131 
5132 /*
5133  * ixgbe_disable_intrs - Disable all the interrupts.
5134  */
5135 static int
5136 ixgbe_disable_intrs(ixgbe_t *ixgbe)
5137 {
5138 	int i;
5139 	int rc;
5140 
5141 	/*
5142 	 * Disable all interrupts
5143 	 */
5144 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5145 		rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
5146 		if (rc != DDI_SUCCESS) {
5147 			ixgbe_log(ixgbe,
5148 			    "Disable block intr failed: %d", rc);
5149 			return (IXGBE_FAILURE);
5150 		}
5151 	} else {
5152 		for (i = 0; i < ixgbe->intr_cnt; i++) {
5153 			rc = ddi_intr_disable(ixgbe->htable[i]);
5154 			if (rc != DDI_SUCCESS) {
5155 				ixgbe_log(ixgbe,
5156 				    "Disable intr failed: %d", rc);
5157 				return (IXGBE_FAILURE);
5158 			}
5159 		}
5160 	}
5161 
5162 	return (IXGBE_SUCCESS);
5163 }
5164 
5165 /*
5166  * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
5167  */
5168 static void
5169 ixgbe_get_hw_state(ixgbe_t *ixgbe)
5170 {
5171 	struct ixgbe_hw *hw = &ixgbe->hw;
5172 	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
5173 	boolean_t link_up = B_FALSE;
5174 	uint32_t pcs1g_anlp = 0;
5175 	uint32_t pcs1g_ana = 0;
5176 	boolean_t autoneg = B_FALSE;
5177 
5178 	ASSERT(mutex_owned(&ixgbe->gen_lock));
5179 	ixgbe->param_lp_1000fdx_cap = 0;
5180 	ixgbe->param_lp_100fdx_cap  = 0;
5181 
5182 	/* check for link, don't wait */
5183 	(void) ixgbe_check_link(hw, &speed, &link_up, false);
5184 	pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
5185 
5186 	if (link_up) {
5187 		pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
5188 
5189 		ixgbe->param_lp_1000fdx_cap =
5190 		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5191 		ixgbe->param_lp_100fdx_cap =
5192 		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5193 	}
5194 
5195 	(void) ixgbe_get_link_capabilities(hw, &speed, &autoneg);
5196 
5197 	ixgbe->param_adv_1000fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
5198 	    (speed & IXGBE_LINK_SPEED_1GB_FULL)) ? 1 : 0;
5199 	ixgbe->param_adv_100fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
5200 	    (speed & IXGBE_LINK_SPEED_100_FULL)) ? 1 : 0;
5201 }
5202 
5203 /*
5204  * ixgbe_get_driver_control - Notify that driver is in control of device.
5205  */
5206 static void
5207 ixgbe_get_driver_control(struct ixgbe_hw *hw)
5208 {
5209 	uint32_t ctrl_ext;
5210 
5211 	/*
5212 	 * Notify firmware that driver is in control of device
5213 	 */
5214 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5215 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
5216 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5217 }
5218 
5219 /*
5220  * ixgbe_release_driver_control - Notify that driver is no longer in control
5221  * of device.
5222  */
5223 static void
5224 ixgbe_release_driver_control(struct ixgbe_hw *hw)
5225 {
5226 	uint32_t ctrl_ext;
5227 
5228 	/*
5229 	 * Notify firmware that driver is no longer in control of device
5230 	 */
5231 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5232 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
5233 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5234 }
5235 
5236 /*
5237  * ixgbe_atomic_reserve - Atomically decrease *count_p by n; fails if
5238  * fewer than n remain.
5238  */
5239 int
5240 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
5241 {
5242 	uint32_t oldval;
5243 	uint32_t newval;
5244 
5245 	/*
5246 	 * ATOMICALLY
5247 	 */
5248 	do {
5249 		oldval = *count_p;
5250 		if (oldval < n)
5251 			return (-1);
5252 		newval = oldval - n;
5253 	} while (atomic_cas_32(count_p, oldval, newval) != oldval);
5254 
5255 	return (newval);
5256 }
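
/*
 * Usage sketch (hypothetical caller): to take n units from a shared
 * free count without letting it go negative,
 *
 *	if (ixgbe_atomic_reserve(&count, n) < 0)
 *		return (failure);	fewer than n were available
 *
 * The compare-and-swap loop retries whenever another thread changed
 * the count between the read and the swap, so concurrent reservers
 * can never over-commit.
 */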
5257 
5258 /*
5259  * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
5260  */
5261 static uint8_t *
5262 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
5263 {
5264 	uint8_t *addr = *upd_ptr;
5265 	uint8_t *new_ptr;
5266 
5267 	_NOTE(ARGUNUSED(hw));
5268 	_NOTE(ARGUNUSED(vmdq));
5269 
5270 	new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
5271 	*upd_ptr = new_ptr;
5272 	return (addr);
5273 }
5274 
5275 /*
5276  * FMA support
5277  */
5278 int
5279 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
5280 {
5281 	ddi_fm_error_t de;
5282 
5283 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5284 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
5285 	return (de.fme_status);
5286 }
5287 
5288 int
5289 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
5290 {
5291 	ddi_fm_error_t de;
5292 
5293 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
5294 	return (de.fme_status);
5295 }
5296 
5297 /*
5298  * ixgbe_fm_error_cb - The IO fault service error handling callback function.
5299  */
5300 static int
5301 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
5302 {
5303 	_NOTE(ARGUNUSED(impl_data));
5304 	/*
5305 	 * As the driver can always deal with an error in any DMA or
5306 	 * access handle, we can just return the fme_status value.
5307 	 */
5308 	pci_ereport_post(dip, err, NULL);
5309 	return (err->fme_status);
5310 }
5311 
5312 static void
5313 ixgbe_fm_init(ixgbe_t *ixgbe)
5314 {
5315 	ddi_iblock_cookie_t iblk;
5316 	int fma_dma_flag;
5317 
5318 	/*
5319 	 * Only register with IO Fault Services if we have some capability
5320 	 */
5321 	if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
5322 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
5323 	} else {
5324 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
5325 	}
5326 
5327 	if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
5328 		fma_dma_flag = 1;
5329 	} else {
5330 		fma_dma_flag = 0;
5331 	}
5332 
5333 	ixgbe_set_fma_flags(fma_dma_flag);
5334 
5335 	if (ixgbe->fm_capabilities) {
5336 
5337 		/*
5338 		 * Register capabilities with IO Fault Services
5339 		 */
5340 		ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
5341 
5342 		/*
5343 		 * Initialize pci ereport capabilities if ereport capable
5344 		 */
5345 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5346 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5347 			pci_ereport_setup(ixgbe->dip);
5348 
5349 		/*
5350 		 * Register error callback if error callback capable
5351 		 */
5352 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5353 			ddi_fm_handler_register(ixgbe->dip,
5354 			    ixgbe_fm_error_cb, (void*) ixgbe);
5355 	}
5356 }
5357 
5358 static void
5359 ixgbe_fm_fini(ixgbe_t *ixgbe)
5360 {
5361 	/*
5362 	 * Only unregister FMA capabilities if they are registered
5363 	 */
5364 	if (ixgbe->fm_capabilities) {
5365 
5366 		/*
5367 		 * Release any resources allocated by pci_ereport_setup()
5368 		 */
5369 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5370 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5371 			pci_ereport_teardown(ixgbe->dip);
5372 
5373 		/*
5374 		 * Un-register error callback if error callback capable
5375 		 */
5376 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5377 			ddi_fm_handler_unregister(ixgbe->dip);
5378 
5379 		/*
5380 		 * Unregister from IO Fault Service
5381 		 */
5382 		ddi_fm_fini(ixgbe->dip);
5383 	}
5384 }
5385 
5386 void
5387 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
5388 {
5389 	uint64_t ena;
5390 	char buf[FM_MAX_CLASS];
5391 
5392 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
5393 	ena = fm_ena_generate(0, FM_ENA_FMT1);
5394 	if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
5395 		ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
5396 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
5397 	}
5398 }
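
/*
 * For illustration, assuming the usual <sys/ddifm.h> definitions, a
 * call such as ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE)
 * builds a class of DDI_FM_DEVICE ".inval_state" and posts an ereport
 * along the lines of ereport.io.device.inval_state.
 */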
5399 
5400 static int
5401 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
5402 {
5403 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
5404 
5405 	mutex_enter(&rx_ring->rx_lock);
5406 	rx_ring->ring_gen_num = mr_gen_num;
5407 	mutex_exit(&rx_ring->rx_lock);
5408 	return (0);
5409 }
5410 
5411 /*
5412  * Get the global ring index from a ring's index within its group.
5413  */
5414 static int
5415 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex)
5416 {
5417 	ixgbe_rx_ring_t *rx_ring;
5418 	int i;
5419 
5420 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
5421 		rx_ring = &ixgbe->rx_rings[i];
5422 		if (rx_ring->group_index == gindex)
5423 			rindex--;
5424 		if (rindex < 0)
5425 			return (i);
5426 	}
5427 
5428 	return (-1);
5429 }
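
/*
 * Example: with four rings whose group_index values are {0, 0, 1, 1},
 * a request for (gindex = 1, rindex = 1) decrements rindex at global
 * rings 2 and 3 and returns 3, the second ring of group 1.
 */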
5430 
5431 /*
5432  * Callback function for the MAC layer to register all rings.
5433  */
5434 /* ARGSUSED */
5435 void
5436 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
5437     const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
5438 {
5439 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
5440 	mac_intr_t *mintr = &infop->mri_intr;
5441 
5442 	switch (rtype) {
5443 	case MAC_RING_TYPE_RX: {
5444 		/*
5445 		 * 'ring_index' is the ring index within the group.
5446 		 * Need to get the global ring index by searching in groups.
5447 		 */
5448 		int global_ring_index = ixgbe_get_rx_ring_index(
5449 		    ixgbe, group_index, ring_index);
5450 
5451 		ASSERT(global_ring_index >= 0);
5452 
5453 		ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index];
5454 		rx_ring->ring_handle = rh;
5455 
5456 		infop->mri_driver = (mac_ring_driver_t)rx_ring;
5457 		infop->mri_start = ixgbe_ring_start;
5458 		infop->mri_stop = NULL;
5459 		infop->mri_poll = ixgbe_ring_rx_poll;
5460 		infop->mri_stat = ixgbe_rx_ring_stat;
5461 
5462 		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
5463 		mintr->mi_enable = ixgbe_rx_ring_intr_enable;
5464 		mintr->mi_disable = ixgbe_rx_ring_intr_disable;
5465 		if (ixgbe->intr_type &
5466 		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
5467 			mintr->mi_ddi_handle =
5468 			    ixgbe->htable[rx_ring->intr_vector];
5469 		}
5470 
5471 		break;
5472 	}
5473 	case MAC_RING_TYPE_TX: {
5474 		ASSERT(group_index == -1);
5475 		ASSERT(ring_index < ixgbe->num_tx_rings);
5476 
5477 		ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
5478 		tx_ring->ring_handle = rh;
5479 
5480 		infop->mri_driver = (mac_ring_driver_t)tx_ring;
5481 		infop->mri_start = NULL;
5482 		infop->mri_stop = NULL;
5483 		infop->mri_tx = ixgbe_ring_tx;
5484 		infop->mri_stat = ixgbe_tx_ring_stat;
5485 		if (ixgbe->intr_type &
5486 		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
5487 			mintr->mi_ddi_handle =
5488 			    ixgbe->htable[tx_ring->intr_vector];
5489 		}
5490 		break;
5491 	}
5492 	default:
5493 		break;
5494 	}
5495 }
5496 
5497 /*
5498  * Callback function for the MAC layer to register all groups.
5499  */
5500 void
5501 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
5502     mac_group_info_t *infop, mac_group_handle_t gh)
5503 {
5504 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
5505 
5506 	switch (rtype) {
5507 	case MAC_RING_TYPE_RX: {
5508 		ixgbe_rx_group_t *rx_group;
5509 
5510 		rx_group = &ixgbe->rx_groups[index];
5511 		rx_group->group_handle = gh;
5512 
5513 		infop->mgi_driver = (mac_group_driver_t)rx_group;
5514 		infop->mgi_start = NULL;
5515 		infop->mgi_stop = NULL;
5516 		infop->mgi_addmac = ixgbe_addmac;
5517 		infop->mgi_remmac = ixgbe_remmac;
5518 		infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
5519 
5520 		break;
5521 	}
5522 	case MAC_RING_TYPE_TX:
5523 		break;
5524 	default:
5525 		break;
5526 	}
5527 }
5528 
5529 /*
5530  * Enable the interrupt on the specified rx ring.
5531  */
5532 int
5533 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
5534 {
5535 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
5536 	ixgbe_t *ixgbe = rx_ring->ixgbe;
5537 	int r_idx = rx_ring->index;
5538 	int hw_r_idx = rx_ring->hw_index;
5539 	int v_idx = rx_ring->intr_vector;
5540 
5541 	mutex_enter(&ixgbe->gen_lock);
5542 	if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
5543 		mutex_exit(&ixgbe->gen_lock);
5544 		/*
5545 		 * Simply return 0.
5546 		 * Interrupts are being adjusted. ixgbe_intr_adjust()
5547 		 * will eventually re-enable the interrupt when it's
5548 		 * done with the adjustment.
5549 		 */
5550 		return (0);
5551 	}
5552 
5553 	/*
5554 	 * Enable the interrupt by setting the VAL bit of the given
5555 	 * interrupt vector allocation register (IVAR).
5556 	 */
5557 	ixgbe_enable_ivar(ixgbe, hw_r_idx, 0);
5558 
5559 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
5560 
5561 	/*
5562 	 * Trigger a Rx interrupt on this ring
5563 	 */
5564 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
5565 	IXGBE_WRITE_FLUSH(&ixgbe->hw);
5566 
5567 	mutex_exit(&ixgbe->gen_lock);
5568 
5569 	return (0);
5570 }
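
/*
 * Note on the EICS write above: setting the vector's bit in the
 * Extended Interrupt Cause Set register software-triggers that
 * interrupt, so packets that arrived while the ring interrupt was
 * disabled (e.g. during polling) are serviced immediately instead of
 * waiting for the next hardware event.
 */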
5571 
5572 /*
5573  * Disable the interrupt on the specified rx ring.
5574  */
5575 int
5576 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
5577 {
5578 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
5579 	ixgbe_t *ixgbe = rx_ring->ixgbe;
5580 	int r_idx = rx_ring->index;
5581 	int hw_r_idx = rx_ring->hw_index;
5582 	int v_idx = rx_ring->intr_vector;
5583 
5584 	mutex_enter(&ixgbe->gen_lock);
5585 	if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
5586 		mutex_exit(&ixgbe->gen_lock);
5587 		/*
5588 		 * Simply return 0.
5589 		 * In the rare case where an interrupt is being
5590 		 * disabled while interrupts are being adjusted,
5591 		 * we don't fail the operation. No interrupts will
5592 		 * be generated while they are adjusted, and
5593 		 * ixgbe_intr_adjust() will cause the interrupts
5594 		 * to be re-enabled once it completes. Note that
5595 		 * in this case, packets may be delivered to the
5596 		 * stack via interrupts before ixgbe_rx_ring_intr_enable()
5597 		 * is called again. This is acceptable since interrupt
5598 		 * adjustment is infrequent, and the stack will be
5599 		 * able to handle these packets.
5600 		 */
5601 		return (0);
5602 	}
5603 
5604 	/*
5605 	 * Disable the interrupt by clearing the VAL bit of the given
5606 	 * interrupt vector allocation register (IVAR).
5607 	 */
5608 	ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);
5609 
5610 	BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
5611 
5612 	mutex_exit(&ixgbe->gen_lock);
5613 
5614 	return (0);
5615 }
5616 
5617 /*
5618  * Add a mac address.
5619  */
5620 static int
5621 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
5622 {
5623 	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
5624 	ixgbe_t *ixgbe = rx_group->ixgbe;
5625 	struct ixgbe_hw *hw = &ixgbe->hw;
5626 	int slot, i;
5627 
5628 	mutex_enter(&ixgbe->gen_lock);
5629 
5630 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
5631 		mutex_exit(&ixgbe->gen_lock);
5632 		return (ECANCELED);
5633 	}
5634 
5635 	if (ixgbe->unicst_avail == 0) {
5636 		/* no slots available */
5637 		mutex_exit(&ixgbe->gen_lock);
5638 		return (ENOSPC);
5639 	}
5640 
5641 	/*
5642 	 * The first ixgbe->num_rx_groups slots are reserved, one for each
5643 	 * respective group; the remaining slots are shared by all groups.
5644 	 * When adding a MAC address, the group's reserved slot is checked
5645 	 * first, then the shared slots are searched.
5646 	 */
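	/*
	 * Layout sketch (illustrative numbers): with num_rx_groups = 4
	 * and unicst_total = 16, slots 0-3 are reserved for groups 0-3
	 * respectively and slots 4-15 are shared, so a second address
	 * added to group 1 lands in the first free slot at or above
	 * index 4.
	 */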
5647 	slot = -1;
5648 	if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) {
5649 		for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) {
5650 			if (ixgbe->unicst_addr[i].mac.set == 0) {
5651 				slot = i;
5652 				break;
5653 			}
5654 		}
5655 	} else {
5656 		slot = rx_group->index;
5657 	}
5658 
5659 	if (slot == -1) {
5660 		/* no slots available */
5661 		mutex_exit(&ixgbe->gen_lock);
5662 		return (ENOSPC);
5663 	}
5664 
5665 	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
5666 	(void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr,
5667 	    rx_group->index, IXGBE_RAH_AV);
5668 	ixgbe->unicst_addr[slot].mac.set = 1;
5669 	ixgbe->unicst_addr[slot].mac.group_index = rx_group->index;
5670 	ixgbe->unicst_avail--;
5671 
5672 	mutex_exit(&ixgbe->gen_lock);
5673 
5674 	return (0);
5675 }
5676 
5677 /*
5678  * Remove a mac address.
5679  */
5680 static int
5681 ixgbe_remmac(void *arg, const uint8_t *mac_addr)
5682 {
5683 	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
5684 	ixgbe_t *ixgbe = rx_group->ixgbe;
5685 	struct ixgbe_hw *hw = &ixgbe->hw;
5686 	int slot;
5687 
5688 	mutex_enter(&ixgbe->gen_lock);
5689 
5690 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
5691 		mutex_exit(&ixgbe->gen_lock);
5692 		return (ECANCELED);
5693 	}
5694 
5695 	slot = ixgbe_unicst_find(ixgbe, mac_addr);
5696 	if (slot == -1) {
5697 		mutex_exit(&ixgbe->gen_lock);
5698 		return (EINVAL);
5699 	}
5700 
5701 	if (ixgbe->unicst_addr[slot].mac.set == 0) {
5702 		mutex_exit(&ixgbe->gen_lock);
5703 		return (EINVAL);
5704 	}
5705 
5706 	bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
5707 	(void) ixgbe_clear_rar(hw, slot);
5708 	ixgbe->unicst_addr[slot].mac.set = 0;
5709 	ixgbe->unicst_avail++;
5710 
5711 	mutex_exit(&ixgbe->gen_lock);
5712 
5713 	return (0);
5714 }
5715