xref: /illumos-gate/usr/src/uts/common/io/ixgbe/ixgbe_main.c (revision 46b592853d0f4f11781b6b0a7533f267c6aee132)
1 /*
2  * CDDL HEADER START
3  *
4  * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
5  * The contents of this file are subject to the terms of the
6  * Common Development and Distribution License (the "License").
7  * You may not use this file except in compliance with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 
23 /*
24  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
25  * Use is subject to license terms.
26  */
27 
28 #include "ixgbe_sw.h"
29 
30 static char ident[] = "Intel 10Gb Ethernet 1.1.0";
31 
32 /*
33  * Local function prototypes
34  */
35 static int ixgbe_register_mac(ixgbe_t *);
36 static int ixgbe_identify_hardware(ixgbe_t *);
37 static int ixgbe_regs_map(ixgbe_t *);
38 static void ixgbe_init_properties(ixgbe_t *);
39 static int ixgbe_init_driver_settings(ixgbe_t *);
40 static void ixgbe_init_locks(ixgbe_t *);
41 static void ixgbe_destroy_locks(ixgbe_t *);
42 static int ixgbe_init(ixgbe_t *);
43 static int ixgbe_chip_start(ixgbe_t *);
44 static void ixgbe_chip_stop(ixgbe_t *);
45 static int ixgbe_reset(ixgbe_t *);
46 static void ixgbe_tx_clean(ixgbe_t *);
47 static boolean_t ixgbe_tx_drain(ixgbe_t *);
48 static boolean_t ixgbe_rx_drain(ixgbe_t *);
49 static int ixgbe_alloc_rings(ixgbe_t *);
50 static void ixgbe_free_rings(ixgbe_t *);
51 static int ixgbe_alloc_rx_data(ixgbe_t *);
52 static void ixgbe_free_rx_data(ixgbe_t *);
53 static void ixgbe_setup_rings(ixgbe_t *);
54 static void ixgbe_setup_rx(ixgbe_t *);
55 static void ixgbe_setup_tx(ixgbe_t *);
56 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
57 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
58 static void ixgbe_setup_rss(ixgbe_t *);
59 static void ixgbe_init_unicst(ixgbe_t *);
60 static int ixgbe_unicst_set(ixgbe_t *, const uint8_t *, int);
61 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
62 static void ixgbe_setup_multicst(ixgbe_t *);
63 static void ixgbe_get_hw_state(ixgbe_t *);
64 static void ixgbe_get_conf(ixgbe_t *);
65 static void ixgbe_init_params(ixgbe_t *);
66 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
67 static void ixgbe_driver_link_check(void *);
68 static void ixgbe_sfp_check(void *);
69 static void ixgbe_local_timer(void *);
70 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
71 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
72 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
73 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
74 static boolean_t is_valid_mac_addr(uint8_t *);
75 static boolean_t ixgbe_stall_check(ixgbe_t *);
76 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
77 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
78 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
79 static int ixgbe_alloc_intrs(ixgbe_t *);
80 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
81 static int ixgbe_add_intr_handlers(ixgbe_t *);
82 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
83 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
84 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
85 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
86 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
87 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
88 static void ixgbe_setup_adapter_vector(ixgbe_t *);
89 static void ixgbe_rem_intr_handlers(ixgbe_t *);
90 static void ixgbe_rem_intrs(ixgbe_t *);
91 static int ixgbe_enable_intrs(ixgbe_t *);
92 static int ixgbe_disable_intrs(ixgbe_t *);
93 static uint_t ixgbe_intr_legacy(void *, void *);
94 static uint_t ixgbe_intr_msi(void *, void *);
95 static uint_t ixgbe_intr_msix(void *, void *);
96 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
97 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
98 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
99 static void ixgbe_get_driver_control(struct ixgbe_hw *);
100 static int ixgbe_addmac(void *, const uint8_t *);
101 static int ixgbe_remmac(void *, const uint8_t *);
102 static void ixgbe_release_driver_control(struct ixgbe_hw *);
103 
104 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
105 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
106 static int ixgbe_resume(dev_info_t *);
107 static int ixgbe_suspend(dev_info_t *);
108 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
109 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
110 
111 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
112     const void *impl_data);
113 static void ixgbe_fm_init(ixgbe_t *);
114 static void ixgbe_fm_fini(ixgbe_t *);
115 
/*
 * Driver-private mac properties exported to the GLD framework.
 * NOTE(review): the underscore-prefixed names are presumably parsed by
 * ixgbe_m_setprop()/ixgbe_m_getprop() (not visible here) — confirm the
 * names stay in sync with those handlers.
 */
mac_priv_prop_t ixgbe_priv_props[] = {
	{"_tx_copy_thresh", MAC_PROP_PERM_RW},
	{"_tx_recycle_thresh", MAC_PROP_PERM_RW},
	{"_tx_overload_thresh", MAC_PROP_PERM_RW},
	{"_tx_resched_thresh", MAC_PROP_PERM_RW},
	{"_rx_copy_thresh", MAC_PROP_PERM_RW},
	{"_rx_limit_per_intr", MAC_PROP_PERM_RW},
	{"_intr_throttling", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_READ},
	{"_adv_asym_pause_cap", MAC_PROP_PERM_READ}
};

/* Number of entries in ixgbe_priv_props[] */
#define	IXGBE_MAX_PRIV_PROPS \
	(sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))
130 
/*
 * Character/block driver entry points.  This is a network driver whose
 * real entry points are registered with the mac layer (see
 * ixgbe_register_mac()), so nearly all cb_ops slots are stubs.
 */
static struct cb_ops ixgbe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
151 
/*
 * Device operations: only attach/detach (and power via ddi_power) are
 * provided; fast-reboot quiesce is explicitly not supported.
 */
static struct dev_ops ixgbe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ixgbe_attach,		/* devo_attach */
	ixgbe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ixgbe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};
166 
static struct modldrv ixgbe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	ident,			/* Description string */
	&ixgbe_dev_ops		/* driver ops */
};

static struct modlinkage ixgbe_modlinkage = {
	MODREV_1, &ixgbe_modldrv, NULL
};
176 
/*
 * Access attributes for register mapping: little-endian, strictly
 * ordered access with FMA flagged-error protection.
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * Loopback properties reported to the loopback ioctl framework.
 */
static lb_property_t lb_normal = {
	normal,	"normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
	internal, "MAC", IXGBE_LB_INTERNAL_MAC
};
197 
/*
 * Flags advertising which optional mac callbacks below are implemented.
 */
#define	IXGBE_M_CALLBACK_FLAGS \
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

/*
 * mac layer callback table.  NULL slots correspond to optional
 * callbacks this driver does not implement (and which are therefore
 * absent from IXGBE_M_CALLBACK_FLAGS above).
 */
static mac_callbacks_t ixgbe_m_callbacks = {
	IXGBE_M_CALLBACK_FLAGS,
	ixgbe_m_stat,
	ixgbe_m_start,
	ixgbe_m_stop,
	ixgbe_m_promisc,
	ixgbe_m_multicst,
	NULL,
	NULL,
	ixgbe_m_ioctl,
	ixgbe_m_getcapab,
	NULL,
	NULL,
	ixgbe_m_setprop,
	ixgbe_m_getprop
};
217 
/*
 * Initialize capabilities of each supported adapter type
 *
 * NOTE(review): ixgbe_identify_hardware() points ixgbe->capab directly
 * at these static tables and then ORs bits into capab->flags and
 * capab->other_intr, so any modification is shared by every instance
 * of the same adapter type — confirm that sharing is intended.
 */
static adapter_info_t ixgbe_82598eb_cap = {
	64,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	8,		/* default number of rx queues */
	32,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	0xFFFF,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	18,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE)
};

static adapter_info_t ixgbe_82599eb_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	8,		/* default number of rx queues */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE)
};
258 
259 /*
260  * Module Initialization Functions.
261  */
262 
263 int
264 _init(void)
265 {
266 	int status;
267 
268 	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
269 
270 	status = mod_install(&ixgbe_modlinkage);
271 
272 	if (status != DDI_SUCCESS) {
273 		mac_fini_ops(&ixgbe_dev_ops);
274 	}
275 
276 	return (status);
277 }
278 
279 int
280 _fini(void)
281 {
282 	int status;
283 
284 	status = mod_remove(&ixgbe_modlinkage);
285 
286 	if (status == DDI_SUCCESS) {
287 		mac_fini_ops(&ixgbe_dev_ops);
288 	}
289 
290 	return (status);
291 }
292 
293 int
294 _info(struct modinfo *modinfop)
295 {
296 	int status;
297 
298 	status = mod_info(&ixgbe_modlinkage, modinfop);
299 
300 	return (status);
301 }
302 
303 /*
304  * ixgbe_attach - Driver attach.
305  *
306  * This function is the device specific initialization entry
307  * point. This entry point is required and must be written.
308  * The DDI_ATTACH command must be provided in the attach entry
309  * point. When attach() is called with cmd set to DDI_ATTACH,
310  * all normal kernel services (such as kmem_alloc(9F)) are
311  * available for use by the driver.
312  *
313  * The attach() function will be called once for each instance
314  * of  the  device  on  the  system with cmd set to DDI_ATTACH.
315  * Until attach() succeeds, the only driver entry points which
316  * may be called are open(9E) and getinfo(9E).
317  */
318 static int
319 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
320 {
321 	ixgbe_t *ixgbe;
322 	struct ixgbe_osdep *osdep;
323 	struct ixgbe_hw *hw;
324 	int instance;
325 	char taskqname[32];
326 
327 	/*
328 	 * Check the command and perform corresponding operations
329 	 */
330 	switch (cmd) {
331 	default:
332 		return (DDI_FAILURE);
333 
334 	case DDI_RESUME:
335 		return (ixgbe_resume(devinfo));
336 
337 	case DDI_ATTACH:
338 		break;
339 	}
340 
341 	/* Get the device instance */
342 	instance = ddi_get_instance(devinfo);
343 
344 	/* Allocate memory for the instance data structure */
345 	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
346 
347 	ixgbe->dip = devinfo;
348 	ixgbe->instance = instance;
349 
350 	hw = &ixgbe->hw;
351 	osdep = &ixgbe->osdep;
352 	hw->back = osdep;
353 	osdep->ixgbe = ixgbe;
354 
355 	/* Attach the instance pointer to the dev_info data structure */
356 	ddi_set_driver_private(devinfo, ixgbe);
357 
358 	/*
359 	 * Initialize for fma support
360 	 */
361 	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
362 	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
363 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
364 	ixgbe_fm_init(ixgbe);
365 	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
366 
367 	/*
368 	 * Map PCI config space registers
369 	 */
370 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
371 		ixgbe_error(ixgbe, "Failed to map PCI configurations");
372 		goto attach_fail;
373 	}
374 	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
375 
376 	/*
377 	 * Identify the chipset family
378 	 */
379 	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
380 		ixgbe_error(ixgbe, "Failed to identify hardware");
381 		goto attach_fail;
382 	}
383 
384 	/*
385 	 * Map device registers
386 	 */
387 	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
388 		ixgbe_error(ixgbe, "Failed to map device registers");
389 		goto attach_fail;
390 	}
391 	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
392 
393 	/*
394 	 * Initialize driver parameters
395 	 */
396 	ixgbe_init_properties(ixgbe);
397 	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
398 
399 	/*
400 	 * Allocate interrupts
401 	 */
402 	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
403 		ixgbe_error(ixgbe, "Failed to allocate interrupts");
404 		goto attach_fail;
405 	}
406 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
407 
408 	/*
409 	 * Allocate rx/tx rings based on the ring numbers.
410 	 * The actual numbers of rx/tx rings are decided by the number of
411 	 * allocated interrupt vectors, so we should allocate the rings after
412 	 * interrupts are allocated.
413 	 */
414 	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
415 		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
416 		goto attach_fail;
417 	}
418 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
419 
420 	/*
421 	 * Map rings to interrupt vectors
422 	 */
423 	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
424 		ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
425 		goto attach_fail;
426 	}
427 
428 	/*
429 	 * Add interrupt handlers
430 	 */
431 	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
432 		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
433 		goto attach_fail;
434 	}
435 	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
436 
437 	/*
438 	 * Create a taskq for link-status-change
439 	 */
440 	(void) sprintf(taskqname, "ixgbe%d_taskq", instance);
441 	if ((ixgbe->lsc_taskq = ddi_taskq_create(devinfo, taskqname,
442 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
443 		ixgbe_error(ixgbe, "taskq_create failed");
444 		goto attach_fail;
445 	}
446 	ixgbe->attach_progress |= ATTACH_PROGRESS_LSC_TASKQ;
447 
448 	/*
449 	 * Initialize driver parameters
450 	 */
451 	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
452 		ixgbe_error(ixgbe, "Failed to initialize driver settings");
453 		goto attach_fail;
454 	}
455 
456 	/*
457 	 * Initialize mutexes for this device.
458 	 * Do this before enabling the interrupt handler and
459 	 * register the softint to avoid the condition where
460 	 * interrupt handler can try using uninitialized mutex.
461 	 */
462 	ixgbe_init_locks(ixgbe);
463 	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
464 
465 	/*
466 	 * Initialize chipset hardware
467 	 */
468 	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
469 		ixgbe_error(ixgbe, "Failed to initialize adapter");
470 		goto attach_fail;
471 	}
472 	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
473 
474 	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
475 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
476 		goto attach_fail;
477 	}
478 
479 	/*
480 	 * Initialize statistics
481 	 */
482 	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
483 		ixgbe_error(ixgbe, "Failed to initialize statistics");
484 		goto attach_fail;
485 	}
486 	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
487 
488 	/*
489 	 * Register the driver to the MAC
490 	 */
491 	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
492 		ixgbe_error(ixgbe, "Failed to register MAC");
493 		goto attach_fail;
494 	}
495 	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
496 	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
497 
498 	/*
499 	 * Now that mutex locks are initialized, and the chip is also
500 	 * initialized, enable interrupts.
501 	 */
502 	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
503 		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
504 		goto attach_fail;
505 	}
506 	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
507 
508 	ixgbe->ixgbe_state |= IXGBE_INITIALIZED;
509 
510 	return (DDI_SUCCESS);
511 
512 attach_fail:
513 	ixgbe_unconfigure(devinfo, ixgbe);
514 	return (DDI_FAILURE);
515 }
516 
517 /*
518  * ixgbe_detach - Driver detach.
519  *
520  * The detach() function is the complement of the attach routine.
521  * If cmd is set to DDI_DETACH, detach() is used to remove  the
522  * state  associated  with  a  given  instance of a device node
523  * prior to the removal of that instance from the system.
524  *
525  * The detach() function will be called once for each  instance
526  * of the device for which there has been a successful attach()
527  * once there are no longer  any  opens  on  the  device.
528  *
529  * Interrupts routine are disabled, All memory allocated by this
530  * driver are freed.
531  */
532 static int
533 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
534 {
535 	ixgbe_t *ixgbe;
536 
537 	/*
538 	 * Check detach command
539 	 */
540 	switch (cmd) {
541 	default:
542 		return (DDI_FAILURE);
543 
544 	case DDI_SUSPEND:
545 		return (ixgbe_suspend(devinfo));
546 
547 	case DDI_DETACH:
548 		break;
549 	}
550 
551 
552 	/*
553 	 * Get the pointer to the driver private data structure
554 	 */
555 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
556 	if (ixgbe == NULL)
557 		return (DDI_FAILURE);
558 
559 	/*
560 	 * Unregister MAC. If failed, we have to fail the detach
561 	 */
562 	if (mac_unregister(ixgbe->mac_hdl) != 0) {
563 		ixgbe_error(ixgbe, "Failed to unregister MAC");
564 		return (DDI_FAILURE);
565 	}
566 	ixgbe->attach_progress &= ~ATTACH_PROGRESS_MAC;
567 
568 	/*
569 	 * If the device is still running, it needs to be stopped first.
570 	 * This check is necessary because under some specific circumstances,
571 	 * the detach routine can be called without stopping the interface
572 	 * first.
573 	 */
574 	mutex_enter(&ixgbe->gen_lock);
575 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
576 		ixgbe->ixgbe_state &= ~IXGBE_STARTED;
577 		ixgbe_stop(ixgbe, B_TRUE);
578 		mutex_exit(&ixgbe->gen_lock);
579 		/* Disable and stop the watchdog timer */
580 		ixgbe_disable_watchdog_timer(ixgbe);
581 	} else
582 		mutex_exit(&ixgbe->gen_lock);
583 
584 	/*
585 	 * Check if there are still rx buffers held by the upper layer.
586 	 * If so, fail the detach.
587 	 */
588 	if (!ixgbe_rx_drain(ixgbe))
589 		return (DDI_FAILURE);
590 
591 	/*
592 	 * Do the remaining unconfigure routines
593 	 */
594 	ixgbe_unconfigure(devinfo, ixgbe);
595 
596 	return (DDI_SUCCESS);
597 }
598 
/*
 * ixgbe_unconfigure - Release every resource recorded in attach_progress.
 *
 * Called both from a failed ixgbe_attach() (with a partial progress
 * mask) and from ixgbe_detach() (full teardown), so each step is
 * guarded by its own progress bit.  The teardown order matters; see
 * the per-step notes below.
 */
static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
	/*
	 * Disable interrupt
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		(void) ixgbe_disable_intrs(ixgbe);
	}

	/*
	 * Unregister MAC (no-op on the detach path, where the MAC has
	 * already been unregistered and the progress bit cleared)
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
		(void) mac_unregister(ixgbe->mac_hdl);
	}

	/*
	 * Free statistics
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
	}

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}

	/*
	 * Remove taskq for link-status-change
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LSC_TASKQ) {
		ddi_taskq_destroy(ixgbe->lsc_taskq);
	}

	/*
	 * Remove interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
		ixgbe_rem_intrs(ixgbe);
	}

	/*
	 * Remove driver properties
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
		(void) ddi_prop_remove_all(devinfo);
	}

	/*
	 * Stop the chipset.  gen_lock is still valid here: the locks
	 * are destroyed only further below, after the chip is stopped.
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_chip_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
	}

	/*
	 * Free register handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
		if (ixgbe->osdep.reg_handle != NULL)
			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
	}

	/*
	 * Free PCI config handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
		if (ixgbe->osdep.cfg_handle != NULL)
			pci_config_teardown(&ixgbe->osdep.cfg_handle);
	}

	/*
	 * Free locks (safe only after interrupts are removed and the
	 * chip is stopped, since both paths take these mutexes)
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
		ixgbe_destroy_locks(ixgbe);
	}

	/*
	 * Free the rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
		ixgbe_free_rings(ixgbe);
	}

	/*
	 * Unregister FMA capabilities
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
		ixgbe_fm_fini(ixgbe);
	}

	/*
	 * Free the driver data structure
	 */
	kmem_free(ixgbe, sizeof (ixgbe_t));

	ddi_set_driver_private(devinfo, NULL);
}
704 
705 /*
706  * ixgbe_register_mac - Register the driver and its function pointers with
707  * the GLD interface.
708  */
709 static int
710 ixgbe_register_mac(ixgbe_t *ixgbe)
711 {
712 	struct ixgbe_hw *hw = &ixgbe->hw;
713 	mac_register_t *mac;
714 	int status;
715 
716 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
717 		return (IXGBE_FAILURE);
718 
719 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
720 	mac->m_driver = ixgbe;
721 	mac->m_dip = ixgbe->dip;
722 	mac->m_src_addr = hw->mac.addr;
723 	mac->m_callbacks = &ixgbe_m_callbacks;
724 	mac->m_min_sdu = 0;
725 	mac->m_max_sdu = ixgbe->default_mtu;
726 	mac->m_margin = VLAN_TAGSZ;
727 	mac->m_priv_props = ixgbe_priv_props;
728 	mac->m_priv_prop_count = IXGBE_MAX_PRIV_PROPS;
729 	mac->m_v12n = MAC_VIRT_LEVEL1;
730 
731 	status = mac_register(mac, &ixgbe->mac_hdl);
732 
733 	mac_free(mac);
734 
735 	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
736 }
737 
/*
 * ixgbe_identify_hardware - Identify the type of the chipset.
 *
 * Reads the PCI identification registers, lets the common code derive
 * the mac type, and installs the matching capability table.
 */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;

	/*
	 * Get the device id
	 */
	hw->vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
	hw->device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
	hw->revision_id =
	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
	hw->subsystem_device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

	/*
	 * Set the mac type of the adapter based on the device id
	 */
	if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Install adapter capabilities.
	 *
	 * NOTE(review): capab points at the shared static tables above
	 * and the |= updates below modify those tables in place, so the
	 * changes are visible to every instance of the same adapter
	 * type — confirm this is intended.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		ixgbe_log(ixgbe, "identify 82598 adapter\n");
		ixgbe->capab = &ixgbe_82598eb_cap;

		if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
			/* Copper parts report fan failures via GPI SDP1 */
			ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
		}
		ixgbe->capab->other_intr |= IXGBE_EICR_LSC;

		break;
	case ixgbe_mac_82599EB:
		ixgbe_log(ixgbe, "identify 82599 adapter\n");
		ixgbe->capab = &ixgbe_82599eb_cap;

		ixgbe->capab->other_intr = (IXGBE_EICR_GPI_SDP1 |
		    IXGBE_EICR_GPI_SDP2 | IXGBE_EICR_LSC);

		break;
	default:
		ixgbe_log(ixgbe,
		    "adapter not supported in ixgbe_identify_hardware(): %d\n",
		    hw->mac.type);
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}
800 
801 /*
802  * ixgbe_regs_map - Map the device registers.
803  *
804  */
805 static int
806 ixgbe_regs_map(ixgbe_t *ixgbe)
807 {
808 	dev_info_t *devinfo = ixgbe->dip;
809 	struct ixgbe_hw *hw = &ixgbe->hw;
810 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
811 	off_t mem_size;
812 
813 	/*
814 	 * First get the size of device registers to be mapped.
815 	 */
816 	if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
817 	    != DDI_SUCCESS) {
818 		return (IXGBE_FAILURE);
819 	}
820 
821 	/*
822 	 * Call ddi_regs_map_setup() to map registers
823 	 */
824 	if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
825 	    (caddr_t *)&hw->hw_addr, 0,
826 	    mem_size, &ixgbe_regs_acc_attr,
827 	    &osdep->reg_handle)) != DDI_SUCCESS) {
828 		return (IXGBE_FAILURE);
829 	}
830 
831 	return (IXGBE_SUCCESS);
832 }
833 
834 /*
835  * ixgbe_init_properties - Initialize driver properties.
836  */
837 static void
838 ixgbe_init_properties(ixgbe_t *ixgbe)
839 {
840 	/*
841 	 * Get conf file properties, including link settings
842 	 * jumbo frames, ring number, descriptor number, etc.
843 	 */
844 	ixgbe_get_conf(ixgbe);
845 
846 	ixgbe_init_params(ixgbe);
847 }
848 
849 /*
850  * ixgbe_init_driver_settings - Initialize driver settings.
851  *
852  * The settings include hardware function pointers, bus information,
853  * rx/tx rings settings, link state, and any other parameters that
854  * need to be setup during driver initialization.
855  */
856 static int
857 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
858 {
859 	struct ixgbe_hw *hw = &ixgbe->hw;
860 	dev_info_t *devinfo = ixgbe->dip;
861 	ixgbe_rx_ring_t *rx_ring;
862 	ixgbe_tx_ring_t *tx_ring;
863 	uint32_t rx_size;
864 	uint32_t tx_size;
865 	int i;
866 
867 	/*
868 	 * Initialize chipset specific hardware function pointers
869 	 */
870 	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
871 		return (IXGBE_FAILURE);
872 	}
873 
874 	/*
875 	 * Get the system page size
876 	 */
877 	ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);
878 
879 	/*
880 	 * Set rx buffer size
881 	 *
882 	 * The IP header alignment room is counted in the calculation.
883 	 * The rx buffer size is in unit of 1K that is required by the
884 	 * chipset hardware.
885 	 */
886 	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
887 	ixgbe->rx_buf_size = ((rx_size >> 10) +
888 	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
889 
890 	/*
891 	 * Set tx buffer size
892 	 */
893 	tx_size = ixgbe->max_frame_size;
894 	ixgbe->tx_buf_size = ((tx_size >> 10) +
895 	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
896 
897 	/*
898 	 * Initialize rx/tx rings parameters
899 	 */
900 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
901 		rx_ring = &ixgbe->rx_rings[i];
902 		rx_ring->index = i;
903 		rx_ring->ixgbe = ixgbe;
904 	}
905 
906 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
907 		tx_ring = &ixgbe->tx_rings[i];
908 		tx_ring->index = i;
909 		tx_ring->ixgbe = ixgbe;
910 		if (ixgbe->tx_head_wb_enable)
911 			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
912 		else
913 			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
914 
915 		tx_ring->ring_size = ixgbe->tx_ring_size;
916 		tx_ring->free_list_size = ixgbe->tx_ring_size +
917 		    (ixgbe->tx_ring_size >> 1);
918 	}
919 
920 	/*
921 	 * Initialize values of interrupt throttling rate
922 	 */
923 	for (i = 1; i < MAX_INTR_VECTOR; i++)
924 		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
925 
926 	/*
927 	 * The initial link state should be "unknown"
928 	 */
929 	ixgbe->link_state = LINK_STATE_UNKNOWN;
930 
931 	return (IXGBE_SUCCESS);
932 }
933 
934 /*
935  * ixgbe_init_locks - Initialize locks.
936  */
937 static void
938 ixgbe_init_locks(ixgbe_t *ixgbe)
939 {
940 	ixgbe_rx_ring_t *rx_ring;
941 	ixgbe_tx_ring_t *tx_ring;
942 	int i;
943 
944 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
945 		rx_ring = &ixgbe->rx_rings[i];
946 		mutex_init(&rx_ring->rx_lock, NULL,
947 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
948 	}
949 
950 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
951 		tx_ring = &ixgbe->tx_rings[i];
952 		mutex_init(&tx_ring->tx_lock, NULL,
953 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
954 		mutex_init(&tx_ring->recycle_lock, NULL,
955 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
956 		mutex_init(&tx_ring->tcb_head_lock, NULL,
957 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
958 		mutex_init(&tx_ring->tcb_tail_lock, NULL,
959 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
960 	}
961 
962 	mutex_init(&ixgbe->gen_lock, NULL,
963 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
964 
965 	mutex_init(&ixgbe->watchdog_lock, NULL,
966 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
967 }
968 
969 /*
970  * ixgbe_destroy_locks - Destroy locks.
971  */
972 static void
973 ixgbe_destroy_locks(ixgbe_t *ixgbe)
974 {
975 	ixgbe_rx_ring_t *rx_ring;
976 	ixgbe_tx_ring_t *tx_ring;
977 	int i;
978 
979 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
980 		rx_ring = &ixgbe->rx_rings[i];
981 		mutex_destroy(&rx_ring->rx_lock);
982 	}
983 
984 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
985 		tx_ring = &ixgbe->tx_rings[i];
986 		mutex_destroy(&tx_ring->tx_lock);
987 		mutex_destroy(&tx_ring->recycle_lock);
988 		mutex_destroy(&tx_ring->tcb_head_lock);
989 		mutex_destroy(&tx_ring->tcb_tail_lock);
990 	}
991 
992 	mutex_destroy(&ixgbe->gen_lock);
993 	mutex_destroy(&ixgbe->watchdog_lock);
994 }
995 
996 static int
997 ixgbe_resume(dev_info_t *devinfo)
998 {
999 	ixgbe_t *ixgbe;
1000 
1001 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1002 	if (ixgbe == NULL)
1003 		return (DDI_FAILURE);
1004 
1005 	mutex_enter(&ixgbe->gen_lock);
1006 
1007 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1008 		if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1009 			mutex_exit(&ixgbe->gen_lock);
1010 			return (DDI_FAILURE);
1011 		}
1012 
1013 		/*
1014 		 * Enable and start the watchdog timer
1015 		 */
1016 		ixgbe_enable_watchdog_timer(ixgbe);
1017 	}
1018 
1019 	ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
1020 
1021 	mutex_exit(&ixgbe->gen_lock);
1022 
1023 	return (DDI_SUCCESS);
1024 }
1025 
1026 static int
1027 ixgbe_suspend(dev_info_t *devinfo)
1028 {
1029 	ixgbe_t *ixgbe;
1030 
1031 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1032 	if (ixgbe == NULL)
1033 		return (DDI_FAILURE);
1034 
1035 	mutex_enter(&ixgbe->gen_lock);
1036 
1037 	ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
1038 	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1039 		mutex_exit(&ixgbe->gen_lock);
1040 		return (DDI_SUCCESS);
1041 	}
1042 	ixgbe_stop(ixgbe, B_FALSE);
1043 
1044 	mutex_exit(&ixgbe->gen_lock);
1045 
1046 	/*
1047 	 * Disable and stop the watchdog timer
1048 	 */
1049 	ixgbe_disable_watchdog_timer(ixgbe);
1050 
1051 	return (DDI_SUCCESS);
1052 }
1053 
1054 /*
1055  * ixgbe_init - Initialize the device.
1056  */
1057 static int
1058 ixgbe_init(ixgbe_t *ixgbe)
1059 {
1060 	struct ixgbe_hw *hw = &ixgbe->hw;
1061 
1062 	mutex_enter(&ixgbe->gen_lock);
1063 
1064 	/*
1065 	 * Reset chipset to put the hardware in a known state
1066 	 * before we try to do anything with the eeprom.
1067 	 */
1068 	if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
1069 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1070 		goto init_fail;
1071 	}
1072 
1073 	/*
1074 	 * Need to init eeprom before validating the checksum.
1075 	 */
1076 	if (ixgbe_init_eeprom_params(hw) < 0) {
1077 		ixgbe_error(ixgbe,
1078 		    "Unable to intitialize the eeprom interface.");
1079 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1080 		goto init_fail;
1081 	}
1082 
1083 	/*
1084 	 * NVM validation
1085 	 */
1086 	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1087 		/*
1088 		 * Some PCI-E parts fail the first check due to
1089 		 * the link being in sleep state.  Call it again,
1090 		 * if it fails a second time it's a real issue.
1091 		 */
1092 		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1093 			ixgbe_error(ixgbe,
1094 			    "Invalid NVM checksum. Please contact "
1095 			    "the vendor to update the NVM.");
1096 			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1097 			goto init_fail;
1098 		}
1099 	}
1100 
1101 	/*
1102 	 * Setup default flow control thresholds - enable/disable
1103 	 * & flow control type is controlled by ixgbe.conf
1104 	 */
1105 	hw->fc.high_water = DEFAULT_FCRTH;
1106 	hw->fc.low_water = DEFAULT_FCRTL;
1107 	hw->fc.pause_time = DEFAULT_FCPAUSE;
1108 	hw->fc.send_xon = B_TRUE;
1109 
1110 	/*
1111 	 * Don't wait for auto-negotiation to complete
1112 	 */
1113 	hw->phy.autoneg_wait_to_complete = B_FALSE;
1114 
1115 	/*
1116 	 * Initialize link settings
1117 	 */
1118 	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1119 
1120 	/*
1121 	 * Initialize the chipset hardware
1122 	 */
1123 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1124 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1125 		goto init_fail;
1126 	}
1127 
1128 	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
1129 		goto init_fail;
1130 	}
1131 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1132 		goto init_fail;
1133 	}
1134 
1135 	mutex_exit(&ixgbe->gen_lock);
1136 	return (IXGBE_SUCCESS);
1137 
1138 init_fail:
1139 	/*
1140 	 * Reset PHY
1141 	 */
1142 	(void) ixgbe_reset_phy(hw);
1143 
1144 	mutex_exit(&ixgbe->gen_lock);
1145 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1146 	return (IXGBE_FAILURE);
1147 }
1148 
1149 /*
1150  * ixgbe_chip_start - Initialize and start the chipset hardware.
1151  */
1152 static int
1153 ixgbe_chip_start(ixgbe_t *ixgbe)
1154 {
1155 	struct ixgbe_hw *hw = &ixgbe->hw;
1156 	int ret_val, i;
1157 
1158 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1159 
1160 	/*
1161 	 * Get the mac address
1162 	 * This function should handle SPARC case correctly.
1163 	 */
1164 	if (!ixgbe_find_mac_address(ixgbe)) {
1165 		ixgbe_error(ixgbe, "Failed to get the mac address");
1166 		return (IXGBE_FAILURE);
1167 	}
1168 
1169 	/*
1170 	 * Validate the mac address
1171 	 */
1172 	(void) ixgbe_init_rx_addrs(hw);
1173 	if (!is_valid_mac_addr(hw->mac.addr)) {
1174 		ixgbe_error(ixgbe, "Invalid mac address");
1175 		return (IXGBE_FAILURE);
1176 	}
1177 
1178 	/*
1179 	 * Configure/Initialize hardware
1180 	 */
1181 	ret_val = ixgbe_init_hw(hw);
1182 	if (ret_val != IXGBE_SUCCESS) {
1183 		if (ret_val == IXGBE_ERR_EEPROM_VERSION) {
1184 			ixgbe_error(ixgbe,
1185 			    "This 82599 device is pre-release and contains"
1186 			    " outdated firmware, please contact your hardware"
1187 			    " vendor for a replacement.");
1188 		} else {
1189 			ixgbe_error(ixgbe, "Failed to initialize hardware");
1190 			return (IXGBE_FAILURE);
1191 		}
1192 	}
1193 
1194 	/*
1195 	 * Setup adapter interrupt vectors
1196 	 */
1197 	ixgbe_setup_adapter_vector(ixgbe);
1198 
1199 	/*
1200 	 * Initialize unicast addresses.
1201 	 */
1202 	ixgbe_init_unicst(ixgbe);
1203 
1204 	/*
1205 	 * Setup and initialize the mctable structures.
1206 	 */
1207 	ixgbe_setup_multicst(ixgbe);
1208 
1209 	/*
1210 	 * Set interrupt throttling rate
1211 	 */
1212 	for (i = 0; i < ixgbe->intr_cnt; i++) {
1213 		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
1214 	}
1215 
1216 	/*
1217 	 * Save the state of the phy
1218 	 */
1219 	ixgbe_get_hw_state(ixgbe);
1220 
1221 	/*
1222 	 * Make sure driver has control
1223 	 */
1224 	ixgbe_get_driver_control(hw);
1225 
1226 	return (IXGBE_SUCCESS);
1227 }
1228 
1229 /*
1230  * ixgbe_chip_stop - Stop the chipset hardware
1231  */
1232 static void
1233 ixgbe_chip_stop(ixgbe_t *ixgbe)
1234 {
1235 	struct ixgbe_hw *hw = &ixgbe->hw;
1236 
1237 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1238 
1239 	/*
1240 	 * Tell firmware driver is no longer in control
1241 	 */
1242 	ixgbe_release_driver_control(hw);
1243 
1244 	/*
1245 	 * Reset the chipset
1246 	 */
1247 	(void) ixgbe_reset_hw(hw);
1248 
1249 	/*
1250 	 * Reset PHY
1251 	 */
1252 	(void) ixgbe_reset_phy(hw);
1253 }
1254 
1255 /*
1256  * ixgbe_reset - Reset the chipset and re-start the driver.
1257  *
1258  * It involves stopping and re-starting the chipset,
1259  * and re-configuring the rx/tx rings.
1260  */
1261 static int
1262 ixgbe_reset(ixgbe_t *ixgbe)
1263 {
1264 	/*
1265 	 * Disable and stop the watchdog timer
1266 	 */
1267 	ixgbe_disable_watchdog_timer(ixgbe);
1268 
1269 	mutex_enter(&ixgbe->gen_lock);
1270 
1271 	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
1272 	ixgbe->ixgbe_state &= ~IXGBE_STARTED;
1273 
1274 	ixgbe_stop(ixgbe, B_FALSE);
1275 
1276 	if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1277 		mutex_exit(&ixgbe->gen_lock);
1278 		return (IXGBE_FAILURE);
1279 	}
1280 
1281 	ixgbe->ixgbe_state |= IXGBE_STARTED;
1282 	mutex_exit(&ixgbe->gen_lock);
1283 
1284 	/*
1285 	 * Enable and start the watchdog timer
1286 	 */
1287 	ixgbe_enable_watchdog_timer(ixgbe);
1288 
1289 	return (IXGBE_SUCCESS);
1290 }
1291 
1292 /*
1293  * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
1294  */
1295 static void
1296 ixgbe_tx_clean(ixgbe_t *ixgbe)
1297 {
1298 	ixgbe_tx_ring_t *tx_ring;
1299 	tx_control_block_t *tcb;
1300 	link_list_t pending_list;
1301 	uint32_t desc_num;
1302 	int i, j;
1303 
1304 	LINK_LIST_INIT(&pending_list);
1305 
1306 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1307 		tx_ring = &ixgbe->tx_rings[i];
1308 
1309 		mutex_enter(&tx_ring->recycle_lock);
1310 
1311 		/*
1312 		 * Clean the pending tx data - the pending packets in the
1313 		 * work_list that have no chances to be transmitted again.
1314 		 *
1315 		 * We must ensure the chipset is stopped or the link is down
1316 		 * before cleaning the transmit packets.
1317 		 */
1318 		desc_num = 0;
1319 		for (j = 0; j < tx_ring->ring_size; j++) {
1320 			tcb = tx_ring->work_list[j];
1321 			if (tcb != NULL) {
1322 				desc_num += tcb->desc_num;
1323 
1324 				tx_ring->work_list[j] = NULL;
1325 
1326 				ixgbe_free_tcb(tcb);
1327 
1328 				LIST_PUSH_TAIL(&pending_list, &tcb->link);
1329 			}
1330 		}
1331 
1332 		if (desc_num > 0) {
1333 			atomic_add_32(&tx_ring->tbd_free, desc_num);
1334 			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
1335 
1336 			/*
1337 			 * Reset the head and tail pointers of the tbd ring;
1338 			 * Reset the writeback head if it's enable.
1339 			 */
1340 			tx_ring->tbd_head = 0;
1341 			tx_ring->tbd_tail = 0;
1342 			if (ixgbe->tx_head_wb_enable)
1343 				*tx_ring->tbd_head_wb = 0;
1344 
1345 			IXGBE_WRITE_REG(&ixgbe->hw,
1346 			    IXGBE_TDH(tx_ring->index), 0);
1347 			IXGBE_WRITE_REG(&ixgbe->hw,
1348 			    IXGBE_TDT(tx_ring->index), 0);
1349 		}
1350 
1351 		mutex_exit(&tx_ring->recycle_lock);
1352 
1353 		/*
1354 		 * Add the tx control blocks in the pending list to
1355 		 * the free list.
1356 		 */
1357 		ixgbe_put_free_list(tx_ring, &pending_list);
1358 	}
1359 }
1360 
1361 /*
1362  * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1363  * transmitted.
1364  */
1365 static boolean_t
1366 ixgbe_tx_drain(ixgbe_t *ixgbe)
1367 {
1368 	ixgbe_tx_ring_t *tx_ring;
1369 	boolean_t done;
1370 	int i, j;
1371 
1372 	/*
1373 	 * Wait for a specific time to allow pending tx packets
1374 	 * to be transmitted.
1375 	 *
1376 	 * Check the counter tbd_free to see if transmission is done.
1377 	 * No lock protection is needed here.
1378 	 *
1379 	 * Return B_TRUE if all pending packets have been transmitted;
1380 	 * Otherwise return B_FALSE;
1381 	 */
1382 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1383 
1384 		done = B_TRUE;
1385 		for (j = 0; j < ixgbe->num_tx_rings; j++) {
1386 			tx_ring = &ixgbe->tx_rings[j];
1387 			done = done &&
1388 			    (tx_ring->tbd_free == tx_ring->ring_size);
1389 		}
1390 
1391 		if (done)
1392 			break;
1393 
1394 		msec_delay(1);
1395 	}
1396 
1397 	return (done);
1398 }
1399 
1400 /*
1401  * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1402  */
1403 static boolean_t
1404 ixgbe_rx_drain(ixgbe_t *ixgbe)
1405 {
1406 	boolean_t done = B_TRUE;
1407 	int i;
1408 
1409 	/*
1410 	 * Polling the rx free list to check if those rx buffers held by
1411 	 * the upper layer are released.
1412 	 *
1413 	 * Check the counter rcb_free to see if all pending buffers are
1414 	 * released. No lock protection is needed here.
1415 	 *
1416 	 * Return B_TRUE if all pending buffers have been released;
1417 	 * Otherwise return B_FALSE;
1418 	 */
1419 	for (i = 0; i < RX_DRAIN_TIME; i++) {
1420 		done = (ixgbe->rcb_pending == 0);
1421 
1422 		if (done)
1423 			break;
1424 
1425 		msec_delay(1);
1426 	}
1427 
1428 	return (done);
1429 }
1430 
1431 /*
1432  * ixgbe_start - Start the driver/chipset.
1433  */
1434 int
1435 ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
1436 {
1437 	int i;
1438 
1439 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1440 
1441 	if (alloc_buffer) {
1442 		if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
1443 			ixgbe_error(ixgbe,
1444 			    "Failed to allocate software receive rings");
1445 			return (IXGBE_FAILURE);
1446 		}
1447 
1448 		/* Allocate buffers for all the rx/tx rings */
1449 		if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
1450 			ixgbe_error(ixgbe, "Failed to allocate DMA resource");
1451 			return (IXGBE_FAILURE);
1452 		}
1453 
1454 		ixgbe->tx_ring_init = B_TRUE;
1455 	} else {
1456 		ixgbe->tx_ring_init = B_FALSE;
1457 	}
1458 
1459 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1460 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1461 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1462 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1463 
1464 	/*
1465 	 * Start the chipset hardware
1466 	 */
1467 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1468 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1469 		goto start_failure;
1470 	}
1471 
1472 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1473 		goto start_failure;
1474 	}
1475 
1476 	/*
1477 	 * Setup the rx/tx rings
1478 	 */
1479 	ixgbe_setup_rings(ixgbe);
1480 
1481 	/*
1482 	 * Enable adapter interrupts
1483 	 * The interrupts must be enabled after the driver state is START
1484 	 */
1485 	ixgbe_enable_adapter_interrupts(ixgbe);
1486 
1487 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1488 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1489 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1490 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1491 
1492 	return (IXGBE_SUCCESS);
1493 
1494 start_failure:
1495 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1496 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1497 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1498 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1499 
1500 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1501 
1502 	return (IXGBE_FAILURE);
1503 }
1504 
1505 /*
1506  * ixgbe_stop - Stop the driver/chipset.
1507  */
1508 void
1509 ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
1510 {
1511 	int i;
1512 
1513 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1514 
1515 	/*
1516 	 * Disable the adapter interrupts
1517 	 */
1518 	ixgbe_disable_adapter_interrupts(ixgbe);
1519 
1520 	/*
1521 	 * Drain the pending tx packets
1522 	 */
1523 	(void) ixgbe_tx_drain(ixgbe);
1524 
1525 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1526 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1527 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1528 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1529 
1530 	/*
1531 	 * Stop the chipset hardware
1532 	 */
1533 	ixgbe_chip_stop(ixgbe);
1534 
1535 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1536 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1537 	}
1538 
1539 	/*
1540 	 * Clean the pending tx data/resources
1541 	 */
1542 	ixgbe_tx_clean(ixgbe);
1543 
1544 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1545 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1546 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1547 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1548 
1549 	if (ixgbe->link_state == LINK_STATE_UP) {
1550 		ixgbe->link_state = LINK_STATE_UNKNOWN;
1551 		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
1552 	}
1553 
1554 	if (free_buffer) {
1555 		/*
1556 		 * Release the DMA/memory resources of rx/tx rings
1557 		 */
1558 		ixgbe_free_dma(ixgbe);
1559 		ixgbe_free_rx_data(ixgbe);
1560 	}
1561 }
1562 
1563 /*
1564  * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
1565  */
1566 static int
1567 ixgbe_alloc_rings(ixgbe_t *ixgbe)
1568 {
1569 	/*
1570 	 * Allocate memory space for rx rings
1571 	 */
1572 	ixgbe->rx_rings = kmem_zalloc(
1573 	    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
1574 	    KM_NOSLEEP);
1575 
1576 	if (ixgbe->rx_rings == NULL) {
1577 		return (IXGBE_FAILURE);
1578 	}
1579 
1580 	/*
1581 	 * Allocate memory space for tx rings
1582 	 */
1583 	ixgbe->tx_rings = kmem_zalloc(
1584 	    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
1585 	    KM_NOSLEEP);
1586 
1587 	if (ixgbe->tx_rings == NULL) {
1588 		kmem_free(ixgbe->rx_rings,
1589 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1590 		ixgbe->rx_rings = NULL;
1591 		return (IXGBE_FAILURE);
1592 	}
1593 
1594 	/*
1595 	 * Allocate memory space for rx ring groups
1596 	 */
1597 	ixgbe->rx_groups = kmem_zalloc(
1598 	    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
1599 	    KM_NOSLEEP);
1600 
1601 	if (ixgbe->rx_groups == NULL) {
1602 		kmem_free(ixgbe->rx_rings,
1603 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1604 		kmem_free(ixgbe->tx_rings,
1605 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1606 		ixgbe->rx_rings = NULL;
1607 		ixgbe->tx_rings = NULL;
1608 		return (IXGBE_FAILURE);
1609 	}
1610 
1611 	return (IXGBE_SUCCESS);
1612 }
1613 
1614 /*
1615  * ixgbe_free_rings - Free the memory space of rx/tx rings.
1616  */
1617 static void
1618 ixgbe_free_rings(ixgbe_t *ixgbe)
1619 {
1620 	if (ixgbe->rx_rings != NULL) {
1621 		kmem_free(ixgbe->rx_rings,
1622 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1623 		ixgbe->rx_rings = NULL;
1624 	}
1625 
1626 	if (ixgbe->tx_rings != NULL) {
1627 		kmem_free(ixgbe->tx_rings,
1628 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1629 		ixgbe->tx_rings = NULL;
1630 	}
1631 
1632 	if (ixgbe->rx_groups != NULL) {
1633 		kmem_free(ixgbe->rx_groups,
1634 		    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
1635 		ixgbe->rx_groups = NULL;
1636 	}
1637 }
1638 
1639 static int
1640 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
1641 {
1642 	ixgbe_rx_ring_t *rx_ring;
1643 	int i;
1644 
1645 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1646 		rx_ring = &ixgbe->rx_rings[i];
1647 		if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
1648 			goto alloc_rx_rings_failure;
1649 	}
1650 	return (IXGBE_SUCCESS);
1651 
1652 alloc_rx_rings_failure:
1653 	ixgbe_free_rx_data(ixgbe);
1654 	return (IXGBE_FAILURE);
1655 }
1656 
/*
 * ixgbe_free_rx_data - Free the software receive ring data.
 *
 * For each rx ring, release the rx data immediately if the upper
 * layer holds none of its buffers; otherwise only mark it
 * IXGBE_RX_STOPPED so it can be freed later once the outstanding
 * buffers come back (presumably via the rx buffer recycle path —
 * the deferred free itself is not visible in this function).
 */
static void
ixgbe_free_rx_data(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_data_t *rx_data;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];

		mutex_enter(&ixgbe->rx_pending_lock);
		rx_data = rx_ring->rx_data;

		if (rx_data != NULL) {
			/* Mark stopped so the data is not reused */
			rx_data->flag |= IXGBE_RX_STOPPED;

			if (rx_data->rcb_pending == 0) {
				/* Nothing outstanding - free right away */
				ixgbe_free_rx_ring_data(rx_data);
				rx_ring->rx_data = NULL;
			}
		}

		mutex_exit(&ixgbe->rx_pending_lock);
	}
}
1682 
1683 /*
1684  * ixgbe_setup_rings - Setup rx/tx rings.
1685  */
1686 static void
1687 ixgbe_setup_rings(ixgbe_t *ixgbe)
1688 {
1689 	/*
1690 	 * Setup the rx/tx rings, including the following:
1691 	 *
1692 	 * 1. Setup the descriptor ring and the control block buffers;
1693 	 * 2. Initialize necessary registers for receive/transmit;
1694 	 * 3. Initialize software pointers/parameters for receive/transmit;
1695 	 */
1696 	ixgbe_setup_rx(ixgbe);
1697 
1698 	ixgbe_setup_tx(ixgbe);
1699 }
1700 
/*
 * ixgbe_setup_rx_ring - Program one rx ring into the hardware.
 *
 * Loads the rx buffer addresses into the descriptor ring, then
 * programs the ring length, base address, head/tail pointers, the
 * receive descriptor control register (RXDCTL) and the
 * split/replication receive control register (SRRCTL).
 *
 * Called with both the ring's rx_lock and gen_lock held.
 */
static void
ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
{
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
	struct ixgbe_hw *hw = &ixgbe->hw;
	rx_control_block_t *rcb;
	union ixgbe_adv_rx_desc	*rbd;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;
	int i;

	ASSERT(mutex_owned(&rx_ring->rx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Point every descriptor at its pre-allocated rx buffer.  The
	 * header address is left clear; the one-buffer descriptor type
	 * configured in SRRCTL below does not use it.
	 */
	for (i = 0; i < ixgbe->rx_ring_size; i++) {
		rcb = rx_data->work_list[i];
		rbd = &rx_data->rbd_ring[i];

		rbd->read.pkt_addr = rcb->rx_buf.dma_address;
		rbd->read.hdr_addr = NULL;
	}

	/*
	 * Initialize the length register
	 */
	size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->index), size);

	/*
	 * Initialize the base address registers (64-bit DMA address is
	 * split across the high/low register pair)
	 */
	buf_low = (uint32_t)rx_data->rbd_area.dma_address;
	buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->index), buf_high);
	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->index), buf_low);

	/*
	 * Setup head & tail pointers.  Tail is set to the last
	 * descriptor so the entire ring is available to the hardware.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->index), rx_data->ring_size - 1);
	IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->index), 0);

	rx_data->rbd_next = 0;

	/*
	 * Setup the Receive Descriptor Control Register (RXDCTL)
	 * PTHRESH=32 descriptors (half the internal cache)
	 * HTHRESH=0 descriptors (to minimize latency on fetch)
	 * WTHRESH defaults to 1 (writeback each descriptor)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->index));
	reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */

	/* Not a valid value for 82599 */
	if (hw->mac.type < ixgbe_mac_82599EB) {
		reg_val |= 0x0020;	/* pthresh */
	}
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->index), reg_val);

	/* 82599 only: strip CRC and disable relaxed DMA aggregation */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
		reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
	}

	/*
	 * Setup the Split and Replication Receive Control Register.
	 * Set the rx buffer size and the advanced descriptor type.
	 */
	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
	reg_val |= IXGBE_SRRCTL_DROP_EN;
	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->index), reg_val);
}
1778 
/*
 * ixgbe_setup_rx - Setup the whole receive data path.
 *
 * Programs the packet split/replication type, filter control, enables
 * the receive unit, configures every rx ring and rx group, sets up
 * the per-ring statistics mapping, the max frame size, jumbo frame
 * support, hardware checksum and (with multiple rings) RSS.
 */
static void
ixgbe_setup_rx(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_rx_group_t *rx_group;
	uint32_t reg_val;
	uint32_t ring_mapping;
	int i;

	/* PSRTYPE must be configured for 82599 */
	reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
#define	IXGBE_PSRTYPE_L2_PKT	0x00001000
	reg_val |= IXGBE_PSRTYPE_L2_PKT;
	/*
	 * NOTE(review): magic top bits in PSRTYPE — meaning not evident
	 * here; confirm against the 82599 datasheet before changing.
	 */
	reg_val |= 0xE0000000;
	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);

	/*
	 * Set filter control in FCTRL to accept broadcast packets and do
	 * not pass pause frames to host.  Flow control settings are already
	 * in this register, so preserve them.
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_val |= IXGBE_FCTRL_BAM;	/* broadcast accept mode */
	reg_val |= IXGBE_FCTRL_DPF;	/* discard pause frames */
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);

	/*
	 * Enable the receive unit.  This must be done after filter
	 * control is set in FCTRL.
	 */
	reg_val = (IXGBE_RXCTRL_RXEN	/* Enable Receive Unit */
	    | IXGBE_RXCTRL_DMBYPS);	/* descriptor monitor bypass */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);

	/*
	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		ixgbe_setup_rx_ring(rx_ring);
	}

	/*
	 * Setup rx groups.
	 */
	for (i = 0; i < ixgbe->num_rx_groups; i++) {
		rx_group = &ixgbe->rx_groups[i];
		rx_group->index = i;
		rx_group->ixgbe = ixgbe;
	}

	/*
	 * Setup the per-ring statistics mapping.  Four 8-bit map fields
	 * are packed into each 32-bit RQSMR register; a partially filled
	 * register is flushed after the loop.
	 */
	ring_mapping = 0;
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
		if ((i & 0x3) == 0x3) {
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i >> 2), ring_mapping);
			ring_mapping = 0;
		}
	}
	if ((i & 0x3) != 0x3)
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i >> 2), ring_mapping);

	/*
	 * The Max Frame Size in MHADD/MAXFRS will be internally increased
	 * by four bytes if the packet has a VLAN field, so includes MTU,
	 * ethernet header and frame check sequence.
	 * Register is MAXFRS in 82599.
	 */
	reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);

	/*
	 * Setup Jumbo Frame enable bit
	 */
	if (ixgbe->default_mtu > ETHERMTU) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg_val |= IXGBE_HLREG0_JUMBOEN;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
	}

	/*
	 * Hardware checksum settings
	 */
	if (ixgbe->rx_hcksum_enable) {
		reg_val = IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
	}

	/*
	 * Setup RSS for multiple receive queues
	 */
	if (ixgbe->num_rx_rings > 1)
		ixgbe_setup_rss(ixgbe);
}
1879 
/*
 * ixgbe_setup_tx_ring - Program one tx ring into the hardware.
 *
 * Programs the ring length, base address and head/tail pointers,
 * optionally enables descriptor head write-back, then resets the
 * software ring state and the per-ring tx context.
 *
 * Called with both the ring's tx_lock and gen_lock held.
 */
static void
ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
{
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;

	ASSERT(mutex_owned(&tx_ring->tx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Initialize the length register
	 */
	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);

	/*
	 * Initialize the base address registers (64-bit DMA address is
	 * split across the high/low register pair)
	 */
	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);

	/*
	 * Setup head & tail pointers
	 */
	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);

	/*
	 * Setup head write-back
	 */
	if (ixgbe->tx_head_wb_enable) {
		/*
		 * The memory of the head write-back is allocated using
		 * the extra tbd beyond the tail of the tbd ring.
		 */
		tx_ring->tbd_head_wb = (uint32_t *)
		    ((uintptr_t)tx_ring->tbd_area.address + size);
		*tx_ring->tbd_head_wb = 0;

		buf_low = (uint32_t)
		    (tx_ring->tbd_area.dma_address + size);
		buf_high = (uint32_t)
		    ((tx_ring->tbd_area.dma_address + size) >> 32);

		/* Set the head write-back enable bit */
		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;

		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);

		/*
		 * Turn off relaxed ordering for head write back or it will
		 * cause problems with the tx recycling
		 */
		reg_val = IXGBE_READ_REG(hw,
		    IXGBE_DCA_TXCTRL(tx_ring->index));
		reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw,
		    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
	} else {
		tx_ring->tbd_head_wb = NULL;
	}

	/* Reset the software view of the descriptor ring */
	tx_ring->tbd_head = 0;
	tx_ring->tbd_tail = 0;
	tx_ring->tbd_free = tx_ring->ring_size;

	/* Only reset the tcb state when the free list was rebuilt */
	if (ixgbe->tx_ring_init == B_TRUE) {
		tx_ring->tcb_head = 0;
		tx_ring->tcb_tail = 0;
		tx_ring->tcb_free = tx_ring->free_list_size;
	}

	/*
	 * Initialize the s/w context structure
	 */
	bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
}
1964 
1965 static void
1966 ixgbe_setup_tx(ixgbe_t *ixgbe)
1967 {
1968 	struct ixgbe_hw *hw = &ixgbe->hw;
1969 	ixgbe_tx_ring_t *tx_ring;
1970 	uint32_t reg_val;
1971 	uint32_t ring_mapping;
1972 	int i;
1973 
1974 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1975 		tx_ring = &ixgbe->tx_rings[i];
1976 		ixgbe_setup_tx_ring(tx_ring);
1977 	}
1978 
1979 	/*
1980 	 * Setup the per-ring statistics mapping.
1981 	 */
1982 	ring_mapping = 0;
1983 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1984 		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
1985 		if ((i & 0x3) == 0x3) {
1986 			if (hw->mac.type >= ixgbe_mac_82599EB) {
1987 				IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
1988 				    ring_mapping);
1989 			} else {
1990 				IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
1991 				    ring_mapping);
1992 			}
1993 			ring_mapping = 0;
1994 		}
1995 	}
1996 	if ((i & 0x3) != 0x3)
1997 		if (hw->mac.type >= ixgbe_mac_82599EB) {
1998 			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
1999 		} else {
2000 			IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2001 		}
2002 
2003 	/*
2004 	 * Enable CRC appending and TX padding (for short tx frames)
2005 	 */
2006 	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2007 	reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2008 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2009 
2010 	/*
2011 	 * enable DMA for 82599 parts
2012 	 */
2013 	if (hw->mac.type == ixgbe_mac_82599EB) {
2014 	/* DMATXCTL.TE must be set after all Tx config is complete */
2015 		reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2016 		reg_val |= IXGBE_DMATXCTL_TE;
2017 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2018 	}
2019 
2020 	/*
2021 	 * Enabling tx queues ..
2022 	 * For 82599 must be done after DMATXCTL.TE is set
2023 	 */
2024 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2025 		tx_ring = &ixgbe->tx_rings[i];
2026 		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2027 		reg_val |= IXGBE_TXDCTL_ENABLE;
2028 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2029 	}
2030 }
2031 
2032 /*
2033  * ixgbe_setup_rss - Setup receive-side scaling feature.
2034  */
2035 static void
2036 ixgbe_setup_rss(ixgbe_t *ixgbe)
2037 {
2038 	struct ixgbe_hw *hw = &ixgbe->hw;
2039 	uint32_t i, mrqc, rxcsum;
2040 	uint32_t random;
2041 	uint32_t reta;
2042 
2043 	/*
2044 	 * Fill out redirection table
2045 	 */
2046 	reta = 0;
2047 	for (i = 0; i < 128; i++) {
2048 		reta = (reta << 8) | (i % ixgbe->num_rx_rings);
2049 		if ((i & 3) == 3)
2050 			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2051 	}
2052 
2053 	/*
2054 	 * Fill out hash function seeds with a random constant
2055 	 */
2056 	for (i = 0; i < 10; i++) {
2057 		(void) random_get_pseudo_bytes((uint8_t *)&random,
2058 		    sizeof (uint32_t));
2059 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2060 	}
2061 
2062 	/*
2063 	 * Enable RSS & perform hash on these packet types
2064 	 */
2065 	mrqc = IXGBE_MRQC_RSSEN |
2066 	    IXGBE_MRQC_RSS_FIELD_IPV4 |
2067 	    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2068 	    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2069 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2070 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2071 	    IXGBE_MRQC_RSS_FIELD_IPV6 |
2072 	    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2073 	    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2074 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2075 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2076 
2077 	/*
2078 	 * Disable Packet Checksum to enable RSS for multiple receive queues.
2079 	 * It is an adapter hardware limitation that Packet Checksum is
2080 	 * mutually exclusive with RSS.
2081 	 */
2082 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2083 	rxcsum |= IXGBE_RXCSUM_PCSD;
2084 	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2085 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2086 }
2087 
2088 /*
2089  * ixgbe_init_unicst - Initialize the unicast addresses.
2090  */
2091 static void
2092 ixgbe_init_unicst(ixgbe_t *ixgbe)
2093 {
2094 	struct ixgbe_hw *hw = &ixgbe->hw;
2095 	uint8_t *mac_addr;
2096 	int slot;
2097 	/*
2098 	 * Here we should consider two situations:
2099 	 *
2100 	 * 1. Chipset is initialized at the first time,
2101 	 *    Clear all the multiple unicast addresses.
2102 	 *
2103 	 * 2. Chipset is reset
2104 	 *    Recover the multiple unicast addresses from the
2105 	 *    software data structure to the RAR registers.
2106 	 */
2107 	if (!ixgbe->unicst_init) {
2108 		/*
2109 		 * Initialize the multiple unicast addresses
2110 		 */
2111 		ixgbe->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
2112 		ixgbe->unicst_avail = ixgbe->unicst_total;
2113 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2114 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2115 			bzero(mac_addr, ETHERADDRL);
2116 			(void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL);
2117 			ixgbe->unicst_addr[slot].mac.set = 0;
2118 		}
2119 		ixgbe->unicst_init = B_TRUE;
2120 	} else {
2121 		/* Re-configure the RAR registers */
2122 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2123 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2124 			if (ixgbe->unicst_addr[slot].mac.set == 1) {
2125 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2126 				    NULL, IXGBE_RAH_AV);
2127 			} else {
2128 				bzero(mac_addr, ETHERADDRL);
2129 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2130 				    NULL, NULL);
2131 			}
2132 		}
2133 	}
2134 }
2135 
2136 /*
2137  * ixgbe_unicst_set - Set the unicast address to the specified slot.
2138  */
2139 int
2140 ixgbe_unicst_set(ixgbe_t *ixgbe, const uint8_t *mac_addr,
2141     int slot)
2142 {
2143 	struct ixgbe_hw *hw = &ixgbe->hw;
2144 
2145 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2146 
2147 	/*
2148 	 * Save the unicast address in the software data structure
2149 	 */
2150 	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
2151 
2152 	/*
2153 	 * Set the unicast address to the RAR register
2154 	 */
2155 	(void) ixgbe_set_rar(hw, slot, (uint8_t *)mac_addr, NULL, IXGBE_RAH_AV);
2156 
2157 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2158 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2159 		return (EIO);
2160 	}
2161 
2162 	return (0);
2163 }
2164 
2165 /*
2166  * ixgbe_unicst_find - Find the slot for the specified unicast address
2167  */
2168 int
2169 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
2170 {
2171 	int slot;
2172 
2173 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2174 
2175 	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2176 		if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
2177 		    mac_addr, ETHERADDRL) == 0)
2178 			return (slot);
2179 	}
2180 
2181 	return (-1);
2182 }
2183 
2184 /*
2185  * ixgbe_multicst_add - Add a multicst address.
2186  */
2187 int
2188 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2189 {
2190 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2191 
2192 	if ((multiaddr[0] & 01) == 0) {
2193 		return (EINVAL);
2194 	}
2195 
2196 	if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2197 		return (ENOENT);
2198 	}
2199 
2200 	bcopy(multiaddr,
2201 	    &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2202 	ixgbe->mcast_count++;
2203 
2204 	/*
2205 	 * Update the multicast table in the hardware
2206 	 */
2207 	ixgbe_setup_multicst(ixgbe);
2208 
2209 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2210 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2211 		return (EIO);
2212 	}
2213 
2214 	return (0);
2215 }
2216 
2217 /*
2218  * ixgbe_multicst_remove - Remove a multicst address.
2219  */
2220 int
2221 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2222 {
2223 	int i;
2224 
2225 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2226 
2227 	for (i = 0; i < ixgbe->mcast_count; i++) {
2228 		if (bcmp(multiaddr, &ixgbe->mcast_table[i],
2229 		    ETHERADDRL) == 0) {
2230 			for (i++; i < ixgbe->mcast_count; i++) {
2231 				ixgbe->mcast_table[i - 1] =
2232 				    ixgbe->mcast_table[i];
2233 			}
2234 			ixgbe->mcast_count--;
2235 			break;
2236 		}
2237 	}
2238 
2239 	/*
2240 	 * Update the multicast table in the hardware
2241 	 */
2242 	ixgbe_setup_multicst(ixgbe);
2243 
2244 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2245 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2246 		return (EIO);
2247 	}
2248 
2249 	return (0);
2250 }
2251 
2252 /*
2253  * ixgbe_setup_multicast - Setup multicast data structures.
2254  *
2255  * This routine initializes all of the multicast related structures
2256  * and save them in the hardware registers.
2257  */
2258 static void
2259 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2260 {
2261 	uint8_t *mc_addr_list;
2262 	uint32_t mc_addr_count;
2263 	struct ixgbe_hw *hw = &ixgbe->hw;
2264 
2265 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2266 
2267 	ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2268 
2269 	mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2270 	mc_addr_count = ixgbe->mcast_count;
2271 
2272 	/*
2273 	 * Update the multicast addresses to the MTA registers
2274 	 */
2275 	(void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2276 	    ixgbe_mc_table_itr);
2277 }
2278 
2279 /*
2280  * ixgbe_get_conf - Get driver configurations set in driver.conf.
2281  *
2282  * This routine gets user-configured values out of the configuration
2283  * file ixgbe.conf.
2284  *
2285  * For each configurable value, there is a minimum, a maximum, and a
2286  * default.
2287  * If user does not configure a value, use the default.
2288  * If user configures below the minimum, use the minumum.
2289  * If user configures above the maximum, use the maxumum.
2290  */
2291 static void
2292 ixgbe_get_conf(ixgbe_t *ixgbe)
2293 {
2294 	struct ixgbe_hw *hw = &ixgbe->hw;
2295 	uint32_t flow_control;
2296 
2297 	/*
2298 	 * ixgbe driver supports the following user configurations:
2299 	 *
2300 	 * Jumbo frame configuration:
2301 	 *    default_mtu
2302 	 *
2303 	 * Ethernet flow control configuration:
2304 	 *    flow_control
2305 	 *
2306 	 * Multiple rings configurations:
2307 	 *    tx_queue_number
2308 	 *    tx_ring_size
2309 	 *    rx_queue_number
2310 	 *    rx_ring_size
2311 	 *
2312 	 * Call ixgbe_get_prop() to get the value for a specific
2313 	 * configuration parameter.
2314 	 */
2315 
2316 	/*
2317 	 * Jumbo frame configuration - max_frame_size controls host buffer
2318 	 * allocation, so includes MTU, ethernet header, vlan tag and
2319 	 * frame check sequence.
2320 	 */
2321 	ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
2322 	    MIN_MTU, MAX_MTU, DEFAULT_MTU);
2323 
2324 	ixgbe->max_frame_size = ixgbe->default_mtu +
2325 	    sizeof (struct ether_vlan_header) + ETHERFCSL;
2326 
2327 	/*
2328 	 * Ethernet flow control configuration
2329 	 */
2330 	flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
2331 	    ixgbe_fc_none, 3, ixgbe_fc_none);
2332 	if (flow_control == 3)
2333 		flow_control = ixgbe_fc_default;
2334 
2335 	/*
2336 	 * fc.requested mode is what the user requests.  After autoneg,
2337 	 * fc.current_mode will be the flow_control mode that was negotiated.
2338 	 */
2339 	hw->fc.requested_mode = flow_control;
2340 
2341 	/*
2342 	 * Multiple rings configurations
2343 	 */
2344 	ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
2345 	    ixgbe->capab->min_tx_que_num,
2346 	    ixgbe->capab->max_tx_que_num,
2347 	    ixgbe->capab->def_tx_que_num);
2348 	ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
2349 	    MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
2350 
2351 	ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
2352 	    ixgbe->capab->min_rx_que_num,
2353 	    ixgbe->capab->max_rx_que_num,
2354 	    ixgbe->capab->def_rx_que_num);
2355 	ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
2356 	    MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
2357 
2358 	/*
2359 	 * Multiple groups configuration
2360 	 */
2361 	ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
2362 	    MIN_RX_GROUP_NUM, MAX_RX_GROUP_NUM, DEFAULT_RX_GROUP_NUM);
2363 
2364 	ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
2365 	    0, 1, DEFAULT_MR_ENABLE);
2366 
2367 	if (ixgbe->mr_enable == B_FALSE) {
2368 		ixgbe->num_tx_rings = 1;
2369 		ixgbe->num_rx_rings = 1;
2370 		ixgbe->num_rx_groups = 1;
2371 	}
2372 
2373 	/*
2374 	 * Tunable used to force an interrupt type. The only use is
2375 	 * for testing of the lesser interrupt types.
2376 	 * 0 = don't force interrupt type
2377 	 * 1 = force interrupt type MSI-X
2378 	 * 2 = force interrupt type MSI
2379 	 * 3 = force interrupt type Legacy
2380 	 */
2381 	ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
2382 	    IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
2383 
2384 	ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
2385 	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
2386 	ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
2387 	    0, 1, DEFAULT_RX_HCKSUM_ENABLE);
2388 	ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
2389 	    0, 1, DEFAULT_LSO_ENABLE);
2390 	ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
2391 	    0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
2392 
2393 	/* Head Write Back not recommended for 82599 */
2394 	if (hw->mac.type >= ixgbe_mac_82599EB) {
2395 		ixgbe->tx_head_wb_enable = B_FALSE;
2396 	}
2397 
2398 	/*
2399 	 * ixgbe LSO needs the tx h/w checksum support.
2400 	 * LSO will be disabled if tx h/w checksum is not
2401 	 * enabled.
2402 	 */
2403 	if (ixgbe->tx_hcksum_enable == B_FALSE) {
2404 		ixgbe->lso_enable = B_FALSE;
2405 	}
2406 
2407 	ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
2408 	    MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
2409 	    DEFAULT_TX_COPY_THRESHOLD);
2410 	ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
2411 	    PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
2412 	    MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
2413 	ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
2414 	    PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
2415 	    MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
2416 	ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
2417 	    PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
2418 	    MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
2419 
2420 	ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
2421 	    MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
2422 	    DEFAULT_RX_COPY_THRESHOLD);
2423 	ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
2424 	    MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
2425 	    DEFAULT_RX_LIMIT_PER_INTR);
2426 
2427 	ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
2428 	    ixgbe->capab->min_intr_throttle,
2429 	    ixgbe->capab->max_intr_throttle,
2430 	    ixgbe->capab->def_intr_throttle);
2431 	/*
2432 	 * 82599 requires the interupt throttling rate is
2433 	 * a multiple of 8. This is enforced by the register
2434 	 * definiton.
2435 	 */
2436 	if (hw->mac.type == ixgbe_mac_82599EB)
2437 		ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
2438 }
2439 
2440 static void
2441 ixgbe_init_params(ixgbe_t *ixgbe)
2442 {
2443 	ixgbe->param_en_10000fdx_cap = 1;
2444 	ixgbe->param_en_1000fdx_cap = 1;
2445 	ixgbe->param_en_100fdx_cap = 1;
2446 	ixgbe->param_adv_10000fdx_cap = 1;
2447 	ixgbe->param_adv_1000fdx_cap = 1;
2448 	ixgbe->param_adv_100fdx_cap = 1;
2449 
2450 	ixgbe->param_pause_cap = 1;
2451 	ixgbe->param_asym_pause_cap = 1;
2452 	ixgbe->param_rem_fault = 0;
2453 
2454 	ixgbe->param_adv_autoneg_cap = 1;
2455 	ixgbe->param_adv_pause_cap = 1;
2456 	ixgbe->param_adv_asym_pause_cap = 1;
2457 	ixgbe->param_adv_rem_fault = 0;
2458 
2459 	ixgbe->param_lp_10000fdx_cap = 0;
2460 	ixgbe->param_lp_1000fdx_cap = 0;
2461 	ixgbe->param_lp_100fdx_cap = 0;
2462 	ixgbe->param_lp_autoneg_cap = 0;
2463 	ixgbe->param_lp_pause_cap = 0;
2464 	ixgbe->param_lp_asym_pause_cap = 0;
2465 	ixgbe->param_lp_rem_fault = 0;
2466 }
2467 
2468 /*
2469  * ixgbe_get_prop - Get a property value out of the configuration file
2470  * ixgbe.conf.
2471  *
2472  * Caller provides the name of the property, a default value, a minimum
2473  * value, and a maximum value.
2474  *
2475  * Return configured value of the property, with default, minimum and
2476  * maximum properly applied.
2477  */
2478 static int
2479 ixgbe_get_prop(ixgbe_t *ixgbe,
2480     char *propname,	/* name of the property */
2481     int minval,		/* minimum acceptable value */
2482     int maxval,		/* maximim acceptable value */
2483     int defval)		/* default value */
2484 {
2485 	int value;
2486 
2487 	/*
2488 	 * Call ddi_prop_get_int() to read the conf settings
2489 	 */
2490 	value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
2491 	    DDI_PROP_DONTPASS, propname, defval);
2492 	if (value > maxval)
2493 		value = maxval;
2494 
2495 	if (value < minval)
2496 		value = minval;
2497 
2498 	return (value);
2499 }
2500 
2501 /*
2502  * ixgbe_driver_setup_link - Using the link properties to setup the link.
2503  */
2504 int
2505 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
2506 {
2507 	struct ixgbe_mac_info *mac;
2508 	struct ixgbe_phy_info *phy;
2509 	boolean_t invalid;
2510 
2511 	mac = &ixgbe->hw.mac;
2512 	phy = &ixgbe->hw.phy;
2513 	invalid = B_FALSE;
2514 
2515 	if (ixgbe->param_adv_autoneg_cap == 1) {
2516 		mac->autoneg = B_TRUE;
2517 		phy->autoneg_advertised = 0;
2518 
2519 		/*
2520 		 * No half duplex support with 10Gb parts
2521 		 */
2522 		if (ixgbe->param_adv_10000fdx_cap == 1)
2523 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
2524 
2525 		if (ixgbe->param_adv_1000fdx_cap == 1)
2526 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
2527 
2528 		if (ixgbe->param_adv_100fdx_cap == 1)
2529 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
2530 
2531 		if (phy->autoneg_advertised == 0)
2532 			invalid = B_TRUE;
2533 	} else {
2534 		ixgbe->hw.mac.autoneg = B_FALSE;
2535 	}
2536 
2537 	if (invalid) {
2538 		ixgbe_notice(ixgbe, "Invalid link settings. Setup link to "
2539 		    "autonegotiation with full link capabilities.");
2540 		ixgbe->hw.mac.autoneg = B_TRUE;
2541 	}
2542 
2543 	if (setup_hw) {
2544 		if (ixgbe_setup_link(&ixgbe->hw) != IXGBE_SUCCESS) {
2545 			ixgbe_notice(ixgbe, "Setup link failed on this "
2546 			    "device.");
2547 			return (IXGBE_FAILURE);
2548 		}
2549 	}
2550 
2551 	return (IXGBE_SUCCESS);
2552 }
2553 
2554 /*
2555  * ixgbe_driver_link_check - Link status processing done in taskq.
2556  */
2557 static void
2558 ixgbe_driver_link_check(void *arg)
2559 {
2560 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2561 	struct ixgbe_hw *hw = &ixgbe->hw;
2562 	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
2563 	boolean_t link_up = B_FALSE;
2564 	boolean_t link_changed = B_FALSE;
2565 
2566 	mutex_enter(&ixgbe->gen_lock);
2567 
2568 	/* check for link, wait the full time */
2569 	(void) ixgbe_check_link(hw, &speed, &link_up, true);
2570 	if (link_up) {
2571 		/* Link is up, enable flow control settings */
2572 		(void) ixgbe_fc_enable(hw, 0);
2573 
2574 		/*
2575 		 * The Link is up, check whether it was marked as down earlier
2576 		 */
2577 		if (ixgbe->link_state != LINK_STATE_UP) {
2578 			switch (speed) {
2579 			case IXGBE_LINK_SPEED_10GB_FULL:
2580 				ixgbe->link_speed = SPEED_10GB;
2581 				break;
2582 			case IXGBE_LINK_SPEED_1GB_FULL:
2583 				ixgbe->link_speed = SPEED_1GB;
2584 				break;
2585 			case IXGBE_LINK_SPEED_100_FULL:
2586 				ixgbe->link_speed = SPEED_100;
2587 			}
2588 			ixgbe->link_duplex = LINK_DUPLEX_FULL;
2589 			ixgbe->link_state = LINK_STATE_UP;
2590 			ixgbe->link_down_timeout = 0;
2591 			link_changed = B_TRUE;
2592 		}
2593 	} else {
2594 		if (ixgbe->link_state != LINK_STATE_DOWN) {
2595 			ixgbe->link_speed = 0;
2596 			ixgbe->link_duplex = 0;
2597 			ixgbe->link_state = LINK_STATE_DOWN;
2598 			link_changed = B_TRUE;
2599 		}
2600 
2601 		if (ixgbe->ixgbe_state & IXGBE_STARTED) {
2602 			if (ixgbe->link_down_timeout < MAX_LINK_DOWN_TIMEOUT) {
2603 				ixgbe->link_down_timeout++;
2604 			} else if (ixgbe->link_down_timeout ==
2605 			    MAX_LINK_DOWN_TIMEOUT) {
2606 				ixgbe_tx_clean(ixgbe);
2607 				ixgbe->link_down_timeout++;
2608 			}
2609 		}
2610 	}
2611 
2612 	/*
2613 	 * this is only reached after a link-status-change interrupt
2614 	 * so always get new phy state
2615 	 */
2616 	ixgbe_get_hw_state(ixgbe);
2617 
2618 	/* re-enable the interrupt, which was automasked */
2619 	ixgbe->eims |= IXGBE_EICR_LSC;
2620 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
2621 
2622 	mutex_exit(&ixgbe->gen_lock);
2623 
2624 	/* outside the gen_lock */
2625 	if (link_changed) {
2626 		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
2627 	}
2628 }
2629 
2630 /*
2631  * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
2632  */
2633 static void
2634 ixgbe_sfp_check(void *arg)
2635 {
2636 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2637 	uint32_t eicr = ixgbe->eicr;
2638 	struct ixgbe_hw *hw = &ixgbe->hw;
2639 	uint32_t autoneg;
2640 
2641 	if (eicr & IXGBE_EICR_GPI_SDP1) {
2642 		/* clear the interrupt */
2643 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
2644 
2645 		/* if link up, do multispeed fiber setup */
2646 		(void) ixgbe_get_link_capabilities(hw, &autoneg,
2647 		    &hw->mac.autoneg);
2648 		(void) ixgbe_setup_link_speed(hw, autoneg, B_TRUE, B_TRUE);
2649 		ixgbe_driver_link_check(ixgbe);
2650 	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
2651 		/* clear the interrupt */
2652 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
2653 
2654 		/* if link up, do sfp module setup */
2655 		(void) hw->mac.ops.setup_sfp(hw);
2656 
2657 		/* do multispeed fiber setup */
2658 		(void) ixgbe_get_link_capabilities(hw, &autoneg,
2659 		    &hw->mac.autoneg);
2660 		(void) ixgbe_setup_link_speed(hw, autoneg, B_TRUE, B_TRUE);
2661 		ixgbe_driver_link_check(ixgbe);
2662 	}
2663 }
2664 
2665 /*
2666  * ixgbe_local_timer - Driver watchdog function.
2667  *
2668  * This function will handle the transmit stall check, link status check and
2669  * other routines.
2670  */
2671 static void
2672 ixgbe_local_timer(void *arg)
2673 {
2674 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2675 
2676 	if (ixgbe_stall_check(ixgbe)) {
2677 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2678 		ixgbe->reset_count++;
2679 		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
2680 			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
2681 	}
2682 
2683 	ixgbe_restart_watchdog_timer(ixgbe);
2684 }
2685 
2686 /*
2687  * ixgbe_stall_check - Check for transmit stall.
2688  *
2689  * This function checks if the adapter is stalled (in transmit).
2690  *
2691  * It is called each time the watchdog timeout is invoked.
2692  * If the transmit descriptor reclaim continuously fails,
2693  * the watchdog value will increment by 1. If the watchdog
2694  * value exceeds the threshold, the ixgbe is assumed to
2695  * have stalled and need to be reset.
2696  */
2697 static boolean_t
2698 ixgbe_stall_check(ixgbe_t *ixgbe)
2699 {
2700 	ixgbe_tx_ring_t *tx_ring;
2701 	boolean_t result;
2702 	int i;
2703 
2704 	if (ixgbe->link_state != LINK_STATE_UP)
2705 		return (B_FALSE);
2706 
2707 	/*
2708 	 * If any tx ring is stalled, we'll reset the chipset
2709 	 */
2710 	result = B_FALSE;
2711 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2712 		tx_ring = &ixgbe->tx_rings[i];
2713 		if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
2714 			tx_ring->tx_recycle(tx_ring);
2715 		}
2716 
2717 		if (tx_ring->recycle_fail > 0)
2718 			tx_ring->stall_watchdog++;
2719 		else
2720 			tx_ring->stall_watchdog = 0;
2721 
2722 		if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
2723 			result = B_TRUE;
2724 			break;
2725 		}
2726 	}
2727 
2728 	if (result) {
2729 		tx_ring->stall_watchdog = 0;
2730 		tx_ring->recycle_fail = 0;
2731 	}
2732 
2733 	return (result);
2734 }
2735 
2736 
2737 /*
2738  * is_valid_mac_addr - Check if the mac address is valid.
2739  */
2740 static boolean_t
2741 is_valid_mac_addr(uint8_t *mac_addr)
2742 {
2743 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
2744 	const uint8_t addr_test2[6] =
2745 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2746 
2747 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
2748 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
2749 		return (B_FALSE);
2750 
2751 	return (B_TRUE);
2752 }
2753 
/*
 * ixgbe_find_mac_address - Override the chip's MAC address from firmware
 * properties (SPARC/OBP only).
 *
 * Precedence (lowest to highest): "local-mac-address" property, the
 * system address when "local-mac-address?" is "false", and finally the
 * "mac-address" property set by netboot.  On non-SPARC this is a no-op.
 * Always returns B_TRUE as written.
 */
static boolean_t
ixgbe_find_mac_address(ixgbe_t *ixgbe)
{
#ifdef __sparc
	struct ixgbe_hw *hw = &ixgbe->hw;
	uchar_t *bytes;
	struct ether_addr sysaddr;
	uint_t nelts;
	int err;
	boolean_t found = B_FALSE;

	/*
	 * The "vendor's factory-set address" may already have
	 * been extracted from the chip, but if the property
	 * "local-mac-address" is set we use that instead.
	 *
	 * We check whether it looks like an array of 6
	 * bytes (which it should, if OBP set it).  If we can't
	 * make sense of it this way, we'll ignore it.
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Look up the OBP property "local-mac-address?". If the user has set
	 * 'local-mac-address? = false', use "the system address" instead.
	 */
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
			if (localetheraddr(NULL, &sysaddr) != 0) {
				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
				found = B_TRUE;
			}
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Finally(!), if there's a valid "mac-address" property (created
	 * if we netbooted from this interface), we must use this instead
	 * of any of the above to ensure that the NFS/install server doesn't
	 * get confused by the address changing as Solaris takes over!
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	if (found) {
		/* Keep perm_addr consistent with the override. */
		bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
		return (B_TRUE);
	}
#else
	_NOTE(ARGUNUSED(ixgbe));
#endif

	return (B_TRUE);
}
2827 
2828 #pragma inline(ixgbe_arm_watchdog_timer)
2829 static void
2830 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
2831 {
2832 	/*
2833 	 * Fire a watchdog timer
2834 	 */
2835 	ixgbe->watchdog_tid =
2836 	    timeout(ixgbe_local_timer,
2837 	    (void *)ixgbe, 1 * drv_usectohz(1000000));
2838 
2839 }
2840 
2841 /*
2842  * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
2843  */
2844 void
2845 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
2846 {
2847 	mutex_enter(&ixgbe->watchdog_lock);
2848 
2849 	if (!ixgbe->watchdog_enable) {
2850 		ixgbe->watchdog_enable = B_TRUE;
2851 		ixgbe->watchdog_start = B_TRUE;
2852 		ixgbe_arm_watchdog_timer(ixgbe);
2853 	}
2854 
2855 	mutex_exit(&ixgbe->watchdog_lock);
2856 }
2857 
2858 /*
2859  * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
2860  */
2861 void
2862 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
2863 {
2864 	timeout_id_t tid;
2865 
2866 	mutex_enter(&ixgbe->watchdog_lock);
2867 
2868 	ixgbe->watchdog_enable = B_FALSE;
2869 	ixgbe->watchdog_start = B_FALSE;
2870 	tid = ixgbe->watchdog_tid;
2871 	ixgbe->watchdog_tid = 0;
2872 
2873 	mutex_exit(&ixgbe->watchdog_lock);
2874 
2875 	if (tid != 0)
2876 		(void) untimeout(tid);
2877 }
2878 
2879 /*
2880  * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
2881  */
2882 void
2883 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
2884 {
2885 	mutex_enter(&ixgbe->watchdog_lock);
2886 
2887 	if (ixgbe->watchdog_enable) {
2888 		if (!ixgbe->watchdog_start) {
2889 			ixgbe->watchdog_start = B_TRUE;
2890 			ixgbe_arm_watchdog_timer(ixgbe);
2891 		}
2892 	}
2893 
2894 	mutex_exit(&ixgbe->watchdog_lock);
2895 }
2896 
2897 /*
2898  * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
2899  */
2900 static void
2901 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
2902 {
2903 	mutex_enter(&ixgbe->watchdog_lock);
2904 
2905 	if (ixgbe->watchdog_start)
2906 		ixgbe_arm_watchdog_timer(ixgbe);
2907 
2908 	mutex_exit(&ixgbe->watchdog_lock);
2909 }
2910 
2911 /*
2912  * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
2913  */
2914 void
2915 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
2916 {
2917 	timeout_id_t tid;
2918 
2919 	mutex_enter(&ixgbe->watchdog_lock);
2920 
2921 	ixgbe->watchdog_start = B_FALSE;
2922 	tid = ixgbe->watchdog_tid;
2923 	ixgbe->watchdog_tid = 0;
2924 
2925 	mutex_exit(&ixgbe->watchdog_lock);
2926 
2927 	if (tid != 0)
2928 		(void) untimeout(tid);
2929 }
2930 
2931 /*
2932  * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
2933  */
2934 static void
2935 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
2936 {
2937 	struct ixgbe_hw *hw = &ixgbe->hw;
2938 
2939 	/*
2940 	 * mask all interrupts off
2941 	 */
2942 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
2943 
2944 	/*
2945 	 * for MSI-X, also disable autoclear
2946 	 */
2947 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
2948 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
2949 	}
2950 
2951 	IXGBE_WRITE_FLUSH(hw);
2952 }
2953 
2954 /*
2955  * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
2956  */
2957 static void
2958 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
2959 {
2960 	struct ixgbe_hw *hw = &ixgbe->hw;
2961 	uint32_t eiac, eiam;
2962 	uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2963 
2964 	/* interrupt types to enable */
2965 	ixgbe->eims = IXGBE_EIMS_ENABLE_MASK;	/* shared code default */
2966 	ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER;	/* minus tcp timer */
2967 	ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */
2968 
2969 	/* enable automask on "other" causes that this adapter can generate */
2970 	eiam = ixgbe->capab->other_intr;
2971 
2972 	/*
2973 	 * msi-x mode
2974 	 */
2975 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
2976 		/* enable autoclear but not on bits 29:20 */
2977 		eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
2978 
2979 		/* general purpose interrupt enable */
2980 		gpie |= (IXGBE_GPIE_MSIX_MODE
2981 		    | IXGBE_GPIE_PBA_SUPPORT
2982 		    | IXGBE_GPIE_OCD
2983 		    | IXGBE_GPIE_EIAME);
2984 	/*
2985 	 * non-msi-x mode
2986 	 */
2987 	} else {
2988 
2989 		/* disable autoclear, leave gpie at default */
2990 		eiac = 0;
2991 
2992 		/*
2993 		 * General purpose interrupt enable.
2994 		 * For 82599, extended interrupt automask enable
2995 		 * only in MSI or MSI-X mode
2996 		 */
2997 		if ((hw->mac.type < ixgbe_mac_82599EB) ||
2998 		    (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
2999 			gpie |= IXGBE_GPIE_EIAME;
3000 		}
3001 	}
3002 	/* Enable specific interrupts for 82599  */
3003 	if (hw->mac.type == ixgbe_mac_82599EB) {
3004 		gpie |= IXGBE_SDP2_GPIEN; /* pluggable optics intr */
3005 		gpie |= IXGBE_SDP1_GPIEN; /* LSC interrupt */
3006 	}
3007 
3008 	/* write to interrupt control registers */
3009 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3010 	IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3011 	IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3012 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3013 	IXGBE_WRITE_FLUSH(hw);
3014 }
3015 
3016 /*
3017  * ixgbe_loopback_ioctl - Loopback support.
3018  */
3019 enum ioc_reply
3020 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
3021 {
3022 	lb_info_sz_t *lbsp;
3023 	lb_property_t *lbpp;
3024 	uint32_t *lbmp;
3025 	uint32_t size;
3026 	uint32_t value;
3027 
3028 	if (mp->b_cont == NULL)
3029 		return (IOC_INVAL);
3030 
3031 	switch (iocp->ioc_cmd) {
3032 	default:
3033 		return (IOC_INVAL);
3034 
3035 	case LB_GET_INFO_SIZE:
3036 		size = sizeof (lb_info_sz_t);
3037 		if (iocp->ioc_count != size)
3038 			return (IOC_INVAL);
3039 
3040 		value = sizeof (lb_normal);
3041 		value += sizeof (lb_mac);
3042 
3043 		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
3044 		*lbsp = value;
3045 		break;
3046 
3047 	case LB_GET_INFO:
3048 		value = sizeof (lb_normal);
3049 		value += sizeof (lb_mac);
3050 
3051 		size = value;
3052 		if (iocp->ioc_count != size)
3053 			return (IOC_INVAL);
3054 
3055 		value = 0;
3056 		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
3057 
3058 		lbpp[value++] = lb_normal;
3059 		lbpp[value++] = lb_mac;
3060 		break;
3061 
3062 	case LB_GET_MODE:
3063 		size = sizeof (uint32_t);
3064 		if (iocp->ioc_count != size)
3065 			return (IOC_INVAL);
3066 
3067 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3068 		*lbmp = ixgbe->loopback_mode;
3069 		break;
3070 
3071 	case LB_SET_MODE:
3072 		size = 0;
3073 		if (iocp->ioc_count != sizeof (uint32_t))
3074 			return (IOC_INVAL);
3075 
3076 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3077 		if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
3078 			return (IOC_INVAL);
3079 		break;
3080 	}
3081 
3082 	iocp->ioc_count = size;
3083 	iocp->ioc_error = 0;
3084 
3085 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3086 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3087 		return (IOC_INVAL);
3088 	}
3089 
3090 	return (IOC_REPLY);
3091 }
3092 
3093 /*
3094  * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
3095  */
3096 static boolean_t
3097 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
3098 {
3099 	struct ixgbe_hw *hw;
3100 
3101 	if (mode == ixgbe->loopback_mode)
3102 		return (B_TRUE);
3103 
3104 	hw = &ixgbe->hw;
3105 
3106 	ixgbe->loopback_mode = mode;
3107 
3108 	if (mode == IXGBE_LB_NONE) {
3109 		/*
3110 		 * Reset the chip
3111 		 */
3112 		hw->phy.autoneg_wait_to_complete = B_TRUE;
3113 		(void) ixgbe_reset(ixgbe);
3114 		hw->phy.autoneg_wait_to_complete = B_FALSE;
3115 		return (B_TRUE);
3116 	}
3117 
3118 	mutex_enter(&ixgbe->gen_lock);
3119 
3120 	switch (mode) {
3121 	default:
3122 		mutex_exit(&ixgbe->gen_lock);
3123 		return (B_FALSE);
3124 
3125 	case IXGBE_LB_INTERNAL_MAC:
3126 		ixgbe_set_internal_mac_loopback(ixgbe);
3127 		break;
3128 	}
3129 
3130 	mutex_exit(&ixgbe->gen_lock);
3131 
3132 	return (B_TRUE);
3133 }
3134 
3135 /*
3136  * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
3137  */
3138 static void
3139 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
3140 {
3141 	struct ixgbe_hw *hw;
3142 	uint32_t reg;
3143 	uint8_t atlas;
3144 
3145 	hw = &ixgbe->hw;
3146 
3147 	/*
3148 	 * Setup MAC loopback
3149 	 */
3150 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
3151 	reg |= IXGBE_HLREG0_LPBK;
3152 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
3153 
3154 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
3155 	reg &= ~IXGBE_AUTOC_LMS_MASK;
3156 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
3157 
3158 	/*
3159 	 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
3160 	 */
3161 	if (hw->mac.type == ixgbe_mac_82598EB) {
3162 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3163 		    &atlas);
3164 		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
3165 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3166 		    atlas);
3167 
3168 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3169 		    &atlas);
3170 		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
3171 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3172 		    atlas);
3173 
3174 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3175 		    &atlas);
3176 		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
3177 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3178 		    atlas);
3179 
3180 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3181 		    &atlas);
3182 		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
3183 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3184 		    atlas);
3185 	}
3186 }
3187 
3188 #pragma inline(ixgbe_intr_rx_work)
3189 /*
3190  * ixgbe_intr_rx_work - RX processing of ISR.
3191  */
3192 static void
3193 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
3194 {
3195 	mblk_t *mp;
3196 
3197 	mutex_enter(&rx_ring->rx_lock);
3198 
3199 	mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
3200 	mutex_exit(&rx_ring->rx_lock);
3201 
3202 	if (mp != NULL)
3203 		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
3204 		    rx_ring->ring_gen_num);
3205 }
3206 
3207 #pragma inline(ixgbe_intr_tx_work)
3208 /*
3209  * ixgbe_intr_tx_work - TX processing of ISR.
3210  */
3211 static void
3212 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
3213 {
3214 	ixgbe_t *ixgbe = tx_ring->ixgbe;
3215 
3216 	/*
3217 	 * Recycle the tx descriptors
3218 	 */
3219 	tx_ring->tx_recycle(tx_ring);
3220 
3221 	/*
3222 	 * Schedule the re-transmit
3223 	 */
3224 	if (tx_ring->reschedule &&
3225 	    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
3226 		tx_ring->reschedule = B_FALSE;
3227 		mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
3228 		    tx_ring->ring_handle);
3229 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
3230 	}
3231 }
3232 
#pragma inline(ixgbe_intr_other_work)
/*
 * ixgbe_intr_other_work - Process interrupt types other than tx/rx.
 *
 * Called from the ISR with the EICR cause bits; heavyweight handling
 * (link check, SFP setup) is deferred to the lsc taskq.
 */
static void
ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	/*
	 * dispatch taskq to handle link status change
	 */
	if (eicr & IXGBE_EICR_LSC) {
		if ((ddi_taskq_dispatch(ixgbe->lsc_taskq,
		    ixgbe_driver_link_check, (void *)ixgbe, DDI_NOSLEEP))
		    != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "Fail to dispatch taskq");
		}
	}

	/*
	 * check for fan failure on adapters with fans
	 * (82598-class only: SDP1 means SFP events on 82599)
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		if (hw->mac.type < ixgbe_mac_82599EB) {
			ixgbe_log(ixgbe,
			    "Fan has stopped, replace the adapter\n");

			/* re-enable the interrupt, which was automasked */
			ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
		}
	}

	/*
	 * Do SFP check for 82599
	 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		if ((ddi_taskq_dispatch(ixgbe->lsc_taskq,
		    ixgbe_sfp_check, (void *)ixgbe,
		    DDI_NOSLEEP)) != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "No memory available to dispatch "
			    "taskq for SFP check");
		}
	}
}
3278 
3279 /*
3280  * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
3281  */
3282 static uint_t
3283 ixgbe_intr_legacy(void *arg1, void *arg2)
3284 {
3285 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3286 	struct ixgbe_hw *hw = &ixgbe->hw;
3287 	ixgbe_tx_ring_t *tx_ring;
3288 	ixgbe_rx_ring_t *rx_ring;
3289 	uint32_t eicr;
3290 	mblk_t *mp;
3291 	boolean_t tx_reschedule;
3292 	uint_t result;
3293 
3294 	_NOTE(ARGUNUSED(arg2));
3295 
3296 	mutex_enter(&ixgbe->gen_lock);
3297 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
3298 		mutex_exit(&ixgbe->gen_lock);
3299 		return (DDI_INTR_UNCLAIMED);
3300 	}
3301 
3302 	mp = NULL;
3303 	tx_reschedule = B_FALSE;
3304 
3305 	/*
3306 	 * Any bit set in eicr: claim this interrupt
3307 	 */
3308 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3309 	if (eicr) {
3310 		/*
3311 		 * For legacy interrupt, we have only one interrupt,
3312 		 * so we have only one rx ring and one tx ring enabled.
3313 		 */
3314 		ASSERT(ixgbe->num_rx_rings == 1);
3315 		ASSERT(ixgbe->num_tx_rings == 1);
3316 
3317 		/*
3318 		 * For legacy interrupt, rx rings[0] will use RTxQ[0].
3319 		 */
3320 		if (eicr & 0x1) {
3321 			ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
3322 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
3323 			ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
3324 			/*
3325 			 * Clean the rx descriptors
3326 			 */
3327 			rx_ring = &ixgbe->rx_rings[0];
3328 			mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
3329 		}
3330 
3331 		/*
3332 		 * For legacy interrupt, tx rings[0] will use RTxQ[1].
3333 		 */
3334 		if (eicr & 0x2) {
3335 			/*
3336 			 * Recycle the tx descriptors
3337 			 */
3338 			tx_ring = &ixgbe->tx_rings[0];
3339 			tx_ring->tx_recycle(tx_ring);
3340 
3341 			/*
3342 			 * Schedule the re-transmit
3343 			 */
3344 			tx_reschedule = (tx_ring->reschedule &&
3345 			    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
3346 		}
3347 
3348 		/* any interrupt type other than tx/rx */
3349 		if (eicr & ixgbe->capab->other_intr) {
3350 			if (hw->mac.type < ixgbe_mac_82599EB) {
3351 				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3352 			}
3353 			if (hw->mac.type == ixgbe_mac_82599EB) {
3354 				ixgbe->eimc = IXGBE_82599_OTHER_INTR;
3355 				IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
3356 			}
3357 			ixgbe_intr_other_work(ixgbe, eicr);
3358 			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3359 		}
3360 
3361 		mutex_exit(&ixgbe->gen_lock);
3362 
3363 		result = DDI_INTR_CLAIMED;
3364 	} else {
3365 		mutex_exit(&ixgbe->gen_lock);
3366 
3367 		/*
3368 		 * No interrupt cause bits set: don't claim this interrupt.
3369 		 */
3370 		result = DDI_INTR_UNCLAIMED;
3371 	}
3372 
3373 	/* re-enable the interrupts which were automasked */
3374 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3375 
3376 	/*
3377 	 * Do the following work outside of the gen_lock
3378 	 */
3379 	if (mp != NULL) {
3380 		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
3381 		    rx_ring->ring_gen_num);
3382 	}
3383 
3384 	if (tx_reschedule)  {
3385 		tx_ring->reschedule = B_FALSE;
3386 		mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
3387 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
3388 	}
3389 
3390 	return (result);
3391 }
3392 
3393 /*
3394  * ixgbe_intr_msi - Interrupt handler for MSI.
3395  */
3396 static uint_t
3397 ixgbe_intr_msi(void *arg1, void *arg2)
3398 {
3399 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3400 	struct ixgbe_hw *hw = &ixgbe->hw;
3401 	uint32_t eicr;
3402 
3403 	_NOTE(ARGUNUSED(arg2));
3404 
3405 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3406 
3407 	/*
3408 	 * For MSI interrupt, we have only one vector,
3409 	 * so we have only one rx ring and one tx ring enabled.
3410 	 */
3411 	ASSERT(ixgbe->num_rx_rings == 1);
3412 	ASSERT(ixgbe->num_tx_rings == 1);
3413 
3414 	/*
3415 	 * For MSI interrupt, rx rings[0] will use RTxQ[0].
3416 	 */
3417 	if (eicr & 0x1) {
3418 		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
3419 	}
3420 
3421 	/*
3422 	 * For MSI interrupt, tx rings[0] will use RTxQ[1].
3423 	 */
3424 	if (eicr & 0x2) {
3425 		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
3426 	}
3427 
3428 	/* any interrupt type other than tx/rx */
3429 	if (eicr & ixgbe->capab->other_intr) {
3430 		mutex_enter(&ixgbe->gen_lock);
3431 		if (hw->mac.type < ixgbe_mac_82599EB) {
3432 			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3433 		}
3434 		if (hw->mac.type == ixgbe_mac_82599EB) {
3435 			ixgbe->eimc = IXGBE_82599_OTHER_INTR;
3436 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
3437 		}
3438 		ixgbe_intr_other_work(ixgbe, eicr);
3439 		ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3440 		mutex_exit(&ixgbe->gen_lock);
3441 	}
3442 
3443 	/* re-enable the interrupts which were automasked */
3444 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3445 
3446 	return (DDI_INTR_CLAIMED);
3447 }
3448 
3449 /*
3450  * ixgbe_intr_msix - Interrupt handler for MSI-X.
3451  */
3452 static uint_t
3453 ixgbe_intr_msix(void *arg1, void *arg2)
3454 {
3455 	ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
3456 	ixgbe_t *ixgbe = vect->ixgbe;
3457 	struct ixgbe_hw *hw = &ixgbe->hw;
3458 	uint32_t eicr;
3459 	int r_idx = 0;
3460 
3461 	_NOTE(ARGUNUSED(arg2));
3462 
3463 	/*
3464 	 * Clean each rx ring that has its bit set in the map
3465 	 */
3466 	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
3467 	while (r_idx >= 0) {
3468 		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
3469 		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
3470 		    (ixgbe->num_rx_rings - 1));
3471 	}
3472 
3473 	/*
3474 	 * Clean each tx ring that has its bit set in the map
3475 	 */
3476 	r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
3477 	while (r_idx >= 0) {
3478 		ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
3479 		r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
3480 		    (ixgbe->num_tx_rings - 1));
3481 	}
3482 
3483 
3484 	/*
3485 	 * Clean other interrupt (link change) that has its bit set in the map
3486 	 */
3487 	if (BT_TEST(vect->other_map, 0) == 1) {
3488 		eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3489 
3490 		/*
3491 		 * Need check cause bits and only other causes will
3492 		 * be processed
3493 		 */
3494 		/* any interrupt type other than tx/rx */
3495 		if (eicr & ixgbe->capab->other_intr) {
3496 			if (hw->mac.type < ixgbe_mac_82599EB) {
3497 				mutex_enter(&ixgbe->gen_lock);
3498 				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3499 				ixgbe_intr_other_work(ixgbe, eicr);
3500 				mutex_exit(&ixgbe->gen_lock);
3501 			} else {
3502 				if (hw->mac.type == ixgbe_mac_82599EB) {
3503 					mutex_enter(&ixgbe->gen_lock);
3504 					ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
3505 					ixgbe_intr_other_work(ixgbe, eicr);
3506 					mutex_exit(&ixgbe->gen_lock);
3507 				}
3508 			}
3509 		}
3510 
3511 		/* re-enable the interrupts which were automasked */
3512 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3513 	}
3514 
3515 	return (DDI_INTR_CLAIMED);
3516 }
3517 
3518 /*
3519  * ixgbe_alloc_intrs - Allocate interrupts for the driver.
3520  *
3521  * Normal sequence is to try MSI-X; if not sucessful, try MSI;
3522  * if not successful, try Legacy.
3523  * ixgbe->intr_force can be used to force sequence to start with
3524  * any of the 3 types.
3525  * If MSI-X is not used, number of tx/rx rings is forced to 1.
3526  */
3527 static int
3528 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
3529 {
3530 	dev_info_t *devinfo;
3531 	int intr_types;
3532 	int rc;
3533 
3534 	devinfo = ixgbe->dip;
3535 
3536 	/*
3537 	 * Get supported interrupt types
3538 	 */
3539 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
3540 
3541 	if (rc != DDI_SUCCESS) {
3542 		ixgbe_log(ixgbe,
3543 		    "Get supported interrupt types failed: %d", rc);
3544 		return (IXGBE_FAILURE);
3545 	}
3546 	IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
3547 
3548 	ixgbe->intr_type = 0;
3549 
3550 	/*
3551 	 * Install MSI-X interrupts
3552 	 */
3553 	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
3554 	    (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
3555 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
3556 		if (rc == IXGBE_SUCCESS)
3557 			return (IXGBE_SUCCESS);
3558 
3559 		ixgbe_log(ixgbe,
3560 		    "Allocate MSI-X failed, trying MSI interrupts...");
3561 	}
3562 
3563 	/*
3564 	 * MSI-X not used, force rings and groups to 1
3565 	 */
3566 	ixgbe->num_rx_rings = 1;
3567 	ixgbe->num_rx_groups = 1;
3568 	ixgbe->num_tx_rings = 1;
3569 	ixgbe_log(ixgbe,
3570 	    "MSI-X not used, force rings and groups number to 1");
3571 
3572 	/*
3573 	 * Install MSI interrupts
3574 	 */
3575 	if ((intr_types & DDI_INTR_TYPE_MSI) &&
3576 	    (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
3577 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
3578 		if (rc == IXGBE_SUCCESS)
3579 			return (IXGBE_SUCCESS);
3580 
3581 		ixgbe_log(ixgbe,
3582 		    "Allocate MSI failed, trying Legacy interrupts...");
3583 	}
3584 
3585 	/*
3586 	 * Install legacy interrupts
3587 	 */
3588 	if (intr_types & DDI_INTR_TYPE_FIXED) {
3589 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
3590 		if (rc == IXGBE_SUCCESS)
3591 			return (IXGBE_SUCCESS);
3592 
3593 		ixgbe_log(ixgbe,
3594 		    "Allocate Legacy interrupts failed");
3595 	}
3596 
3597 	/*
3598 	 * If none of the 3 types succeeded, return failure
3599 	 */
3600 	return (IXGBE_FAILURE);
3601 }
3602 
3603 /*
3604  * ixgbe_alloc_intr_handles - Allocate interrupt handles.
3605  *
3606  * For legacy and MSI, only 1 handle is needed.  For MSI-X,
3607  * if fewer than 2 handles are available, return failure.
3608  * Upon success, this maps the vectors to rx and tx rings for
3609  * interrupts.
3610  */
3611 static int
3612 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
3613 {
3614 	dev_info_t *devinfo;
3615 	int request, count, avail, actual;
3616 	int minimum;
3617 	int rc;
3618 
3619 	devinfo = ixgbe->dip;
3620 
3621 	switch (intr_type) {
3622 	case DDI_INTR_TYPE_FIXED:
3623 		request = 1;	/* Request 1 legacy interrupt handle */
3624 		minimum = 1;
3625 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
3626 		break;
3627 
3628 	case DDI_INTR_TYPE_MSI:
3629 		request = 1;	/* Request 1 MSI interrupt handle */
3630 		minimum = 1;
3631 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
3632 		break;
3633 
3634 	case DDI_INTR_TYPE_MSIX:
3635 		/*
3636 		 * Best number of vectors for the adapter is
3637 		 * # rx rings + # tx rings.
3638 		 */
3639 		request = ixgbe->num_rx_rings + ixgbe->num_tx_rings;
3640 		if (request > ixgbe->capab->max_ring_vect)
3641 			request = ixgbe->capab->max_ring_vect;
3642 		minimum = 2;
3643 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
3644 		break;
3645 
3646 	default:
3647 		ixgbe_log(ixgbe,
3648 		    "invalid call to ixgbe_alloc_intr_handles(): %d\n",
3649 		    intr_type);
3650 		return (IXGBE_FAILURE);
3651 	}
3652 	IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d  minimum: %d",
3653 	    request, minimum);
3654 
3655 	/*
3656 	 * Get number of supported interrupts
3657 	 */
3658 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
3659 	if ((rc != DDI_SUCCESS) || (count < minimum)) {
3660 		ixgbe_log(ixgbe,
3661 		    "Get interrupt number failed. Return: %d, count: %d",
3662 		    rc, count);
3663 		return (IXGBE_FAILURE);
3664 	}
3665 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
3666 
3667 	/*
3668 	 * Get number of available interrupts
3669 	 */
3670 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
3671 	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
3672 		ixgbe_log(ixgbe,
3673 		    "Get interrupt available number failed. "
3674 		    "Return: %d, available: %d", rc, avail);
3675 		return (IXGBE_FAILURE);
3676 	}
3677 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts available: %d", avail);
3678 
3679 	if (avail < request) {
3680 		ixgbe_log(ixgbe, "Request %d handles, %d available",
3681 		    request, avail);
3682 		request = avail;
3683 	}
3684 
3685 	actual = 0;
3686 	ixgbe->intr_cnt = 0;
3687 
3688 	/*
3689 	 * Allocate an array of interrupt handles
3690 	 */
3691 	ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
3692 	ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
3693 
3694 	rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
3695 	    request, &actual, DDI_INTR_ALLOC_NORMAL);
3696 	if (rc != DDI_SUCCESS) {
3697 		ixgbe_log(ixgbe, "Allocate interrupts failed. "
3698 		    "return: %d, request: %d, actual: %d",
3699 		    rc, request, actual);
3700 		goto alloc_handle_fail;
3701 	}
3702 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
3703 
3704 	ixgbe->intr_cnt = actual;
3705 
3706 	/*
3707 	 * Now we know the actual number of vectors.  Here we map the vector
3708 	 * to other, rx rings and tx ring.
3709 	 */
3710 	if (actual < minimum) {
3711 		ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
3712 		    actual);
3713 		goto alloc_handle_fail;
3714 	}
3715 
3716 	/*
3717 	 * Get priority for first vector, assume remaining are all the same
3718 	 */
3719 	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
3720 	if (rc != DDI_SUCCESS) {
3721 		ixgbe_log(ixgbe,
3722 		    "Get interrupt priority failed: %d", rc);
3723 		goto alloc_handle_fail;
3724 	}
3725 
3726 	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
3727 	if (rc != DDI_SUCCESS) {
3728 		ixgbe_log(ixgbe,
3729 		    "Get interrupt cap failed: %d", rc);
3730 		goto alloc_handle_fail;
3731 	}
3732 
3733 	ixgbe->intr_type = intr_type;
3734 
3735 	return (IXGBE_SUCCESS);
3736 
3737 alloc_handle_fail:
3738 	ixgbe_rem_intrs(ixgbe);
3739 
3740 	return (IXGBE_FAILURE);
3741 }
3742 
3743 /*
3744  * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
3745  *
3746  * Before adding the interrupt handlers, the interrupt vectors have
3747  * been allocated, and the rx/tx rings have also been allocated.
3748  */
3749 static int
3750 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
3751 {
3752 	int vector = 0;
3753 	int rc;
3754 
3755 	switch (ixgbe->intr_type) {
3756 	case DDI_INTR_TYPE_MSIX:
3757 		/*
3758 		 * Add interrupt handler for all vectors
3759 		 */
3760 		for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
3761 			/*
3762 			 * install pointer to vect_map[vector]
3763 			 */
3764 			rc = ddi_intr_add_handler(ixgbe->htable[vector],
3765 			    (ddi_intr_handler_t *)ixgbe_intr_msix,
3766 			    (void *)&ixgbe->vect_map[vector], NULL);
3767 
3768 			if (rc != DDI_SUCCESS) {
3769 				ixgbe_log(ixgbe,
3770 				    "Add rx interrupt handler failed. "
3771 				    "return: %d, vector: %d", rc, vector);
3772 				for (vector--; vector >= 0; vector--) {
3773 					(void) ddi_intr_remove_handler(
3774 					    ixgbe->htable[vector]);
3775 				}
3776 				return (IXGBE_FAILURE);
3777 			}
3778 		}
3779 
3780 		break;
3781 
3782 	case DDI_INTR_TYPE_MSI:
3783 		/*
3784 		 * Add interrupt handlers for the only vector
3785 		 */
3786 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
3787 		    (ddi_intr_handler_t *)ixgbe_intr_msi,
3788 		    (void *)ixgbe, NULL);
3789 
3790 		if (rc != DDI_SUCCESS) {
3791 			ixgbe_log(ixgbe,
3792 			    "Add MSI interrupt handler failed: %d", rc);
3793 			return (IXGBE_FAILURE);
3794 		}
3795 
3796 		break;
3797 
3798 	case DDI_INTR_TYPE_FIXED:
3799 		/*
3800 		 * Add interrupt handlers for the only vector
3801 		 */
3802 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
3803 		    (ddi_intr_handler_t *)ixgbe_intr_legacy,
3804 		    (void *)ixgbe, NULL);
3805 
3806 		if (rc != DDI_SUCCESS) {
3807 			ixgbe_log(ixgbe,
3808 			    "Add legacy interrupt handler failed: %d", rc);
3809 			return (IXGBE_FAILURE);
3810 		}
3811 
3812 		break;
3813 
3814 	default:
3815 		return (IXGBE_FAILURE);
3816 	}
3817 
3818 	return (IXGBE_SUCCESS);
3819 }
3820 
3821 #pragma inline(ixgbe_map_rxring_to_vector)
3822 /*
3823  * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
3824  */
3825 static void
3826 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
3827 {
3828 	/*
3829 	 * Set bit in map
3830 	 */
3831 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
3832 
3833 	/*
3834 	 * Count bits set
3835 	 */
3836 	ixgbe->vect_map[v_idx].rxr_cnt++;
3837 
3838 	/*
3839 	 * Remember bit position
3840 	 */
3841 	ixgbe->rx_rings[r_idx].intr_vector = v_idx;
3842 	ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
3843 }
3844 
3845 #pragma inline(ixgbe_map_txring_to_vector)
3846 /*
3847  * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
3848  */
3849 static void
3850 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
3851 {
3852 	/*
3853 	 * Set bit in map
3854 	 */
3855 	BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
3856 
3857 	/*
3858 	 * Count bits set
3859 	 */
3860 	ixgbe->vect_map[v_idx].txr_cnt++;
3861 
3862 	/*
3863 	 * Remember bit position
3864 	 */
3865 	ixgbe->tx_rings[t_idx].intr_vector = v_idx;
3866 	ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
3867 }
3868 
3869 /*
3870  * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
3871  * allocation register (IVAR).
3872  * cause:
3873  *   -1 : other cause
3874  *    0 : rx
3875  *    1 : tx
3876  */
3877 static void
3878 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
3879     int8_t cause)
3880 {
3881 	struct ixgbe_hw *hw = &ixgbe->hw;
3882 	u32 ivar, index;
3883 
3884 	switch (hw->mac.type) {
3885 	case ixgbe_mac_82598EB:
3886 		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
3887 		if (cause == -1) {
3888 			cause = 0;
3889 		}
3890 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
3891 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3892 		ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
3893 		ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
3894 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3895 		break;
3896 	case ixgbe_mac_82599EB:
3897 		if (cause == -1) {
3898 			/* other causes */
3899 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
3900 			index = (intr_alloc_entry & 1) * 8;
3901 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3902 			ivar &= ~(0xFF << index);
3903 			ivar |= (msix_vector << index);
3904 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3905 		} else {
3906 			/* tx or rx causes */
3907 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
3908 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
3909 			ivar = IXGBE_READ_REG(hw,
3910 			    IXGBE_IVAR(intr_alloc_entry >> 1));
3911 			ivar &= ~(0xFF << index);
3912 			ivar |= (msix_vector << index);
3913 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
3914 			    ivar);
3915 		}
3916 		break;
3917 	default:
3918 		break;
3919 	}
3920 }
3921 
3922 /*
3923  * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
3924  * given interrupt vector allocation register (IVAR).
3925  * cause:
3926  *   -1 : other cause
3927  *    0 : rx
3928  *    1 : tx
3929  */
3930 static void
3931 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
3932 {
3933 	struct ixgbe_hw *hw = &ixgbe->hw;
3934 	u32 ivar, index;
3935 
3936 	switch (hw->mac.type) {
3937 	case ixgbe_mac_82598EB:
3938 		if (cause == -1) {
3939 			cause = 0;
3940 		}
3941 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
3942 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3943 		ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
3944 		    (intr_alloc_entry & 0x3)));
3945 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3946 		break;
3947 	case ixgbe_mac_82599EB:
3948 		if (cause == -1) {
3949 			/* other causes */
3950 			index = (intr_alloc_entry & 1) * 8;
3951 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3952 			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
3953 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3954 		} else {
3955 			/* tx or rx causes */
3956 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
3957 			ivar = IXGBE_READ_REG(hw,
3958 			    IXGBE_IVAR(intr_alloc_entry >> 1));
3959 			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
3960 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
3961 			    ivar);
3962 		}
3963 		break;
3964 	default:
3965 		break;
3966 	}
3967 }
3968 
3969 /*
3970  * ixgbe_disable_ivar - Disble the given entry by clearing the VAL bit of
3971  * given interrupt vector allocation register (IVAR).
3972  * cause:
3973  *   -1 : other cause
3974  *    0 : rx
3975  *    1 : tx
3976  */
3977 static void
3978 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
3979 {
3980 	struct ixgbe_hw *hw = &ixgbe->hw;
3981 	u32 ivar, index;
3982 
3983 	switch (hw->mac.type) {
3984 	case ixgbe_mac_82598EB:
3985 		if (cause == -1) {
3986 			cause = 0;
3987 		}
3988 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
3989 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3990 		ivar &= ~(IXGBE_IVAR_ALLOC_VAL<< (8 *
3991 		    (intr_alloc_entry & 0x3)));
3992 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3993 		break;
3994 	case ixgbe_mac_82599EB:
3995 		if (cause == -1) {
3996 			/* other causes */
3997 			index = (intr_alloc_entry & 1) * 8;
3998 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3999 			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4000 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4001 		} else {
4002 			/* tx or rx causes */
4003 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4004 			ivar = IXGBE_READ_REG(hw,
4005 			    IXGBE_IVAR(intr_alloc_entry >> 1));
4006 			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4007 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4008 			    ivar);
4009 		}
4010 		break;
4011 	default:
4012 		break;
4013 	}
4014 }
4015 
4016 /*
4017  * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
4018  *
4019  * For MSI-X, here will map rx interrupt, tx interrupt and other interrupt
4020  * to vector[0 - (intr_cnt -1)].
4021  */
4022 static int
4023 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
4024 {
4025 	int i, vector = 0;
4026 
4027 	/* initialize vector map */
4028 	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
4029 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4030 		ixgbe->vect_map[i].ixgbe = ixgbe;
4031 	}
4032 
4033 	/*
4034 	 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
4035 	 * tx rings[0] on RTxQ[1].
4036 	 */
4037 	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
4038 		ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
4039 		ixgbe_map_txring_to_vector(ixgbe, 0, 1);
4040 		return (IXGBE_SUCCESS);
4041 	}
4042 
4043 	/*
4044 	 * Interrupts/vectors mapping for MSI-X
4045 	 */
4046 
4047 	/*
4048 	 * Map other interrupt to vector 0,
4049 	 * Set bit in map and count the bits set.
4050 	 */
4051 	BT_SET(ixgbe->vect_map[vector].other_map, 0);
4052 	ixgbe->vect_map[vector].other_cnt++;
4053 	vector++;
4054 
4055 	/*
4056 	 * Map rx ring interrupts to vectors
4057 	 */
4058 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
4059 		ixgbe_map_rxring_to_vector(ixgbe, i, vector);
4060 		vector = (vector +1) % ixgbe->intr_cnt;
4061 	}
4062 
4063 	/*
4064 	 * Map tx ring interrupts to vectors
4065 	 */
4066 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
4067 		ixgbe_map_txring_to_vector(ixgbe, i, vector);
4068 		vector = (vector +1) % ixgbe->intr_cnt;
4069 	}
4070 
4071 	return (IXGBE_SUCCESS);
4072 }
4073 
4074 /*
4075  * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
4076  *
4077  * This relies on ring/vector mapping already set up in the
4078  * vect_map[] structures
4079  */
4080 static void
4081 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
4082 {
4083 	struct ixgbe_hw *hw = &ixgbe->hw;
4084 	ixgbe_intr_vector_t *vect;	/* vector bitmap */
4085 	int r_idx;	/* ring index */
4086 	int v_idx;	/* vector index */
4087 
4088 	/*
4089 	 * Clear any previous entries
4090 	 */
4091 	switch (hw->mac.type) {
4092 	case ixgbe_mac_82598EB:
4093 		for (v_idx = 0; v_idx < 25; v_idx++)
4094 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4095 
4096 		break;
4097 	case ixgbe_mac_82599EB:
4098 		for (v_idx = 0; v_idx < 64; v_idx++)
4099 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4100 		IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
4101 
4102 		break;
4103 	default:
4104 		break;
4105 	}
4106 
4107 	/*
4108 	 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
4109 	 * tx rings[0] will use RTxQ[1].
4110 	 */
4111 	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
4112 		ixgbe_setup_ivar(ixgbe, 0, 0, 0);
4113 		ixgbe_setup_ivar(ixgbe, 0, 1, 1);
4114 		return;
4115 	}
4116 
4117 	/*
4118 	 * For MSI-X interrupt, "Other" is always on vector[0].
4119 	 */
4120 	ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);
4121 
4122 	/*
4123 	 * For each interrupt vector, populate the IVAR table
4124 	 */
4125 	for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
4126 		vect = &ixgbe->vect_map[v_idx];
4127 
4128 		/*
4129 		 * For each rx ring bit set
4130 		 */
4131 		r_idx = bt_getlowbit(vect->rx_map, 0,
4132 		    (ixgbe->num_rx_rings - 1));
4133 
4134 		while (r_idx >= 0) {
4135 			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 0);
4136 			r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
4137 			    (ixgbe->num_rx_rings - 1));
4138 		}
4139 
4140 		/*
4141 		 * For each tx ring bit set
4142 		 */
4143 		r_idx = bt_getlowbit(vect->tx_map, 0,
4144 		    (ixgbe->num_tx_rings - 1));
4145 
4146 		while (r_idx >= 0) {
4147 			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
4148 			r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
4149 			    (ixgbe->num_tx_rings - 1));
4150 		}
4151 	}
4152 }
4153 
4154 /*
4155  * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
4156  */
4157 static void
4158 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
4159 {
4160 	int i;
4161 	int rc;
4162 
4163 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4164 		rc = ddi_intr_remove_handler(ixgbe->htable[i]);
4165 		if (rc != DDI_SUCCESS) {
4166 			IXGBE_DEBUGLOG_1(ixgbe,
4167 			    "Remove intr handler failed: %d", rc);
4168 		}
4169 	}
4170 }
4171 
4172 /*
4173  * ixgbe_rem_intrs - Remove the allocated interrupts.
4174  */
4175 static void
4176 ixgbe_rem_intrs(ixgbe_t *ixgbe)
4177 {
4178 	int i;
4179 	int rc;
4180 
4181 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4182 		rc = ddi_intr_free(ixgbe->htable[i]);
4183 		if (rc != DDI_SUCCESS) {
4184 			IXGBE_DEBUGLOG_1(ixgbe,
4185 			    "Free intr failed: %d", rc);
4186 		}
4187 	}
4188 
4189 	kmem_free(ixgbe->htable, ixgbe->intr_size);
4190 	ixgbe->htable = NULL;
4191 }
4192 
4193 /*
4194  * ixgbe_enable_intrs - Enable all the ddi interrupts.
4195  */
4196 static int
4197 ixgbe_enable_intrs(ixgbe_t *ixgbe)
4198 {
4199 	int i;
4200 	int rc;
4201 
4202 	/*
4203 	 * Enable interrupts
4204 	 */
4205 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
4206 		/*
4207 		 * Call ddi_intr_block_enable() for MSI
4208 		 */
4209 		rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
4210 		if (rc != DDI_SUCCESS) {
4211 			ixgbe_log(ixgbe,
4212 			    "Enable block intr failed: %d", rc);
4213 			return (IXGBE_FAILURE);
4214 		}
4215 	} else {
4216 		/*
4217 		 * Call ddi_intr_enable() for Legacy/MSI non block enable
4218 		 */
4219 		for (i = 0; i < ixgbe->intr_cnt; i++) {
4220 			rc = ddi_intr_enable(ixgbe->htable[i]);
4221 			if (rc != DDI_SUCCESS) {
4222 				ixgbe_log(ixgbe,
4223 				    "Enable intr failed: %d", rc);
4224 				return (IXGBE_FAILURE);
4225 			}
4226 		}
4227 	}
4228 
4229 	return (IXGBE_SUCCESS);
4230 }
4231 
4232 /*
4233  * ixgbe_disable_intrs - Disable all the interrupts.
4234  */
4235 static int
4236 ixgbe_disable_intrs(ixgbe_t *ixgbe)
4237 {
4238 	int i;
4239 	int rc;
4240 
4241 	/*
4242 	 * Disable all interrupts
4243 	 */
4244 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
4245 		rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
4246 		if (rc != DDI_SUCCESS) {
4247 			ixgbe_log(ixgbe,
4248 			    "Disable block intr failed: %d", rc);
4249 			return (IXGBE_FAILURE);
4250 		}
4251 	} else {
4252 		for (i = 0; i < ixgbe->intr_cnt; i++) {
4253 			rc = ddi_intr_disable(ixgbe->htable[i]);
4254 			if (rc != DDI_SUCCESS) {
4255 				ixgbe_log(ixgbe,
4256 				    "Disable intr failed: %d", rc);
4257 				return (IXGBE_FAILURE);
4258 			}
4259 		}
4260 	}
4261 
4262 	return (IXGBE_SUCCESS);
4263 }
4264 
4265 /*
4266  * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
4267  */
4268 static void
4269 ixgbe_get_hw_state(ixgbe_t *ixgbe)
4270 {
4271 	struct ixgbe_hw *hw = &ixgbe->hw;
4272 	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
4273 	boolean_t link_up = B_FALSE;
4274 	uint32_t pcs1g_anlp = 0;
4275 	uint32_t pcs1g_ana = 0;
4276 
4277 	ASSERT(mutex_owned(&ixgbe->gen_lock));
4278 	ixgbe->param_lp_1000fdx_cap = 0;
4279 	ixgbe->param_lp_100fdx_cap  = 0;
4280 
4281 	/* check for link, don't wait */
4282 	(void) ixgbe_check_link(hw, &speed, &link_up, false);
4283 	if (link_up) {
4284 		pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
4285 		pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
4286 
4287 		ixgbe->param_lp_1000fdx_cap =
4288 		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
4289 		ixgbe->param_lp_100fdx_cap =
4290 		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
4291 	}
4292 
4293 	ixgbe->param_adv_1000fdx_cap =
4294 	    (pcs1g_ana & IXGBE_PCS1GANA_FDC)  ? 1 : 0;
4295 	ixgbe->param_adv_100fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC)  ? 1 : 0;
4296 }
4297 
4298 /*
4299  * ixgbe_get_driver_control - Notify that driver is in control of device.
4300  */
4301 static void
4302 ixgbe_get_driver_control(struct ixgbe_hw *hw)
4303 {
4304 	uint32_t ctrl_ext;
4305 
4306 	/*
4307 	 * Notify firmware that driver is in control of device
4308 	 */
4309 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4310 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
4311 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4312 }
4313 
4314 /*
4315  * ixgbe_release_driver_control - Notify that driver is no longer in control
4316  * of device.
4317  */
4318 static void
4319 ixgbe_release_driver_control(struct ixgbe_hw *hw)
4320 {
4321 	uint32_t ctrl_ext;
4322 
4323 	/*
4324 	 * Notify firmware that driver is no longer in control of device
4325 	 */
4326 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4327 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
4328 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4329 }
4330 
4331 /*
4332  * ixgbe_atomic_reserve - Atomic decrease operation.
4333  */
4334 int
4335 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
4336 {
4337 	uint32_t oldval;
4338 	uint32_t newval;
4339 
4340 	/*
4341 	 * ATOMICALLY
4342 	 */
4343 	do {
4344 		oldval = *count_p;
4345 		if (oldval < n)
4346 			return (-1);
4347 		newval = oldval - n;
4348 	} while (atomic_cas_32(count_p, oldval, newval) != oldval);
4349 
4350 	return (newval);
4351 }
4352 
4353 /*
4354  * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
4355  */
4356 static uint8_t *
4357 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
4358 {
4359 	uint8_t *addr = *upd_ptr;
4360 	uint8_t *new_ptr;
4361 
4362 	_NOTE(ARGUNUSED(hw));
4363 	_NOTE(ARGUNUSED(vmdq));
4364 
4365 	new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
4366 	*upd_ptr = new_ptr;
4367 	return (addr);
4368 }
4369 
4370 /*
4371  * FMA support
4372  */
4373 int
4374 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
4375 {
4376 	ddi_fm_error_t de;
4377 
4378 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
4379 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
4380 	return (de.fme_status);
4381 }
4382 
4383 int
4384 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
4385 {
4386 	ddi_fm_error_t de;
4387 
4388 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
4389 	return (de.fme_status);
4390 }
4391 
4392 /*
4393  * ixgbe_fm_error_cb - The IO fault service error handling callback function.
4394  */
4395 static int
4396 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
4397 {
4398 	_NOTE(ARGUNUSED(impl_data));
4399 	/*
4400 	 * as the driver can always deal with an error in any dma or
4401 	 * access handle, we can just return the fme_status value.
4402 	 */
4403 	pci_ereport_post(dip, err, NULL);
4404 	return (err->fme_status);
4405 }
4406 
/*
 * ixgbe_fm_init - Register the driver's FMA capabilities with the
 * IO Fault Services framework.
 *
 * Configures the register-access and DMA attributes according to
 * ixgbe->fm_capabilities, then (if any capability is set) registers
 * with ddi_fm_init(), sets up pci ereports, and installs the error
 * callback as appropriate.
 */
static void
ixgbe_fm_init(ixgbe_t *ixgbe)
{
	ddi_iblock_cookie_t iblk;
	int fma_acc_flag, fma_dma_flag;

	/*
	 * Only register with IO Fault Services if we have some capability
	 */
	if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
		ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		fma_acc_flag = 1;
	} else {
		ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		fma_acc_flag = 0;
	}

	if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
		fma_dma_flag = 1;
	} else {
		fma_dma_flag = 0;
	}

	/* propagate the flags to the DMA attribute setup */
	ixgbe_set_fma_flags(fma_acc_flag, fma_dma_flag);

	if (ixgbe->fm_capabilities) {

		/*
		 * Register capabilities with IO Fault Services
		 */
		ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			pci_ereport_setup(ixgbe->dip);

		/*
		 * Register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			ddi_fm_handler_register(ixgbe->dip,
			    ixgbe_fm_error_cb, (void*) ixgbe);
	}
}
4454 
4455 static void
4456 ixgbe_fm_fini(ixgbe_t *ixgbe)
4457 {
4458 	/*
4459 	 * Only unregister FMA capabilities if they are registered
4460 	 */
4461 	if (ixgbe->fm_capabilities) {
4462 
4463 		/*
4464 		 * Release any resources allocated by pci_ereport_setup()
4465 		 */
4466 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
4467 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4468 			pci_ereport_teardown(ixgbe->dip);
4469 
4470 		/*
4471 		 * Un-register error callback if error callback capable
4472 		 */
4473 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4474 			ddi_fm_handler_unregister(ixgbe->dip);
4475 
4476 		/*
4477 		 * Unregister from IO Fault Service
4478 		 */
4479 		ddi_fm_fini(ixgbe->dip);
4480 	}
4481 }
4482 
4483 void
4484 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
4485 {
4486 	uint64_t ena;
4487 	char buf[FM_MAX_CLASS];
4488 
4489 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
4490 	ena = fm_ena_generate(0, FM_ENA_FMT1);
4491 	if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
4492 		ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
4493 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
4494 	}
4495 }
4496 
4497 static int
4498 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
4499 {
4500 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
4501 
4502 	mutex_enter(&rx_ring->rx_lock);
4503 	rx_ring->ring_gen_num = mr_gen_num;
4504 	mutex_exit(&rx_ring->rx_lock);
4505 	return (0);
4506 }
4507 
4508 /*
4509  * Callback funtion for MAC layer to register all rings.
4510  */
4511 /* ARGSUSED */
4512 void
4513 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
4514     const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
4515 {
4516 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
4517 	mac_intr_t *mintr = &infop->mri_intr;
4518 
4519 	switch (rtype) {
4520 	case MAC_RING_TYPE_RX: {
4521 		ASSERT(rg_index == 0);
4522 		ASSERT(ring_index < ixgbe->num_rx_rings);
4523 
4524 		ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[ring_index];
4525 		rx_ring->ring_handle = rh;
4526 
4527 		infop->mri_driver = (mac_ring_driver_t)rx_ring;
4528 		infop->mri_start = ixgbe_ring_start;
4529 		infop->mri_stop = NULL;
4530 		infop->mri_poll = ixgbe_ring_rx_poll;
4531 
4532 		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
4533 		mintr->mi_enable = ixgbe_rx_ring_intr_enable;
4534 		mintr->mi_disable = ixgbe_rx_ring_intr_disable;
4535 
4536 		break;
4537 	}
4538 	case MAC_RING_TYPE_TX: {
4539 		ASSERT(rg_index == -1);
4540 		ASSERT(ring_index < ixgbe->num_tx_rings);
4541 
4542 		ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
4543 		tx_ring->ring_handle = rh;
4544 
4545 		infop->mri_driver = (mac_ring_driver_t)tx_ring;
4546 		infop->mri_start = NULL;
4547 		infop->mri_stop = NULL;
4548 		infop->mri_tx = ixgbe_ring_tx;
4549 
4550 		break;
4551 	}
4552 	default:
4553 		break;
4554 	}
4555 }
4556 
4557 /*
4558  * Callback funtion for MAC layer to register all groups.
4559  */
4560 void
4561 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
4562     mac_group_info_t *infop, mac_group_handle_t gh)
4563 {
4564 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
4565 
4566 	switch (rtype) {
4567 	case MAC_RING_TYPE_RX: {
4568 		ixgbe_rx_group_t *rx_group;
4569 
4570 		rx_group = &ixgbe->rx_groups[index];
4571 		rx_group->group_handle = gh;
4572 
4573 		infop->mgi_driver = (mac_group_driver_t)rx_group;
4574 		infop->mgi_start = NULL;
4575 		infop->mgi_stop = NULL;
4576 		infop->mgi_addmac = ixgbe_addmac;
4577 		infop->mgi_remmac = ixgbe_remmac;
4578 		infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
4579 
4580 		break;
4581 	}
4582 	case MAC_RING_TYPE_TX:
4583 		break;
4584 	default:
4585 		break;
4586 	}
4587 }
4588 
4589 /*
4590  * Enable interrupt on the specificed rx ring.
4591  */
4592 int
4593 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
4594 {
4595 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
4596 	ixgbe_t *ixgbe = rx_ring->ixgbe;
4597 	int r_idx = rx_ring->index;
4598 	int v_idx = rx_ring->intr_vector;
4599 
4600 	mutex_enter(&ixgbe->gen_lock);
4601 	ASSERT(BT_TEST(ixgbe->vect_map[v_idx].rx_map, r_idx) == 0);
4602 
4603 	/*
4604 	 * To enable interrupt by setting the VAL bit of given interrupt
4605 	 * vector allocation register (IVAR).
4606 	 */
4607 	ixgbe_enable_ivar(ixgbe, r_idx, 0);
4608 
4609 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
4610 
4611 	/*
4612 	 * To trigger a Rx interrupt to on this ring
4613 	 */
4614 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
4615 	IXGBE_WRITE_FLUSH(&ixgbe->hw);
4616 
4617 	mutex_exit(&ixgbe->gen_lock);
4618 
4619 	return (0);
4620 }
4621 
4622 /*
4623  * Disable interrupt on the specificed rx ring.
4624  */
4625 int
4626 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
4627 {
4628 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
4629 	ixgbe_t *ixgbe = rx_ring->ixgbe;
4630 	int r_idx = rx_ring->index;
4631 	int v_idx = rx_ring->intr_vector;
4632 
4633 	mutex_enter(&ixgbe->gen_lock);
4634 	ASSERT(BT_TEST(ixgbe->vect_map[v_idx].rx_map, r_idx) == 1);
4635 
4636 	/*
4637 	 * To disable interrupt by clearing the VAL bit of given interrupt
4638 	 * vector allocation register (IVAR).
4639 	 */
4640 	ixgbe_disable_ivar(ixgbe, r_idx, 0);
4641 
4642 	BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
4643 
4644 	mutex_exit(&ixgbe->gen_lock);
4645 
4646 	return (0);
4647 }
4648 
4649 /*
4650  * Add a mac address.
4651  */
4652 static int
4653 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
4654 {
4655 	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
4656 	ixgbe_t *ixgbe = rx_group->ixgbe;
4657 	int slot;
4658 	int err;
4659 
4660 	mutex_enter(&ixgbe->gen_lock);
4661 
4662 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4663 		mutex_exit(&ixgbe->gen_lock);
4664 		return (ECANCELED);
4665 	}
4666 
4667 	if (ixgbe->unicst_avail == 0) {
4668 		/* no slots available */
4669 		mutex_exit(&ixgbe->gen_lock);
4670 		return (ENOSPC);
4671 	}
4672 
4673 	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
4674 		if (ixgbe->unicst_addr[slot].mac.set == 0)
4675 			break;
4676 	}
4677 
4678 	ASSERT((slot >= 0) && (slot < ixgbe->unicst_total));
4679 
4680 	if ((err = ixgbe_unicst_set(ixgbe, mac_addr, slot)) == 0) {
4681 		ixgbe->unicst_addr[slot].mac.set = 1;
4682 		ixgbe->unicst_avail--;
4683 	}
4684 
4685 	mutex_exit(&ixgbe->gen_lock);
4686 
4687 	return (err);
4688 }
4689 
4690 /*
4691  * Remove a mac address.
4692  */
4693 static int
4694 ixgbe_remmac(void *arg, const uint8_t *mac_addr)
4695 {
4696 	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
4697 	ixgbe_t *ixgbe = rx_group->ixgbe;
4698 	int slot;
4699 	int err;
4700 
4701 	mutex_enter(&ixgbe->gen_lock);
4702 
4703 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4704 		mutex_exit(&ixgbe->gen_lock);
4705 		return (ECANCELED);
4706 	}
4707 
4708 	slot = ixgbe_unicst_find(ixgbe, mac_addr);
4709 	if (slot == -1) {
4710 		mutex_exit(&ixgbe->gen_lock);
4711 		return (EINVAL);
4712 	}
4713 
4714 	if (ixgbe->unicst_addr[slot].mac.set == 0) {
4715 		mutex_exit(&ixgbe->gen_lock);
4716 		return (EINVAL);
4717 	}
4718 
4719 	bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
4720 	if ((err = ixgbe_unicst_set(ixgbe,
4721 	    ixgbe->unicst_addr[slot].mac.addr, slot)) == 0) {
4722 		ixgbe->unicst_addr[slot].mac.set = 0;
4723 		ixgbe->unicst_avail++;
4724 	}
4725 
4726 	mutex_exit(&ixgbe->gen_lock);
4727 
4728 	return (err);
4729 }
4730