xref: /titanic_41/usr/src/uts/common/io/ixgbe/ixgbe_main.c (revision d65a28a2f5c3e1fc40e2c7384739f05e22018bfe)
1 /*
2  * CDDL HEADER START
3  *
4  * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
5  * The contents of this file are subject to the terms of the
6  * Common Development and Distribution License (the "License").
7  * You may not use this file except in compliance with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 
23 /*
24  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
25  * Use is subject to license terms.
26  */
27 
28 #include "ixgbe_sw.h"
29 
30 static char ident[] = "Intel 10Gb Ethernet";
31 
32 /*
33  * Local function prototypes
34  */
35 static int ixgbe_register_mac(ixgbe_t *);
36 static int ixgbe_identify_hardware(ixgbe_t *);
37 static int ixgbe_regs_map(ixgbe_t *);
38 static void ixgbe_init_properties(ixgbe_t *);
39 static int ixgbe_init_driver_settings(ixgbe_t *);
40 static void ixgbe_init_locks(ixgbe_t *);
41 static void ixgbe_destroy_locks(ixgbe_t *);
42 static int ixgbe_init(ixgbe_t *);
43 static int ixgbe_chip_start(ixgbe_t *);
44 static void ixgbe_chip_stop(ixgbe_t *);
45 static int ixgbe_reset(ixgbe_t *);
46 static void ixgbe_tx_clean(ixgbe_t *);
47 static boolean_t ixgbe_tx_drain(ixgbe_t *);
48 static boolean_t ixgbe_rx_drain(ixgbe_t *);
49 static int ixgbe_alloc_rings(ixgbe_t *);
50 static int ixgbe_init_rings(ixgbe_t *);
51 static void ixgbe_free_rings(ixgbe_t *);
52 static void ixgbe_fini_rings(ixgbe_t *);
53 static void ixgbe_setup_rings(ixgbe_t *);
54 static void ixgbe_setup_rx(ixgbe_t *);
55 static void ixgbe_setup_tx(ixgbe_t *);
56 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
57 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
58 static void ixgbe_setup_rss(ixgbe_t *);
59 static void ixgbe_init_unicst(ixgbe_t *);
60 static int ixgbe_unicst_set(ixgbe_t *, const uint8_t *, int);
61 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
62 static void ixgbe_setup_multicst(ixgbe_t *);
63 static void ixgbe_get_hw_state(ixgbe_t *);
64 static void ixgbe_get_conf(ixgbe_t *);
65 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
66 static void ixgbe_driver_link_check(void *);
67 static void ixgbe_sfp_check(void *);
68 static void ixgbe_local_timer(void *);
69 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
70 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
71 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
72 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
73 static boolean_t is_valid_mac_addr(uint8_t *);
74 static boolean_t ixgbe_stall_check(ixgbe_t *);
75 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
76 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
77 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
78 static int ixgbe_alloc_intrs(ixgbe_t *);
79 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
80 static int ixgbe_add_intr_handlers(ixgbe_t *);
81 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
82 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
83 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
84 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
85 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
86 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
87 static void ixgbe_setup_adapter_vector(ixgbe_t *);
88 static void ixgbe_rem_intr_handlers(ixgbe_t *);
89 static void ixgbe_rem_intrs(ixgbe_t *);
90 static int ixgbe_enable_intrs(ixgbe_t *);
91 static int ixgbe_disable_intrs(ixgbe_t *);
92 static uint_t ixgbe_intr_legacy(void *, void *);
93 static uint_t ixgbe_intr_msi(void *, void *);
94 static uint_t ixgbe_intr_msix(void *, void *);
95 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
96 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
97 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
98 static void ixgbe_get_driver_control(struct ixgbe_hw *);
99 static int ixgbe_addmac(void *, const uint8_t *);
100 static int ixgbe_remmac(void *, const uint8_t *);
101 static void ixgbe_release_driver_control(struct ixgbe_hw *);
102 
103 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
104 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
105 static int ixgbe_resume(dev_info_t *);
106 static int ixgbe_suspend(dev_info_t *);
107 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
108 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
109 
110 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
111     const void *impl_data);
112 static void ixgbe_fm_init(ixgbe_t *);
113 static void ixgbe_fm_fini(ixgbe_t *);
114 
115 static struct cb_ops ixgbe_cb_ops = {
116 	nulldev,		/* cb_open */
117 	nulldev,		/* cb_close */
118 	nodev,			/* cb_strategy */
119 	nodev,			/* cb_print */
120 	nodev,			/* cb_dump */
121 	nodev,			/* cb_read */
122 	nodev,			/* cb_write */
123 	nodev,			/* cb_ioctl */
124 	nodev,			/* cb_devmap */
125 	nodev,			/* cb_mmap */
126 	nodev,			/* cb_segmap */
127 	nochpoll,		/* cb_chpoll */
128 	ddi_prop_op,		/* cb_prop_op */
129 	NULL,			/* cb_stream */
130 	D_MP | D_HOTPLUG,	/* cb_flag */
131 	CB_REV,			/* cb_rev */
132 	nodev,			/* cb_aread */
133 	nodev			/* cb_awrite */
134 };
135 
136 static struct dev_ops ixgbe_dev_ops = {
137 	DEVO_REV,		/* devo_rev */
138 	0,			/* devo_refcnt */
139 	NULL,			/* devo_getinfo */
140 	nulldev,		/* devo_identify */
141 	nulldev,		/* devo_probe */
142 	ixgbe_attach,		/* devo_attach */
143 	ixgbe_detach,		/* devo_detach */
144 	nodev,			/* devo_reset */
145 	&ixgbe_cb_ops,		/* devo_cb_ops */
146 	NULL,			/* devo_bus_ops */
147 	ddi_power,		/* devo_power */
148 	ddi_quiesce_not_supported,	/* devo_quiesce */
149 };
150 
151 static struct modldrv ixgbe_modldrv = {
152 	&mod_driverops,		/* Type of module.  This one is a driver */
153 	ident,			/* Description string */
154 	&ixgbe_dev_ops		/* driver ops */
155 };
156 
157 static struct modlinkage ixgbe_modlinkage = {
158 	MODREV_1, &ixgbe_modldrv, NULL
159 };
160 
161 /*
162  * Access attributes for register mapping
163  */
164 ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
165 	DDI_DEVICE_ATTR_V0,
166 	DDI_STRUCTURE_LE_ACC,
167 	DDI_STRICTORDER_ACC,
168 	DDI_FLAGERR_ACC
169 };
170 
171 /*
172  * Loopback property
173  */
174 static lb_property_t lb_normal = {
175 	normal,	"normal", IXGBE_LB_NONE
176 };
177 
178 static lb_property_t lb_mac = {
179 	internal, "MAC", IXGBE_LB_INTERNAL_MAC
180 };
181 
182 #define	IXGBE_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
183 
184 static mac_callbacks_t ixgbe_m_callbacks = {
185 	IXGBE_M_CALLBACK_FLAGS,
186 	ixgbe_m_stat,
187 	ixgbe_m_start,
188 	ixgbe_m_stop,
189 	ixgbe_m_promisc,
190 	ixgbe_m_multicst,
191 	NULL,
192 	NULL,
193 	ixgbe_m_ioctl,
194 	ixgbe_m_getcapab
195 };
196 
197 /*
198  * Initialize capabilities of each supported adapter type
199  */
200 static adapter_info_t ixgbe_82598eb_cap = {
201 	64,		/* maximum number of rx queues */
202 	1,		/* minimum number of rx queues */
203 	8,		/* default number of rx queues */
204 	32,		/* maximum number of tx queues */
205 	1,		/* minimum number of tx queues */
206 	8,		/* default number of tx queues */
207 	18,		/* maximum total msix vectors */
208 	16,		/* maximum number of ring vectors */
209 	2,		/* maximum number of other vectors */
210 	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
211 	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
212 	| IXGBE_FLAG_RSS_CAPABLE
213 	| IXGBE_FLAG_VMDQ_CAPABLE)
214 };
215 
216 static adapter_info_t ixgbe_82599eb_cap = {
217 	128,		/* maximum number of rx queues */
218 	1,		/* minimum number of rx queues */
219 	8,		/* default number of rx queues */
220 	128,		/* maximum number of tx queues */
221 	1,		/* minimum number of tx queues */
222 	8,		/* default number of tx queues */
223 	64,		/* maximum total msix vectors */
224 	16,		/* maximum number of ring vectors */
225 	2,		/* maximum number of other vectors */
226 	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
227 	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
228 	| IXGBE_FLAG_RSS_CAPABLE
229 	| IXGBE_FLAG_VMDQ_CAPABLE)
230 };
231 
232 /*
233  * Module Initialization Functions.
234  */
235 
236 int
237 _init(void)
238 {
239 	int status;
240 
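	/*
	 * A GLDv3 driver brackets mod_install()/mod_remove() with
	 * mac_init_ops()/mac_fini_ops(); mac_init_ops() prepares
	 * ixgbe_dev_ops for use with the MAC framework before the
	 * module is installed.
	 */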
241 	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
242 
243 	status = mod_install(&ixgbe_modlinkage);
244 
245 	if (status != DDI_SUCCESS) {
246 		mac_fini_ops(&ixgbe_dev_ops);
247 	}
248 
249 	return (status);
250 }
251 
252 int
253 _fini(void)
254 {
255 	int status;
256 
257 	status = mod_remove(&ixgbe_modlinkage);
258 
259 	if (status == DDI_SUCCESS) {
260 		mac_fini_ops(&ixgbe_dev_ops);
261 	}
262 
263 	return (status);
264 }
265 
266 int
267 _info(struct modinfo *modinfop)
268 {
269 	int status;
270 
271 	status = mod_info(&ixgbe_modlinkage, modinfop);
272 
273 	return (status);
274 }
275 
276 /*
277  * ixgbe_attach - Driver attach.
278  *
279  * This function is the device specific initialization entry
280  * point. This entry point is required and must be written.
281  * The DDI_ATTACH command must be provided in the attach entry
282  * point. When attach() is called with cmd set to DDI_ATTACH,
283  * all normal kernel services (such as kmem_alloc(9F)) are
284  * available for use by the driver.
285  *
286  * The attach() function will be called once for each instance
287  * of the device on the system with cmd set to DDI_ATTACH.
288  * Until attach() succeeds, the only driver entry points which
289  * may be called are open(9E) and getinfo(9E).
290  */
291 static int
292 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
293 {
294 	ixgbe_t *ixgbe;
295 	struct ixgbe_osdep *osdep;
296 	struct ixgbe_hw *hw;
297 	int instance;
298 	char taskqname[32];
299 
300 	/*
301 	 * Check the command and perform corresponding operations
302 	 */
303 	switch (cmd) {
304 	default:
305 		return (DDI_FAILURE);
306 
307 	case DDI_RESUME:
308 		return (ixgbe_resume(devinfo));
309 
310 	case DDI_ATTACH:
311 		break;
312 	}
313 
314 	/* Get the device instance */
315 	instance = ddi_get_instance(devinfo);
316 
317 	/* Allocate memory for the instance data structure */
318 	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
319 
320 	ixgbe->dip = devinfo;
321 	ixgbe->instance = instance;
322 
323 	hw = &ixgbe->hw;
324 	osdep = &ixgbe->osdep;
325 	hw->back = osdep;
326 	osdep->ixgbe = ixgbe;
327 
328 	/* Attach the instance pointer to the dev_info data structure */
329 	ddi_set_driver_private(devinfo, ixgbe);
330 
331 	/*
332 	 * Initialize for fma support
333 	 */
334 	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
335 	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
336 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
337 	ixgbe_fm_init(ixgbe);
338 	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
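
	/*
	 * Each completed attach step sets a bit in attach_progress; on
	 * the attach_fail path, ixgbe_unconfigure() consults these bits
	 * so that only the steps which actually completed are torn
	 * down.
	 */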
339 
340 	/*
341 	 * Map PCI config space registers
342 	 */
343 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
344 		ixgbe_error(ixgbe, "Failed to map PCI configurations");
345 		goto attach_fail;
346 	}
347 	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
348 
349 	/*
350 	 * Identify the chipset family
351 	 */
352 	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
353 		ixgbe_error(ixgbe, "Failed to identify hardware");
354 		goto attach_fail;
355 	}
356 
357 	/*
358 	 * Map device registers
359 	 */
360 	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
361 		ixgbe_error(ixgbe, "Failed to map device registers");
362 		goto attach_fail;
363 	}
364 	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
365 
366 	/*
367 	 * Initialize driver parameters
368 	 */
369 	ixgbe_init_properties(ixgbe);
370 	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
371 
372 	/*
373 	 * Allocate interrupts
374 	 */
375 	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
376 		ixgbe_error(ixgbe, "Failed to allocate interrupts");
377 		goto attach_fail;
378 	}
379 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
380 
381 	/*
382 	 * Allocate rx/tx rings based on the ring numbers.
383 	 * The actual numbers of rx/tx rings are decided by the number of
384 	 * allocated interrupt vectors, so we should allocate the rings after
385 	 * interrupts are allocated.
386 	 */
387 	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
388 		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
389 		goto attach_fail;
390 	}
391 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
392 
393 	/*
394 	 * Map rings to interrupt vectors
395 	 */
396 	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
397 		ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
398 		goto attach_fail;
399 	}
400 
401 	/*
402 	 * Add interrupt handlers
403 	 */
404 	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
405 		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
406 		goto attach_fail;
407 	}
408 	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
409 
410 	/*
411 	 * Create a taskq for link-status-change
412 	 */
413 	(void) sprintf(taskqname, "ixgbe%d_taskq", instance);
414 	if ((ixgbe->lsc_taskq = ddi_taskq_create(devinfo, taskqname,
415 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
416 		ixgbe_error(ixgbe, "taskq_create failed");
417 		goto attach_fail;
418 	}
419 	ixgbe->attach_progress |= ATTACH_PROGRESS_LSC_TASKQ;
420 
421 	/*
422 	 * Initialize driver parameters
423 	 */
424 	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
425 		ixgbe_error(ixgbe, "Failed to initialize driver settings");
426 		goto attach_fail;
427 	}
428 
429 	/*
430 	 * Initialize mutexes for this device.
431 	 * Do this before enabling the interrupt handler and
432 	 * registering the softint, to avoid a condition where an
433 	 * interrupt handler could try to use an uninitialized mutex.
434 	 */
435 	ixgbe_init_locks(ixgbe);
436 	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
437 
438 	/*
439 	 * Initialize chipset hardware
440 	 */
441 	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
442 		ixgbe_error(ixgbe, "Failed to initialize adapter");
443 		goto attach_fail;
444 	}
445 	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
446 
447 	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
448 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
449 		goto attach_fail;
450 	}
451 
452 	/*
453 	 * Initialize DMA and hardware settings for rx/tx rings
454 	 */
455 	if (ixgbe_init_rings(ixgbe) != IXGBE_SUCCESS) {
456 		ixgbe_error(ixgbe, "Failed to initialize rings");
457 		goto attach_fail;
458 	}
459 	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT_RINGS;
460 
461 	/*
462 	 * Initialize statistics
463 	 */
464 	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
465 		ixgbe_error(ixgbe, "Failed to initialize statistics");
466 		goto attach_fail;
467 	}
468 	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
469 
470 	/*
471 	 * Initialize NDD parameters
472 	 */
473 	if (ixgbe_nd_init(ixgbe) != IXGBE_SUCCESS) {
474 		ixgbe_error(ixgbe, "Failed to initialize ndd");
475 		goto attach_fail;
476 	}
477 	ixgbe->attach_progress |= ATTACH_PROGRESS_NDD;
478 
479 	/*
480 	 * Register the driver to the MAC
481 	 */
482 	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
483 		ixgbe_error(ixgbe, "Failed to register MAC");
484 		goto attach_fail;
485 	}
486 	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
487 	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
488 
489 	/*
490 	 * Now that mutex locks are initialized, and the chip is also
491 	 * initialized, enable interrupts.
492 	 */
493 	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
494 		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
495 		goto attach_fail;
496 	}
497 	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
498 
499 	ixgbe->ixgbe_state |= IXGBE_INITIALIZED;
500 
501 	return (DDI_SUCCESS);
502 
503 attach_fail:
504 	ixgbe_unconfigure(devinfo, ixgbe);
505 	return (DDI_FAILURE);
506 }
507 
508 /*
509  * ixgbe_detach - Driver detach.
510  *
511  * The detach() function is the complement of the attach routine.
512  * If cmd is set to DDI_DETACH, detach() is used to remove  the
513  * state  associated  with  a  given  instance of a device node
514  * prior to the removal of that instance from the system.
515  *
516  * The detach() function will be called once for each instance
517  * of the device for which there has been a successful attach()
518  * once there are no longer any opens on the device.
519  *
520  * Interrupt routines are disabled, and all memory allocated by
521  * this driver is freed.
522  */
523 static int
524 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
525 {
526 	ixgbe_t *ixgbe;
527 
528 	/*
529 	 * Check detach command
530 	 */
531 	switch (cmd) {
532 	default:
533 		return (DDI_FAILURE);
534 
535 	case DDI_SUSPEND:
536 		return (ixgbe_suspend(devinfo));
537 
538 	case DDI_DETACH:
539 		break;
540 	}
541 
542 
543 	/*
544 	 * Get the pointer to the driver private data structure
545 	 */
546 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
547 	if (ixgbe == NULL)
548 		return (DDI_FAILURE);
549 
550 	/*
551 	 * Unregister MAC. If failed, we have to fail the detach
552 	 */
553 	if (mac_unregister(ixgbe->mac_hdl) != 0) {
554 		ixgbe_error(ixgbe, "Failed to unregister MAC");
555 		return (DDI_FAILURE);
556 	}
557 	ixgbe->attach_progress &= ~ATTACH_PROGRESS_MAC;
558 
559 	/*
560 	 * If the device is still running, it needs to be stopped first.
561 	 * This check is necessary because under some specific circumstances,
562 	 * the detach routine can be called without stopping the interface
563 	 * first.
564 	 */
565 	mutex_enter(&ixgbe->gen_lock);
566 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
567 		ixgbe->ixgbe_state &= ~IXGBE_STARTED;
568 		ixgbe_stop(ixgbe);
569 		mutex_exit(&ixgbe->gen_lock);
570 		/* Disable and stop the watchdog timer */
571 		ixgbe_disable_watchdog_timer(ixgbe);
572 	} else
573 		mutex_exit(&ixgbe->gen_lock);
574 
575 	/*
576 	 * Check if there are still rx buffers held by the upper layer.
577 	 * If so, fail the detach.
578 	 */
579 	if (!ixgbe_rx_drain(ixgbe))
580 		return (DDI_FAILURE);
581 
582 	/*
583 	 * Do the remaining unconfigure routines
584 	 */
585 	ixgbe_unconfigure(devinfo, ixgbe);
586 
587 	return (DDI_SUCCESS);
588 }
589 
590 static void
591 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
592 {
593 	/*
594 	 * Disable interrupt
595 	 */
596 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
597 		(void) ixgbe_disable_intrs(ixgbe);
598 	}
599 
600 	/*
601 	 * Unregister MAC
602 	 */
603 	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
604 		(void) mac_unregister(ixgbe->mac_hdl);
605 	}
606 
607 	/*
608 	 * Free ndd parameters
609 	 */
610 	if (ixgbe->attach_progress & ATTACH_PROGRESS_NDD) {
611 		ixgbe_nd_cleanup(ixgbe);
612 	}
613 
614 	/*
615 	 * Free statistics
616 	 */
617 	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
618 		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
619 	}
620 
621 	/*
622 	 * Remove interrupt handlers
623 	 */
624 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
625 		ixgbe_rem_intr_handlers(ixgbe);
626 	}
627 
628 	/*
629 	 * Remove taskq for link-status-change
630 	 */
631 	if (ixgbe->attach_progress & ATTACH_PROGRESS_LSC_TASKQ) {
632 		ddi_taskq_destroy(ixgbe->lsc_taskq);
633 	}
634 
635 	/*
636 	 * Remove interrupts
637 	 */
638 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
639 		ixgbe_rem_intrs(ixgbe);
640 	}
641 
642 	/*
643 	 * Remove driver properties
644 	 */
645 	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
646 		(void) ddi_prop_remove_all(devinfo);
647 	}
648 
649 	/*
650 	 * Release the DMA resources of rx/tx rings
651 	 */
652 	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT_RINGS) {
653 		ixgbe_fini_rings(ixgbe);
654 	}
655 
656 	/*
657 	 * Stop the chipset
658 	 */
659 	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
660 		mutex_enter(&ixgbe->gen_lock);
661 		ixgbe_chip_stop(ixgbe);
662 		mutex_exit(&ixgbe->gen_lock);
663 	}
664 
665 	/*
666 	 * Free register handle
667 	 */
668 	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
669 		if (ixgbe->osdep.reg_handle != NULL)
670 			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
671 	}
672 
673 	/*
674 	 * Free PCI config handle
675 	 */
676 	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
677 		if (ixgbe->osdep.cfg_handle != NULL)
678 			pci_config_teardown(&ixgbe->osdep.cfg_handle);
679 	}
680 
681 	/*
682 	 * Free locks
683 	 */
684 	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
685 		ixgbe_destroy_locks(ixgbe);
686 	}
687 
688 	/*
689 	 * Free the rx/tx rings
690 	 */
691 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
692 		ixgbe_free_rings(ixgbe);
693 	}
694 
695 	/*
696 	 * Unregister FMA capabilities
697 	 */
698 	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
699 		ixgbe_fm_fini(ixgbe);
700 	}
701 
702 	/*
703 	 * Free the driver data structure
704 	 */
705 	kmem_free(ixgbe, sizeof (ixgbe_t));
706 
707 	ddi_set_driver_private(devinfo, NULL);
708 }
709 
710 /*
711  * ixgbe_register_mac - Register the driver and its function pointers with
712  * the GLD interface.
713  */
714 static int
715 ixgbe_register_mac(ixgbe_t *ixgbe)
716 {
717 	struct ixgbe_hw *hw = &ixgbe->hw;
718 	mac_register_t *mac;
719 	int status;
720 
721 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
722 		return (IXGBE_FAILURE);
723 
724 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
725 	mac->m_driver = ixgbe;
726 	mac->m_dip = ixgbe->dip;
727 	mac->m_src_addr = hw->mac.addr;
728 	mac->m_callbacks = &ixgbe_m_callbacks;
729 	mac->m_min_sdu = 0;
730 	mac->m_max_sdu = ixgbe->default_mtu;
731 	mac->m_margin = VLAN_TAGSZ;
732 	mac->m_v12n = MAC_VIRT_LEVEL1;
733 
734 	status = mac_register(mac, &ixgbe->mac_hdl);
735 
736 	mac_free(mac);
737 
738 	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
739 }
740 
741 /*
742  * ixgbe_identify_hardware - Identify the type of the chipset.
743  */
744 static int
745 ixgbe_identify_hardware(ixgbe_t *ixgbe)
746 {
747 	struct ixgbe_hw *hw = &ixgbe->hw;
748 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
749 
750 	/*
751 	 * Get the device id
752 	 */
753 	hw->vendor_id =
754 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
755 	hw->device_id =
756 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
757 	hw->revision_id =
758 	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
759 	hw->subsystem_device_id =
760 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
761 	hw->subsystem_vendor_id =
762 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
763 
764 	/*
765 	 * Set the mac type of the adapter based on the device id
766 	 */
767 	if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
768 		return (IXGBE_FAILURE);
769 	}
770 
771 	/*
772 	 * Install adapter capabilities
773 	 */
774 	switch (hw->mac.type) {
775 	case ixgbe_mac_82598EB:
776 		ixgbe_log(ixgbe, "identify 82598 adapter\n");
777 		ixgbe->capab = &ixgbe_82598eb_cap;
778 
779 		if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
780 			ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
781 			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
782 		}
783 		ixgbe->capab->other_intr |= IXGBE_EICR_LSC;
784 
785 		break;
786 	case ixgbe_mac_82599EB:
787 		ixgbe_log(ixgbe, "identify 82599 adapter\n");
788 		ixgbe->capab = &ixgbe_82599eb_cap;
789 
790 		ixgbe->capab->other_intr = (IXGBE_EICR_GPI_SDP1 |
791 		    IXGBE_EICR_GPI_SDP2 | IXGBE_EICR_LSC);
792 
793 		break;
794 	default:
795 		ixgbe_log(ixgbe,
796 		    "adapter not supported in ixgbe_identify_hardware(): %d\n",
797 		    hw->mac.type);
798 		return (IXGBE_FAILURE);
799 	}
800 
801 	return (IXGBE_SUCCESS);
802 }
803 
804 /*
805  * ixgbe_regs_map - Map the device registers.
806  *
807  */
808 static int
809 ixgbe_regs_map(ixgbe_t *ixgbe)
810 {
811 	dev_info_t *devinfo = ixgbe->dip;
812 	struct ixgbe_hw *hw = &ixgbe->hw;
813 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
814 	off_t mem_size;
815 
816 	/*
817 	 * First get the size of device registers to be mapped.
818 	 */
819 	if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
820 	    != DDI_SUCCESS) {
821 		return (IXGBE_FAILURE);
822 	}
823 
824 	/*
825 	 * Call ddi_regs_map_setup() to map registers
826 	 */
827 	if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
828 	    (caddr_t *)&hw->hw_addr, 0,
829 	    mem_size, &ixgbe_regs_acc_attr,
830 	    &osdep->reg_handle)) != DDI_SUCCESS) {
831 		return (IXGBE_FAILURE);
832 	}
833 
834 	return (IXGBE_SUCCESS);
835 }
836 
837 /*
838  * ixgbe_init_properties - Initialize driver properties.
839  */
840 static void
841 ixgbe_init_properties(ixgbe_t *ixgbe)
842 {
843 	/*
844 	 * Get conf file properties, including link settings,
845 	 * jumbo frames, ring number, descriptor number, etc.
846 	 */
847 	ixgbe_get_conf(ixgbe);
848 }
849 
850 /*
851  * ixgbe_init_driver_settings - Initialize driver settings.
852  *
853  * The settings include hardware function pointers, bus information,
854  * rx/tx rings settings, link state, and any other parameters that
855  * need to be setup during driver initialization.
856  */
857 static int
858 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
859 {
860 	struct ixgbe_hw *hw = &ixgbe->hw;
861 	dev_info_t *devinfo = ixgbe->dip;
862 	ixgbe_rx_ring_t *rx_ring;
863 	ixgbe_tx_ring_t *tx_ring;
864 	uint32_t rx_size;
865 	uint32_t tx_size;
866 	int i;
867 
868 	/*
869 	 * Initialize chipset specific hardware function pointers
870 	 */
871 	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
872 		return (IXGBE_FAILURE);
873 	}
874 
875 	/*
876 	 * Get the system page size
877 	 */
878 	ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);
879 
880 	/*
881 	 * Set rx buffer size
882 	 *
883 	 * The IP header alignment room is counted in the calculation.
884 	 * The rx buffer size is in unit of 1K that is required by the
885 	 * chipset hardware.
886 	 */
887 	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
888 	ixgbe->rx_buf_size = ((rx_size >> 10) +
889 	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
890 
891 	/*
892 	 * Set tx buffer size
893 	 */
894 	tx_size = ixgbe->max_frame_size;
895 	ixgbe->tx_buf_size = ((tx_size >> 10) +
896 	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
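
	/*
	 * Worked example of the 1K roundup used for both buffer sizes
	 * above (illustrative only, assuming the usual 2-byte
	 * IPHDR_ALIGN_ROOM): a 1500-byte MTU gives a max frame size of
	 * 1518 bytes, so rx_size = 1520; (1520 >> 10) + 1 = 2 and
	 * 2 << 10 = 2048, i.e. a 2K buffer.
	 */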
897 
898 	/*
899 	 * Initialize rx/tx rings parameters
900 	 */
901 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
902 		rx_ring = &ixgbe->rx_rings[i];
903 		rx_ring->index = i;
904 		rx_ring->ixgbe = ixgbe;
905 
906 		rx_ring->ring_size = ixgbe->rx_ring_size;
907 		rx_ring->free_list_size = ixgbe->rx_ring_size;
908 		rx_ring->copy_thresh = ixgbe->rx_copy_thresh;
909 		rx_ring->limit_per_intr = ixgbe->rx_limit_per_intr;
910 	}
911 
912 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
913 		tx_ring = &ixgbe->tx_rings[i];
914 		tx_ring->index = i;
915 		tx_ring->ixgbe = ixgbe;
916 		if (ixgbe->tx_head_wb_enable)
917 			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
918 		else
919 			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
920 
921 		tx_ring->ring_size = ixgbe->tx_ring_size;
922 		tx_ring->free_list_size = ixgbe->tx_ring_size +
923 		    (ixgbe->tx_ring_size >> 1);
924 		tx_ring->copy_thresh = ixgbe->tx_copy_thresh;
925 		tx_ring->recycle_thresh = ixgbe->tx_recycle_thresh;
926 		tx_ring->overload_thresh = ixgbe->tx_overload_thresh;
927 		tx_ring->resched_thresh = ixgbe->tx_resched_thresh;
928 	}
929 
930 	/*
931 	 * Initialize values of interrupt throttling rate
932 	 */
933 	for (i = 1; i < MAX_INTR_VECTOR; i++)
934 		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
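
	/*
	 * intr_throttling[0] was taken from the driver configuration;
	 * every other vector starts from the same rate.  The values are
	 * later programmed into the per-vector EITR registers by
	 * ixgbe_chip_start().
	 */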
935 
936 	/*
937 	 * The initial link state should be "unknown"
938 	 */
939 	ixgbe->link_state = LINK_STATE_UNKNOWN;
940 
941 	return (IXGBE_SUCCESS);
942 }
943 
944 /*
945  * ixgbe_init_locks - Initialize locks.
946  */
947 static void
948 ixgbe_init_locks(ixgbe_t *ixgbe)
949 {
950 	ixgbe_rx_ring_t *rx_ring;
951 	ixgbe_tx_ring_t *tx_ring;
952 	int i;
953 
954 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
955 		rx_ring = &ixgbe->rx_rings[i];
956 		mutex_init(&rx_ring->rx_lock, NULL,
957 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
958 		mutex_init(&rx_ring->recycle_lock, NULL,
959 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
960 	}
961 
962 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
963 		tx_ring = &ixgbe->tx_rings[i];
964 		mutex_init(&tx_ring->tx_lock, NULL,
965 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
966 		mutex_init(&tx_ring->recycle_lock, NULL,
967 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
968 		mutex_init(&tx_ring->tcb_head_lock, NULL,
969 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
970 		mutex_init(&tx_ring->tcb_tail_lock, NULL,
971 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
972 	}
973 
974 	mutex_init(&ixgbe->gen_lock, NULL,
975 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
976 
977 	mutex_init(&ixgbe->watchdog_lock, NULL,
978 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
979 }
980 
981 /*
982  * ixgbe_destroy_locks - Destroy locks.
983  */
984 static void
985 ixgbe_destroy_locks(ixgbe_t *ixgbe)
986 {
987 	ixgbe_rx_ring_t *rx_ring;
988 	ixgbe_tx_ring_t *tx_ring;
989 	int i;
990 
991 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
992 		rx_ring = &ixgbe->rx_rings[i];
993 		mutex_destroy(&rx_ring->rx_lock);
994 		mutex_destroy(&rx_ring->recycle_lock);
995 	}
996 
997 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
998 		tx_ring = &ixgbe->tx_rings[i];
999 		mutex_destroy(&tx_ring->tx_lock);
1000 		mutex_destroy(&tx_ring->recycle_lock);
1001 		mutex_destroy(&tx_ring->tcb_head_lock);
1002 		mutex_destroy(&tx_ring->tcb_tail_lock);
1003 	}
1004 
1005 	mutex_destroy(&ixgbe->gen_lock);
1006 	mutex_destroy(&ixgbe->watchdog_lock);
1007 }
1008 
1009 static int
1010 ixgbe_resume(dev_info_t *devinfo)
1011 {
1012 	ixgbe_t *ixgbe;
1013 
1014 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1015 	if (ixgbe == NULL)
1016 		return (DDI_FAILURE);
1017 
1018 	mutex_enter(&ixgbe->gen_lock);
1019 
1020 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1021 		if (ixgbe_start(ixgbe) != IXGBE_SUCCESS) {
1022 			mutex_exit(&ixgbe->gen_lock);
1023 			return (DDI_FAILURE);
1024 		}
1025 
1026 		/*
1027 		 * Enable and start the watchdog timer
1028 		 */
1029 		ixgbe_enable_watchdog_timer(ixgbe);
1030 	}
1031 
1032 	ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
1033 
1034 	mutex_exit(&ixgbe->gen_lock);
1035 
1036 	return (DDI_SUCCESS);
1037 }
1038 
1039 static int
1040 ixgbe_suspend(dev_info_t *devinfo)
1041 {
1042 	ixgbe_t *ixgbe;
1043 
1044 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1045 	if (ixgbe == NULL)
1046 		return (DDI_FAILURE);
1047 
1048 	mutex_enter(&ixgbe->gen_lock);
1049 
1050 	ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
1051 
1052 	ixgbe_stop(ixgbe);
1053 
1054 	mutex_exit(&ixgbe->gen_lock);
1055 
1056 	/*
1057 	 * Disable and stop the watchdog timer
1058 	 */
1059 	ixgbe_disable_watchdog_timer(ixgbe);
1060 
1061 	return (DDI_SUCCESS);
1062 }
1063 
1064 /*
1065  * ixgbe_init - Initialize the device.
1066  */
1067 static int
1068 ixgbe_init(ixgbe_t *ixgbe)
1069 {
1070 	struct ixgbe_hw *hw = &ixgbe->hw;
1071 
1072 	mutex_enter(&ixgbe->gen_lock);
1073 
1074 	/*
1075 	 * Reset chipset to put the hardware in a known state
1076 	 * before we try to do anything with the eeprom.
1077 	 */
1078 	if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
1079 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1080 		goto init_fail;
1081 	}
1082 
1083 	/*
1084 	 * Need to init eeprom before validating the checksum.
1085 	 */
1086 	if (ixgbe_init_eeprom_params(hw) < 0) {
1087 		ixgbe_error(ixgbe,
1088 		    "Unable to intitialize the eeprom interface.");
1089 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1090 		goto init_fail;
1091 	}
1092 
1093 	/*
1094 	 * NVM validation
1095 	 */
1096 	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1097 		/*
1098 		 * Some PCI-E parts fail the first check due to
1099 		 * the link being in sleep state.  Call it again;
1100 		 * if it fails a second time, it's a real issue.
1101 		 */
1102 		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1103 			ixgbe_error(ixgbe,
1104 			    "Invalid NVM checksum. Please contact "
1105 			    "the vendor to update the NVM.");
1106 			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1107 			goto init_fail;
1108 		}
1109 	}
1110 
1111 	/*
1112 	 * Setup default flow control thresholds - whether flow control
1113 	 * is enabled, and its type, are controlled by ixgbe.conf
1114 	 */
1115 	hw->fc.high_water = DEFAULT_FCRTH;
1116 	hw->fc.low_water = DEFAULT_FCRTL;
1117 	hw->fc.pause_time = DEFAULT_FCPAUSE;
1118 	hw->fc.send_xon = B_TRUE;
1119 
1120 	/*
1121 	 * Don't wait for auto-negotiation to complete
1122 	 */
1123 	hw->phy.autoneg_wait_to_complete = B_FALSE;
1124 
1125 	/*
1126 	 * Initialize link settings
1127 	 */
1128 	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1129 
1130 	/*
1131 	 * Initialize the chipset hardware
1132 	 */
1133 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1134 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1135 		goto init_fail;
1136 	}
1137 
1138 	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
1139 		goto init_fail;
1140 	}
1141 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1142 		goto init_fail;
1143 	}
1144 
1145 	mutex_exit(&ixgbe->gen_lock);
1146 	return (IXGBE_SUCCESS);
1147 
1148 init_fail:
1149 	/*
1150 	 * Reset PHY
1151 	 */
1152 	(void) ixgbe_reset_phy(hw);
1153 
1154 	mutex_exit(&ixgbe->gen_lock);
1155 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1156 	return (IXGBE_FAILURE);
1157 }
1158 
1159 /*
1160  * ixgbe_init_rings - Allocate DMA resources for all rx/tx rings and
1161  * initialize relevant hardware settings.
1162  */
1163 static int
1164 ixgbe_init_rings(ixgbe_t *ixgbe)
1165 {
1166 	int i;
1167 
1168 	/*
1169 	 * Allocate buffers for all the rx/tx rings
1170 	 */
1171 	if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS)
1172 		return (IXGBE_FAILURE);
1173 
1174 	/*
1175 	 * Setup the rx/tx rings
1176 	 */
1177 	mutex_enter(&ixgbe->gen_lock);
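
	/*
	 * Lock ordering: gen_lock first, then the rx ring locks in
	 * index order, then the tx ring locks; releases below happen in
	 * the reverse order.  ixgbe_reset() takes the locks the same
	 * way, and ixgbe_start()/ixgbe_stop() use the same ring-lock
	 * order under a caller-held gen_lock.
	 */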
1178 
1179 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1180 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1181 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1182 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1183 
1184 	ixgbe_setup_rings(ixgbe);
1185 
1186 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1187 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1188 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1189 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1190 
1191 	mutex_exit(&ixgbe->gen_lock);
1192 
1193 	return (IXGBE_SUCCESS);
1194 }
1195 
1196 /*
1197  * ixgbe_fini_rings - Release DMA resources of all rx/tx rings.
1198  */
1199 static void
1200 ixgbe_fini_rings(ixgbe_t *ixgbe)
1201 {
1202 	/*
1203 	 * Release the DMA/memory resources of rx/tx rings
1204 	 */
1205 	ixgbe_free_dma(ixgbe);
1206 }
1207 
1208 /*
1209  * ixgbe_chip_start - Initialize and start the chipset hardware.
1210  */
1211 static int
1212 ixgbe_chip_start(ixgbe_t *ixgbe)
1213 {
1214 	struct ixgbe_hw *hw = &ixgbe->hw;
1215 	int i;
1216 
1217 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1218 
1219 	/*
1220 	 * Get the mac address
1221 	 * This function should handle the SPARC case correctly.
1222 	 */
1223 	if (!ixgbe_find_mac_address(ixgbe)) {
1224 		ixgbe_error(ixgbe, "Failed to get the mac address");
1225 		return (IXGBE_FAILURE);
1226 	}
1227 
1228 	/*
1229 	 * Validate the mac address
1230 	 */
1231 	(void) ixgbe_init_rx_addrs(hw);
1232 	if (!is_valid_mac_addr(hw->mac.addr)) {
1233 		ixgbe_error(ixgbe, "Invalid mac address");
1234 		return (IXGBE_FAILURE);
1235 	}
1236 
1237 	/*
1238 	 * Configure/Initialize hardware
1239 	 */
1240 	if (ixgbe_init_hw(hw) != IXGBE_SUCCESS) {
1241 		ixgbe_error(ixgbe, "Failed to initialize hardware");
1242 		return (IXGBE_FAILURE);
1243 	}
1244 
1245 	/*
1246 	 * Setup adapter interrupt vectors
1247 	 */
1248 	ixgbe_setup_adapter_vector(ixgbe);
1249 
1250 	/*
1251 	 * Initialize unicast addresses.
1252 	 */
1253 	ixgbe_init_unicst(ixgbe);
1254 
1255 	/*
1256 	 * Setup and initialize the mctable structures.
1257 	 */
1258 	ixgbe_setup_multicst(ixgbe);
1259 
1260 	/*
1261 	 * Set interrupt throttling rate
1262 	 */
1263 	for (i = 0; i < ixgbe->intr_cnt; i++) {
1264 		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
1265 	}
1266 
1267 	/*
1268 	 * Save the state of the phy
1269 	 */
1270 	ixgbe_get_hw_state(ixgbe);
1271 
1272 	/*
1273 	 * Make sure driver has control
1274 	 */
1275 	ixgbe_get_driver_control(hw);
1276 
1277 	return (IXGBE_SUCCESS);
1278 }
1279 
1280 /*
1281  * ixgbe_chip_stop - Stop the chipset hardware
1282  */
1283 static void
1284 ixgbe_chip_stop(ixgbe_t *ixgbe)
1285 {
1286 	struct ixgbe_hw *hw = &ixgbe->hw;
1287 
1288 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1289 
1290 	/*
1291 	 * Tell firmware driver is no longer in control
1292 	 */
1293 	ixgbe_release_driver_control(hw);
1294 
1295 	/*
1296 	 * Reset the chipset
1297 	 */
1298 	(void) ixgbe_reset_hw(hw);
1299 
1300 	/*
1301 	 * Reset PHY
1302 	 */
1303 	(void) ixgbe_reset_phy(hw);
1304 }
1305 
1306 /*
1307  * ixgbe_reset - Reset the chipset and re-start the driver.
1308  *
1309  * It involves stopping and re-starting the chipset,
1310  * and re-configuring the rx/tx rings.
1311  */
1312 static int
1313 ixgbe_reset(ixgbe_t *ixgbe)
1314 {
1315 	int i;
1316 
1317 	mutex_enter(&ixgbe->gen_lock);
1318 
1319 	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
1320 	ixgbe->ixgbe_state &= ~IXGBE_STARTED;
1321 
1322 	/*
1323 	 * Disable the adapter interrupts to stop any rx/tx activities
1324 	 * before draining pending data and resetting hardware.
1325 	 */
1326 	ixgbe_disable_adapter_interrupts(ixgbe);
1327 
1328 	/*
1329 	 * Drain the pending transmit packets
1330 	 */
1331 	(void) ixgbe_tx_drain(ixgbe);
1332 
1333 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1334 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1335 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1336 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1337 
1338 	/*
1339 	 * Stop the chipset hardware
1340 	 */
1341 	ixgbe_chip_stop(ixgbe);
1342 
1343 	/*
1344 	 * Clean the pending tx data/resources
1345 	 */
1346 	ixgbe_tx_clean(ixgbe);
1347 
1348 	/*
1349 	 * Start the chipset hardware
1350 	 */
1351 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1352 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1353 		goto reset_failure;
1354 	}
1355 
1356 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1357 		goto reset_failure;
1358 	}
1359 
1360 	/*
1361 	 * Setup the rx/tx rings
1362 	 */
1363 	ixgbe_setup_rings(ixgbe);
1364 
1365 	/*
1366 	 * Enable adapter interrupts
1367 	 * The interrupts must be enabled after the driver state is START
1368 	 */
1369 	ixgbe_enable_adapter_interrupts(ixgbe);
1370 
1371 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1372 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1373 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1374 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1375 
1376 	ixgbe->ixgbe_state |= IXGBE_STARTED;
1377 	mutex_exit(&ixgbe->gen_lock);
1378 
1379 	return (IXGBE_SUCCESS);
1380 
1381 reset_failure:
1382 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1383 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1384 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1385 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1386 
1387 	mutex_exit(&ixgbe->gen_lock);
1388 
1389 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1390 
1391 	return (IXGBE_FAILURE);
1392 }
1393 
1394 /*
1395  * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
1396  */
1397 static void
1398 ixgbe_tx_clean(ixgbe_t *ixgbe)
1399 {
1400 	ixgbe_tx_ring_t *tx_ring;
1401 	tx_control_block_t *tcb;
1402 	link_list_t pending_list;
1403 	uint32_t desc_num;
1404 	int i, j;
1405 
1406 	LINK_LIST_INIT(&pending_list);
1407 
1408 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1409 		tx_ring = &ixgbe->tx_rings[i];
1410 
1411 		mutex_enter(&tx_ring->recycle_lock);
1412 
1413 		/*
1414 		 * Clean the pending tx data - the pending packets in the
1415 		 * work_list that have no chance of being transmitted again.
1416 		 *
1417 		 * We must ensure the chipset is stopped or the link is down
1418 		 * before cleaning the transmit packets.
1419 		 */
1420 		desc_num = 0;
1421 		for (j = 0; j < tx_ring->ring_size; j++) {
1422 			tcb = tx_ring->work_list[j];
1423 			if (tcb != NULL) {
1424 				desc_num += tcb->desc_num;
1425 
1426 				tx_ring->work_list[j] = NULL;
1427 
1428 				ixgbe_free_tcb(tcb);
1429 
1430 				LIST_PUSH_TAIL(&pending_list, &tcb->link);
1431 			}
1432 		}
1433 
1434 		if (desc_num > 0) {
1435 			atomic_add_32(&tx_ring->tbd_free, desc_num);
1436 			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
1437 
1438 			/*
1439 			 * Reset the head and tail pointers of the tbd ring;
1440 			 * Reset the writeback head if it's enabled.
1441 			 */
1442 			tx_ring->tbd_head = 0;
1443 			tx_ring->tbd_tail = 0;
1444 			if (ixgbe->tx_head_wb_enable)
1445 				*tx_ring->tbd_head_wb = 0;
1446 
1447 			IXGBE_WRITE_REG(&ixgbe->hw,
1448 			    IXGBE_TDH(tx_ring->index), 0);
1449 			IXGBE_WRITE_REG(&ixgbe->hw,
1450 			    IXGBE_TDT(tx_ring->index), 0);
1451 		}
1452 
1453 		mutex_exit(&tx_ring->recycle_lock);
1454 
1455 		/*
1456 		 * Add the tx control blocks in the pending list to
1457 		 * the free list.
1458 		 */
1459 		ixgbe_put_free_list(tx_ring, &pending_list);
1460 	}
1461 }
1462 
1463 /*
1464  * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1465  * transmitted.
1466  */
1467 static boolean_t
1468 ixgbe_tx_drain(ixgbe_t *ixgbe)
1469 {
1470 	ixgbe_tx_ring_t *tx_ring;
1471 	boolean_t done;
1472 	int i, j;
1473 
1474 	/*
1475 	 * Wait for a specific time to allow pending tx packets
1476 	 * to be transmitted.
1477 	 *
1478 	 * Check the counter tbd_free to see if transmission is done.
1479 	 * No lock protection is needed here.
1480 	 *
1481 	 * Return B_TRUE if all pending packets have been transmitted;
1482 	 * Otherwise return B_FALSE;
1483 	 */
1484 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1485 
1486 		done = B_TRUE;
1487 		for (j = 0; j < ixgbe->num_tx_rings; j++) {
1488 			tx_ring = &ixgbe->tx_rings[j];
1489 			done = done &&
1490 			    (tx_ring->tbd_free == tx_ring->ring_size);
1491 		}
1492 
1493 		if (done)
1494 			break;
1495 
1496 		msec_delay(1);
1497 	}
1498 
1499 	return (done);
1500 }
1501 
1502 /*
1503  * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1504  */
1505 static boolean_t
1506 ixgbe_rx_drain(ixgbe_t *ixgbe)
1507 {
1508 	ixgbe_rx_ring_t *rx_ring;
1509 	boolean_t done;
1510 	int i, j;
1511 
1512 	/*
1513 	 * Poll the rx free list to check whether the rx buffers held by
1514 	 * the upper layer have been released.
1515 	 *
1516 	 * Check the counter rcb_free to see if all pending buffers are
1517 	 * released. No lock protection is needed here.
1518 	 *
1519 	 * Return B_TRUE if all pending buffers have been released;
1520 	 * Otherwise return B_FALSE;
1521 	 */
1522 	for (i = 0; i < RX_DRAIN_TIME; i++) {
1523 
1524 		done = B_TRUE;
1525 		for (j = 0; j < ixgbe->num_rx_rings; j++) {
1526 			rx_ring = &ixgbe->rx_rings[j];
1527 			done = done &&
1528 			    (rx_ring->rcb_free == rx_ring->free_list_size);
1529 		}
1530 
1531 		if (done)
1532 			break;
1533 
1534 		msec_delay(1);
1535 	}
1536 
1537 	return (done);
1538 }
1539 
1540 /*
1541  * ixgbe_start - Start the driver/chipset.
1542  */
1543 int
1544 ixgbe_start(ixgbe_t *ixgbe)
1545 {
1546 	int i;
1547 
1548 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1549 
1550 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1551 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1552 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1553 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1554 
1555 	/*
1556 	 * Start the chipset hardware
1557 	 */
1558 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1559 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1560 		goto start_failure;
1561 	}
1562 
1563 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1564 		goto start_failure;
1565 	}
1566 
1567 	/*
1568 	 * Setup the rx/tx rings
1569 	 */
1570 	ixgbe_setup_rings(ixgbe);
1571 
1572 	/*
1573 	 * Enable adapter interrupts
1574 	 * The interrupts must be enabled after the driver state is START
1575 	 */
1576 	ixgbe_enable_adapter_interrupts(ixgbe);
1577 
1578 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1579 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1580 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1581 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1582 
1583 	return (IXGBE_SUCCESS);
1584 
1585 start_failure:
1586 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1587 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1588 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1589 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1590 
1591 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1592 
1593 	return (IXGBE_FAILURE);
1594 }
1595 
1596 /*
1597  * ixgbe_stop - Stop the driver/chipset.
1598  */
1599 void
1600 ixgbe_stop(ixgbe_t *ixgbe)
1601 {
1602 	int i;
1603 
1604 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1605 
1606 	/*
1607 	 * Disable the adapter interrupts
1608 	 */
1609 	ixgbe_disable_adapter_interrupts(ixgbe);
1610 
1611 	/*
1612 	 * Drain the pending tx packets
1613 	 */
1614 	(void) ixgbe_tx_drain(ixgbe);
1615 
1616 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1617 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1618 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1619 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1620 
1621 	/*
1622 	 * Stop the chipset hardware
1623 	 */
1624 	ixgbe_chip_stop(ixgbe);
1625 
1626 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1627 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1628 	}
1629 
1630 	/*
1631 	 * Clean the pending tx data/resources
1632 	 */
1633 	ixgbe_tx_clean(ixgbe);
1634 
1635 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1636 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1637 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1638 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1639 }
1640 
1641 /*
1642  * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
1643  */
1644 static int
1645 ixgbe_alloc_rings(ixgbe_t *ixgbe)
1646 {
1647 	/*
1648 	 * Allocate memory space for rx rings
1649 	 */
1650 	ixgbe->rx_rings = kmem_zalloc(
1651 	    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
1652 	    KM_NOSLEEP);
1653 
1654 	if (ixgbe->rx_rings == NULL) {
1655 		return (IXGBE_FAILURE);
1656 	}
1657 
1658 	/*
1659 	 * Allocate memory space for tx rings
1660 	 */
1661 	ixgbe->tx_rings = kmem_zalloc(
1662 	    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
1663 	    KM_NOSLEEP);
1664 
1665 	if (ixgbe->tx_rings == NULL) {
1666 		kmem_free(ixgbe->rx_rings,
1667 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1668 		ixgbe->rx_rings = NULL;
1669 		return (IXGBE_FAILURE);
1670 	}
1671 
1672 	/*
1673 	 * Allocate memory space for rx ring groups
1674 	 */
1675 	ixgbe->rx_groups = kmem_zalloc(
1676 	    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
1677 	    KM_NOSLEEP);
1678 
1679 	if (ixgbe->rx_groups == NULL) {
1680 		kmem_free(ixgbe->rx_rings,
1681 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1682 		kmem_free(ixgbe->tx_rings,
1683 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1684 		ixgbe->rx_rings = NULL;
1685 		ixgbe->tx_rings = NULL;
1686 		return (IXGBE_FAILURE);
1687 	}
1688 
1689 	return (IXGBE_SUCCESS);
1690 }
1691 
1692 /*
1693  * ixgbe_free_rings - Free the memory space of rx/tx rings.
1694  */
1695 static void
1696 ixgbe_free_rings(ixgbe_t *ixgbe)
1697 {
1698 	if (ixgbe->rx_rings != NULL) {
1699 		kmem_free(ixgbe->rx_rings,
1700 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1701 		ixgbe->rx_rings = NULL;
1702 	}
1703 
1704 	if (ixgbe->tx_rings != NULL) {
1705 		kmem_free(ixgbe->tx_rings,
1706 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1707 		ixgbe->tx_rings = NULL;
1708 	}
1709 
1710 	if (ixgbe->rx_groups != NULL) {
1711 		kmem_free(ixgbe->rx_groups,
1712 		    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
1713 		ixgbe->rx_groups = NULL;
1714 	}
1715 }
1716 
1717 /*
1718  * ixgbe_setup_rings - Setup rx/tx rings.
1719  */
1720 static void
1721 ixgbe_setup_rings(ixgbe_t *ixgbe)
1722 {
1723 	/*
1724 	 * Setup the rx/tx rings, including the following:
1725 	 *
1726 	 * 1. Setup the descriptor ring and the control block buffers;
1727 	 * 2. Initialize necessary registers for receive/transmit;
1728 	 * 3. Initialize software pointers/parameters for receive/transmit;
1729 	 */
1730 	ixgbe_setup_rx(ixgbe);
1731 
1732 	ixgbe_setup_tx(ixgbe);
1733 }
1734 
1735 static void
1736 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
1737 {
1738 	ixgbe_t *ixgbe = rx_ring->ixgbe;
1739 	struct ixgbe_hw *hw = &ixgbe->hw;
1740 	rx_control_block_t *rcb;
1741 	union ixgbe_adv_rx_desc	*rbd;
1742 	uint32_t size;
1743 	uint32_t buf_low;
1744 	uint32_t buf_high;
1745 	uint32_t reg_val;
1746 	int i;
1747 
1748 	ASSERT(mutex_owned(&rx_ring->rx_lock));
1749 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1750 
1751 	for (i = 0; i < ixgbe->rx_ring_size; i++) {
1752 		rcb = rx_ring->work_list[i];
1753 		rbd = &rx_ring->rbd_ring[i];
1754 
1755 		rbd->read.pkt_addr = rcb->rx_buf.dma_address;
1756 		rbd->read.hdr_addr = 0;
1757 	}
1758 
1759 	/*
1760 	 * Initialize the length register
1761 	 */
1762 	size = rx_ring->ring_size * sizeof (union ixgbe_adv_rx_desc);
1763 	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->index), size);
1764 
1765 	/*
1766 	 * Initialize the base address registers
1767 	 */
1768 	buf_low = (uint32_t)rx_ring->rbd_area.dma_address;
1769 	buf_high = (uint32_t)(rx_ring->rbd_area.dma_address >> 32);
1770 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->index), buf_high);
1771 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->index), buf_low);
1772 
1773 	/*
1774 	 * Setup head & tail pointers
1775 	 */
1776 	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->index), rx_ring->ring_size - 1);
1777 	IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->index), 0);
1778 
1779 	rx_ring->rbd_next = 0;
1780 
1781 	/*
1782 	 * Note: Considering the case that the chipset is being reset
1783 	 * and there are still some buffers held by the upper layer,
1784 	 * we should not reset the values of rcb_head, rcb_tail and
1785 	 * rcb_free if the state is not IXGBE_UNKNOWN.
1786 	 */
1787 	if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
1788 		rx_ring->rcb_head = 0;
1789 		rx_ring->rcb_tail = 0;
1790 		rx_ring->rcb_free = rx_ring->free_list_size;
1791 	}
1792 
1793 	/*
1794 	 * Setup the Receive Descriptor Control Register (RXDCTL)
1795 	 * PTHRESH=32 descriptors (half the internal cache)
1796 	 * HTHRESH=0 descriptors (to minimize latency on fetch)
1797 	 * WTHRESH defaults to 1 (writeback each descriptor)
1798 	 */
1799 	reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->index));
1800 	reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */
1801 
1802 	/* Not a valid value for 82599 */
1803 	if (hw->mac.type < ixgbe_mac_82599EB) {
1804 		reg_val |= 0x0020;	/* PTHRESH = 32 descriptors */
1805 	}
1806 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->index), reg_val);
1807 
1808 	if (hw->mac.type == ixgbe_mac_82599EB) {
1809 		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1810 		reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
1811 		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
1812 	}
1813 
1814 	/*
1815 	 * Setup the Split and Replication Receive Control Register.
1816 	 * Set the rx buffer size and the advanced descriptor type.
1817 	 */
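	/*
	 * BSIZEPKT is in 1 KB units, so the 1 KB-aligned rx_buf_size
	 * computed at init time just shifts down by
	 * IXGBE_SRRCTL_BSIZEPKT_SHIFT (e.g. a 2 KB buffer encodes as 2).
	 */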
1818 	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
1819 	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1820 	reg_val |= IXGBE_SRRCTL_DROP_EN;
1821 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->index), reg_val);
1822 }
1823 
1824 static void
1825 ixgbe_setup_rx(ixgbe_t *ixgbe)
1826 {
1827 	ixgbe_rx_ring_t *rx_ring;
1828 	struct ixgbe_hw *hw = &ixgbe->hw;
1829 	ixgbe_rx_group_t *rx_group;
1830 	uint32_t reg_val;
1831 	uint32_t ring_mapping;
1832 	int i;
1833 
1834 	/* PSRTYPE must be configured for 82599 */
1835 	reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1836 	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
1837 #define	IXGBE_PSRTYPE_L2_PKT	0x00001000
1838 	reg_val |= IXGBE_PSRTYPE_L2_PKT;
1839 	reg_val |= 0xE0000000;
1840 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
1841 
1842 	/*
1843 	 * Set filter control in FCTRL to accept broadcast packets and do
1844 	 * not pass pause frames to host.  Flow control settings are already
1845 	 * in this register, so preserve them.
1846 	 */
1847 	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1848 	reg_val |= IXGBE_FCTRL_BAM;	/* broadcast accept mode */
1849 	reg_val |= IXGBE_FCTRL_DPF;	/* discard pause frames */
1850 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
1851 
1852 	/*
1853 	 * Enable the receive unit.  This must be done after filter
1854 	 * control is set in FCTRL.
1855 	 */
1856 	reg_val = (IXGBE_RXCTRL_RXEN	/* Enable Receive Unit */
1857 	    | IXGBE_RXCTRL_DMBYPS);	/* descriptor monitor bypass */
1858 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
1859 
1860 	/*
1861 	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
1862 	 */
1863 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1864 		rx_ring = &ixgbe->rx_rings[i];
1865 		ixgbe_setup_rx_ring(rx_ring);
1866 	}
1867 
1868 	/*
1869 	 * Setup rx groups.
1870 	 */
1871 	for (i = 0; i < ixgbe->num_rx_groups; i++) {
1872 		rx_group = &ixgbe->rx_groups[i];
1873 		rx_group->index = i;
1874 		rx_group->ixgbe = ixgbe;
1875 	}
1876 
1877 	/*
1878 	 * Setup the per-ring statistics mapping.
1879 	 */
1880 	ring_mapping = 0;
1881 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1882 		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
1883 		if ((i & 0x3) == 0x3) {
1884 			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i >> 2), ring_mapping);
1885 			ring_mapping = 0;
1886 		}
1887 	}
1888 	if ((i & 0x3) != 0x3)
1889 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i >> 2), ring_mapping);
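
	/*
	 * Illustration of the packing above: each RQSMR register holds
	 * four 8-bit queue-to-statistics map entries.  With 6 rx rings,
	 * rings 0-3 fill the four bytes of RQSMR(0) and rings 4-5 land
	 * in the low two bytes of RQSMR(1), written by the trailing
	 * statement that handles a partially filled register.
	 */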
1890 
1891 	/*
1892 	 * The Max Frame Size in MHADD/MAXFRS is internally increased by
1893 	 * four bytes when the packet has a VLAN field, so the value set
1894 	 * here covers the MTU, ethernet header and frame check sequence.
1895 	 * The register is named MAXFRS on 82599.
1896 	 */
1897 	reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
1898 	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
1899 	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
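
	/*
	 * Example (illustrative): with the standard 1500-byte MTU the
	 * max frame size is 1500 + 14 + 4 = 1518 bytes, shifted into
	 * the MFS field in the upper bits of MHADD.
	 */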
1900 
1901 	/*
1902 	 * Setup Jumbo Frame enable bit
1903 	 */
1904 	if (ixgbe->default_mtu > ETHERMTU) {
1905 		reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1906 		reg_val |= IXGBE_HLREG0_JUMBOEN;
1907 		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
1908 	}
1909 
1910 	/*
1911 	 * Hardware checksum settings
1912 	 */
1913 	if (ixgbe->rx_hcksum_enable) {
1914 		reg_val = IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
1915 		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
1916 	}
1917 
1918 	/*
1919 	 * Setup RSS for multiple receive queues
1920 	 */
1921 	if (ixgbe->num_rx_rings > 1)
1922 		ixgbe_setup_rss(ixgbe);
1923 }
1924 
1925 static void
1926 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
1927 {
1928 	ixgbe_t *ixgbe = tx_ring->ixgbe;
1929 	struct ixgbe_hw *hw = &ixgbe->hw;
1930 	uint32_t size;
1931 	uint32_t buf_low;
1932 	uint32_t buf_high;
1933 	uint32_t reg_val;
1934 
1935 	ASSERT(mutex_owned(&tx_ring->tx_lock));
1936 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1937 
1938 	/*
1939 	 * Initialize the length register
1940 	 */
1941 	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
1942 	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);
1943 
1944 	/*
1945 	 * Initialize the base address registers
1946 	 */
1947 	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
1948 	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
1949 	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
1950 	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);
1951 
1952 	/*
1953 	 * Setup head & tail pointers
1954 	 */
1955 	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
1956 	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);
1957 
1958 	/*
1959 	 * Setup head write-back
1960 	 */
1961 	if (ixgbe->tx_head_wb_enable) {
1962 		/*
1963 		 * The memory of the head write-back is allocated using
1964 		 * the extra tbd beyond the tail of the tbd ring.
1965 		 */
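		/*
		 * Layout sketch: tbd_area holds ring_size descriptors
		 * followed by a 4-byte word; the hardware DMAs the
		 * current head index into that word, so tx recycling
		 * can read it instead of polling descriptor status.
		 */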
1966 		tx_ring->tbd_head_wb = (uint32_t *)
1967 		    ((uintptr_t)tx_ring->tbd_area.address + size);
1968 		*tx_ring->tbd_head_wb = 0;
1969 
1970 		buf_low = (uint32_t)
1971 		    (tx_ring->tbd_area.dma_address + size);
1972 		buf_high = (uint32_t)
1973 		    ((tx_ring->tbd_area.dma_address + size) >> 32);
1974 
1975 		/* Set the head write-back enable bit */
1976 		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
1977 
1978 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
1979 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
1980 
1981 		/*
1982 		 * Turn off relaxed ordering for head write back or it will
1983 		 * cause problems with the tx recycling
1984 		 */
1985 		reg_val = IXGBE_READ_REG(hw,
1986 		    IXGBE_DCA_TXCTRL(tx_ring->index));
1987 		reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1988 		IXGBE_WRITE_REG(hw,
1989 		    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
1990 	} else {
1991 		tx_ring->tbd_head_wb = NULL;
1992 	}
1993 
1994 	tx_ring->tbd_head = 0;
1995 	tx_ring->tbd_tail = 0;
1996 	tx_ring->tbd_free = tx_ring->ring_size;
1997 
1998 	/*
1999 	 * Note: when the chipset is being reset there may still be
2000 	 * some tcbs on the pending list; in that case we should not
2001 	 * reset the values of tcb_head, tcb_tail and tcb_free
2002 	 * unless the state is IXGBE_UNKNOWN.
2003 	 */
2004 	if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
2005 		tx_ring->tcb_head = 0;
2006 		tx_ring->tcb_tail = 0;
2007 		tx_ring->tcb_free = tx_ring->free_list_size;
2008 	}
2009 
2010 	/*
2011 	 * Initialize the s/w context structure
2012 	 */
2013 	bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2014 }
2015 
2016 static void
2017 ixgbe_setup_tx(ixgbe_t *ixgbe)
2018 {
2019 	struct ixgbe_hw *hw = &ixgbe->hw;
2020 	ixgbe_tx_ring_t *tx_ring;
2021 	uint32_t reg_val;
2022 	uint32_t ring_mapping;
2023 	int i;
2024 
2025 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2026 		tx_ring = &ixgbe->tx_rings[i];
2027 		ixgbe_setup_tx_ring(tx_ring);
2028 	}
2029 
2030 	/*
2031 	 * Setup the per-ring statistics mapping.
2032 	 */
2033 	ring_mapping = 0;
2034 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2035 		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2036 		if ((i & 0x3) == 0x3) {
2037 			if (hw->mac.type >= ixgbe_mac_82599EB) {
2038 				IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2039 				    ring_mapping);
2040 			} else {
2041 				IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2042 				    ring_mapping);
2043 			}
2044 			ring_mapping = 0;
2045 		}
2046 	}
2047 	if ((i & 0x3) != 0x3)
2048 		if (hw->mac.type >= ixgbe_mac_82599EB) {
2049 			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2050 		} else {
2051 			IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2052 		}
2053 
2054 	/*
2055 	 * Enable CRC appending and TX padding (for short tx frames)
2056 	 */
2057 	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2058 	reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2059 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2060 
2061 	/*
2062 	 * Enable DMA for 82599 parts
2063 	 */
2064 	if (hw->mac.type == ixgbe_mac_82599EB) {
2065 		/* DMATXCTL.TE must be set after all Tx config is complete */
2066 		reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2067 		reg_val |= IXGBE_DMATXCTL_TE;
2068 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2069 	}
2070 
2071 	/*
2072 	 * Enable the tx queues.
2073 	 * For 82599 this must be done after DMATXCTL.TE is set.
2074 	 */
2075 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2076 		tx_ring = &ixgbe->tx_rings[i];
2077 		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2078 		reg_val |= IXGBE_TXDCTL_ENABLE;
2079 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2080 	}
2081 }
2082 
2083 /*
2084  * ixgbe_setup_rss - Setup receive-side scaling feature.
2085  */
2086 static void
2087 ixgbe_setup_rss(ixgbe_t *ixgbe)
2088 {
2089 	struct ixgbe_hw *hw = &ixgbe->hw;
2090 	uint32_t i, mrqc, rxcsum;
2091 	uint32_t random;
2092 	uint32_t reta;
2093 
2094 	/*
2095 	 * Fill out redirection table
2096 	 */
2097 	reta = 0;
2098 	for (i = 0; i < 128; i++) {
2099 		reta = (reta << 8) | (i % ixgbe->num_rx_rings);
2100 		if ((i & 3) == 3)
2101 			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2102 	}
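	/*
	 * Worked example of the fill above: four table entries are packed
	 * into each 32-bit RETA register, with the most recent entry in
	 * the low byte.  With two rx rings the entries cycle 0,1,0,1,...,
	 * so every RETA register is written as 0x00010001.
	 */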
2103 
2104 	/*
2105 	 * Fill out hash function seeds with a random constant
2106 	 */
2107 	for (i = 0; i < 10; i++) {
2108 		(void) random_get_pseudo_bytes((uint8_t *)&random,
2109 		    sizeof (uint32_t));
2110 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2111 	}
2112 
2113 	/*
2114 	 * Enable RSS & perform hash on these packet types
2115 	 */
2116 	mrqc = IXGBE_MRQC_RSSEN |
2117 	    IXGBE_MRQC_RSS_FIELD_IPV4 |
2118 	    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2119 	    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2120 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2121 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2122 	    IXGBE_MRQC_RSS_FIELD_IPV6 |
2123 	    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2124 	    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2125 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2126 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2127 
2128 	/*
2129 	 * Disable Packet Checksum to enable RSS for multiple receive queues.
2130 	 * It is an adapter hardware limitation that Packet Checksum is
2131 	 * mutually exclusive with RSS.
2132 	 */
2133 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2134 	rxcsum |= IXGBE_RXCSUM_PCSD;
2135 	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2136 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2137 }
2138 
2139 /*
2140  * ixgbe_init_unicst - Initialize the unicast addresses.
2141  */
2142 static void
2143 ixgbe_init_unicst(ixgbe_t *ixgbe)
2144 {
2145 	struct ixgbe_hw *hw = &ixgbe->hw;
2146 	uint8_t *mac_addr;
2147 	int slot;
2148 	/*
2149 	 * Here we should consider two situations:
2150 	 *
2151 	 * 1. Chipset is initialized for the first time:
2152 	 *    Clear all the multiple unicast addresses.
2153 	 *
2154 	 * 2. Chipset is reset
2155 	 *    Recover the multiple unicast addresses from the
2156 	 *    software data structure to the RAR registers.
2157 	 */
2158 	if (!ixgbe->unicst_init) {
2159 		/*
2160 		 * Initialize the multiple unicast addresses
2161 		 */
2162 		ixgbe->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
2163 		ixgbe->unicst_avail = ixgbe->unicst_total;
2164 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2165 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2166 			bzero(mac_addr, ETHERADDRL);
2167 			(void) ixgbe_set_rar(hw, slot, mac_addr, 0, 0);
2168 			ixgbe->unicst_addr[slot].mac.set = 0;
2169 		}
2170 		ixgbe->unicst_init = B_TRUE;
2171 	} else {
2172 		/* Re-configure the RAR registers */
2173 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2174 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2175 			if (ixgbe->unicst_addr[slot].mac.set == 1) {
2176 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2177 				    0, IXGBE_RAH_AV);
2178 			} else {
2179 				bzero(mac_addr, ETHERADDRL);
2180 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2181 				    0, 0);
2182 			}
2183 		}
2184 	}
2185 }
2186 
2187 /*
2188  * ixgbe_unicst_set - Set the unicast address to the specified slot.
2189  */
2190 int
2191 ixgbe_unicst_set(ixgbe_t *ixgbe, const uint8_t *mac_addr,
2192     int slot)
2193 {
2194 	struct ixgbe_hw *hw = &ixgbe->hw;
2195 
2196 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2197 
2198 	/*
2199 	 * Save the unicast address in the software data structure
2200 	 */
2201 	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
2202 
2203 	/*
2204 	 * Set the unicast address to the RAR register
2205 	 */
2206 	(void) ixgbe_set_rar(hw, slot, (uint8_t *)mac_addr, 0, IXGBE_RAH_AV);
2207 
2208 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2209 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2210 		return (EIO);
2211 	}
2212 
2213 	return (0);
2214 }
2215 
2216 /*
2217  * ixgbe_unicst_find - Find the slot for the specified unicast address.
2218  */
2219 int
2220 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
2221 {
2222 	int slot;
2223 
2224 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2225 
2226 	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2227 		if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
2228 		    mac_addr, ETHERADDRL) == 0)
2229 			return (slot);
2230 	}
2231 
2232 	return (-1);
2233 }
2234 
2235 /*
2236  * ixgbe_multicst_add - Add a multicast address.
2237  */
2238 int
2239 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2240 {
2241 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2242 
2243 	if ((multiaddr[0] & 01) == 0) {
2244 		return (EINVAL);
2245 	}
2246 
2247 	if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2248 		return (ENOENT);
2249 	}
2250 
2251 	bcopy(multiaddr,
2252 	    &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2253 	ixgbe->mcast_count++;
2254 
2255 	/*
2256 	 * Update the multicast table in the hardware
2257 	 */
2258 	ixgbe_setup_multicst(ixgbe);
2259 
2260 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2261 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2262 		return (EIO);
2263 	}
2264 
2265 	return (0);
2266 }
2267 
2268 /*
2269  * ixgbe_multicst_remove - Remove a multicast address.
2270  */
2271 int
2272 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2273 {
2274 	int i;
2275 
2276 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2277 
2278 	for (i = 0; i < ixgbe->mcast_count; i++) {
2279 		if (bcmp(multiaddr, &ixgbe->mcast_table[i],
2280 		    ETHERADDRL) == 0) {
2281 			for (i++; i < ixgbe->mcast_count; i++) {
2282 				ixgbe->mcast_table[i - 1] =
2283 				    ixgbe->mcast_table[i];
2284 			}
2285 			ixgbe->mcast_count--;
2286 			break;
2287 		}
2288 	}
2289 
2290 	/*
2291 	 * Update the multicast table in the hardware
2292 	 */
2293 	ixgbe_setup_multicst(ixgbe);
2294 
2295 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2296 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2297 		return (EIO);
2298 	}
2299 
2300 	return (0);
2301 }
2302 
2303 /*
2304  * ixgbe_setup_multicast - Setup multicast data structures.
2305  * ixgbe_setup_multicst - Setup multicast data structures.
2306  *
2307  * This routine initializes all of the multicast related structures
2308  * and saves them to the hardware registers.
2309 static void
2310 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2311 {
2312 	uint8_t *mc_addr_list;
2313 	uint32_t mc_addr_count;
2314 	struct ixgbe_hw *hw = &ixgbe->hw;
2315 
2316 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2317 
2318 	ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2319 
2320 	mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2321 	mc_addr_count = ixgbe->mcast_count;
2322 
2323 	/*
2324 	 * Update the multicast addresses to the MTA registers
2325 	 */
2326 	(void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2327 	    ixgbe_mc_table_itr);
2328 }
2329 
2330 /*
2331  * ixgbe_get_conf - Get driver configurations set in driver.conf.
2332  *
2333  * This routine gets user-configured values out of the configuration
2334  * file ixgbe.conf.
2335  *
2336  * For each configurable value, there is a minimum, a maximum, and a
2337  * default.
2338  * If user does not configure a value, use the default.
2339 	 * If user configures below the minimum, use the minimum.
2340 	 * If user configures above the maximum, use the maximum.
2341  */
2342 static void
2343 ixgbe_get_conf(ixgbe_t *ixgbe)
2344 {
2345 	struct ixgbe_hw *hw = &ixgbe->hw;
2346 	uint32_t flow_control;
2347 
2348 	/*
2349 	 * ixgbe driver supports the following user configurations:
2350 	 *
2351 	 * Jumbo frame configuration:
2352 	 *    default_mtu
2353 	 *
2354 	 * Ethernet flow control configuration:
2355 	 *    flow_control
2356 	 *
2357 	 * Multiple rings configurations:
2358 	 *    tx_queue_number
2359 	 *    tx_ring_size
2360 	 *    rx_queue_number
2361 	 *    rx_ring_size
2362 	 *
2363 	 * Call ixgbe_get_prop() to get the value for a specific
2364 	 * configuration parameter.
2365 	 */
2366 
2367 	/*
2368 	 * Jumbo frame configuration - max_frame_size controls host buffer
2369 	 * allocation, so includes MTU, ethernet header, vlan tag and
2370 	 * frame check sequence.
2371 	 */
2372 	ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
2373 	    MIN_MTU, MAX_MTU, DEFAULT_MTU);
2374 
2375 	ixgbe->max_frame_size = ixgbe->default_mtu +
2376 	    sizeof (struct ether_vlan_header) + ETHERFCSL;
2377 
2378 	/*
2379 	 * Ethernet flow control configuration
2380 	 */
2381 	flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
2382 	    ixgbe_fc_none, 3, ixgbe_fc_none);
2383 	if (flow_control == 3)
2384 		flow_control = ixgbe_fc_default;
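	/*
	 * The property values are assumed here to follow the shared-code
	 * enum ixgbe_fc_mode: 0 = none, 1 = rx pause, 2 = tx pause,
	 * 3 = full.  A request for 3 is remapped to ixgbe_fc_default
	 * above, which leaves the final mode to link negotiation.
	 */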
2385 
2386 	/*
2387 	 * fc.requested_mode is what the user requests.  After autoneg,
2388 	 * fc.current_mode will be the flow control mode that was negotiated.
2389 	 */
2390 	hw->fc.requested_mode = flow_control;
2391 
2392 	/*
2393 	 * Multiple rings configurations
2394 	 */
2395 	ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
2396 	    ixgbe->capab->min_tx_que_num,
2397 	    ixgbe->capab->max_tx_que_num,
2398 	    ixgbe->capab->def_tx_que_num);
2399 	ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
2400 	    MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
2401 
2402 	ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
2403 	    ixgbe->capab->min_rx_que_num,
2404 	    ixgbe->capab->max_rx_que_num,
2405 	    ixgbe->capab->def_rx_que_num);
2406 	ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
2407 	    MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
2408 
2409 	/*
2410 	 * Multiple groups configuration
2411 	 */
2412 	ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
2413 	    MIN_RX_GROUP_NUM, MAX_RX_GROUP_NUM, DEFAULT_RX_GROUP_NUM);
2414 
2415 	ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
2416 	    0, 1, DEFAULT_MR_ENABLE);
2417 
2418 	if (ixgbe->mr_enable == B_FALSE) {
2419 		ixgbe->num_tx_rings = 1;
2420 		ixgbe->num_rx_rings = 1;
2421 		ixgbe->num_rx_groups = 1;
2422 	}
2423 
2424 	/*
2425 	 * Tunable used to force an interrupt type. Its only use is
2426 	 * testing the lesser interrupt types.
2427 	 * 0 = don't force interrupt type
2428 	 * 1 = force interrupt type MSI-X
2429 	 * 2 = force interrupt type MSI
2430 	 * 3 = force interrupt type Legacy
2431 	 */
2432 	ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
2433 	    IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
2434 
2435 	ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
2436 	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
2437 	ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
2438 	    0, 1, DEFAULT_RX_HCKSUM_ENABLE);
2439 	ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
2440 	    0, 1, DEFAULT_LSO_ENABLE);
2441 	ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
2442 	    0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
2443 
2444 	/* Head Write Back not recommended for 82599 */
2445 	if (hw->mac.type >= ixgbe_mac_82599EB) {
2446 		ixgbe->tx_head_wb_enable = B_FALSE;
2447 	}
2448 
2449 	/*
2450 	 * ixgbe LSO needs the tx h/w checksum support.
2451 	 * LSO will be disabled if tx h/w checksum is not
2452 	 * enabled.
2453 	 */
2454 	if (ixgbe->tx_hcksum_enable == B_FALSE) {
2455 		ixgbe->lso_enable = B_FALSE;
2456 	}
2457 
2458 	ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
2459 	    MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
2460 	    DEFAULT_TX_COPY_THRESHOLD);
2461 	ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
2462 	    PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
2463 	    MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
2464 	ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
2465 	    PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
2466 	    MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
2467 	ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
2468 	    PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
2469 	    MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
2470 
2471 	ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
2472 	    MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
2473 	    DEFAULT_RX_COPY_THRESHOLD);
2474 	ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
2475 	    MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
2476 	    DEFAULT_RX_LIMIT_PER_INTR);
2477 
2478 	/*
2479 	 * Interrupt throttling is in units of 256ns on 82598, and in
2480 	 * 2 usec increments on 82599.
2481 	 */
2482 	switch (hw->mac.type) {
2483 	case ixgbe_mac_82598EB:
2484 		ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe,
2485 		    PROP_INTR_THROTTLING,
2486 		    MIN_INTR_THROTTLING, MAX_INTR_THROTTLING_82598,
2487 		    DEFAULT_INTR_THROTTLING_82598);
2488 		break;
2489 	case ixgbe_mac_82599EB:
2490 		ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe,
2491 		    PROP_INTR_THROTTLING,
2492 		    MIN_INTR_THROTTLING, MAX_INTR_THROTTLING_82599,
2493 		    DEFAULT_INTR_THROTTLING_82599);
2494 		break;
2495 	}
2496 }
2497 
2498 /*
2499  * ixgbe_get_prop - Get a property value out of the configuration file
2500  * ixgbe.conf.
2501  *
2502  * Caller provides the name of the property, a minimum value, a
2503  * maximum value, and a default value.
2504  *
2505  * Return configured value of the property, with default, minimum and
2506  * maximum properly applied.
2507  */
2508 static int
2509 ixgbe_get_prop(ixgbe_t *ixgbe,
2510     char *propname,	/* name of the property */
2511     int minval,		/* minimum acceptable value */
2512     int maxval,		/* maximum acceptable value */
2513     int defval)		/* default value */
2514 {
2515 	int value;
2516 
2517 	/*
2518 	 * Call ddi_prop_get_int() to read the conf settings
2519 	 */
2520 	value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
2521 	    DDI_PROP_DONTPASS, propname, defval);
2522 	if (value > maxval)
2523 		value = maxval;
2524 
2525 	if (value < minval)
2526 		value = minval;
2527 
2528 	return (value);
2529 }
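
/*
 * For illustration, a hypothetical ixgbe.conf fragment supplying some of
 * the properties read above (property names assumed to match the PROP_*
 * definitions, values made up):
 *
 *	default_mtu = 9000;
 *	flow_control = 3;
 *	tx_queue_number = 4;
 *	rx_queue_number = 4;
 *
 * Any configured value outside the [minval, maxval] range passed to
 * ixgbe_get_prop() is silently clamped to the nearest bound.
 */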
2530 
2531 /*
2532  * ixgbe_driver_setup_link - Use the link properties to set up the link.
2533  */
2534 int
2535 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
2536 {
2537 	struct ixgbe_mac_info *mac;
2538 	struct ixgbe_phy_info *phy;
2539 	boolean_t invalid;
2540 
2541 	mac = &ixgbe->hw.mac;
2542 	phy = &ixgbe->hw.phy;
2543 	invalid = B_FALSE;
2544 
2545 	if (ixgbe->param_adv_autoneg_cap == 1) {
2546 		mac->autoneg = B_TRUE;
2547 		phy->autoneg_advertised = 0;
2548 
2549 		/*
2550 		 * No half duplex support with 10Gb parts
2551 		 */
2552 		if (ixgbe->param_adv_10000fdx_cap == 1)
2553 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
2554 
2555 		if (ixgbe->param_adv_1000fdx_cap == 1)
2556 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
2557 
2558 		if (ixgbe->param_adv_100fdx_cap == 1)
2559 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
2560 
2561 		if (phy->autoneg_advertised == 0)
2562 			invalid = B_TRUE;
2563 	} else {
2564 		ixgbe->hw.mac.autoneg = B_FALSE;
2565 	}
2566 
2567 	if (invalid) {
2568 		ixgbe_notice(ixgbe, "Invalid link settings. Setting link to "
2569 		    "autonegotiation with full link capabilities.");
2570 		ixgbe->hw.mac.autoneg = B_TRUE;
2571 	}
2572 
2573 	if (setup_hw) {
2574 		if (ixgbe_setup_link(&ixgbe->hw) != IXGBE_SUCCESS) {
2575 			ixgbe_notice(ixgbe, "Setup link failed on this "
2576 			    "device.");
2577 			return (IXGBE_FAILURE);
2578 		}
2579 	}
2580 
2581 	return (IXGBE_SUCCESS);
2582 }
2583 
2584 /*
2585  * ixgbe_driver_link_check - Link status processing done in taskq.
2586  */
2587 static void
2588 ixgbe_driver_link_check(void *arg)
2589 {
2590 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2591 	struct ixgbe_hw *hw = &ixgbe->hw;
2592 	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
2593 	boolean_t link_up = B_FALSE;
2594 	boolean_t link_changed = B_FALSE;
2595 
2596 	mutex_enter(&ixgbe->gen_lock);
2597 
2598 	/* check for link, wait the full time */
2599 	(void) ixgbe_check_link(hw, &speed, &link_up, true);
2600 	if (link_up) {
2601 		/* Link is up, enable flow control settings */
2602 		(void) ixgbe_fc_enable(hw, 0);
2603 
2604 		/*
2605 		 * The link is up; check whether it was marked as down earlier
2606 		 */
2607 		if (ixgbe->link_state != LINK_STATE_UP) {
2608 			switch (speed) {
2609 			case IXGBE_LINK_SPEED_10GB_FULL:
2610 				ixgbe->link_speed = SPEED_10GB;
2611 				break;
2612 			case IXGBE_LINK_SPEED_1GB_FULL:
2613 				ixgbe->link_speed = SPEED_1GB;
2614 				break;
2615 			case IXGBE_LINK_SPEED_100_FULL:
2616 				ixgbe->link_speed = SPEED_100;
2617 			}
2618 			ixgbe->link_duplex = LINK_DUPLEX_FULL;
2619 			ixgbe->link_state = LINK_STATE_UP;
2620 			ixgbe->link_down_timeout = 0;
2621 			link_changed = B_TRUE;
2622 		}
2623 	} else {
2624 		if (ixgbe->link_state != LINK_STATE_DOWN) {
2625 			ixgbe->link_speed = 0;
2626 			ixgbe->link_duplex = 0;
2627 			ixgbe->link_state = LINK_STATE_DOWN;
2628 			link_changed = B_TRUE;
2629 		}
2630 
2631 		if (ixgbe->ixgbe_state & IXGBE_STARTED) {
2632 			if (ixgbe->link_down_timeout < MAX_LINK_DOWN_TIMEOUT) {
2633 				ixgbe->link_down_timeout++;
2634 			} else if (ixgbe->link_down_timeout ==
2635 			    MAX_LINK_DOWN_TIMEOUT) {
2636 				ixgbe_tx_clean(ixgbe);
2637 				ixgbe->link_down_timeout++;
2638 			}
2639 		}
2640 	}
2641 
2642 	/*
2643 	 * this is only reached after a link-status-change interrupt
2644 	 * so always get new phy state
2645 	 */
2646 	ixgbe_get_hw_state(ixgbe);
2647 
2648 	/* re-enable the interrupt, which was automasked */
2649 	ixgbe->eims |= IXGBE_EICR_LSC;
2650 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
2651 
2652 	mutex_exit(&ixgbe->gen_lock);
2653 
2654 	/* outside the gen_lock */
2655 	if (link_changed) {
2656 		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
2657 	}
2658 }
2659 
2660 /*
2661  * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
2662  */
2663 static void
2664 ixgbe_sfp_check(void *arg)
2665 {
2666 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2667 	uint32_t eicr = ixgbe->eicr;
2668 	struct ixgbe_hw *hw = &ixgbe->hw;
2669 	uint32_t autoneg;
2670 
2671 	if (eicr & IXGBE_EICR_GPI_SDP1) {
2672 		/* clear the interrupt */
2673 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
2674 
2675 		/* if link up, do multispeed fiber setup */
2676 		(void) ixgbe_get_link_capabilities(hw, &autoneg,
2677 		    &hw->mac.autoneg);
2678 		(void) ixgbe_setup_link_speed(hw, autoneg, B_TRUE, B_TRUE);
2679 		ixgbe_driver_link_check(ixgbe);
2680 	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
2681 		/* clear the interrupt */
2682 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
2683 
2684 		/* if link up, do sfp module setup */
2685 		(void) hw->mac.ops.setup_sfp(hw);
2686 
2687 		/* do multispeed fiber setup */
2688 		(void) ixgbe_get_link_capabilities(hw, &autoneg,
2689 		    &hw->mac.autoneg);
2690 		(void) ixgbe_setup_link_speed(hw, autoneg, B_TRUE, B_TRUE);
2691 		ixgbe_driver_link_check(ixgbe);
2692 	}
2693 }
2694 
2695 /*
2696  * ixgbe_local_timer - Driver watchdog function.
2697  *
2698  * This function will handle the transmit stall check, link status check and
2699  * other routines.
2700  */
2701 static void
2702 ixgbe_local_timer(void *arg)
2703 {
2704 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2705 
2706 	if (ixgbe_stall_check(ixgbe)) {
2707 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2708 		ixgbe->reset_count++;
2709 		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
2710 			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
2711 	}
2712 
2713 	ixgbe_restart_watchdog_timer(ixgbe);
2714 }
2715 
2716 /*
2717  * ixgbe_stall_check - Check for transmit stall.
2718  *
2719  * This function checks if the adapter is stalled (in transmit).
2720  *
2721  * It is called each time the watchdog timeout is invoked.
2722  * If the transmit descriptor reclaim continuously fails,
2723  * the watchdog value will increment by 1. If the watchdog
2724  * value exceeds the threshold, the ixgbe is assumed to
2725  * have stalled and needs to be reset.
2726  */
2727 static boolean_t
2728 ixgbe_stall_check(ixgbe_t *ixgbe)
2729 {
2730 	ixgbe_tx_ring_t *tx_ring;
2731 	boolean_t result;
2732 	int i;
2733 
2734 	if (ixgbe->link_state != LINK_STATE_UP)
2735 		return (B_FALSE);
2736 
2737 	/*
2738 	 * If any tx ring is stalled, we'll reset the chipset
2739 	 */
2740 	result = B_FALSE;
2741 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2742 		tx_ring = &ixgbe->tx_rings[i];
2743 		tx_ring->tx_recycle(tx_ring);
2744 
2745 		if (tx_ring->recycle_fail > 0)
2746 			tx_ring->stall_watchdog++;
2747 		else
2748 			tx_ring->stall_watchdog = 0;
2749 
2750 		if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
2751 			result = B_TRUE;
2752 			break;
2753 		}
2754 	}
2755 
2756 	if (result) {
2757 		tx_ring->stall_watchdog = 0;
2758 		tx_ring->recycle_fail = 0;
2759 	}
2760 
2761 	return (result);
2762 }
2763 
2765 /*
2766  * is_valid_mac_addr - Check if the mac address is valid.
2767  */
2768 static boolean_t
2769 is_valid_mac_addr(uint8_t *mac_addr)
2770 {
2771 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
2772 	const uint8_t addr_test2[6] =
2773 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2774 
2775 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
2776 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
2777 		return (B_FALSE);
2778 
2779 	return (B_TRUE);
2780 }
2781 
2782 static boolean_t
2783 ixgbe_find_mac_address(ixgbe_t *ixgbe)
2784 {
2785 #ifdef __sparc
2786 	struct ixgbe_hw *hw = &ixgbe->hw;
2787 	uchar_t *bytes;
2788 	struct ether_addr sysaddr;
2789 	uint_t nelts;
2790 	int err;
2791 	boolean_t found = B_FALSE;
2792 
2793 	/*
2794 	 * The "vendor's factory-set address" may already have
2795 	 * been extracted from the chip, but if the property
2796 	 * "local-mac-address" is set we use that instead.
2797 	 *
2798 	 * We check whether it looks like an array of 6
2799 	 * bytes (which it should, if OBP set it).  If we can't
2800 	 * make sense of it this way, we'll ignore it.
2801 	 */
2802 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
2803 	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
2804 	if (err == DDI_PROP_SUCCESS) {
2805 		if (nelts == ETHERADDRL) {
2806 			while (nelts--)
2807 				hw->mac.addr[nelts] = bytes[nelts];
2808 			found = B_TRUE;
2809 		}
2810 		ddi_prop_free(bytes);
2811 	}
2812 
2813 	/*
2814 	 * Look up the OBP property "local-mac-address?". If the user has set
2815 	 * 'local-mac-address? = false', use "the system address" instead.
2816 	 */
2817 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
2818 	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
2819 		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
2820 			if (localetheraddr(NULL, &sysaddr) != 0) {
2821 				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
2822 				found = B_TRUE;
2823 			}
2824 		}
2825 		ddi_prop_free(bytes);
2826 	}
2827 
2828 	/*
2829 	 * Finally(!), if there's a valid "mac-address" property (created
2830 	 * if we netbooted from this interface), we must use this instead
2831 	 * of any of the above to ensure that the NFS/install server doesn't
2832 	 * get confused by the address changing as Solaris takes over!
2833 	 */
2834 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
2835 	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
2836 	if (err == DDI_PROP_SUCCESS) {
2837 		if (nelts == ETHERADDRL) {
2838 			while (nelts--)
2839 				hw->mac.addr[nelts] = bytes[nelts];
2840 			found = B_TRUE;
2841 		}
2842 		ddi_prop_free(bytes);
2843 	}
2844 
2845 	if (found) {
2846 		bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
2847 		return (B_TRUE);
2848 	}
2849 #else
2850 	_NOTE(ARGUNUSED(ixgbe));
2851 #endif
2852 
2853 	return (B_TRUE);
2854 }
2855 
2856 #pragma inline(ixgbe_arm_watchdog_timer)
2857 static void
2858 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
2859 {
2860 	/*
2861 	 * Arm the watchdog timer to fire in one second
2862 	 */
2863 	ixgbe->watchdog_tid =
2864 	    timeout(ixgbe_local_timer,
2865 	    (void *)ixgbe, 1 * drv_usectohz(1000000));
2867 }
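
/*
 * timeout(9F) schedules a one-shot callout, so the watchdog keeps running
 * only because ixgbe_local_timer() re-arms it via
 * ixgbe_restart_watchdog_timer(); drv_usectohz(1000000) converts the
 * one-second interval into clock ticks.
 */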
2868 
2869 /*
2870  * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
2871  */
2872 void
2873 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
2874 {
2875 	mutex_enter(&ixgbe->watchdog_lock);
2876 
2877 	if (!ixgbe->watchdog_enable) {
2878 		ixgbe->watchdog_enable = B_TRUE;
2879 		ixgbe->watchdog_start = B_TRUE;
2880 		ixgbe_arm_watchdog_timer(ixgbe);
2881 	}
2882 
2883 	mutex_exit(&ixgbe->watchdog_lock);
2884 }
2885 
2886 /*
2887  * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
2888  */
2889 void
2890 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
2891 {
2892 	timeout_id_t tid;
2893 
2894 	mutex_enter(&ixgbe->watchdog_lock);
2895 
2896 	ixgbe->watchdog_enable = B_FALSE;
2897 	ixgbe->watchdog_start = B_FALSE;
2898 	tid = ixgbe->watchdog_tid;
2899 	ixgbe->watchdog_tid = 0;
2900 
2901 	mutex_exit(&ixgbe->watchdog_lock);
2902 
2903 	if (tid != 0)
2904 		(void) untimeout(tid);
2905 }
2906 
2907 /*
2908  * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
2909  */
2910 void
2911 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
2912 {
2913 	mutex_enter(&ixgbe->watchdog_lock);
2914 
2915 	if (ixgbe->watchdog_enable) {
2916 		if (!ixgbe->watchdog_start) {
2917 			ixgbe->watchdog_start = B_TRUE;
2918 			ixgbe_arm_watchdog_timer(ixgbe);
2919 		}
2920 	}
2921 
2922 	mutex_exit(&ixgbe->watchdog_lock);
2923 }
2924 
2925 /*
2926  * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
2927  */
2928 static void
2929 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
2930 {
2931 	mutex_enter(&ixgbe->watchdog_lock);
2932 
2933 	if (ixgbe->watchdog_start)
2934 		ixgbe_arm_watchdog_timer(ixgbe);
2935 
2936 	mutex_exit(&ixgbe->watchdog_lock);
2937 }
2938 
2939 /*
2940  * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
2941  */
2942 void
2943 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
2944 {
2945 	timeout_id_t tid;
2946 
2947 	mutex_enter(&ixgbe->watchdog_lock);
2948 
2949 	ixgbe->watchdog_start = B_FALSE;
2950 	tid = ixgbe->watchdog_tid;
2951 	ixgbe->watchdog_tid = 0;
2952 
2953 	mutex_exit(&ixgbe->watchdog_lock);
2954 
2955 	if (tid != 0)
2956 		(void) untimeout(tid);
2957 }
2958 
2959 /*
2960  * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
2961  */
2962 static void
2963 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
2964 {
2965 	struct ixgbe_hw *hw = &ixgbe->hw;
2966 
2967 	/*
2968 	 * mask all interrupts off
2969 	 */
2970 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
2971 
2972 	/*
2973 	 * for MSI-X, also disable autoclear
2974 	 */
2975 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
2976 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
2977 	}
2978 
2979 	IXGBE_WRITE_FLUSH(hw);
2980 }
2981 
2982 /*
2983  * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
2984  */
2985 static void
2986 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
2987 {
2988 	struct ixgbe_hw *hw = &ixgbe->hw;
2989 	uint32_t eiac, eiam;
2990 	uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2991 
2992 	/* interrupt types to enable */
2993 	ixgbe->eims = IXGBE_EIMS_ENABLE_MASK;	/* shared code default */
2994 	ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER;	/* minus tcp timer */
2995 	ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */
2996 
2997 	/* enable automask on "other" causes that this adapter can generate */
2998 	eiam = ixgbe->capab->other_intr;
2999 
3000 	/*
3001 	 * msi-x mode
3002 	 */
3003 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3004 		/* enable autoclear but not on bits 29:20 */
3005 		eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
3006 
3007 		/* general purpose interrupt enable */
3008 		gpie |= (IXGBE_GPIE_MSIX_MODE
3009 		    | IXGBE_GPIE_PBA_SUPPORT
3010 		    | IXGBE_GPIE_OCD
3011 		    | IXGBE_GPIE_EIAME);
3012 	} else {
3013 		/*
3014 		 * non-msi-x mode
3015 		 */
3016 
3017 		/* disable autoclear, leave gpie at default */
3018 		eiac = 0;
3019 
3020 		/*
3021 		 * General purpose interrupt enable.
3022 		 * For 82599, extended interrupt automask enable
3023 		 * only in MSI or MSI-X mode
3024 		 */
3025 		if ((hw->mac.type < ixgbe_mac_82599EB) ||
3026 		    (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3027 			gpie |= IXGBE_GPIE_EIAME;
3028 		}
3029 	}
3030 	/* Enable specific interrupts for 82599 */
3031 	if (hw->mac.type == ixgbe_mac_82599EB) {
3032 		gpie |= IXGBE_SDP2_GPIEN; /* pluggable optics intr */
3033 		gpie |= IXGBE_SDP1_GPIEN; /* multispeed fiber intr */
3034 	}
3035 
3036 	/* write to interrupt control registers */
3037 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3038 	IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3039 	IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3040 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3041 	IXGBE_WRITE_FLUSH(hw);
3042 }
3043 
3044 /*
3045  * ixgbe_loopback_ioctl - Loopback support.
3046  */
3047 enum ioc_reply
3048 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
3049 {
3050 	lb_info_sz_t *lbsp;
3051 	lb_property_t *lbpp;
3052 	uint32_t *lbmp;
3053 	uint32_t size;
3054 	uint32_t value;
3055 
3056 	if (mp->b_cont == NULL)
3057 		return (IOC_INVAL);
3058 
3059 	switch (iocp->ioc_cmd) {
3060 	default:
3061 		return (IOC_INVAL);
3062 
3063 	case LB_GET_INFO_SIZE:
3064 		size = sizeof (lb_info_sz_t);
3065 		if (iocp->ioc_count != size)
3066 			return (IOC_INVAL);
3067 
3068 		value = sizeof (lb_normal);
3069 		value += sizeof (lb_mac);
3070 
3071 		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
3072 		*lbsp = value;
3073 		break;
3074 
3075 	case LB_GET_INFO:
3076 		value = sizeof (lb_normal);
3077 		value += sizeof (lb_mac);
3078 
3079 		size = value;
3080 		if (iocp->ioc_count != size)
3081 			return (IOC_INVAL);
3082 
3083 		value = 0;
3084 		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
3085 
3086 		lbpp[value++] = lb_normal;
3087 		lbpp[value++] = lb_mac;
3088 		break;
3089 
3090 	case LB_GET_MODE:
3091 		size = sizeof (uint32_t);
3092 		if (iocp->ioc_count != size)
3093 			return (IOC_INVAL);
3094 
3095 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3096 		*lbmp = ixgbe->loopback_mode;
3097 		break;
3098 
3099 	case LB_SET_MODE:
3100 		size = 0;
3101 		if (iocp->ioc_count != sizeof (uint32_t))
3102 			return (IOC_INVAL);
3103 
3104 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3105 		if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
3106 			return (IOC_INVAL);
3107 		break;
3108 	}
3109 
3110 	iocp->ioc_count = size;
3111 	iocp->ioc_error = 0;
3112 
3113 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3114 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3115 		return (IOC_INVAL);
3116 	}
3117 
3118 	return (IOC_REPLY);
3119 }
3120 
3121 /*
3122  * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
3123  */
3124 static boolean_t
3125 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
3126 {
3127 	struct ixgbe_hw *hw;
3128 
3129 	if (mode == ixgbe->loopback_mode)
3130 		return (B_TRUE);
3131 
3132 	hw = &ixgbe->hw;
3133 
3134 	ixgbe->loopback_mode = mode;
3135 
3136 	if (mode == IXGBE_LB_NONE) {
3137 		/*
3138 		 * Reset the chip
3139 		 */
3140 		hw->phy.autoneg_wait_to_complete = B_TRUE;
3141 		(void) ixgbe_reset(ixgbe);
3142 		hw->phy.autoneg_wait_to_complete = B_FALSE;
3143 		return (B_TRUE);
3144 	}
3145 
3146 	mutex_enter(&ixgbe->gen_lock);
3147 
3148 	switch (mode) {
3149 	default:
3150 		mutex_exit(&ixgbe->gen_lock);
3151 		return (B_FALSE);
3152 
3153 	case IXGBE_LB_INTERNAL_MAC:
3154 		ixgbe_set_internal_mac_loopback(ixgbe);
3155 		break;
3156 	}
3157 
3158 	mutex_exit(&ixgbe->gen_lock);
3159 
3160 	return (B_TRUE);
3161 }
3162 
3163 /*
3164  * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
3165  */
3166 static void
3167 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
3168 {
3169 	struct ixgbe_hw *hw;
3170 	uint32_t reg;
3171 	uint8_t atlas;
3172 
3173 	hw = &ixgbe->hw;
3174 
3175 	/*
3176 	 * Setup MAC loopback
3177 	 */
3178 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
3179 	reg |= IXGBE_HLREG0_LPBK;
3180 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
3181 
3182 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
3183 	reg &= ~IXGBE_AUTOC_LMS_MASK;
3184 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
3185 
3186 	/*
3187 	 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
3188 	 */
3189 	if (hw->mac.type == ixgbe_mac_82598EB) {
3190 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3191 		    &atlas);
3192 		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
3193 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3194 		    atlas);
3195 
3196 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3197 		    &atlas);
3198 		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
3199 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3200 		    atlas);
3201 
3202 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3203 		    &atlas);
3204 		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
3205 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3206 		    atlas);
3207 
3208 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3209 		    &atlas);
3210 		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
3211 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3212 		    atlas);
3213 	}
3214 }
3215 
3216 #pragma inline(ixgbe_intr_rx_work)
3217 /*
3218  * ixgbe_intr_rx_work - RX processing of ISR.
3219  */
3220 static void
3221 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
3222 {
3223 	mblk_t *mp;
3224 
3225 	mutex_enter(&rx_ring->rx_lock);
3226 
3227 	mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
3228 	mutex_exit(&rx_ring->rx_lock);
3229 
3230 	if (mp != NULL)
3231 		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
3232 		    rx_ring->ring_gen_num);
3233 }
3234 
3235 #pragma inline(ixgbe_intr_tx_work)
3236 /*
3237  * ixgbe_intr_tx_work - TX processing of ISR.
3238  */
3239 static void
3240 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
3241 {
3242 	/*
3243 	 * Recycle the tx descriptors
3244 	 */
3245 	tx_ring->tx_recycle(tx_ring);
3246 
3247 	/*
3248 	 * Schedule the re-transmit
3249 	 */
3250 	if (tx_ring->reschedule &&
3251 	    (tx_ring->tbd_free >= tx_ring->resched_thresh)) {
3252 		tx_ring->reschedule = B_FALSE;
3253 		mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
3254 		    tx_ring->ring_handle);
3255 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
3256 	}
3257 }
3258 
3259 #pragma inline(ixgbe_intr_other_work)
3260 /*
3261  * ixgbe_intr_other_work - Process interrupt types other than tx/rx
3262  */
3263 static void
3264 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
3265 {
3266 	struct ixgbe_hw *hw = &ixgbe->hw;
3267 	/*
3268 	 * dispatch taskq to handle link status change
3269 	 */
3270 	if (eicr & IXGBE_EICR_LSC) {
3271 		if ((ddi_taskq_dispatch(ixgbe->lsc_taskq,
3272 		    ixgbe_driver_link_check, (void *)ixgbe, DDI_NOSLEEP))
3273 		    != DDI_SUCCESS) {
3274 			ixgbe_log(ixgbe, "Fail to dispatch taskq");
3275 		}
3276 	}
3277 
3278 	/*
3279 	 * check for fan failure on adapters with fans
3280 	 */
3281 	if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
3282 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3283 		if (hw->mac.type < ixgbe_mac_82599EB) {
3284 			ixgbe_log(ixgbe,
3285 			    "Fan has stopped, replace the adapter\n");
3286 
3287 			/* re-enable the interrupt, which was automasked */
3288 			ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
3289 		}
3290 	}
3291 
3292 	/*
3293 	 * Do SFP check for 82599
3294 	 */
3295 	if (hw->mac.type == ixgbe_mac_82599EB) {
3296 		if ((ddi_taskq_dispatch(ixgbe->lsc_taskq,
3297 		    ixgbe_sfp_check, (void *)ixgbe,
3298 		    DDI_NOSLEEP)) != DDI_SUCCESS) {
3299 			ixgbe_log(ixgbe,
3300 			    "No memory available to dispatch taskq");
3301 		}
3302 	}
3303 }
3304 
3305 /*
3306  * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
3307  */
3308 static uint_t
3309 ixgbe_intr_legacy(void *arg1, void *arg2)
3310 {
3311 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3312 	struct ixgbe_hw *hw = &ixgbe->hw;
3313 	ixgbe_tx_ring_t *tx_ring;
3314 	ixgbe_rx_ring_t *rx_ring;
3315 	uint32_t eicr;
3316 	mblk_t *mp;
3317 	boolean_t tx_reschedule;
3318 	uint_t result;
3319 
3320 	_NOTE(ARGUNUSED(arg2));
3321 
3322 	mutex_enter(&ixgbe->gen_lock);
3323 
3324 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
3325 		mutex_exit(&ixgbe->gen_lock);
3326 		return (DDI_INTR_UNCLAIMED);
3327 	}
3328 
3329 	mp = NULL;
3330 	tx_reschedule = B_FALSE;
3331 
3332 	/*
3333 	 * Any bit set in eicr: claim this interrupt
3334 	 */
3335 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3336 	if (eicr) {
3337 		/*
3338 		 * For legacy interrupt, we have only one interrupt,
3339 		 * so we have only one rx ring and one tx ring enabled.
3340 		 */
3341 		ASSERT(ixgbe->num_rx_rings == 1);
3342 		ASSERT(ixgbe->num_tx_rings == 1);
3343 
3344 		/*
3345 		 * For legacy interrupt, rx rings[0] will use RTxQ[0].
3346 		 */
3347 		if (eicr & 0x1) {
3348 			ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
3349 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
3350 			ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
3351 			/*
3352 			 * Clean the rx descriptors
3353 			 */
3354 			rx_ring = &ixgbe->rx_rings[0];
3355 			mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
3356 		}
3357 
3358 		/*
3359 		 * For legacy interrupt, tx rings[0] will use RTxQ[1].
3360 		 */
3361 		if (eicr & 0x2) {
3362 			/*
3363 			 * Recycle the tx descriptors
3364 			 */
3365 			tx_ring = &ixgbe->tx_rings[0];
3366 			tx_ring->tx_recycle(tx_ring);
3367 
3368 			/*
3369 			 * Schedule the re-transmit
3370 			 */
3371 			tx_reschedule = (tx_ring->reschedule &&
3372 			    (tx_ring->tbd_free >= tx_ring->resched_thresh));
3373 		}
3374 
3375 		/* any interrupt type other than tx/rx */
3376 		if (eicr & ixgbe->capab->other_intr) {
3377 			if (hw->mac.type < ixgbe_mac_82599EB) {
3378 				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3379 			}
3380 			if (hw->mac.type == ixgbe_mac_82599EB) {
3381 				ixgbe->eimc = IXGBE_82599_OTHER_INTR;
3382 				IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
3383 			}
3384 			ixgbe_intr_other_work(ixgbe, eicr);
3385 			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3386 		}
3387 
3388 		mutex_exit(&ixgbe->gen_lock);
3389 
3390 		result = DDI_INTR_CLAIMED;
3391 	} else {
3392 		mutex_exit(&ixgbe->gen_lock);
3393 
3394 		/*
3395 		 * No interrupt cause bits set: don't claim this interrupt.
3396 		 */
3397 		result = DDI_INTR_UNCLAIMED;
3398 	}
3399 
3400 	/* re-enable the interrupts which were automasked */
3401 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3402 
3403 	/*
3404 	 * Do the following work outside of the gen_lock
3405 	 */
3406 	if (mp != NULL) {
3407 		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
3408 		    rx_ring->ring_gen_num);
3409 	}
3410 
3411 	if (tx_reschedule)  {
3412 		tx_ring->reschedule = B_FALSE;
3413 		mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
3414 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
3415 	}
3416 
3417 	return (result);
3418 }
3419 
3420 /*
3421  * ixgbe_intr_msi - Interrupt handler for MSI.
3422  */
3423 static uint_t
3424 ixgbe_intr_msi(void *arg1, void *arg2)
3425 {
3426 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3427 	struct ixgbe_hw *hw = &ixgbe->hw;
3428 	uint32_t eicr;
3429 
3430 	_NOTE(ARGUNUSED(arg2));
3431 
3432 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3433 
3434 	/*
3435 	 * For MSI interrupt, we have only one vector,
3436 	 * so we have only one rx ring and one tx ring enabled.
3437 	 */
3438 	ASSERT(ixgbe->num_rx_rings == 1);
3439 	ASSERT(ixgbe->num_tx_rings == 1);
3440 
3441 	/*
3442 	 * For MSI interrupt, rx rings[0] will use RTxQ[0].
3443 	 */
3444 	if (eicr & 0x1) {
3445 		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
3446 	}
3447 
3448 	/*
3449 	 * For MSI interrupt, tx rings[0] will use RTxQ[1].
3450 	 */
3451 	if (eicr & 0x2) {
3452 		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
3453 	}
3454 
3455 	/* any interrupt type other than tx/rx */
3456 	if (eicr & ixgbe->capab->other_intr) {
3457 		mutex_enter(&ixgbe->gen_lock);
3458 		if (hw->mac.type < ixgbe_mac_82599EB) {
3459 			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3460 		}
3461 		if (hw->mac.type == ixgbe_mac_82599EB) {
3462 			ixgbe->eimc = IXGBE_82599_OTHER_INTR;
3463 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
3464 		}
3465 		ixgbe_intr_other_work(ixgbe, eicr);
3466 		ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3467 		mutex_exit(&ixgbe->gen_lock);
3468 	}
3469 
3470 	/* re-enable the interrupts which were automasked */
3471 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3472 
3473 	return (DDI_INTR_CLAIMED);
3474 }
3475 
3476 /*
3477  * ixgbe_intr_msix - Interrupt handler for MSI-X.
3478  */
3479 static uint_t
3480 ixgbe_intr_msix(void *arg1, void *arg2)
3481 {
3482 	ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
3483 	ixgbe_t *ixgbe = vect->ixgbe;
3484 	struct ixgbe_hw *hw = &ixgbe->hw;
3485 	uint32_t eicr;
3486 	int r_idx = 0;
3487 
3488 	_NOTE(ARGUNUSED(arg2));
3489 
3490 	/*
3491 	 * Clean each rx ring that has its bit set in the map
3492 	 */
3493 	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
3494 	while (r_idx >= 0) {
3495 		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
3496 		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
3497 		    (ixgbe->num_rx_rings - 1));
3498 	}
3499 
3500 	/*
3501 	 * Clean each tx ring that has its bit set in the map
3502 	 */
3503 	r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
3504 	while (r_idx >= 0) {
3505 		ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
3506 		r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
3507 		    (ixgbe->num_tx_rings - 1));
3508 	}
3509 
3510 
3512 	 * Clean other interrupt (link change) that has its bit set in the map
3513 	 */
3514 	if (BT_TEST(vect->other_map, 0) == 1) {
3515 		eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3516 
3517 		/*
3518 		 * Need check cause bits and only other causes will
3519 		 * Check the cause bits; only the "other" causes will
3520 		 * be processed.
3521 		/* any interrupt type other than tx/rx */
3522 		if (eicr & ixgbe->capab->other_intr) {
3523 			if (hw->mac.type < ixgbe_mac_82599EB) {
3524 				mutex_enter(&ixgbe->gen_lock);
3525 				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3526 				ixgbe_intr_other_work(ixgbe, eicr);
3527 				mutex_exit(&ixgbe->gen_lock);
3528 			} else {
3529 				if (hw->mac.type == ixgbe_mac_82599EB) {
3530 					mutex_enter(&ixgbe->gen_lock);
3531 					ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
3532 					ixgbe_intr_other_work(ixgbe, eicr);
3533 					mutex_exit(&ixgbe->gen_lock);
3534 				}
3535 			}
3536 		}
3537 
3538 		/* re-enable the interrupts which were automasked */
3539 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3540 	}
3541 
3542 	return (DDI_INTR_CLAIMED);
3543 }
3544 
3545 /*
3546  * ixgbe_alloc_intrs - Allocate interrupts for the driver.
3547  *
3548  * Normal sequence is to try MSI-X; if not successful, try MSI;
3549  * if not successful, try Legacy.
3550  * ixgbe->intr_force can be used to force sequence to start with
3551  * any of the 3 types.
3552  * If MSI-X is not used, number of tx/rx rings is forced to 1.
3553  */
3554 static int
3555 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
3556 {
3557 	dev_info_t *devinfo;
3558 	int intr_types;
3559 	int rc;
3560 
3561 	devinfo = ixgbe->dip;
3562 
3563 	/*
3564 	 * Get supported interrupt types
3565 	 */
3566 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
3567 
3568 	if (rc != DDI_SUCCESS) {
3569 		ixgbe_log(ixgbe,
3570 		    "Get supported interrupt types failed: %d", rc);
3571 		return (IXGBE_FAILURE);
3572 	}
3573 	IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
3574 
3575 	ixgbe->intr_type = 0;
3576 
3577 	/*
3578 	 * Install MSI-X interrupts
3579 	 */
3580 	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
3581 	    (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
3582 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
3583 		if (rc == IXGBE_SUCCESS)
3584 			return (IXGBE_SUCCESS);
3585 
3586 		ixgbe_log(ixgbe,
3587 		    "Allocate MSI-X failed, trying MSI interrupts...");
3588 	}
3589 
3590 	/*
3591 	 * MSI-X not used, force rings and groups to 1
3592 	 */
3593 	ixgbe->num_rx_rings = 1;
3594 	ixgbe->num_rx_groups = 1;
3595 	ixgbe->num_tx_rings = 1;
3596 	ixgbe_log(ixgbe,
3597 	    "MSI-X not used, forcing rings and groups to 1");
3598 
3599 	/*
3600 	 * Install MSI interrupts
3601 	 */
3602 	if ((intr_types & DDI_INTR_TYPE_MSI) &&
3603 	    (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
3604 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
3605 		if (rc == IXGBE_SUCCESS)
3606 			return (IXGBE_SUCCESS);
3607 
3608 		ixgbe_log(ixgbe,
3609 		    "Allocate MSI failed, trying Legacy interrupts...");
3610 	}
3611 
3612 	/*
3613 	 * Install legacy interrupts
3614 	 */
3615 	if (intr_types & DDI_INTR_TYPE_FIXED) {
3616 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
3617 		if (rc == IXGBE_SUCCESS)
3618 			return (IXGBE_SUCCESS);
3619 
3620 		ixgbe_log(ixgbe,
3621 		    "Allocate Legacy interrupts failed");
3622 	}
3623 
3624 	/*
3625 	 * If none of the 3 types succeeded, return failure
3626 	 */
3627 	return (IXGBE_FAILURE);
3628 }
3629 
3630 /*
3631  * ixgbe_alloc_intr_handles - Allocate interrupt handles.
3632  *
3633  * For legacy and MSI, only 1 handle is needed.  For MSI-X,
3634  * if fewer than 2 handles are available, return failure.
3635  * Upon success, this maps the vectors to rx and tx rings for
3636  * interrupts.
3637  */
3638 static int
3639 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
3640 {
3641 	dev_info_t *devinfo;
3642 	int request, count, avail, actual;
3643 	int minimum;
3644 	int rc;
3645 
3646 	devinfo = ixgbe->dip;
3647 
3648 	switch (intr_type) {
3649 	case DDI_INTR_TYPE_FIXED:
3650 		request = 1;	/* Request 1 legacy interrupt handle */
3651 		minimum = 1;
3652 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
3653 		break;
3654 
3655 	case DDI_INTR_TYPE_MSI:
3656 		request = 1;	/* Request 1 MSI interrupt handle */
3657 		minimum = 1;
3658 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
3659 		break;
3660 
3661 	case DDI_INTR_TYPE_MSIX:
3662 		/*
3663 		 * Best number of vectors for the adapter is
3664 		 * # rx rings + # tx rings.
3665 		 */
3666 		request = ixgbe->num_rx_rings + ixgbe->num_tx_rings;
3667 		if (request > ixgbe->capab->max_ring_vect)
3668 			request = ixgbe->capab->max_ring_vect;
3669 		minimum = 2;
3670 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
3671 		break;
3672 
3673 	default:
3674 		ixgbe_log(ixgbe,
3675 		    "invalid call to ixgbe_alloc_intr_handles(): %d\n",
3676 		    intr_type);
3677 		return (IXGBE_FAILURE);
3678 	}
3679 	IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d  minimum: %d",
3680 	    request, minimum);
3681 
3682 	/*
3683 	 * Get number of supported interrupts
3684 	 */
3685 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
3686 	if ((rc != DDI_SUCCESS) || (count < minimum)) {
3687 		ixgbe_log(ixgbe,
3688 		    "Get interrupt number failed. Return: %d, count: %d",
3689 		    rc, count);
3690 		return (IXGBE_FAILURE);
3691 	}
3692 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
3693 
3694 	/*
3695 	 * Get number of available interrupts
3696 	 */
3697 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
3698 	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
3699 		ixgbe_log(ixgbe,
3700 		    "Get interrupt available number failed. "
3701 		    "Return: %d, available: %d", rc, avail);
3702 		return (IXGBE_FAILURE);
3703 	}
3704 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts available: %d", avail);
3705 
3706 	if (avail < request) {
3707 		ixgbe_log(ixgbe, "Request %d handles, %d available",
3708 		    request, avail);
3709 		request = avail;
3710 	}
3711 
3712 	actual = 0;
3713 	ixgbe->intr_cnt = 0;
3714 
3715 	/*
3716 	 * Allocate an array of interrupt handles
3717 	 */
3718 	ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
3719 	ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
3720 
3721 	rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
3722 	    request, &actual, DDI_INTR_ALLOC_NORMAL);
3723 	if (rc != DDI_SUCCESS) {
3724 		ixgbe_log(ixgbe, "Allocate interrupts failed. "
3725 		    "return: %d, request: %d, actual: %d",
3726 		    rc, request, actual);
3727 		goto alloc_handle_fail;
3728 	}
3729 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
3730 
3731 	ixgbe->intr_cnt = actual;
3732 
3733 	/*
3734 	 * Now we know the actual number of vectors.  Make sure it meets
3735 	 * the minimum before mapping vectors to the rings and other causes.
3736 	 */
3737 	if (actual < minimum) {
3738 		ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
3739 		    actual);
3740 		goto alloc_handle_fail;
3741 	}
3742 
3743 	/*
3744 	 * Get priority for first vector, assume remaining are all the same
3745 	 */
3746 	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
3747 	if (rc != DDI_SUCCESS) {
3748 		ixgbe_log(ixgbe,
3749 		    "Get interrupt priority failed: %d", rc);
3750 		goto alloc_handle_fail;
3751 	}
3752 
3753 	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
3754 	if (rc != DDI_SUCCESS) {
3755 		ixgbe_log(ixgbe,
3756 		    "Get interrupt cap failed: %d", rc);
3757 		goto alloc_handle_fail;
3758 	}
3759 
3760 	ixgbe->intr_type = intr_type;
3761 
3762 	return (IXGBE_SUCCESS);
3763 
3764 alloc_handle_fail:
3765 	ixgbe_rem_intrs(ixgbe);
3766 
3767 	return (IXGBE_FAILURE);
3768 }
3769 
3770 /*
3771  * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
3772  *
3773  * Before adding the interrupt handlers, the interrupt vectors have
3774  * been allocated, and the rx/tx rings have also been allocated.
3775  */
3776 static int
3777 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
3778 {
3779 	int vector = 0;
3780 	int rc;
3781 
3782 	switch (ixgbe->intr_type) {
3783 	case DDI_INTR_TYPE_MSIX:
3784 		/*
3785 		 * Add interrupt handler for all vectors
3786 		 */
3787 		for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
3788 			/*
3789 			 * install pointer to vect_map[vector]
3790 			 */
3791 			rc = ddi_intr_add_handler(ixgbe->htable[vector],
3792 			    (ddi_intr_handler_t *)ixgbe_intr_msix,
3793 			    (void *)&ixgbe->vect_map[vector], NULL);
3794 
3795 			if (rc != DDI_SUCCESS) {
3796 				ixgbe_log(ixgbe,
3797 				    "Add MSI-X interrupt handler failed. "
3798 				    "return: %d, vector: %d", rc, vector);
3799 				for (vector--; vector >= 0; vector--) {
3800 					(void) ddi_intr_remove_handler(
3801 					    ixgbe->htable[vector]);
3802 				}
3803 				return (IXGBE_FAILURE);
3804 			}
3805 		}
3806 
3807 		break;
3808 
3809 	case DDI_INTR_TYPE_MSI:
3810 		/*
3811 		 * Add interrupt handlers for the only vector
3812 		 */
3813 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
3814 		    (ddi_intr_handler_t *)ixgbe_intr_msi,
3815 		    (void *)ixgbe, NULL);
3816 
3817 		if (rc != DDI_SUCCESS) {
3818 			ixgbe_log(ixgbe,
3819 			    "Add MSI interrupt handler failed: %d", rc);
3820 			return (IXGBE_FAILURE);
3821 		}
3822 
3823 		break;
3824 
3825 	case DDI_INTR_TYPE_FIXED:
3826 		/*
3827 		 * Add interrupt handlers for the only vector
3828 		 */
3829 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
3830 		    (ddi_intr_handler_t *)ixgbe_intr_legacy,
3831 		    (void *)ixgbe, NULL);
3832 
3833 		if (rc != DDI_SUCCESS) {
3834 			ixgbe_log(ixgbe,
3835 			    "Add legacy interrupt handler failed: %d", rc);
3836 			return (IXGBE_FAILURE);
3837 		}
3838 
3839 		break;
3840 
3841 	default:
3842 		return (IXGBE_FAILURE);
3843 	}
3844 
3845 	return (IXGBE_SUCCESS);
3846 }
3847 
3848 #pragma inline(ixgbe_map_rxring_to_vector)
3849 /*
3850  * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
3851  */
3852 static void
3853 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
3854 {
3855 	/*
3856 	 * Set bit in map
3857 	 */
3858 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
3859 
3860 	/*
3861 	 * Count bits set
3862 	 */
3863 	ixgbe->vect_map[v_idx].rxr_cnt++;
3864 
3865 	/*
3866 	 * Remember bit position
3867 	 */
3868 	ixgbe->rx_rings[r_idx].intr_vector = v_idx;
3869 	ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
3870 }
3871 
3872 #pragma inline(ixgbe_map_txring_to_vector)
3873 /*
3874  * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
3875  */
3876 static void
3877 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
3878 {
3879 	/*
3880 	 * Set bit in map
3881 	 */
3882 	BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
3883 
3884 	/*
3885 	 * Count bits set
3886 	 */
3887 	ixgbe->vect_map[v_idx].txr_cnt++;
3888 
3889 	/*
3890 	 * Remember bit position
3891 	 */
3892 	ixgbe->tx_rings[t_idx].intr_vector = v_idx;
3893 	ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
3894 }
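
/*
 * Example of the bookkeeping done by the two helpers above: mapping tx
 * ring 2 to vector 3 sets bit 2 in vect_map[3].tx_map, increments
 * txr_cnt, and records intr_vector = 3 and vect_bit = 0x8 (1 << 3) on
 * the ring itself.
 */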
3895 
3896 /*
3897  * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
3898  * allocation register (IVAR).
3899  * cause:
3900  *   -1 : other cause
3901  *    0 : rx
3902  *    1 : tx
3903  */
static void
ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
    int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (cause == -1) {
			cause = 0;
		}
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
		ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
		if (cause == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;
	default:
		break;
	}
}

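/*
 * The enable/disable helpers below toggle only IXGBE_IVAR_ALLOC_VAL (the
 * per-entry "valid" bit of the 8-bit IVAR field), leaving the programmed
 * vector number intact.  This is what lets rx ring interrupts be switched
 * on and off cheaply from ixgbe_rx_ring_intr_enable/disable() for the MAC
 * layer's polling support.
 */
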
/*
 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
 * the given interrupt vector allocation register (IVAR).
 * cause:
 *   -1 : other cause
 *    0 : rx
 *    1 : tx
 */
static void
ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (cause == -1) {
			cause = 0;
		}
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
		    (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
		if (cause == -1) {
			/* other causes */
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes */
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;
	default:
		break;
	}
}

/*
 * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
 * the given interrupt vector allocation register (IVAR).
 * cause:
 *   -1 : other cause
 *    0 : rx
 *    1 : tx
 */
static void
ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (cause == -1) {
			cause = 0;
		}
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
		    (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
		if (cause == -1) {
			/* other causes */
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes */
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;
	default:
		break;
	}
}

/*
 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
 *
 * For MSI-X, this maps the rx interrupts, tx interrupts, and the other
 * interrupt to vectors [0 .. intr_cnt - 1].
 */
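/*
 * For example (a hypothetical walk-through of the loops below), with two
 * rx rings, two tx rings and intr_cnt == 3, the round-robin assignment
 * works out to:
 *	vector 0: other, tx ring 0
 *	vector 1: rx ring 0, tx ring 1
 *	vector 2: rx ring 1
 */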
static int
ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
{
	int i, vector = 0;

	/* initialize vector map */
	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
	for (i = 0; i < ixgbe->intr_cnt; i++) {
		ixgbe->vect_map[i].ixgbe = ixgbe;
	}

	/*
	 * The non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
	 * tx rings[0] on RTxQ[1].
	 */
	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
		ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
		ixgbe_map_txring_to_vector(ixgbe, 0, 1);
		return (IXGBE_SUCCESS);
	}

	/*
	 * Interrupts/vectors mapping for MSI-X
	 */

	/*
	 * Map other interrupt to vector 0.
	 * Set bit in map and count the bits set.
	 */
	BT_SET(ixgbe->vect_map[vector].other_map, 0);
	ixgbe->vect_map[vector].other_cnt++;
	vector++;

	/*
	 * Map rx ring interrupts to vectors
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		ixgbe_map_rxring_to_vector(ixgbe, i, vector);
		vector = (vector + 1) % ixgbe->intr_cnt;
	}

	/*
	 * Map tx ring interrupts to vectors
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		ixgbe_map_txring_to_vector(ixgbe, i, vector);
		vector = (vector + 1) % ixgbe->intr_cnt;
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
 *
 * This relies on the ring/vector mapping already set up in the
 * vect_map[] structures.
 */
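/*
 * bt_getlowbit() returns the lowest bit set within the given range of the
 * bitmap, or -1 when no bit is left, so the loops below visit every rx/tx
 * ring that ixgbe_map_intrs_to_vectors() assigned to each vector.
 */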
static void
ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_intr_vector_t *vect;	/* vector bitmap */
	int r_idx;	/* ring index */
	int v_idx;	/* vector index */

	/*
	 * Clear any previous entries
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		for (v_idx = 0; v_idx < 25; v_idx++)
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);

		break;
	case ixgbe_mac_82599EB:
		for (v_idx = 0; v_idx < 64; v_idx++)
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);

		break;
	default:
		break;
	}

	/*
	 * For non-MSI-X interrupts, rx rings[0] will use RTxQ[0], and
	 * tx rings[0] will use RTxQ[1].
	 */
	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
		ixgbe_setup_ivar(ixgbe, 0, 0, 0);
		ixgbe_setup_ivar(ixgbe, 0, 1, 1);
		return;
	}

	/*
	 * For MSI-X interrupts, "Other" is always on vector[0].
	 */
	ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);

	/*
	 * For each interrupt vector, populate the IVAR table
	 */
	for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
		vect = &ixgbe->vect_map[v_idx];

		/*
		 * For each rx ring bit set
		 */
		r_idx = bt_getlowbit(vect->rx_map, 0,
		    (ixgbe->num_rx_rings - 1));

		while (r_idx >= 0) {
			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 0);
			r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
			    (ixgbe->num_rx_rings - 1));
		}

		/*
		 * For each tx ring bit set
		 */
		r_idx = bt_getlowbit(vect->tx_map, 0,
		    (ixgbe->num_tx_rings - 1));

		while (r_idx >= 0) {
			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
			r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
			    (ixgbe->num_tx_rings - 1));
		}
	}
}

/*
 * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
 */
static void
ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
{
	int i;
	int rc;

	for (i = 0; i < ixgbe->intr_cnt; i++) {
		rc = ddi_intr_remove_handler(ixgbe->htable[i]);
		if (rc != DDI_SUCCESS) {
			IXGBE_DEBUGLOG_1(ixgbe,
			    "Remove intr handler failed: %d", rc);
		}
	}
}

/*
 * ixgbe_rem_intrs - Remove the allocated interrupts.
 */
static void
ixgbe_rem_intrs(ixgbe_t *ixgbe)
{
	int i;
	int rc;

	for (i = 0; i < ixgbe->intr_cnt; i++) {
		rc = ddi_intr_free(ixgbe->htable[i]);
		if (rc != DDI_SUCCESS) {
			IXGBE_DEBUGLOG_1(ixgbe,
			    "Free intr failed: %d", rc);
		}
	}

	kmem_free(ixgbe->htable, ixgbe->intr_size);
	ixgbe->htable = NULL;
}

/*
 * ixgbe_enable_intrs - Enable all the ddi interrupts.
 */
static int
ixgbe_enable_intrs(ixgbe_t *ixgbe)
{
	int i;
	int rc;

	/*
	 * Enable interrupts
	 */
	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/*
		 * Call ddi_intr_block_enable() when the interrupts must
		 * be enabled as a block (typically MSI)
		 */
		rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Enable block intr failed: %d", rc);
			return (IXGBE_FAILURE);
		}
	} else {
		/*
		 * Call ddi_intr_enable() for legacy/MSI non-block enable
		 */
		for (i = 0; i < ixgbe->intr_cnt; i++) {
			rc = ddi_intr_enable(ixgbe->htable[i]);
			if (rc != DDI_SUCCESS) {
				ixgbe_log(ixgbe,
				    "Enable intr failed: %d", rc);
				return (IXGBE_FAILURE);
			}
		}
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_disable_intrs - Disable all the interrupts.
 */
static int
ixgbe_disable_intrs(ixgbe_t *ixgbe)
{
	int i;
	int rc;

	/*
	 * Disable all interrupts
	 */
	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
		rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Disable block intr failed: %d", rc);
			return (IXGBE_FAILURE);
		}
	} else {
		for (i = 0; i < ixgbe->intr_cnt; i++) {
			rc = ddi_intr_disable(ixgbe->htable[i]);
			if (rc != DDI_SUCCESS) {
				ixgbe_log(ixgbe,
				    "Disable intr failed: %d", rc);
				return (IXGBE_FAILURE);
			}
		}
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
 */
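/*
 * When the link is up, PCS1GANLP reflects the link partner's advertised
 * abilities on the 1G PCS and PCS1GANA our own advertisement.  Note that
 * both the 1000fdx and 100fdx link-partner parameters are derived from
 * the same LPFD flag below.
 */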
static void
ixgbe_get_hw_state(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
	boolean_t link_up = B_FALSE;
	uint32_t pcs1g_anlp = 0;
	uint32_t pcs1g_ana = 0;

	ASSERT(mutex_owned(&ixgbe->gen_lock));
	ixgbe->param_lp_1000fdx_cap = 0;
	ixgbe->param_lp_100fdx_cap = 0;

	/* check for link, don't wait */
	(void) ixgbe_check_link(hw, &speed, &link_up, false);
	if (link_up) {
		pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
		pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

		ixgbe->param_lp_1000fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
		ixgbe->param_lp_100fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
	}

	ixgbe->param_1000fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0;
	ixgbe->param_100fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0;
}

/*
 * ixgbe_get_driver_control - Notify that driver is in control of device.
 */
static void
ixgbe_get_driver_control(struct ixgbe_hw *hw)
{
	uint32_t ctrl_ext;

	/*
	 * Notify firmware that driver is in control of device
	 */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
}

/*
 * ixgbe_release_driver_control - Notify that driver is no longer in control
 * of device.
 */
static void
ixgbe_release_driver_control(struct ixgbe_hw *hw)
{
	uint32_t ctrl_ext;

	/*
	 * Notify firmware that driver is no longer in control of device
	 */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
}

/*
 * ixgbe_atomic_reserve - Atomic decrease operation.
 */
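/*
 * This is a classic lock-free reserve: re-read the count and retry the
 * compare-and-swap until it succeeds, failing fast (returning -1 with the
 * count unchanged) whenever fewer than n units remain.
 */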
int
ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
{
	uint32_t oldval;
	uint32_t newval;

	/*
	 * ATOMICALLY
	 */
	do {
		oldval = *count_p;
		if (oldval < n)
			return (-1);
		newval = oldval - n;
	} while (atomic_cas_32(count_p, oldval, newval) != oldval);

	return (newval);
}

/*
 * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
 */
static uint8_t *
ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
{
	uint8_t *addr = *upd_ptr;
	uint8_t *new_ptr;

	_NOTE(ARGUNUSED(hw));
	_NOTE(ARGUNUSED(vmdq));

	new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*upd_ptr = new_ptr;
	return (addr);
}

/*
 * FMA support
 */
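/*
 * The two check routines below fetch the FM error status accumulated on a
 * handle.  Note that the access-handle variant also clears the status
 * after reading it, while the DMA variant only reads it.
 */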
int
ixgbe_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
	return (de.fme_status);
}

int
ixgbe_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
	return (de.fme_status);
}

/*
 * ixgbe_fm_error_cb - The IO fault service error handling callback function.
 */
static int
ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	_NOTE(ARGUNUSED(impl_data));
	/*
	 * As the driver can always deal with an error in any DMA or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

static void
ixgbe_fm_init(ixgbe_t *ixgbe)
{
	ddi_iblock_cookie_t iblk;
	int fma_acc_flag, fma_dma_flag;

	/*
	 * Only register with IO Fault Services if we have some capability
	 */
	if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
		ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		fma_acc_flag = 1;
	} else {
		ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		fma_acc_flag = 0;
	}

	if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
		fma_dma_flag = 1;
	} else {
		fma_dma_flag = 0;
	}

	ixgbe_set_fma_flags(fma_acc_flag, fma_dma_flag);

	if (ixgbe->fm_capabilities) {

		/*
		 * Register capabilities with IO Fault Services
		 */
		ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			pci_ereport_setup(ixgbe->dip);

		/*
		 * Register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			ddi_fm_handler_register(ixgbe->dip,
			    ixgbe_fm_error_cb, (void *)ixgbe);
	}
}

static void
ixgbe_fm_fini(ixgbe_t *ixgbe)
{
	/*
	 * Only unregister FMA capabilities if they are registered
	 */
	if (ixgbe->fm_capabilities) {

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			pci_ereport_teardown(ixgbe->dip);

		/*
		 * Un-register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			ddi_fm_handler_unregister(ixgbe->dip);

		/*
		 * Unregister from IO Fault Service
		 */
		ddi_fm_fini(ixgbe->dip);
	}
}

void
ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
		ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}

static int
ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;

	mutex_enter(&rx_ring->rx_lock);
	rx_ring->ring_gen_num = mr_gen_num;
	mutex_exit(&rx_ring->rx_lock);
	return (0);
}

/*
 * Callback function for MAC layer to register all rings.
 */
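/*
 * For rx rings we hand the MAC layer both a poll entry point (mri_poll)
 * and an interrupt enable/disable pair, which is what allows it to switch
 * a ring between interrupt-driven and polled operation; tx rings only
 * need the transmit entry point.
 */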
/* ARGSUSED */
void
ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	mac_intr_t *mintr = &infop->mri_intr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		ASSERT(rg_index == 0);
		ASSERT(ring_index < ixgbe->num_rx_rings);

		ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[ring_index];
		rx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = ixgbe_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = ixgbe_ring_rx_poll;

		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = ixgbe_rx_ring_intr_enable;
		mintr->mi_disable = ixgbe_rx_ring_intr_disable;

		break;
	}
	case MAC_RING_TYPE_TX: {
		ASSERT(rg_index == -1);
		ASSERT(ring_index < ixgbe->num_tx_rings);

		ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
		tx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)tx_ring;
		infop->mri_start = NULL;
		infop->mri_stop = NULL;
		infop->mri_tx = ixgbe_ring_tx;

		break;
	}
	default:
		break;
	}
}

/*
 * Callback function for MAC layer to register all groups.
 */
void
ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		ixgbe_rx_group_t *rx_group;

		rx_group = &ixgbe->rx_groups[index];
		rx_group->group_handle = gh;

		infop->mgi_driver = (mac_group_driver_t)rx_group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = ixgbe_addmac;
		infop->mgi_remmac = ixgbe_remmac;
		infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);

		break;
	}
	case MAC_RING_TYPE_TX:
		break;
	default:
		break;
	}
}

/*
 * Enable interrupt on the specified rx ring.
 */
int
ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	int r_idx = rx_ring->index;
	int v_idx = rx_ring->intr_vector;

	mutex_enter(&ixgbe->gen_lock);
	ASSERT(BT_TEST(ixgbe->vect_map[v_idx].rx_map, r_idx) == 0);

	/*
	 * Enable the interrupt by setting the VAL bit of the given
	 * interrupt vector allocation register (IVAR).
	 */
	ixgbe_enable_ivar(ixgbe, r_idx, 0);

	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
	mutex_exit(&ixgbe->gen_lock);

	return (0);
}

/*
 * Disable interrupt on the specified rx ring.
 */
int
ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	int r_idx = rx_ring->index;
	int v_idx = rx_ring->intr_vector;

	mutex_enter(&ixgbe->gen_lock);

	ASSERT(BT_TEST(ixgbe->vect_map[v_idx].rx_map, r_idx) == 1);

	/*
	 * Disable the interrupt by clearing the VAL bit of the given
	 * interrupt vector allocation register (IVAR).
	 */
	ixgbe_disable_ivar(ixgbe, r_idx, 0);

	BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}

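/*
 * ixgbe_addmac() and ixgbe_remmac() below back the mgi_addmac/mgi_remmac
 * entry points registered in ixgbe_fill_group(): a slot is claimed from
 * the unicast address table on add, and released (by programming a zeroed
 * address into it) on remove.
 */
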
/*
 * Add a MAC address.
 */
static int
ixgbe_addmac(void *arg, const uint8_t *mac_addr)
{
	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
	ixgbe_t *ixgbe = rx_group->ixgbe;
	int slot;
	int err;

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (ECANCELED);
	}

	if (ixgbe->unicst_avail == 0) {
		/* no slots available */
		mutex_exit(&ixgbe->gen_lock);
		return (ENOSPC);
	}

	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
		if (ixgbe->unicst_addr[slot].mac.set == 0)
			break;
	}

	ASSERT((slot >= 0) && (slot < ixgbe->unicst_total));

	if ((err = ixgbe_unicst_set(ixgbe, mac_addr, slot)) == 0) {
		ixgbe->unicst_addr[slot].mac.set = 1;
		ixgbe->unicst_avail--;
	}

	mutex_exit(&ixgbe->gen_lock);

	return (err);
}

/*
 * Remove a MAC address.
 */
static int
ixgbe_remmac(void *arg, const uint8_t *mac_addr)
{
	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
	ixgbe_t *ixgbe = rx_group->ixgbe;
	int slot;
	int err;

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (ECANCELED);
	}

	slot = ixgbe_unicst_find(ixgbe, mac_addr);
	if (slot == -1) {
		mutex_exit(&ixgbe->gen_lock);
		return (EINVAL);
	}

	if (ixgbe->unicst_addr[slot].mac.set == 0) {
		mutex_exit(&ixgbe->gen_lock);
		return (EINVAL);
	}

	bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
	if ((err = ixgbe_unicst_set(ixgbe,
	    ixgbe->unicst_addr[slot].mac.addr, slot)) == 0) {
		ixgbe->unicst_addr[slot].mac.set = 0;
		ixgbe->unicst_avail++;
	}

	mutex_exit(&ixgbe->gen_lock);

	return (err);
}