xref: /titanic_52/usr/src/uts/common/io/ixgbe/ixgbe_main.c (revision 71269a2275bf5a143dad6461eee2710a344e7261)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
24  */
25 
26 /*
27  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
28  * Use is subject to license terms.
29  */
30 
31 
32 #include "ixgbe_sw.h"
33 
/* Module identification string reported through modinfo(8). */
static char ident[] = "Intel 10Gb Ethernet";
35 
/*
 * Local function prototypes
 */
39 static int ixgbe_register_mac(ixgbe_t *);
40 static int ixgbe_identify_hardware(ixgbe_t *);
41 static int ixgbe_regs_map(ixgbe_t *);
42 static void ixgbe_init_properties(ixgbe_t *);
43 static int ixgbe_init_driver_settings(ixgbe_t *);
44 static void ixgbe_init_locks(ixgbe_t *);
45 static void ixgbe_destroy_locks(ixgbe_t *);
46 static int ixgbe_init(ixgbe_t *);
47 static int ixgbe_chip_start(ixgbe_t *);
48 static void ixgbe_chip_stop(ixgbe_t *);
49 static int ixgbe_reset(ixgbe_t *);
50 static void ixgbe_tx_clean(ixgbe_t *);
51 static boolean_t ixgbe_tx_drain(ixgbe_t *);
52 static boolean_t ixgbe_rx_drain(ixgbe_t *);
53 static int ixgbe_alloc_rings(ixgbe_t *);
54 static int ixgbe_init_rings(ixgbe_t *);
55 static void ixgbe_free_rings(ixgbe_t *);
56 static void ixgbe_fini_rings(ixgbe_t *);
57 static void ixgbe_setup_rings(ixgbe_t *);
58 static void ixgbe_setup_rx(ixgbe_t *);
59 static void ixgbe_setup_tx(ixgbe_t *);
60 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
61 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
62 static void ixgbe_setup_rss(ixgbe_t *);
63 static void ixgbe_init_unicst(ixgbe_t *);
64 static void ixgbe_setup_multicst(ixgbe_t *);
65 static void ixgbe_get_hw_state(ixgbe_t *);
66 static void ixgbe_get_conf(ixgbe_t *);
67 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
68 static boolean_t ixgbe_driver_link_check(ixgbe_t *);
69 static void ixgbe_local_timer(void *);
70 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
71 static void ixgbe_start_watchdog_timer(ixgbe_t *);
72 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
73 static void ixgbe_stop_watchdog_timer(ixgbe_t *);
74 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
75 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
76 static boolean_t is_valid_mac_addr(uint8_t *);
77 static boolean_t ixgbe_stall_check(ixgbe_t *);
78 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
79 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
80 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
81 static int ixgbe_alloc_intrs(ixgbe_t *);
82 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
83 static int ixgbe_add_intr_handlers(ixgbe_t *);
84 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
85 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
86 static void ixgbe_set_ivar(ixgbe_t *, uint16_t, uint8_t);
87 static int ixgbe_map_rings_to_vectors(ixgbe_t *);
88 static void ixgbe_setup_adapter_vector(ixgbe_t *);
89 static void ixgbe_rem_intr_handlers(ixgbe_t *);
90 static void ixgbe_rem_intrs(ixgbe_t *);
91 static int ixgbe_enable_intrs(ixgbe_t *);
92 static int ixgbe_disable_intrs(ixgbe_t *);
93 static uint_t ixgbe_intr_legacy(void *, void *);
94 static uint_t ixgbe_intr_msi(void *, void *);
95 static uint_t ixgbe_intr_rx(void *, void *);
96 static uint_t ixgbe_intr_tx_other(void *, void *);
97 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
98 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
99 static void ixgbe_intr_other_work(ixgbe_t *);
100 static void ixgbe_get_driver_control(struct ixgbe_hw *);
101 static void ixgbe_release_driver_control(struct ixgbe_hw *);
102 
103 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
104 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
105 static int ixgbe_resume(dev_info_t *);
106 static int ixgbe_suspend(dev_info_t *);
107 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
108 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
109 
110 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
111     const void *impl_data);
112 static void ixgbe_fm_init(ixgbe_t *);
113 static void ixgbe_fm_fini(ixgbe_t *);
114 
/*
 * Character/block device entry points.  All entries are stubs: this
 * driver is accessed through the GLDv3 MAC layer, not via cb_ops.
 */
static struct cb_ops ixgbe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
135 
/*
 * Device operations vector.  Only attach/detach are implemented;
 * getinfo is filled in later by mac_init_ops() in _init().
 * Fast-reboot quiesce is not supported by this driver.
 */
static struct dev_ops ixgbe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ixgbe_attach,		/* devo_attach */
	ixgbe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ixgbe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};
150 
static struct modldrv ixgbe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	ident,			/* Description string */
	&ixgbe_dev_ops		/* driver ops */
};
156 
/* Module linkage: a single driver module, terminated by NULL. */
static struct modlinkage ixgbe_modlinkage = {
	MODREV_1, &ixgbe_modldrv, NULL
};
160 
/*
 * Access attributes for register mapping: little-endian structural
 * access, strict ordering, with FMA access-error protection enabled
 * (DDI_FLAGERR_ACC) so register access faults are reported rather
 * than panicking.
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};
170 
/*
 * Loopback properties advertised through the LB_GET_INFO ioctl:
 * "normal" (no loopback) and internal MAC loopback.
 */
static lb_property_t lb_normal = {
	normal,	"normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
	internal, "MAC", IXGBE_LB_INTERNAL_MAC
};
181 
/* Optional GLDv3 callbacks this driver provides: ioctl and getcapab. */
#define	IXGBE_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)

/*
 * GLDv3 MAC callback vector registered via mac_register().
 * The NULL entry is an unused optional callback slot.
 */
static mac_callbacks_t ixgbe_m_callbacks = {
	IXGBE_M_CALLBACK_FLAGS,
	ixgbe_m_stat,
	ixgbe_m_start,
	ixgbe_m_stop,
	ixgbe_m_promisc,
	ixgbe_m_multicst,
	ixgbe_m_unicst,
	ixgbe_m_tx,
	NULL,
	ixgbe_m_ioctl,
	ixgbe_m_getcapab
};
197 
198 /*
199  * Module Initialization Functions.
200  */
201 
202 int
203 _init(void)
204 {
205 	int status;
206 
207 	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
208 
209 	status = mod_install(&ixgbe_modlinkage);
210 
211 	if (status != DDI_SUCCESS) {
212 		mac_fini_ops(&ixgbe_dev_ops);
213 	}
214 
215 	return (status);
216 }
217 
218 int
219 _fini(void)
220 {
221 	int status;
222 
223 	status = mod_remove(&ixgbe_modlinkage);
224 
225 	if (status == DDI_SUCCESS) {
226 		mac_fini_ops(&ixgbe_dev_ops);
227 	}
228 
229 	return (status);
230 }
231 
232 int
233 _info(struct modinfo *modinfop)
234 {
235 	int status;
236 
237 	status = mod_info(&ixgbe_modlinkage, modinfop);
238 
239 	return (status);
240 }
241 
/*
 * ixgbe_attach - Driver attach.
 *
 * This function is the device specific initialization entry
 * point. This entry point is required and must be written.
 * The DDI_ATTACH command must be provided in the attach entry
 * point. When attach() is called with cmd set to DDI_ATTACH,
 * all normal kernel services (such as kmem_alloc(9F)) are
 * available for use by the driver.
 *
 * The attach() function will be called once for each instance
 * of  the  device  on  the  system with cmd set to DDI_ATTACH.
 * Until attach() succeeds, the only driver entry points which
 * may be called are open(9E) and getinfo(9E).
 *
 * Each completed initialization step records a bit in
 * ixgbe->attach_progress; on any failure ixgbe_unconfigure() uses
 * those bits to tear down exactly what was set up.
 */
static int
ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	ixgbe_t *ixgbe;
	struct ixgbe_osdep *osdep;
	struct ixgbe_hw *hw;
	int instance;

	/*
	 * Check the command and perform corresponding operations
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (ixgbe_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	/* Get the device instance */
	instance = ddi_get_instance(devinfo);

	/* Allocate memory for the instance data structure */
	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);

	ixgbe->dip = devinfo;
	ixgbe->instance = instance;

	/* Cross-link the soft state, hw and osdep structures */
	hw = &ixgbe->hw;
	osdep = &ixgbe->osdep;
	hw->back = osdep;
	osdep->ixgbe = ixgbe;

	/* Attach the instance pointer to the dev_info data structure */
	ddi_set_driver_private(devinfo, ixgbe);

	/*
	 * Initialize for fma support.  The "fm-capable" property (or its
	 * default of full capability) selects which FMA features to enable.
	 */
	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
	ixgbe_fm_init(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;

	/*
	 * Map PCI config space registers
	 */
	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map PCI configurations");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;

	/*
	 * Identify the chipset family
	 */
	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to identify hardware");
		goto attach_fail;
	}

	/*
	 * Map device registers
	 */
	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map device registers");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;

	/*
	 * Initialize driver parameters
	 */
	ixgbe_init_properties(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;

	/*
	 * Allocate interrupts
	 */
	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to allocate interrupts");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;

	/*
	 * Allocate rx/tx rings based on the ring numbers.
	 * The actual numbers of rx/tx rings are decided by the number of
	 * allocated interrupt vectors, so we should allocate the rings after
	 * interrupts are allocated.
	 */
	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;

	/*
	 * Map rings to interrupt vectors
	 */
	if (ixgbe_map_rings_to_vectors(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map rings to vectors");
		goto attach_fail;
	}

	/*
	 * Add interrupt handlers
	 */
	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;

	/*
	 * Initialize driver parameters
	 */
	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize driver settings");
		goto attach_fail;
	}

	/*
	 * Initialize mutexes for this device.
	 * Do this before enabling the interrupt handler and
	 * register the softint to avoid the condition where
	 * interrupt handler can try using uninitialized mutex.
	 */
	ixgbe_init_locks(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;

	/*
	 * Initialize chipset hardware
	 */
	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize adapter");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;

	/* FMA check of the PCI config access handle after chip init */
	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
		goto attach_fail;
	}

	/*
	 * Initialize DMA and hardware settings for rx/tx rings
	 */
	if (ixgbe_init_rings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize rings");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT_RINGS;

	/*
	 * Initialize statistics
	 */
	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize statistics");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;

	/*
	 * Initialize NDD parameters
	 */
	if (ixgbe_nd_init(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize ndd");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_NDD;

	/*
	 * Register the driver to the MAC
	 */
	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to register MAC");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;

	/*
	 * Now that mutex locks are initialized, and the chip is also
	 * initialized, enable interrupts.
	 */
	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;

	ixgbe->ixgbe_state |= IXGBE_INITIALIZED;

	return (DDI_SUCCESS);

attach_fail:
	/* Roll back whatever attach_progress says was completed */
	ixgbe_unconfigure(devinfo, ixgbe);
	return (DDI_FAILURE);
}
460 
/*
 * ixgbe_detach - Driver detach.
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove  the
 * state  associated  with  a  given  instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each  instance
 * of the device for which there has been a successful attach()
 * once there are no longer  any  opens  on  the  device.
 *
 * Interrupts routine are disabled, All memory allocated by this
 * driver are freed.
 */
static int
ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	ixgbe_t *ixgbe;

	/*
	 * Check detach command
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (ixgbe_suspend(devinfo));

	case DDI_DETACH:
		break;
	}


	/*
	 * Get the pointer to the driver private data structure
	 */
	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	/*
	 * Unregister MAC. If failed, we have to fail the detach
	 */
	if (mac_unregister(ixgbe->mac_hdl) != 0) {
		ixgbe_error(ixgbe, "Failed to unregister MAC");
		return (DDI_FAILURE);
	}
	ixgbe->attach_progress &= ~ATTACH_PROGRESS_MAC;

	/*
	 * If the device is still running, it needs to be stopped first.
	 * This check is necessary because under some specific circumstances,
	 * the detach routine can be called without stopping the interface
	 * first.
	 */
	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		ixgbe->ixgbe_state &= ~IXGBE_STARTED;
		ixgbe_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
		/* Disable and stop the watchdog timer */
		ixgbe_disable_watchdog_timer(ixgbe);
	} else
		mutex_exit(&ixgbe->gen_lock);

	/*
	 * Check if there are still rx buffers held by the upper layer.
	 * If so, fail the detach.
	 *
	 * NOTE(review): the MAC has already been unregistered above, so
	 * failing here leaves the instance attached without a MAC
	 * registration — confirm that this state is acceptable/recoverable.
	 */
	if (!ixgbe_rx_drain(ixgbe))
		return (DDI_FAILURE);

	/*
	 * Do the remaining unconfigure routines
	 */
	ixgbe_unconfigure(devinfo, ixgbe);

	return (DDI_SUCCESS);
}
542 
/*
 * ixgbe_unconfigure - Tear down everything that attach set up.
 *
 * Each step is guarded by the corresponding attach_progress bit, so
 * this routine is safe to call from any point of a partially completed
 * attach as well as from a full detach.  It always frees the soft
 * state and clears the dev_info private pointer at the end.
 */
static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
	/*
	 * Disable interrupt
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		(void) ixgbe_disable_intrs(ixgbe);
	}

	/*
	 * Unregister MAC (no-op in the detach path, which clears this
	 * bit after its own mac_unregister() call)
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
		(void) mac_unregister(ixgbe->mac_hdl);
	}

	/*
	 * Free ndd parameters
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_NDD) {
		ixgbe_nd_cleanup(ixgbe);
	}

	/*
	 * Free statistics
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
	}

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}

	/*
	 * Remove interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
		ixgbe_rem_intrs(ixgbe);
	}

	/*
	 * Remove driver properties
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
		(void) ddi_prop_remove_all(devinfo);
	}

	/*
	 * Release the DMA resources of rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT_RINGS) {
		ixgbe_fini_rings(ixgbe);
	}

	/*
	 * Stop the chipset (requires gen_lock, so must run before the
	 * locks are destroyed below)
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_chip_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
	}

	/*
	 * Free register handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
		if (ixgbe->osdep.reg_handle != NULL)
			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
	}

	/*
	 * Free PCI config handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
		if (ixgbe->osdep.cfg_handle != NULL)
			pci_config_teardown(&ixgbe->osdep.cfg_handle);
	}

	/*
	 * Free locks
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
		ixgbe_destroy_locks(ixgbe);
	}

	/*
	 * Free the rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
		ixgbe_free_rings(ixgbe);
	}

	/*
	 * Unregister FMA capabilities
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
		ixgbe_fm_fini(ixgbe);
	}

	/*
	 * Free the driver data structure
	 */
	kmem_free(ixgbe, sizeof (ixgbe_t));

	ddi_set_driver_private(devinfo, NULL);
}
655 
656 /*
657  * ixgbe_register_mac - Register the driver and its function pointers with
658  * the GLD interface.
659  */
660 static int
661 ixgbe_register_mac(ixgbe_t *ixgbe)
662 {
663 	struct ixgbe_hw *hw = &ixgbe->hw;
664 	mac_register_t *mac;
665 	int status;
666 
667 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
668 		return (IXGBE_FAILURE);
669 
670 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
671 	mac->m_driver = ixgbe;
672 	mac->m_dip = ixgbe->dip;
673 	mac->m_src_addr = hw->mac.addr;
674 	mac->m_callbacks = &ixgbe_m_callbacks;
675 	mac->m_min_sdu = 0;
676 	mac->m_max_sdu = ixgbe->default_mtu;
677 	mac->m_margin = VLAN_TAGSZ;
678 
679 	status = mac_register(mac, &ixgbe->mac_hdl);
680 
681 	mac_free(mac);
682 
683 	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
684 }
685 
686 /*
687  * ixgbe_identify_hardware - Identify the type of the chipset.
688  */
689 static int
690 ixgbe_identify_hardware(ixgbe_t *ixgbe)
691 {
692 	struct ixgbe_hw *hw = &ixgbe->hw;
693 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
694 
695 	/*
696 	 * Get the device id
697 	 */
698 	hw->vendor_id =
699 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
700 	hw->device_id =
701 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
702 	hw->revision_id =
703 	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
704 	hw->subsystem_device_id =
705 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
706 	hw->subsystem_vendor_id =
707 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
708 
709 	return (IXGBE_SUCCESS);
710 }
711 
712 /*
713  * ixgbe_regs_map - Map the device registers.
714  *
715  */
716 static int
717 ixgbe_regs_map(ixgbe_t *ixgbe)
718 {
719 	dev_info_t *devinfo = ixgbe->dip;
720 	struct ixgbe_hw *hw = &ixgbe->hw;
721 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
722 	off_t mem_size;
723 
724 	/*
725 	 * First get the size of device registers to be mapped.
726 	 */
727 	if (ddi_dev_regsize(devinfo, 1, &mem_size) != DDI_SUCCESS) {
728 		return (IXGBE_FAILURE);
729 	}
730 
731 	/*
732 	 * Call ddi_regs_map_setup() to map registers
733 	 */
734 	if ((ddi_regs_map_setup(devinfo, 1,
735 	    (caddr_t *)&hw->hw_addr, 0,
736 	    mem_size, &ixgbe_regs_acc_attr,
737 	    &osdep->reg_handle)) != DDI_SUCCESS) {
738 		return (IXGBE_FAILURE);
739 	}
740 
741 	return (IXGBE_SUCCESS);
742 }
743 
/*
 * ixgbe_init_properties - Initialize driver properties.
 *
 * Thin wrapper that pulls all tunables from the ixgbe.conf file.
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
	/*
	 * Get conf file properties, including link settings
	 * jumbo frames, ring number, descriptor number, etc.
	 */
	ixgbe_get_conf(ixgbe);
}
756 
757 /*
758  * ixgbe_init_driver_settings - Initialize driver settings.
759  *
760  * The settings include hardware function pointers, bus information,
761  * rx/tx rings settings, link state, and any other parameters that
762  * need to be setup during driver initialization.
763  */
764 static int
765 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
766 {
767 	struct ixgbe_hw *hw = &ixgbe->hw;
768 	ixgbe_rx_ring_t *rx_ring;
769 	ixgbe_tx_ring_t *tx_ring;
770 	uint32_t rx_size;
771 	uint32_t tx_size;
772 	int i;
773 
774 	/*
775 	 * Initialize chipset specific hardware function pointers
776 	 */
777 	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
778 		return (IXGBE_FAILURE);
779 	}
780 
781 	/*
782 	 * Set rx buffer size
783 	 *
784 	 * The IP header alignment room is counted in the calculation.
785 	 * The rx buffer size is in unit of 1K that is required by the
786 	 * chipset hardware.
787 	 */
788 	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
789 	ixgbe->rx_buf_size = ((rx_size >> 10) +
790 	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
791 
792 	/*
793 	 * Set tx buffer size
794 	 */
795 	tx_size = ixgbe->max_frame_size;
796 	ixgbe->tx_buf_size = ((tx_size >> 10) +
797 	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
798 
799 	/*
800 	 * Initialize rx/tx rings parameters
801 	 */
802 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
803 		rx_ring = &ixgbe->rx_rings[i];
804 		rx_ring->index = i;
805 		rx_ring->ixgbe = ixgbe;
806 
807 		rx_ring->ring_size = ixgbe->rx_ring_size;
808 		rx_ring->free_list_size = ixgbe->rx_ring_size;
809 		rx_ring->copy_thresh = ixgbe->rx_copy_thresh;
810 		rx_ring->limit_per_intr = ixgbe->rx_limit_per_intr;
811 	}
812 
813 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
814 		tx_ring = &ixgbe->tx_rings[i];
815 		tx_ring->index = i;
816 		tx_ring->ixgbe = ixgbe;
817 		if (ixgbe->tx_head_wb_enable)
818 			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
819 		else
820 			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
821 
822 		tx_ring->ring_size = ixgbe->tx_ring_size;
823 		tx_ring->free_list_size = ixgbe->tx_ring_size +
824 		    (ixgbe->tx_ring_size >> 1);
825 		tx_ring->copy_thresh = ixgbe->tx_copy_thresh;
826 		tx_ring->recycle_thresh = ixgbe->tx_recycle_thresh;
827 		tx_ring->overload_thresh = ixgbe->tx_overload_thresh;
828 	tx_ring->resched_thresh = ixgbe->tx_resched_thresh;
829 	}
830 
831 	/*
832 	 * Initialize values of interrupt throttling rate
833 	 */
834 	for (i = 1; i < IXGBE_MAX_RING_VECTOR; i++)
835 		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
836 
837 	/*
838 	 * The initial link state should be "unknown"
839 	 */
840 	ixgbe->link_state = LINK_STATE_UNKNOWN;
841 	return (IXGBE_SUCCESS);
842 }
843 
844 /*
845  * ixgbe_init_locks - Initialize locks.
846  */
847 static void
848 ixgbe_init_locks(ixgbe_t *ixgbe)
849 {
850 	ixgbe_rx_ring_t *rx_ring;
851 	ixgbe_tx_ring_t *tx_ring;
852 	int i;
853 
854 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
855 		rx_ring = &ixgbe->rx_rings[i];
856 		mutex_init(&rx_ring->rx_lock, NULL,
857 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
858 		mutex_init(&rx_ring->recycle_lock, NULL,
859 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
860 	}
861 
862 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
863 		tx_ring = &ixgbe->tx_rings[i];
864 		mutex_init(&tx_ring->tx_lock, NULL,
865 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
866 		mutex_init(&tx_ring->recycle_lock, NULL,
867 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
868 		mutex_init(&tx_ring->tcb_head_lock, NULL,
869 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
870 		mutex_init(&tx_ring->tcb_tail_lock, NULL,
871 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
872 	}
873 
874 	mutex_init(&ixgbe->gen_lock, NULL,
875 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
876 
877 	mutex_init(&ixgbe->watchdog_lock, NULL,
878 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
879 }
880 
881 /*
882  * ixgbe_destroy_locks - Destroy locks.
883  */
884 static void
885 ixgbe_destroy_locks(ixgbe_t *ixgbe)
886 {
887 	ixgbe_rx_ring_t *rx_ring;
888 	ixgbe_tx_ring_t *tx_ring;
889 	int i;
890 
891 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
892 		rx_ring = &ixgbe->rx_rings[i];
893 		mutex_destroy(&rx_ring->rx_lock);
894 		mutex_destroy(&rx_ring->recycle_lock);
895 	}
896 
897 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
898 		tx_ring = &ixgbe->tx_rings[i];
899 		mutex_destroy(&tx_ring->tx_lock);
900 		mutex_destroy(&tx_ring->recycle_lock);
901 		mutex_destroy(&tx_ring->tcb_head_lock);
902 		mutex_destroy(&tx_ring->tcb_tail_lock);
903 	}
904 
905 	mutex_destroy(&ixgbe->gen_lock);
906 	mutex_destroy(&ixgbe->watchdog_lock);
907 }
908 
/*
 * ixgbe_resume - DDI_RESUME handler.
 *
 * Restarts the hardware (and watchdog) only if the interface was
 * started when it was suspended; the SUSPENDED flag is cleared last,
 * after the restart has succeeded.
 */
static int
ixgbe_resume(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		if (ixgbe_start(ixgbe) != IXGBE_SUCCESS) {
			/* Leave SUSPENDED set on restart failure */
			mutex_exit(&ixgbe->gen_lock);
			return (DDI_FAILURE);
		}

		/*
		 * Enable and start the watchdog timer
		 */
		ixgbe_enable_watchdog_timer(ixgbe);
	}

	ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;

	mutex_exit(&ixgbe->gen_lock);

	return (DDI_SUCCESS);
}
938 
939 static int
940 ixgbe_suspend(dev_info_t *devinfo)
941 {
942 	ixgbe_t *ixgbe;
943 
944 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
945 	if (ixgbe == NULL)
946 		return (DDI_FAILURE);
947 
948 	mutex_enter(&ixgbe->gen_lock);
949 
950 	ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
951 
952 	ixgbe_stop(ixgbe);
953 
954 	mutex_exit(&ixgbe->gen_lock);
955 
956 	/*
957 	 * Disable and stop the watchdog timer
958 	 */
959 	ixgbe_disable_watchdog_timer(ixgbe);
960 
961 	return (DDI_SUCCESS);
962 }
963 
964 /*
965  * ixgbe_init - Initialize the device.
966  */
967 static int
968 ixgbe_init(ixgbe_t *ixgbe)
969 {
970 	struct ixgbe_hw *hw = &ixgbe->hw;
971 
972 	mutex_enter(&ixgbe->gen_lock);
973 
974 	/*
975 	 * Reset chipset to put the hardware in a known state
976 	 * before we try to do anything with the eeprom.
977 	 */
978 	if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
979 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
980 		goto init_fail;
981 	}
982 
983 	/*
984 	 * Need to init eeprom before validating the checksum.
985 	 */
986 	if (ixgbe_init_eeprom_params(hw) < 0) {
987 		ixgbe_error(ixgbe,
988 		    "Unable to intitialize the eeprom interface.");
989 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
990 		goto init_fail;
991 	}
992 
993 	/*
994 	 * NVM validation
995 	 */
996 	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
997 		/*
998 		 * Some PCI-E parts fail the first check due to
999 		 * the link being in sleep state.  Call it again,
1000 		 * if it fails a second time it's a real issue.
1001 		 */
1002 		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1003 			ixgbe_error(ixgbe,
1004 			    "Invalid NVM checksum. Please contact "
1005 			    "the vendor to update the NVM.");
1006 			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1007 			goto init_fail;
1008 		}
1009 	}
1010 
1011 	/*
1012 	 * Setup default flow control thresholds - enable/disable
1013 	 * & flow control type is controlled by ixgbe.conf
1014 	 */
1015 	hw->fc.high_water = DEFAULT_FCRTH;
1016 	hw->fc.low_water = DEFAULT_FCRTL;
1017 	hw->fc.pause_time = DEFAULT_FCPAUSE;
1018 	hw->fc.send_xon = B_TRUE;
1019 
1020 	/*
1021 	 * Don't wait for auto-negotiation to complete
1022 	 */
1023 	hw->phy.autoneg_wait_to_complete = B_FALSE;
1024 
1025 	/*
1026 	 * Initialize link settings
1027 	 */
1028 	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1029 
1030 	/*
1031 	 * Initialize the chipset hardware
1032 	 */
1033 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1034 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1035 		goto init_fail;
1036 	}
1037 
1038 	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
1039 		goto init_fail;
1040 	}
1041 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1042 		goto init_fail;
1043 	}
1044 
1045 	mutex_exit(&ixgbe->gen_lock);
1046 	return (IXGBE_SUCCESS);
1047 
1048 init_fail:
1049 	/*
1050 	 * Reset PHY
1051 	 */
1052 	(void) ixgbe_reset_phy(hw);
1053 
1054 	mutex_exit(&ixgbe->gen_lock);
1055 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1056 	return (IXGBE_FAILURE);
1057 }
1058 
/*
 * ixgbe_init_rings - Allocate DMA resources for all rx/tx rings and
 * initialize relevant hardware settings.
 *
 * Lock order: gen_lock, then every rx_lock in ascending ring order,
 * then every tx_lock in ascending ring order; released in the exact
 * reverse order.
 */
static int
ixgbe_init_rings(ixgbe_t *ixgbe)
{
	int i;

	/*
	 * Allocate buffers for all the rx/tx rings
	 */
	if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS)
		return (IXGBE_FAILURE);

	/*
	 * Setup the rx/tx rings with all ring locks held so the
	 * hardware programming is not observed half-done.
	 */
	mutex_enter(&ixgbe->gen_lock);

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	ixgbe_setup_rings(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	mutex_exit(&ixgbe->gen_lock);

	return (IXGBE_SUCCESS);
}
1095 
/*
 * ixgbe_fini_rings - Release DMA resources of all rx/tx rings.
 *
 * Counterpart of the allocation done in ixgbe_init_rings().
 */
static void
ixgbe_fini_rings(ixgbe_t *ixgbe)
{
	/*
	 * Release the DMA/memory resources of rx/tx rings
	 */
	ixgbe_free_dma(ixgbe);
}
1107 
/*
 * ixgbe_chip_start - Initialize and start the chipset hardware.
 *
 * Must be called with gen_lock held.  Finds and validates the MAC
 * address, initializes the hardware, programs interrupt vectors,
 * unicast/multicast tables and throttling rates, and finally takes
 * firmware control of the device for the driver.
 */
static int
ixgbe_chip_start(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Get the mac address
	 * This function should handle SPARC case correctly.
	 */
	if (!ixgbe_find_mac_address(ixgbe)) {
		ixgbe_error(ixgbe, "Failed to get the mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Validate the mac address
	 */
	(void) ixgbe_init_rx_addrs(hw);
	if (!is_valid_mac_addr(hw->mac.addr)) {
		ixgbe_error(ixgbe, "Invalid mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Configure/Initialize hardware
	 */
	if (ixgbe_init_hw(hw) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize hardware");
		return (IXGBE_FAILURE);
	}

	/*
	 * Setup adapter interrupt vectors
	 */
	ixgbe_setup_adapter_vector(ixgbe);

	/*
	 * Initialize unicast addresses.
	 */
	ixgbe_init_unicst(ixgbe);

	/*
	 * Setup and initialize the mctable structures.
	 */
	ixgbe_setup_multicst(ixgbe);

	/*
	 * Set interrupt throttling rate
	 */
	for (i = 0; i < ixgbe->intr_cnt; i++)
		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);

	/*
	 * Save the state of the phy
	 */
	ixgbe_get_hw_state(ixgbe);

	/*
	 * Make sure driver has control
	 */
	ixgbe_get_driver_control(hw);

	return (IXGBE_SUCCESS);
}
1178 
/*
 * ixgbe_chip_stop - Stop the chipset hardware
 *
 * Called with gen_lock held.  Hands control back to firmware, then
 * resets both the MAC and the PHY.
 */
static void
ixgbe_chip_stop(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Tell firmware driver is no longer in control
	 */
	ixgbe_release_driver_control(hw);

	/*
	 * Reset the chipset
	 */
	(void) ixgbe_reset_hw(hw);

	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);
}
1204 
/*
 * ixgbe_reset - Reset the chipset and re-start the driver.
 *
 * It involves stopping and re-starting the chipset,
 * and re-configuring the rx/tx rings.
 *
 * Must only be called while the driver is started; the IXGBE_STARTED
 * flag is cleared for the duration of the reset and set again only on
 * success.  Returns IXGBE_SUCCESS or IXGBE_FAILURE.
 */
static int
ixgbe_reset(ixgbe_t *ixgbe)
{
	int i;

	mutex_enter(&ixgbe->gen_lock);

	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
	ixgbe->ixgbe_state &= ~IXGBE_STARTED;

	/*
	 * Disable the adapter interrupts to stop any rx/tx activities
	 * before draining pending data and resetting hardware.
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Drain the pending transmit packets
	 */
	(void) ixgbe_tx_drain(ixgbe);

	/* Ring locks are taken in index order, rx before tx */
	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Stop the chipset hardware
	 */
	ixgbe_chip_stop(ixgbe);

	/*
	 * Clean the pending tx data/resources
	 */
	ixgbe_tx_clean(ixgbe);

	/*
	 * Start the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto reset_failure;
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto reset_failure;
	}

	/*
	 * Setup the rx/tx rings
	 */
	ixgbe_setup_rings(ixgbe);

	/*
	 * Enable adapter interrupts
	 * The interrupts must be enabled after the driver state is START
	 */
	ixgbe_enable_adapter_interrupts(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	ixgbe->ixgbe_state |= IXGBE_STARTED;
	mutex_exit(&ixgbe->gen_lock);

	return (IXGBE_SUCCESS);

reset_failure:
	/* Drop all locks; IXGBE_STARTED stays cleared on failure */
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	mutex_exit(&ixgbe->gen_lock);

	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

	return (IXGBE_FAILURE);
}
1292 
1293 /*
1294  * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
1295  */
1296 static void
1297 ixgbe_tx_clean(ixgbe_t *ixgbe)
1298 {
1299 	ixgbe_tx_ring_t *tx_ring;
1300 	tx_control_block_t *tcb;
1301 	link_list_t pending_list;
1302 	uint32_t desc_num;
1303 	struct ixgbe_hw *hw = &ixgbe->hw;
1304 	int i, j;
1305 
1306 	LINK_LIST_INIT(&pending_list);
1307 
1308 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1309 		tx_ring = &ixgbe->tx_rings[i];
1310 
1311 		mutex_enter(&tx_ring->recycle_lock);
1312 
1313 		/*
1314 		 * Clean the pending tx data - the pending packets in the
1315 		 * work_list that have no chances to be transmitted again.
1316 		 *
1317 		 * We must ensure the chipset is stopped or the link is down
1318 		 * before cleaning the transmit packets.
1319 		 */
1320 		desc_num = 0;
1321 		for (j = 0; j < tx_ring->ring_size; j++) {
1322 			tcb = tx_ring->work_list[j];
1323 			if (tcb != NULL) {
1324 				desc_num += tcb->desc_num;
1325 
1326 				tx_ring->work_list[j] = NULL;
1327 
1328 				ixgbe_free_tcb(tcb);
1329 
1330 				LIST_PUSH_TAIL(&pending_list, &tcb->link);
1331 			}
1332 		}
1333 
1334 		if (desc_num > 0) {
1335 			atomic_add_32(&tx_ring->tbd_free, desc_num);
1336 			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
1337 
1338 			/*
1339 			 * Reset the head and tail pointers of the tbd ring;
1340 			 * Reset the writeback head if it's enable.
1341 			 */
1342 			tx_ring->tbd_head = 0;
1343 			tx_ring->tbd_tail = 0;
1344 			if (ixgbe->tx_head_wb_enable)
1345 				*tx_ring->tbd_head_wb = 0;
1346 
1347 			IXGBE_WRITE_REG(&ixgbe->hw,
1348 			    IXGBE_TDH(tx_ring->index), 0);
1349 			IXGBE_WRITE_REG(&ixgbe->hw,
1350 			    IXGBE_TDT(tx_ring->index), 0);
1351 		}
1352 
1353 		mutex_exit(&tx_ring->recycle_lock);
1354 
1355 		/*
1356 		 * Add the tx control blocks in the pending list to
1357 		 * the free list.
1358 		 */
1359 		ixgbe_put_free_list(tx_ring, &pending_list);
1360 	}
1361 }
1362 
1363 /*
1364  * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1365  * transmitted.
1366  */
1367 static boolean_t
1368 ixgbe_tx_drain(ixgbe_t *ixgbe)
1369 {
1370 	ixgbe_tx_ring_t *tx_ring;
1371 	boolean_t done;
1372 	int i, j;
1373 
1374 	/*
1375 	 * Wait for a specific time to allow pending tx packets
1376 	 * to be transmitted.
1377 	 *
1378 	 * Check the counter tbd_free to see if transmission is done.
1379 	 * No lock protection is needed here.
1380 	 *
1381 	 * Return B_TRUE if all pending packets have been transmitted;
1382 	 * Otherwise return B_FALSE;
1383 	 */
1384 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1385 
1386 		done = B_TRUE;
1387 		for (j = 0; j < ixgbe->num_tx_rings; j++) {
1388 			tx_ring = &ixgbe->tx_rings[j];
1389 			done = done &&
1390 			    (tx_ring->tbd_free == tx_ring->ring_size);
1391 		}
1392 
1393 		if (done)
1394 			break;
1395 
1396 		msec_delay(1);
1397 	}
1398 
1399 	return (done);
1400 }
1401 
1402 /*
1403  * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1404  */
1405 static boolean_t
1406 ixgbe_rx_drain(ixgbe_t *ixgbe)
1407 {
1408 	ixgbe_rx_ring_t *rx_ring;
1409 	boolean_t done;
1410 	int i, j;
1411 
1412 	/*
1413 	 * Polling the rx free list to check if those rx buffers held by
1414 	 * the upper layer are released.
1415 	 *
1416 	 * Check the counter rcb_free to see if all pending buffers are
1417 	 * released. No lock protection is needed here.
1418 	 *
1419 	 * Return B_TRUE if all pending buffers have been released;
1420 	 * Otherwise return B_FALSE;
1421 	 */
1422 	for (i = 0; i < RX_DRAIN_TIME; i++) {
1423 
1424 		done = B_TRUE;
1425 		for (j = 0; j < ixgbe->num_rx_rings; j++) {
1426 			rx_ring = &ixgbe->rx_rings[j];
1427 			done = done &&
1428 			    (rx_ring->rcb_free == rx_ring->free_list_size);
1429 		}
1430 
1431 		if (done)
1432 			break;
1433 
1434 		msec_delay(1);
1435 	}
1436 
1437 	return (done);
1438 }
1439 
/*
 * ixgbe_start - Start the driver/chipset.
 *
 * Called with gen_lock held.  Brings the hardware up, programs the
 * rx/tx rings, and enables adapter interrupts.  On failure all ring
 * locks are dropped and DDI_SERVICE_LOST is reported.
 */
int
ixgbe_start(ixgbe_t *ixgbe)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/* Take the ring locks in index order, rx before tx */
	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Start the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto start_failure;
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto start_failure;
	}

	/*
	 * Setup the rx/tx rings
	 */
	ixgbe_setup_rings(ixgbe);

	/*
	 * Enable adapter interrupts
	 * The interrupts must be enabled after the driver state is START
	 */
	ixgbe_enable_adapter_interrupts(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	return (IXGBE_SUCCESS);

start_failure:
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

	return (IXGBE_FAILURE);
}
1495 
/*
 * ixgbe_stop - Stop the driver/chipset.
 *
 * Called with gen_lock held.  Disables interrupts, drains outstanding
 * transmits, stops the hardware, and reclaims all pending tx
 * resources.
 */
void
ixgbe_stop(ixgbe_t *ixgbe)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Disable the adapter interrupts
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Drain the pending tx packets
	 */
	(void) ixgbe_tx_drain(ixgbe);

	/* Take the ring locks in index order, rx before tx */
	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Stop the chipset hardware
	 */
	ixgbe_chip_stop(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clean the pending tx data/resources
	 */
	ixgbe_tx_clean(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
}
1540 
1541 /*
1542  * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
1543  */
1544 static int
1545 ixgbe_alloc_rings(ixgbe_t *ixgbe)
1546 {
1547 	/*
1548 	 * Allocate memory space for rx rings
1549 	 */
1550 	ixgbe->rx_rings = kmem_zalloc(
1551 	    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
1552 	    KM_NOSLEEP);
1553 
1554 	if (ixgbe->rx_rings == NULL) {
1555 		return (IXGBE_FAILURE);
1556 	}
1557 
1558 	/*
1559 	 * Allocate memory space for tx rings
1560 	 */
1561 	ixgbe->tx_rings = kmem_zalloc(
1562 	    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
1563 	    KM_NOSLEEP);
1564 
1565 	if (ixgbe->tx_rings == NULL) {
1566 		kmem_free(ixgbe->rx_rings,
1567 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1568 		ixgbe->rx_rings = NULL;
1569 		return (IXGBE_FAILURE);
1570 	}
1571 
1572 	return (IXGBE_SUCCESS);
1573 }
1574 
1575 /*
1576  * ixgbe_free_rings - Free the memory space of rx/tx rings.
1577  */
1578 static void
1579 ixgbe_free_rings(ixgbe_t *ixgbe)
1580 {
1581 	if (ixgbe->rx_rings != NULL) {
1582 		kmem_free(ixgbe->rx_rings,
1583 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1584 		ixgbe->rx_rings = NULL;
1585 	}
1586 
1587 	if (ixgbe->tx_rings != NULL) {
1588 		kmem_free(ixgbe->tx_rings,
1589 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1590 		ixgbe->tx_rings = NULL;
1591 	}
1592 }
1593 
1594 /*
1595  * ixgbe_setup_rings - Setup rx/tx rings.
1596  */
1597 static void
1598 ixgbe_setup_rings(ixgbe_t *ixgbe)
1599 {
1600 	/*
1601 	 * Setup the rx/tx rings, including the following:
1602 	 *
1603 	 * 1. Setup the descriptor ring and the control block buffers;
1604 	 * 2. Initialize necessary registers for receive/transmit;
1605 	 * 3. Initialize software pointers/parameters for receive/transmit;
1606 	 */
1607 	ixgbe_setup_rx(ixgbe);
1608 
1609 	ixgbe_setup_tx(ixgbe);
1610 }
1611 
1612 static void
1613 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
1614 {
1615 	ixgbe_t *ixgbe = rx_ring->ixgbe;
1616 	struct ixgbe_hw *hw = &ixgbe->hw;
1617 	rx_control_block_t *rcb;
1618 	union ixgbe_adv_rx_desc	*rbd;
1619 	uint32_t size;
1620 	uint32_t buf_low;
1621 	uint32_t buf_high;
1622 	uint32_t reg_val;
1623 	int i;
1624 
1625 	ASSERT(mutex_owned(&rx_ring->rx_lock));
1626 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1627 
1628 	for (i = 0; i < ixgbe->rx_ring_size; i++) {
1629 		rcb = rx_ring->work_list[i];
1630 		rbd = &rx_ring->rbd_ring[i];
1631 
1632 		rbd->read.pkt_addr = rcb->rx_buf.dma_address;
1633 		rbd->read.hdr_addr = NULL;
1634 	}
1635 
1636 	/*
1637 	 * Initialize the length register
1638 	 */
1639 	size = rx_ring->ring_size * sizeof (union ixgbe_adv_rx_desc);
1640 	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->index), size);
1641 
1642 	/*
1643 	 * Initialize the base address registers
1644 	 */
1645 	buf_low = (uint32_t)rx_ring->rbd_area.dma_address;
1646 	buf_high = (uint32_t)(rx_ring->rbd_area.dma_address >> 32);
1647 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->index), buf_high);
1648 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->index), buf_low);
1649 
1650 	/*
1651 	 * Setup head & tail pointers
1652 	 */
1653 	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->index), rx_ring->ring_size - 1);
1654 	IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->index), 0);
1655 
1656 	rx_ring->rbd_next = 0;
1657 
1658 	/*
1659 	 * Note: Considering the case that the chipset is being reset
1660 	 * and there are still some buffers held by the upper layer,
1661 	 * we should not reset the values of rcb_head, rcb_tail and
1662 	 * rcb_free if the state is not IXGBE_UNKNOWN.
1663 	 */
1664 	if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
1665 		rx_ring->rcb_head = 0;
1666 		rx_ring->rcb_tail = 0;
1667 		rx_ring->rcb_free = rx_ring->free_list_size;
1668 	}
1669 
1670 	/*
1671 	 * Setup the Receive Descriptor Control Register (RXDCTL)
1672 	 * PTHRESH=32 descriptors (half the internal cache)
1673 	 * HTHRESH=0 descriptors (to minimize latency on fetch)
1674 	 * WTHRESH defaults to 1 (writeback each descriptor)
1675 	 */
1676 	reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->index));
1677 	reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */
1678 	reg_val |= 0x0020;		/* pthresh */
1679 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->index), reg_val);
1680 
1681 	/*
1682 	 * Setup the Split and Replication Receive Control Register.
1683 	 * Set the rx buffer size and the advanced descriptor type.
1684 	 */
1685 	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
1686 	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1687 
1688 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->index), reg_val);
1689 }
1690 
/*
 * ixgbe_setup_rx - Program the global receive registers and every
 * rx ring.  The ordering of the register writes below is significant
 * (FCTRL before RXCTRL, RXCTRL before the per-ring setup).
 */
static void
ixgbe_setup_rx(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t reg_val;
	int i;

	/*
	 * Set filter control in FCTRL to accept broadcast packets and do
	 * not pass pause frames to host.  Flow control settings are already
	 * in this register, so preserve them.
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_val |= IXGBE_FCTRL_BAM;	/* broadcast accept mode */
	reg_val |= IXGBE_FCTRL_DPF;	/* discard pause frames */
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);

	/*
	 * Enable the receive unit.  This must be done after filter
	 * control is set in FCTRL.
	 */
	reg_val = (IXGBE_RXCTRL_RXEN	/* Enable Receive Unit */
	    | IXGBE_RXCTRL_DMBYPS);	/* descriptor monitor bypass */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);

	/*
	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		ixgbe_setup_rx_ring(rx_ring);
	}

	/*
	 * The Max Frame Size in MHADD will be internally increased by four
	 * bytes if the packet has a VLAN field, so includes MTU, ethernet
	 * header and frame check sequence.
	 */
	reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);

	/*
	 * Setup Jumbo Frame enable bit
	 */
	if (ixgbe->default_mtu > ETHERMTU) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg_val |= IXGBE_HLREG0_JUMBOEN;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
	}

	/*
	 * Hardware checksum settings (RXCSUM is rewritten later by
	 * ixgbe_setup_rss() when RSS is enabled)
	 */
	if (ixgbe->rx_hcksum_enable) {
		reg_val = IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
	}

	/*
	 * Setup RSS for multiple receive queues
	 */
	if (ixgbe->num_rx_rings > 1)
		ixgbe_setup_rss(ixgbe);
}
1757 
/*
 * ixgbe_setup_tx_ring - Program one tx ring's registers and software
 * state.
 *
 * Called with both the ring's tx_lock and gen_lock held.
 */
static void
ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
{
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;

	ASSERT(mutex_owned(&tx_ring->tx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Initialize the length register
	 */
	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);

	/*
	 * Initialize the base address registers
	 */
	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);

	/*
	 * setup TXDCTL(tx_ring->index)
	 */
	reg_val = IXGBE_TXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);

	/*
	 * Setup head & tail pointers
	 */
	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);

	/*
	 * Setup head write-back
	 */
	if (ixgbe->tx_head_wb_enable) {
		/*
		 * The memory of the head write-back is allocated using
		 * the extra tbd beyond the tail of the tbd ring.
		 * NOTE(review): this assumes tbd_area was sized with one
		 * descriptor's worth of extra space -- confirm against
		 * the DMA allocation code.
		 */
		tx_ring->tbd_head_wb = (uint32_t *)
		    ((uintptr_t)tx_ring->tbd_area.address + size);
		*tx_ring->tbd_head_wb = 0;

		buf_low = (uint32_t)
		    (tx_ring->tbd_area.dma_address + size);
		buf_high = (uint32_t)
		    ((tx_ring->tbd_area.dma_address + size) >> 32);

		/* Set the head write-back enable bit */
		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;

		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);

		/*
		 * Turn off relaxed ordering for head write back or it will
		 * cause problems with the tx recycling
		 */
		reg_val = IXGBE_READ_REG(hw,
		    IXGBE_DCA_TXCTRL(tx_ring->index));
		reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw,
		    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
	} else {
		tx_ring->tbd_head_wb = NULL;
	}

	tx_ring->tbd_head = 0;
	tx_ring->tbd_tail = 0;
	tx_ring->tbd_free = tx_ring->ring_size;

	/*
	 * Note: Considering the case that the chipset is being reset,
	 * and there are still some tcb in the pending list,
	 * we should not reset the values of tcb_head, tcb_tail and
	 * tcb_free if the state is not IXGBE_UNKNOWN.
	 */
	if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
		tx_ring->tcb_head = 0;
		tx_ring->tcb_tail = 0;
		tx_ring->tcb_free = tx_ring->free_list_size;
	}

	/*
	 * Initialize the s/w context structure
	 */
	bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
}
1854 
1855 static void
1856 ixgbe_setup_tx(ixgbe_t *ixgbe)
1857 {
1858 	struct ixgbe_hw *hw = &ixgbe->hw;
1859 	ixgbe_tx_ring_t *tx_ring;
1860 	uint32_t reg_val;
1861 	int i;
1862 
1863 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1864 		tx_ring = &ixgbe->tx_rings[i];
1865 		ixgbe_setup_tx_ring(tx_ring);
1866 	}
1867 
1868 	/*
1869 	 * Enable CRC appending and TX padding (for short tx frames)
1870 	 */
1871 	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1872 	reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
1873 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
1874 }
1875 
/*
 * ixgbe_setup_rss - Setup receive-side scaling feature.
 */
static void
ixgbe_setup_rss(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t i, mrqc, rxcsum;
	uint32_t random;
	uint32_t reta;

	/*
	 * Fill out redirection table: 128 one-byte entries mapping hash
	 * values to rx rings round-robin.  Four entries are packed into
	 * each 32-bit RETA register, so a register is written after
	 * every fourth entry is shifted in.
	 */
	reta = 0;
	for (i = 0; i < 128; i++) {
		reta = (reta << 8) | (i % ixgbe->num_rx_rings);
		if ((i & 3) == 3)
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
	}

	/*
	 * Fill out hash function seeds with a random constant
	 * (ten 32-bit RSSRK registers)
	 */
	for (i = 0; i < 10; i++) {
		(void) random_get_pseudo_bytes((uint8_t *)&random,
		    sizeof (uint32_t));
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
	}

	/*
	 * Enable RSS & perform hash on these packet types
	 */
	mrqc = IXGBE_MRQC_RSSEN |
	    IXGBE_MRQC_RSS_FIELD_IPV4 |
	    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
	    IXGBE_MRQC_RSS_FIELD_IPV6 |
	    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

	/*
	 * Disable Packet Checksum to enable RSS for multiple receive queues.
	 * It is an adapter hardware limitation that Packet Checksum is
	 * mutually exclusive with RSS.
	 */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	rxcsum |= IXGBE_RXCSUM_PCSD;
	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}
1931 
/*
 * ixgbe_init_unicst - Initialize the unicast addresses.
 */
static void
ixgbe_init_unicst(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int slot;
	/*
	 * Here we should consider two situations:
	 *
	 * 1. Chipset is initialized the first time
	 *    Initialize the multiple unicast addresses, and
	 *    save the default mac address.
	 *
	 * 2. Chipset is reset
	 *    Recover the multiple unicast addresses from the
	 *    software data structure to the RAR registers.
	 */
	if (!ixgbe->unicst_init) {
		/*
		 * Initialize the multiple unicast addresses.
		 * Slot 0 holds the default mac address; one slot is
		 * therefore subtracted from the available count.
		 */
		ixgbe->unicst_total = MAX_NUM_UNICAST_ADDRESSES;

		ixgbe->unicst_avail = ixgbe->unicst_total - 1;

		bcopy(hw->mac.addr, ixgbe->unicst_addr[0].mac.addr,
		    ETHERADDRL);
		ixgbe->unicst_addr[0].mac.set = 1;

		for (slot = 1; slot < ixgbe->unicst_total; slot++)
			ixgbe->unicst_addr[slot].mac.set = 0;

		ixgbe->unicst_init = B_TRUE;
	} else {
		/*
		 * Recover the default mac address
		 */
		bcopy(ixgbe->unicst_addr[0].mac.addr, hw->mac.addr,
		    ETHERADDRL);

		/*
		 * Re-configure the RAR registers, starting at slot 1
		 * (slot 0 is restored via hw->mac.addr above --
		 * NOTE(review): presumably written by the shared code;
		 * confirm)
		 */
		for (slot = 1; slot < ixgbe->unicst_total; slot++)
			(void) ixgbe_set_rar(hw, slot,
			    ixgbe->unicst_addr[slot].mac.addr, NULL, NULL);
	}
}
1980 /*
1981  * ixgbe_unicst_set - Set the unicast address to the specified slot.
1982  */
1983 int
1984 ixgbe_unicst_set(ixgbe_t *ixgbe, const uint8_t *mac_addr,
1985     mac_addr_slot_t slot)
1986 {
1987 	struct ixgbe_hw *hw = &ixgbe->hw;
1988 
1989 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1990 
1991 	/*
1992 	 * Save the unicast address in the software data structure
1993 	 */
1994 	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
1995 
1996 	/*
1997 	 * Set the unicast address to the RAR register
1998 	 */
1999 	(void) ixgbe_set_rar(hw, slot, (uint8_t *)mac_addr, NULL, NULL);
2000 
2001 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2002 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2003 		return (EIO);
2004 	}
2005 
2006 	return (0);
2007 }
2008 
2009 /*
2010  * ixgbe_multicst_add - Add a multicst address.
2011  */
2012 int
2013 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2014 {
2015 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2016 
2017 	if ((multiaddr[0] & 01) == 0) {
2018 		return (EINVAL);
2019 	}
2020 
2021 	if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2022 		return (ENOENT);
2023 	}
2024 
2025 	bcopy(multiaddr,
2026 	    &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2027 	ixgbe->mcast_count++;
2028 
2029 	/*
2030 	 * Update the multicast table in the hardware
2031 	 */
2032 	ixgbe_setup_multicst(ixgbe);
2033 
2034 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2035 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2036 		return (EIO);
2037 	}
2038 
2039 	return (0);
2040 }
2041 
/*
 * ixgbe_multicst_remove - Remove a multicst address.
 *
 * Called with gen_lock held.  Removing an address that is not in the
 * table is not an error; the hardware table is rewritten either way.
 * Returns 0 on success, or EIO if the register access handle has
 * faulted.
 */
int
ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	for (i = 0; i < ixgbe->mcast_count; i++) {
		if (bcmp(multiaddr, &ixgbe->mcast_table[i],
		    ETHERADDRL) == 0) {
			/*
			 * Found the entry: shift every later entry down
			 * one slot (note i is reused as the shift index)
			 * and shrink the table.
			 */
			for (i++; i < ixgbe->mcast_count; i++) {
				ixgbe->mcast_table[i - 1] =
				    ixgbe->mcast_table[i];
			}
			ixgbe->mcast_count--;
			break;
		}
	}

	/*
	 * Update the multicast table in the hardware
	 */
	ixgbe_setup_multicst(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}
2076 
2077 /*
2078  * ixgbe_setup_multicast - Setup multicast data structures.
2079  *
2080  * This routine initializes all of the multicast related structures
2081  * and save them in the hardware registers.
2082  */
2083 static void
2084 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2085 {
2086 	uint8_t *mc_addr_list;
2087 	uint32_t mc_addr_count;
2088 	struct ixgbe_hw *hw = &ixgbe->hw;
2089 
2090 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2091 
2092 	ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2093 
2094 	mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2095 	mc_addr_count = ixgbe->mcast_count;
2096 
2097 	/*
2098 	 * Update the multicast addresses to the MTA registers
2099 	 */
2100 	(void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2101 	    ixgbe_mc_table_itr);
2102 }
2103 
/*
 * ixgbe_get_conf - Get driver configurations set in driver.conf.
 *
 * This routine gets user-configured values out of the configuration
 * file ixgbe.conf.
 *
 * For each configurable value, there is a minimum, a maximum, and a
 * default.
 * If user does not configure a value, use the default.
 * If user configures below the minimum, use the minumum.
 * If user configures above the maximum, use the maxumum.
 */
static void
ixgbe_get_conf(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t flow_control;

	/*
	 * ixgbe driver supports the following user configurations:
	 *
	 * Jumbo frame configuration:
	 *    default_mtu
	 *
	 * Ethernet flow control configuration:
	 *    flow_control
	 *
	 * Multiple rings configurations:
	 *    tx_queue_number
	 *    tx_ring_size
	 *    rx_queue_number
	 *    rx_ring_size
	 *
	 * Call ixgbe_get_prop() to get the value for a specific
	 * configuration parameter.
	 */

	/*
	 * Jumbo frame configuration - max_frame_size controls host buffer
	 * allocation, so includes MTU, ethernet header, vlan tag and
	 * frame check sequence.
	 */
	ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
	    MIN_MTU, MAX_MTU, DEFAULT_MTU);

	ixgbe->max_frame_size = ixgbe->default_mtu +
	    sizeof (struct ether_vlan_header) + ETHERFCSL;

	/*
	 * Ethernet flow control configuration
	 * (the out-of-range value 3 selects ixgbe_fc_default)
	 */
	flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
	    ixgbe_fc_none, 3, ixgbe_fc_full);
	if (flow_control == 3)
		flow_control = ixgbe_fc_default;

	hw->fc.type = flow_control;

	/*
	 * Multiple rings configurations
	 */
	ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
	    MIN_TX_QUEUE_NUM, MAX_TX_QUEUE_NUM, DEFAULT_TX_QUEUE_NUM);
	ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
	    MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);

	ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
	    MIN_RX_QUEUE_NUM, MAX_RX_QUEUE_NUM, DEFAULT_RX_QUEUE_NUM);
	ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
	    MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);

	/*
	 * Tunable used to force an interrupt type. The only use is
	 * for testing of the lesser interrupt types.
	 * 0 = don't force interrupt type
	 * 1 = force interrupt type MSIX
	 * 2 = force interrupt type MSI
	 * 3 = force interrupt type Legacy
	 */
	ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
	    IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
	ixgbe_log(ixgbe, "interrupt force: %d\n", ixgbe->intr_force);

	ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
	ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
	    0, 1, DEFAULT_RX_HCKSUM_ENABLE);
	ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
	    0, 1, DEFAULT_LSO_ENABLE);
	ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
	    0, 1, DEFAULT_TX_HEAD_WB_ENABLE);

	/*
	 * ixgbe LSO needs the tx h/w checksum support.
	 * LSO will be disabled if tx h/w checksum is not
	 * enabled.
	 */
	if (ixgbe->tx_hcksum_enable == B_FALSE) {
		ixgbe->lso_enable = B_FALSE;
	}

	ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
	    MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
	    DEFAULT_TX_COPY_THRESHOLD);
	ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
	    MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
	ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
	    MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
	ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
	    MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);

	ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
	    MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
	    DEFAULT_RX_COPY_THRESHOLD);
	ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
	    MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
	    DEFAULT_RX_LIMIT_PER_INTR);

	/*
	 * Only entry 0 of intr_throttling is configured here --
	 * NOTE(review): presumably the remaining vector entries are
	 * initialized elsewhere before ixgbe_chip_start() programs
	 * EITR for every vector; confirm.
	 */
	ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
	    MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
	    DEFAULT_INTR_THROTTLING);
}
2229 
2230 /*
2231  * ixgbe_get_prop - Get a property value out of the configuration file
2232  * ixgbe.conf.
2233  *
2234  * Caller provides the name of the property, a default value, a minimum
2235  * value, and a maximum value.
2236  *
2237  * Return configured value of the property, with default, minimum and
2238  * maximum properly applied.
2239  */
2240 static int
2241 ixgbe_get_prop(ixgbe_t *ixgbe,
2242     char *propname,	/* name of the property */
2243     int minval,		/* minimum acceptable value */
2244     int maxval,		/* maximim acceptable value */
2245     int defval)		/* default value */
2246 {
2247 	int value;
2248 
2249 	/*
2250 	 * Call ddi_prop_get_int() to read the conf settings
2251 	 */
2252 	value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
2253 	    DDI_PROP_DONTPASS, propname, defval);
2254 	if (value > maxval)
2255 		value = maxval;
2256 
2257 	if (value < minval)
2258 		value = minval;
2259 
2260 	return (value);
2261 }
2262 
2263 /*
2264  * ixgbe_driver_setup_link - Using the link properties to setup the link.
2265  */
2266 int
2267 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
2268 {
2269 	struct ixgbe_mac_info *mac;
2270 	struct ixgbe_phy_info *phy;
2271 	boolean_t invalid;
2272 
2273 	mac = &ixgbe->hw.mac;
2274 	phy = &ixgbe->hw.phy;
2275 	invalid = B_FALSE;
2276 
2277 	if (ixgbe->param_adv_autoneg_cap == 1) {
2278 		mac->autoneg = B_TRUE;
2279 		phy->autoneg_advertised = 0;
2280 
2281 		/*
2282 		 * No half duplex support with 10Gb parts
2283 		 */
2284 		if (ixgbe->param_adv_10000fdx_cap == 1)
2285 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
2286 
2287 		if (ixgbe->param_adv_1000fdx_cap == 1)
2288 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
2289 
2290 		if (ixgbe->param_adv_100fdx_cap == 1)
2291 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
2292 
2293 		if (phy->autoneg_advertised == 0)
2294 			invalid = B_TRUE;
2295 	} else {
2296 		ixgbe->hw.mac.autoneg = B_FALSE;
2297 	}
2298 
2299 	if (invalid) {
2300 		ixgbe_notice(ixgbe, "Invalid link settings. Setup link to "
2301 		    "autonegotiation with full link capabilities.");
2302 		ixgbe->hw.mac.autoneg = B_TRUE;
2303 	}
2304 
2305 	if (setup_hw) {
2306 		if (ixgbe_setup_link(&ixgbe->hw) != IXGBE_SUCCESS)
2307 			return (IXGBE_FAILURE);
2308 	}
2309 
2310 	return (IXGBE_SUCCESS);
2311 }
2312 
2313 /*
2314  * ixgbe_driver_link_check - Link status processing.
2315  */
2316 static boolean_t
2317 ixgbe_driver_link_check(ixgbe_t *ixgbe)
2318 {
2319 	struct ixgbe_hw *hw = &ixgbe->hw;
2320 	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
2321 	boolean_t link_up = B_FALSE;
2322 	boolean_t link_changed = B_FALSE;
2323 
2324 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2325 
2326 	(void) ixgbe_check_link(hw, &speed, &link_up);
2327 	if (link_up) {
2328 		/*
2329 		 * The Link is up, check whether it was marked as down earlier
2330 		 */
2331 		if (ixgbe->link_state != LINK_STATE_UP) {
2332 			switch (speed) {
2333 				case IXGBE_LINK_SPEED_10GB_FULL:
2334 					ixgbe->link_speed = SPEED_10GB;
2335 					break;
2336 				case IXGBE_LINK_SPEED_1GB_FULL:
2337 					ixgbe->link_speed = SPEED_1GB;
2338 					break;
2339 				case IXGBE_LINK_SPEED_100_FULL:
2340 					ixgbe->link_speed = SPEED_100;
2341 			}
2342 			ixgbe->link_duplex = LINK_DUPLEX_FULL;
2343 			ixgbe->link_state = LINK_STATE_UP;
2344 			ixgbe->link_down_timeout = 0;
2345 			link_changed = B_TRUE;
2346 		}
2347 	} else {
2348 		if (ixgbe->link_state != LINK_STATE_DOWN) {
2349 			ixgbe->link_speed = 0;
2350 			ixgbe->link_duplex = 0;
2351 			ixgbe->link_state = LINK_STATE_DOWN;
2352 			link_changed = B_TRUE;
2353 		}
2354 
2355 		if (ixgbe->ixgbe_state & IXGBE_STARTED) {
2356 			if (ixgbe->link_down_timeout < MAX_LINK_DOWN_TIMEOUT) {
2357 				ixgbe->link_down_timeout++;
2358 			} else if (ixgbe->link_down_timeout ==
2359 			    MAX_LINK_DOWN_TIMEOUT) {
2360 				ixgbe_tx_clean(ixgbe);
2361 				ixgbe->link_down_timeout++;
2362 			}
2363 		}
2364 	}
2365 
2366 	return (link_changed);
2367 }
2368 
2369 /*
2370  * ixgbe_local_timer - Driver watchdog function.
2371  *
2372  * This function will handle the transmit stall check, link status check and
2373  * other routines.
2374  */
2375 static void
2376 ixgbe_local_timer(void *arg)
2377 {
2378 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2379 
2380 	if (ixgbe_stall_check(ixgbe)) {
2381 		ixgbe->reset_count++;
2382 		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
2383 			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
2384 	}
2385 
2386 	ixgbe_restart_watchdog_timer(ixgbe);
2387 }
2388 
2389 /*
2390  * ixgbe_stall_check - Check for transmit stall.
2391  *
2392  * This function checks if the adapter is stalled (in transmit).
2393  *
2394  * It is called each time the watchdog timeout is invoked.
2395  * If the transmit descriptor reclaim continuously fails,
2396  * the watchdog value will increment by 1. If the watchdog
2397  * value exceeds the threshold, the ixgbe is assumed to
2398  * have stalled and need to be reset.
2399  */
2400 static boolean_t
2401 ixgbe_stall_check(ixgbe_t *ixgbe)
2402 {
2403 	ixgbe_tx_ring_t *tx_ring;
2404 	boolean_t result;
2405 	int i;
2406 
2407 	if (ixgbe->link_state != LINK_STATE_UP)
2408 		return (B_FALSE);
2409 
2410 	/*
2411 	 * If any tx ring is stalled, we'll reset the chipset
2412 	 */
2413 	result = B_FALSE;
2414 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2415 		tx_ring = &ixgbe->tx_rings[i];
2416 
2417 		if (tx_ring->recycle_fail > 0)
2418 			tx_ring->stall_watchdog++;
2419 		else
2420 			tx_ring->stall_watchdog = 0;
2421 
2422 		if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
2423 			result = B_TRUE;
2424 			break;
2425 		}
2426 	}
2427 
2428 	if (result) {
2429 		tx_ring->stall_watchdog = 0;
2430 		tx_ring->recycle_fail = 0;
2431 	}
2432 
2433 	return (result);
2434 }
2435 
2436 
2437 /*
2438  * is_valid_mac_addr - Check if the mac address is valid.
2439  */
2440 static boolean_t
2441 is_valid_mac_addr(uint8_t *mac_addr)
2442 {
2443 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
2444 	const uint8_t addr_test2[6] =
2445 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2446 
2447 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
2448 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
2449 		return (B_FALSE);
2450 
2451 	return (B_TRUE);
2452 }
2453 
/*
 * ixgbe_find_mac_address - Override the chip's MAC address from firmware
 * properties where appropriate.
 *
 * On SPARC, OBP-supplied properties ("local-mac-address",
 * "local-mac-address?" and "mac-address") may override the address read
 * from the EEPROM; the winning address is copied into both hw->mac.addr
 * and hw->mac.perm_addr.  On non-SPARC platforms this is a no-op.
 *
 * NOTE(review): as written this function returns B_TRUE on every path
 * (including SPARC with no property found) — callers cannot currently
 * distinguish "found" from "not found".
 */
static boolean_t
ixgbe_find_mac_address(ixgbe_t *ixgbe)
{
#ifdef __sparc
	struct ixgbe_hw *hw = &ixgbe->hw;
	uchar_t *bytes;
	struct ether_addr sysaddr;
	uint_t nelts;
	int err;
	boolean_t found = B_FALSE;

	/*
	 * The "vendor's factory-set address" may already have
	 * been extracted from the chip, but if the property
	 * "local-mac-address" is set we use that instead.
	 *
	 * We check whether it looks like an array of 6
	 * bytes (which it should, if OBP set it).  If we can't
	 * make sense of it this way, we'll ignore it.
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Look up the OBP property "local-mac-address?". If the user has set
	 * 'local-mac-address? = false', use "the system address" instead.
	 */
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
			if (localetheraddr(NULL, &sysaddr) != 0) {
				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
				found = B_TRUE;
			}
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Finally(!), if there's a valid "mac-address" property (created
	 * if we netbooted from this interface), we must use this instead
	 * of any of the above to ensure that the NFS/install server doesn't
	 * get confused by the address changing as Solaris takes over!
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	if (found) {
		/* keep the permanent address in sync with the override */
		bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
		return (B_TRUE);
	}
#else
	_NOTE(ARGUNUSED(ixgbe));
#endif

	return (B_TRUE);
}
2527 
2528 #pragma inline(ixgbe_arm_watchdog_timer)
2529 static void
2530 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
2531 {
2532 	/*
2533 	 * Fire a watchdog timer
2534 	 */
2535 	ixgbe->watchdog_tid =
2536 	    timeout(ixgbe_local_timer,
2537 	    (void *)ixgbe, 1 * drv_usectohz(1000000));
2538 
2539 }
2540 
2541 /*
2542  * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
2543  */
2544 void
2545 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
2546 {
2547 	mutex_enter(&ixgbe->watchdog_lock);
2548 
2549 	if (!ixgbe->watchdog_enable) {
2550 		ixgbe->watchdog_enable = B_TRUE;
2551 		ixgbe->watchdog_start = B_TRUE;
2552 		ixgbe_arm_watchdog_timer(ixgbe);
2553 	}
2554 
2555 	mutex_exit(&ixgbe->watchdog_lock);
2556 }
2557 
2558 /*
2559  * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
2560  */
2561 void
2562 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
2563 {
2564 	timeout_id_t tid;
2565 
2566 	mutex_enter(&ixgbe->watchdog_lock);
2567 
2568 	ixgbe->watchdog_enable = B_FALSE;
2569 	ixgbe->watchdog_start = B_FALSE;
2570 	tid = ixgbe->watchdog_tid;
2571 	ixgbe->watchdog_tid = 0;
2572 
2573 	mutex_exit(&ixgbe->watchdog_lock);
2574 
2575 	if (tid != 0)
2576 		(void) untimeout(tid);
2577 }
2578 
2579 /*
2580  * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
2581  */
2582 static void
2583 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
2584 {
2585 	mutex_enter(&ixgbe->watchdog_lock);
2586 
2587 	if (ixgbe->watchdog_enable) {
2588 		if (!ixgbe->watchdog_start) {
2589 			ixgbe->watchdog_start = B_TRUE;
2590 			ixgbe_arm_watchdog_timer(ixgbe);
2591 		}
2592 	}
2593 
2594 	mutex_exit(&ixgbe->watchdog_lock);
2595 }
2596 
2597 /*
2598  * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
2599  */
2600 static void
2601 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
2602 {
2603 	mutex_enter(&ixgbe->watchdog_lock);
2604 
2605 	if (ixgbe->watchdog_start)
2606 		ixgbe_arm_watchdog_timer(ixgbe);
2607 
2608 	mutex_exit(&ixgbe->watchdog_lock);
2609 }
2610 
2611 /*
2612  * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
2613  */
2614 static void
2615 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
2616 {
2617 	timeout_id_t tid;
2618 
2619 	mutex_enter(&ixgbe->watchdog_lock);
2620 
2621 	ixgbe->watchdog_start = B_FALSE;
2622 	tid = ixgbe->watchdog_tid;
2623 	ixgbe->watchdog_tid = 0;
2624 
2625 	mutex_exit(&ixgbe->watchdog_lock);
2626 
2627 	if (tid != 0)
2628 		(void) untimeout(tid);
2629 }
2630 
2631 /*
2632  * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
2633  */
2634 static void
2635 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
2636 {
2637 	struct ixgbe_hw *hw = &ixgbe->hw;
2638 
2639 	/*
2640 	 * mask all interrupts off
2641 	 */
2642 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
2643 
2644 	/*
2645 	 * for MSI-X, also disable autoclear
2646 	 */
2647 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
2648 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
2649 	}
2650 
2651 	IXGBE_WRITE_FLUSH(hw);
2652 }
2653 
2654 /*
2655  * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
2656  */
2657 static void
2658 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
2659 {
2660 	struct ixgbe_hw *hw = &ixgbe->hw;
2661 	uint32_t eims, eiac, gpie;
2662 
2663 	gpie = 0;
2664 	eims = IXGBE_EIMS_ENABLE_MASK;	/* shared code default */
2665 	eims &= ~IXGBE_EIMS_TCP_TIMER;	/* minus tcp timer */
2666 
2667 	/*
2668 	 * msi-x mode
2669 	 */
2670 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
2671 		/* enable autoclear but not on bits 29:20 */
2672 		eiac = (eims & ~0x3ff00000);
2673 
2674 		/* general purpose interrupt enable */
2675 		gpie |= (IXGBE_GPIE_MSIX_MODE |
2676 		    IXGBE_GPIE_PBA_SUPPORT |IXGBE_GPIE_OCD);
2677 	/*
2678 	 * non-msi-x mode
2679 	 */
2680 	} else {
2681 
2682 		/* disable autoclear, leave gpie at default */
2683 		eiac = 0;
2684 	}
2685 
2686 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims);
2687 	IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
2688 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2689 	IXGBE_WRITE_FLUSH(hw);
2690 }
2691 
2692 /*
2693  * ixgbe_loopback_ioctl - Loopback support.
2694  */
2695 enum ioc_reply
2696 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
2697 {
2698 	lb_info_sz_t *lbsp;
2699 	lb_property_t *lbpp;
2700 	uint32_t *lbmp;
2701 	uint32_t size;
2702 	uint32_t value;
2703 
2704 	if (mp->b_cont == NULL)
2705 		return (IOC_INVAL);
2706 
2707 	switch (iocp->ioc_cmd) {
2708 	default:
2709 		return (IOC_INVAL);
2710 
2711 	case LB_GET_INFO_SIZE:
2712 		size = sizeof (lb_info_sz_t);
2713 		if (iocp->ioc_count != size)
2714 			return (IOC_INVAL);
2715 
2716 		value = sizeof (lb_normal);
2717 		value += sizeof (lb_mac);
2718 
2719 		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
2720 		*lbsp = value;
2721 		break;
2722 
2723 	case LB_GET_INFO:
2724 		value = sizeof (lb_normal);
2725 		value += sizeof (lb_mac);
2726 
2727 		size = value;
2728 		if (iocp->ioc_count != size)
2729 			return (IOC_INVAL);
2730 
2731 		value = 0;
2732 		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
2733 
2734 		lbpp[value++] = lb_normal;
2735 		lbpp[value++] = lb_mac;
2736 		break;
2737 
2738 	case LB_GET_MODE:
2739 		size = sizeof (uint32_t);
2740 		if (iocp->ioc_count != size)
2741 			return (IOC_INVAL);
2742 
2743 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
2744 		*lbmp = ixgbe->loopback_mode;
2745 		break;
2746 
2747 	case LB_SET_MODE:
2748 		size = 0;
2749 		if (iocp->ioc_count != sizeof (uint32_t))
2750 			return (IOC_INVAL);
2751 
2752 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
2753 		if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
2754 			return (IOC_INVAL);
2755 		break;
2756 	}
2757 
2758 	iocp->ioc_count = size;
2759 	iocp->ioc_error = 0;
2760 
2761 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2762 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2763 		return (IOC_INVAL);
2764 	}
2765 
2766 	return (IOC_REPLY);
2767 }
2768 
2769 /*
2770  * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
2771  */
2772 static boolean_t
2773 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
2774 {
2775 	struct ixgbe_hw *hw;
2776 
2777 	if (mode == ixgbe->loopback_mode)
2778 		return (B_TRUE);
2779 
2780 	hw = &ixgbe->hw;
2781 
2782 	ixgbe->loopback_mode = mode;
2783 
2784 	if (mode == IXGBE_LB_NONE) {
2785 		/*
2786 		 * Reset the chip
2787 		 */
2788 		hw->phy.autoneg_wait_to_complete = B_TRUE;
2789 		(void) ixgbe_reset(ixgbe);
2790 		hw->phy.autoneg_wait_to_complete = B_FALSE;
2791 		return (B_TRUE);
2792 	}
2793 
2794 	mutex_enter(&ixgbe->gen_lock);
2795 
2796 	switch (mode) {
2797 	default:
2798 		mutex_exit(&ixgbe->gen_lock);
2799 		return (B_FALSE);
2800 
2801 	case IXGBE_LB_INTERNAL_MAC:
2802 		ixgbe_set_internal_mac_loopback(ixgbe);
2803 		break;
2804 	}
2805 
2806 	mutex_exit(&ixgbe->gen_lock);
2807 
2808 	return (B_TRUE);
2809 }
2810 
2811 /*
2812  * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
2813  */
2814 static void
2815 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
2816 {
2817 	struct ixgbe_hw *hw;
2818 	uint32_t reg;
2819 	uint8_t atlas;
2820 
2821 	hw = &ixgbe->hw;
2822 
2823 	/*
2824 	 * Setup MAC loopback
2825 	 */
2826 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
2827 	reg |= IXGBE_HLREG0_LPBK;
2828 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
2829 
2830 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
2831 	reg &= ~IXGBE_AUTOC_LMS_MASK;
2832 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
2833 
2834 	/*
2835 	 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
2836 	 */
2837 	if (hw->mac.type == ixgbe_mac_82598EB) {
2838 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
2839 		    &atlas);
2840 		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
2841 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
2842 		    atlas);
2843 
2844 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
2845 		    &atlas);
2846 		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
2847 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
2848 		    atlas);
2849 
2850 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
2851 		    &atlas);
2852 		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
2853 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
2854 		    atlas);
2855 
2856 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
2857 		    &atlas);
2858 		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
2859 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
2860 		    atlas);
2861 	}
2862 }
2863 
2864 #pragma inline(ixgbe_intr_rx_work)
2865 /*
2866  * ixgbe_intr_rx_work - RX processing of ISR.
2867  */
2868 static void
2869 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
2870 {
2871 	mblk_t *mp;
2872 
2873 	mutex_enter(&rx_ring->rx_lock);
2874 
2875 	mp = ixgbe_rx(rx_ring);
2876 	mutex_exit(&rx_ring->rx_lock);
2877 
2878 	if (mp != NULL)
2879 		mac_rx(rx_ring->ixgbe->mac_hdl, NULL, mp);
2880 }
2881 
2882 #pragma inline(ixgbe_intr_tx_work)
2883 /*
2884  * ixgbe_intr_tx_work - TX processing of ISR.
2885  */
2886 static void
2887 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
2888 {
2889 	/*
2890 	 * Recycle the tx descriptors
2891 	 */
2892 	tx_ring->tx_recycle(tx_ring);
2893 
2894 	/*
2895 	 * Schedule the re-transmit
2896 	 */
2897 	if (tx_ring->reschedule &&
2898 	    (tx_ring->tbd_free >= tx_ring->resched_thresh)) {
2899 		tx_ring->reschedule = B_FALSE;
2900 		mac_tx_update(tx_ring->ixgbe->mac_hdl);
2901 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
2902 	}
2903 }
2904 
2905 #pragma inline(ixgbe_intr_other_work)
2906 /*
2907  * ixgbe_intr_other_work - Other processing of ISR.
2908  */
2909 static void
2910 ixgbe_intr_other_work(ixgbe_t *ixgbe)
2911 {
2912 	boolean_t link_changed;
2913 
2914 	ixgbe_stop_watchdog_timer(ixgbe);
2915 
2916 	mutex_enter(&ixgbe->gen_lock);
2917 
2918 	/*
2919 	 * Take care of link status change
2920 	 */
2921 	link_changed = ixgbe_driver_link_check(ixgbe);
2922 
2923 	/*
2924 	 * Get new phy state
2925 	 */
2926 	ixgbe_get_hw_state(ixgbe);
2927 
2928 	mutex_exit(&ixgbe->gen_lock);
2929 
2930 	if (link_changed)
2931 		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
2932 
2933 	ixgbe_start_watchdog_timer(ixgbe);
2934 }
2935 
2936 /*
2937  * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
2938  */
2939 static uint_t
2940 ixgbe_intr_legacy(void *arg1, void *arg2)
2941 {
2942 	_NOTE(ARGUNUSED(arg2));
2943 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
2944 	struct ixgbe_hw *hw = &ixgbe->hw;
2945 	ixgbe_tx_ring_t *tx_ring;
2946 	uint32_t eicr;
2947 	mblk_t *mp;
2948 	boolean_t tx_reschedule;
2949 	boolean_t link_changed;
2950 	uint_t result;
2951 
2952 
2953 	mutex_enter(&ixgbe->gen_lock);
2954 
2955 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
2956 		mutex_exit(&ixgbe->gen_lock);
2957 		return (DDI_INTR_UNCLAIMED);
2958 	}
2959 
2960 	mp = NULL;
2961 	tx_reschedule = B_FALSE;
2962 	link_changed = B_FALSE;
2963 
2964 	/*
2965 	 * Any bit set in eicr: claim this interrupt
2966 	 */
2967 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2968 	if (eicr) {
2969 		/*
2970 		 * For legacy interrupt, we have only one interrupt,
2971 		 * so we have only one rx ring and one tx ring enabled.
2972 		 */
2973 		ASSERT(ixgbe->num_rx_rings == 1);
2974 		ASSERT(ixgbe->num_tx_rings == 1);
2975 
2976 		/*
2977 		 * For legacy interrupt, we can't differentiate
2978 		 * between tx and rx, so always clean both
2979 		 */
2980 		if (eicr & IXGBE_EICR_RTX_QUEUE) {
2981 
2982 			/*
2983 			 * Clean the rx descriptors
2984 			 */
2985 			mp = ixgbe_rx(&ixgbe->rx_rings[0]);
2986 
2987 			/*
2988 			 * Recycle the tx descriptors
2989 			 */
2990 			tx_ring = &ixgbe->tx_rings[0];
2991 			tx_ring->tx_recycle(tx_ring);
2992 
2993 			/*
2994 			 * Schedule the re-transmit
2995 			 */
2996 			tx_reschedule = (tx_ring->reschedule &&
2997 			    (tx_ring->tbd_free >= tx_ring->resched_thresh));
2998 		}
2999 
3000 		if (eicr & IXGBE_EICR_LSC) {
3001 
3002 			/* take care of link status change */
3003 			link_changed = ixgbe_driver_link_check(ixgbe);
3004 
3005 			/* Get new phy state */
3006 			ixgbe_get_hw_state(ixgbe);
3007 		}
3008 
3009 		result = DDI_INTR_CLAIMED;
3010 	} else {
3011 		/*
3012 		 * No interrupt cause bits set: don't claim this interrupt.
3013 		 */
3014 		result = DDI_INTR_UNCLAIMED;
3015 	}
3016 
3017 	mutex_exit(&ixgbe->gen_lock);
3018 
3019 	/*
3020 	 * Do the following work outside of the gen_lock
3021 	 */
3022 	if (mp != NULL)
3023 		mac_rx(ixgbe->mac_hdl, NULL, mp);
3024 
3025 	if (tx_reschedule)  {
3026 		tx_ring->reschedule = B_FALSE;
3027 		mac_tx_update(ixgbe->mac_hdl);
3028 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
3029 	}
3030 
3031 	if (link_changed)
3032 		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
3033 
3034 	return (result);
3035 }
3036 
3037 /*
3038  * ixgbe_intr_msi - Interrupt handler for MSI.
3039  */
3040 static uint_t
3041 ixgbe_intr_msi(void *arg1, void *arg2)
3042 {
3043 	_NOTE(ARGUNUSED(arg2));
3044 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3045 	struct ixgbe_hw *hw = &ixgbe->hw;
3046 	uint32_t eicr;
3047 
3048 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3049 
3050 	/*
3051 	 * For MSI interrupt, we have only one vector,
3052 	 * so we have only one rx ring and one tx ring enabled.
3053 	 */
3054 	ASSERT(ixgbe->num_rx_rings == 1);
3055 	ASSERT(ixgbe->num_tx_rings == 1);
3056 
3057 	/*
3058 	 * For MSI interrupt, we can't differentiate
3059 	 * between tx and rx, so always clean both.
3060 	 */
3061 	if (eicr & IXGBE_EICR_RTX_QUEUE) {
3062 		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
3063 		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
3064 	}
3065 
3066 	if (eicr & IXGBE_EICR_LSC) {
3067 		ixgbe_intr_other_work(ixgbe);
3068 	}
3069 
3070 	return (DDI_INTR_CLAIMED);
3071 }
3072 
3073 /*
3074  * ixgbe_intr_rx - Interrupt handler for rx.
3075  */
3076 static uint_t
3077 ixgbe_intr_rx(void *arg1, void *arg2)
3078 {
3079 	_NOTE(ARGUNUSED(arg2));
3080 	ixgbe_ring_vector_t	*vect = (ixgbe_ring_vector_t *)arg1;
3081 	ixgbe_t			*ixgbe = vect->ixgbe;
3082 	int			r_idx;
3083 
3084 	/*
3085 	 * clean each rx ring that has its bit set in the map
3086 	 */
3087 	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
3088 
3089 	while (r_idx >= 0) {
3090 		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
3091 		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
3092 		    (ixgbe->num_rx_rings - 1));
3093 	}
3094 
3095 	return (DDI_INTR_CLAIMED);
3096 }
3097 
3098 /*
3099  * ixgbe_intr_tx_other - Interrupt handler for both tx and other.
3100  *
3101  * Always look for Tx cleanup work.  Only look for other work if the right
3102  * bits are set in the Interrupt Cause Register.
3103  */
3104 static uint_t
3105 ixgbe_intr_tx_other(void *arg1, void *arg2)
3106 {
3107 	_NOTE(ARGUNUSED(arg2));
3108 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3109 	struct ixgbe_hw *hw = &ixgbe->hw;
3110 	uint32_t eicr;
3111 
3112 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3113 
3114 	/*
3115 	 * Always look for Tx cleanup work.  We don't have separate
3116 	 * transmit vectors, so we have only one tx ring enabled.
3117 	 */
3118 	ASSERT(ixgbe->num_tx_rings == 1);
3119 	ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
3120 
3121 	/*
3122 	 * Check for "other" causes.
3123 	 */
3124 	if (eicr & IXGBE_EICR_LSC) {
3125 		ixgbe_intr_other_work(ixgbe);
3126 	}
3127 
3128 	return (DDI_INTR_CLAIMED);
3129 }
3130 
3131 /*
3132  * ixgbe_alloc_intrs - Allocate interrupts for the driver.
3133  *
3134  * Normal sequence is to try MSI-X; if not sucessful, try MSI;
3135  * if not successful, try Legacy.
3136  * ixgbe->intr_force can be used to force sequence to start with
3137  * any of the 3 types.
3138  * If MSI-X is not used, number of tx/rx rings is forced to 1.
3139  */
3140 static int
3141 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
3142 {
3143 	dev_info_t *devinfo;
3144 	int intr_types;
3145 	int rc;
3146 
3147 	devinfo = ixgbe->dip;
3148 
3149 	/*
3150 	 * Get supported interrupt types
3151 	 */
3152 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
3153 
3154 	if (rc != DDI_SUCCESS) {
3155 		ixgbe_log(ixgbe,
3156 		    "Get supported interrupt types failed: %d", rc);
3157 		return (IXGBE_FAILURE);
3158 	}
3159 	IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
3160 
3161 	ixgbe->intr_type = 0;
3162 
3163 	/*
3164 	 * Install MSI-X interrupts
3165 	 */
3166 	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
3167 	    (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
3168 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
3169 		if (rc == IXGBE_SUCCESS)
3170 			return (IXGBE_SUCCESS);
3171 
3172 		ixgbe_log(ixgbe,
3173 		    "Allocate MSI-X failed, trying MSI interrupts...");
3174 	}
3175 
3176 	/*
3177 	 * MSI-X not used, force rings to 1
3178 	 */
3179 	ixgbe->num_rx_rings = 1;
3180 	ixgbe->num_tx_rings = 1;
3181 	ixgbe_log(ixgbe,
3182 	    "MSI-X not used, force rx and tx queue number to 1");
3183 
3184 	/*
3185 	 * Install MSI interrupts
3186 	 */
3187 	if ((intr_types & DDI_INTR_TYPE_MSI) &&
3188 	    (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
3189 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
3190 		if (rc == IXGBE_SUCCESS)
3191 			return (IXGBE_SUCCESS);
3192 
3193 		ixgbe_log(ixgbe,
3194 		    "Allocate MSI failed, trying Legacy interrupts...");
3195 	}
3196 
3197 	/*
3198 	 * Install legacy interrupts
3199 	 */
3200 	if (intr_types & DDI_INTR_TYPE_FIXED) {
3201 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
3202 		if (rc == IXGBE_SUCCESS)
3203 			return (IXGBE_SUCCESS);
3204 
3205 		ixgbe_log(ixgbe,
3206 		    "Allocate Legacy interrupts failed");
3207 	}
3208 
3209 	/*
3210 	 * If none of the 3 types succeeded, return failure
3211 	 */
3212 	return (IXGBE_FAILURE);
3213 }
3214 
3215 /*
3216  * ixgbe_alloc_intr_handles - Allocate interrupt handles.
3217  *
3218  * For legacy and MSI, only 1 handle is needed.  For MSI-X,
3219  * if fewer than 2 handles are available, return failure.
3220  * Upon success, this sets the number of Rx rings to a number that
3221  * matches the handles available for Rx interrupts.
3222  */
3223 static int
3224 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
3225 {
3226 	dev_info_t *devinfo;
3227 	int request, count, avail, actual;
3228 	int rx_rings, minimum;
3229 	int rc;
3230 
3231 	devinfo = ixgbe->dip;
3232 
3233 	/*
3234 	 * Currently only 1 tx ring is supported. More tx rings
3235 	 * will be supported with future enhancement.
3236 	 */
3237 	if (ixgbe->num_tx_rings > 1) {
3238 		ixgbe->num_tx_rings = 1;
3239 		ixgbe_log(ixgbe,
3240 		    "Use only 1 MSI-X vector for tx, "
3241 		    "force tx queue number to 1");
3242 	}
3243 
3244 	switch (intr_type) {
3245 	case DDI_INTR_TYPE_FIXED:
3246 		request = 1;	/* Request 1 legacy interrupt handle */
3247 		minimum = 1;
3248 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
3249 		break;
3250 
3251 	case DDI_INTR_TYPE_MSI:
3252 		request = 1;	/* Request 1 MSI interrupt handle */
3253 		minimum = 1;
3254 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
3255 		break;
3256 
3257 	case DDI_INTR_TYPE_MSIX:
3258 		/*
3259 		 * Best number of vectors for the adapter is
3260 		 * # rx rings + # tx rings + 1 for other
3261 		 * But currently we only support number of vectors of
3262 		 * # rx rings + 1 for tx & other
3263 		 */
3264 		request = ixgbe->num_rx_rings + 1;
3265 		minimum = 2;
3266 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
3267 		break;
3268 
3269 	default:
3270 		ixgbe_log(ixgbe,
3271 		    "invalid call to ixgbe_alloc_intr_handles(): %d\n",
3272 		    intr_type);
3273 		return (IXGBE_FAILURE);
3274 	}
3275 	IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d  minimum: %d",
3276 	    request, minimum);
3277 
3278 	/*
3279 	 * Get number of supported interrupts
3280 	 */
3281 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
3282 	if ((rc != DDI_SUCCESS) || (count < minimum)) {
3283 		ixgbe_log(ixgbe,
3284 		    "Get interrupt number failed. Return: %d, count: %d",
3285 		    rc, count);
3286 		return (IXGBE_FAILURE);
3287 	}
3288 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
3289 
3290 	/*
3291 	 * Get number of available interrupts
3292 	 */
3293 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
3294 	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
3295 		ixgbe_log(ixgbe,
3296 		    "Get interrupt available number failed. "
3297 		    "Return: %d, available: %d", rc, avail);
3298 		return (IXGBE_FAILURE);
3299 	}
3300 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts available: %d", avail);
3301 
3302 	if (avail < request) {
3303 		ixgbe_log(ixgbe, "Request %d handles, %d available",
3304 		    request, avail);
3305 		request = avail;
3306 	}
3307 
3308 	actual = 0;
3309 	ixgbe->intr_cnt = 0;
3310 
3311 	/*
3312 	 * Allocate an array of interrupt handles
3313 	 */
3314 	ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
3315 	ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
3316 
3317 	rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
3318 	    request, &actual, DDI_INTR_ALLOC_NORMAL);
3319 	if (rc != DDI_SUCCESS) {
3320 		ixgbe_log(ixgbe, "Allocate interrupts failed. "
3321 		    "return: %d, request: %d, actual: %d",
3322 		    rc, request, actual);
3323 		goto alloc_handle_fail;
3324 	}
3325 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
3326 
3327 	ixgbe->intr_cnt = actual;
3328 
3329 	/*
3330 	 * Now we know the actual number of vectors.  Here we assume that
3331 	 * tx and other will share 1 vector and all remaining (must be at
3332 	 * least 1 remaining) will be used for rx.
3333 	 */
3334 	if (actual < minimum) {
3335 		ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
3336 		    actual);
3337 		goto alloc_handle_fail;
3338 	}
3339 
3340 	/*
3341 	 * For MSI-X, actual might force us to reduce number of rx rings
3342 	 */
3343 	if (intr_type == DDI_INTR_TYPE_MSIX) {
3344 		rx_rings = actual - 1;
3345 		if (rx_rings < ixgbe->num_rx_rings) {
3346 			ixgbe_log(ixgbe,
3347 			    "MSI-X vectors force Rx queue number to %d",
3348 			    rx_rings);
3349 			ixgbe->num_rx_rings = rx_rings;
3350 		}
3351 	}
3352 
3353 	/*
3354 	 * Get priority for first vector, assume remaining are all the same
3355 	 */
3356 	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
3357 	if (rc != DDI_SUCCESS) {
3358 		ixgbe_log(ixgbe,
3359 		    "Get interrupt priority failed: %d", rc);
3360 		goto alloc_handle_fail;
3361 	}
3362 
3363 	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
3364 	if (rc != DDI_SUCCESS) {
3365 		ixgbe_log(ixgbe,
3366 		    "Get interrupt cap failed: %d", rc);
3367 		goto alloc_handle_fail;
3368 	}
3369 
3370 	ixgbe->intr_type = intr_type;
3371 
3372 	return (IXGBE_SUCCESS);
3373 
3374 alloc_handle_fail:
3375 	ixgbe_rem_intrs(ixgbe);
3376 
3377 	return (IXGBE_FAILURE);
3378 }
3379 
3380 /*
3381  * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
3382  *
3383  * Before adding the interrupt handlers, the interrupt vectors have
3384  * been allocated, and the rx/tx rings have also been allocated.
3385  */
static int
ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	int vector;
	int rc;
	int i;

	vector = 0;

	switch (ixgbe->intr_type) {
	case DDI_INTR_TYPE_MSIX:
		/*
		 * Add interrupt handler for tx + other.  Vector 0 is
		 * dedicated to ixgbe_intr_tx_other and gets the whole
		 * ixgbe_t as its callback argument.
		 */
		rc = ddi_intr_add_handler(ixgbe->htable[vector],
		    (ddi_intr_handler_t *)ixgbe_intr_tx_other,
		    (void *)ixgbe, NULL);
		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Add tx/other interrupt handler failed: %d", rc);
			return (IXGBE_FAILURE);
		}
		vector++;

		/*
		 * Add interrupt handler for each rx ring; each rx vector's
		 * callback argument is its vect_map[] entry, not ixgbe.
		 */
		for (i = 0; i < ixgbe->num_rx_rings; i++) {
			rx_ring = &ixgbe->rx_rings[i];

			/*
			 * install pointer to vect_map[vector]
			 */
			rc = ddi_intr_add_handler(ixgbe->htable[vector],
			    (ddi_intr_handler_t *)ixgbe_intr_rx,
			    (void *)&ixgbe->vect_map[vector], NULL);

			if (rc != DDI_SUCCESS) {
				ixgbe_log(ixgbe,
				    "Add rx interrupt handler failed. "
				    "return: %d, rx ring: %d", rc, i);
				/*
				 * Unwind: remove every handler added so
				 * far (vectors 0 .. vector-1).
				 */
				for (vector--; vector >= 0; vector--) {
					(void) ddi_intr_remove_handler(
					    ixgbe->htable[vector]);
				}
				return (IXGBE_FAILURE);
			}

			/* Remember which vector services this rx ring */
			rx_ring->intr_vector = vector;

			vector++;
		}
		break;

	case DDI_INTR_TYPE_MSI:
		/*
		 * Add interrupt handlers for the only vector
		 */
		rc = ddi_intr_add_handler(ixgbe->htable[vector],
		    (ddi_intr_handler_t *)ixgbe_intr_msi,
		    (void *)ixgbe, NULL);

		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Add MSI interrupt handler failed: %d", rc);
			return (IXGBE_FAILURE);
		}

		/* The single vector services rx ring 0 */
		rx_ring = &ixgbe->rx_rings[0];
		rx_ring->intr_vector = vector;

		vector++;
		break;

	case DDI_INTR_TYPE_FIXED:
		/*
		 * Add interrupt handlers for the only vector
		 */
		rc = ddi_intr_add_handler(ixgbe->htable[vector],
		    (ddi_intr_handler_t *)ixgbe_intr_legacy,
		    (void *)ixgbe, NULL);

		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Add legacy interrupt handler failed: %d", rc);
			return (IXGBE_FAILURE);
		}

		/* The single vector services rx ring 0 */
		rx_ring = &ixgbe->rx_rings[0];
		rx_ring->intr_vector = vector;

		vector++;
		break;

	default:
		return (IXGBE_FAILURE);
	}

	/* Every allocated vector must now have a handler attached */
	ASSERT(vector == ixgbe->intr_cnt);

	return (IXGBE_SUCCESS);
}
3489 
3490 #pragma inline(ixgbe_map_rxring_to_vector)
3491 /*
3492  * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
3493  */
3494 static void
3495 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
3496 {
3497 	ixgbe->vect_map[v_idx].ixgbe = ixgbe;
3498 
3499 	/*
3500 	 * Set bit in map
3501 	 */
3502 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
3503 
3504 	/*
3505 	 * Count bits set
3506 	 */
3507 	ixgbe->vect_map[v_idx].rxr_cnt++;
3508 
3509 	/*
3510 	 * Remember bit position
3511 	 */
3512 	ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
3513 }
3514 
3515 #pragma inline(ixgbe_map_txring_to_vector)
3516 /*
3517  * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
3518  */
3519 static void
3520 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
3521 {
3522 	ixgbe->vect_map[v_idx].ixgbe = ixgbe;
3523 
3524 	/*
3525 	 * Set bit in map
3526 	 */
3527 	BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
3528 
3529 	/*
3530 	 * Count bits set
3531 	 */
3532 	ixgbe->vect_map[v_idx].txr_cnt++;
3533 
3534 	/*
3535 	 * Remember bit position
3536 	 */
3537 	ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
3538 }
3539 
3540 /*
3541  * ixgbe_set_ivar - Set the given entry in the given interrupt vector
3542  * allocation register (IVAR).
3543  */
3544 static void
3545 ixgbe_set_ivar(ixgbe_t *ixgbe, uint16_t int_alloc_entry, uint8_t msix_vector)
3546 {
3547 	struct ixgbe_hw *hw = &ixgbe->hw;
3548 	u32 ivar, index;
3549 
3550 	msix_vector |= IXGBE_IVAR_ALLOC_VAL;
3551 	index = (int_alloc_entry >> 2) & 0x1F;
3552 	ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3553 	ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
3554 	ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
3555 	IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3556 }
3557 
3558 /*
3559  * ixgbe_map_rings_to_vectors - Map descriptor rings to interrupt vectors.
3560  *
3561  * For msi-x, this currently implements only the scheme which is
3562  * 1 vector for tx + other, 1 vector for each rx ring.
3563  */
3564 static int
3565 ixgbe_map_rings_to_vectors(ixgbe_t *ixgbe)
3566 {
3567 	int i, vector = 0;
3568 	int vect_remain = ixgbe->intr_cnt;
3569 
3570 	/* initialize vector map */
3571 	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
3572 
3573 	/*
3574 	 * non-MSI-X case is very simple: all interrupts on vector 0
3575 	 */
3576 	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
3577 		ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
3578 		ixgbe_map_txring_to_vector(ixgbe, 0, 0);
3579 		return (IXGBE_SUCCESS);
3580 	}
3581 
3582 	/*
3583 	 * Ring/vector mapping for MSI-X
3584 	 */
3585 
3586 	/*
3587 	 * Map vector 0 to tx
3588 	 */
3589 	ixgbe_map_txring_to_vector(ixgbe, 0, vector++);
3590 	vect_remain--;
3591 
3592 	/*
3593 	 * Map remaining vectors to rx rings
3594 	 */
3595 	for (i = 0; i < vect_remain; i++) {
3596 		ixgbe_map_rxring_to_vector(ixgbe, i, vector++);
3597 	}
3598 
3599 	return (IXGBE_SUCCESS);
3600 }
3601 
3602 /*
3603  * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
3604  *
3605  * This relies on queue/vector mapping already set up in the
3606  * vect_map[] structures
3607  */
static void
ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_ring_vector_t	*vect;	/* vector bitmap */
	int			r_idx;	/* ring index */
	int			v_idx;	/* vector index */

	/*
	 * Clear any previous entries
	 */
	for (v_idx = 0; v_idx < IXGBE_IVAR_REG_NUM; v_idx++)
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);

	/*
	 * "Other" is always on vector 0
	 */
	ixgbe_set_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0);

	/*
	 * For each interrupt vector, populate the IVAR table
	 */
	for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
		vect = &ixgbe->vect_map[v_idx];

		/*
		 * For each rx ring bit set in this vector's rx_map, point
		 * that rx queue's IVAR entry at this vector.  bt_getlowbit()
		 * returns a negative value once no set bit remains in the
		 * range (loop terminator).
		 */
		r_idx = bt_getlowbit(vect->rx_map, 0,
		    (ixgbe->num_rx_rings - 1));

		while (r_idx >= 0) {
			ixgbe_set_ivar(ixgbe, IXGBE_IVAR_RX_QUEUE(r_idx),
			    v_idx);
			r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
			    (ixgbe->num_rx_rings - 1));
		}

		/*
		 * Likewise for each tx ring bit set in tx_map
		 */
		r_idx = bt_getlowbit(vect->tx_map, 0,
		    (ixgbe->num_tx_rings - 1));

		while (r_idx >= 0) {
			ixgbe_set_ivar(ixgbe, IXGBE_IVAR_TX_QUEUE(r_idx),
			    v_idx);
			r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
			    (ixgbe->num_tx_rings - 1));
		}
	}
}
3660 
3661 /*
3662  * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
3663  */
3664 static void
3665 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
3666 {
3667 	int i;
3668 	int rc;
3669 
3670 	for (i = 0; i < ixgbe->intr_cnt; i++) {
3671 		rc = ddi_intr_remove_handler(ixgbe->htable[i]);
3672 		if (rc != DDI_SUCCESS) {
3673 			IXGBE_DEBUGLOG_1(ixgbe,
3674 			    "Remove intr handler failed: %d", rc);
3675 		}
3676 	}
3677 }
3678 
3679 /*
3680  * ixgbe_rem_intrs - Remove the allocated interrupts.
3681  */
3682 static void
3683 ixgbe_rem_intrs(ixgbe_t *ixgbe)
3684 {
3685 	int i;
3686 	int rc;
3687 
3688 	for (i = 0; i < ixgbe->intr_cnt; i++) {
3689 		rc = ddi_intr_free(ixgbe->htable[i]);
3690 		if (rc != DDI_SUCCESS) {
3691 			IXGBE_DEBUGLOG_1(ixgbe,
3692 			    "Free intr failed: %d", rc);
3693 		}
3694 	}
3695 
3696 	kmem_free(ixgbe->htable, ixgbe->intr_size);
3697 	ixgbe->htable = NULL;
3698 }
3699 
3700 /*
3701  * ixgbe_enable_intrs - Enable all the ddi interrupts.
3702  */
3703 static int
3704 ixgbe_enable_intrs(ixgbe_t *ixgbe)
3705 {
3706 	int i;
3707 	int rc;
3708 
3709 	/*
3710 	 * Enable interrupts
3711 	 */
3712 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
3713 		/*
3714 		 * Call ddi_intr_block_enable() for MSI
3715 		 */
3716 		rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
3717 		if (rc != DDI_SUCCESS) {
3718 			ixgbe_log(ixgbe,
3719 			    "Enable block intr failed: %d", rc);
3720 			return (IXGBE_FAILURE);
3721 		}
3722 	} else {
3723 		/*
3724 		 * Call ddi_intr_enable() for Legacy/MSI non block enable
3725 		 */
3726 		for (i = 0; i < ixgbe->intr_cnt; i++) {
3727 			rc = ddi_intr_enable(ixgbe->htable[i]);
3728 			if (rc != DDI_SUCCESS) {
3729 				ixgbe_log(ixgbe,
3730 				    "Enable intr failed: %d", rc);
3731 				return (IXGBE_FAILURE);
3732 			}
3733 		}
3734 	}
3735 
3736 	return (IXGBE_SUCCESS);
3737 }
3738 
3739 /*
3740  * ixgbe_disable_intrs - Disable all the interrupts.
3741  */
3742 static int
3743 ixgbe_disable_intrs(ixgbe_t *ixgbe)
3744 {
3745 	int i;
3746 	int rc;
3747 
3748 	/*
3749 	 * Disable all interrupts
3750 	 */
3751 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
3752 		rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
3753 		if (rc != DDI_SUCCESS) {
3754 			ixgbe_log(ixgbe,
3755 			    "Disable block intr failed: %d", rc);
3756 			return (IXGBE_FAILURE);
3757 		}
3758 	} else {
3759 		for (i = 0; i < ixgbe->intr_cnt; i++) {
3760 			rc = ddi_intr_disable(ixgbe->htable[i]);
3761 			if (rc != DDI_SUCCESS) {
3762 				ixgbe_log(ixgbe,
3763 				    "Disable intr failed: %d", rc);
3764 				return (IXGBE_FAILURE);
3765 			}
3766 		}
3767 	}
3768 
3769 	return (IXGBE_SUCCESS);
3770 }
3771 
3772 /*
3773  * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
3774  */
static void
ixgbe_get_hw_state(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t links;
	uint32_t pcs1g_anlp = 0;	/* link partner PCS-1G abilities */
	uint32_t pcs1g_ana = 0;		/* local PCS-1G advertised abilities */

	ASSERT(mutex_owned(&ixgbe->gen_lock));
	/* Default to "link partner advertises nothing" */
	ixgbe->param_lp_1000fdx_cap = 0;
	ixgbe->param_lp_100fdx_cap  = 0;

	/* Only read the PCS-1G ability registers when that block is active */
	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (links & IXGBE_LINKS_PCS_1G_EN) {
		pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
		pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

		/*
		 * NOTE(review): both lp_1000fdx_cap and lp_100fdx_cap are
		 * derived from the same IXGBE_PCS1GANLP_LPFD bit — confirm
		 * that 100fdx should not use a distinct ability bit.
		 */
		ixgbe->param_lp_1000fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
		ixgbe->param_lp_100fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
	}

	/*
	 * If the PCS-1G block was not enabled, pcs1g_ana is still 0 and
	 * the local capabilities below are cleared as well.  Both use the
	 * same IXGBE_PCS1GANA_FDC bit (see NOTE above).
	 */
	ixgbe->param_1000fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC)  ? 1 : 0;
	ixgbe->param_100fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC)  ? 1 : 0;
}
3801 
3802 /*
3803  * ixgbe_get_driver_control - Notify that driver is in control of device.
3804  */
3805 static void
3806 ixgbe_get_driver_control(struct ixgbe_hw *hw)
3807 {
3808 	uint32_t ctrl_ext;
3809 
3810 	/*
3811 	 * Notify firmware that driver is in control of device
3812 	 */
3813 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3814 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
3815 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3816 }
3817 
3818 /*
3819  * ixgbe_release_driver_control - Notify that driver is no longer in control
3820  * of device.
3821  */
3822 static void
3823 ixgbe_release_driver_control(struct ixgbe_hw *hw)
3824 {
3825 	uint32_t ctrl_ext;
3826 
3827 	/*
3828 	 * Notify firmware that driver is no longer in control of device
3829 	 */
3830 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3831 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3832 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3833 }
3834 
3835 /*
3836  * ixgbe_atomic_reserve - Atomic decrease operation.
3837  */
3838 int
3839 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
3840 {
3841 	uint32_t oldval;
3842 	uint32_t newval;
3843 
3844 	/*
3845 	 * ATOMICALLY
3846 	 */
3847 	do {
3848 		oldval = *count_p;
3849 		if (oldval < n)
3850 			return (-1);
3851 		newval = oldval - n;
3852 	} while (atomic_cas_32(count_p, oldval, newval) != oldval);
3853 
3854 	return (newval);
3855 }
3856 
3857 /*
3858  * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
3859  */
3860 static uint8_t *
3861 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
3862 {
3863 	_NOTE(ARGUNUSED(hw));
3864 	_NOTE(ARGUNUSED(vmdq));
3865 	uint8_t *addr = *upd_ptr;
3866 	uint8_t *new_ptr;
3867 
3868 	new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
3869 	*upd_ptr = new_ptr;
3870 	return (addr);
3871 }
3872 
3873 /*
3874  * FMA support
3875  */
3876 int
3877 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
3878 {
3879 	ddi_fm_error_t de;
3880 
3881 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
3882 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
3883 	return (de.fme_status);
3884 }
3885 
3886 int
3887 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
3888 {
3889 	ddi_fm_error_t de;
3890 
3891 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
3892 	return (de.fme_status);
3893 }
3894 
3895 /*
3896  * ixgbe_fm_error_cb - The IO fault service error handling callback function.
3897  */
3898 static int
3899 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
3900 {
3901 	_NOTE(ARGUNUSED(impl_data));
3902 	/*
3903 	 * as the driver can always deal with an error in any dma or
3904 	 * access handle, we can just return the fme_status value.
3905 	 */
3906 	pci_ereport_post(dip, err, NULL);
3907 	return (err->fme_status);
3908 }
3909 
3910 static void
3911 ixgbe_fm_init(ixgbe_t *ixgbe)
3912 {
3913 	ddi_iblock_cookie_t iblk;
3914 	int fma_acc_flag, fma_dma_flag;
3915 
3916 	/*
3917 	 * Only register with IO Fault Services if we have some capability
3918 	 */
3919 	if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
3920 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
3921 		fma_acc_flag = 1;
3922 	} else {
3923 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
3924 		fma_acc_flag = 0;
3925 	}
3926 
3927 	if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
3928 		fma_dma_flag = 1;
3929 	} else {
3930 		fma_dma_flag = 0;
3931 	}
3932 
3933 	ixgbe_set_fma_flags(fma_acc_flag, fma_dma_flag);
3934 
3935 	if (ixgbe->fm_capabilities) {
3936 
3937 		/*
3938 		 * Register capabilities with IO Fault Services
3939 		 */
3940 		ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
3941 
3942 		/*
3943 		 * Initialize pci ereport capabilities if ereport capable
3944 		 */
3945 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
3946 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
3947 			pci_ereport_setup(ixgbe->dip);
3948 
3949 		/*
3950 		 * Register error callback if error callback capable
3951 		 */
3952 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
3953 			ddi_fm_handler_register(ixgbe->dip,
3954 			    ixgbe_fm_error_cb, (void*) ixgbe);
3955 	}
3956 }
3957 
3958 static void
3959 ixgbe_fm_fini(ixgbe_t *ixgbe)
3960 {
3961 	/*
3962 	 * Only unregister FMA capabilities if they are registered
3963 	 */
3964 	if (ixgbe->fm_capabilities) {
3965 
3966 		/*
3967 		 * Release any resources allocated by pci_ereport_setup()
3968 		 */
3969 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
3970 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
3971 			pci_ereport_teardown(ixgbe->dip);
3972 
3973 		/*
3974 		 * Un-register error callback if error callback capable
3975 		 */
3976 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
3977 			ddi_fm_handler_unregister(ixgbe->dip);
3978 
3979 		/*
3980 		 * Unregister from IO Fault Service
3981 		 */
3982 		ddi_fm_fini(ixgbe->dip);
3983 	}
3984 }
3985 
3986 void
3987 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
3988 {
3989 	uint64_t ena;
3990 	char buf[FM_MAX_CLASS];
3991 
3992 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
3993 	ena = fm_ena_generate(0, FM_ENA_FMT1);
3994 	if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
3995 		ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
3996 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
3997 	}
3998 }
3999