xref: /titanic_50/usr/src/uts/common/io/ixgbe/ixgbe_main.c (revision 81fd181a33bee65d5be7a49c6093bb13b382b172)
1 /*
2  * CDDL HEADER START
3  *
4  * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
5  * The contents of this file are subject to the terms of the
6  * Common Development and Distribution License (the "License").
7  * You may not use this file except in compliance with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 
23 /*
24  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
25  * Use is subject to license terms.
26  */
27 
28 #include "ixgbe_sw.h"
29 
/* Driver identification strings: modinfo description and logged version. */
static char ident[] = "Intel 10Gb Ethernet";
static char ixgbe_version[] = "ixgbe 1.1.1";
32 
/*
 * Local function prototypes
 */
36 static int ixgbe_register_mac(ixgbe_t *);
37 static int ixgbe_identify_hardware(ixgbe_t *);
38 static int ixgbe_regs_map(ixgbe_t *);
39 static void ixgbe_init_properties(ixgbe_t *);
40 static int ixgbe_init_driver_settings(ixgbe_t *);
41 static void ixgbe_init_locks(ixgbe_t *);
42 static void ixgbe_destroy_locks(ixgbe_t *);
43 static int ixgbe_init(ixgbe_t *);
44 static int ixgbe_chip_start(ixgbe_t *);
45 static void ixgbe_chip_stop(ixgbe_t *);
46 static int ixgbe_reset(ixgbe_t *);
47 static void ixgbe_tx_clean(ixgbe_t *);
48 static boolean_t ixgbe_tx_drain(ixgbe_t *);
49 static boolean_t ixgbe_rx_drain(ixgbe_t *);
50 static int ixgbe_alloc_rings(ixgbe_t *);
51 static void ixgbe_free_rings(ixgbe_t *);
52 static int ixgbe_alloc_rx_data(ixgbe_t *);
53 static void ixgbe_free_rx_data(ixgbe_t *);
54 static void ixgbe_setup_rings(ixgbe_t *);
55 static void ixgbe_setup_rx(ixgbe_t *);
56 static void ixgbe_setup_tx(ixgbe_t *);
57 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
58 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
59 static void ixgbe_setup_rss(ixgbe_t *);
60 static void ixgbe_init_unicst(ixgbe_t *);
61 static int ixgbe_unicst_set(ixgbe_t *, const uint8_t *, int);
62 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
63 static void ixgbe_setup_multicst(ixgbe_t *);
64 static void ixgbe_get_hw_state(ixgbe_t *);
65 static void ixgbe_get_conf(ixgbe_t *);
66 static void ixgbe_init_params(ixgbe_t *);
67 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
68 static void ixgbe_driver_link_check(void *);
69 static void ixgbe_sfp_check(void *);
70 static void ixgbe_local_timer(void *);
71 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
72 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
73 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
74 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
75 static boolean_t is_valid_mac_addr(uint8_t *);
76 static boolean_t ixgbe_stall_check(ixgbe_t *);
77 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
78 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
79 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
80 static int ixgbe_alloc_intrs(ixgbe_t *);
81 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
82 static int ixgbe_add_intr_handlers(ixgbe_t *);
83 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
84 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
85 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
86 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
87 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
88 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
89 static void ixgbe_setup_adapter_vector(ixgbe_t *);
90 static void ixgbe_rem_intr_handlers(ixgbe_t *);
91 static void ixgbe_rem_intrs(ixgbe_t *);
92 static int ixgbe_enable_intrs(ixgbe_t *);
93 static int ixgbe_disable_intrs(ixgbe_t *);
94 static uint_t ixgbe_intr_legacy(void *, void *);
95 static uint_t ixgbe_intr_msi(void *, void *);
96 static uint_t ixgbe_intr_msix(void *, void *);
97 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
98 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
99 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
100 static void ixgbe_get_driver_control(struct ixgbe_hw *);
101 static int ixgbe_addmac(void *, const uint8_t *);
102 static int ixgbe_remmac(void *, const uint8_t *);
103 static void ixgbe_release_driver_control(struct ixgbe_hw *);
104 
105 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
106 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
107 static int ixgbe_resume(dev_info_t *);
108 static int ixgbe_suspend(dev_info_t *);
109 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
110 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
111 
112 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
113     const void *impl_data);
114 static void ixgbe_fm_init(ixgbe_t *);
115 static void ixgbe_fm_fini(ixgbe_t *);
116 
/*
 * Driver-private mac properties exported through the MAC property
 * framework.  The threshold/throttling tunables are read-write; the
 * advertised pause capabilities are read-only.
 */
mac_priv_prop_t ixgbe_priv_props[] = {
	{"_tx_copy_thresh", MAC_PROP_PERM_RW},
	{"_tx_recycle_thresh", MAC_PROP_PERM_RW},
	{"_tx_overload_thresh", MAC_PROP_PERM_RW},
	{"_tx_resched_thresh", MAC_PROP_PERM_RW},
	{"_rx_copy_thresh", MAC_PROP_PERM_RW},
	{"_rx_limit_per_intr", MAC_PROP_PERM_RW},
	{"_intr_throttling", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_READ},
	{"_adv_asym_pause_cap", MAC_PROP_PERM_READ}
};

/* Number of entries in ixgbe_priv_props[] */
#define	IXGBE_MAX_PRIV_PROPS \
	(sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))
131 
/*
 * Character/block driver entry points.  ixgbe exposes no cdev/bdev
 * interface of its own (the data path is GLDv3), so every entry is a
 * stub: nulldev succeeds, nodev fails with ENXIO.
 */
static struct cb_ops ixgbe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
152 
/*
 * Device operations vector.  Only attach/detach are implemented here;
 * fast-reboot quiesce is explicitly not supported.
 */
static struct dev_ops ixgbe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ixgbe_attach,		/* devo_attach */
	ixgbe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ixgbe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};
167 
static struct modldrv ixgbe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	ident,			/* Description string */
	&ixgbe_dev_ops		/* driver ops */
};
173 
/* Module linkage: a single driver module. */
static struct modlinkage ixgbe_modlinkage = {
	MODREV_1, &ixgbe_modldrv, NULL
};
177 
178 /*
179  * Access attributes for register mapping
180  */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,	/* registers are little-endian */
	DDI_STRICTORDER_ACC,	/* no reordering of register accesses */
	DDI_FLAGERR_ACC		/* flagged errors for FMA access checks */
};
187 
188 /*
189  * Loopback property
190  */
/* No loopback: normal operation. */
static lb_property_t lb_normal = {
	normal,	"normal", IXGBE_LB_NONE
};

/* Internal MAC-level loopback. */
static lb_property_t lb_mac = {
	internal, "MAC", IXGBE_LB_INTERNAL_MAC
};
198 
#define	IXGBE_M_CALLBACK_FLAGS \
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

/*
 * GLDv3 callback vector.  The NULL slots are callbacks this driver does
 * not provide directly — presumably the unicast/tx paths are supplied
 * per-ring through the capability interface (see MAC_VIRT_LEVEL1 in
 * ixgbe_register_mac()); confirm against the mac_callbacks_t layout.
 */
static mac_callbacks_t ixgbe_m_callbacks = {
	IXGBE_M_CALLBACK_FLAGS,
	ixgbe_m_stat,
	ixgbe_m_start,
	ixgbe_m_stop,
	ixgbe_m_promisc,
	ixgbe_m_multicst,
	NULL,
	NULL,
	ixgbe_m_ioctl,
	ixgbe_m_getcapab,
	NULL,
	NULL,
	ixgbe_m_setprop,
	ixgbe_m_getprop
};
218 
219 /*
220  * Initialize capabilities of each supported adapter type
221  */
/*
 * 82598EB capability table.
 *
 * NOTE(review): this is a file-scope static that ixgbe_identify_hardware()
 * modifies in place (flags/other_intr for copper parts), so the values are
 * shared by every driver instance, not per-instance.
 */
static adapter_info_t ixgbe_82598eb_cap = {
	64,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	8,		/* default number of rx queues */
	32,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	0xFFFF,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	18,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE)
};
240 
/*
 * 82599EB capability table.  Like ixgbe_82598eb_cap, this static is
 * updated in place by ixgbe_identify_hardware() (other_intr is rewritten
 * there) and is therefore shared across instances.
 */
static adapter_info_t ixgbe_82599eb_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	8,		/* default number of rx queues */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE)
};
259 
260 /*
261  * Module Initialization Functions.
262  */
263 
264 int
265 _init(void)
266 {
267 	int status;
268 
269 	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
270 
271 	status = mod_install(&ixgbe_modlinkage);
272 
273 	if (status != DDI_SUCCESS) {
274 		mac_fini_ops(&ixgbe_dev_ops);
275 	}
276 
277 	return (status);
278 }
279 
280 int
281 _fini(void)
282 {
283 	int status;
284 
285 	status = mod_remove(&ixgbe_modlinkage);
286 
287 	if (status == DDI_SUCCESS) {
288 		mac_fini_ops(&ixgbe_dev_ops);
289 	}
290 
291 	return (status);
292 }
293 
294 int
295 _info(struct modinfo *modinfop)
296 {
297 	int status;
298 
299 	status = mod_info(&ixgbe_modlinkage, modinfop);
300 
301 	return (status);
302 }
303 
304 /*
305  * ixgbe_attach - Driver attach.
306  *
307  * This function is the device specific initialization entry
308  * point. This entry point is required and must be written.
309  * The DDI_ATTACH command must be provided in the attach entry
310  * point. When attach() is called with cmd set to DDI_ATTACH,
311  * all normal kernel services (such as kmem_alloc(9F)) are
312  * available for use by the driver.
313  *
314  * The attach() function will be called once for each instance
315  * of  the  device  on  the  system with cmd set to DDI_ATTACH.
316  * Until attach() succeeds, the only driver entry points which
317  * may be called are open(9E) and getinfo(9E).
318  */
319 static int
320 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
321 {
322 	ixgbe_t *ixgbe;
323 	struct ixgbe_osdep *osdep;
324 	struct ixgbe_hw *hw;
325 	int instance;
326 	char taskqname[32];
327 
328 	/*
329 	 * Check the command and perform corresponding operations
330 	 */
331 	switch (cmd) {
332 	default:
333 		return (DDI_FAILURE);
334 
335 	case DDI_RESUME:
336 		return (ixgbe_resume(devinfo));
337 
338 	case DDI_ATTACH:
339 		break;
340 	}
341 
342 	/* Get the device instance */
343 	instance = ddi_get_instance(devinfo);
344 
345 	/* Allocate memory for the instance data structure */
346 	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
347 
348 	ixgbe->dip = devinfo;
349 	ixgbe->instance = instance;
350 
351 	hw = &ixgbe->hw;
352 	osdep = &ixgbe->osdep;
353 	hw->back = osdep;
354 	osdep->ixgbe = ixgbe;
355 
356 	/* Attach the instance pointer to the dev_info data structure */
357 	ddi_set_driver_private(devinfo, ixgbe);
358 
359 	/*
360 	 * Initialize for fma support
361 	 */
362 	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
363 	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
364 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
365 	ixgbe_fm_init(ixgbe);
366 	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
367 
368 	/*
369 	 * Map PCI config space registers
370 	 */
371 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
372 		ixgbe_error(ixgbe, "Failed to map PCI configurations");
373 		goto attach_fail;
374 	}
375 	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
376 
377 	/*
378 	 * Identify the chipset family
379 	 */
380 	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
381 		ixgbe_error(ixgbe, "Failed to identify hardware");
382 		goto attach_fail;
383 	}
384 
385 	/*
386 	 * Map device registers
387 	 */
388 	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
389 		ixgbe_error(ixgbe, "Failed to map device registers");
390 		goto attach_fail;
391 	}
392 	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
393 
394 	/*
395 	 * Initialize driver parameters
396 	 */
397 	ixgbe_init_properties(ixgbe);
398 	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
399 
400 	/*
401 	 * Allocate interrupts
402 	 */
403 	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
404 		ixgbe_error(ixgbe, "Failed to allocate interrupts");
405 		goto attach_fail;
406 	}
407 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
408 
409 	/*
410 	 * Allocate rx/tx rings based on the ring numbers.
411 	 * The actual numbers of rx/tx rings are decided by the number of
412 	 * allocated interrupt vectors, so we should allocate the rings after
413 	 * interrupts are allocated.
414 	 */
415 	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
416 		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
417 		goto attach_fail;
418 	}
419 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
420 
421 	/*
422 	 * Map rings to interrupt vectors
423 	 */
424 	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
425 		ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
426 		goto attach_fail;
427 	}
428 
429 	/*
430 	 * Add interrupt handlers
431 	 */
432 	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
433 		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
434 		goto attach_fail;
435 	}
436 	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
437 
438 	/*
439 	 * Create a taskq for link-status-change
440 	 */
441 	(void) sprintf(taskqname, "ixgbe%d_taskq", instance);
442 	if ((ixgbe->lsc_taskq = ddi_taskq_create(devinfo, taskqname,
443 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
444 		ixgbe_error(ixgbe, "taskq_create failed");
445 		goto attach_fail;
446 	}
447 	ixgbe->attach_progress |= ATTACH_PROGRESS_LSC_TASKQ;
448 
449 	/*
450 	 * Initialize driver parameters
451 	 */
452 	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
453 		ixgbe_error(ixgbe, "Failed to initialize driver settings");
454 		goto attach_fail;
455 	}
456 
457 	/*
458 	 * Initialize mutexes for this device.
459 	 * Do this before enabling the interrupt handler and
460 	 * register the softint to avoid the condition where
461 	 * interrupt handler can try using uninitialized mutex.
462 	 */
463 	ixgbe_init_locks(ixgbe);
464 	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
465 
466 	/*
467 	 * Initialize chipset hardware
468 	 */
469 	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
470 		ixgbe_error(ixgbe, "Failed to initialize adapter");
471 		goto attach_fail;
472 	}
473 	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
474 
475 	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
476 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
477 		goto attach_fail;
478 	}
479 
480 	/*
481 	 * Initialize statistics
482 	 */
483 	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
484 		ixgbe_error(ixgbe, "Failed to initialize statistics");
485 		goto attach_fail;
486 	}
487 	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
488 
489 	/*
490 	 * Register the driver to the MAC
491 	 */
492 	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
493 		ixgbe_error(ixgbe, "Failed to register MAC");
494 		goto attach_fail;
495 	}
496 	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
497 	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
498 
499 	/*
500 	 * Now that mutex locks are initialized, and the chip is also
501 	 * initialized, enable interrupts.
502 	 */
503 	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
504 		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
505 		goto attach_fail;
506 	}
507 	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
508 
509 	ixgbe_log(ixgbe, "%s", ixgbe_version);
510 	ixgbe->ixgbe_state |= IXGBE_INITIALIZED;
511 
512 	return (DDI_SUCCESS);
513 
514 attach_fail:
515 	ixgbe_unconfigure(devinfo, ixgbe);
516 	return (DDI_FAILURE);
517 }
518 
519 /*
520  * ixgbe_detach - Driver detach.
521  *
522  * The detach() function is the complement of the attach routine.
523  * If cmd is set to DDI_DETACH, detach() is used to remove  the
524  * state  associated  with  a  given  instance of a device node
525  * prior to the removal of that instance from the system.
526  *
527  * The detach() function will be called once for each  instance
528  * of the device for which there has been a successful attach()
529  * once there are no longer  any  opens  on  the  device.
530  *
 * Interrupt routines are disabled, and all memory allocated by this
 * driver is freed.
533  */
534 static int
535 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
536 {
537 	ixgbe_t *ixgbe;
538 
539 	/*
540 	 * Check detach command
541 	 */
542 	switch (cmd) {
543 	default:
544 		return (DDI_FAILURE);
545 
546 	case DDI_SUSPEND:
547 		return (ixgbe_suspend(devinfo));
548 
549 	case DDI_DETACH:
550 		break;
551 	}
552 
553 
554 	/*
555 	 * Get the pointer to the driver private data structure
556 	 */
557 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
558 	if (ixgbe == NULL)
559 		return (DDI_FAILURE);
560 
561 	/*
562 	 * Unregister MAC. If failed, we have to fail the detach
563 	 */
564 	if (mac_unregister(ixgbe->mac_hdl) != 0) {
565 		ixgbe_error(ixgbe, "Failed to unregister MAC");
566 		return (DDI_FAILURE);
567 	}
568 	ixgbe->attach_progress &= ~ATTACH_PROGRESS_MAC;
569 
570 	/*
571 	 * If the device is still running, it needs to be stopped first.
572 	 * This check is necessary because under some specific circumstances,
573 	 * the detach routine can be called without stopping the interface
574 	 * first.
575 	 */
576 	mutex_enter(&ixgbe->gen_lock);
577 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
578 		ixgbe->ixgbe_state &= ~IXGBE_STARTED;
579 		ixgbe_stop(ixgbe, B_TRUE);
580 		mutex_exit(&ixgbe->gen_lock);
581 		/* Disable and stop the watchdog timer */
582 		ixgbe_disable_watchdog_timer(ixgbe);
583 	} else
584 		mutex_exit(&ixgbe->gen_lock);
585 
586 	/*
587 	 * Check if there are still rx buffers held by the upper layer.
588 	 * If so, fail the detach.
589 	 */
590 	if (!ixgbe_rx_drain(ixgbe))
591 		return (DDI_FAILURE);
592 
593 	/*
594 	 * Do the remaining unconfigure routines
595 	 */
596 	ixgbe_unconfigure(devinfo, ixgbe);
597 
598 	return (DDI_SUCCESS);
599 }
600 
/*
 * ixgbe_unconfigure - Release every resource recorded in attach_progress.
 *
 * Called from both the ixgbe_attach() failure path and a successful
 * ixgbe_detach(), so each step is guarded by its progress bit and the
 * teardown runs in (roughly) the reverse of acquisition order.
 */
static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
	/*
	 * Disable interrupt
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		(void) ixgbe_disable_intrs(ixgbe);
	}

	/*
	 * Unregister MAC (no-op after a successful detach, which clears
	 * this progress bit before calling here)
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
		(void) mac_unregister(ixgbe->mac_hdl);
	}

	/*
	 * Free statistics
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
	}

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}

	/*
	 * Remove taskq for link-status-change
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LSC_TASKQ) {
		ddi_taskq_destroy(ixgbe->lsc_taskq);
	}

	/*
	 * Remove interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
		ixgbe_rem_intrs(ixgbe);
	}

	/*
	 * Remove driver properties
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
		(void) ddi_prop_remove_all(devinfo);
	}

	/*
	 * Stop the chipset (gen_lock is still valid here; locks are not
	 * destroyed until further below)
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_chip_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
	}

	/*
	 * Free register handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
		if (ixgbe->osdep.reg_handle != NULL)
			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
	}

	/*
	 * Free PCI config handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
		if (ixgbe->osdep.cfg_handle != NULL)
			pci_config_teardown(&ixgbe->osdep.cfg_handle);
	}

	/*
	 * Free locks
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
		ixgbe_destroy_locks(ixgbe);
	}

	/*
	 * Free the rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
		ixgbe_free_rings(ixgbe);
	}

	/*
	 * Unregister FMA capabilities
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
		ixgbe_fm_fini(ixgbe);
	}

	/*
	 * Free the driver data structure (allocated with kmem_zalloc() in
	 * ixgbe_attach()) and clear the dev_info private pointer
	 */
	kmem_free(ixgbe, sizeof (ixgbe_t));

	ddi_set_driver_private(devinfo, NULL);
}
706 
707 /*
708  * ixgbe_register_mac - Register the driver and its function pointers with
709  * the GLD interface.
710  */
711 static int
712 ixgbe_register_mac(ixgbe_t *ixgbe)
713 {
714 	struct ixgbe_hw *hw = &ixgbe->hw;
715 	mac_register_t *mac;
716 	int status;
717 
718 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
719 		return (IXGBE_FAILURE);
720 
721 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
722 	mac->m_driver = ixgbe;
723 	mac->m_dip = ixgbe->dip;
724 	mac->m_src_addr = hw->mac.addr;
725 	mac->m_callbacks = &ixgbe_m_callbacks;
726 	mac->m_min_sdu = 0;
727 	mac->m_max_sdu = ixgbe->default_mtu;
728 	mac->m_margin = VLAN_TAGSZ;
729 	mac->m_priv_props = ixgbe_priv_props;
730 	mac->m_priv_prop_count = IXGBE_MAX_PRIV_PROPS;
731 	mac->m_v12n = MAC_VIRT_LEVEL1;
732 
733 	status = mac_register(mac, &ixgbe->mac_hdl);
734 
735 	mac_free(mac);
736 
737 	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
738 }
739 
740 /*
741  * ixgbe_identify_hardware - Identify the type of the chipset.
742  */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;

	/*
	 * Get the device id (and the other PCI IDs the shared code needs)
	 * from config space
	 */
	hw->vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
	hw->device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
	hw->revision_id =
	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
	hw->subsystem_device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

	/*
	 * Set the mac type of the adapter based on the device id
	 */
	if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Install adapter capabilities.
	 *
	 * NOTE(review): the capability tables are file-scope statics; the
	 * |= / = updates below modify them through ixgbe->capab, so the
	 * changes are visible to all instances of this driver.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		ixgbe_log(ixgbe, "identify 82598 adapter\n");
		ixgbe->capab = &ixgbe_82598eb_cap;

		/* copper 82598 parts also report fan failure via GPI SDP1 */
		if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
			ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
		}
		ixgbe->capab->other_intr |= IXGBE_EICR_LSC;

		break;
	case ixgbe_mac_82599EB:
		ixgbe_log(ixgbe, "identify 82599 adapter\n");
		ixgbe->capab = &ixgbe_82599eb_cap;

		/* GPI SDP1/SDP2 plus link-status-change interrupts */
		ixgbe->capab->other_intr = (IXGBE_EICR_GPI_SDP1 |
		    IXGBE_EICR_GPI_SDP2 | IXGBE_EICR_LSC);

		break;
	default:
		ixgbe_log(ixgbe,
		    "adapter not supported in ixgbe_identify_hardware(): %d\n",
		    hw->mac.type);
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}
802 
803 /*
804  * ixgbe_regs_map - Map the device registers.
805  *
806  */
807 static int
808 ixgbe_regs_map(ixgbe_t *ixgbe)
809 {
810 	dev_info_t *devinfo = ixgbe->dip;
811 	struct ixgbe_hw *hw = &ixgbe->hw;
812 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
813 	off_t mem_size;
814 
815 	/*
816 	 * First get the size of device registers to be mapped.
817 	 */
818 	if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
819 	    != DDI_SUCCESS) {
820 		return (IXGBE_FAILURE);
821 	}
822 
823 	/*
824 	 * Call ddi_regs_map_setup() to map registers
825 	 */
826 	if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
827 	    (caddr_t *)&hw->hw_addr, 0,
828 	    mem_size, &ixgbe_regs_acc_attr,
829 	    &osdep->reg_handle)) != DDI_SUCCESS) {
830 		return (IXGBE_FAILURE);
831 	}
832 
833 	return (IXGBE_SUCCESS);
834 }
835 
836 /*
837  * ixgbe_init_properties - Initialize driver properties.
838  */
839 static void
840 ixgbe_init_properties(ixgbe_t *ixgbe)
841 {
842 	/*
843 	 * Get conf file properties, including link settings
844 	 * jumbo frames, ring number, descriptor number, etc.
845 	 */
846 	ixgbe_get_conf(ixgbe);
847 
848 	ixgbe_init_params(ixgbe);
849 }
850 
851 /*
852  * ixgbe_init_driver_settings - Initialize driver settings.
853  *
854  * The settings include hardware function pointers, bus information,
855  * rx/tx rings settings, link state, and any other parameters that
856  * need to be setup during driver initialization.
857  */
858 static int
859 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
860 {
861 	struct ixgbe_hw *hw = &ixgbe->hw;
862 	dev_info_t *devinfo = ixgbe->dip;
863 	ixgbe_rx_ring_t *rx_ring;
864 	ixgbe_tx_ring_t *tx_ring;
865 	uint32_t rx_size;
866 	uint32_t tx_size;
867 	int i;
868 
869 	/*
870 	 * Initialize chipset specific hardware function pointers
871 	 */
872 	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
873 		return (IXGBE_FAILURE);
874 	}
875 
876 	/*
877 	 * Get the system page size
878 	 */
879 	ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);
880 
881 	/*
882 	 * Set rx buffer size
883 	 *
884 	 * The IP header alignment room is counted in the calculation.
885 	 * The rx buffer size is in unit of 1K that is required by the
886 	 * chipset hardware.
887 	 */
888 	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
889 	ixgbe->rx_buf_size = ((rx_size >> 10) +
890 	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
891 
892 	/*
893 	 * Set tx buffer size
894 	 */
895 	tx_size = ixgbe->max_frame_size;
896 	ixgbe->tx_buf_size = ((tx_size >> 10) +
897 	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
898 
899 	/*
900 	 * Initialize rx/tx rings parameters
901 	 */
902 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
903 		rx_ring = &ixgbe->rx_rings[i];
904 		rx_ring->index = i;
905 		rx_ring->ixgbe = ixgbe;
906 	}
907 
908 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
909 		tx_ring = &ixgbe->tx_rings[i];
910 		tx_ring->index = i;
911 		tx_ring->ixgbe = ixgbe;
912 		if (ixgbe->tx_head_wb_enable)
913 			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
914 		else
915 			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
916 
917 		tx_ring->ring_size = ixgbe->tx_ring_size;
918 		tx_ring->free_list_size = ixgbe->tx_ring_size +
919 		    (ixgbe->tx_ring_size >> 1);
920 	}
921 
922 	/*
923 	 * Initialize values of interrupt throttling rate
924 	 */
925 	for (i = 1; i < MAX_INTR_VECTOR; i++)
926 		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
927 
928 	/*
929 	 * The initial link state should be "unknown"
930 	 */
931 	ixgbe->link_state = LINK_STATE_UNKNOWN;
932 
933 	return (IXGBE_SUCCESS);
934 }
935 
936 /*
937  * ixgbe_init_locks - Initialize locks.
938  */
939 static void
940 ixgbe_init_locks(ixgbe_t *ixgbe)
941 {
942 	ixgbe_rx_ring_t *rx_ring;
943 	ixgbe_tx_ring_t *tx_ring;
944 	int i;
945 
946 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
947 		rx_ring = &ixgbe->rx_rings[i];
948 		mutex_init(&rx_ring->rx_lock, NULL,
949 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
950 	}
951 
952 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
953 		tx_ring = &ixgbe->tx_rings[i];
954 		mutex_init(&tx_ring->tx_lock, NULL,
955 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
956 		mutex_init(&tx_ring->recycle_lock, NULL,
957 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
958 		mutex_init(&tx_ring->tcb_head_lock, NULL,
959 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
960 		mutex_init(&tx_ring->tcb_tail_lock, NULL,
961 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
962 	}
963 
964 	mutex_init(&ixgbe->gen_lock, NULL,
965 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
966 
967 	mutex_init(&ixgbe->watchdog_lock, NULL,
968 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
969 }
970 
971 /*
972  * ixgbe_destroy_locks - Destroy locks.
973  */
974 static void
975 ixgbe_destroy_locks(ixgbe_t *ixgbe)
976 {
977 	ixgbe_rx_ring_t *rx_ring;
978 	ixgbe_tx_ring_t *tx_ring;
979 	int i;
980 
981 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
982 		rx_ring = &ixgbe->rx_rings[i];
983 		mutex_destroy(&rx_ring->rx_lock);
984 	}
985 
986 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
987 		tx_ring = &ixgbe->tx_rings[i];
988 		mutex_destroy(&tx_ring->tx_lock);
989 		mutex_destroy(&tx_ring->recycle_lock);
990 		mutex_destroy(&tx_ring->tcb_head_lock);
991 		mutex_destroy(&tx_ring->tcb_tail_lock);
992 	}
993 
994 	mutex_destroy(&ixgbe->gen_lock);
995 	mutex_destroy(&ixgbe->watchdog_lock);
996 }
997 
998 static int
999 ixgbe_resume(dev_info_t *devinfo)
1000 {
1001 	ixgbe_t *ixgbe;
1002 
1003 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1004 	if (ixgbe == NULL)
1005 		return (DDI_FAILURE);
1006 
1007 	mutex_enter(&ixgbe->gen_lock);
1008 
1009 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1010 		if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1011 			mutex_exit(&ixgbe->gen_lock);
1012 			return (DDI_FAILURE);
1013 		}
1014 
1015 		/*
1016 		 * Enable and start the watchdog timer
1017 		 */
1018 		ixgbe_enable_watchdog_timer(ixgbe);
1019 	}
1020 
1021 	ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
1022 
1023 	mutex_exit(&ixgbe->gen_lock);
1024 
1025 	return (DDI_SUCCESS);
1026 }
1027 
1028 static int
1029 ixgbe_suspend(dev_info_t *devinfo)
1030 {
1031 	ixgbe_t *ixgbe;
1032 
1033 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1034 	if (ixgbe == NULL)
1035 		return (DDI_FAILURE);
1036 
1037 	mutex_enter(&ixgbe->gen_lock);
1038 
1039 	ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
1040 	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1041 		mutex_exit(&ixgbe->gen_lock);
1042 		return (DDI_SUCCESS);
1043 	}
1044 	ixgbe_stop(ixgbe, B_FALSE);
1045 
1046 	mutex_exit(&ixgbe->gen_lock);
1047 
1048 	/*
1049 	 * Disable and stop the watchdog timer
1050 	 */
1051 	ixgbe_disable_watchdog_timer(ixgbe);
1052 
1053 	return (DDI_SUCCESS);
1054 }
1055 
1056 /*
1057  * ixgbe_init - Initialize the device.
1058  */
1059 static int
1060 ixgbe_init(ixgbe_t *ixgbe)
1061 {
1062 	struct ixgbe_hw *hw = &ixgbe->hw;
1063 
1064 	mutex_enter(&ixgbe->gen_lock);
1065 
1066 	/*
1067 	 * Reset chipset to put the hardware in a known state
1068 	 * before we try to do anything with the eeprom.
1069 	 */
1070 	if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
1071 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1072 		goto init_fail;
1073 	}
1074 
1075 	/*
1076 	 * Need to init eeprom before validating the checksum.
1077 	 */
1078 	if (ixgbe_init_eeprom_params(hw) < 0) {
1079 		ixgbe_error(ixgbe,
1080 		    "Unable to intitialize the eeprom interface.");
1081 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1082 		goto init_fail;
1083 	}
1084 
1085 	/*
1086 	 * NVM validation
1087 	 */
1088 	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1089 		/*
1090 		 * Some PCI-E parts fail the first check due to
1091 		 * the link being in sleep state.  Call it again,
1092 		 * if it fails a second time it's a real issue.
1093 		 */
1094 		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1095 			ixgbe_error(ixgbe,
1096 			    "Invalid NVM checksum. Please contact "
1097 			    "the vendor to update the NVM.");
1098 			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1099 			goto init_fail;
1100 		}
1101 	}
1102 
1103 	/*
1104 	 * Setup default flow control thresholds - enable/disable
1105 	 * & flow control type is controlled by ixgbe.conf
1106 	 */
1107 	hw->fc.high_water = DEFAULT_FCRTH;
1108 	hw->fc.low_water = DEFAULT_FCRTL;
1109 	hw->fc.pause_time = DEFAULT_FCPAUSE;
1110 	hw->fc.send_xon = B_TRUE;
1111 
1112 	/*
1113 	 * Initialize link settings
1114 	 */
1115 	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1116 
1117 	/*
1118 	 * Initialize the chipset hardware
1119 	 */
1120 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1121 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1122 		goto init_fail;
1123 	}
1124 
1125 	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
1126 		goto init_fail;
1127 	}
1128 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1129 		goto init_fail;
1130 	}
1131 
1132 	mutex_exit(&ixgbe->gen_lock);
1133 	return (IXGBE_SUCCESS);
1134 
1135 init_fail:
1136 	/*
1137 	 * Reset PHY
1138 	 */
1139 	(void) ixgbe_reset_phy(hw);
1140 
1141 	mutex_exit(&ixgbe->gen_lock);
1142 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1143 	return (IXGBE_FAILURE);
1144 }
1145 
/*
 * ixgbe_chip_start - Initialize and start the chipset hardware.
 *
 * Caller must hold gen_lock.  Programs the mac address, runs the shared
 * code hardware init, sets up interrupt vectors, unicast/multicast
 * tables, interrupt throttling, and takes driver control of the device.
 *
 * Returns IXGBE_SUCCESS or IXGBE_FAILURE.
 */
static int
ixgbe_chip_start(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int ret_val, i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Get the mac address
	 * This function should handle SPARC case correctly.
	 */
	if (!ixgbe_find_mac_address(ixgbe)) {
		ixgbe_error(ixgbe, "Failed to get the mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Validate the mac address
	 */
	(void) ixgbe_init_rx_addrs(hw);
	if (!is_valid_mac_addr(hw->mac.addr)) {
		ixgbe_error(ixgbe, "Invalid mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Configure/Initialize hardware
	 *
	 * Note: an IXGBE_ERR_EEPROM_VERSION result is deliberately treated
	 * as non-fatal -- we warn about the pre-release firmware and keep
	 * going; every other error aborts chip start.
	 */
	ret_val = ixgbe_init_hw(hw);
	if (ret_val != IXGBE_SUCCESS) {
		if (ret_val == IXGBE_ERR_EEPROM_VERSION) {
			ixgbe_error(ixgbe,
			    "This 82599 device is pre-release and contains"
			    " outdated firmware, please contact your hardware"
			    " vendor for a replacement.");
		} else {
			ixgbe_error(ixgbe, "Failed to initialize hardware");
			return (IXGBE_FAILURE);
		}
	}

	/*
	 * Setup adapter interrupt vectors
	 */
	ixgbe_setup_adapter_vector(ixgbe);

	/*
	 * Initialize unicast addresses.
	 */
	ixgbe_init_unicst(ixgbe);

	/*
	 * Setup and initialize the mctable structures.
	 */
	ixgbe_setup_multicst(ixgbe);

	/*
	 * Set interrupt throttling rate
	 */
	for (i = 0; i < ixgbe->intr_cnt; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
	}

	/*
	 * Save the state of the phy
	 */
	ixgbe_get_hw_state(ixgbe);

	/*
	 * Make sure driver has control
	 */
	ixgbe_get_driver_control(hw);

	return (IXGBE_SUCCESS);
}
1225 
/*
 * ixgbe_chip_stop - Stop the chipset hardware
 *
 * Caller must hold gen_lock.  Releases driver control back to firmware,
 * then resets the chipset and the PHY to quiesce the device.
 */
static void
ixgbe_chip_stop(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Tell firmware driver is no longer in control
	 */
	ixgbe_release_driver_control(hw);

	/*
	 * Reset the chipset
	 */
	(void) ixgbe_reset_hw(hw);

	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);
}
1251 
/*
 * ixgbe_reset - Reset the chipset and re-start the driver.
 *
 * It involves stopping and re-starting the chipset,
 * and re-configuring the rx/tx rings.
 *
 * May only be called while the driver is started (asserted below).
 * The IXGBE_STARTED bit is cleared for the duration of the restart and
 * only set again if the restart succeeds; on failure it stays clear
 * and the watchdog timer is left disabled.
 */
static int
ixgbe_reset(ixgbe_t *ixgbe)
{
	/*
	 * Disable and stop the watchdog timer
	 */
	ixgbe_disable_watchdog_timer(ixgbe);

	mutex_enter(&ixgbe->gen_lock);

	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
	ixgbe->ixgbe_state &= ~IXGBE_STARTED;

	/* B_FALSE: keep the existing rx/tx buffers across the reset */
	ixgbe_stop(ixgbe, B_FALSE);

	if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
		mutex_exit(&ixgbe->gen_lock);
		return (IXGBE_FAILURE);
	}

	ixgbe->ixgbe_state |= IXGBE_STARTED;
	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Enable and start the watchdog timer
	 */
	ixgbe_enable_watchdog_timer(ixgbe);

	return (IXGBE_SUCCESS);
}
1288 
/*
 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
 *
 * Walks every tx ring's work list, releasing the DMA/packet resources
 * of each outstanding tx control block and returning the tcbs to the
 * ring's free list.  The tbd free count and head/tail pointers are
 * reset to the empty-ring state.
 */
static void
ixgbe_tx_clean(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	tx_control_block_t *tcb;
	link_list_t pending_list;
	uint32_t desc_num;
	int i, j;

	/*
	 * pending_list is drained into each ring's free list by
	 * ixgbe_put_free_list() at the bottom of the loop, so a single
	 * list can be reused across rings.
	 */
	LINK_LIST_INIT(&pending_list);

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];

		mutex_enter(&tx_ring->recycle_lock);

		/*
		 * Clean the pending tx data - the pending packets in the
		 * work_list that have no chances to be transmitted again.
		 *
		 * We must ensure the chipset is stopped or the link is down
		 * before cleaning the transmit packets.
		 */
		desc_num = 0;
		for (j = 0; j < tx_ring->ring_size; j++) {
			tcb = tx_ring->work_list[j];
			if (tcb != NULL) {
				desc_num += tcb->desc_num;

				tx_ring->work_list[j] = NULL;

				/*
				 * ixgbe_free_tcb() releases the tcb's
				 * DMA/packet resources; the tcb structure
				 * itself remains valid and is queued on
				 * pending_list for return to the free list.
				 */
				ixgbe_free_tcb(tcb);

				LIST_PUSH_TAIL(&pending_list, &tcb->link);
			}
		}

		if (desc_num > 0) {
			atomic_add_32(&tx_ring->tbd_free, desc_num);
			/* With everything reclaimed, the ring must be empty */
			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);

			/*
			 * Reset the head and tail pointers of the tbd ring;
			 * Reset the writeback head if it's enable.
			 */
			tx_ring->tbd_head = 0;
			tx_ring->tbd_tail = 0;
			if (ixgbe->tx_head_wb_enable)
				*tx_ring->tbd_head_wb = 0;

			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDH(tx_ring->index), 0);
			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDT(tx_ring->index), 0);
		}

		mutex_exit(&tx_ring->recycle_lock);

		/*
		 * Add the tx control blocks in the pending list to
		 * the free list.
		 */
		ixgbe_put_free_list(tx_ring, &pending_list);
	}
}
1357 
1358 /*
1359  * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1360  * transmitted.
1361  */
1362 static boolean_t
1363 ixgbe_tx_drain(ixgbe_t *ixgbe)
1364 {
1365 	ixgbe_tx_ring_t *tx_ring;
1366 	boolean_t done;
1367 	int i, j;
1368 
1369 	/*
1370 	 * Wait for a specific time to allow pending tx packets
1371 	 * to be transmitted.
1372 	 *
1373 	 * Check the counter tbd_free to see if transmission is done.
1374 	 * No lock protection is needed here.
1375 	 *
1376 	 * Return B_TRUE if all pending packets have been transmitted;
1377 	 * Otherwise return B_FALSE;
1378 	 */
1379 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1380 
1381 		done = B_TRUE;
1382 		for (j = 0; j < ixgbe->num_tx_rings; j++) {
1383 			tx_ring = &ixgbe->tx_rings[j];
1384 			done = done &&
1385 			    (tx_ring->tbd_free == tx_ring->ring_size);
1386 		}
1387 
1388 		if (done)
1389 			break;
1390 
1391 		msec_delay(1);
1392 	}
1393 
1394 	return (done);
1395 }
1396 
1397 /*
1398  * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1399  */
1400 static boolean_t
1401 ixgbe_rx_drain(ixgbe_t *ixgbe)
1402 {
1403 	boolean_t done = B_TRUE;
1404 	int i;
1405 
1406 	/*
1407 	 * Polling the rx free list to check if those rx buffers held by
1408 	 * the upper layer are released.
1409 	 *
1410 	 * Check the counter rcb_free to see if all pending buffers are
1411 	 * released. No lock protection is needed here.
1412 	 *
1413 	 * Return B_TRUE if all pending buffers have been released;
1414 	 * Otherwise return B_FALSE;
1415 	 */
1416 	for (i = 0; i < RX_DRAIN_TIME; i++) {
1417 		done = (ixgbe->rcb_pending == 0);
1418 
1419 		if (done)
1420 			break;
1421 
1422 		msec_delay(1);
1423 	}
1424 
1425 	return (done);
1426 }
1427 
/*
 * ixgbe_start - Start the driver/chipset.
 *
 * Caller must hold gen_lock.  When alloc_buffer is B_TRUE, the software
 * rx rings and rx/tx DMA buffers are (re)allocated first; B_FALSE
 * reuses existing buffers (resume/reset path).  All rx then tx ring
 * locks are taken around chip start and ring setup, and released in
 * reverse order.
 *
 * Returns IXGBE_SUCCESS or IXGBE_FAILURE (the latter also reports
 * DDI_SERVICE_LOST to the FMA framework).
 */
int
ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	if (alloc_buffer) {
		if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe,
			    "Failed to allocate software receive rings");
			return (IXGBE_FAILURE);
		}

		/* Allocate buffers for all the rx/tx rings */
		if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe, "Failed to allocate DMA resource");
			return (IXGBE_FAILURE);
		}

		/* Tell ring setup that tx software state needs initializing */
		ixgbe->tx_ring_init = B_TRUE;
	} else {
		ixgbe->tx_ring_init = B_FALSE;
	}

	/* Lock order: all rx ring locks first, then all tx ring locks */
	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Start the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto start_failure;
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto start_failure;
	}

	/*
	 * Setup the rx/tx rings
	 */
	ixgbe_setup_rings(ixgbe);

	/*
	 * Enable adapter interrupts
	 * The interrupts must be enabled after the driver state is START
	 */
	ixgbe_enable_adapter_interrupts(ixgbe);

	/* Release the ring locks in reverse acquisition order */
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	return (IXGBE_SUCCESS);

start_failure:
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

	return (IXGBE_FAILURE);
}
1501 
/*
 * ixgbe_stop - Stop the driver/chipset.
 *
 * Caller must hold gen_lock.  Disables interrupts, drains pending tx
 * packets, stops the chipset under the ring locks, reclaims tx
 * resources, and reports link down.  When free_buffer is B_TRUE the
 * rx/tx DMA and software ring resources are also released (detach
 * path); B_FALSE keeps them for a subsequent restart (suspend/reset).
 */
void
ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Disable the adapter interrupts
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Drain the pending tx packets
	 */
	(void) ixgbe_tx_drain(ixgbe);

	/* Lock order: all rx ring locks first, then all tx ring locks */
	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Stop the chipset hardware
	 */
	ixgbe_chip_stop(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clean the pending tx data/resources
	 */
	ixgbe_tx_clean(ixgbe);

	/* Release the ring locks in reverse acquisition order */
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	/* Report link down to MAC so upper layers stop transmitting */
	if (ixgbe->link_state == LINK_STATE_UP) {
		ixgbe->link_state = LINK_STATE_UNKNOWN;
		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
	}

	if (free_buffer) {
		/*
		 * Release the DMA/memory resources of rx/tx rings
		 */
		ixgbe_free_dma(ixgbe);
		ixgbe_free_rx_data(ixgbe);
	}
}
1559 
1560 /*
1561  * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
1562  */
1563 static int
1564 ixgbe_alloc_rings(ixgbe_t *ixgbe)
1565 {
1566 	/*
1567 	 * Allocate memory space for rx rings
1568 	 */
1569 	ixgbe->rx_rings = kmem_zalloc(
1570 	    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
1571 	    KM_NOSLEEP);
1572 
1573 	if (ixgbe->rx_rings == NULL) {
1574 		return (IXGBE_FAILURE);
1575 	}
1576 
1577 	/*
1578 	 * Allocate memory space for tx rings
1579 	 */
1580 	ixgbe->tx_rings = kmem_zalloc(
1581 	    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
1582 	    KM_NOSLEEP);
1583 
1584 	if (ixgbe->tx_rings == NULL) {
1585 		kmem_free(ixgbe->rx_rings,
1586 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1587 		ixgbe->rx_rings = NULL;
1588 		return (IXGBE_FAILURE);
1589 	}
1590 
1591 	/*
1592 	 * Allocate memory space for rx ring groups
1593 	 */
1594 	ixgbe->rx_groups = kmem_zalloc(
1595 	    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
1596 	    KM_NOSLEEP);
1597 
1598 	if (ixgbe->rx_groups == NULL) {
1599 		kmem_free(ixgbe->rx_rings,
1600 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1601 		kmem_free(ixgbe->tx_rings,
1602 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1603 		ixgbe->rx_rings = NULL;
1604 		ixgbe->tx_rings = NULL;
1605 		return (IXGBE_FAILURE);
1606 	}
1607 
1608 	return (IXGBE_SUCCESS);
1609 }
1610 
1611 /*
1612  * ixgbe_free_rings - Free the memory space of rx/tx rings.
1613  */
1614 static void
1615 ixgbe_free_rings(ixgbe_t *ixgbe)
1616 {
1617 	if (ixgbe->rx_rings != NULL) {
1618 		kmem_free(ixgbe->rx_rings,
1619 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1620 		ixgbe->rx_rings = NULL;
1621 	}
1622 
1623 	if (ixgbe->tx_rings != NULL) {
1624 		kmem_free(ixgbe->tx_rings,
1625 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1626 		ixgbe->tx_rings = NULL;
1627 	}
1628 
1629 	if (ixgbe->rx_groups != NULL) {
1630 		kmem_free(ixgbe->rx_groups,
1631 		    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
1632 		ixgbe->rx_groups = NULL;
1633 	}
1634 }
1635 
1636 static int
1637 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
1638 {
1639 	ixgbe_rx_ring_t *rx_ring;
1640 	int i;
1641 
1642 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1643 		rx_ring = &ixgbe->rx_rings[i];
1644 		if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
1645 			goto alloc_rx_rings_failure;
1646 	}
1647 	return (IXGBE_SUCCESS);
1648 
1649 alloc_rx_rings_failure:
1650 	ixgbe_free_rx_data(ixgbe);
1651 	return (IXGBE_FAILURE);
1652 }
1653 
/*
 * ixgbe_free_rx_data - Free the software receive data of all rx rings.
 *
 * For each ring, marks the rx data IXGBE_RX_STOPPED under
 * rx_pending_lock.  The data is freed immediately only when no rx
 * buffers are still loaned out to the upper layer (rcb_pending == 0);
 * otherwise the free is deferred -- presumably performed later by the
 * buffer-return path when the last loaned rcb comes back (confirm
 * against the recycle code).
 */
static void
ixgbe_free_rx_data(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_data_t *rx_data;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];

		mutex_enter(&ixgbe->rx_pending_lock);
		rx_data = rx_ring->rx_data;

		if (rx_data != NULL) {
			rx_data->flag |= IXGBE_RX_STOPPED;

			if (rx_data->rcb_pending == 0) {
				ixgbe_free_rx_ring_data(rx_data);
				rx_ring->rx_data = NULL;
			}
		}

		mutex_exit(&ixgbe->rx_pending_lock);
	}
}
1679 
/*
 * ixgbe_setup_rings - Setup rx/tx rings.
 *
 * Caller is expected to hold gen_lock and all ring locks (asserted in
 * the per-ring setup routines).
 */
static void
ixgbe_setup_rings(ixgbe_t *ixgbe)
{
	/*
	 * Setup the rx/tx rings, including the following:
	 *
	 * 1. Setup the descriptor ring and the control block buffers;
	 * 2. Initialize necessary registers for receive/transmit;
	 * 3. Initialize software pointers/parameters for receive/transmit;
	 */
	ixgbe_setup_rx(ixgbe);

	ixgbe_setup_tx(ixgbe);
}
1697 
1698 static void
1699 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
1700 {
1701 	ixgbe_t *ixgbe = rx_ring->ixgbe;
1702 	ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
1703 	struct ixgbe_hw *hw = &ixgbe->hw;
1704 	rx_control_block_t *rcb;
1705 	union ixgbe_adv_rx_desc	*rbd;
1706 	uint32_t size;
1707 	uint32_t buf_low;
1708 	uint32_t buf_high;
1709 	uint32_t reg_val;
1710 	int i;
1711 
1712 	ASSERT(mutex_owned(&rx_ring->rx_lock));
1713 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1714 
1715 	for (i = 0; i < ixgbe->rx_ring_size; i++) {
1716 		rcb = rx_data->work_list[i];
1717 		rbd = &rx_data->rbd_ring[i];
1718 
1719 		rbd->read.pkt_addr = rcb->rx_buf.dma_address;
1720 		rbd->read.hdr_addr = NULL;
1721 	}
1722 
1723 	/*
1724 	 * Initialize the length register
1725 	 */
1726 	size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
1727 	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->index), size);
1728 
1729 	/*
1730 	 * Initialize the base address registers
1731 	 */
1732 	buf_low = (uint32_t)rx_data->rbd_area.dma_address;
1733 	buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
1734 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->index), buf_high);
1735 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->index), buf_low);
1736 
1737 	/*
1738 	 * Setup head & tail pointers
1739 	 */
1740 	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->index), rx_data->ring_size - 1);
1741 	IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->index), 0);
1742 
1743 	rx_data->rbd_next = 0;
1744 
1745 	/*
1746 	 * Setup the Receive Descriptor Control Register (RXDCTL)
1747 	 * PTHRESH=32 descriptors (half the internal cache)
1748 	 * HTHRESH=0 descriptors (to minimize latency on fetch)
1749 	 * WTHRESH defaults to 1 (writeback each descriptor)
1750 	 */
1751 	reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->index));
1752 	reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */
1753 
1754 	/* Not a valid value for 82599 */
1755 	if (hw->mac.type < ixgbe_mac_82599EB) {
1756 		reg_val |= 0x0020;	/* pthresh */
1757 	}
1758 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->index), reg_val);
1759 
1760 	if (hw->mac.type == ixgbe_mac_82599EB) {
1761 		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1762 		reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
1763 		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
1764 	}
1765 
1766 	/*
1767 	 * Setup the Split and Replication Receive Control Register.
1768 	 * Set the rx buffer size and the advanced descriptor type.
1769 	 */
1770 	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
1771 	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1772 	reg_val |= IXGBE_SRRCTL_DROP_EN;
1773 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->index), reg_val);
1774 }
1775 
/*
 * ixgbe_setup_rx - Program the global receive configuration.
 *
 * Configures PSRTYPE, filter control, enables the receive unit, sets
 * up every rx ring and rx group, programs the per-ring statistics
 * mapping, max frame size, jumbo-frame enable, hardware checksum,
 * and RSS (when more than one rx ring is configured).
 */
static void
ixgbe_setup_rx(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_rx_group_t *rx_group;
	uint32_t reg_val;
	uint32_t ring_mapping;
	int i;

	/* PSRTYPE must be configured for 82599 */
	reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
#define	IXGBE_PSRTYPE_L2_PKT	0x00001000
	reg_val |= IXGBE_PSRTYPE_L2_PKT;
	/*
	 * NOTE(review): 0xE0000000 sets the top PSRTYPE bits -- presumably
	 * the RQPL (RSS queues per pool) field per the 82599 datasheet;
	 * confirm against the datasheet before changing.
	 */
	reg_val |= 0xE0000000;
	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);

	/*
	 * Set filter control in FCTRL to accept broadcast packets and do
	 * not pass pause frames to host.  Flow control settings are already
	 * in this register, so preserve them.
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_val |= IXGBE_FCTRL_BAM;	/* broadcast accept mode */
	reg_val |= IXGBE_FCTRL_DPF;	/* discard pause frames */
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);

	/*
	 * Enable the receive unit.  This must be done after filter
	 * control is set in FCTRL.
	 */
	reg_val = (IXGBE_RXCTRL_RXEN	/* Enable Receive Unit */
	    | IXGBE_RXCTRL_DMBYPS);	/* descriptor monitor bypass */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);

	/*
	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		ixgbe_setup_rx_ring(rx_ring);
	}

	/*
	 * Setup rx groups.
	 */
	for (i = 0; i < ixgbe->num_rx_groups; i++) {
		rx_group = &ixgbe->rx_groups[i];
		rx_group->index = i;
		rx_group->ixgbe = ixgbe;
	}

	/*
	 * Setup the per-ring statistics mapping.
	 * Each RQSMR register maps four rings (one byte per ring);
	 * a partially filled register is flushed after the loop.
	 */
	ring_mapping = 0;
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
		if ((i & 0x3) == 0x3) {
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i >> 2), ring_mapping);
			ring_mapping = 0;
		}
	}
	if ((i & 0x3) != 0x3)
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i >> 2), ring_mapping);

	/*
	 * The Max Frame Size in MHADD/MAXFRS will be internally increased
	 * by four bytes if the packet has a VLAN field, so includes MTU,
	 * ethernet header and frame check sequence.
	 * Register is MAXFRS in 82599.
	 */
	reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);

	/*
	 * Setup Jumbo Frame enable bit
	 */
	if (ixgbe->default_mtu > ETHERMTU) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg_val |= IXGBE_HLREG0_JUMBOEN;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
	}

	/*
	 * Hardware checksum settings
	 */
	if (ixgbe->rx_hcksum_enable) {
		reg_val = IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
	}

	/*
	 * Setup RSS for multiple receive queues
	 */
	if (ixgbe->num_rx_rings > 1)
		ixgbe_setup_rss(ixgbe);
}
1876 
/*
 * ixgbe_setup_tx_ring - Program one tx ring into the hardware.
 *
 * Caller must hold gen_lock and the ring's tx_lock (asserted).
 * Programs the ring length/base/head/tail registers, optionally
 * configures head write-back, and resets the ring's software state.
 */
static void
ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
{
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;

	ASSERT(mutex_owned(&tx_ring->tx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Initialize the length register
	 */
	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);

	/*
	 * Initialize the base address registers
	 */
	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);

	/*
	 * Setup head & tail pointers
	 */
	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);

	/*
	 * Setup head write-back
	 */
	if (ixgbe->tx_head_wb_enable) {
		/*
		 * The memory of the head write-back is allocated using
		 * the extra tbd beyond the tail of the tbd ring.
		 */
		tx_ring->tbd_head_wb = (uint32_t *)
		    ((uintptr_t)tx_ring->tbd_area.address + size);
		*tx_ring->tbd_head_wb = 0;

		buf_low = (uint32_t)
		    (tx_ring->tbd_area.dma_address + size);
		buf_high = (uint32_t)
		    ((tx_ring->tbd_area.dma_address + size) >> 32);

		/* Set the head write-back enable bit */
		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;

		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);

		/*
		 * Turn off relaxed ordering for head write back or it will
		 * cause problems with the tx recycling
		 */
		reg_val = IXGBE_READ_REG(hw,
		    IXGBE_DCA_TXCTRL(tx_ring->index));
		reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw,
		    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
	} else {
		tx_ring->tbd_head_wb = NULL;
	}

	/* Reset the software view of the (now empty) descriptor ring */
	tx_ring->tbd_head = 0;
	tx_ring->tbd_tail = 0;
	tx_ring->tbd_free = tx_ring->ring_size;

	/*
	 * The tcb free list state is only reset on a fresh buffer
	 * allocation (tx_ring_init set in ixgbe_start); on reset/resume
	 * the existing free list is preserved.
	 */
	if (ixgbe->tx_ring_init == B_TRUE) {
		tx_ring->tcb_head = 0;
		tx_ring->tcb_tail = 0;
		tx_ring->tcb_free = tx_ring->free_list_size;
	}

	/*
	 * Initialize the s/w context structure
	 */
	bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
}
1961 
/*
 * ixgbe_setup_tx - Program the global transmit configuration.
 *
 * Sets up every tx ring, programs the per-ring statistics mapping
 * (TQSM on 82599 and later, TQSMR before that), enables CRC append /
 * padding, and finally enables tx DMA and each tx queue -- on 82599
 * the queue enables must follow DMATXCTL.TE.
 */
static void
ixgbe_setup_tx(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_tx_ring_t *tx_ring;
	uint32_t reg_val;
	uint32_t ring_mapping;
	int i;

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		ixgbe_setup_tx_ring(tx_ring);
	}

	/*
	 * Setup the per-ring statistics mapping.
	 * Each register maps four rings (one byte per ring); a partially
	 * filled register is flushed after the loop.
	 */
	ring_mapping = 0;
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
		if ((i & 0x3) == 0x3) {
			if (hw->mac.type >= ixgbe_mac_82599EB) {
				IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
				    ring_mapping);
			} else {
				IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
				    ring_mapping);
			}
			ring_mapping = 0;
		}
	}
	if ((i & 0x3) != 0x3)
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
		}

	/*
	 * Enable CRC appending and TX padding (for short tx frames)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);

	/*
	 * enable DMA for 82599 parts
	 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
	/* DMATXCTL.TE must be set after all Tx config is complete */
		reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		reg_val |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
	}

	/*
	 * Enabling tx queues ..
	 * For 82599 must be done after DMATXCTL.TE is set
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
		reg_val |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
	}
}
2028 
/*
 * ixgbe_setup_rss - Setup receive-side scaling feature.
 *
 * Fills the 128-entry redirection table round-robin across the rx
 * rings, seeds the RSS hash key with random data, enables RSS for
 * the supported IPv4/IPv6 packet types, and disables Packet Checksum
 * (which the hardware cannot combine with RSS).
 */
static void
ixgbe_setup_rss(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t i, mrqc, rxcsum;
	uint32_t random;
	uint32_t reta;

	/*
	 * Fill out redirection table: four one-byte entries are packed
	 * into each RETA register, written every fourth iteration.
	 */
	reta = 0;
	for (i = 0; i < 128; i++) {
		reta = (reta << 8) | (i % ixgbe->num_rx_rings);
		if ((i & 3) == 3)
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
	}

	/*
	 * Fill out hash function seeds with a random constant
	 * (10 x 32-bit RSSRK registers = 40-byte RSS key)
	 */
	for (i = 0; i < 10; i++) {
		(void) random_get_pseudo_bytes((uint8_t *)&random,
		    sizeof (uint32_t));
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
	}

	/*
	 * Enable RSS & perform hash on these packet types
	 */
	mrqc = IXGBE_MRQC_RSSEN |
	    IXGBE_MRQC_RSS_FIELD_IPV4 |
	    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
	    IXGBE_MRQC_RSS_FIELD_IPV6 |
	    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

	/*
	 * Disable Packet Checksum to enable RSS for multiple receive queues.
	 * It is an adapter hardware limitation that Packet Checksum is
	 * mutually exclusive with RSS.
	 */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	rxcsum |= IXGBE_RXCSUM_PCSD;
	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}
2084 
2085 /*
2086  * ixgbe_init_unicst - Initialize the unicast addresses.
2087  */
2088 static void
2089 ixgbe_init_unicst(ixgbe_t *ixgbe)
2090 {
2091 	struct ixgbe_hw *hw = &ixgbe->hw;
2092 	uint8_t *mac_addr;
2093 	int slot;
2094 	/*
2095 	 * Here we should consider two situations:
2096 	 *
2097 	 * 1. Chipset is initialized at the first time,
2098 	 *    Clear all the multiple unicast addresses.
2099 	 *
2100 	 * 2. Chipset is reset
2101 	 *    Recover the multiple unicast addresses from the
2102 	 *    software data structure to the RAR registers.
2103 	 */
2104 	if (!ixgbe->unicst_init) {
2105 		/*
2106 		 * Initialize the multiple unicast addresses
2107 		 */
2108 		ixgbe->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
2109 		ixgbe->unicst_avail = ixgbe->unicst_total;
2110 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2111 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2112 			bzero(mac_addr, ETHERADDRL);
2113 			(void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL);
2114 			ixgbe->unicst_addr[slot].mac.set = 0;
2115 		}
2116 		ixgbe->unicst_init = B_TRUE;
2117 	} else {
2118 		/* Re-configure the RAR registers */
2119 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2120 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2121 			if (ixgbe->unicst_addr[slot].mac.set == 1) {
2122 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2123 				    NULL, IXGBE_RAH_AV);
2124 			} else {
2125 				bzero(mac_addr, ETHERADDRL);
2126 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2127 				    NULL, NULL);
2128 			}
2129 		}
2130 	}
2131 }
2132 
2133 /*
2134  * ixgbe_unicst_set - Set the unicast address to the specified slot.
2135  */
2136 int
2137 ixgbe_unicst_set(ixgbe_t *ixgbe, const uint8_t *mac_addr,
2138     int slot)
2139 {
2140 	struct ixgbe_hw *hw = &ixgbe->hw;
2141 
2142 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2143 
2144 	/*
2145 	 * Save the unicast address in the software data structure
2146 	 */
2147 	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
2148 
2149 	/*
2150 	 * Set the unicast address to the RAR register
2151 	 */
2152 	(void) ixgbe_set_rar(hw, slot, (uint8_t *)mac_addr, NULL, IXGBE_RAH_AV);
2153 
2154 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2155 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2156 		return (EIO);
2157 	}
2158 
2159 	return (0);
2160 }
2161 
2162 /*
2163  * ixgbe_unicst_find - Find the slot for the specified unicast address
2164  */
2165 int
2166 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
2167 {
2168 	int slot;
2169 
2170 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2171 
2172 	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2173 		if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
2174 		    mac_addr, ETHERADDRL) == 0)
2175 			return (slot);
2176 	}
2177 
2178 	return (-1);
2179 }
2180 
2181 /*
2182  * ixgbe_multicst_add - Add a multicst address.
2183  */
2184 int
2185 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2186 {
2187 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2188 
2189 	if ((multiaddr[0] & 01) == 0) {
2190 		return (EINVAL);
2191 	}
2192 
2193 	if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2194 		return (ENOENT);
2195 	}
2196 
2197 	bcopy(multiaddr,
2198 	    &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2199 	ixgbe->mcast_count++;
2200 
2201 	/*
2202 	 * Update the multicast table in the hardware
2203 	 */
2204 	ixgbe_setup_multicst(ixgbe);
2205 
2206 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2207 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2208 		return (EIO);
2209 	}
2210 
2211 	return (0);
2212 }
2213 
2214 /*
2215  * ixgbe_multicst_remove - Remove a multicst address.
2216  */
2217 int
2218 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2219 {
2220 	int i;
2221 
2222 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2223 
2224 	for (i = 0; i < ixgbe->mcast_count; i++) {
2225 		if (bcmp(multiaddr, &ixgbe->mcast_table[i],
2226 		    ETHERADDRL) == 0) {
2227 			for (i++; i < ixgbe->mcast_count; i++) {
2228 				ixgbe->mcast_table[i - 1] =
2229 				    ixgbe->mcast_table[i];
2230 			}
2231 			ixgbe->mcast_count--;
2232 			break;
2233 		}
2234 	}
2235 
2236 	/*
2237 	 * Update the multicast table in the hardware
2238 	 */
2239 	ixgbe_setup_multicst(ixgbe);
2240 
2241 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2242 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2243 		return (EIO);
2244 	}
2245 
2246 	return (0);
2247 }
2248 
2249 /*
 * ixgbe_setup_multicst - Setup multicast data structures.
2251  *
2252  * This routine initializes all of the multicast related structures
2253  * and save them in the hardware registers.
2254  */
static void
ixgbe_setup_multicst(ixgbe_t *ixgbe)
{
	uint8_t *mc_addr_list;
	uint32_t mc_addr_count;
	struct ixgbe_hw *hw = &ixgbe->hw;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);

	/* The shared code consumes the table as a flat byte array */
	mc_addr_list = (uint8_t *)ixgbe->mcast_table;
	mc_addr_count = ixgbe->mcast_count;

	/*
	 * Update the multicast addresses to the MTA registers
	 */
	(void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
	    ixgbe_mc_table_itr);
}
2275 
2276 /*
2277  * ixgbe_get_conf - Get driver configurations set in driver.conf.
2278  *
2279  * This routine gets user-configured values out of the configuration
2280  * file ixgbe.conf.
2281  *
2282  * For each configurable value, there is a minimum, a maximum, and a
2283  * default.
2284  * If user does not configure a value, use the default.
 * If user configures below the minimum, use the minimum.
 * If user configures above the maximum, use the maximum.
2287  */
static void
ixgbe_get_conf(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t flow_control;

	/*
	 * ixgbe driver supports the following user configurations:
	 *
	 * Jumbo frame configuration:
	 *    default_mtu
	 *
	 * Ethernet flow control configuration:
	 *    flow_control
	 *
	 * Multiple rings configurations:
	 *    tx_queue_number
	 *    tx_ring_size
	 *    rx_queue_number
	 *    rx_ring_size
	 *
	 * Call ixgbe_get_prop() to get the value for a specific
	 * configuration parameter.
	 */

	/*
	 * Jumbo frame configuration - max_frame_size controls host buffer
	 * allocation, so includes MTU, ethernet header, vlan tag and
	 * frame check sequence.
	 */
	ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
	    MIN_MTU, MAX_MTU, DEFAULT_MTU);

	ixgbe->max_frame_size = ixgbe->default_mtu +
	    sizeof (struct ether_vlan_header) + ETHERFCSL;

	/*
	 * Ethernet flow control configuration.
	 * The property value 3 is a user alias for ixgbe_fc_default.
	 */
	flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
	    ixgbe_fc_none, 3, ixgbe_fc_none);
	if (flow_control == 3)
		flow_control = ixgbe_fc_default;

	/*
	 * fc.requested mode is what the user requests.  After autoneg,
	 * fc.current_mode will be the flow_control mode that was negotiated.
	 */
	hw->fc.requested_mode = flow_control;

	/*
	 * Multiple rings configurations
	 */
	ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
	    ixgbe->capab->min_tx_que_num,
	    ixgbe->capab->max_tx_que_num,
	    ixgbe->capab->def_tx_que_num);
	ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
	    MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);

	ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
	    ixgbe->capab->min_rx_que_num,
	    ixgbe->capab->max_rx_que_num,
	    ixgbe->capab->def_rx_que_num);
	ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
	    MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);

	/*
	 * Multiple groups configuration
	 */
	ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
	    MIN_RX_GROUP_NUM, MAX_RX_GROUP_NUM, DEFAULT_RX_GROUP_NUM);

	ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
	    0, 1, DEFAULT_MR_ENABLE);

	/* With multiple rings disabled, collapse to one ring/group each */
	if (ixgbe->mr_enable == B_FALSE) {
		ixgbe->num_tx_rings = 1;
		ixgbe->num_rx_rings = 1;
		ixgbe->num_rx_groups = 1;
	}

	/*
	 * Tunable used to force an interrupt type. The only use is
	 * for testing of the lesser interrupt types.
	 * 0 = don't force interrupt type
	 * 1 = force interrupt type MSI-X
	 * 2 = force interrupt type MSI
	 * 3 = force interrupt type Legacy
	 */
	ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
	    IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);

	ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
	ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
	    0, 1, DEFAULT_RX_HCKSUM_ENABLE);
	ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
	    0, 1, DEFAULT_LSO_ENABLE);
	ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
	    0, 1, DEFAULT_TX_HEAD_WB_ENABLE);

	/* Head Write Back not recommended for 82599 */
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		ixgbe->tx_head_wb_enable = B_FALSE;
	}

	/*
	 * ixgbe LSO needs the tx h/w checksum support.
	 * LSO will be disabled if tx h/w checksum is not
	 * enabled.
	 */
	if (ixgbe->tx_hcksum_enable == B_FALSE) {
		ixgbe->lso_enable = B_FALSE;
	}

	ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
	    MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
	    DEFAULT_TX_COPY_THRESHOLD);
	ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
	    MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
	ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
	    MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
	ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
	    MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);

	ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
	    MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
	    DEFAULT_RX_COPY_THRESHOLD);
	ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
	    MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
	    DEFAULT_RX_LIMIT_PER_INTR);

	ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
	    ixgbe->capab->min_intr_throttle,
	    ixgbe->capab->max_intr_throttle,
	    ixgbe->capab->def_intr_throttle);
	/*
	 * 82599 requires the interrupt throttling rate is
	 * a multiple of 8. This is enforced by the register
	 * definition.
	 */
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
}
2436 
/*
 * ixgbe_init_params - Initialize the link capability/advertisement
 * parameters to their defaults: all local speeds enabled and advertised,
 * autonegotiation and (asymmetric) pause advertised, and all link-partner
 * (lp_*) values cleared until learned from the partner.
 */
static void
ixgbe_init_params(ixgbe_t *ixgbe)
{
	/* Local capabilities: enable and advertise every supported speed */
	ixgbe->param_en_10000fdx_cap = 1;
	ixgbe->param_en_1000fdx_cap = 1;
	ixgbe->param_en_100fdx_cap = 1;
	ixgbe->param_adv_10000fdx_cap = 1;
	ixgbe->param_adv_1000fdx_cap = 1;
	ixgbe->param_adv_100fdx_cap = 1;

	ixgbe->param_pause_cap = 1;
	ixgbe->param_asym_pause_cap = 1;
	ixgbe->param_rem_fault = 0;

	/* Advertise autonegotiation and flow-control capabilities */
	ixgbe->param_adv_autoneg_cap = 1;
	ixgbe->param_adv_pause_cap = 1;
	ixgbe->param_adv_asym_pause_cap = 1;
	ixgbe->param_adv_rem_fault = 0;

	/* Link-partner values are unknown until negotiation completes */
	ixgbe->param_lp_10000fdx_cap = 0;
	ixgbe->param_lp_1000fdx_cap = 0;
	ixgbe->param_lp_100fdx_cap = 0;
	ixgbe->param_lp_autoneg_cap = 0;
	ixgbe->param_lp_pause_cap = 0;
	ixgbe->param_lp_asym_pause_cap = 0;
	ixgbe->param_lp_rem_fault = 0;
}
2464 
2465 /*
2466  * ixgbe_get_prop - Get a property value out of the configuration file
2467  * ixgbe.conf.
2468  *
2469  * Caller provides the name of the property, a default value, a minimum
2470  * value, and a maximum value.
2471  *
2472  * Return configured value of the property, with default, minimum and
2473  * maximum properly applied.
2474  */
2475 static int
2476 ixgbe_get_prop(ixgbe_t *ixgbe,
2477     char *propname,	/* name of the property */
2478     int minval,		/* minimum acceptable value */
2479     int maxval,		/* maximim acceptable value */
2480     int defval)		/* default value */
2481 {
2482 	int value;
2483 
2484 	/*
2485 	 * Call ddi_prop_get_int() to read the conf settings
2486 	 */
2487 	value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
2488 	    DDI_PROP_DONTPASS, propname, defval);
2489 	if (value > maxval)
2490 		value = maxval;
2491 
2492 	if (value < minval)
2493 		value = minval;
2494 
2495 	return (value);
2496 }
2497 
2498 /*
2499  * ixgbe_driver_setup_link - Using the link properties to setup the link.
2500  */
2501 int
2502 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
2503 {
2504 	u32 autoneg_advertised = 0;
2505 
2506 	/*
2507 	 * No half duplex support with 10Gb parts
2508 	 */
2509 	if (ixgbe->param_adv_10000fdx_cap == 1)
2510 		autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
2511 
2512 	if (ixgbe->param_adv_1000fdx_cap == 1)
2513 		autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
2514 
2515 	if (ixgbe->param_adv_100fdx_cap == 1)
2516 		autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
2517 
2518 	if (ixgbe->param_adv_autoneg_cap == 1 && autoneg_advertised == 0) {
2519 		ixgbe_notice(ixgbe, "Invalid link settings. Setup link "
2520 		    "to autonegotiation with full link capabilities.");
2521 
2522 		autoneg_advertised = IXGBE_LINK_SPEED_10GB_FULL |
2523 		    IXGBE_LINK_SPEED_1GB_FULL |
2524 		    IXGBE_LINK_SPEED_100_FULL;
2525 	}
2526 
2527 	if (setup_hw) {
2528 		if (ixgbe_setup_link(&ixgbe->hw, autoneg_advertised,
2529 		    ixgbe->param_adv_autoneg_cap, B_TRUE) != IXGBE_SUCCESS) {
2530 			ixgbe_notice(ixgbe, "Setup link failed on this "
2531 			    "device.");
2532 			return (IXGBE_FAILURE);
2533 		}
2534 	}
2535 
2536 	return (IXGBE_SUCCESS);
2537 }
2538 
2539 /*
2540  * ixgbe_driver_link_check - Link status processing done in taskq.
2541  */
2542 static void
2543 ixgbe_driver_link_check(void *arg)
2544 {
2545 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2546 	struct ixgbe_hw *hw = &ixgbe->hw;
2547 	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
2548 	boolean_t link_up = B_FALSE;
2549 	boolean_t link_changed = B_FALSE;
2550 
2551 	mutex_enter(&ixgbe->gen_lock);
2552 
2553 	/* check for link, wait the full time */
2554 	(void) ixgbe_check_link(hw, &speed, &link_up, true);
2555 	if (link_up) {
2556 		/* Link is up, enable flow control settings */
2557 		(void) ixgbe_fc_enable(hw, 0);
2558 
2559 		/*
2560 		 * The Link is up, check whether it was marked as down earlier
2561 		 */
2562 		if (ixgbe->link_state != LINK_STATE_UP) {
2563 			switch (speed) {
2564 			case IXGBE_LINK_SPEED_10GB_FULL:
2565 				ixgbe->link_speed = SPEED_10GB;
2566 				break;
2567 			case IXGBE_LINK_SPEED_1GB_FULL:
2568 				ixgbe->link_speed = SPEED_1GB;
2569 				break;
2570 			case IXGBE_LINK_SPEED_100_FULL:
2571 				ixgbe->link_speed = SPEED_100;
2572 			}
2573 			ixgbe->link_duplex = LINK_DUPLEX_FULL;
2574 			ixgbe->link_state = LINK_STATE_UP;
2575 			ixgbe->link_down_timeout = 0;
2576 			link_changed = B_TRUE;
2577 		}
2578 	} else {
2579 		if (ixgbe->link_state != LINK_STATE_DOWN) {
2580 			ixgbe->link_speed = 0;
2581 			ixgbe->link_duplex = 0;
2582 			ixgbe->link_state = LINK_STATE_DOWN;
2583 			link_changed = B_TRUE;
2584 		}
2585 
2586 		if (ixgbe->ixgbe_state & IXGBE_STARTED) {
2587 			if (ixgbe->link_down_timeout < MAX_LINK_DOWN_TIMEOUT) {
2588 				ixgbe->link_down_timeout++;
2589 			} else if (ixgbe->link_down_timeout ==
2590 			    MAX_LINK_DOWN_TIMEOUT) {
2591 				ixgbe_tx_clean(ixgbe);
2592 				ixgbe->link_down_timeout++;
2593 			}
2594 		}
2595 	}
2596 
2597 	/*
2598 	 * this is only reached after a link-status-change interrupt
2599 	 * so always get new phy state
2600 	 */
2601 	ixgbe_get_hw_state(ixgbe);
2602 
2603 	/* re-enable the interrupt, which was automasked */
2604 	ixgbe->eims |= IXGBE_EICR_LSC;
2605 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
2606 
2607 	mutex_exit(&ixgbe->gen_lock);
2608 
2609 	/* outside the gen_lock */
2610 	if (link_changed) {
2611 		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
2612 	}
2613 }
2614 
2615 /*
2616  * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
2617  */
static void
ixgbe_sfp_check(void *arg)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	uint32_t eicr = ixgbe->eicr;	/* cause bits saved by the ISR */
	struct ixgbe_hw *hw = &ixgbe->hw;

	/* SDP1: multispeed fiber setup, then refresh link state */
	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);

		/* if link up, do multispeed fiber setup */
		(void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
		    B_TRUE, B_TRUE);
		ixgbe_driver_link_check(ixgbe);
	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* SDP2: newly plugged SFP module; (re)initialize it first */
		/* clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);

		/* if link up, do sfp module setup */
		(void) hw->mac.ops.setup_sfp(hw);

		/* do multispeed fiber setup */
		(void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
		    B_TRUE, B_TRUE);
		ixgbe_driver_link_check(ixgbe);
	}
}
2646 
2647 /*
2648  * ixgbe_local_timer - Driver watchdog function.
2649  *
2650  * This function will handle the transmit stall check, link status check and
2651  * other routines.
2652  */
static void
ixgbe_local_timer(void *arg)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;

	/*
	 * On a detected tx stall: mark the service degraded, reset the
	 * chip, and restore the service state if the reset succeeds.
	 */
	if (ixgbe_stall_check(ixgbe)) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		ixgbe->reset_count++;
		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
	}

	/* Re-arm: timeout(9F) fires only once per arm */
	ixgbe_restart_watchdog_timer(ixgbe);
}
2667 
2668 /*
2669  * ixgbe_stall_check - Check for transmit stall.
2670  *
2671  * This function checks if the adapter is stalled (in transmit).
2672  *
2673  * It is called each time the watchdog timeout is invoked.
2674  * If the transmit descriptor reclaim continuously fails,
2675  * the watchdog value will increment by 1. If the watchdog
2676  * value exceeds the threshold, the ixgbe is assumed to
2677  * have stalled and need to be reset.
2678  */
static boolean_t
ixgbe_stall_check(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	boolean_t result;
	int i;

	/* No stall detection while the link is down */
	if (ixgbe->link_state != LINK_STATE_UP)
		return (B_FALSE);

	/*
	 * If any tx ring is stalled, we'll reset the chipset
	 */
	result = B_FALSE;
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		/* Help the ring along when free descriptors run low */
		if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
			tx_ring->tx_recycle(tx_ring);
		}

		/* Consecutive recycle failures accumulate in the watchdog */
		if (tx_ring->recycle_fail > 0)
			tx_ring->stall_watchdog++;
		else
			tx_ring->stall_watchdog = 0;

		if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
			result = B_TRUE;
			break;
		}
	}

	/* On break, tx_ring still points at the stalled ring: reset it */
	if (result) {
		tx_ring->stall_watchdog = 0;
		tx_ring->recycle_fail = 0;
	}

	return (result);
}
2717 
2718 
2719 /*
2720  * is_valid_mac_addr - Check if the mac address is valid.
2721  */
2722 static boolean_t
2723 is_valid_mac_addr(uint8_t *mac_addr)
2724 {
2725 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
2726 	const uint8_t addr_test2[6] =
2727 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2728 
2729 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
2730 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
2731 		return (B_FALSE);
2732 
2733 	return (B_TRUE);
2734 }
2735 
/*
 * ixgbe_find_mac_address - On sparc, allow firmware (OBP) properties to
 * override the factory MAC address; later lookups take precedence over
 * earlier ones.  On other platforms this is a no-op.  Always returns
 * B_TRUE (callers get the chip-derived address when nothing overrides).
 */
static boolean_t
ixgbe_find_mac_address(ixgbe_t *ixgbe)
{
#ifdef __sparc
	struct ixgbe_hw *hw = &ixgbe->hw;
	uchar_t *bytes;
	struct ether_addr sysaddr;
	uint_t nelts;
	int err;
	boolean_t found = B_FALSE;

	/*
	 * The "vendor's factory-set address" may already have
	 * been extracted from the chip, but if the property
	 * "local-mac-address" is set we use that instead.
	 *
	 * We check whether it looks like an array of 6
	 * bytes (which it should, if OBP set it).  If we can't
	 * make sense of it this way, we'll ignore it.
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Look up the OBP property "local-mac-address?". If the user has set
	 * 'local-mac-address? = false', use "the system address" instead.
	 */
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
			if (localetheraddr(NULL, &sysaddr) != 0) {
				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
				found = B_TRUE;
			}
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Finally(!), if there's a valid "mac-address" property (created
	 * if we netbooted from this interface), we must use this instead
	 * of any of the above to ensure that the NFS/install server doesn't
	 * get confused by the address changing as Solaris takes over!
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	/* Keep the permanent address in sync with any override */
	if (found) {
		bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
		return (B_TRUE);
	}
#else
	_NOTE(ARGUNUSED(ixgbe));
#endif

	return (B_TRUE);
}
2809 
2810 #pragma inline(ixgbe_arm_watchdog_timer)
static void
ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
{
	/*
	 * Fire a watchdog timer: schedule ixgbe_local_timer() to run
	 * once, one second from now (drv_usectohz converts microseconds
	 * to clock ticks).  Caller holds watchdog_lock.
	 */
	ixgbe->watchdog_tid =
	    timeout(ixgbe_local_timer,
	    (void *)ixgbe, 1 * drv_usectohz(1000000));

}
2822 
2823 /*
2824  * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
2825  */
void
ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
{
	mutex_enter(&ixgbe->watchdog_lock);

	/* Idempotent: arm the timer only on the first enable */
	if (!ixgbe->watchdog_enable) {
		ixgbe->watchdog_enable = B_TRUE;
		ixgbe->watchdog_start = B_TRUE;
		ixgbe_arm_watchdog_timer(ixgbe);
	}

	mutex_exit(&ixgbe->watchdog_lock);
}
2839 
2840 /*
2841  * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
2842  */
void
ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
{
	timeout_id_t tid;

	mutex_enter(&ixgbe->watchdog_lock);

	/* Clear the flags and capture the timeout id under the lock */
	ixgbe->watchdog_enable = B_FALSE;
	ixgbe->watchdog_start = B_FALSE;
	tid = ixgbe->watchdog_tid;
	ixgbe->watchdog_tid = 0;

	mutex_exit(&ixgbe->watchdog_lock);

	/*
	 * untimeout(9F) outside the lock: it may block waiting for a
	 * running ixgbe_local_timer(), which re-takes watchdog_lock.
	 */
	if (tid != 0)
		(void) untimeout(tid);
}
2860 
2861 /*
2862  * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
2863  */
2864 void
2865 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
2866 {
2867 	mutex_enter(&ixgbe->watchdog_lock);
2868 
2869 	if (ixgbe->watchdog_enable) {
2870 		if (!ixgbe->watchdog_start) {
2871 			ixgbe->watchdog_start = B_TRUE;
2872 			ixgbe_arm_watchdog_timer(ixgbe);
2873 		}
2874 	}
2875 
2876 	mutex_exit(&ixgbe->watchdog_lock);
2877 }
2878 
2879 /*
2880  * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
2881  */
static void
ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
{
	mutex_enter(&ixgbe->watchdog_lock);

	/* Re-arm only while the watchdog is marked started */
	if (ixgbe->watchdog_start)
		ixgbe_arm_watchdog_timer(ixgbe);

	mutex_exit(&ixgbe->watchdog_lock);
}
2892 
2893 /*
2894  * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
2895  */
void
ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
{
	timeout_id_t tid;

	mutex_enter(&ixgbe->watchdog_lock);

	/* Stop re-arming, but leave watchdog_enable for a later restart */
	ixgbe->watchdog_start = B_FALSE;
	tid = ixgbe->watchdog_tid;
	ixgbe->watchdog_tid = 0;

	mutex_exit(&ixgbe->watchdog_lock);

	/* untimeout(9F) outside the lock to avoid deadlock with the timer */
	if (tid != 0)
		(void) untimeout(tid);
}
2912 
2913 /*
2914  * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
2915  */
static void
ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	/*
	 * mask all interrupts off
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);

	/*
	 * for MSI-X, also disable autoclear
	 */
	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
	}

	/* Flush posted writes so the masking takes effect immediately */
	IXGBE_WRITE_FLUSH(hw);
}
2935 
2936 /*
2937  * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
2938  */
static void
ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eiac, eiam;
	uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* interrupt types to enable */
	ixgbe->eims = IXGBE_EIMS_ENABLE_MASK;	/* shared code default */
	ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER;	/* minus tcp timer */
	ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */

	/* enable automask on "other" causes that this adapter can generate */
	eiam = ixgbe->capab->other_intr;

	/*
	 * msi-x mode
	 */
	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
		/* enable autoclear but not on bits 29:20 */
		eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);

		/* general purpose interrupt enable */
		gpie |= (IXGBE_GPIE_MSIX_MODE
		    | IXGBE_GPIE_PBA_SUPPORT
		    | IXGBE_GPIE_OCD
		    | IXGBE_GPIE_EIAME);
	/*
	 * non-msi-x mode
	 */
	} else {

		/* disable autoclear, leave gpie at default */
		eiac = 0;

		/*
		 * General purpose interrupt enable.
		 * For 82599, extended interrupt automask enable
		 * only in MSI or MSI-X mode
		 */
		if ((hw->mac.type < ixgbe_mac_82599EB) ||
		    (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
			gpie |= IXGBE_GPIE_EIAME;
		}
	}
	/*
	 * Enable specific interrupts for 82599.
	 * NOTE(review): ixgbe_sfp_check() handles SDP2 as SFP module
	 * plug-in and SDP1 as link/multispeed-fiber; the two inline
	 * labels below look swapped relative to that — confirm against
	 * the 82599 datasheet before relying on them.
	 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		gpie |= IXGBE_SDP2_GPIEN; /* pluggable optics intr */
		gpie |= IXGBE_SDP1_GPIEN; /* LSC interrupt */
	}

	/* write to interrupt control registers */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
	IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
	IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	IXGBE_WRITE_FLUSH(hw);
}
2997 
2998 /*
2999  * ixgbe_loopback_ioctl - Loopback support.
3000  */
enum ioc_reply
ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	uint32_t size;		/* payload size echoed back in ioc_count */
	uint32_t value;

	/* All subcommands carry their payload in the first mblk cont */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	switch (iocp->ioc_cmd) {
	default:
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		/* Report how many bytes LB_GET_INFO will need */
		size = sizeof (lb_info_sz_t);
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		value = sizeof (lb_normal);
		value += sizeof (lb_mac);

		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
		*lbsp = value;
		break;

	case LB_GET_INFO:
		/* Fill in the supported loopback property list */
		value = sizeof (lb_normal);
		value += sizeof (lb_mac);

		size = value;
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		value = 0;
		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;

		lbpp[value++] = lb_normal;
		lbpp[value++] = lb_mac;
		break;

	case LB_GET_MODE:
		/* Return the currently configured loopback mode */
		size = sizeof (uint32_t);
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		*lbmp = ixgbe->loopback_mode;
		break;

	case LB_SET_MODE:
		/* Change the loopback mode; no payload is returned */
		size = 0;
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);

		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
			return (IOC_INVAL);
		break;
	}

	iocp->ioc_count = size;
	iocp->ioc_error = 0;

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		return (IOC_INVAL);
	}

	return (IOC_REPLY);
}
3074 
3075 /*
3076  * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
3077  */
3078 static boolean_t
3079 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
3080 {
3081 	if (mode == ixgbe->loopback_mode)
3082 		return (B_TRUE);
3083 
3084 	ixgbe->loopback_mode = mode;
3085 
3086 	if (mode == IXGBE_LB_NONE) {
3087 		/*
3088 		 * Reset the chip
3089 		 */
3090 		(void) ixgbe_reset(ixgbe);
3091 		return (B_TRUE);
3092 	}
3093 
3094 	mutex_enter(&ixgbe->gen_lock);
3095 
3096 	switch (mode) {
3097 	default:
3098 		mutex_exit(&ixgbe->gen_lock);
3099 		return (B_FALSE);
3100 
3101 	case IXGBE_LB_INTERNAL_MAC:
3102 		ixgbe_set_internal_mac_loopback(ixgbe);
3103 		break;
3104 	}
3105 
3106 	mutex_exit(&ixgbe->gen_lock);
3107 
3108 	return (B_TRUE);
3109 }
3110 
3111 /*
3112  * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
3113  */
3114 static void
3115 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
3116 {
3117 	struct ixgbe_hw *hw;
3118 	uint32_t reg;
3119 	uint8_t atlas;
3120 
3121 	hw = &ixgbe->hw;
3122 
3123 	/*
3124 	 * Setup MAC loopback
3125 	 */
3126 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
3127 	reg |= IXGBE_HLREG0_LPBK;
3128 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
3129 
3130 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
3131 	reg &= ~IXGBE_AUTOC_LMS_MASK;
3132 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
3133 
3134 	/*
3135 	 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
3136 	 */
3137 	if (hw->mac.type == ixgbe_mac_82598EB) {
3138 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3139 		    &atlas);
3140 		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
3141 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3142 		    atlas);
3143 
3144 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3145 		    &atlas);
3146 		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
3147 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3148 		    atlas);
3149 
3150 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3151 		    &atlas);
3152 		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
3153 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3154 		    atlas);
3155 
3156 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3157 		    &atlas);
3158 		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
3159 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3160 		    atlas);
3161 	}
3162 }
3163 
3164 #pragma inline(ixgbe_intr_rx_work)
3165 /*
3166  * ixgbe_intr_rx_work - RX processing of ISR.
3167  */
static void
ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
{
	mblk_t *mp;

	mutex_enter(&rx_ring->rx_lock);

	/* Harvest received packets (IXGBE_POLL_NULL: no byte budget) */
	mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
	mutex_exit(&rx_ring->rx_lock);

	/* Hand the chain to the MAC layer outside the ring lock */
	if (mp != NULL)
		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
		    rx_ring->ring_gen_num);
}
3182 
3183 #pragma inline(ixgbe_intr_tx_work)
3184 /*
3185  * ixgbe_intr_tx_work - TX processing of ISR.
3186  */
static void
ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
{
	ixgbe_t *ixgbe = tx_ring->ixgbe;

	/*
	 * Recycle the tx descriptors
	 */
	tx_ring->tx_recycle(tx_ring);

	/*
	 * Schedule the re-transmit: wake the MAC layer only when a send
	 * was previously blocked and enough descriptors are free again.
	 */
	if (tx_ring->reschedule &&
	    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
		tx_ring->reschedule = B_FALSE;
		mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
		    tx_ring->ring_handle);
		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
	}
}
3208 
3209 #pragma inline(ixgbe_intr_other_work)
3210 /*
3211  * ixgbe_intr_other_work - Process interrupt types other than tx/rx
3212  */
static void
ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	/*
	 * dispatch taskq to handle link status change
	 */
	if (eicr & IXGBE_EICR_LSC) {
		if ((ddi_taskq_dispatch(ixgbe->lsc_taskq,
		    ixgbe_driver_link_check, (void *)ixgbe, DDI_NOSLEEP))
		    != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "Fail to dispatch taskq");
		}
	}

	/*
	 * check for fan failure on adapters with fans
	 * (only reported for pre-82599 MACs; SDP1 means something
	 * different on 82599 — see ixgbe_sfp_check)
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		if (hw->mac.type < ixgbe_mac_82599EB) {
			ixgbe_log(ixgbe,
			    "Fan has stopped, replace the adapter\n");

			/* re-enable the interrupt, which was automasked */
			ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
		}
	}

	/*
	 * Do SFP check for 82599
	 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		if ((ddi_taskq_dispatch(ixgbe->lsc_taskq,
		    ixgbe_sfp_check, (void *)ixgbe,
		    DDI_NOSLEEP)) != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "No memory available to dispatch "
			    "taskq for SFP check");
		}
	}
}
3254 
3255 /*
3256  * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
3257  */
static uint_t
ixgbe_intr_legacy(void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_tx_ring_t *tx_ring;
	ixgbe_rx_ring_t *rx_ring;
	uint32_t eicr;
	mblk_t *mp;		/* rx chain delivered to MAC after unlock */
	boolean_t tx_reschedule;
	uint_t result;

	_NOTE(ARGUNUSED(arg2));

	/*
	 * gen_lock protects the ixgbe_state flags and the eims/eimc
	 * soft-state copies manipulated below.
	 */
	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	mp = NULL;
	tx_reschedule = B_FALSE;

	/*
	 * Any bit set in eicr: claim this interrupt
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (eicr) {
		/*
		 * For legacy interrupt, we have only one interrupt,
		 * so we have only one rx ring and one tx ring enabled.
		 */
		ASSERT(ixgbe->num_rx_rings == 1);
		ASSERT(ixgbe->num_tx_rings == 1);

		/*
		 * For legacy interrupt, rx rings[0] will use RTxQ[0].
		 */
		if (eicr & 0x1) {
			/*
			 * Mask the RTx queue cause while the ring is
			 * serviced here; the EIMS write at the bottom of
			 * this handler re-arms it.
			 * NOTE(review): eimc is OR-accumulated and never
			 * cleared in this function -- confirm intentional.
			 */
			ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
			ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
			/*
			 * Clean the rx descriptors
			 */
			rx_ring = &ixgbe->rx_rings[0];
			mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
		}

		/*
		 * For legacy interrupt, tx rings[0] will use RTxQ[1].
		 */
		if (eicr & 0x2) {
			/*
			 * Recycle the tx descriptors
			 */
			tx_ring = &ixgbe->tx_rings[0];
			tx_ring->tx_recycle(tx_ring);

			/*
			 * Schedule the re-transmit
			 */
			tx_reschedule = (tx_ring->reschedule &&
			    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
		}

		/* any interrupt type other than tx/rx */
		if (eicr & ixgbe->capab->other_intr) {
			if (hw->mac.type < ixgbe_mac_82599EB) {
				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
			}
			if (hw->mac.type == ixgbe_mac_82599EB) {
				/* 82599: explicitly mask other causes */
				ixgbe->eimc = IXGBE_82599_OTHER_INTR;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
			}
			ixgbe_intr_other_work(ixgbe, eicr);
			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		}

		mutex_exit(&ixgbe->gen_lock);

		result = DDI_INTR_CLAIMED;
	} else {
		mutex_exit(&ixgbe->gen_lock);

		/*
		 * No interrupt cause bits set: don't claim this interrupt.
		 */
		result = DDI_INTR_UNCLAIMED;
	}

	/*
	 * Re-enable the interrupts which were automasked.
	 * NOTE(review): this EIMS write also happens on the unclaimed
	 * path and after gen_lock is dropped -- confirm that racing
	 * updates of ixgbe->eims are acceptable here.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);

	/*
	 * Do the following work outside of the gen_lock
	 */
	if (mp != NULL) {
		/* pass the received chain up to the MAC layer */
		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
		    rx_ring->ring_gen_num);
	}

	if (tx_reschedule)  {
		tx_ring->reschedule = B_FALSE;
		mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
	}

	return (result);
}
3368 
3369 /*
3370  * ixgbe_intr_msi - Interrupt handler for MSI.
3371  */
static uint_t
ixgbe_intr_msi(void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eicr;

	_NOTE(ARGUNUSED(arg2));

	/* fetch the pending interrupt cause bits */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	/*
	 * For MSI interrupt, we have only one vector,
	 * so we have only one rx ring and one tx ring enabled.
	 */
	ASSERT(ixgbe->num_rx_rings == 1);
	ASSERT(ixgbe->num_tx_rings == 1);

	/*
	 * For MSI interrupt, rx rings[0] will use RTxQ[0].
	 */
	if (eicr & 0x1) {
		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
	}

	/*
	 * For MSI interrupt, tx rings[0] will use RTxQ[1].
	 */
	if (eicr & 0x2) {
		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
	}

	/* any interrupt type other than tx/rx */
	if (eicr & ixgbe->capab->other_intr) {
		/*
		 * gen_lock protects the eims/eimc soft-state copies and
		 * the work done in ixgbe_intr_other_work().
		 */
		mutex_enter(&ixgbe->gen_lock);
		if (hw->mac.type < ixgbe_mac_82599EB) {
			/* 82598: drop the automasked causes from eims */
			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		}
		if (hw->mac.type == ixgbe_mac_82599EB) {
			/* 82599: explicitly mask the other causes */
			ixgbe->eimc = IXGBE_82599_OTHER_INTR;
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
		}
		ixgbe_intr_other_work(ixgbe, eicr);
		ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		mutex_exit(&ixgbe->gen_lock);
	}

	/* re-enable the interrupts which were automasked */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);

	return (DDI_INTR_CLAIMED);
}
3424 
3425 /*
3426  * ixgbe_intr_msix - Interrupt handler for MSI-X.
3427  */
3428 static uint_t
3429 ixgbe_intr_msix(void *arg1, void *arg2)
3430 {
3431 	ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
3432 	ixgbe_t *ixgbe = vect->ixgbe;
3433 	struct ixgbe_hw *hw = &ixgbe->hw;
3434 	uint32_t eicr;
3435 	int r_idx = 0;
3436 
3437 	_NOTE(ARGUNUSED(arg2));
3438 
3439 	/*
3440 	 * Clean each rx ring that has its bit set in the map
3441 	 */
3442 	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
3443 	while (r_idx >= 0) {
3444 		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
3445 		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
3446 		    (ixgbe->num_rx_rings - 1));
3447 	}
3448 
3449 	/*
3450 	 * Clean each tx ring that has its bit set in the map
3451 	 */
3452 	r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
3453 	while (r_idx >= 0) {
3454 		ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
3455 		r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
3456 		    (ixgbe->num_tx_rings - 1));
3457 	}
3458 
3459 
3460 	/*
3461 	 * Clean other interrupt (link change) that has its bit set in the map
3462 	 */
3463 	if (BT_TEST(vect->other_map, 0) == 1) {
3464 		eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3465 
3466 		/*
3467 		 * Need check cause bits and only other causes will
3468 		 * be processed
3469 		 */
3470 		/* any interrupt type other than tx/rx */
3471 		if (eicr & ixgbe->capab->other_intr) {
3472 			if (hw->mac.type < ixgbe_mac_82599EB) {
3473 				mutex_enter(&ixgbe->gen_lock);
3474 				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3475 				ixgbe_intr_other_work(ixgbe, eicr);
3476 				mutex_exit(&ixgbe->gen_lock);
3477 			} else {
3478 				if (hw->mac.type == ixgbe_mac_82599EB) {
3479 					mutex_enter(&ixgbe->gen_lock);
3480 					ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
3481 					ixgbe_intr_other_work(ixgbe, eicr);
3482 					mutex_exit(&ixgbe->gen_lock);
3483 				}
3484 			}
3485 		}
3486 
3487 		/* re-enable the interrupts which were automasked */
3488 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3489 	}
3490 
3491 	return (DDI_INTR_CLAIMED);
3492 }
3493 
3494 /*
3495  * ixgbe_alloc_intrs - Allocate interrupts for the driver.
3496  *
 * Normal sequence is to try MSI-X; if not successful, try MSI;
3498  * if not successful, try Legacy.
3499  * ixgbe->intr_force can be used to force sequence to start with
3500  * any of the 3 types.
3501  * If MSI-X is not used, number of tx/rx rings is forced to 1.
3502  */
3503 static int
3504 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
3505 {
3506 	dev_info_t *devinfo;
3507 	int intr_types;
3508 	int rc;
3509 
3510 	devinfo = ixgbe->dip;
3511 
3512 	/*
3513 	 * Get supported interrupt types
3514 	 */
3515 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
3516 
3517 	if (rc != DDI_SUCCESS) {
3518 		ixgbe_log(ixgbe,
3519 		    "Get supported interrupt types failed: %d", rc);
3520 		return (IXGBE_FAILURE);
3521 	}
3522 	IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
3523 
3524 	ixgbe->intr_type = 0;
3525 
3526 	/*
3527 	 * Install MSI-X interrupts
3528 	 */
3529 	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
3530 	    (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
3531 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
3532 		if (rc == IXGBE_SUCCESS)
3533 			return (IXGBE_SUCCESS);
3534 
3535 		ixgbe_log(ixgbe,
3536 		    "Allocate MSI-X failed, trying MSI interrupts...");
3537 	}
3538 
3539 	/*
3540 	 * MSI-X not used, force rings and groups to 1
3541 	 */
3542 	ixgbe->num_rx_rings = 1;
3543 	ixgbe->num_rx_groups = 1;
3544 	ixgbe->num_tx_rings = 1;
3545 	ixgbe_log(ixgbe,
3546 	    "MSI-X not used, force rings and groups number to 1");
3547 
3548 	/*
3549 	 * Install MSI interrupts
3550 	 */
3551 	if ((intr_types & DDI_INTR_TYPE_MSI) &&
3552 	    (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
3553 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
3554 		if (rc == IXGBE_SUCCESS)
3555 			return (IXGBE_SUCCESS);
3556 
3557 		ixgbe_log(ixgbe,
3558 		    "Allocate MSI failed, trying Legacy interrupts...");
3559 	}
3560 
3561 	/*
3562 	 * Install legacy interrupts
3563 	 */
3564 	if (intr_types & DDI_INTR_TYPE_FIXED) {
3565 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
3566 		if (rc == IXGBE_SUCCESS)
3567 			return (IXGBE_SUCCESS);
3568 
3569 		ixgbe_log(ixgbe,
3570 		    "Allocate Legacy interrupts failed");
3571 	}
3572 
3573 	/*
3574 	 * If none of the 3 types succeeded, return failure
3575 	 */
3576 	return (IXGBE_FAILURE);
3577 }
3578 
3579 /*
3580  * ixgbe_alloc_intr_handles - Allocate interrupt handles.
3581  *
3582  * For legacy and MSI, only 1 handle is needed.  For MSI-X,
3583  * if fewer than 2 handles are available, return failure.
3584  * Upon success, this maps the vectors to rx and tx rings for
3585  * interrupts.
3586  */
static int
ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
{
	dev_info_t *devinfo;
	int request, count, avail, actual;
	int minimum;
	int rc;

	devinfo = ixgbe->dip;

	/*
	 * Decide how many handles to request and the minimum acceptable
	 * number, based on the interrupt type.
	 */
	switch (intr_type) {
	case DDI_INTR_TYPE_FIXED:
		request = 1;	/* Request 1 legacy interrupt handle */
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
		break;

	case DDI_INTR_TYPE_MSI:
		request = 1;	/* Request 1 MSI interrupt handle */
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
		break;

	case DDI_INTR_TYPE_MSIX:
		/*
		 * Best number of vectors for the adapter is
		 * # rx rings + # tx rings.
		 */
		request = ixgbe->num_rx_rings + ixgbe->num_tx_rings;
		if (request > ixgbe->capab->max_ring_vect)
			request = ixgbe->capab->max_ring_vect;
		minimum = 2;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
		break;

	default:
		ixgbe_log(ixgbe,
		    "invalid call to ixgbe_alloc_intr_handles(): %d\n",
		    intr_type);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d  minimum: %d",
	    request, minimum);

	/*
	 * Get number of supported interrupts
	 */
	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
	if ((rc != DDI_SUCCESS) || (count < minimum)) {
		ixgbe_log(ixgbe,
		    "Get interrupt number failed. Return: %d, count: %d",
		    rc, count);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);

	/*
	 * Get number of available interrupts
	 */
	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
		ixgbe_log(ixgbe,
		    "Get interrupt available number failed. "
		    "Return: %d, available: %d", rc, avail);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts available: %d", avail);

	/* scale the request down to what is actually available */
	if (avail < request) {
		ixgbe_log(ixgbe, "Request %d handles, %d available",
		    request, avail);
		request = avail;
	}

	actual = 0;
	ixgbe->intr_cnt = 0;

	/*
	 * Allocate an array of interrupt handles
	 */
	ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
	ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);

	rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
	    request, &actual, DDI_INTR_ALLOC_NORMAL);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe, "Allocate interrupts failed. "
		    "return: %d, request: %d, actual: %d",
		    rc, request, actual);
		goto alloc_handle_fail;
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);

	/* intr_cnt tells the cleanup path how many handles to free */
	ixgbe->intr_cnt = actual;

	/*
	 * Now we know the actual number of vectors.  Here we map the vector
	 * to other, rx rings and tx ring.
	 */
	if (actual < minimum) {
		ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
		    actual);
		goto alloc_handle_fail;
	}

	/*
	 * Get priority for first vector, assume remaining are all the same
	 */
	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt priority failed: %d", rc);
		goto alloc_handle_fail;
	}

	/* capabilities (e.g. DDI_INTR_FLAG_BLOCK) of the first vector */
	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt cap failed: %d", rc);
		goto alloc_handle_fail;
	}

	ixgbe->intr_type = intr_type;

	return (IXGBE_SUCCESS);

alloc_handle_fail:
	/* frees the intr_cnt allocated handles and the htable array */
	ixgbe_rem_intrs(ixgbe);

	return (IXGBE_FAILURE);
}
3718 
3719 /*
3720  * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
3721  *
3722  * Before adding the interrupt handlers, the interrupt vectors have
3723  * been allocated, and the rx/tx rings have also been allocated.
3724  */
3725 static int
3726 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
3727 {
3728 	int vector = 0;
3729 	int rc;
3730 
3731 	switch (ixgbe->intr_type) {
3732 	case DDI_INTR_TYPE_MSIX:
3733 		/*
3734 		 * Add interrupt handler for all vectors
3735 		 */
3736 		for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
3737 			/*
3738 			 * install pointer to vect_map[vector]
3739 			 */
3740 			rc = ddi_intr_add_handler(ixgbe->htable[vector],
3741 			    (ddi_intr_handler_t *)ixgbe_intr_msix,
3742 			    (void *)&ixgbe->vect_map[vector], NULL);
3743 
3744 			if (rc != DDI_SUCCESS) {
3745 				ixgbe_log(ixgbe,
3746 				    "Add rx interrupt handler failed. "
3747 				    "return: %d, vector: %d", rc, vector);
3748 				for (vector--; vector >= 0; vector--) {
3749 					(void) ddi_intr_remove_handler(
3750 					    ixgbe->htable[vector]);
3751 				}
3752 				return (IXGBE_FAILURE);
3753 			}
3754 		}
3755 
3756 		break;
3757 
3758 	case DDI_INTR_TYPE_MSI:
3759 		/*
3760 		 * Add interrupt handlers for the only vector
3761 		 */
3762 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
3763 		    (ddi_intr_handler_t *)ixgbe_intr_msi,
3764 		    (void *)ixgbe, NULL);
3765 
3766 		if (rc != DDI_SUCCESS) {
3767 			ixgbe_log(ixgbe,
3768 			    "Add MSI interrupt handler failed: %d", rc);
3769 			return (IXGBE_FAILURE);
3770 		}
3771 
3772 		break;
3773 
3774 	case DDI_INTR_TYPE_FIXED:
3775 		/*
3776 		 * Add interrupt handlers for the only vector
3777 		 */
3778 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
3779 		    (ddi_intr_handler_t *)ixgbe_intr_legacy,
3780 		    (void *)ixgbe, NULL);
3781 
3782 		if (rc != DDI_SUCCESS) {
3783 			ixgbe_log(ixgbe,
3784 			    "Add legacy interrupt handler failed: %d", rc);
3785 			return (IXGBE_FAILURE);
3786 		}
3787 
3788 		break;
3789 
3790 	default:
3791 		return (IXGBE_FAILURE);
3792 	}
3793 
3794 	return (IXGBE_SUCCESS);
3795 }
3796 
3797 #pragma inline(ixgbe_map_rxring_to_vector)
3798 /*
3799  * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
3800  */
3801 static void
3802 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
3803 {
3804 	/*
3805 	 * Set bit in map
3806 	 */
3807 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
3808 
3809 	/*
3810 	 * Count bits set
3811 	 */
3812 	ixgbe->vect_map[v_idx].rxr_cnt++;
3813 
3814 	/*
3815 	 * Remember bit position
3816 	 */
3817 	ixgbe->rx_rings[r_idx].intr_vector = v_idx;
3818 	ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
3819 }
3820 
3821 #pragma inline(ixgbe_map_txring_to_vector)
3822 /*
3823  * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
3824  */
3825 static void
3826 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
3827 {
3828 	/*
3829 	 * Set bit in map
3830 	 */
3831 	BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
3832 
3833 	/*
3834 	 * Count bits set
3835 	 */
3836 	ixgbe->vect_map[v_idx].txr_cnt++;
3837 
3838 	/*
3839 	 * Remember bit position
3840 	 */
3841 	ixgbe->tx_rings[t_idx].intr_vector = v_idx;
3842 	ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
3843 }
3844 
3845 /*
3846  * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
3847  * allocation register (IVAR).
3848  * cause:
3849  *   -1 : other cause
3850  *    0 : rx
3851  *    1 : tx
3852  */
static void
ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
    int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * 82598: four 8-bit entries are packed into each 32-bit
		 * IVAR register; the (cause * 64) term places tx entries
		 * after the 64 rx entries.  "Other" (-1) is handled as
		 * cause 0 on this mac.
		 */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (cause == -1) {
			cause = 0;
		}
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		/* clear then set the 8-bit entry for this slot */
		ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
		ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
		if (cause == -1) {
			/* other causes use the dedicated IVAR_MISC register */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/*
			 * tx or rx causes: two queue entries per IVAR
			 * register; the shift selects the 16-bit half by
			 * queue parity and the byte within it by cause.
			 */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;
	default:
		break;
	}
}
3897 
3898 /*
3899  * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
3900  * given interrupt vector allocation register (IVAR).
3901  * cause:
3902  *   -1 : other cause
3903  *    0 : rx
3904  *    1 : tx
3905  */
3906 static void
3907 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
3908 {
3909 	struct ixgbe_hw *hw = &ixgbe->hw;
3910 	u32 ivar, index;
3911 
3912 	switch (hw->mac.type) {
3913 	case ixgbe_mac_82598EB:
3914 		if (cause == -1) {
3915 			cause = 0;
3916 		}
3917 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
3918 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3919 		ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
3920 		    (intr_alloc_entry & 0x3)));
3921 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3922 		break;
3923 	case ixgbe_mac_82599EB:
3924 		if (cause == -1) {
3925 			/* other causes */
3926 			index = (intr_alloc_entry & 1) * 8;
3927 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3928 			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
3929 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3930 		} else {
3931 			/* tx or rx causes */
3932 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
3933 			ivar = IXGBE_READ_REG(hw,
3934 			    IXGBE_IVAR(intr_alloc_entry >> 1));
3935 			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
3936 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
3937 			    ivar);
3938 		}
3939 		break;
3940 	default:
3941 		break;
3942 	}
3943 }
3944 
3945 /*
 * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
3947  * given interrupt vector allocation register (IVAR).
3948  * cause:
3949  *   -1 : other cause
3950  *    0 : rx
3951  *    1 : tx
3952  */
3953 static void
3954 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
3955 {
3956 	struct ixgbe_hw *hw = &ixgbe->hw;
3957 	u32 ivar, index;
3958 
3959 	switch (hw->mac.type) {
3960 	case ixgbe_mac_82598EB:
3961 		if (cause == -1) {
3962 			cause = 0;
3963 		}
3964 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
3965 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3966 		ivar &= ~(IXGBE_IVAR_ALLOC_VAL<< (8 *
3967 		    (intr_alloc_entry & 0x3)));
3968 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3969 		break;
3970 	case ixgbe_mac_82599EB:
3971 		if (cause == -1) {
3972 			/* other causes */
3973 			index = (intr_alloc_entry & 1) * 8;
3974 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3975 			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
3976 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3977 		} else {
3978 			/* tx or rx causes */
3979 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
3980 			ivar = IXGBE_READ_REG(hw,
3981 			    IXGBE_IVAR(intr_alloc_entry >> 1));
3982 			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
3983 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
3984 			    ivar);
3985 		}
3986 		break;
3987 	default:
3988 		break;
3989 	}
3990 }
3991 
3992 /*
3993  * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
3994  *
3995  * For MSI-X, here will map rx interrupt, tx interrupt and other interrupt
3996  * to vector[0 - (intr_cnt -1)].
3997  */
3998 static int
3999 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
4000 {
4001 	int i, vector = 0;
4002 
4003 	/* initialize vector map */
4004 	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
4005 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4006 		ixgbe->vect_map[i].ixgbe = ixgbe;
4007 	}
4008 
4009 	/*
4010 	 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
4011 	 * tx rings[0] on RTxQ[1].
4012 	 */
4013 	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
4014 		ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
4015 		ixgbe_map_txring_to_vector(ixgbe, 0, 1);
4016 		return (IXGBE_SUCCESS);
4017 	}
4018 
4019 	/*
4020 	 * Interrupts/vectors mapping for MSI-X
4021 	 */
4022 
4023 	/*
4024 	 * Map other interrupt to vector 0,
4025 	 * Set bit in map and count the bits set.
4026 	 */
4027 	BT_SET(ixgbe->vect_map[vector].other_map, 0);
4028 	ixgbe->vect_map[vector].other_cnt++;
4029 	vector++;
4030 
4031 	/*
4032 	 * Map rx ring interrupts to vectors
4033 	 */
4034 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
4035 		ixgbe_map_rxring_to_vector(ixgbe, i, vector);
4036 		vector = (vector +1) % ixgbe->intr_cnt;
4037 	}
4038 
4039 	/*
4040 	 * Map tx ring interrupts to vectors
4041 	 */
4042 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
4043 		ixgbe_map_txring_to_vector(ixgbe, i, vector);
4044 		vector = (vector +1) % ixgbe->intr_cnt;
4045 	}
4046 
4047 	return (IXGBE_SUCCESS);
4048 }
4049 
4050 /*
4051  * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
4052  *
4053  * This relies on ring/vector mapping already set up in the
4054  * vect_map[] structures
4055  */
static void
ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_intr_vector_t *vect;	/* vector bitmap */
	int r_idx;	/* ring index */
	int v_idx;	/* vector index */

	/*
	 * Clear any previous entries
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * 82598 IVAR register count; presumably covers all queue
		 * entries plus the other-cause entry -- TODO confirm the
		 * value 25 against the 82598 datasheet.
		 */
		for (v_idx = 0; v_idx < 25; v_idx++)
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);

		break;
	case ixgbe_mac_82599EB:
		/* 82599: 64 queue IVARs plus the dedicated MISC IVAR */
		for (v_idx = 0; v_idx < 64; v_idx++)
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);

		break;
	default:
		break;
	}

	/*
	 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
	 * tx rings[0] will use RTxQ[1].
	 */
	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
		ixgbe_setup_ivar(ixgbe, 0, 0, 0);
		ixgbe_setup_ivar(ixgbe, 0, 1, 1);
		return;
	}

	/*
	 * For MSI-X interrupt, "Other" is always on vector[0].
	 */
	ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);

	/*
	 * For each interrupt vector, populate the IVAR table
	 */
	for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
		vect = &ixgbe->vect_map[v_idx];

		/*
		 * For each rx ring bit set
		 */
		r_idx = bt_getlowbit(vect->rx_map, 0,
		    (ixgbe->num_rx_rings - 1));

		while (r_idx >= 0) {
			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 0);
			r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
			    (ixgbe->num_rx_rings - 1));
		}

		/*
		 * For each tx ring bit set
		 */
		r_idx = bt_getlowbit(vect->tx_map, 0,
		    (ixgbe->num_tx_rings - 1));

		while (r_idx >= 0) {
			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
			r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
			    (ixgbe->num_tx_rings - 1));
		}
	}
}
4129 
4130 /*
4131  * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
4132  */
4133 static void
4134 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
4135 {
4136 	int i;
4137 	int rc;
4138 
4139 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4140 		rc = ddi_intr_remove_handler(ixgbe->htable[i]);
4141 		if (rc != DDI_SUCCESS) {
4142 			IXGBE_DEBUGLOG_1(ixgbe,
4143 			    "Remove intr handler failed: %d", rc);
4144 		}
4145 	}
4146 }
4147 
4148 /*
4149  * ixgbe_rem_intrs - Remove the allocated interrupts.
4150  */
4151 static void
4152 ixgbe_rem_intrs(ixgbe_t *ixgbe)
4153 {
4154 	int i;
4155 	int rc;
4156 
4157 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4158 		rc = ddi_intr_free(ixgbe->htable[i]);
4159 		if (rc != DDI_SUCCESS) {
4160 			IXGBE_DEBUGLOG_1(ixgbe,
4161 			    "Free intr failed: %d", rc);
4162 		}
4163 	}
4164 
4165 	kmem_free(ixgbe->htable, ixgbe->intr_size);
4166 	ixgbe->htable = NULL;
4167 }
4168 
4169 /*
4170  * ixgbe_enable_intrs - Enable all the ddi interrupts.
4171  */
4172 static int
4173 ixgbe_enable_intrs(ixgbe_t *ixgbe)
4174 {
4175 	int i;
4176 	int rc;
4177 
4178 	/*
4179 	 * Enable interrupts
4180 	 */
4181 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
4182 		/*
4183 		 * Call ddi_intr_block_enable() for MSI
4184 		 */
4185 		rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
4186 		if (rc != DDI_SUCCESS) {
4187 			ixgbe_log(ixgbe,
4188 			    "Enable block intr failed: %d", rc);
4189 			return (IXGBE_FAILURE);
4190 		}
4191 	} else {
4192 		/*
4193 		 * Call ddi_intr_enable() for Legacy/MSI non block enable
4194 		 */
4195 		for (i = 0; i < ixgbe->intr_cnt; i++) {
4196 			rc = ddi_intr_enable(ixgbe->htable[i]);
4197 			if (rc != DDI_SUCCESS) {
4198 				ixgbe_log(ixgbe,
4199 				    "Enable intr failed: %d", rc);
4200 				return (IXGBE_FAILURE);
4201 			}
4202 		}
4203 	}
4204 
4205 	return (IXGBE_SUCCESS);
4206 }
4207 
4208 /*
4209  * ixgbe_disable_intrs - Disable all the interrupts.
4210  */
4211 static int
4212 ixgbe_disable_intrs(ixgbe_t *ixgbe)
4213 {
4214 	int i;
4215 	int rc;
4216 
4217 	/*
4218 	 * Disable all interrupts
4219 	 */
4220 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
4221 		rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
4222 		if (rc != DDI_SUCCESS) {
4223 			ixgbe_log(ixgbe,
4224 			    "Disable block intr failed: %d", rc);
4225 			return (IXGBE_FAILURE);
4226 		}
4227 	} else {
4228 		for (i = 0; i < ixgbe->intr_cnt; i++) {
4229 			rc = ddi_intr_disable(ixgbe->htable[i]);
4230 			if (rc != DDI_SUCCESS) {
4231 				ixgbe_log(ixgbe,
4232 				    "Disable intr failed: %d", rc);
4233 				return (IXGBE_FAILURE);
4234 			}
4235 		}
4236 	}
4237 
4238 	return (IXGBE_SUCCESS);
4239 }
4240 
4241 /*
4242  * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
4243  */
static void
ixgbe_get_hw_state(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
	boolean_t link_up = B_FALSE;
	uint32_t pcs1g_anlp = 0;	/* PCS1G AN link-partner register */
	uint32_t pcs1g_ana = 0;		/* PCS1G AN advertisement register */

	ASSERT(mutex_owned(&ixgbe->gen_lock));
	ixgbe->param_lp_1000fdx_cap = 0;
	ixgbe->param_lp_100fdx_cap  = 0;

	/* check for link, don't wait */
	(void) ixgbe_check_link(hw, &speed, &link_up, false);
	if (link_up) {
		pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
		pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

		/*
		 * NOTE(review): both the 1000fdx and 100fdx link-partner
		 * capabilities test the same LPFD bit -- looks like a
		 * copy/paste; confirm the intended bit for 100fdx.
		 */
		ixgbe->param_lp_1000fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
		ixgbe->param_lp_100fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
	}

	/*
	 * pcs1g_ana stays 0 when the link is down, so the advertised
	 * capabilities below read as 0 in that case.
	 */
	ixgbe->param_adv_1000fdx_cap =
	    (pcs1g_ana & IXGBE_PCS1GANA_FDC)  ? 1 : 0;
	ixgbe->param_adv_100fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC)  ? 1 : 0;
}
4273 
4274 /*
4275  * ixgbe_get_driver_control - Notify that driver is in control of device.
4276  */
4277 static void
4278 ixgbe_get_driver_control(struct ixgbe_hw *hw)
4279 {
4280 	uint32_t ctrl_ext;
4281 
4282 	/*
4283 	 * Notify firmware that driver is in control of device
4284 	 */
4285 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4286 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
4287 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4288 }
4289 
4290 /*
4291  * ixgbe_release_driver_control - Notify that driver is no longer in control
4292  * of device.
4293  */
4294 static void
4295 ixgbe_release_driver_control(struct ixgbe_hw *hw)
4296 {
4297 	uint32_t ctrl_ext;
4298 
4299 	/*
4300 	 * Notify firmware that driver is no longer in control of device
4301 	 */
4302 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4303 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
4304 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4305 }
4306 
4307 /*
4308  * ixgbe_atomic_reserve - Atomic decrease operation.
4309  */
4310 int
4311 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
4312 {
4313 	uint32_t oldval;
4314 	uint32_t newval;
4315 
4316 	/*
4317 	 * ATOMICALLY
4318 	 */
4319 	do {
4320 		oldval = *count_p;
4321 		if (oldval < n)
4322 			return (-1);
4323 		newval = oldval - n;
4324 	} while (atomic_cas_32(count_p, oldval, newval) != oldval);
4325 
4326 	return (newval);
4327 }
4328 
4329 /*
4330  * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
4331  */
4332 static uint8_t *
4333 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
4334 {
4335 	uint8_t *addr = *upd_ptr;
4336 	uint8_t *new_ptr;
4337 
4338 	_NOTE(ARGUNUSED(hw));
4339 	_NOTE(ARGUNUSED(vmdq));
4340 
4341 	new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
4342 	*upd_ptr = new_ptr;
4343 	return (addr);
4344 }
4345 
4346 /*
4347  * FMA support
4348  */
4349 int
4350 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
4351 {
4352 	ddi_fm_error_t de;
4353 
4354 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
4355 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
4356 	return (de.fme_status);
4357 }
4358 
4359 int
4360 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
4361 {
4362 	ddi_fm_error_t de;
4363 
4364 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
4365 	return (de.fme_status);
4366 }
4367 
4368 /*
4369  * ixgbe_fm_error_cb - The IO fault service error handling callback function.
4370  */
4371 static int
4372 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
4373 {
4374 	_NOTE(ARGUNUSED(impl_data));
4375 	/*
4376 	 * as the driver can always deal with an error in any dma or
4377 	 * access handle, we can just return the fme_status value.
4378 	 */
4379 	pci_ereport_post(dip, err, NULL);
4380 	return (err->fme_status);
4381 }
4382 
4383 static void
4384 ixgbe_fm_init(ixgbe_t *ixgbe)
4385 {
4386 	ddi_iblock_cookie_t iblk;
4387 	int fma_acc_flag, fma_dma_flag;
4388 
4389 	/*
4390 	 * Only register with IO Fault Services if we have some capability
4391 	 */
4392 	if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
4393 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
4394 		fma_acc_flag = 1;
4395 	} else {
4396 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4397 		fma_acc_flag = 0;
4398 	}
4399 
4400 	if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
4401 		fma_dma_flag = 1;
4402 	} else {
4403 		fma_dma_flag = 0;
4404 	}
4405 
4406 	ixgbe_set_fma_flags(fma_acc_flag, fma_dma_flag);
4407 
4408 	if (ixgbe->fm_capabilities) {
4409 
4410 		/*
4411 		 * Register capabilities with IO Fault Services
4412 		 */
4413 		ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
4414 
4415 		/*
4416 		 * Initialize pci ereport capabilities if ereport capable
4417 		 */
4418 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
4419 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4420 			pci_ereport_setup(ixgbe->dip);
4421 
4422 		/*
4423 		 * Register error callback if error callback capable
4424 		 */
4425 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4426 			ddi_fm_handler_register(ixgbe->dip,
4427 			    ixgbe_fm_error_cb, (void*) ixgbe);
4428 	}
4429 }
4430 
4431 static void
4432 ixgbe_fm_fini(ixgbe_t *ixgbe)
4433 {
4434 	/*
4435 	 * Only unregister FMA capabilities if they are registered
4436 	 */
4437 	if (ixgbe->fm_capabilities) {
4438 
4439 		/*
4440 		 * Release any resources allocated by pci_ereport_setup()
4441 		 */
4442 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
4443 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4444 			pci_ereport_teardown(ixgbe->dip);
4445 
4446 		/*
4447 		 * Un-register error callback if error callback capable
4448 		 */
4449 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4450 			ddi_fm_handler_unregister(ixgbe->dip);
4451 
4452 		/*
4453 		 * Unregister from IO Fault Service
4454 		 */
4455 		ddi_fm_fini(ixgbe->dip);
4456 	}
4457 }
4458 
4459 void
4460 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
4461 {
4462 	uint64_t ena;
4463 	char buf[FM_MAX_CLASS];
4464 
4465 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
4466 	ena = fm_ena_generate(0, FM_ENA_FMT1);
4467 	if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
4468 		ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
4469 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
4470 	}
4471 }
4472 
4473 static int
4474 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
4475 {
4476 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
4477 
4478 	mutex_enter(&rx_ring->rx_lock);
4479 	rx_ring->ring_gen_num = mr_gen_num;
4480 	mutex_exit(&rx_ring->rx_lock);
4481 	return (0);
4482 }
4483 
4484 /*
4485  * Callback funtion for MAC layer to register all rings.
4486  */
4487 /* ARGSUSED */
4488 void
4489 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
4490     const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
4491 {
4492 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
4493 	mac_intr_t *mintr = &infop->mri_intr;
4494 
4495 	switch (rtype) {
4496 	case MAC_RING_TYPE_RX: {
4497 		ASSERT(rg_index == 0);
4498 		ASSERT(ring_index < ixgbe->num_rx_rings);
4499 
4500 		ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[ring_index];
4501 		rx_ring->ring_handle = rh;
4502 
4503 		infop->mri_driver = (mac_ring_driver_t)rx_ring;
4504 		infop->mri_start = ixgbe_ring_start;
4505 		infop->mri_stop = NULL;
4506 		infop->mri_poll = ixgbe_ring_rx_poll;
4507 
4508 		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
4509 		mintr->mi_enable = ixgbe_rx_ring_intr_enable;
4510 		mintr->mi_disable = ixgbe_rx_ring_intr_disable;
4511 
4512 		break;
4513 	}
4514 	case MAC_RING_TYPE_TX: {
4515 		ASSERT(rg_index == -1);
4516 		ASSERT(ring_index < ixgbe->num_tx_rings);
4517 
4518 		ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
4519 		tx_ring->ring_handle = rh;
4520 
4521 		infop->mri_driver = (mac_ring_driver_t)tx_ring;
4522 		infop->mri_start = NULL;
4523 		infop->mri_stop = NULL;
4524 		infop->mri_tx = ixgbe_ring_tx;
4525 
4526 		break;
4527 	}
4528 	default:
4529 		break;
4530 	}
4531 }
4532 
4533 /*
4534  * Callback funtion for MAC layer to register all groups.
4535  */
4536 void
4537 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
4538     mac_group_info_t *infop, mac_group_handle_t gh)
4539 {
4540 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
4541 
4542 	switch (rtype) {
4543 	case MAC_RING_TYPE_RX: {
4544 		ixgbe_rx_group_t *rx_group;
4545 
4546 		rx_group = &ixgbe->rx_groups[index];
4547 		rx_group->group_handle = gh;
4548 
4549 		infop->mgi_driver = (mac_group_driver_t)rx_group;
4550 		infop->mgi_start = NULL;
4551 		infop->mgi_stop = NULL;
4552 		infop->mgi_addmac = ixgbe_addmac;
4553 		infop->mgi_remmac = ixgbe_remmac;
4554 		infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
4555 
4556 		break;
4557 	}
4558 	case MAC_RING_TYPE_TX:
4559 		break;
4560 	default:
4561 		break;
4562 	}
4563 }
4564 
4565 /*
4566  * Enable interrupt on the specificed rx ring.
4567  */
4568 int
4569 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
4570 {
4571 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
4572 	ixgbe_t *ixgbe = rx_ring->ixgbe;
4573 	int r_idx = rx_ring->index;
4574 	int v_idx = rx_ring->intr_vector;
4575 
4576 	mutex_enter(&ixgbe->gen_lock);
4577 	ASSERT(BT_TEST(ixgbe->vect_map[v_idx].rx_map, r_idx) == 0);
4578 
4579 	/*
4580 	 * To enable interrupt by setting the VAL bit of given interrupt
4581 	 * vector allocation register (IVAR).
4582 	 */
4583 	ixgbe_enable_ivar(ixgbe, r_idx, 0);
4584 
4585 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
4586 
4587 	/*
4588 	 * To trigger a Rx interrupt to on this ring
4589 	 */
4590 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
4591 	IXGBE_WRITE_FLUSH(&ixgbe->hw);
4592 
4593 	mutex_exit(&ixgbe->gen_lock);
4594 
4595 	return (0);
4596 }
4597 
4598 /*
4599  * Disable interrupt on the specificed rx ring.
4600  */
4601 int
4602 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
4603 {
4604 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
4605 	ixgbe_t *ixgbe = rx_ring->ixgbe;
4606 	int r_idx = rx_ring->index;
4607 	int v_idx = rx_ring->intr_vector;
4608 
4609 	mutex_enter(&ixgbe->gen_lock);
4610 	ASSERT(BT_TEST(ixgbe->vect_map[v_idx].rx_map, r_idx) == 1);
4611 
4612 	/*
4613 	 * To disable interrupt by clearing the VAL bit of given interrupt
4614 	 * vector allocation register (IVAR).
4615 	 */
4616 	ixgbe_disable_ivar(ixgbe, r_idx, 0);
4617 
4618 	BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
4619 
4620 	mutex_exit(&ixgbe->gen_lock);
4621 
4622 	return (0);
4623 }
4624 
4625 /*
4626  * Add a mac address.
4627  */
4628 static int
4629 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
4630 {
4631 	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
4632 	ixgbe_t *ixgbe = rx_group->ixgbe;
4633 	int slot;
4634 	int err;
4635 
4636 	mutex_enter(&ixgbe->gen_lock);
4637 
4638 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4639 		mutex_exit(&ixgbe->gen_lock);
4640 		return (ECANCELED);
4641 	}
4642 
4643 	if (ixgbe->unicst_avail == 0) {
4644 		/* no slots available */
4645 		mutex_exit(&ixgbe->gen_lock);
4646 		return (ENOSPC);
4647 	}
4648 
4649 	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
4650 		if (ixgbe->unicst_addr[slot].mac.set == 0)
4651 			break;
4652 	}
4653 
4654 	ASSERT((slot >= 0) && (slot < ixgbe->unicst_total));
4655 
4656 	if ((err = ixgbe_unicst_set(ixgbe, mac_addr, slot)) == 0) {
4657 		ixgbe->unicst_addr[slot].mac.set = 1;
4658 		ixgbe->unicst_avail--;
4659 	}
4660 
4661 	mutex_exit(&ixgbe->gen_lock);
4662 
4663 	return (err);
4664 }
4665 
4666 /*
4667  * Remove a mac address.
4668  */
4669 static int
4670 ixgbe_remmac(void *arg, const uint8_t *mac_addr)
4671 {
4672 	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
4673 	ixgbe_t *ixgbe = rx_group->ixgbe;
4674 	int slot;
4675 	int err;
4676 
4677 	mutex_enter(&ixgbe->gen_lock);
4678 
4679 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4680 		mutex_exit(&ixgbe->gen_lock);
4681 		return (ECANCELED);
4682 	}
4683 
4684 	slot = ixgbe_unicst_find(ixgbe, mac_addr);
4685 	if (slot == -1) {
4686 		mutex_exit(&ixgbe->gen_lock);
4687 		return (EINVAL);
4688 	}
4689 
4690 	if (ixgbe->unicst_addr[slot].mac.set == 0) {
4691 		mutex_exit(&ixgbe->gen_lock);
4692 		return (EINVAL);
4693 	}
4694 
4695 	bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
4696 	if ((err = ixgbe_unicst_set(ixgbe,
4697 	    ixgbe->unicst_addr[slot].mac.addr, slot)) == 0) {
4698 		ixgbe->unicst_addr[slot].mac.set = 0;
4699 		ixgbe->unicst_avail++;
4700 	}
4701 
4702 	mutex_exit(&ixgbe->gen_lock);
4703 
4704 	return (err);
4705 }
4706