xref: /illumos-gate/usr/src/uts/common/io/ixgbe/ixgbe_main.c (revision 2e0fe3efe5f9d579d4e44b3532d8e342c68b40ca)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24  */
25 
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  */
29 
30 #include "ixgbe_sw.h"
31 
32 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
33 static char ixgbe_version[] = "ixgbe 1.1.6";
34 
35 /*
36  * Local function protoypes
37  */
38 static int ixgbe_register_mac(ixgbe_t *);
39 static int ixgbe_identify_hardware(ixgbe_t *);
40 static int ixgbe_regs_map(ixgbe_t *);
41 static void ixgbe_init_properties(ixgbe_t *);
42 static int ixgbe_init_driver_settings(ixgbe_t *);
43 static void ixgbe_init_locks(ixgbe_t *);
44 static void ixgbe_destroy_locks(ixgbe_t *);
45 static int ixgbe_init(ixgbe_t *);
46 static int ixgbe_chip_start(ixgbe_t *);
47 static void ixgbe_chip_stop(ixgbe_t *);
48 static int ixgbe_reset(ixgbe_t *);
49 static void ixgbe_tx_clean(ixgbe_t *);
50 static boolean_t ixgbe_tx_drain(ixgbe_t *);
51 static boolean_t ixgbe_rx_drain(ixgbe_t *);
52 static int ixgbe_alloc_rings(ixgbe_t *);
53 static void ixgbe_free_rings(ixgbe_t *);
54 static int ixgbe_alloc_rx_data(ixgbe_t *);
55 static void ixgbe_free_rx_data(ixgbe_t *);
56 static void ixgbe_setup_rings(ixgbe_t *);
57 static void ixgbe_setup_rx(ixgbe_t *);
58 static void ixgbe_setup_tx(ixgbe_t *);
59 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
60 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
61 static void ixgbe_setup_rss(ixgbe_t *);
62 static void ixgbe_setup_vmdq(ixgbe_t *);
63 static void ixgbe_setup_vmdq_rss(ixgbe_t *);
64 static void ixgbe_init_unicst(ixgbe_t *);
65 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
66 static void ixgbe_setup_multicst(ixgbe_t *);
67 static void ixgbe_get_hw_state(ixgbe_t *);
68 static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
69 static void ixgbe_get_conf(ixgbe_t *);
70 static void ixgbe_init_params(ixgbe_t *);
71 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
72 static void ixgbe_driver_link_check(ixgbe_t *);
73 static void ixgbe_sfp_check(void *);
74 static void ixgbe_link_timer(void *);
75 static void ixgbe_local_timer(void *);
76 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
77 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
78 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
79 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
80 static boolean_t is_valid_mac_addr(uint8_t *);
81 static boolean_t ixgbe_stall_check(ixgbe_t *);
82 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
83 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
84 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
85 static int ixgbe_alloc_intrs(ixgbe_t *);
86 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
87 static int ixgbe_add_intr_handlers(ixgbe_t *);
88 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
89 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
90 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
91 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
92 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
93 static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
94 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
95 static void ixgbe_setup_adapter_vector(ixgbe_t *);
96 static void ixgbe_rem_intr_handlers(ixgbe_t *);
97 static void ixgbe_rem_intrs(ixgbe_t *);
98 static int ixgbe_enable_intrs(ixgbe_t *);
99 static int ixgbe_disable_intrs(ixgbe_t *);
100 static uint_t ixgbe_intr_legacy(void *, void *);
101 static uint_t ixgbe_intr_msi(void *, void *);
102 static uint_t ixgbe_intr_msix(void *, void *);
103 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
104 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
105 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
106 static void ixgbe_get_driver_control(struct ixgbe_hw *);
107 static int ixgbe_addmac(void *, const uint8_t *);
108 static int ixgbe_remmac(void *, const uint8_t *);
109 static void ixgbe_release_driver_control(struct ixgbe_hw *);
110 
111 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
112 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
113 static int ixgbe_resume(dev_info_t *);
114 static int ixgbe_suspend(dev_info_t *);
115 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
116 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
117 static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
118 static int ixgbe_intr_cb_register(ixgbe_t *);
119 static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);
120 
121 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
122     const void *impl_data);
123 static void ixgbe_fm_init(ixgbe_t *);
124 static void ixgbe_fm_fini(ixgbe_t *);
125 
126 char *ixgbe_priv_props[] = {
127 	"_tx_copy_thresh",
128 	"_tx_recycle_thresh",
129 	"_tx_overload_thresh",
130 	"_tx_resched_thresh",
131 	"_rx_copy_thresh",
132 	"_rx_limit_per_intr",
133 	"_intr_throttling",
134 	"_adv_pause_cap",
135 	"_adv_asym_pause_cap",
136 	NULL
137 };
138 
139 #define	IXGBE_MAX_PRIV_PROPS \
140 	(sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))
141 
142 static struct cb_ops ixgbe_cb_ops = {
143 	nulldev,		/* cb_open */
144 	nulldev,		/* cb_close */
145 	nodev,			/* cb_strategy */
146 	nodev,			/* cb_print */
147 	nodev,			/* cb_dump */
148 	nodev,			/* cb_read */
149 	nodev,			/* cb_write */
150 	nodev,			/* cb_ioctl */
151 	nodev,			/* cb_devmap */
152 	nodev,			/* cb_mmap */
153 	nodev,			/* cb_segmap */
154 	nochpoll,		/* cb_chpoll */
155 	ddi_prop_op,		/* cb_prop_op */
156 	NULL,			/* cb_stream */
157 	D_MP | D_HOTPLUG,	/* cb_flag */
158 	CB_REV,			/* cb_rev */
159 	nodev,			/* cb_aread */
160 	nodev			/* cb_awrite */
161 };
162 
163 static struct dev_ops ixgbe_dev_ops = {
164 	DEVO_REV,		/* devo_rev */
165 	0,			/* devo_refcnt */
166 	NULL,			/* devo_getinfo */
167 	nulldev,		/* devo_identify */
168 	nulldev,		/* devo_probe */
169 	ixgbe_attach,		/* devo_attach */
170 	ixgbe_detach,		/* devo_detach */
171 	nodev,			/* devo_reset */
172 	&ixgbe_cb_ops,		/* devo_cb_ops */
173 	NULL,			/* devo_bus_ops */
174 	ddi_power,		/* devo_power */
175 	ddi_quiesce_not_supported,	/* devo_quiesce */
176 };
177 
178 static struct modldrv ixgbe_modldrv = {
179 	&mod_driverops,		/* Type of module.  This one is a driver */
180 	ixgbe_ident,		/* Discription string */
181 	&ixgbe_dev_ops		/* driver ops */
182 };
183 
184 static struct modlinkage ixgbe_modlinkage = {
185 	MODREV_1, &ixgbe_modldrv, NULL
186 };
187 
188 /*
189  * Access attributes for register mapping
190  */
191 ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
192 	DDI_DEVICE_ATTR_V1,
193 	DDI_STRUCTURE_LE_ACC,
194 	DDI_STRICTORDER_ACC,
195 	DDI_FLAGERR_ACC
196 };
197 
198 /*
199  * Loopback property
200  */
201 static lb_property_t lb_normal = {
202 	normal,	"normal", IXGBE_LB_NONE
203 };
204 
205 static lb_property_t lb_mac = {
206 	internal, "MAC", IXGBE_LB_INTERNAL_MAC
207 };
208 
209 static lb_property_t lb_external = {
210 	external, "External", IXGBE_LB_EXTERNAL
211 };
212 
213 #define	IXGBE_M_CALLBACK_FLAGS \
214 	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
215 
216 static mac_callbacks_t ixgbe_m_callbacks = {
217 	IXGBE_M_CALLBACK_FLAGS,
218 	ixgbe_m_stat,
219 	ixgbe_m_start,
220 	ixgbe_m_stop,
221 	ixgbe_m_promisc,
222 	ixgbe_m_multicst,
223 	NULL,
224 	NULL,
225 	NULL,
226 	ixgbe_m_ioctl,
227 	ixgbe_m_getcapab,
228 	NULL,
229 	NULL,
230 	ixgbe_m_setprop,
231 	ixgbe_m_getprop,
232 	ixgbe_m_propinfo
233 };
234 
235 /*
236  * Initialize capabilities of each supported adapter type
237  */
238 static adapter_info_t ixgbe_82598eb_cap = {
239 	64,		/* maximum number of rx queues */
240 	1,		/* minimum number of rx queues */
241 	64,		/* default number of rx queues */
242 	16,		/* maximum number of rx groups */
243 	1,		/* minimum number of rx groups */
244 	1,		/* default number of rx groups */
245 	32,		/* maximum number of tx queues */
246 	1,		/* minimum number of tx queues */
247 	8,		/* default number of tx queues */
248 	16366,		/* maximum MTU size */
249 	0xFFFF,		/* maximum interrupt throttle rate */
250 	0,		/* minimum interrupt throttle rate */
251 	200,		/* default interrupt throttle rate */
252 	18,		/* maximum total msix vectors */
253 	16,		/* maximum number of ring vectors */
254 	2,		/* maximum number of other vectors */
255 	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
256 	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
257 	| IXGBE_FLAG_RSS_CAPABLE
258 	| IXGBE_FLAG_VMDQ_CAPABLE)
259 };
260 
261 static adapter_info_t ixgbe_82599eb_cap = {
262 	128,		/* maximum number of rx queues */
263 	1,		/* minimum number of rx queues */
264 	128,		/* default number of rx queues */
265 	64,		/* maximum number of rx groups */
266 	1,		/* minimum number of rx groups */
267 	1,		/* default number of rx groups */
268 	128,		/* maximum number of tx queues */
269 	1,		/* minimum number of tx queues */
270 	8,		/* default number of tx queues */
271 	15500,		/* maximum MTU size */
272 	0xFF8,		/* maximum interrupt throttle rate */
273 	0,		/* minimum interrupt throttle rate */
274 	200,		/* default interrupt throttle rate */
275 	64,		/* maximum total msix vectors */
276 	16,		/* maximum number of ring vectors */
277 	2,		/* maximum number of other vectors */
278 	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
279 	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
280 	| IXGBE_FLAG_RSS_CAPABLE
281 	| IXGBE_FLAG_VMDQ_CAPABLE
282 	| IXGBE_FLAG_RSC_CAPABLE)
283 };
284 
285 /*
286  * Module Initialization Functions.
287  */
288 
289 int
290 _init(void)
291 {
292 	int status;
293 
294 	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
295 
296 	status = mod_install(&ixgbe_modlinkage);
297 
298 	if (status != DDI_SUCCESS) {
299 		mac_fini_ops(&ixgbe_dev_ops);
300 	}
301 
302 	return (status);
303 }
304 
305 int
306 _fini(void)
307 {
308 	int status;
309 
310 	status = mod_remove(&ixgbe_modlinkage);
311 
312 	if (status == DDI_SUCCESS) {
313 		mac_fini_ops(&ixgbe_dev_ops);
314 	}
315 
316 	return (status);
317 }
318 
319 int
320 _info(struct modinfo *modinfop)
321 {
322 	int status;
323 
324 	status = mod_info(&ixgbe_modlinkage, modinfop);
325 
326 	return (status);
327 }
328 
329 /*
330  * ixgbe_attach - Driver attach.
331  *
332  * This function is the device specific initialization entry
333  * point. This entry point is required and must be written.
334  * The DDI_ATTACH command must be provided in the attach entry
335  * point. When attach() is called with cmd set to DDI_ATTACH,
336  * all normal kernel services (such as kmem_alloc(9F)) are
337  * available for use by the driver.
338  *
339  * The attach() function will be called once for each instance
340  * of  the  device  on  the  system with cmd set to DDI_ATTACH.
341  * Until attach() succeeds, the only driver entry points which
342  * may be called are open(9E) and getinfo(9E).
343  */
344 static int
345 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
346 {
347 	ixgbe_t *ixgbe;
348 	struct ixgbe_osdep *osdep;
349 	struct ixgbe_hw *hw;
350 	int instance;
351 	char taskqname[32];
352 
353 	/*
354 	 * Check the command and perform corresponding operations
355 	 */
356 	switch (cmd) {
357 	default:
358 		return (DDI_FAILURE);
359 
360 	case DDI_RESUME:
361 		return (ixgbe_resume(devinfo));
362 
363 	case DDI_ATTACH:
364 		break;
365 	}
366 
367 	/* Get the device instance */
368 	instance = ddi_get_instance(devinfo);
369 
370 	/* Allocate memory for the instance data structure */
371 	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
372 
373 	ixgbe->dip = devinfo;
374 	ixgbe->instance = instance;
375 
376 	hw = &ixgbe->hw;
377 	osdep = &ixgbe->osdep;
378 	hw->back = osdep;
379 	osdep->ixgbe = ixgbe;
380 
381 	/* Attach the instance pointer to the dev_info data structure */
382 	ddi_set_driver_private(devinfo, ixgbe);
383 
384 	/*
385 	 * Initialize for fma support
386 	 */
387 	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
388 	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
389 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
390 	ixgbe_fm_init(ixgbe);
391 	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
392 
393 	/*
394 	 * Map PCI config space registers
395 	 */
396 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
397 		ixgbe_error(ixgbe, "Failed to map PCI configurations");
398 		goto attach_fail;
399 	}
400 	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
401 
402 	/*
403 	 * Identify the chipset family
404 	 */
405 	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
406 		ixgbe_error(ixgbe, "Failed to identify hardware");
407 		goto attach_fail;
408 	}
409 
410 	/*
411 	 * Map device registers
412 	 */
413 	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
414 		ixgbe_error(ixgbe, "Failed to map device registers");
415 		goto attach_fail;
416 	}
417 	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
418 
419 	/*
420 	 * Initialize driver parameters
421 	 */
422 	ixgbe_init_properties(ixgbe);
423 	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
424 
425 	/*
426 	 * Register interrupt callback
427 	 */
428 	if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
429 		ixgbe_error(ixgbe, "Failed to register interrupt callback");
430 		goto attach_fail;
431 	}
432 
433 	/*
434 	 * Allocate interrupts
435 	 */
436 	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
437 		ixgbe_error(ixgbe, "Failed to allocate interrupts");
438 		goto attach_fail;
439 	}
440 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
441 
442 	/*
443 	 * Allocate rx/tx rings based on the ring numbers.
444 	 * The actual numbers of rx/tx rings are decided by the number of
445 	 * allocated interrupt vectors, so we should allocate the rings after
446 	 * interrupts are allocated.
447 	 */
448 	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
449 		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
450 		goto attach_fail;
451 	}
452 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
453 
454 	/*
455 	 * Map rings to interrupt vectors
456 	 */
457 	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
458 		ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
459 		goto attach_fail;
460 	}
461 
462 	/*
463 	 * Add interrupt handlers
464 	 */
465 	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
466 		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
467 		goto attach_fail;
468 	}
469 	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
470 
471 	/*
472 	 * Create a taskq for sfp-change
473 	 */
474 	(void) sprintf(taskqname, "ixgbe%d_taskq", instance);
475 	if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
476 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
477 		ixgbe_error(ixgbe, "taskq_create failed");
478 		goto attach_fail;
479 	}
480 	ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;
481 
482 	/*
483 	 * Initialize driver parameters
484 	 */
485 	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
486 		ixgbe_error(ixgbe, "Failed to initialize driver settings");
487 		goto attach_fail;
488 	}
489 
490 	/*
491 	 * Initialize mutexes for this device.
492 	 * Do this before enabling the interrupt handler and
493 	 * register the softint to avoid the condition where
494 	 * interrupt handler can try using uninitialized mutex.
495 	 */
496 	ixgbe_init_locks(ixgbe);
497 	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
498 
499 	/*
500 	 * Initialize chipset hardware
501 	 */
502 	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
503 		ixgbe_error(ixgbe, "Failed to initialize adapter");
504 		goto attach_fail;
505 	}
506 	ixgbe->link_check_complete = B_FALSE;
507 	ixgbe->link_check_hrtime = gethrtime() +
508 	    (IXGBE_LINK_UP_TIME * 100000000ULL);
509 	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
510 
511 	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
512 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
513 		goto attach_fail;
514 	}
515 
516 	/*
517 	 * Initialize statistics
518 	 */
519 	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
520 		ixgbe_error(ixgbe, "Failed to initialize statistics");
521 		goto attach_fail;
522 	}
523 	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
524 
525 	/*
526 	 * Register the driver to the MAC
527 	 */
528 	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
529 		ixgbe_error(ixgbe, "Failed to register MAC");
530 		goto attach_fail;
531 	}
532 	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
533 	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
534 
535 	ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
536 	    IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
537 	if (ixgbe->periodic_id == 0) {
538 		ixgbe_error(ixgbe, "Failed to add the link check timer");
539 		goto attach_fail;
540 	}
541 	ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;
542 
543 	/*
544 	 * Now that mutex locks are initialized, and the chip is also
545 	 * initialized, enable interrupts.
546 	 */
547 	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
548 		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
549 		goto attach_fail;
550 	}
551 	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
552 
553 	ixgbe_log(ixgbe, "%s, %s", ixgbe_ident, ixgbe_version);
554 	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);
555 
556 	return (DDI_SUCCESS);
557 
558 attach_fail:
559 	ixgbe_unconfigure(devinfo, ixgbe);
560 	return (DDI_FAILURE);
561 }
562 
563 /*
564  * ixgbe_detach - Driver detach.
565  *
566  * The detach() function is the complement of the attach routine.
567  * If cmd is set to DDI_DETACH, detach() is used to remove  the
568  * state  associated  with  a  given  instance of a device node
569  * prior to the removal of that instance from the system.
570  *
571  * The detach() function will be called once for each  instance
572  * of the device for which there has been a successful attach()
573  * once there are no longer  any  opens  on  the  device.
574  *
575  * Interrupts routine are disabled, All memory allocated by this
576  * driver are freed.
577  */
578 static int
579 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
580 {
581 	ixgbe_t *ixgbe;
582 
583 	/*
584 	 * Check detach command
585 	 */
586 	switch (cmd) {
587 	default:
588 		return (DDI_FAILURE);
589 
590 	case DDI_SUSPEND:
591 		return (ixgbe_suspend(devinfo));
592 
593 	case DDI_DETACH:
594 		break;
595 	}
596 
597 	/*
598 	 * Get the pointer to the driver private data structure
599 	 */
600 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
601 	if (ixgbe == NULL)
602 		return (DDI_FAILURE);
603 
604 	/*
605 	 * If the device is still running, it needs to be stopped first.
606 	 * This check is necessary because under some specific circumstances,
607 	 * the detach routine can be called without stopping the interface
608 	 * first.
609 	 */
610 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
611 		atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
612 		mutex_enter(&ixgbe->gen_lock);
613 		ixgbe_stop(ixgbe, B_TRUE);
614 		mutex_exit(&ixgbe->gen_lock);
615 		/* Disable and stop the watchdog timer */
616 		ixgbe_disable_watchdog_timer(ixgbe);
617 	}
618 
619 	/*
620 	 * Check if there are still rx buffers held by the upper layer.
621 	 * If so, fail the detach.
622 	 */
623 	if (!ixgbe_rx_drain(ixgbe))
624 		return (DDI_FAILURE);
625 
626 	/*
627 	 * Do the remaining unconfigure routines
628 	 */
629 	ixgbe_unconfigure(devinfo, ixgbe);
630 
631 	return (DDI_SUCCESS);
632 }
633 
634 static void
635 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
636 {
637 	/*
638 	 * Disable interrupt
639 	 */
640 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
641 		(void) ixgbe_disable_intrs(ixgbe);
642 	}
643 
644 	/*
645 	 * remove the link check timer
646 	 */
647 	if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
648 		if (ixgbe->periodic_id != NULL) {
649 			ddi_periodic_delete(ixgbe->periodic_id);
650 			ixgbe->periodic_id = NULL;
651 		}
652 	}
653 
654 	/*
655 	 * Unregister MAC
656 	 */
657 	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
658 		(void) mac_unregister(ixgbe->mac_hdl);
659 	}
660 
661 	/*
662 	 * Free statistics
663 	 */
664 	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
665 		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
666 	}
667 
668 	/*
669 	 * Remove interrupt handlers
670 	 */
671 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
672 		ixgbe_rem_intr_handlers(ixgbe);
673 	}
674 
675 	/*
676 	 * Remove taskq for sfp-status-change
677 	 */
678 	if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
679 		ddi_taskq_destroy(ixgbe->sfp_taskq);
680 	}
681 
682 	/*
683 	 * Remove interrupts
684 	 */
685 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
686 		ixgbe_rem_intrs(ixgbe);
687 	}
688 
689 	/*
690 	 * Unregister interrupt callback handler
691 	 */
692 	(void) ddi_cb_unregister(ixgbe->cb_hdl);
693 
694 	/*
695 	 * Remove driver properties
696 	 */
697 	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
698 		(void) ddi_prop_remove_all(devinfo);
699 	}
700 
701 	/*
702 	 * Stop the chipset
703 	 */
704 	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
705 		mutex_enter(&ixgbe->gen_lock);
706 		ixgbe_chip_stop(ixgbe);
707 		mutex_exit(&ixgbe->gen_lock);
708 	}
709 
710 	/*
711 	 * Free register handle
712 	 */
713 	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
714 		if (ixgbe->osdep.reg_handle != NULL)
715 			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
716 	}
717 
718 	/*
719 	 * Free PCI config handle
720 	 */
721 	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
722 		if (ixgbe->osdep.cfg_handle != NULL)
723 			pci_config_teardown(&ixgbe->osdep.cfg_handle);
724 	}
725 
726 	/*
727 	 * Free locks
728 	 */
729 	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
730 		ixgbe_destroy_locks(ixgbe);
731 	}
732 
733 	/*
734 	 * Free the rx/tx rings
735 	 */
736 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
737 		ixgbe_free_rings(ixgbe);
738 	}
739 
740 	/*
741 	 * Unregister FMA capabilities
742 	 */
743 	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
744 		ixgbe_fm_fini(ixgbe);
745 	}
746 
747 	/*
748 	 * Free the driver data structure
749 	 */
750 	kmem_free(ixgbe, sizeof (ixgbe_t));
751 
752 	ddi_set_driver_private(devinfo, NULL);
753 }
754 
755 /*
756  * ixgbe_register_mac - Register the driver and its function pointers with
757  * the GLD interface.
758  */
759 static int
760 ixgbe_register_mac(ixgbe_t *ixgbe)
761 {
762 	struct ixgbe_hw *hw = &ixgbe->hw;
763 	mac_register_t *mac;
764 	int status;
765 
766 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
767 		return (IXGBE_FAILURE);
768 
769 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
770 	mac->m_driver = ixgbe;
771 	mac->m_dip = ixgbe->dip;
772 	mac->m_src_addr = hw->mac.addr;
773 	mac->m_callbacks = &ixgbe_m_callbacks;
774 	mac->m_min_sdu = 0;
775 	mac->m_max_sdu = ixgbe->default_mtu;
776 	mac->m_margin = VLAN_TAGSZ;
777 	mac->m_priv_props = ixgbe_priv_props;
778 	mac->m_v12n = MAC_VIRT_LEVEL1;
779 
780 	status = mac_register(mac, &ixgbe->mac_hdl);
781 
782 	mac_free(mac);
783 
784 	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
785 }
786 
787 /*
788  * ixgbe_identify_hardware - Identify the type of the chipset.
789  */
790 static int
791 ixgbe_identify_hardware(ixgbe_t *ixgbe)
792 {
793 	struct ixgbe_hw *hw = &ixgbe->hw;
794 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
795 
796 	/*
797 	 * Get the device id
798 	 */
799 	hw->vendor_id =
800 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
801 	hw->device_id =
802 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
803 	hw->revision_id =
804 	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
805 	hw->subsystem_device_id =
806 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
807 	hw->subsystem_vendor_id =
808 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
809 
810 	/*
811 	 * Set the mac type of the adapter based on the device id
812 	 */
813 	if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
814 		return (IXGBE_FAILURE);
815 	}
816 
817 	/*
818 	 * Install adapter capabilities
819 	 */
820 	switch (hw->mac.type) {
821 	case ixgbe_mac_82598EB:
822 		IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
823 		ixgbe->capab = &ixgbe_82598eb_cap;
824 
825 		if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
826 			ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
827 			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
828 		}
829 		ixgbe->capab->other_intr |= IXGBE_EICR_LSC;
830 
831 		break;
832 	case ixgbe_mac_82599EB:
833 		IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
834 		ixgbe->capab = &ixgbe_82599eb_cap;
835 
836 		ixgbe->capab->other_intr = (IXGBE_EICR_GPI_SDP1 |
837 		    IXGBE_EICR_GPI_SDP2 | IXGBE_EICR_LSC);
838 
839 		break;
840 	default:
841 		IXGBE_DEBUGLOG_1(ixgbe,
842 		    "adapter not supported in ixgbe_identify_hardware(): %d\n",
843 		    hw->mac.type);
844 		return (IXGBE_FAILURE);
845 	}
846 
847 	return (IXGBE_SUCCESS);
848 }
849 
850 /*
851  * ixgbe_regs_map - Map the device registers.
852  *
853  */
854 static int
855 ixgbe_regs_map(ixgbe_t *ixgbe)
856 {
857 	dev_info_t *devinfo = ixgbe->dip;
858 	struct ixgbe_hw *hw = &ixgbe->hw;
859 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
860 	off_t mem_size;
861 
862 	/*
863 	 * First get the size of device registers to be mapped.
864 	 */
865 	if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
866 	    != DDI_SUCCESS) {
867 		return (IXGBE_FAILURE);
868 	}
869 
870 	/*
871 	 * Call ddi_regs_map_setup() to map registers
872 	 */
873 	if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
874 	    (caddr_t *)&hw->hw_addr, 0,
875 	    mem_size, &ixgbe_regs_acc_attr,
876 	    &osdep->reg_handle)) != DDI_SUCCESS) {
877 		return (IXGBE_FAILURE);
878 	}
879 
880 	return (IXGBE_SUCCESS);
881 }
882 
883 /*
884  * ixgbe_init_properties - Initialize driver properties.
885  */
886 static void
887 ixgbe_init_properties(ixgbe_t *ixgbe)
888 {
889 	/*
890 	 * Get conf file properties, including link settings
891 	 * jumbo frames, ring number, descriptor number, etc.
892 	 */
893 	ixgbe_get_conf(ixgbe);
894 
895 	ixgbe_init_params(ixgbe);
896 }
897 
898 /*
899  * ixgbe_init_driver_settings - Initialize driver settings.
900  *
901  * The settings include hardware function pointers, bus information,
902  * rx/tx rings settings, link state, and any other parameters that
903  * need to be setup during driver initialization.
904  */
905 static int
906 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
907 {
908 	struct ixgbe_hw *hw = &ixgbe->hw;
909 	dev_info_t *devinfo = ixgbe->dip;
910 	ixgbe_rx_ring_t *rx_ring;
911 	ixgbe_rx_group_t *rx_group;
912 	ixgbe_tx_ring_t *tx_ring;
913 	uint32_t rx_size;
914 	uint32_t tx_size;
915 	uint32_t ring_per_group;
916 	int i;
917 
918 	/*
919 	 * Initialize chipset specific hardware function pointers
920 	 */
921 	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
922 		return (IXGBE_FAILURE);
923 	}
924 
925 	/*
926 	 * Get the system page size
927 	 */
928 	ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);
929 
930 	/*
931 	 * Set rx buffer size
932 	 *
933 	 * The IP header alignment room is counted in the calculation.
934 	 * The rx buffer size is in unit of 1K that is required by the
935 	 * chipset hardware.
936 	 */
937 	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
938 	ixgbe->rx_buf_size = ((rx_size >> 10) +
939 	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
940 
941 	/*
942 	 * Set tx buffer size
943 	 */
944 	tx_size = ixgbe->max_frame_size;
945 	ixgbe->tx_buf_size = ((tx_size >> 10) +
946 	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
947 
948 	/*
949 	 * Initialize rx/tx rings/groups parameters
950 	 */
951 	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
952 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
953 		rx_ring = &ixgbe->rx_rings[i];
954 		rx_ring->index = i;
955 		rx_ring->ixgbe = ixgbe;
956 		rx_ring->group_index = i / ring_per_group;
957 		rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
958 	}
959 
960 	for (i = 0; i < ixgbe->num_rx_groups; i++) {
961 		rx_group = &ixgbe->rx_groups[i];
962 		rx_group->index = i;
963 		rx_group->ixgbe = ixgbe;
964 	}
965 
966 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
967 		tx_ring = &ixgbe->tx_rings[i];
968 		tx_ring->index = i;
969 		tx_ring->ixgbe = ixgbe;
970 		if (ixgbe->tx_head_wb_enable)
971 			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
972 		else
973 			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
974 
975 		tx_ring->ring_size = ixgbe->tx_ring_size;
976 		tx_ring->free_list_size = ixgbe->tx_ring_size +
977 		    (ixgbe->tx_ring_size >> 1);
978 	}
979 
980 	/*
981 	 * Initialize values of interrupt throttling rate
982 	 */
983 	for (i = 1; i < MAX_INTR_VECTOR; i++)
984 		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
985 
986 	/*
987 	 * The initial link state should be "unknown"
988 	 */
989 	ixgbe->link_state = LINK_STATE_UNKNOWN;
990 
991 	return (IXGBE_SUCCESS);
992 }
993 
994 /*
995  * ixgbe_init_locks - Initialize locks.
996  */
997 static void
998 ixgbe_init_locks(ixgbe_t *ixgbe)
999 {
1000 	ixgbe_rx_ring_t *rx_ring;
1001 	ixgbe_tx_ring_t *tx_ring;
1002 	int i;
1003 
1004 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1005 		rx_ring = &ixgbe->rx_rings[i];
1006 		mutex_init(&rx_ring->rx_lock, NULL,
1007 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1008 	}
1009 
1010 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1011 		tx_ring = &ixgbe->tx_rings[i];
1012 		mutex_init(&tx_ring->tx_lock, NULL,
1013 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1014 		mutex_init(&tx_ring->recycle_lock, NULL,
1015 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1016 		mutex_init(&tx_ring->tcb_head_lock, NULL,
1017 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1018 		mutex_init(&tx_ring->tcb_tail_lock, NULL,
1019 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1020 	}
1021 
1022 	mutex_init(&ixgbe->gen_lock, NULL,
1023 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1024 
1025 	mutex_init(&ixgbe->watchdog_lock, NULL,
1026 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1027 }
1028 
1029 /*
1030  * ixgbe_destroy_locks - Destroy locks.
1031  */
1032 static void
1033 ixgbe_destroy_locks(ixgbe_t *ixgbe)
1034 {
1035 	ixgbe_rx_ring_t *rx_ring;
1036 	ixgbe_tx_ring_t *tx_ring;
1037 	int i;
1038 
1039 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1040 		rx_ring = &ixgbe->rx_rings[i];
1041 		mutex_destroy(&rx_ring->rx_lock);
1042 	}
1043 
1044 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1045 		tx_ring = &ixgbe->tx_rings[i];
1046 		mutex_destroy(&tx_ring->tx_lock);
1047 		mutex_destroy(&tx_ring->recycle_lock);
1048 		mutex_destroy(&tx_ring->tcb_head_lock);
1049 		mutex_destroy(&tx_ring->tcb_tail_lock);
1050 	}
1051 
1052 	mutex_destroy(&ixgbe->gen_lock);
1053 	mutex_destroy(&ixgbe->watchdog_lock);
1054 }
1055 
1056 static int
1057 ixgbe_resume(dev_info_t *devinfo)
1058 {
1059 	ixgbe_t *ixgbe;
1060 	int i;
1061 
1062 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1063 	if (ixgbe == NULL)
1064 		return (DDI_FAILURE);
1065 
1066 	mutex_enter(&ixgbe->gen_lock);
1067 
1068 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1069 		if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1070 			mutex_exit(&ixgbe->gen_lock);
1071 			return (DDI_FAILURE);
1072 		}
1073 
1074 		/*
1075 		 * Enable and start the watchdog timer
1076 		 */
1077 		ixgbe_enable_watchdog_timer(ixgbe);
1078 	}
1079 
1080 	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);
1081 
1082 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1083 		for (i = 0; i < ixgbe->num_tx_rings; i++) {
1084 			mac_tx_ring_update(ixgbe->mac_hdl,
1085 			    ixgbe->tx_rings[i].ring_handle);
1086 		}
1087 	}
1088 
1089 	mutex_exit(&ixgbe->gen_lock);
1090 
1091 	return (DDI_SUCCESS);
1092 }
1093 
1094 static int
1095 ixgbe_suspend(dev_info_t *devinfo)
1096 {
1097 	ixgbe_t *ixgbe;
1098 
1099 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1100 	if (ixgbe == NULL)
1101 		return (DDI_FAILURE);
1102 
1103 	mutex_enter(&ixgbe->gen_lock);
1104 
1105 	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
1106 	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1107 		mutex_exit(&ixgbe->gen_lock);
1108 		return (DDI_SUCCESS);
1109 	}
1110 	ixgbe_stop(ixgbe, B_FALSE);
1111 
1112 	mutex_exit(&ixgbe->gen_lock);
1113 
1114 	/*
1115 	 * Disable and stop the watchdog timer
1116 	 */
1117 	ixgbe_disable_watchdog_timer(ixgbe);
1118 
1119 	return (DDI_SUCCESS);
1120 }
1121 
1122 /*
1123  * ixgbe_init - Initialize the device.
1124  */
1125 static int
1126 ixgbe_init(ixgbe_t *ixgbe)
1127 {
1128 	struct ixgbe_hw *hw = &ixgbe->hw;
1129 
1130 	mutex_enter(&ixgbe->gen_lock);
1131 
1132 	/*
1133 	 * Reset chipset to put the hardware in a known state
1134 	 * before we try to do anything with the eeprom.
1135 	 */
1136 	if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
1137 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1138 		goto init_fail;
1139 	}
1140 
1141 	/*
1142 	 * Need to init eeprom before validating the checksum.
1143 	 */
1144 	if (ixgbe_init_eeprom_params(hw) < 0) {
1145 		ixgbe_error(ixgbe,
1146 		    "Unable to intitialize the eeprom interface.");
1147 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1148 		goto init_fail;
1149 	}
1150 
1151 	/*
1152 	 * NVM validation
1153 	 */
1154 	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1155 		/*
1156 		 * Some PCI-E parts fail the first check due to
1157 		 * the link being in sleep state.  Call it again,
1158 		 * if it fails a second time it's a real issue.
1159 		 */
1160 		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1161 			ixgbe_error(ixgbe,
1162 			    "Invalid NVM checksum. Please contact "
1163 			    "the vendor to update the NVM.");
1164 			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1165 			goto init_fail;
1166 		}
1167 	}
1168 
1169 	/*
1170 	 * Setup default flow control thresholds - enable/disable
1171 	 * & flow control type is controlled by ixgbe.conf
1172 	 */
1173 	hw->fc.high_water = DEFAULT_FCRTH;
1174 	hw->fc.low_water = DEFAULT_FCRTL;
1175 	hw->fc.pause_time = DEFAULT_FCPAUSE;
1176 	hw->fc.send_xon = B_TRUE;
1177 
1178 	/*
1179 	 * Initialize link settings
1180 	 */
1181 	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1182 
1183 	/*
1184 	 * Initialize the chipset hardware
1185 	 */
1186 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1187 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1188 		goto init_fail;
1189 	}
1190 
1191 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1192 		goto init_fail;
1193 	}
1194 
1195 	mutex_exit(&ixgbe->gen_lock);
1196 	return (IXGBE_SUCCESS);
1197 
1198 init_fail:
1199 	/*
1200 	 * Reset PHY
1201 	 */
1202 	(void) ixgbe_reset_phy(hw);
1203 
1204 	mutex_exit(&ixgbe->gen_lock);
1205 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1206 	return (IXGBE_FAILURE);
1207 }
1208 
1209 /*
1210  * ixgbe_chip_start - Initialize and start the chipset hardware.
1211  */
1212 static int
1213 ixgbe_chip_start(ixgbe_t *ixgbe)
1214 {
1215 	struct ixgbe_hw *hw = &ixgbe->hw;
1216 	int ret_val, i;
1217 
1218 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1219 
1220 	/*
1221 	 * Get the mac address
1222 	 * This function should handle SPARC case correctly.
1223 	 */
1224 	if (!ixgbe_find_mac_address(ixgbe)) {
1225 		ixgbe_error(ixgbe, "Failed to get the mac address");
1226 		return (IXGBE_FAILURE);
1227 	}
1228 
1229 	/*
1230 	 * Validate the mac address
1231 	 */
1232 	(void) ixgbe_init_rx_addrs(hw);
1233 	if (!is_valid_mac_addr(hw->mac.addr)) {
1234 		ixgbe_error(ixgbe, "Invalid mac address");
1235 		return (IXGBE_FAILURE);
1236 	}
1237 
1238 	/*
1239 	 * Configure/Initialize hardware
1240 	 */
1241 	ret_val = ixgbe_init_hw(hw);
1242 	if (ret_val != IXGBE_SUCCESS) {
1243 		if (ret_val == IXGBE_ERR_EEPROM_VERSION) {
1244 			ixgbe_error(ixgbe,
1245 			    "This 82599 device is pre-release and contains"
1246 			    " outdated firmware, please contact your hardware"
1247 			    " vendor for a replacement.");
1248 		} else {
1249 			ixgbe_error(ixgbe, "Failed to initialize hardware");
1250 			return (IXGBE_FAILURE);
1251 		}
1252 	}
1253 
1254 	/*
1255 	 * Re-enable relaxed ordering for performance.  It is disabled
1256 	 * by default in the hardware init.
1257 	 */
1258 	ixgbe_enable_relaxed_ordering(hw);
1259 
1260 	/*
1261 	 * Setup adapter interrupt vectors
1262 	 */
1263 	ixgbe_setup_adapter_vector(ixgbe);
1264 
1265 	/*
1266 	 * Initialize unicast addresses.
1267 	 */
1268 	ixgbe_init_unicst(ixgbe);
1269 
1270 	/*
1271 	 * Setup and initialize the mctable structures.
1272 	 */
1273 	ixgbe_setup_multicst(ixgbe);
1274 
1275 	/*
1276 	 * Set interrupt throttling rate
1277 	 */
1278 	for (i = 0; i < ixgbe->intr_cnt; i++) {
1279 		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
1280 	}
1281 
1282 	/*
1283 	 * Save the state of the phy
1284 	 */
1285 	ixgbe_get_hw_state(ixgbe);
1286 
1287 	/*
1288 	 * Make sure driver has control
1289 	 */
1290 	ixgbe_get_driver_control(hw);
1291 
1292 	return (IXGBE_SUCCESS);
1293 }
1294 
1295 /*
1296  * ixgbe_chip_stop - Stop the chipset hardware
1297  */
1298 static void
1299 ixgbe_chip_stop(ixgbe_t *ixgbe)
1300 {
1301 	struct ixgbe_hw *hw = &ixgbe->hw;
1302 
1303 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1304 
1305 	/*
1306 	 * Tell firmware driver is no longer in control
1307 	 */
1308 	ixgbe_release_driver_control(hw);
1309 
1310 	/*
1311 	 * Reset the chipset
1312 	 */
1313 	(void) ixgbe_reset_hw(hw);
1314 
1315 	/*
1316 	 * Reset PHY
1317 	 */
1318 	(void) ixgbe_reset_phy(hw);
1319 }
1320 
1321 /*
1322  * ixgbe_reset - Reset the chipset and re-start the driver.
1323  *
1324  * It involves stopping and re-starting the chipset,
1325  * and re-configuring the rx/tx rings.
1326  */
1327 static int
1328 ixgbe_reset(ixgbe_t *ixgbe)
1329 {
1330 	int i;
1331 
1332 	/*
1333 	 * Disable and stop the watchdog timer
1334 	 */
1335 	ixgbe_disable_watchdog_timer(ixgbe);
1336 
1337 	mutex_enter(&ixgbe->gen_lock);
1338 
1339 	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
1340 	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
1341 
1342 	ixgbe_stop(ixgbe, B_FALSE);
1343 
1344 	if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1345 		mutex_exit(&ixgbe->gen_lock);
1346 		return (IXGBE_FAILURE);
1347 	}
1348 
1349 	/*
1350 	 * After resetting, need to recheck the link status.
1351 	 */
1352 	ixgbe->link_check_complete = B_FALSE;
1353 	ixgbe->link_check_hrtime = gethrtime() +
1354 	    (IXGBE_LINK_UP_TIME * 100000000ULL);
1355 
1356 	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);
1357 
1358 	if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
1359 		for (i = 0; i < ixgbe->num_tx_rings; i++) {
1360 			mac_tx_ring_update(ixgbe->mac_hdl,
1361 			    ixgbe->tx_rings[i].ring_handle);
1362 		}
1363 	}
1364 
1365 	mutex_exit(&ixgbe->gen_lock);
1366 
1367 	/*
1368 	 * Enable and start the watchdog timer
1369 	 */
1370 	ixgbe_enable_watchdog_timer(ixgbe);
1371 
1372 	return (IXGBE_SUCCESS);
1373 }
1374 
1375 /*
1376  * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
1377  */
1378 static void
1379 ixgbe_tx_clean(ixgbe_t *ixgbe)
1380 {
1381 	ixgbe_tx_ring_t *tx_ring;
1382 	tx_control_block_t *tcb;
1383 	link_list_t pending_list;
1384 	uint32_t desc_num;
1385 	int i, j;
1386 
1387 	LINK_LIST_INIT(&pending_list);
1388 
1389 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1390 		tx_ring = &ixgbe->tx_rings[i];
1391 
1392 		mutex_enter(&tx_ring->recycle_lock);
1393 
1394 		/*
1395 		 * Clean the pending tx data - the pending packets in the
1396 		 * work_list that have no chances to be transmitted again.
1397 		 *
1398 		 * We must ensure the chipset is stopped or the link is down
1399 		 * before cleaning the transmit packets.
1400 		 */
1401 		desc_num = 0;
1402 		for (j = 0; j < tx_ring->ring_size; j++) {
1403 			tcb = tx_ring->work_list[j];
1404 			if (tcb != NULL) {
1405 				desc_num += tcb->desc_num;
1406 
1407 				tx_ring->work_list[j] = NULL;
1408 
1409 				ixgbe_free_tcb(tcb);
1410 
1411 				LIST_PUSH_TAIL(&pending_list, &tcb->link);
1412 			}
1413 		}
1414 
1415 		if (desc_num > 0) {
1416 			atomic_add_32(&tx_ring->tbd_free, desc_num);
1417 			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
1418 
1419 			/*
1420 			 * Reset the head and tail pointers of the tbd ring;
1421 			 * Reset the writeback head if it's enable.
1422 			 */
1423 			tx_ring->tbd_head = 0;
1424 			tx_ring->tbd_tail = 0;
1425 			if (ixgbe->tx_head_wb_enable)
1426 				*tx_ring->tbd_head_wb = 0;
1427 
1428 			IXGBE_WRITE_REG(&ixgbe->hw,
1429 			    IXGBE_TDH(tx_ring->index), 0);
1430 			IXGBE_WRITE_REG(&ixgbe->hw,
1431 			    IXGBE_TDT(tx_ring->index), 0);
1432 		}
1433 
1434 		mutex_exit(&tx_ring->recycle_lock);
1435 
1436 		/*
1437 		 * Add the tx control blocks in the pending list to
1438 		 * the free list.
1439 		 */
1440 		ixgbe_put_free_list(tx_ring, &pending_list);
1441 	}
1442 }
1443 
1444 /*
1445  * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1446  * transmitted.
1447  */
1448 static boolean_t
1449 ixgbe_tx_drain(ixgbe_t *ixgbe)
1450 {
1451 	ixgbe_tx_ring_t *tx_ring;
1452 	boolean_t done;
1453 	int i, j;
1454 
1455 	/*
1456 	 * Wait for a specific time to allow pending tx packets
1457 	 * to be transmitted.
1458 	 *
1459 	 * Check the counter tbd_free to see if transmission is done.
1460 	 * No lock protection is needed here.
1461 	 *
1462 	 * Return B_TRUE if all pending packets have been transmitted;
1463 	 * Otherwise return B_FALSE;
1464 	 */
1465 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1466 
1467 		done = B_TRUE;
1468 		for (j = 0; j < ixgbe->num_tx_rings; j++) {
1469 			tx_ring = &ixgbe->tx_rings[j];
1470 			done = done &&
1471 			    (tx_ring->tbd_free == tx_ring->ring_size);
1472 		}
1473 
1474 		if (done)
1475 			break;
1476 
1477 		msec_delay(1);
1478 	}
1479 
1480 	return (done);
1481 }
1482 
1483 /*
1484  * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1485  */
1486 static boolean_t
1487 ixgbe_rx_drain(ixgbe_t *ixgbe)
1488 {
1489 	boolean_t done = B_TRUE;
1490 	int i;
1491 
1492 	/*
1493 	 * Polling the rx free list to check if those rx buffers held by
1494 	 * the upper layer are released.
1495 	 *
1496 	 * Check the counter rcb_free to see if all pending buffers are
1497 	 * released. No lock protection is needed here.
1498 	 *
1499 	 * Return B_TRUE if all pending buffers have been released;
1500 	 * Otherwise return B_FALSE;
1501 	 */
1502 	for (i = 0; i < RX_DRAIN_TIME; i++) {
1503 		done = (ixgbe->rcb_pending == 0);
1504 
1505 		if (done)
1506 			break;
1507 
1508 		msec_delay(1);
1509 	}
1510 
1511 	return (done);
1512 }
1513 
1514 /*
1515  * ixgbe_start - Start the driver/chipset.
1516  */
1517 int
1518 ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
1519 {
1520 	int i;
1521 
1522 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1523 
1524 	if (alloc_buffer) {
1525 		if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
1526 			ixgbe_error(ixgbe,
1527 			    "Failed to allocate software receive rings");
1528 			return (IXGBE_FAILURE);
1529 		}
1530 
1531 		/* Allocate buffers for all the rx/tx rings */
1532 		if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
1533 			ixgbe_error(ixgbe, "Failed to allocate DMA resource");
1534 			return (IXGBE_FAILURE);
1535 		}
1536 
1537 		ixgbe->tx_ring_init = B_TRUE;
1538 	} else {
1539 		ixgbe->tx_ring_init = B_FALSE;
1540 	}
1541 
1542 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1543 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1544 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1545 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1546 
1547 	/*
1548 	 * Start the chipset hardware
1549 	 */
1550 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1551 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1552 		goto start_failure;
1553 	}
1554 
1555 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1556 		goto start_failure;
1557 	}
1558 
1559 	/*
1560 	 * Setup the rx/tx rings
1561 	 */
1562 	ixgbe_setup_rings(ixgbe);
1563 
1564 	/*
1565 	 * ixgbe_start() will be called when resetting, however if reset
1566 	 * happens, we need to clear the ERROR and STALL flags before
1567 	 * enabling the interrupts.
1568 	 */
1569 	atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR | IXGBE_STALL));
1570 
1571 	/*
1572 	 * Enable adapter interrupts
1573 	 * The interrupts must be enabled after the driver state is START
1574 	 */
1575 	ixgbe_enable_adapter_interrupts(ixgbe);
1576 
1577 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1578 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1579 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1580 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1581 
1582 	return (IXGBE_SUCCESS);
1583 
1584 start_failure:
1585 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1586 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1587 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1588 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1589 
1590 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1591 
1592 	return (IXGBE_FAILURE);
1593 }
1594 
1595 /*
1596  * ixgbe_stop - Stop the driver/chipset.
1597  */
1598 void
1599 ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
1600 {
1601 	int i;
1602 
1603 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1604 
1605 	/*
1606 	 * Disable the adapter interrupts
1607 	 */
1608 	ixgbe_disable_adapter_interrupts(ixgbe);
1609 
1610 	/*
1611 	 * Drain the pending tx packets
1612 	 */
1613 	(void) ixgbe_tx_drain(ixgbe);
1614 
1615 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1616 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1617 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1618 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1619 
1620 	/*
1621 	 * Stop the chipset hardware
1622 	 */
1623 	ixgbe_chip_stop(ixgbe);
1624 
1625 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1626 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1627 	}
1628 
1629 	/*
1630 	 * Clean the pending tx data/resources
1631 	 */
1632 	ixgbe_tx_clean(ixgbe);
1633 
1634 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1635 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1636 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1637 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1638 
1639 	if (ixgbe->link_state == LINK_STATE_UP) {
1640 		ixgbe->link_state = LINK_STATE_UNKNOWN;
1641 		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
1642 	}
1643 
1644 	if (free_buffer) {
1645 		/*
1646 		 * Release the DMA/memory resources of rx/tx rings
1647 		 */
1648 		ixgbe_free_dma(ixgbe);
1649 		ixgbe_free_rx_data(ixgbe);
1650 	}
1651 }
1652 
1653 /*
1654  * ixgbe_cbfunc - Driver interface for generic DDI callbacks
1655  */
1656 /* ARGSUSED */
1657 static int
1658 ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
1659     void *arg1, void *arg2)
1660 {
1661 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
1662 
1663 	switch (cbaction) {
1664 	/* IRM callback */
1665 	int count;
1666 	case DDI_CB_INTR_ADD:
1667 	case DDI_CB_INTR_REMOVE:
1668 		count = (int)(uintptr_t)cbarg;
1669 		ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
1670 		DTRACE_PROBE2(ixgbe__irm__callback, int, count,
1671 		    int, ixgbe->intr_cnt);
1672 		if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
1673 		    DDI_SUCCESS) {
1674 			ixgbe_error(ixgbe,
1675 			    "IRM CB: Failed to adjust interrupts");
1676 			goto cb_fail;
1677 		}
1678 		break;
1679 	default:
1680 		IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
1681 		    cbaction);
1682 		return (DDI_ENOTSUP);
1683 	}
1684 	return (DDI_SUCCESS);
1685 cb_fail:
1686 	return (DDI_FAILURE);
1687 }
1688 
1689 /*
1690  * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request.
1691  */
1692 static int
1693 ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
1694 {
1695 	int i, rc, actual;
1696 
1697 	if (count == 0)
1698 		return (DDI_SUCCESS);
1699 
1700 	if ((cbaction == DDI_CB_INTR_ADD &&
1701 	    ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
1702 	    (cbaction == DDI_CB_INTR_REMOVE &&
1703 	    ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
1704 		return (DDI_FAILURE);
1705 
1706 	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1707 		return (DDI_FAILURE);
1708 	}
1709 
1710 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1711 		mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
1712 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1713 		mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);
1714 
1715 	mutex_enter(&ixgbe->gen_lock);
1716 	ixgbe->ixgbe_state &= ~IXGBE_STARTED;
1717 	ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
1718 	ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
1719 	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
1720 
1721 	ixgbe_stop(ixgbe, B_FALSE);
1722 	/*
1723 	 * Disable interrupts
1724 	 */
1725 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1726 		rc = ixgbe_disable_intrs(ixgbe);
1727 		ASSERT(rc == IXGBE_SUCCESS);
1728 	}
1729 	ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;
1730 
1731 	/*
1732 	 * Remove interrupt handlers
1733 	 */
1734 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1735 		ixgbe_rem_intr_handlers(ixgbe);
1736 	}
1737 	ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;
1738 
1739 	/*
1740 	 * Clear vect_map
1741 	 */
1742 	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
1743 	switch (cbaction) {
1744 	case DDI_CB_INTR_ADD:
1745 		rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
1746 		    DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
1747 		    DDI_INTR_ALLOC_NORMAL);
1748 		if (rc != DDI_SUCCESS || actual != count) {
1749 			ixgbe_log(ixgbe, "Adjust interrupts failed."
1750 			    "return: %d, irm cb size: %d, actual: %d",
1751 			    rc, count, actual);
1752 			goto intr_adjust_fail;
1753 		}
1754 		ixgbe->intr_cnt += count;
1755 		break;
1756 
1757 	case DDI_CB_INTR_REMOVE:
1758 		for (i = ixgbe->intr_cnt - count;
1759 		    i < ixgbe->intr_cnt; i ++) {
1760 			rc = ddi_intr_free(ixgbe->htable[i]);
1761 			ixgbe->htable[i] = NULL;
1762 			if (rc != DDI_SUCCESS) {
1763 				ixgbe_log(ixgbe, "Adjust interrupts failed."
1764 				    "return: %d, irm cb size: %d, actual: %d",
1765 				    rc, count, actual);
1766 				goto intr_adjust_fail;
1767 			}
1768 		}
1769 		ixgbe->intr_cnt -= count;
1770 		break;
1771 	}
1772 
1773 	/*
1774 	 * Get priority for first vector, assume remaining are all the same
1775 	 */
1776 	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
1777 	if (rc != DDI_SUCCESS) {
1778 		ixgbe_log(ixgbe,
1779 		    "Get interrupt priority failed: %d", rc);
1780 		goto intr_adjust_fail;
1781 	}
1782 	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
1783 	if (rc != DDI_SUCCESS) {
1784 		ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc);
1785 		goto intr_adjust_fail;
1786 	}
1787 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
1788 
1789 	/*
1790 	 * Map rings to interrupt vectors
1791 	 */
1792 	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
1793 		ixgbe_error(ixgbe,
1794 		    "IRM CB: Failed to map interrupts to vectors");
1795 		goto intr_adjust_fail;
1796 	}
1797 
1798 	/*
1799 	 * Add interrupt handlers
1800 	 */
1801 	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
1802 		ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers");
1803 		goto intr_adjust_fail;
1804 	}
1805 	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
1806 
1807 	/*
1808 	 * Now that mutex locks are initialized, and the chip is also
1809 	 * initialized, enable interrupts.
1810 	 */
1811 	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
1812 		ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts");
1813 		goto intr_adjust_fail;
1814 	}
1815 	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
1816 	if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1817 		ixgbe_error(ixgbe, "IRM CB: Failed to start");
1818 		goto intr_adjust_fail;
1819 	}
1820 	ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST;
1821 	ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
1822 	ixgbe->ixgbe_state |= IXGBE_STARTED;
1823 	mutex_exit(&ixgbe->gen_lock);
1824 
1825 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1826 		mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle,
1827 		    ixgbe->htable[ixgbe->rx_rings[i].intr_vector]);
1828 	}
1829 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1830 		mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle,
1831 		    ixgbe->htable[ixgbe->tx_rings[i].intr_vector]);
1832 	}
1833 
1834 	/* Wakeup all Tx rings */
1835 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1836 		mac_tx_ring_update(ixgbe->mac_hdl,
1837 		    ixgbe->tx_rings[i].ring_handle);
1838 	}
1839 
1840 	IXGBE_DEBUGLOG_3(ixgbe,
1841 	    "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).",
1842 	    ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max);
1843 	return (DDI_SUCCESS);
1844 
1845 intr_adjust_fail:
1846 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1847 	mutex_exit(&ixgbe->gen_lock);
1848 	return (DDI_FAILURE);
1849 }
1850 
1851 /*
1852  * ixgbe_intr_cb_register - Register interrupt callback function.
1853  */
1854 static int
1855 ixgbe_intr_cb_register(ixgbe_t *ixgbe)
1856 {
1857 	if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc,
1858 	    ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) {
1859 		return (IXGBE_FAILURE);
1860 	}
1861 	IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered.");
1862 	return (IXGBE_SUCCESS);
1863 }
1864 
1865 /*
1866  * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
1867  */
1868 static int
1869 ixgbe_alloc_rings(ixgbe_t *ixgbe)
1870 {
1871 	/*
1872 	 * Allocate memory space for rx rings
1873 	 */
1874 	ixgbe->rx_rings = kmem_zalloc(
1875 	    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
1876 	    KM_NOSLEEP);
1877 
1878 	if (ixgbe->rx_rings == NULL) {
1879 		return (IXGBE_FAILURE);
1880 	}
1881 
1882 	/*
1883 	 * Allocate memory space for tx rings
1884 	 */
1885 	ixgbe->tx_rings = kmem_zalloc(
1886 	    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
1887 	    KM_NOSLEEP);
1888 
1889 	if (ixgbe->tx_rings == NULL) {
1890 		kmem_free(ixgbe->rx_rings,
1891 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1892 		ixgbe->rx_rings = NULL;
1893 		return (IXGBE_FAILURE);
1894 	}
1895 
1896 	/*
1897 	 * Allocate memory space for rx ring groups
1898 	 */
1899 	ixgbe->rx_groups = kmem_zalloc(
1900 	    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
1901 	    KM_NOSLEEP);
1902 
1903 	if (ixgbe->rx_groups == NULL) {
1904 		kmem_free(ixgbe->rx_rings,
1905 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1906 		kmem_free(ixgbe->tx_rings,
1907 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1908 		ixgbe->rx_rings = NULL;
1909 		ixgbe->tx_rings = NULL;
1910 		return (IXGBE_FAILURE);
1911 	}
1912 
1913 	return (IXGBE_SUCCESS);
1914 }
1915 
1916 /*
1917  * ixgbe_free_rings - Free the memory space of rx/tx rings.
1918  */
1919 static void
1920 ixgbe_free_rings(ixgbe_t *ixgbe)
1921 {
1922 	if (ixgbe->rx_rings != NULL) {
1923 		kmem_free(ixgbe->rx_rings,
1924 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1925 		ixgbe->rx_rings = NULL;
1926 	}
1927 
1928 	if (ixgbe->tx_rings != NULL) {
1929 		kmem_free(ixgbe->tx_rings,
1930 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1931 		ixgbe->tx_rings = NULL;
1932 	}
1933 
1934 	if (ixgbe->rx_groups != NULL) {
1935 		kmem_free(ixgbe->rx_groups,
1936 		    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
1937 		ixgbe->rx_groups = NULL;
1938 	}
1939 }
1940 
1941 static int
1942 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
1943 {
1944 	ixgbe_rx_ring_t *rx_ring;
1945 	int i;
1946 
1947 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1948 		rx_ring = &ixgbe->rx_rings[i];
1949 		if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
1950 			goto alloc_rx_rings_failure;
1951 	}
1952 	return (IXGBE_SUCCESS);
1953 
1954 alloc_rx_rings_failure:
1955 	ixgbe_free_rx_data(ixgbe);
1956 	return (IXGBE_FAILURE);
1957 }
1958 
1959 static void
1960 ixgbe_free_rx_data(ixgbe_t *ixgbe)
1961 {
1962 	ixgbe_rx_ring_t *rx_ring;
1963 	ixgbe_rx_data_t *rx_data;
1964 	int i;
1965 
1966 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1967 		rx_ring = &ixgbe->rx_rings[i];
1968 
1969 		mutex_enter(&ixgbe->rx_pending_lock);
1970 		rx_data = rx_ring->rx_data;
1971 
1972 		if (rx_data != NULL) {
1973 			rx_data->flag |= IXGBE_RX_STOPPED;
1974 
1975 			if (rx_data->rcb_pending == 0) {
1976 				ixgbe_free_rx_ring_data(rx_data);
1977 				rx_ring->rx_data = NULL;
1978 			}
1979 		}
1980 
1981 		mutex_exit(&ixgbe->rx_pending_lock);
1982 	}
1983 }
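
/*
 * Note on the logic above: rx_data is freed immediately only when no rx
 * control blocks are still loaned upstream (rcb_pending == 0).  Otherwise
 * the IXGBE_RX_STOPPED flag is left set so that the free is completed
 * later, once the last outstanding buffer is returned.
 */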
1984 
1985 /*
1986  * ixgbe_setup_rings - Setup rx/tx rings.
1987  */
1988 static void
1989 ixgbe_setup_rings(ixgbe_t *ixgbe)
1990 {
1991 	/*
1992 	 * Setup the rx/tx rings, including the following:
1993 	 *
1994 	 * 1. Setup the descriptor ring and the control block buffers;
1995 	 * 2. Initialize necessary registers for receive/transmit;
1996 	 * 3. Initialize software pointers/parameters for receive/transmit;
1997 	 */
1998 	ixgbe_setup_rx(ixgbe);
1999 
2000 	ixgbe_setup_tx(ixgbe);
2001 }
2002 
2003 static void
2004 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
2005 {
2006 	ixgbe_t *ixgbe = rx_ring->ixgbe;
2007 	ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
2008 	struct ixgbe_hw *hw = &ixgbe->hw;
2009 	rx_control_block_t *rcb;
2010 	union ixgbe_adv_rx_desc	*rbd;
2011 	uint32_t size;
2012 	uint32_t buf_low;
2013 	uint32_t buf_high;
2014 	uint32_t reg_val;
2015 	int i;
2016 
2017 	ASSERT(mutex_owned(&rx_ring->rx_lock));
2018 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2019 
2020 	for (i = 0; i < ixgbe->rx_ring_size; i++) {
2021 		rcb = rx_data->work_list[i];
2022 		rbd = &rx_data->rbd_ring[i];
2023 
2024 		rbd->read.pkt_addr = rcb->rx_buf.dma_address;
2025 		rbd->read.hdr_addr = 0;
2026 	}
2027 
2028 	/*
2029 	 * Initialize the length register
2030 	 */
2031 	size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
2032 	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size);
2033 
2034 	/*
2035 	 * Initialize the base address registers
2036 	 */
2037 	buf_low = (uint32_t)rx_data->rbd_area.dma_address;
2038 	buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
2039 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high);
2040 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low);
2041 
2042 	/*
2043 	 * Setup head & tail pointers
2044 	 */
2045 	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index),
2046 	    rx_data->ring_size - 1);
2047 	IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2048 
2049 	rx_data->rbd_next = 0;
2050 	rx_data->lro_first = 0;
2051 
2052 	/*
2053 	 * Setup the Receive Descriptor Control Register (RXDCTL)
2054 	 * PTHRESH=32 descriptors (half the internal cache)
2055 	 * HTHRESH=0 descriptors (to minimize latency on fetch)
2056 	 * WTHRESH defaults to 1 (writeback each descriptor)
2057 	 */
2058 	reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2059 	reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */
2060 
2061 	/* Not a valid value for 82599 */
2062 	if (hw->mac.type < ixgbe_mac_82599EB) {
2063 		reg_val |= 0x0020;	/* pthresh */
2064 	}
2065 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2066 
2067 	if (hw->mac.type == ixgbe_mac_82599EB) {
2068 		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2069 		reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2070 		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2071 	}
2072 
2073 	/*
2074 	 * Setup the Split and Replication Receive Control Register.
2075 	 * Set the rx buffer size and the advanced descriptor type.
2076 	 */
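	/*
	 * Illustrative note (assuming the shared-code definition of
	 * IXGBE_SRRCTL_BSIZEPKT_SHIFT, which expresses BSIZEPKT in 1 KB
	 * units): a 2 KB rx buffer, for instance, yields a BSIZEPKT
	 * field value of 2 below.
	 */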
2077 	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2078 	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2079 	reg_val |= IXGBE_SRRCTL_DROP_EN;
2080 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2081 }
2082 
2083 static void
2084 ixgbe_setup_rx(ixgbe_t *ixgbe)
2085 {
2086 	ixgbe_rx_ring_t *rx_ring;
2087 	struct ixgbe_hw *hw = &ixgbe->hw;
2088 	uint32_t reg_val;
2089 	uint32_t ring_mapping;
2090 	uint32_t i, index;
2091 	uint32_t psrtype_rss_bit;
2092 
2093 	/* PSRTYPE must be configured for 82599 */
2094 	if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
2095 	    ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
2096 		reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2097 		    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2098 		reg_val |= IXGBE_PSRTYPE_L2HDR;
2099 		reg_val |= 0x80000000;
2100 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
2101 	} else {
2102 		if (ixgbe->num_rx_groups > 32) {
2103 			psrtype_rss_bit = 0x20000000;
2104 		} else {
2105 			psrtype_rss_bit = 0x40000000;
2106 		}
2107 		for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) {
2108 			reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2109 			    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2110 			reg_val |= IXGBE_PSRTYPE_L2HDR;
2111 			reg_val |= psrtype_rss_bit;
2112 			IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val);
2113 		}
2114 	}
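	/*
	 * The bare constants above program PSRTYPE bits 31:29 (the RQPL
	 * field per the 82599 datasheet), which select how many RSS
	 * queues are used per pool: 0x40000000 allows four queues per
	 * pool (32 or fewer groups), 0x20000000 allows two (more than
	 * 32 groups), and 0x80000000 is used in the single-pool case.
	 */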
2115 
2116 	/*
2117 	 * Set filter control in FCTRL to accept broadcast packets and do
2118 	 * not pass pause frames to host.  Flow control settings are already
2119 	 * in this register, so preserve them.
2120 	 */
2121 	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2122 	reg_val |= IXGBE_FCTRL_BAM;	/* broadcast accept mode */
2123 	reg_val |= IXGBE_FCTRL_DPF;	/* discard pause frames */
2124 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
2125 
2126 	/*
2127 	 * Hardware checksum settings
2128 	 */
2129 	if (ixgbe->rx_hcksum_enable) {
2130 		reg_val = IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
2131 		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
2132 	}
2133 
2134 	/*
2135 	 * Setup VMDq and RSS for multiple receive queues
2136 	 */
2137 	switch (ixgbe->classify_mode) {
2138 	case IXGBE_CLASSIFY_RSS:
2139 		/*
2140 		 * One group; only RSS is needed when more than
2141 		 * one ring is enabled.
2142 		 */
2143 		ixgbe_setup_rss(ixgbe);
2144 		break;
2145 
2146 	case IXGBE_CLASSIFY_VMDQ:
2147 		/*
2148 		 * Multiple groups, each group has one ring,
2149 		 * only VMDq is needed.
2150 		 */
2151 		ixgbe_setup_vmdq(ixgbe);
2152 		break;
2153 
2154 	case IXGBE_CLASSIFY_VMDQ_RSS:
2155 		/*
2156 		 * Multiple groups and multiple rings, both
2157 		 * VMDq and RSS are needed.
2158 		 */
2159 		ixgbe_setup_vmdq_rss(ixgbe);
2160 		break;
2161 
2162 	default:
2163 		break;
2164 	}
2165 
2166 	/*
2167 	 * Enable the receive unit.  This must be done after filter
2168 	 * control is set in FCTRL.
2169 	 */
2170 	reg_val = (IXGBE_RXCTRL_RXEN	/* Enable Receive Unit */
2171 	    | IXGBE_RXCTRL_DMBYPS);	/* descriptor monitor bypass */
2172 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
2173 
2174 	/*
2175 	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
2176 	 */
2177 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
2178 		rx_ring = &ixgbe->rx_rings[i];
2179 		ixgbe_setup_rx_ring(rx_ring);
2180 	}
2181 
2182 	/*
2183 	 * Setup the per-ring statistics mapping.
2184 	 */
2185 	ring_mapping = 0;
2186 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
2187 		index = ixgbe->rx_rings[i].hw_index;
2188 		ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2));
2189 		ring_mapping |= (i & 0xF) << (8 * (index & 0x3));
2190 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping);
2191 	}
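	/*
	 * An example of the packing above: each RQSMR register carries
	 * four 8-bit map fields, so a ring with hw_index 5 lands in
	 * RQSMR(1) (5 >> 2) at byte lane 1 (5 & 0x3), i.e. bits 15:8 of
	 * that register.
	 */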
2192 
2193 	/*
2194 	 * The Max Frame Size in MHADD/MAXFRS will be internally increased
2195 	 * by four bytes if the packet has a VLAN field, so it includes the
2196 	 * MTU, ethernet header and frame check sequence.
2197 	 * Register is MAXFRS in 82599.
2198 	 */
2199 	reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
2200 	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
2201 	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
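	/*
	 * For example, the default MTU of 1500 programs an MFS of
	 * 1500 + 14 (ether header) + 4 (FCS) = 1518 bytes, placed in the
	 * upper bits of MHADD selected by IXGBE_MHADD_MFS_SHIFT.
	 */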
2202 
2203 	/*
2204 	 * Setup Jumbo Frame enable bit
2205 	 */
2206 	if (ixgbe->default_mtu > ETHERMTU) {
2207 		reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2208 		reg_val |= IXGBE_HLREG0_JUMBOEN;
2209 		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2210 	}
2211 
2212 	/*
2213 	 * Setup RSC for multiple receive queues.
2214 	 */
2215 	if (ixgbe->lro_enable) {
2216 		for (i = 0; i < ixgbe->num_rx_rings; i++) {
2217 			/*
2218 			 * Make sure rx_buf_size * MAXDESC is not greater
2219 			 * than 65535.
2220 			 * Intel recommends 4 for MAXDESC field value.
2221 			 */
2222 			reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
2223 			reg_val |= IXGBE_RSCCTL_RSCEN;
2224 			if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
2225 				reg_val |= IXGBE_RSCCTL_MAXDESC_1;
2226 			else
2227 				reg_val |= IXGBE_RSCCTL_MAXDESC_4;
2228 			IXGBE_WRITE_REG(hw,  IXGBE_RSCCTL(i), reg_val);
2229 		}
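		/*
		 * With a 16 KB buffer, a MAXDESC of 4 would permit a
		 * 64 KB RSC aggregation and exceed the 65535-byte limit
		 * noted above, which is why MAXDESC_1 is used for 16 KB
		 * buffers.
		 */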
2230 
2231 		reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
2232 		reg_val |= IXGBE_RSCDBU_RSCACKDIS;
2233 		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);
2234 
2235 		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2236 		reg_val |= IXGBE_RDRXCTL_RSCACKC;
2237 		reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
2238 		reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2239 
2240 		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2241 	}
2242 }
2243 
2244 static void
2245 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
2246 {
2247 	ixgbe_t *ixgbe = tx_ring->ixgbe;
2248 	struct ixgbe_hw *hw = &ixgbe->hw;
2249 	uint32_t size;
2250 	uint32_t buf_low;
2251 	uint32_t buf_high;
2252 	uint32_t reg_val;
2253 
2254 	ASSERT(mutex_owned(&tx_ring->tx_lock));
2255 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2256 
2257 	/*
2258 	 * Initialize the length register
2259 	 */
2260 	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
2261 	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);
2262 
2263 	/*
2264 	 * Initialize the base address registers
2265 	 */
2266 	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
2267 	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
2268 	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
2269 	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);
2270 
2271 	/*
2272 	 * Setup head & tail pointers
2273 	 */
2274 	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
2275 	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);
2276 
2277 	/*
2278 	 * Setup head write-back
2279 	 */
2280 	if (ixgbe->tx_head_wb_enable) {
2281 		/*
2282 		 * The memory of the head write-back is allocated using
2283 		 * the extra tbd beyond the tail of the tbd ring.
2284 		 */
2285 		tx_ring->tbd_head_wb = (uint32_t *)
2286 		    ((uintptr_t)tx_ring->tbd_area.address + size);
2287 		*tx_ring->tbd_head_wb = 0;
2288 
2289 		buf_low = (uint32_t)
2290 		    (tx_ring->tbd_area.dma_address + size);
2291 		buf_high = (uint32_t)
2292 		    ((tx_ring->tbd_area.dma_address + size) >> 32);
2293 
2294 		/* Set the head write-back enable bit */
2295 		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2296 
2297 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2298 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2299 
2300 		/*
2301 		 * Turn off relaxed ordering for head write back or it will
2302 		 * cause problems with the tx recycling
2303 		 */
2304 		reg_val = IXGBE_READ_REG(hw,
2305 		    IXGBE_DCA_TXCTRL(tx_ring->index));
2306 		reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2307 		IXGBE_WRITE_REG(hw,
2308 		    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2309 	} else {
2310 		tx_ring->tbd_head_wb = NULL;
2311 	}
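	/*
	 * Note: or'ing the enable bit into buf_low above is safe because
	 * the write-back area directly follows a whole ring of 16-byte
	 * descriptors in an aligned DMA area, so the low-order address
	 * bits that TDWBAL reserves for the enable flag are zero.
	 */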
2312 
2313 	tx_ring->tbd_head = 0;
2314 	tx_ring->tbd_tail = 0;
2315 	tx_ring->tbd_free = tx_ring->ring_size;
2316 
2317 	if (ixgbe->tx_ring_init == B_TRUE) {
2318 		tx_ring->tcb_head = 0;
2319 		tx_ring->tcb_tail = 0;
2320 		tx_ring->tcb_free = tx_ring->free_list_size;
2321 	}
2322 
2323 	/*
2324 	 * Initialize the s/w context structure
2325 	 */
2326 	bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2327 }
2328 
2329 static void
2330 ixgbe_setup_tx(ixgbe_t *ixgbe)
2331 {
2332 	struct ixgbe_hw *hw = &ixgbe->hw;
2333 	ixgbe_tx_ring_t *tx_ring;
2334 	uint32_t reg_val;
2335 	uint32_t ring_mapping;
2336 	int i;
2337 
2338 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2339 		tx_ring = &ixgbe->tx_rings[i];
2340 		ixgbe_setup_tx_ring(tx_ring);
2341 	}
2342 
2343 	/*
2344 	 * Setup the per-ring statistics mapping.
2345 	 */
2346 	ring_mapping = 0;
2347 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2348 		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2349 		if ((i & 0x3) == 0x3) {
2350 			if (hw->mac.type >= ixgbe_mac_82599EB) {
2351 				IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2352 				    ring_mapping);
2353 			} else {
2354 				IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2355 				    ring_mapping);
2356 			}
2357 			ring_mapping = 0;
2358 		}
2359 	}
2360 	if ((i & 0x3) != 0x3) {
2361 		if (hw->mac.type >= ixgbe_mac_82599EB)
2362 			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2363 		else
2364 			IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2365 	}
2366 
2367 	/*
2368 	 * Enable CRC appending and TX padding (for short tx frames)
2369 	 */
2370 	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2371 	reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2372 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2373 
2374 	/*
2375 	 * Enable DMA for 82599 parts.
2376 	 */
2377 	if (hw->mac.type == ixgbe_mac_82599EB) {
2378 		/* DMATXCTL.TE must be set after all Tx config is complete */
2379 		reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2380 		reg_val |= IXGBE_DMATXCTL_TE;
2381 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2382 	}
2383 
2384 	/*
2385 	 * Enable the tx queues.
2386 	 * For 82599 this must be done after DMATXCTL.TE is set.
2387 	 */
2388 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2389 		tx_ring = &ixgbe->tx_rings[i];
2390 		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2391 		reg_val |= IXGBE_TXDCTL_ENABLE;
2392 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2393 	}
2394 }
2395 
2396 /*
2397  * ixgbe_setup_rss - Setup receive-side scaling feature.
2398  */
2399 static void
2400 ixgbe_setup_rss(ixgbe_t *ixgbe)
2401 {
2402 	struct ixgbe_hw *hw = &ixgbe->hw;
2403 	uint32_t i, mrqc, rxcsum;
2404 	uint32_t random;
2405 	uint32_t reta;
2406 	uint32_t ring_per_group;
2407 
2408 	/*
2409 	 * Fill out redirection table
2410 	 */
2411 	reta = 0;
2412 	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2413 
2414 	for (i = 0; i < 128; i++) {
2415 		reta = (reta << 8) | (i % ring_per_group) |
2416 		    ((i % ring_per_group) << 4);
2417 		if ((i & 3) == 3)
2418 			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2419 	}
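	/*
	 * Four 8-bit redirection entries are accumulated in 'reta' and
	 * flushed every fourth iteration, so the 128-entry table fills
	 * exactly 32 RETA registers.
	 */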
2420 
2421 	/*
2422 	 * Fill out hash function seeds with a random constant
2423 	 */
2424 	for (i = 0; i < 10; i++) {
2425 		(void) random_get_pseudo_bytes((uint8_t *)&random,
2426 		    sizeof (uint32_t));
2427 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2428 	}
2429 
2430 	/*
2431 	 * Enable RSS & perform hash on these packet types
2432 	 */
2433 	mrqc = IXGBE_MRQC_RSSEN |
2434 	    IXGBE_MRQC_RSS_FIELD_IPV4 |
2435 	    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2436 	    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2437 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2438 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2439 	    IXGBE_MRQC_RSS_FIELD_IPV6 |
2440 	    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2441 	    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2442 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2443 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2444 
2445 	/*
2446 	 * Disable Packet Checksum to enable RSS for multiple receive queues.
2447 	 * It is an adapter hardware limitation that Packet Checksum is
2448 	 * mutually exclusive with RSS.
2449 	 */
2450 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2451 	rxcsum |= IXGBE_RXCSUM_PCSD;
2452 	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2453 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2454 }
2455 
2456 /*
2457  * ixgbe_setup_vmdq - Setup MAC classification feature
2458  */
2459 static void
2460 ixgbe_setup_vmdq(ixgbe_t *ixgbe)
2461 {
2462 	struct ixgbe_hw *hw = &ixgbe->hw;
2463 	uint32_t vmdctl, i, vtctl;
2464 
2465 	/*
2466 	 * Setup the VMDq Control register, enable VMDq based on
2467 	 * packet destination MAC address:
2468 	 */
2469 	switch (hw->mac.type) {
2470 	case ixgbe_mac_82598EB:
2471 		/*
2472 		 * VMDq Enable = 1;
2473 		 * VMDq Filter = 0; MAC filtering
2474 		 * Default VMDq output index = 0;
2475 		 */
2476 		vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2477 		IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2478 		break;
2479 
2480 	case ixgbe_mac_82599EB:
2481 		/*
2482 		 * Enable VMDq-only.
2483 		 */
2484 		vmdctl = IXGBE_MRQC_VMDQEN;
2485 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2486 
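		/*
		 * Clear the pool-selection bitmap (MPSAR) for every RAR
		 * entry; the intended pool bit is set later, when a
		 * unicast address is actually programmed for a group.
		 */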
2487 		for (i = 0; i < hw->mac.num_rar_entries; i++) {
2488 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2489 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2490 		}
2491 
2492 		/*
2493 		 * Enable Virtualization and Replication.
2494 		 */
2495 		vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2496 		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2497 
2498 		/*
2499 		 * Enable receiving packets to all VFs
2500 		 */
2501 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2502 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2503 
2504 		break;
2505 
2506 	default:
2507 		break;
2508 	}
2509 }
2510 
2511 /*
2512  * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature.
2513  */
2514 static void
2515 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
2516 {
2517 	struct ixgbe_hw *hw = &ixgbe->hw;
2518 	uint32_t i, mrqc, rxcsum;
2519 	uint32_t random;
2520 	uint32_t reta;
2521 	uint32_t ring_per_group;
2522 	uint32_t vmdctl, vtctl;
2523 
2524 	/*
2525 	 * Fill out redirection table
2526 	 */
2527 	reta = 0;
2528 	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2529 	for (i = 0; i < 128; i++) {
2530 		reta = (reta << 8) | (i % ring_per_group) |
2531 		    ((i % ring_per_group) << 4);
2532 		if ((i & 3) == 3)
2533 			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2534 	}
2535 
2536 	/*
2537 	 * Fill out hash function seeds with a random constant
2538 	 */
2539 	for (i = 0; i < 10; i++) {
2540 		(void) random_get_pseudo_bytes((uint8_t *)&random,
2541 		    sizeof (uint32_t));
2542 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2543 	}
2544 
2545 	/*
2546 	 * Enable and setup RSS and VMDq
2547 	 */
2548 	switch (hw->mac.type) {
2549 	case ixgbe_mac_82598EB:
2550 		/*
2551 		 * Enable RSS & Setup RSS Hash functions
2552 		 */
2553 		mrqc = IXGBE_MRQC_RSSEN |
2554 		    IXGBE_MRQC_RSS_FIELD_IPV4 |
2555 		    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2556 		    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2557 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2558 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2559 		    IXGBE_MRQC_RSS_FIELD_IPV6 |
2560 		    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2561 		    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2562 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2563 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2564 
2565 		/*
2566 		 * Enable and Setup VMDq
2567 		 * VMDq Filter = 0; MAC filtering
2568 		 * Default VMDq output index = 0;
2569 		 */
2570 		vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2571 		IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2572 		break;
2573 
2574 	case ixgbe_mac_82599EB:
2575 		/*
2576 		 * Enable RSS & Setup RSS Hash functions
2577 		 */
2578 		mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2579 		    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2580 		    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2581 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2582 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2583 		    IXGBE_MRQC_RSS_FIELD_IPV6 |
2584 		    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2585 		    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2586 		    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2587 
2588 		/*
2589 		 * Enable VMDq+RSS.
2590 		 */
2591 		if (ixgbe->num_rx_groups > 32)  {
2592 			mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2593 		} else {
2594 			mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2595 		}
2596 
2597 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2598 
2599 		for (i = 0; i < hw->mac.num_rar_entries; i++) {
2600 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2601 			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2602 		}
2603 		break;
2604 
2605 	default:
2606 		break;
2607 
2608 	}
2609 
2610 	/*
2611 	 * Disable Packet Checksum to enable RSS for multiple receive queues.
2612 	 * It is an adapter hardware limitation that Packet Checksum is
2613 	 * mutually exclusive with RSS.
2614 	 */
2615 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2616 	rxcsum |= IXGBE_RXCSUM_PCSD;
2617 	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2618 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2619 
2620 	if (hw->mac.type == ixgbe_mac_82599EB) {
2621 		/*
2622 		 * Enable Virtualization and Replication.
2623 		 */
2624 		vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2625 		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2626 
2627 		/*
2628 		 * Enable receiving packets to all VFs
2629 		 */
2630 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2631 		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2632 	}
2633 }
2634 
2635 /*
2636  * ixgbe_init_unicst - Initialize the unicast addresses.
2637  */
2638 static void
2639 ixgbe_init_unicst(ixgbe_t *ixgbe)
2640 {
2641 	struct ixgbe_hw *hw = &ixgbe->hw;
2642 	uint8_t *mac_addr;
2643 	int slot;
2644 	/*
2645 	 * Here we should consider two situations:
2646 	 *
2647 	 * 1. The chipset is initialized for the first time:
2648 	 *    clear all the multiple unicast addresses.
2649 	 *
2650 	 * 2. The chipset is reset:
2651 	 *    recover the multiple unicast addresses from the
2652 	 *    software data structure to the RAR registers.
2653 	 */
2654 	if (!ixgbe->unicst_init) {
2655 		/*
2656 		 * Initialize the multiple unicast addresses
2657 		 */
2658 		ixgbe->unicst_total = hw->mac.num_rar_entries;
2659 		ixgbe->unicst_avail = ixgbe->unicst_total;
2660 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2661 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2662 			bzero(mac_addr, ETHERADDRL);
2663 			(void) ixgbe_set_rar(hw, slot, mac_addr, 0, 0);
2664 			ixgbe->unicst_addr[slot].mac.set = 0;
2665 		}
2666 		ixgbe->unicst_init = B_TRUE;
2667 	} else {
2668 		/* Re-configure the RAR registers */
2669 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2670 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2671 			if (ixgbe->unicst_addr[slot].mac.set == 1) {
2672 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2673 				    ixgbe->unicst_addr[slot].mac.group_index,
2674 				    IXGBE_RAH_AV);
2675 			} else {
2676 				bzero(mac_addr, ETHERADDRL);
2677 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2678 				    0, 0);
2679 			}
2680 		}
2681 	}
2682 }
2683 
2684 /*
2685  * ixgbe_unicst_find - Find the slot for the specified unicast address
2686  */
2687 int
2688 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
2689 {
2690 	int slot;
2691 
2692 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2693 
2694 	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2695 		if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
2696 		    mac_addr, ETHERADDRL) == 0)
2697 			return (slot);
2698 	}
2699 
2700 	return (-1);
2701 }
2702 
2703 /*
2704  * ixgbe_multicst_add - Add a multicast address.
2705  */
2706 int
2707 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2708 {
2709 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2710 
2711 	if ((multiaddr[0] & 01) == 0) {
2712 		return (EINVAL);
2713 	}
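	/*
	 * The check above tests the I/G bit: the least significant bit
	 * of the first octet must be set for a valid group (multicast)
	 * address.
	 */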
2714 
2715 	if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2716 		return (ENOENT);
2717 	}
2718 
2719 	bcopy(multiaddr,
2720 	    &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2721 	ixgbe->mcast_count++;
2722 
2723 	/*
2724 	 * Update the multicast table in the hardware
2725 	 */
2726 	ixgbe_setup_multicst(ixgbe);
2727 
2728 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2729 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2730 		return (EIO);
2731 	}
2732 
2733 	return (0);
2734 }
2735 
2736 /*
2737  * ixgbe_multicst_remove - Remove a multicast address.
2738  */
2739 int
2740 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2741 {
2742 	int i;
2743 
2744 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2745 
2746 	for (i = 0; i < ixgbe->mcast_count; i++) {
2747 		if (bcmp(multiaddr, &ixgbe->mcast_table[i],
2748 		    ETHERADDRL) == 0) {
2749 			for (i++; i < ixgbe->mcast_count; i++) {
2750 				ixgbe->mcast_table[i - 1] =
2751 				    ixgbe->mcast_table[i];
2752 			}
2753 			ixgbe->mcast_count--;
2754 			break;
2755 		}
2756 	}
2757 
2758 	/*
2759 	 * Update the multicast table in the hardware
2760 	 */
2761 	ixgbe_setup_multicst(ixgbe);
2762 
2763 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2764 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2765 		return (EIO);
2766 	}
2767 
2768 	return (0);
2769 }
2770 
2771 /*
2772  * ixgbe_setup_multicst - Setup multicast data structures.
2773  *
2774  * This routine initializes all of the multicast related structures
2775  * and saves them in the hardware registers.
2776  */
2777 static void
2778 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2779 {
2780 	uint8_t *mc_addr_list;
2781 	uint32_t mc_addr_count;
2782 	struct ixgbe_hw *hw = &ixgbe->hw;
2783 
2784 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2785 
2786 	ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2787 
2788 	mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2789 	mc_addr_count = ixgbe->mcast_count;
2790 
2791 	/*
2792 	 * Update the multicast addresses to the MTA registers
2793 	 */
2794 	(void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2795 	    ixgbe_mc_table_itr);
2796 }
2797 
2798 /*
2799  * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
2800  *
2801  * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
2802  * Different chipsets may have different allowed configuration of vmdq and rss.
2803  */
2804 static void
2805 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
2806 {
2807 	struct ixgbe_hw *hw = &ixgbe->hw;
2808 	uint32_t ring_per_group;
2809 
2810 	switch (hw->mac.type) {
2811 	case ixgbe_mac_82598EB:
2812 		/*
2813 		 * 82598 supports the following combination:
2814 		 * vmdq no. x rss no.
2815 		 * [5..16]  x 1
2816 		 * [1..4]   x [1..16]
2817 		 * However, 8 rss queues per pool (vmdq) are sufficient for
2818 		 * most cases.
2819 		 */
2820 		ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2821 		if (ixgbe->num_rx_groups > 4) {
2822 			ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2823 		} else {
2824 			ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2825 			    min(8, ring_per_group);
2826 		}
2827 
2828 		break;
2829 
2830 	case ixgbe_mac_82599EB:
2831 		/*
2832 		 * 82599 supports the following combination:
2833 		 * vmdq no. x rss no.
2834 		 * [33..64] x [1..2]
2835 		 * [2..32]  x [1..4]
2836 		 * 1 x [1..16]
2837 		 * However, 8 rss queues per pool (vmdq) are sufficient for
2838 		 * most cases.
2839 		 */
2840 		ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2841 		if (ixgbe->num_rx_groups == 1) {
2842 			ixgbe->num_rx_rings = min(8, ring_per_group);
2843 		} else if (ixgbe->num_rx_groups <= 32) {
2844 			ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2845 			    min(4, ring_per_group);
2846 		} else if (ixgbe->num_rx_groups <= 64) {
2847 			ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2848 			    min(2, ring_per_group);
2849 		}
2850 
2851 		break;
2852 
2853 	default:
2854 		break;
2855 	}
2856 
2857 	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2858 
2859 	if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
2860 		ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
2861 	} else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) {
2862 		ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ;
2863 	} else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) {
2864 		ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS;
2865 	} else {
2866 		ixgbe->classify_mode = IXGBE_CLASSIFY_RSS;
2867 	}
2868 
2869 	IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d",
2870 	    ixgbe->num_rx_groups, ixgbe->num_rx_rings);
2871 }
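
/*
 * A worked example of the 82599 rules above: a request for 8 groups with
 * 4 rings per group falls in the [2..32] x [1..4] range and keeps all 32
 * rings, while a request for 40 groups is trimmed to 2 rings per group;
 * classify_mode becomes IXGBE_CLASSIFY_VMDQ_RSS in both cases.
 */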
2872 
2873 /*
2874  * ixgbe_get_conf - Get driver configurations set in ixgbe.conf.
2875  *
2876  * This routine gets user-configured values out of the configuration
2877  * file ixgbe.conf.
2878  *
2879  * For each configurable value, there is a minimum, a maximum, and a
2880  * default.
2881  * If the user does not configure a value, use the default.
2882  * If the user configures below the minimum, use the minimum.
2883  * If the user configures above the maximum, use the maximum.
2884  */
2885 static void
2886 ixgbe_get_conf(ixgbe_t *ixgbe)
2887 {
2888 	struct ixgbe_hw *hw = &ixgbe->hw;
2889 	uint32_t flow_control;
2890 
2891 	/*
2892 	 * ixgbe driver supports the following user configurations:
2893 	 *
2894 	 * Jumbo frame configuration:
2895 	 *    default_mtu
2896 	 *
2897 	 * Ethernet flow control configuration:
2898 	 *    flow_control
2899 	 *
2900 	 * Multiple rings configurations:
2901 	 *    tx_queue_number
2902 	 *    tx_ring_size
2903 	 *    rx_queue_number
2904 	 *    rx_ring_size
2905 	 *
2906 	 * Call ixgbe_get_prop() to get the value for a specific
2907 	 * configuration parameter.
2908 	 */
2909 
2910 	/*
2911 	 * Jumbo frame configuration - max_frame_size controls host buffer
2912 	 * allocation, so includes MTU, ethernet header, vlan tag and
2913 	 * frame check sequence.
2914 	 */
2915 	ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
2916 	    MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);
2917 
2918 	ixgbe->max_frame_size = ixgbe->default_mtu +
2919 	    sizeof (struct ether_vlan_header) + ETHERFCSL;
2920 
2921 	/*
2922 	 * Ethernet flow control configuration
2923 	 */
2924 	flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
2925 	    ixgbe_fc_none, 3, ixgbe_fc_none);
2926 	if (flow_control == 3)
2927 		flow_control = ixgbe_fc_default;
2928 
2929 	/*
2930 	 * fc.requested_mode is what the user requests.  After autoneg,
2931 	 * fc.current_mode will be the flow_control mode that was negotiated.
2932 	 */
2933 	hw->fc.requested_mode = flow_control;
2934 
2935 	/*
2936 	 * Multiple rings configurations
2937 	 */
2938 	ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
2939 	    ixgbe->capab->min_tx_que_num,
2940 	    ixgbe->capab->max_tx_que_num,
2941 	    ixgbe->capab->def_tx_que_num);
2942 	ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
2943 	    MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
2944 
2945 	ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
2946 	    ixgbe->capab->min_rx_que_num,
2947 	    ixgbe->capab->max_rx_que_num,
2948 	    ixgbe->capab->def_rx_que_num);
2949 	ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
2950 	    MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
2951 
2952 	/*
2953 	 * Multiple groups configuration
2954 	 */
2955 	ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
2956 	    ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num,
2957 	    ixgbe->capab->def_rx_grp_num);
2958 
2959 	ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
2960 	    0, 1, DEFAULT_MR_ENABLE);
2961 
2962 	if (ixgbe->mr_enable == B_FALSE) {
2963 		ixgbe->num_tx_rings = 1;
2964 		ixgbe->num_rx_rings = 1;
2965 		ixgbe->num_rx_groups = 1;
2966 		ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
2967 	} else {
2968 		ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2969 		    max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1);
2970 		/*
2971 		 * The combination of num_rx_rings and num_rx_groups
2972 		 * may not be supported by h/w. We need to adjust
2973 		 * them to appropriate values.
2974 		 */
2975 		ixgbe_setup_vmdq_rss_conf(ixgbe);
2976 	}
2977 
2978 	/*
2979 	 * Tunable used to force an interrupt type. The only use is
2980 	 * for testing of the lesser interrupt types.
2981 	 * 0 = don't force interrupt type
2982 	 * 1 = force interrupt type MSI-X
2983 	 * 2 = force interrupt type MSI
2984 	 * 3 = force interrupt type Legacy
2985 	 */
2986 	ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
2987 	    IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
2988 
2989 	ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
2990 	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
2991 	ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
2992 	    0, 1, DEFAULT_RX_HCKSUM_ENABLE);
2993 	ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
2994 	    0, 1, DEFAULT_LSO_ENABLE);
2995 	ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
2996 	    0, 1, DEFAULT_LRO_ENABLE);
2997 	ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
2998 	    0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
2999 
3000 	/* Head Write Back not recommended for 82599 */
3001 	if (hw->mac.type >= ixgbe_mac_82599EB) {
3002 		ixgbe->tx_head_wb_enable = B_FALSE;
3003 	}
3004 
3005 	/*
3006 	 * ixgbe LSO needs the tx h/w checksum support.
3007 	 * LSO will be disabled if tx h/w checksum is not
3008 	 * enabled.
3009 	 */
3010 	if (ixgbe->tx_hcksum_enable == B_FALSE) {
3011 		ixgbe->lso_enable = B_FALSE;
3012 	}
3013 
3014 	/*
3015 	 * ixgbe LRO needs the rx h/w checksum support.
3016 	 * LRO will be disabled if rx h/w checksum is not
3017 	 * enabled.
3018 	 */
3019 	if (ixgbe->rx_hcksum_enable == B_FALSE) {
3020 		ixgbe->lro_enable = B_FALSE;
3021 	}
3022 
3023 	/*
3024 	 * ixgbe LRO is currently only supported by 82599.
3025 	 */
3026 	if (hw->mac.type != ixgbe_mac_82599EB) {
3027 		ixgbe->lro_enable = B_FALSE;
3028 	}
3029 	ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3030 	    MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3031 	    DEFAULT_TX_COPY_THRESHOLD);
3032 	ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3033 	    PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3034 	    MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3035 	ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3036 	    PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3037 	    MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3038 	ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3039 	    PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3040 	    MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3041 
3042 	ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3043 	    MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3044 	    DEFAULT_RX_COPY_THRESHOLD);
3045 	ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3046 	    MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3047 	    DEFAULT_RX_LIMIT_PER_INTR);
3048 
3049 	ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3050 	    ixgbe->capab->min_intr_throttle,
3051 	    ixgbe->capab->max_intr_throttle,
3052 	    ixgbe->capab->def_intr_throttle);
3053 	/*
3054 	 * 82599 requires that the interrupt throttling rate be
3055 	 * a multiple of 8. This is enforced by the register
3056 	 * definition.
3057 	 */
3058 	if (hw->mac.type == ixgbe_mac_82599EB)
3059 		ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3060 }
3061 
3062 static void
3063 ixgbe_init_params(ixgbe_t *ixgbe)
3064 {
3065 	ixgbe->param_en_10000fdx_cap = 1;
3066 	ixgbe->param_en_1000fdx_cap = 1;
3067 	ixgbe->param_en_100fdx_cap = 1;
3068 	ixgbe->param_adv_10000fdx_cap = 1;
3069 	ixgbe->param_adv_1000fdx_cap = 1;
3070 	ixgbe->param_adv_100fdx_cap = 1;
3071 
3072 	ixgbe->param_pause_cap = 1;
3073 	ixgbe->param_asym_pause_cap = 1;
3074 	ixgbe->param_rem_fault = 0;
3075 
3076 	ixgbe->param_adv_autoneg_cap = 1;
3077 	ixgbe->param_adv_pause_cap = 1;
3078 	ixgbe->param_adv_asym_pause_cap = 1;
3079 	ixgbe->param_adv_rem_fault = 0;
3080 
3081 	ixgbe->param_lp_10000fdx_cap = 0;
3082 	ixgbe->param_lp_1000fdx_cap = 0;
3083 	ixgbe->param_lp_100fdx_cap = 0;
3084 	ixgbe->param_lp_autoneg_cap = 0;
3085 	ixgbe->param_lp_pause_cap = 0;
3086 	ixgbe->param_lp_asym_pause_cap = 0;
3087 	ixgbe->param_lp_rem_fault = 0;
3088 }
3089 
3090 /*
3091  * ixgbe_get_prop - Get a property value out of the configuration file
3092  * ixgbe.conf.
3093  *
3094  * Caller provides the name of the property, a default value, a minimum
3095  * value, and a maximum value.
3096  *
3097  * Return configured value of the property, with default, minimum and
3098  * maximum properly applied.
3099  */
3100 static int
3101 ixgbe_get_prop(ixgbe_t *ixgbe,
3102     char *propname,	/* name of the property */
3103     int minval,		/* minimum acceptable value */
3104     int maxval,		/* maximum acceptable value */
3105     int defval)		/* default value */
3106 {
3107 	int value;
3108 
3109 	/*
3110 	 * Call ddi_prop_get_int() to read the conf settings
3111 	 */
3112 	value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
3113 	    DDI_PROP_DONTPASS, propname, defval);
3114 	if (value > maxval)
3115 		value = maxval;
3116 
3117 	if (value < minval)
3118 		value = minval;
3119 
3120 	return (value);
3121 }
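
/*
 * For illustration (hypothetical ixgbe.conf content): a line such as
 *	default_mtu=9000;
 * is read by the PROP_DEFAULT_MTU lookup in ixgbe_get_conf() and then
 * clamped to the [MIN_MTU, capab->max_mtu] range by this function.
 */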
3122 
3123 /*
3124  * ixgbe_driver_setup_link - Using the link properties to setup the link.
3125  */
3126 int
3127 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
3128 {
3129 	u32 autoneg_advertised = 0;
3130 
3131 	/*
3132 	 * No half duplex support with 10Gb parts
3133 	 */
3134 	if (ixgbe->param_adv_10000fdx_cap == 1)
3135 		autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3136 
3137 	if (ixgbe->param_adv_1000fdx_cap == 1)
3138 		autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3139 
3140 	if (ixgbe->param_adv_100fdx_cap == 1)
3141 		autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
3142 
3143 	if (ixgbe->param_adv_autoneg_cap == 1 && autoneg_advertised == 0) {
3144 		ixgbe_notice(ixgbe, "Invalid link settings. Setup link "
3145 		    "to autonegotiation with full link capabilities.");
3146 
3147 		autoneg_advertised = IXGBE_LINK_SPEED_10GB_FULL |
3148 		    IXGBE_LINK_SPEED_1GB_FULL |
3149 		    IXGBE_LINK_SPEED_100_FULL;
3150 	}
3151 
3152 	if (setup_hw) {
3153 		if (ixgbe_setup_link(&ixgbe->hw, autoneg_advertised,
3154 		    ixgbe->param_adv_autoneg_cap, B_TRUE) != IXGBE_SUCCESS) {
3155 			ixgbe_notice(ixgbe, "Setup link failed on this "
3156 			    "device.");
3157 			return (IXGBE_FAILURE);
3158 		}
3159 	}
3160 
3161 	return (IXGBE_SUCCESS);
3162 }
3163 
3164 /*
3165  * ixgbe_driver_link_check - Link status processing.
3166  *
3167  * This function can be called in both kernel context and interrupt context
3168  */
3169 static void
3170 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3171 {
3172 	struct ixgbe_hw *hw = &ixgbe->hw;
3173 	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3174 	boolean_t link_up = B_FALSE;
3175 	boolean_t link_changed = B_FALSE;
3176 
3177 	ASSERT(mutex_owned(&ixgbe->gen_lock));
3178 
3179 	(void) ixgbe_check_link(hw, &speed, &link_up, false);
3180 	if (link_up) {
3181 		ixgbe->link_check_complete = B_TRUE;
3182 
3183 		/* Link is up, enable flow control settings */
3184 		(void) ixgbe_fc_enable(hw, 0);
3185 
3186 		/*
3187 		 * The link is up; check whether it was marked as down earlier
3188 		 */
3189 		if (ixgbe->link_state != LINK_STATE_UP) {
3190 			switch (speed) {
3191 			case IXGBE_LINK_SPEED_10GB_FULL:
3192 				ixgbe->link_speed = SPEED_10GB;
3193 				break;
3194 			case IXGBE_LINK_SPEED_1GB_FULL:
3195 				ixgbe->link_speed = SPEED_1GB;
3196 				break;
3197 			case IXGBE_LINK_SPEED_100_FULL:
3198 				ixgbe->link_speed = SPEED_100;
3199 			}
3200 			ixgbe->link_duplex = LINK_DUPLEX_FULL;
3201 			ixgbe->link_state = LINK_STATE_UP;
3202 			link_changed = B_TRUE;
3203 		}
3204 	} else {
3205 		if (ixgbe->link_check_complete == B_TRUE ||
3206 		    (ixgbe->link_check_complete == B_FALSE &&
3207 		    gethrtime() >= ixgbe->link_check_hrtime)) {
3208 			/*
3209 			 * The link is really down
3210 			 */
3211 			ixgbe->link_check_complete = B_TRUE;
3212 
3213 			if (ixgbe->link_state != LINK_STATE_DOWN) {
3214 				ixgbe->link_speed = 0;
3215 				ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN;
3216 				ixgbe->link_state = LINK_STATE_DOWN;
3217 				link_changed = B_TRUE;
3218 			}
3219 		}
3220 	}
3221 
3222 	/*
3223 	 * this is only reached after a link-status-change interrupt
3224 	 * so always get new phy state
3225 	 */
3226 	ixgbe_get_hw_state(ixgbe);
3227 
3228 	/*
3229 	 * If we are in an interrupt context, need to re-enable the
3230 	 * interrupt, which was automasked
3231 	 */
3232 	if (servicing_interrupt() != 0) {
3233 		ixgbe->eims |= IXGBE_EICR_LSC;
3234 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3235 	}
3236 
3237 	if (link_changed) {
3238 		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
3239 	}
3240 }
3241 
3242 /*
3243  * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
3244  */
3245 static void
3246 ixgbe_sfp_check(void *arg)
3247 {
3248 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
3249 	uint32_t eicr = ixgbe->eicr;
3250 	struct ixgbe_hw *hw = &ixgbe->hw;
3251 
3252 	mutex_enter(&ixgbe->gen_lock);
3253 	if (eicr & IXGBE_EICR_GPI_SDP1) {
3254 		/* clear the interrupt */
3255 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
3256 
3257 		/* if link up, do multispeed fiber setup */
3258 		(void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3259 		    B_TRUE, B_TRUE);
3260 		ixgbe_driver_link_check(ixgbe);
3261 	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
3262 		/* clear the interrupt */
3263 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
3264 
3265 		/* if link up, do sfp module setup */
3266 		(void) hw->mac.ops.setup_sfp(hw);
3267 
3268 		/* do multispeed fiber setup */
3269 		(void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3270 		    B_TRUE, B_TRUE);
3271 		ixgbe_driver_link_check(ixgbe);
3272 	}
3273 	mutex_exit(&ixgbe->gen_lock);
3274 }
3275 
3276 /*
3277  * ixgbe_link_timer - timer for link status detection
3278  */
3279 static void
3280 ixgbe_link_timer(void *arg)
3281 {
3282 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
3283 
3284 	mutex_enter(&ixgbe->gen_lock);
3285 	ixgbe_driver_link_check(ixgbe);
3286 	mutex_exit(&ixgbe->gen_lock);
3287 }
3288 
3289 /*
3290  * ixgbe_local_timer - Driver watchdog function.
3291  *
3292  * This function will handle the transmit stall check and other routines.
3293  */
3294 static void
3295 ixgbe_local_timer(void *arg)
3296 {
3297 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
3298 
3299 	if (ixgbe->ixgbe_state & IXGBE_ERROR) {
3300 		ixgbe->reset_count++;
3301 		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3302 			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3303 		ixgbe_restart_watchdog_timer(ixgbe);
3304 		return;
3305 	}
3306 
3307 	if (ixgbe_stall_check(ixgbe)) {
3308 		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
3309 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3310 
3311 		ixgbe->reset_count++;
3312 		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3313 			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3314 	}
3315 
3316 	ixgbe_restart_watchdog_timer(ixgbe);
3317 }
3318 
3319 /*
3320  * ixgbe_stall_check - Check for transmit stall.
3321  *
3322  * This function checks if the adapter is stalled (in transmit).
3323  *
3324  * It is called each time the watchdog timeout is invoked.
3325  * If the transmit descriptor reclaim continuously fails,
3326  * the watchdog value will increment by 1. If the watchdog
3327  * value exceeds the threshold, the ixgbe is assumed to
3328  * have stalled and needs to be reset.
3329  */
3330 static boolean_t
3331 ixgbe_stall_check(ixgbe_t *ixgbe)
3332 {
3333 	ixgbe_tx_ring_t *tx_ring;
3334 	boolean_t result;
3335 	int i;
3336 
3337 	if (ixgbe->link_state != LINK_STATE_UP)
3338 		return (B_FALSE);
3339 
3340 	/*
3341 	 * If any tx ring is stalled, we'll reset the chipset
3342 	 */
3343 	result = B_FALSE;
3344 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
3345 		tx_ring = &ixgbe->tx_rings[i];
3346 		if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
3347 			tx_ring->tx_recycle(tx_ring);
3348 		}
3349 
3350 		if (tx_ring->recycle_fail > 0)
3351 			tx_ring->stall_watchdog++;
3352 		else
3353 			tx_ring->stall_watchdog = 0;
3354 
3355 		if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
3356 			result = B_TRUE;
3357 			break;
3358 		}
3359 	}
3360 
3361 	if (result) {
3362 		tx_ring->stall_watchdog = 0;
3363 		tx_ring->recycle_fail = 0;
3364 	}
3365 
3366 	return (result);
3367 }
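
/*
 * Since ixgbe_arm_watchdog_timer() arms the watchdog with a one-second
 * period, a ring must fail descriptor recycling for STALL_WATCHDOG_TIMEOUT
 * consecutive ticks, i.e. roughly that many seconds, before a reset is
 * attempted.
 */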
3368 
3370 /*
3371  * is_valid_mac_addr - Check if the mac address is valid.
3372  */
3373 static boolean_t
3374 is_valid_mac_addr(uint8_t *mac_addr)
3375 {
3376 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
3377 	const uint8_t addr_test2[6] =
3378 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3379 
3380 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
3381 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
3382 		return (B_FALSE);
3383 
3384 	return (B_TRUE);
3385 }
3386 
3387 static boolean_t
3388 ixgbe_find_mac_address(ixgbe_t *ixgbe)
3389 {
3390 #ifdef __sparc
3391 	struct ixgbe_hw *hw = &ixgbe->hw;
3392 	uchar_t *bytes;
3393 	struct ether_addr sysaddr;
3394 	uint_t nelts;
3395 	int err;
3396 	boolean_t found = B_FALSE;
3397 
3398 	/*
3399 	 * The "vendor's factory-set address" may already have
3400 	 * been extracted from the chip, but if the property
3401 	 * "local-mac-address" is set we use that instead.
3402 	 *
3403 	 * We check whether it looks like an array of 6
3404 	 * bytes (which it should, if OBP set it).  If we can't
3405 	 * make sense of it this way, we'll ignore it.
3406 	 */
3407 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3408 	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
3409 	if (err == DDI_PROP_SUCCESS) {
3410 		if (nelts == ETHERADDRL) {
3411 			while (nelts--)
3412 				hw->mac.addr[nelts] = bytes[nelts];
3413 			found = B_TRUE;
3414 		}
3415 		ddi_prop_free(bytes);
3416 	}
3417 
3418 	/*
3419 	 * Look up the OBP property "local-mac-address?". If the user has set
3420 	 * 'local-mac-address? = false', use "the system address" instead.
3421 	 */
3422 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
3423 	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
3424 		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
3425 			if (localetheraddr(NULL, &sysaddr) != 0) {
3426 				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
3427 				found = B_TRUE;
3428 			}
3429 		}
3430 		ddi_prop_free(bytes);
3431 	}
3432 
3433 	/*
3434 	 * Finally(!), if there's a valid "mac-address" property (created
3435 	 * if we netbooted from this interface), we must use this instead
3436 	 * of any of the above to ensure that the NFS/install server doesn't
3437 	 * get confused by the address changing as Solaris takes over!
3438 	 */
3439 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3440 	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
3441 	if (err == DDI_PROP_SUCCESS) {
3442 		if (nelts == ETHERADDRL) {
3443 			while (nelts--)
3444 				hw->mac.addr[nelts] = bytes[nelts];
3445 			found = B_TRUE;
3446 		}
3447 		ddi_prop_free(bytes);
3448 	}
3449 
3450 	if (found) {
3451 		bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
3452 		return (B_TRUE);
3453 	}
3454 #else
3455 	_NOTE(ARGUNUSED(ixgbe));
3456 #endif
3457 
3458 	return (B_TRUE);
3459 }
3460 
3461 #pragma inline(ixgbe_arm_watchdog_timer)
3462 static void
3463 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
3464 {
3465 	/*
3466 	 * Arm the watchdog timer to fire in one second.
3467 	 */
3468 	ixgbe->watchdog_tid =
3469 	    timeout(ixgbe_local_timer,
3470 	    (void *)ixgbe, 1 * drv_usectohz(1000000));
3472 }
3473 
3474 /*
3475  * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
3476  */
3477 void
3478 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
3479 {
3480 	mutex_enter(&ixgbe->watchdog_lock);
3481 
3482 	if (!ixgbe->watchdog_enable) {
3483 		ixgbe->watchdog_enable = B_TRUE;
3484 		ixgbe->watchdog_start = B_TRUE;
3485 		ixgbe_arm_watchdog_timer(ixgbe);
3486 	}
3487 
3488 	mutex_exit(&ixgbe->watchdog_lock);
3489 }
3490 
3491 /*
3492  * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
3493  */
3494 void
3495 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
3496 {
3497 	timeout_id_t tid;
3498 
3499 	mutex_enter(&ixgbe->watchdog_lock);
3500 
3501 	ixgbe->watchdog_enable = B_FALSE;
3502 	ixgbe->watchdog_start = B_FALSE;
3503 	tid = ixgbe->watchdog_tid;
3504 	ixgbe->watchdog_tid = 0;
3505 
3506 	mutex_exit(&ixgbe->watchdog_lock);
3507 
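	/*
	 * untimeout(9F) is called only after watchdog_lock has been
	 * dropped: the timer handler itself takes that lock, so
	 * cancelling with it held could deadlock against an
	 * already-running timeout.
	 */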
3508 	if (tid != 0)
3509 		(void) untimeout(tid);
3510 }
3511 
3512 /*
3513  * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
3514  */
3515 void
3516 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
3517 {
3518 	mutex_enter(&ixgbe->watchdog_lock);
3519 
3520 	if (ixgbe->watchdog_enable) {
3521 		if (!ixgbe->watchdog_start) {
3522 			ixgbe->watchdog_start = B_TRUE;
3523 			ixgbe_arm_watchdog_timer(ixgbe);
3524 		}
3525 	}
3526 
3527 	mutex_exit(&ixgbe->watchdog_lock);
3528 }
3529 
3530 /*
3531  * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
3532  */
3533 static void
3534 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
3535 {
3536 	mutex_enter(&ixgbe->watchdog_lock);
3537 
3538 	if (ixgbe->watchdog_start)
3539 		ixgbe_arm_watchdog_timer(ixgbe);
3540 
3541 	mutex_exit(&ixgbe->watchdog_lock);
3542 }
3543 
3544 /*
3545  * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
3546  */
3547 void
3548 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
3549 {
3550 	timeout_id_t tid;
3551 
3552 	mutex_enter(&ixgbe->watchdog_lock);
3553 
3554 	ixgbe->watchdog_start = B_FALSE;
3555 	tid = ixgbe->watchdog_tid;
3556 	ixgbe->watchdog_tid = 0;
3557 
3558 	mutex_exit(&ixgbe->watchdog_lock);
3559 
3560 	if (tid != 0)
3561 		(void) untimeout(tid);
3562 }
3563 
3564 /*
3565  * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
3566  */
3567 static void
3568 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
3569 {
3570 	struct ixgbe_hw *hw = &ixgbe->hw;
3571 
3572 	/*
3573 	 * mask all interrupts off
3574 	 */
3575 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
3576 
3577 	/*
3578 	 * for MSI-X, also disable autoclear
3579 	 */
3580 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3581 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
3582 	}
3583 
3584 	IXGBE_WRITE_FLUSH(hw);
3585 }
3586 
3587 /*
3588  * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
3589  */
3590 static void
3591 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
3592 {
3593 	struct ixgbe_hw *hw = &ixgbe->hw;
3594 	uint32_t eiac, eiam;
3595 	uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3596 
3597 	/* interrupt types to enable */
3598 	ixgbe->eims = IXGBE_EIMS_ENABLE_MASK;	/* shared code default */
3599 	ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER;	/* minus tcp timer */
3600 	ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */
3601 
3602 	/* enable automask on "other" causes that this adapter can generate */
3603 	eiam = ixgbe->capab->other_intr;
3604 
3605 	/*
3606 	 * msi-x mode
3607 	 */
3608 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3609 		/* enable autoclear but not on bits 29:20 */
3610 		eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
3611 
3612 		/* general purpose interrupt enable */
3613 		gpie |= (IXGBE_GPIE_MSIX_MODE
3614 		    | IXGBE_GPIE_PBA_SUPPORT
3615 		    | IXGBE_GPIE_OCD
3616 		    | IXGBE_GPIE_EIAME);
3617 	/*
3618 	 * non-msi-x mode
3619 	 */
3620 	} else {
3621 
3622 		/* disable autoclear, leave gpie at default */
3623 		eiac = 0;
3624 
3625 		/*
3626 		 * General purpose interrupt enable.
3627 		 * For 82599, extended interrupt automask enable
3628 		 * only in MSI or MSI-X mode
3629 		 */
3630 		if ((hw->mac.type < ixgbe_mac_82599EB) ||
3631 		    (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3632 			gpie |= IXGBE_GPIE_EIAME;
3633 		}
3634 	}
3635 	/* Enable specific interrupts for 82599  */
3636 	if (hw->mac.type == ixgbe_mac_82599EB) {
3637 		gpie |= IXGBE_SDP2_GPIEN; /* pluggable optics intr */
3638 		gpie |= IXGBE_SDP1_GPIEN; /* LSC interrupt */
3639 	}
3640 	/* Enable RSC delay of 8us for 82599 */
3641 	if (ixgbe->lro_enable) {
3642 		gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3643 	}
3644 	/* write to interrupt control registers */
3645 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3646 	IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3647 	IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3648 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3649 	IXGBE_WRITE_FLUSH(hw);
3650 }
3651 
3652 /*
3653  * ixgbe_loopback_ioctl - Loopback support.
3654  */
3655 enum ioc_reply
3656 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
3657 {
3658 	lb_info_sz_t *lbsp;
3659 	lb_property_t *lbpp;
3660 	uint32_t *lbmp;
3661 	uint32_t size;
3662 	uint32_t value;
3663 
3664 	if (mp->b_cont == NULL)
3665 		return (IOC_INVAL);
3666 
3667 	switch (iocp->ioc_cmd) {
3668 	default:
3669 		return (IOC_INVAL);
3670 
3671 	case LB_GET_INFO_SIZE:
3672 		size = sizeof (lb_info_sz_t);
3673 		if (iocp->ioc_count != size)
3674 			return (IOC_INVAL);
3675 
3676 		value = sizeof (lb_normal);
3677 		value += sizeof (lb_mac);
3678 		value += sizeof (lb_external);
3679 
3680 		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
3681 		*lbsp = value;
3682 		break;
3683 
3684 	case LB_GET_INFO:
3685 		value = sizeof (lb_normal);
3686 		value += sizeof (lb_mac);
3687 		value += sizeof (lb_external);
3688 
3689 		size = value;
3690 		if (iocp->ioc_count != size)
3691 			return (IOC_INVAL);
3692 
3693 		value = 0;
3694 		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
3695 
3696 		lbpp[value++] = lb_normal;
3697 		lbpp[value++] = lb_mac;
3698 		lbpp[value++] = lb_external;
3699 		break;
3700 
3701 	case LB_GET_MODE:
3702 		size = sizeof (uint32_t);
3703 		if (iocp->ioc_count != size)
3704 			return (IOC_INVAL);
3705 
3706 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3707 		*lbmp = ixgbe->loopback_mode;
3708 		break;
3709 
3710 	case LB_SET_MODE:
3711 		size = 0;
3712 		if (iocp->ioc_count != sizeof (uint32_t))
3713 			return (IOC_INVAL);
3714 
3715 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3716 		if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
3717 			return (IOC_INVAL);
3718 		break;
3719 	}
3720 
3721 	iocp->ioc_count = size;
3722 	iocp->ioc_error = 0;
3723 
3724 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3725 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3726 		return (IOC_INVAL);
3727 	}
3728 
3729 	return (IOC_REPLY);
3730 }
3731 
3732 /*
3733  * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
3734  */
3735 static boolean_t
3736 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
3737 {
3738 	if (mode == ixgbe->loopback_mode)
3739 		return (B_TRUE);
3740 
3741 	ixgbe->loopback_mode = mode;
3742 
3743 	if (mode == IXGBE_LB_NONE) {
3744 		/*
3745 		 * Reset the chip
3746 		 */
3747 		(void) ixgbe_reset(ixgbe);
3748 		return (B_TRUE);
3749 	}
3750 
3751 	mutex_enter(&ixgbe->gen_lock);
3752 
3753 	switch (mode) {
3754 	default:
3755 		mutex_exit(&ixgbe->gen_lock);
3756 		return (B_FALSE);
3757 
3758 	case IXGBE_LB_EXTERNAL:
3759 		break;
3760 
3761 	case IXGBE_LB_INTERNAL_MAC:
3762 		ixgbe_set_internal_mac_loopback(ixgbe);
3763 		break;
3764 	}
3765 
3766 	mutex_exit(&ixgbe->gen_lock);
3767 
3768 	return (B_TRUE);
3769 }
3770 
3771 /*
3772  * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
3773  */
3774 static void
3775 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
3776 {
3777 	struct ixgbe_hw *hw;
3778 	uint32_t reg;
3779 	uint8_t atlas;
3780 
3781 	hw = &ixgbe->hw;
3782 
3783 	/*
3784 	 * Setup MAC loopback
3785 	 */
3786 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
3787 	reg |= IXGBE_HLREG0_LPBK;
3788 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
3789 
3790 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
3791 	reg &= ~IXGBE_AUTOC_LMS_MASK;
3792 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
3793 
3794 	/*
3795 	 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
3796 	 */
3797 	if (hw->mac.type == ixgbe_mac_82598EB) {
3798 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3799 		    &atlas);
3800 		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
3801 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3802 		    atlas);
3803 
3804 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3805 		    &atlas);
3806 		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
3807 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3808 		    atlas);
3809 
3810 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3811 		    &atlas);
3812 		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
3813 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3814 		    atlas);
3815 
3816 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3817 		    &atlas);
3818 		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
3819 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3820 		    atlas);
3821 	} else if (hw->mac.type == ixgbe_mac_82599EB) {
3822 		reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
3823 		reg |= (IXGBE_AUTOC_FLU |
3824 		    IXGBE_AUTOC_10G_KX4);
3825 		IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
3826 
3827 		(void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
3828 		    B_FALSE, B_TRUE);
3829 	}
3830 }
3831 
3832 #pragma inline(ixgbe_intr_rx_work)
3833 /*
3834  * ixgbe_intr_rx_work - RX processing of ISR.
3835  */
3836 static void
3837 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
3838 {
3839 	mblk_t *mp;
3840 
3841 	mutex_enter(&rx_ring->rx_lock);
3842 
3843 	mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
3844 	mutex_exit(&rx_ring->rx_lock);
3845 
3846 	if (mp != NULL)
3847 		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
3848 		    rx_ring->ring_gen_num);
3849 }
3850 
3851 #pragma inline(ixgbe_intr_tx_work)
3852 /*
3853  * ixgbe_intr_tx_work - TX processing of ISR.
3854  */
3855 static void
3856 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
3857 {
3858 	ixgbe_t *ixgbe = tx_ring->ixgbe;
3859 
3860 	/*
3861 	 * Recycle the tx descriptors
3862 	 */
3863 	tx_ring->tx_recycle(tx_ring);
3864 
3865 	/*
3866 	 * Schedule the re-transmit
3867 	 */
3868 	if (tx_ring->reschedule &&
3869 	    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
3870 		tx_ring->reschedule = B_FALSE;
3871 		mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
3872 		    tx_ring->ring_handle);
3873 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
3874 	}
3875 }
3876 
3877 #pragma inline(ixgbe_intr_other_work)
3878 /*
3879  * ixgbe_intr_other_work - Process interrupt types other than tx/rx
3880  */
3881 static void
3882 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
3883 {
3884 	struct ixgbe_hw *hw = &ixgbe->hw;
3885 
3886 	ASSERT(mutex_owned(&ixgbe->gen_lock));
3887 
3888 	/*
3889 	 * handle link status change
3890 	 */
3891 	if (eicr & IXGBE_EICR_LSC) {
3892 		ixgbe_driver_link_check(ixgbe);
3893 	}
3894 
3895 	/*
3896 	 * check for fan failure on adapters with fans
3897 	 */
3898 	if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
3899 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3900 		if (hw->mac.type < ixgbe_mac_82599EB) {
3901 			ixgbe_log(ixgbe,
3902 			    "Fan has stopped, replace the adapter\n");
3903 
3904 			/* re-enable the interrupt, which was automasked */
3905 			ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
3906 		}
3907 	}
3908 
3909 	/*
3910 	 * Do SFP check for 82599
3911 	 */
3912 	if (hw->mac.type == ixgbe_mac_82599EB) {
3913 		if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
3914 		    ixgbe_sfp_check, (void *)ixgbe,
3915 		    DDI_NOSLEEP)) != DDI_SUCCESS) {
3916 			ixgbe_log(ixgbe, "No memory available to dispatch "
3917 			    "taskq for SFP check");
3918 		}
3919 
3920 		/*
3921 		 * We need to fully re-check the link later.
3922 		 */
3923 		ixgbe->link_check_complete = B_FALSE;
3924 		ixgbe->link_check_hrtime = gethrtime() +
3925 		    (IXGBE_LINK_UP_TIME * 100000000ULL);
3926 	}
3927 }
3928 
3929 /*
3930  * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
3931  */
3932 static uint_t
3933 ixgbe_intr_legacy(void *arg1, void *arg2)
3934 {
3935 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3936 	struct ixgbe_hw *hw = &ixgbe->hw;
3937 	ixgbe_tx_ring_t *tx_ring;
3938 	ixgbe_rx_ring_t *rx_ring;
3939 	uint32_t eicr;
3940 	mblk_t *mp;
3941 	boolean_t tx_reschedule;
3942 	uint_t result;
3943 
3944 	_NOTE(ARGUNUSED(arg2));
3945 
3946 	mutex_enter(&ixgbe->gen_lock);
3947 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
3948 		mutex_exit(&ixgbe->gen_lock);
3949 		return (DDI_INTR_UNCLAIMED);
3950 	}
3951 
3952 	mp = NULL;
3953 	tx_reschedule = B_FALSE;
3954 
3955 	/*
3956 	 * Any bit set in eicr: claim this interrupt
3957 	 */
3958 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3959 
3960 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3961 		mutex_exit(&ixgbe->gen_lock);
3962 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3963 		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
3964 		return (DDI_INTR_CLAIMED);
3965 	}
3966 
3967 	if (eicr) {
3968 		/*
3969 		 * For legacy interrupt, we have only one interrupt,
3970 		 * so we have only one rx ring and one tx ring enabled.
3971 		 */
3972 		ASSERT(ixgbe->num_rx_rings == 1);
3973 		ASSERT(ixgbe->num_tx_rings == 1);
3974 
3975 		/*
3976 		 * For legacy interrupt, rx rings[0] will use RTxQ[0].
3977 		 */
3978 		if (eicr & 0x1) {
3979 			ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
3980 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
3981 			ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
3982 			/*
3983 			 * Clean the rx descriptors
3984 			 */
3985 			rx_ring = &ixgbe->rx_rings[0];
3986 			mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
3987 		}
3988 
3989 		/*
3990 		 * For legacy interrupt, tx rings[0] will use RTxQ[1].
3991 		 */
3992 		if (eicr & 0x2) {
3993 			/*
3994 			 * Recycle the tx descriptors
3995 			 */
3996 			tx_ring = &ixgbe->tx_rings[0];
3997 			tx_ring->tx_recycle(tx_ring);
3998 
3999 			/*
4000 			 * Schedule the re-transmit
4001 			 */
4002 			tx_reschedule = (tx_ring->reschedule &&
4003 			    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4004 		}
4005 
4006 		/* any interrupt type other than tx/rx */
4007 		if (eicr & ixgbe->capab->other_intr) {
4008 			if (hw->mac.type < ixgbe_mac_82599EB) {
4009 				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4010 			}
4011 			if (hw->mac.type == ixgbe_mac_82599EB) {
4012 				ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4013 				IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4014 			}
4015 			ixgbe_intr_other_work(ixgbe, eicr);
4016 			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4017 		}
4018 
4019 		mutex_exit(&ixgbe->gen_lock);
4020 
4021 		result = DDI_INTR_CLAIMED;
4022 	} else {
4023 		mutex_exit(&ixgbe->gen_lock);
4024 
4025 		/*
4026 		 * No interrupt cause bits set: don't claim this interrupt.
4027 		 */
4028 		result = DDI_INTR_UNCLAIMED;
4029 	}
4030 
4031 	/* re-enable the interrupts which were automasked */
4032 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4033 
4034 	/*
4035 	 * Do the following work outside of the gen_lock
4036 	 */
4037 	if (mp != NULL) {
4038 		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
4039 		    rx_ring->ring_gen_num);
4040 	}
4041 
4042 	if (tx_reschedule) {
4043 		tx_ring->reschedule = B_FALSE;
4044 		mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
4045 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
4046 	}
4047 
4048 	return (result);
4049 }
4050 
4051 /*
4052  * ixgbe_intr_msi - Interrupt handler for MSI.
4053  */
4054 static uint_t
4055 ixgbe_intr_msi(void *arg1, void *arg2)
4056 {
4057 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4058 	struct ixgbe_hw *hw = &ixgbe->hw;
4059 	uint32_t eicr;
4060 
4061 	_NOTE(ARGUNUSED(arg2));
4062 
4063 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4064 
4065 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4066 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4067 		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4068 		return (DDI_INTR_CLAIMED);
4069 	}
4070 
4071 	/*
4072 	 * For MSI interrupt, we have only one vector,
4073 	 * so we have only one rx ring and one tx ring enabled.
4074 	 */
4075 	ASSERT(ixgbe->num_rx_rings == 1);
4076 	ASSERT(ixgbe->num_tx_rings == 1);
4077 
4078 	/*
4079 	 * For MSI interrupt, rx rings[0] will use RTxQ[0].
4080 	 */
4081 	if (eicr & 0x1) {
4082 		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4083 	}
4084 
4085 	/*
4086 	 * For MSI interrupt, tx rings[0] will use RTxQ[1].
4087 	 */
4088 	if (eicr & 0x2) {
4089 		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4090 	}
4091 
4092 	/* any interrupt type other than tx/rx */
4093 	if (eicr & ixgbe->capab->other_intr) {
4094 		mutex_enter(&ixgbe->gen_lock);
4095 		if (hw->mac.type < ixgbe_mac_82599EB) {
4096 			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4097 		}
4098 		if (hw->mac.type == ixgbe_mac_82599EB) {
4099 			ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4100 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4101 		}
4102 		ixgbe_intr_other_work(ixgbe, eicr);
4103 		ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4104 		mutex_exit(&ixgbe->gen_lock);
4105 	}
4106 
4107 	/* re-enable the interrupts which were automasked */
4108 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4109 
4110 	return (DDI_INTR_CLAIMED);
4111 }
4112 
4113 /*
4114  * ixgbe_intr_msix - Interrupt handler for MSI-X.
4115  */
4116 static uint_t
4117 ixgbe_intr_msix(void *arg1, void *arg2)
4118 {
4119 	ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
4120 	ixgbe_t *ixgbe = vect->ixgbe;
4121 	struct ixgbe_hw *hw = &ixgbe->hw;
4122 	uint32_t eicr;
4123 	int r_idx = 0;
4124 
4125 	_NOTE(ARGUNUSED(arg2));
4126 
4127 	/*
4128 	 * Clean each rx ring that has its bit set in the map
4129 	 */
4130 	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
4131 	while (r_idx >= 0) {
4132 		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
4133 		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
4134 		    (ixgbe->num_rx_rings - 1));
4135 	}
4136 
4137 	/*
4138 	 * Clean each tx ring that has its bit set in the map
4139 	 */
4140 	r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
4141 	while (r_idx >= 0) {
4142 		ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
4143 		r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
4144 		    (ixgbe->num_tx_rings - 1));
4145 	}
4146 
4148 	/*
4149 	 * Clean other interrupt (link change) that has its bit set in the map
4150 	 */
4151 	if (BT_TEST(vect->other_map, 0) == 1) {
4152 		eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4153 
4154 		if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4155 		    DDI_FM_OK) {
4156 			ddi_fm_service_impact(ixgbe->dip,
4157 			    DDI_SERVICE_DEGRADED);
4158 			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4159 			return (DDI_INTR_CLAIMED);
4160 		}
4161 
4162 		/*
4163 		 * Check the interrupt cause bits: only causes other
4164 		 * than tx/rx are processed here.
4165 		 */
4167 		if (eicr & ixgbe->capab->other_intr) {
4168 			if (hw->mac.type < ixgbe_mac_82599EB) {
4169 				mutex_enter(&ixgbe->gen_lock);
4170 				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4171 				ixgbe_intr_other_work(ixgbe, eicr);
4172 				mutex_exit(&ixgbe->gen_lock);
4173 			} else {
4174 				if (hw->mac.type == ixgbe_mac_82599EB) {
4175 					mutex_enter(&ixgbe->gen_lock);
4176 					ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4177 					ixgbe_intr_other_work(ixgbe, eicr);
4178 					mutex_exit(&ixgbe->gen_lock);
4179 				}
4180 			}
4181 		}
4182 
4183 		/* re-enable the interrupts which were automasked */
4184 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4185 	}
4186 
4187 	return (DDI_INTR_CLAIMED);
4188 }
4189 
4190 /*
4191  * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4192  *
4193  * Normal sequence is to try MSI-X; if not successful, try MSI;
4194  * if not successful, try Legacy.
4195  * ixgbe->intr_force can be used to force sequence to start with
4196  * any of the 3 types.
4197  * If MSI-X is not used, number of tx/rx rings is forced to 1.
4198  */
4199 static int
4200 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
4201 {
4202 	dev_info_t *devinfo;
4203 	int intr_types;
4204 	int rc;
4205 
4206 	devinfo = ixgbe->dip;
4207 
4208 	/*
4209 	 * Get supported interrupt types
4210 	 */
4211 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
4212 
4213 	if (rc != DDI_SUCCESS) {
4214 		ixgbe_log(ixgbe,
4215 		    "Get supported interrupt types failed: %d", rc);
4216 		return (IXGBE_FAILURE);
4217 	}
4218 	IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
4219 
4220 	ixgbe->intr_type = 0;
4221 
4222 	/*
4223 	 * Install MSI-X interrupts
4224 	 */
4225 	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
4226 	    (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
4227 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
4228 		if (rc == IXGBE_SUCCESS)
4229 			return (IXGBE_SUCCESS);
4230 
4231 		ixgbe_log(ixgbe,
4232 		    "Allocate MSI-X failed, trying MSI interrupts...");
4233 	}
4234 
4235 	/*
4236 	 * MSI-X not used, force rings and groups to 1
4237 	 */
4238 	ixgbe->num_rx_rings = 1;
4239 	ixgbe->num_rx_groups = 1;
4240 	ixgbe->num_tx_rings = 1;
4241 	ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
4242 	ixgbe_log(ixgbe,
4243 	    "MSI-X not used, force rings and groups number to 1");
4244 
4245 	/*
4246 	 * Install MSI interrupts
4247 	 */
4248 	if ((intr_types & DDI_INTR_TYPE_MSI) &&
4249 	    (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
4250 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
4251 		if (rc == IXGBE_SUCCESS)
4252 			return (IXGBE_SUCCESS);
4253 
4254 		ixgbe_log(ixgbe,
4255 		    "Allocate MSI failed, trying Legacy interrupts...");
4256 	}
4257 
4258 	/*
4259 	 * Install legacy interrupts
4260 	 */
4261 	if (intr_types & DDI_INTR_TYPE_FIXED) {
4262 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
4263 		if (rc == IXGBE_SUCCESS)
4264 			return (IXGBE_SUCCESS);
4265 
4266 		ixgbe_log(ixgbe,
4267 		    "Allocate Legacy interrupts failed");
4268 	}
4269 
4270 	/*
4271 	 * If none of the 3 types succeeded, return failure
4272 	 */
4273 	return (IXGBE_FAILURE);
4274 }
4275 
4276 /*
4277  * ixgbe_alloc_intr_handles - Allocate interrupt handles.
4278  *
4279  * For legacy and MSI, only 1 handle is needed.  For MSI-X,
4280  * the request is based on the rx/tx ring counts, limited by the
4281  * adapter capability.  Failure is returned if fewer than the
4282  * minimum number of handles can be allocated.
4283  */
4284 static int
4285 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
4286 {
4287 	dev_info_t *devinfo;
4288 	int request, count, actual;
4289 	int minimum;
4290 	int rc;
4291 	uint32_t ring_per_group;
4292 
4293 	devinfo = ixgbe->dip;
4294 
4295 	switch (intr_type) {
4296 	case DDI_INTR_TYPE_FIXED:
4297 		request = 1;	/* Request 1 legacy interrupt handle */
4298 		minimum = 1;
4299 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
4300 		break;
4301 
4302 	case DDI_INTR_TYPE_MSI:
4303 		request = 1;	/* Request 1 MSI interrupt handle */
4304 		minimum = 1;
4305 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
4306 		break;
4307 
4308 	case DDI_INTR_TYPE_MSIX:
4309 		/*
4310 		 * The best number of vectors for the adapter is
4311 		 * (# rx rings + # tx rings); however, the request is
4312 		 * capped at 16 and at the adapter's maximum ring vectors.
4313 		 */
4314 		request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings);
4315 		if (request > ixgbe->capab->max_ring_vect)
4316 			request = ixgbe->capab->max_ring_vect;
4317 		minimum = 1;
4318 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
4319 		break;
4320 
4321 	default:
4322 		ixgbe_log(ixgbe,
4323 		    "invalid call to ixgbe_alloc_intr_handles(): %d\n",
4324 		    intr_type);
4325 		return (IXGBE_FAILURE);
4326 	}
4327 	IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d  minimum: %d",
4328 	    request, minimum);
4329 
4330 	/*
4331 	 * Get number of supported interrupts
4332 	 */
4333 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
4334 	if ((rc != DDI_SUCCESS) || (count < minimum)) {
4335 		ixgbe_log(ixgbe,
4336 		    "Get interrupt number failed. Return: %d, count: %d",
4337 		    rc, count);
4338 		return (IXGBE_FAILURE);
4339 	}
4340 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
4341 
4342 	actual = 0;
4343 	ixgbe->intr_cnt = 0;
4344 	ixgbe->intr_cnt_max = 0;
4345 	ixgbe->intr_cnt_min = 0;
4346 
4347 	/*
4348 	 * Allocate an array of interrupt handles
4349 	 */
4350 	ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
4351 	ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
4352 
4353 	rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
4354 	    request, &actual, DDI_INTR_ALLOC_NORMAL);
4355 	if (rc != DDI_SUCCESS) {
4356 		ixgbe_log(ixgbe, "Allocate interrupts failed. "
4357 		    "return: %d, request: %d, actual: %d",
4358 		    rc, request, actual);
4359 		goto alloc_handle_fail;
4360 	}
4361 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
4362 
4363 	/*
4364 	 * upper/lower limit of interrupts
4365 	 */
4366 	ixgbe->intr_cnt = actual;
4367 	ixgbe->intr_cnt_max = request;
4368 	ixgbe->intr_cnt_min = minimum;
4369 
4370 	/*
4371 	 * rss number per group should not exceed the rx interrupt number,
4372 	 * The RSS ring number per group should not exceed the rx interrupt
4373 	 * number; otherwise the rx ring number needs to be adjusted.
4374 	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4375 	ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0);
4376 	if (min(actual, ixgbe->num_rx_rings) < ring_per_group) {
4377 		ixgbe->num_rx_rings = ixgbe->num_rx_groups *
4378 		    min(actual, ixgbe->num_rx_rings);
4379 		ixgbe_setup_vmdq_rss_conf(ixgbe);
4380 	}
4381 
4382 	/*
4383 	 * Now we know the actual number of vectors.  Make sure it meets
4384 	 * the minimum before mapping vectors to rings.
4385 	 */
4386 	if (actual < minimum) {
4387 		ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
4388 		    actual);
4389 		goto alloc_handle_fail;
4390 	}
4391 
4392 	/*
4393 	 * Get priority for first vector, assume remaining are all the same
4394 	 */
4395 	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
4396 	if (rc != DDI_SUCCESS) {
4397 		ixgbe_log(ixgbe,
4398 		    "Get interrupt priority failed: %d", rc);
4399 		goto alloc_handle_fail;
4400 	}
4401 
4402 	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
4403 	if (rc != DDI_SUCCESS) {
4404 		ixgbe_log(ixgbe,
4405 		    "Get interrupt cap failed: %d", rc);
4406 		goto alloc_handle_fail;
4407 	}
4408 
4409 	ixgbe->intr_type = intr_type;
4410 
4411 	return (IXGBE_SUCCESS);
4412 
4413 alloc_handle_fail:
4414 	ixgbe_rem_intrs(ixgbe);
4415 
4416 	return (IXGBE_FAILURE);
4417 }
4418 
4419 /*
4420  * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
4421  *
4422  * Before adding the interrupt handlers, the interrupt vectors have
4423  * been allocated, and the rx/tx rings have also been allocated.
4424  */
4425 static int
4426 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
4427 {
4428 	int vector = 0;
4429 	int rc;
4430 
4431 	switch (ixgbe->intr_type) {
4432 	case DDI_INTR_TYPE_MSIX:
4433 		/*
4434 		 * Add interrupt handler for all vectors
4435 		 */
4436 		for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
4437 			/*
4438 			 * install pointer to vect_map[vector]
4439 			 */
4440 			rc = ddi_intr_add_handler(ixgbe->htable[vector],
4441 			    (ddi_intr_handler_t *)ixgbe_intr_msix,
4442 			    (void *)&ixgbe->vect_map[vector], NULL);
4443 
4444 			if (rc != DDI_SUCCESS) {
4445 				ixgbe_log(ixgbe,
4446 				    "Add interrupt handler failed. "
4447 				    "return: %d, vector: %d", rc, vector);
4448 				for (vector--; vector >= 0; vector--) {
4449 					(void) ddi_intr_remove_handler(
4450 					    ixgbe->htable[vector]);
4451 				}
4452 				return (IXGBE_FAILURE);
4453 			}
4454 		}
4455 
4456 		break;
4457 
4458 	case DDI_INTR_TYPE_MSI:
4459 		/*
4460 		 * Add interrupt handlers for the only vector
4461 		 */
4462 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
4463 		    (ddi_intr_handler_t *)ixgbe_intr_msi,
4464 		    (void *)ixgbe, NULL);
4465 
4466 		if (rc != DDI_SUCCESS) {
4467 			ixgbe_log(ixgbe,
4468 			    "Add MSI interrupt handler failed: %d", rc);
4469 			return (IXGBE_FAILURE);
4470 		}
4471 
4472 		break;
4473 
4474 	case DDI_INTR_TYPE_FIXED:
4475 		/*
4476 		 * Add interrupt handlers for the only vector
4477 		 */
4478 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
4479 		    (ddi_intr_handler_t *)ixgbe_intr_legacy,
4480 		    (void *)ixgbe, NULL);
4481 
4482 		if (rc != DDI_SUCCESS) {
4483 			ixgbe_log(ixgbe,
4484 			    "Add legacy interrupt handler failed: %d", rc);
4485 			return (IXGBE_FAILURE);
4486 		}
4487 
4488 		break;
4489 
4490 	default:
4491 		return (IXGBE_FAILURE);
4492 	}
4493 
4494 	return (IXGBE_SUCCESS);
4495 }
4496 
4497 #pragma inline(ixgbe_map_rxring_to_vector)
4498 /*
4499  * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
4500  */
4501 static void
4502 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
4503 {
4504 	/*
4505 	 * Set bit in map
4506 	 */
4507 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
4508 
4509 	/*
4510 	 * Count bits set
4511 	 */
4512 	ixgbe->vect_map[v_idx].rxr_cnt++;
4513 
4514 	/*
4515 	 * Remember bit position
4516 	 */
4517 	ixgbe->rx_rings[r_idx].intr_vector = v_idx;
4518 	ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
4519 }
4520 
4521 #pragma inline(ixgbe_map_txring_to_vector)
4522 /*
4523  * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
4524  */
4525 static void
4526 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
4527 {
4528 	/*
4529 	 * Set bit in map
4530 	 */
4531 	BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
4532 
4533 	/*
4534 	 * Count bits set
4535 	 */
4536 	ixgbe->vect_map[v_idx].txr_cnt++;
4537 
4538 	/*
4539 	 * Remember bit position
4540 	 */
4541 	ixgbe->tx_rings[t_idx].intr_vector = v_idx;
4542 	ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
4543 }
4544 
4545 /*
4546  * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
4547  * allocation register (IVAR).
4548  * cause:
4549  *   -1 : other cause
4550  *    0 : rx
4551  *    1 : tx
4552  */
4553 static void
4554 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
4555     int8_t cause)
4556 {
4557 	struct ixgbe_hw *hw = &ixgbe->hw;
4558 	u32 ivar, index;
4559 
4560 	switch (hw->mac.type) {
4561 	case ixgbe_mac_82598EB:
4562 		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4563 		if (cause == -1) {
4564 			cause = 0;
4565 		}
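		/*
		 * On the 82598 each 32-bit IVAR register holds four 8-bit
		 * entries; rx causes occupy entries 0-63 and tx causes
		 * entries 64-127, hence the (cause * 64) offset below.
		 */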
4566 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4567 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4568 		ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4569 		ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4570 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4571 		break;
4572 	case ixgbe_mac_82599EB:
4573 		if (cause == -1) {
4574 			/* other causes */
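			/*
			 * IVAR_MISC holds two 8-bit entries; bit 0 of the
			 * entry number selects the byte within the register.
			 */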
4575 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4576 			index = (intr_alloc_entry & 1) * 8;
4577 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4578 			ivar &= ~(0xFF << index);
4579 			ivar |= (msix_vector << index);
4580 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4581 		} else {
4582 			/* tx or rx causes */
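			/*
			 * Each 82599 IVAR register maps a pair of queues:
			 * 8 bits per entry, with rx entries at bits 0 and 16
			 * and tx entries at bits 8 and 24.
			 */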
4583 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4584 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4585 			ivar = IXGBE_READ_REG(hw,
4586 			    IXGBE_IVAR(intr_alloc_entry >> 1));
4587 			ivar &= ~(0xFF << index);
4588 			ivar |= (msix_vector << index);
4589 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4590 			    ivar);
4591 		}
4592 		break;
4593 	default:
4594 		break;
4595 	}
4596 }
4597 
4598 /*
4599  * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
4600  * the given interrupt vector allocation register (IVAR).
4601  * cause:
4602  *   -1 : other cause
4603  *    0 : rx
4604  *    1 : tx
4605  */
4606 static void
4607 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4608 {
4609 	struct ixgbe_hw *hw = &ixgbe->hw;
4610 	u32 ivar, index;
4611 
4612 	switch (hw->mac.type) {
4613 	case ixgbe_mac_82598EB:
4614 		if (cause == -1) {
4615 			cause = 0;
4616 		}
4617 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4618 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4619 		ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4620 		    (intr_alloc_entry & 0x3)));
4621 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4622 		break;
4623 	case ixgbe_mac_82599EB:
4624 		if (cause == -1) {
4625 			/* other causes */
4626 			index = (intr_alloc_entry & 1) * 8;
4627 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4628 			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4629 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4630 		} else {
4631 			/* tx or rx causes */
4632 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4633 			ivar = IXGBE_READ_REG(hw,
4634 			    IXGBE_IVAR(intr_alloc_entry >> 1));
4635 			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4636 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4637 			    ivar);
4638 		}
4639 		break;
4640 	default:
4641 		break;
4642 	}
4643 }
4644 
4645 /*
4646  * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
4647  * the given interrupt vector allocation register (IVAR).
4648  * cause:
4649  *   -1 : other cause
4650  *    0 : rx
4651  *    1 : tx
4652  */
4653 static void
4654 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4655 {
4656 	struct ixgbe_hw *hw = &ixgbe->hw;
4657 	u32 ivar, index;
4658 
4659 	switch (hw->mac.type) {
4660 	case ixgbe_mac_82598EB:
4661 		if (cause == -1) {
4662 			cause = 0;
4663 		}
4664 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4665 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4666 		ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
4667 		    (intr_alloc_entry & 0x3)));
4668 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4669 		break;
4670 	case ixgbe_mac_82599EB:
4671 		if (cause == -1) {
4672 			/* other causes */
4673 			index = (intr_alloc_entry & 1) * 8;
4674 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4675 			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4676 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4677 		} else {
4678 			/* tx or rx causes */
4679 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4680 			ivar = IXGBE_READ_REG(hw,
4681 			    IXGBE_IVAR(intr_alloc_entry >> 1));
4682 			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4683 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4684 			    ivar);
4685 		}
4686 		break;
4687 	default:
4688 		break;
4689 	}
4690 }
4691 
4692 /*
4693  * Convert the driver-maintained rx ring index to the rx ring index
4694  * used by the hardware.
4695  */
4696 static uint32_t
4697 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
4698 {
4700 	struct ixgbe_hw *hw = &ixgbe->hw;
4701 	uint32_t rx_ring_per_group, hw_rx_index;
4702 
4703 	if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
4704 	    ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
4705 		return (sw_rx_index);
4706 	} else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
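		/*
		 * The 82599 in VMDq mode uses 64 pools of 2 queues each,
		 * and only the first queue of each pool is used, so
		 * software ring N maps to hardware ring 2N.
		 */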
4707 		if (hw->mac.type == ixgbe_mac_82598EB) {
4708 			return (sw_rx_index);
4709 		} else if (hw->mac.type == ixgbe_mac_82599EB) {
4710 			return (sw_rx_index * 2);
4711 		}
4712 	} else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
4713 		rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4714 
4715 		if (hw->mac.type == ixgbe_mac_82598EB) {
4716 			hw_rx_index = (sw_rx_index / rx_ring_per_group) *
4717 			    16 + (sw_rx_index % rx_ring_per_group);
4718 			return (hw_rx_index);
4719 		} else if (hw->mac.type == ixgbe_mac_82599EB) {
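			/*
			 * With more than 32 groups the 82599 provides 2
			 * queues per pool; otherwise it provides 4, which
			 * sets the stride between groups.
			 */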
4720 			if (ixgbe->num_rx_groups > 32) {
4721 				hw_rx_index = (sw_rx_index /
4722 				    rx_ring_per_group) * 2 +
4723 				    (sw_rx_index % rx_ring_per_group);
4724 			} else {
4725 				hw_rx_index = (sw_rx_index /
4726 				    rx_ring_per_group) * 4 +
4727 				    (sw_rx_index % rx_ring_per_group);
4728 			}
4729 			return (hw_rx_index);
4730 		}
4731 	}
4732 
4733 	/*
4734 	 * Should never be reached; just to keep the compiler happy.
4735 	 */
4736 	return (sw_rx_index);
4737 }
4738 
4739 /*
4740  * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
4741  *
4742  * For MSI-X, here will map rx interrupt, tx interrupt and other interrupt
4743  * to vector[0 .. (intr_cnt - 1)].
4744  */
4745 static int
4746 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
4747 {
4748 	int i, vector = 0;
4749 
4750 	/* initialize vector map */
4751 	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
4752 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4753 		ixgbe->vect_map[i].ixgbe = ixgbe;
4754 	}
4755 
4756 	/*
4757 	 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
4758 	 * tx rings[0] on RTxQ[1].
4759 	 */
4760 	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
4761 		ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
4762 		ixgbe_map_txring_to_vector(ixgbe, 0, 1);
4763 		return (IXGBE_SUCCESS);
4764 	}
4765 
4766 	/*
4767 	 * Interrupts/vectors mapping for MSI-X
4768 	 */
4769 
4770 	/*
4771 	 * Map other interrupt to vector 0,
4772 	 * Set bit in map and count the bits set.
4773 	 */
4774 	BT_SET(ixgbe->vect_map[vector].other_map, 0);
4775 	ixgbe->vect_map[vector].other_cnt++;
4776 
4777 	/*
4778 	 * Map rx ring interrupts to vectors, round-robin across all vectors
4779 	 */
4780 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
4781 		ixgbe_map_rxring_to_vector(ixgbe, i, vector);
4782 		vector = (vector + 1) % ixgbe->intr_cnt;
4783 	}
4784 
4785 	/*
4786 	 * Map tx ring interrupts to vectors, continuing the round-robin
4787 	 */
4788 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
4789 		ixgbe_map_txring_to_vector(ixgbe, i, vector);
4790 		vector = (vector + 1) % ixgbe->intr_cnt;
4791 	}
4792 
4793 	return (IXGBE_SUCCESS);
4794 }
4795 
4796 /*
4797  * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
4798  *
4799  * This relies on ring/vector mapping already set up in the
4800  * vect_map[] structures.
4801  */
4802 static void
4803 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
4804 {
4805 	struct ixgbe_hw *hw = &ixgbe->hw;
4806 	ixgbe_intr_vector_t *vect;	/* vector bitmap */
4807 	int r_idx;	/* ring index */
4808 	int v_idx;	/* vector index */
4809 	uint32_t hw_index;
4810 
4811 	/*
4812 	 * Clear any previous entries
4813 	 */
4814 	switch (hw->mac.type) {
4815 	case ixgbe_mac_82598EB:
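		/* The 82598 implements 25 IVAR registers */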
4816 		for (v_idx = 0; v_idx < 25; v_idx++)
4817 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4818 
4819 		break;
4820 	case ixgbe_mac_82599EB:
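		/* The 82599 implements 64 IVAR registers plus IVAR_MISC */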
4821 		for (v_idx = 0; v_idx < 64; v_idx++)
4822 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4823 		IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
4824 
4825 		break;
4826 	default:
4827 		break;
4828 	}
4829 
4830 	/*
4831 	 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
4832 	 * tx rings[0] will use RTxQ[1].
4833 	 */
4834 	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
4835 		ixgbe_setup_ivar(ixgbe, 0, 0, 0);
4836 		ixgbe_setup_ivar(ixgbe, 0, 1, 1);
4837 		return;
4838 	}
4839 
4840 	/*
4841 	 * For MSI-X interrupt, "Other" is always on vector[0].
4842 	 */
4843 	ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);
4844 
4845 	/*
4846 	 * For each interrupt vector, populate the IVAR table
4847 	 */
4848 	for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
4849 		vect = &ixgbe->vect_map[v_idx];
4850 
4851 		/*
4852 		 * For each rx ring bit set
4853 		 */
4854 		r_idx = bt_getlowbit(vect->rx_map, 0,
4855 		    (ixgbe->num_rx_rings - 1));
4856 
4857 		while (r_idx >= 0) {
4858 			hw_index = ixgbe->rx_rings[r_idx].hw_index;
4859 			ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0);
4860 			r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
4861 			    (ixgbe->num_rx_rings - 1));
4862 		}
4863 
4864 		/*
4865 		 * For each tx ring bit set
4866 		 */
4867 		r_idx = bt_getlowbit(vect->tx_map, 0,
4868 		    (ixgbe->num_tx_rings - 1));
4869 
4870 		while (r_idx >= 0) {
4871 			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
4872 			r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
4873 			    (ixgbe->num_tx_rings - 1));
4874 		}
4875 	}
4876 }
4877 
4878 /*
4879  * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
4880  */
4881 static void
4882 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
4883 {
4884 	int i;
4885 	int rc;
4886 
4887 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4888 		rc = ddi_intr_remove_handler(ixgbe->htable[i]);
4889 		if (rc != DDI_SUCCESS) {
4890 			IXGBE_DEBUGLOG_1(ixgbe,
4891 			    "Remove intr handler failed: %d", rc);
4892 		}
4893 	}
4894 }
4895 
4896 /*
4897  * ixgbe_rem_intrs - Remove the allocated interrupts.
4898  */
4899 static void
4900 ixgbe_rem_intrs(ixgbe_t *ixgbe)
4901 {
4902 	int i;
4903 	int rc;
4904 
4905 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4906 		rc = ddi_intr_free(ixgbe->htable[i]);
4907 		if (rc != DDI_SUCCESS) {
4908 			IXGBE_DEBUGLOG_1(ixgbe,
4909 			    "Free intr failed: %d", rc);
4910 		}
4911 	}
4912 
4913 	kmem_free(ixgbe->htable, ixgbe->intr_size);
4914 	ixgbe->htable = NULL;
4915 }
4916 
4917 /*
4918  * ixgbe_enable_intrs - Enable all the ddi interrupts.
4919  */
4920 static int
4921 ixgbe_enable_intrs(ixgbe_t *ixgbe)
4922 {
4923 	int i;
4924 	int rc;
4925 
4926 	/*
4927 	 * Enable interrupts
4928 	 */
4929 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
4930 		/*
4931 		 * Call ddi_intr_block_enable() for MSI/MSI-X
4932 		 */
4933 		rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
4934 		if (rc != DDI_SUCCESS) {
4935 			ixgbe_log(ixgbe,
4936 			    "Enable block intr failed: %d", rc);
4937 			return (IXGBE_FAILURE);
4938 		}
4939 	} else {
4940 		/*
4941 		 * Call ddi_intr_enable() for legacy/MSI non-block enable
4942 		 */
4943 		for (i = 0; i < ixgbe->intr_cnt; i++) {
4944 			rc = ddi_intr_enable(ixgbe->htable[i]);
4945 			if (rc != DDI_SUCCESS) {
4946 				ixgbe_log(ixgbe,
4947 				    "Enable intr failed: %d", rc);
4948 				return (IXGBE_FAILURE);
4949 			}
4950 		}
4951 	}
4952 
4953 	return (IXGBE_SUCCESS);
4954 }
4955 
4956 /*
4957  * ixgbe_disable_intrs - Disable all the interrupts.
4958  */
4959 static int
4960 ixgbe_disable_intrs(ixgbe_t *ixgbe)
4961 {
4962 	int i;
4963 	int rc;
4964 
4965 	/*
4966 	 * Disable all interrupts
4967 	 */
4968 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
4969 		rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
4970 		if (rc != DDI_SUCCESS) {
4971 			ixgbe_log(ixgbe,
4972 			    "Disable block intr failed: %d", rc);
4973 			return (IXGBE_FAILURE);
4974 		}
4975 	} else {
4976 		for (i = 0; i < ixgbe->intr_cnt; i++) {
4977 			rc = ddi_intr_disable(ixgbe->htable[i]);
4978 			if (rc != DDI_SUCCESS) {
4979 				ixgbe_log(ixgbe,
4980 				    "Disable intr failed: %d", rc);
4981 				return (IXGBE_FAILURE);
4982 			}
4983 		}
4984 	}
4985 
4986 	return (IXGBE_SUCCESS);
4987 }
4988 
4989 /*
4990  * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
4991  */
4992 static void
4993 ixgbe_get_hw_state(ixgbe_t *ixgbe)
4994 {
4995 	struct ixgbe_hw *hw = &ixgbe->hw;
4996 	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
4997 	boolean_t link_up = B_FALSE;
4998 	uint32_t pcs1g_anlp = 0;
4999 	uint32_t pcs1g_ana = 0;
5000 
5001 	ASSERT(mutex_owned(&ixgbe->gen_lock));
5002 	ixgbe->param_lp_1000fdx_cap = 0;
5003 	ixgbe->param_lp_100fdx_cap  = 0;
5004 
5005 	/* check for link, don't wait */
5006 	(void) ixgbe_check_link(hw, &speed, &link_up, false);
5007 	if (link_up) {
5008 		pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
5009 		pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
5010 
5011 		ixgbe->param_lp_1000fdx_cap =
5012 		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5013 		ixgbe->param_lp_100fdx_cap =
5014 		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5015 	}
5016 
5017 	ixgbe->param_adv_1000fdx_cap =
5018 	    (pcs1g_ana & IXGBE_PCS1GANA_FDC)  ? 1 : 0;
5019 	ixgbe->param_adv_100fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC)  ? 1 : 0;
5020 }
5021 
5022 /*
5023  * ixgbe_get_driver_control - Notify that driver is in control of device.
5024  */
5025 static void
5026 ixgbe_get_driver_control(struct ixgbe_hw *hw)
5027 {
5028 	uint32_t ctrl_ext;
5029 
5030 	/*
5031 	 * Notify firmware that driver is in control of device
5032 	 */
5033 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5034 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
5035 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5036 }
5037 
5038 /*
5039  * ixgbe_release_driver_control - Notify that driver is no longer in control
5040  * of device.
5041  */
5042 static void
5043 ixgbe_release_driver_control(struct ixgbe_hw *hw)
5044 {
5045 	uint32_t ctrl_ext;
5046 
5047 	/*
5048 	 * Notify firmware that driver is no longer in control of device
5049 	 */
5050 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5051 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
5052 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5053 }
5054 
5055 /*
5056  * ixgbe_atomic_reserve - Atomic decrease operation.
5057  */
5058 int
5059 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
5060 {
5061 	uint32_t oldval;
5062 	uint32_t newval;
5063 
5064 	/*
5065 	 * ATOMICALLY
5066 	 */
5067 	do {
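		/*
		 * Retry the compare-and-swap until *count_p is unchanged
		 * between the read and the update.
		 */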
5068 		oldval = *count_p;
5069 		if (oldval < n)
5070 			return (-1);
5071 		newval = oldval - n;
5072 	} while (atomic_cas_32(count_p, oldval, newval) != oldval);
5073 
5074 	return (newval);
5075 }
5076 
5077 /*
5078  * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
5079  */
5080 static uint8_t *
5081 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
5082 {
5083 	uint8_t *addr = *upd_ptr;
5084 	uint8_t *new_ptr;
5085 
5086 	_NOTE(ARGUNUSED(hw));
5087 	_NOTE(ARGUNUSED(vmdq));
5088 
5089 	new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
5090 	*upd_ptr = new_ptr;
5091 	return (addr);
5092 }
5093 
5094 /*
5095  * FMA support
5096  */
5097 int
5098 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
5099 {
5100 	ddi_fm_error_t de;
5101 
5102 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5103 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
5104 	return (de.fme_status);
5105 }
5106 
5107 int
5108 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
5109 {
5110 	ddi_fm_error_t de;
5111 
5112 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
5113 	return (de.fme_status);
5114 }
5115 
5116 /*
5117  * ixgbe_fm_error_cb - The IO fault service error handling callback function.
5118  */
5119 static int
5120 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
5121 {
5122 	_NOTE(ARGUNUSED(impl_data));
5123 	/*
5124 	 * As the driver can always deal with an error in any DMA or
5125 	 * access handle, we can just return the fme_status value.
5126 	 */
5127 	pci_ereport_post(dip, err, NULL);
5128 	return (err->fme_status);
5129 }
5130 
5131 static void
5132 ixgbe_fm_init(ixgbe_t *ixgbe)
5133 {
5134 	ddi_iblock_cookie_t iblk;
5135 	int fma_dma_flag;
5136 
5137 	/*
5138 	 * Only register with IO Fault Services if we have some capability
5139 	 */
5140 	if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
5141 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
5142 	} else {
5143 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
5144 	}
5145 
5146 	if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
5147 		fma_dma_flag = 1;
5148 	} else {
5149 		fma_dma_flag = 0;
5150 	}
5151 
5152 	ixgbe_set_fma_flags(fma_dma_flag);
5153 
5154 	if (ixgbe->fm_capabilities) {
5156 		/*
5157 		 * Register capabilities with IO Fault Services
5158 		 */
5159 		ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
5160 
5161 		/*
5162 		 * Initialize pci ereport capabilities if ereport capable
5163 		 */
5164 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5165 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5166 			pci_ereport_setup(ixgbe->dip);
5167 
5168 		/*
5169 		 * Register error callback if error callback capable
5170 		 */
5171 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5172 			ddi_fm_handler_register(ixgbe->dip,
5173 			    ixgbe_fm_error_cb, (void*) ixgbe);
5174 	}
5175 }
5176 
5177 static void
5178 ixgbe_fm_fini(ixgbe_t *ixgbe)
5179 {
5180 	/*
5181 	 * Only unregister FMA capabilities if they are registered
5182 	 */
5183 	if (ixgbe->fm_capabilities) {
5185 		/*
5186 		 * Release any resources allocated by pci_ereport_setup()
5187 		 */
5188 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5189 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5190 			pci_ereport_teardown(ixgbe->dip);
5191 
5192 		/*
5193 		 * Un-register error callback if error callback capable
5194 		 */
5195 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5196 			ddi_fm_handler_unregister(ixgbe->dip);
5197 
5198 		/*
5199 		 * Unregister from IO Fault Service
5200 		 */
5201 		ddi_fm_fini(ixgbe->dip);
5202 	}
5203 }
5204 
5205 void
5206 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
5207 {
5208 	uint64_t ena;
5209 	char buf[FM_MAX_CLASS];
5210 
5211 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
5212 	ena = fm_ena_generate(0, FM_ENA_FMT1);
5213 	if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
5214 		ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
5215 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
5216 	}
5217 }
5218 
5219 static int
5220 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
5221 {
5222 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
5223 
5224 	mutex_enter(&rx_ring->rx_lock);
5225 	rx_ring->ring_gen_num = mr_gen_num;
5226 	mutex_exit(&rx_ring->rx_lock);
5227 	return (0);
5228 }
5229 
5230 /*
5231  * Get the global ring index from a ring index within a group.
5232  */
5233 static int
5234 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex)
5235 {
5236 	ixgbe_rx_ring_t *rx_ring;
5237 	int i;
5238 
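	/*
	 * Walk the global ring list, counting down rindex for each ring
	 * that belongs to the given group; the global index is found
	 * once rindex goes negative.
	 */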
5239 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
5240 		rx_ring = &ixgbe->rx_rings[i];
5241 		if (rx_ring->group_index == gindex)
5242 			rindex--;
5243 		if (rindex < 0)
5244 			return (i);
5245 	}
5246 
5247 	return (-1);
5248 }
5249 
5250 /*
5251  * Callback function for the MAC layer to register all rings.
5252  */
5253 /* ARGSUSED */
5254 void
5255 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
5256     const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
5257 {
5258 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
5259 	mac_intr_t *mintr = &infop->mri_intr;
5260 
5261 	switch (rtype) {
5262 	case MAC_RING_TYPE_RX: {
5263 		/*
5264 		 * 'ring_index' is the ring index within the group.
5265 		 * Need to get the global ring index by searching in groups.
5266 		 */
5267 		int global_ring_index = ixgbe_get_rx_ring_index(
5268 		    ixgbe, group_index, ring_index);
5269 
5270 		ASSERT(global_ring_index >= 0);
5271 
5272 		ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index];
5273 		rx_ring->ring_handle = rh;
5274 
5275 		infop->mri_driver = (mac_ring_driver_t)rx_ring;
5276 		infop->mri_start = ixgbe_ring_start;
5277 		infop->mri_stop = NULL;
5278 		infop->mri_poll = ixgbe_ring_rx_poll;
5279 		infop->mri_stat = ixgbe_rx_ring_stat;
5280 
5281 		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
5282 		mintr->mi_enable = ixgbe_rx_ring_intr_enable;
5283 		mintr->mi_disable = ixgbe_rx_ring_intr_disable;
5284 		if (ixgbe->intr_type &
5285 		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
5286 			mintr->mi_ddi_handle =
5287 			    ixgbe->htable[rx_ring->intr_vector];
5288 		}
5289 
5290 		break;
5291 	}
5292 	case MAC_RING_TYPE_TX: {
5293 		ASSERT(group_index == -1);
5294 		ASSERT(ring_index < ixgbe->num_tx_rings);
5295 
5296 		ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
5297 		tx_ring->ring_handle = rh;
5298 
5299 		infop->mri_driver = (mac_ring_driver_t)tx_ring;
5300 		infop->mri_start = NULL;
5301 		infop->mri_stop = NULL;
5302 		infop->mri_tx = ixgbe_ring_tx;
5303 		infop->mri_stat = ixgbe_tx_ring_stat;
5304 		if (ixgbe->intr_type &
5305 		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
5306 			mintr->mi_ddi_handle =
5307 			    ixgbe->htable[tx_ring->intr_vector];
5308 		}
5309 		break;
5310 	}
5311 	default:
5312 		break;
5313 	}
5314 }
5315 
5316 /*
5317  * Callback function for the MAC layer to register all groups.
5318  */
5319 void
5320 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
5321     mac_group_info_t *infop, mac_group_handle_t gh)
5322 {
5323 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
5324 
5325 	switch (rtype) {
5326 	case MAC_RING_TYPE_RX: {
5327 		ixgbe_rx_group_t *rx_group;
5328 
5329 		rx_group = &ixgbe->rx_groups[index];
5330 		rx_group->group_handle = gh;
5331 
5332 		infop->mgi_driver = (mac_group_driver_t)rx_group;
5333 		infop->mgi_start = NULL;
5334 		infop->mgi_stop = NULL;
5335 		infop->mgi_addmac = ixgbe_addmac;
5336 		infop->mgi_remmac = ixgbe_remmac;
5337 		infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
5338 
5339 		break;
5340 	}
5341 	case MAC_RING_TYPE_TX:
5342 		break;
5343 	default:
5344 		break;
5345 	}
5346 }
5347 
5348 /*
5349  * Enable the interrupt on the specified rx ring.
5350  */
5351 int
5352 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
5353 {
5354 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
5355 	ixgbe_t *ixgbe = rx_ring->ixgbe;
5356 	int r_idx = rx_ring->index;
5357 	int hw_r_idx = rx_ring->hw_index;
5358 	int v_idx = rx_ring->intr_vector;
5359 
5360 	mutex_enter(&ixgbe->gen_lock);
5361 	if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
5362 		mutex_exit(&ixgbe->gen_lock);
5363 		/*
5364 		 * Simply return 0.
5365 		 * Interrupts are being adjusted. ixgbe_intr_adjust()
5366 		 * will eventually re-enable the interrupt when it's
5367 		 * done with the adjustment.
5368 		 */
5369 		return (0);
5370 	}
5371 
5372 	/*
5373 	 * Enable the interrupt by setting the VAL bit of the given
5374 	 * interrupt vector allocation register (IVAR).
5375 	 */
5376 	ixgbe_enable_ivar(ixgbe, hw_r_idx, 0);
5377 
5378 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
5379 
5380 	/*
5381 	 * Trigger a Rx interrupt on this ring
5382 	 */
5383 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
5384 	IXGBE_WRITE_FLUSH(&ixgbe->hw);
5385 
5386 	mutex_exit(&ixgbe->gen_lock);
5387 
5388 	return (0);
5389 }
5390 
5391 /*
5392  * Disable the interrupt on the specified rx ring.
5393  */
5394 int
5395 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
5396 {
5397 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
5398 	ixgbe_t *ixgbe = rx_ring->ixgbe;
5399 	int r_idx = rx_ring->index;
5400 	int hw_r_idx = rx_ring->hw_index;
5401 	int v_idx = rx_ring->intr_vector;
5402 
5403 	mutex_enter(&ixgbe->gen_lock);
5404 	if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
5405 		mutex_exit(&ixgbe->gen_lock);
5406 		/*
5407 		 * Simply return 0.
5408 		 * In the rare case where an interrupt is being
5409 		 * disabled while interrupts are being adjusted,
5410 		 * we don't fail the operation. No interrupts will
5411 		 * be generated while they are adjusted, and
5412 		 * ixgbe_intr_adjust() will cause the interrupts
5413 		 * to be re-enabled once it completes. Note that
5414 		 * in this case, packets may be delivered to the
5415 		 * stack via interrupts before ixgbe_rx_ring_intr_enable()
5416 		 * is called again. This is acceptable since interrupt
5417 		 * adjustment is infrequent, and the stack will be
5418 		 * able to handle these packets.
5419 		 */
5420 		return (0);
5421 	}
5422 
5423 	/*
5424 	 * Disable the interrupt by clearing the VAL bit of the given
5425 	 * interrupt vector allocation register (IVAR).
5426 	 */
5427 	ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);
5428 
5429 	BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
5430 
5431 	mutex_exit(&ixgbe->gen_lock);
5432 
5433 	return (0);
5434 }
5435 
5436 /*
5437  * Add a mac address.
5438  */
5439 static int
5440 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
5441 {
5442 	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
5443 	ixgbe_t *ixgbe = rx_group->ixgbe;
5444 	struct ixgbe_hw *hw = &ixgbe->hw;
5445 	int slot, i;
5446 
5447 	mutex_enter(&ixgbe->gen_lock);
5448 
5449 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
5450 		mutex_exit(&ixgbe->gen_lock);
5451 		return (ECANCELED);
5452 	}
5453 
5454 	if (ixgbe->unicst_avail == 0) {
5455 		/* no slots available */
5456 		mutex_exit(&ixgbe->gen_lock);
5457 		return (ENOSPC);
5458 	}
5459 
5460 	/*
5461 	 * The first ixgbe->num_rx_groups slots are reserved, one for each
5462 	 * group.  The remaining slots are shared by all groups.  When
5463 	 * adding a MAC address, the group's reserved slot is checked
5464 	 * first, then the shared slots are searched.
5465 	 */
5466 	slot = -1;
5467 	if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) {
5468 		for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) {
5469 			if (ixgbe->unicst_addr[i].mac.set == 0) {
5470 				slot = i;
5471 				break;
5472 			}
5473 		}
5474 	} else {
5475 		slot = rx_group->index;
5476 	}
5477 
5478 	if (slot == -1) {
5479 		/* no slots available */
5480 		mutex_exit(&ixgbe->gen_lock);
5481 		return (ENOSPC);
5482 	}
5483 
5484 	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
5485 	(void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr,
5486 	    rx_group->index, IXGBE_RAH_AV);
5487 	ixgbe->unicst_addr[slot].mac.set = 1;
5488 	ixgbe->unicst_addr[slot].mac.group_index = rx_group->index;
5489 	ixgbe->unicst_avail--;
5490 
5491 	mutex_exit(&ixgbe->gen_lock);
5492 
5493 	return (0);
5494 }
5495 
5496 /*
5497  * Remove a mac address.
5498  */
5499 static int
5500 ixgbe_remmac(void *arg, const uint8_t *mac_addr)
5501 {
5502 	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
5503 	ixgbe_t *ixgbe = rx_group->ixgbe;
5504 	struct ixgbe_hw *hw = &ixgbe->hw;
5505 	int slot;
5506 
5507 	mutex_enter(&ixgbe->gen_lock);
5508 
5509 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
5510 		mutex_exit(&ixgbe->gen_lock);
5511 		return (ECANCELED);
5512 	}
5513 
5514 	slot = ixgbe_unicst_find(ixgbe, mac_addr);
5515 	if (slot == -1) {
5516 		mutex_exit(&ixgbe->gen_lock);
5517 		return (EINVAL);
5518 	}
5519 
5520 	if (ixgbe->unicst_addr[slot].mac.set == 0) {
5521 		mutex_exit(&ixgbe->gen_lock);
5522 		return (EINVAL);
5523 	}
5524 
5525 	bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
5526 	(void) ixgbe_clear_rar(hw, slot);
5527 	ixgbe->unicst_addr[slot].mac.set = 0;
5528 	ixgbe->unicst_avail++;
5529 
5530 	mutex_exit(&ixgbe->gen_lock);
5531 
5532 	return (0);
5533 }
5534