xref: /titanic_50/usr/src/uts/common/io/ixgbe/ixgbe_main.c (revision 76fa7285ab719293f713601f68497677a82ce6f9)
1 /*
2  * CDDL HEADER START
3  *
4  * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
5  * The contents of this file are subject to the terms of the
6  * Common Development and Distribution License (the "License").
7  * You may not use this file except in compliance with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 
23 /*
24  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
25  * Use is subject to license terms.
26  */
27 
28 #include "ixgbe_sw.h"
29 
30 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
31 static char ixgbe_version[] = "driver version 1.1.4";
32 
33 /*
34  * Local function prototypes
35  */
36 static int ixgbe_register_mac(ixgbe_t *);
37 static int ixgbe_identify_hardware(ixgbe_t *);
38 static int ixgbe_regs_map(ixgbe_t *);
39 static void ixgbe_init_properties(ixgbe_t *);
40 static int ixgbe_init_driver_settings(ixgbe_t *);
41 static void ixgbe_init_locks(ixgbe_t *);
42 static void ixgbe_destroy_locks(ixgbe_t *);
43 static int ixgbe_init(ixgbe_t *);
44 static int ixgbe_chip_start(ixgbe_t *);
45 static void ixgbe_chip_stop(ixgbe_t *);
46 static int ixgbe_reset(ixgbe_t *);
47 static void ixgbe_tx_clean(ixgbe_t *);
48 static boolean_t ixgbe_tx_drain(ixgbe_t *);
49 static boolean_t ixgbe_rx_drain(ixgbe_t *);
50 static int ixgbe_alloc_rings(ixgbe_t *);
51 static void ixgbe_free_rings(ixgbe_t *);
52 static int ixgbe_alloc_rx_data(ixgbe_t *);
53 static void ixgbe_free_rx_data(ixgbe_t *);
54 static void ixgbe_setup_rings(ixgbe_t *);
55 static void ixgbe_setup_rx(ixgbe_t *);
56 static void ixgbe_setup_tx(ixgbe_t *);
57 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
58 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
59 static void ixgbe_setup_rss(ixgbe_t *);
60 static void ixgbe_init_unicst(ixgbe_t *);
61 static int ixgbe_unicst_set(ixgbe_t *, const uint8_t *, int);
62 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
63 static void ixgbe_setup_multicst(ixgbe_t *);
64 static void ixgbe_get_hw_state(ixgbe_t *);
65 static void ixgbe_get_conf(ixgbe_t *);
66 static void ixgbe_init_params(ixgbe_t *);
67 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
68 static void ixgbe_driver_link_check(ixgbe_t *);
69 static void ixgbe_sfp_check(void *);
70 static void ixgbe_link_timer(void *);
71 static void ixgbe_local_timer(void *);
72 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
73 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
74 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
75 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
76 static boolean_t is_valid_mac_addr(uint8_t *);
77 static boolean_t ixgbe_stall_check(ixgbe_t *);
78 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
79 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
80 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
81 static int ixgbe_alloc_intrs(ixgbe_t *);
82 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
83 static int ixgbe_add_intr_handlers(ixgbe_t *);
84 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
85 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
86 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
87 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
88 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
89 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
90 static void ixgbe_setup_adapter_vector(ixgbe_t *);
91 static void ixgbe_rem_intr_handlers(ixgbe_t *);
92 static void ixgbe_rem_intrs(ixgbe_t *);
93 static int ixgbe_enable_intrs(ixgbe_t *);
94 static int ixgbe_disable_intrs(ixgbe_t *);
95 static uint_t ixgbe_intr_legacy(void *, void *);
96 static uint_t ixgbe_intr_msi(void *, void *);
97 static uint_t ixgbe_intr_msix(void *, void *);
98 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
99 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
100 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
101 static void ixgbe_get_driver_control(struct ixgbe_hw *);
102 static int ixgbe_addmac(void *, const uint8_t *);
103 static int ixgbe_remmac(void *, const uint8_t *);
104 static void ixgbe_release_driver_control(struct ixgbe_hw *);
105 
106 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
107 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
108 static int ixgbe_resume(dev_info_t *);
109 static int ixgbe_suspend(dev_info_t *);
110 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
111 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
112 
113 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
114     const void *impl_data);
115 static void ixgbe_fm_init(ixgbe_t *);
116 static void ixgbe_fm_fini(ixgbe_t *);
117 
118 mac_priv_prop_t ixgbe_priv_props[] = {
119 	{"_tx_copy_thresh", MAC_PROP_PERM_RW},
120 	{"_tx_recycle_thresh", MAC_PROP_PERM_RW},
121 	{"_tx_overload_thresh", MAC_PROP_PERM_RW},
122 	{"_tx_resched_thresh", MAC_PROP_PERM_RW},
123 	{"_rx_copy_thresh", MAC_PROP_PERM_RW},
124 	{"_rx_limit_per_intr", MAC_PROP_PERM_RW},
125 	{"_intr_throttling", MAC_PROP_PERM_RW},
126 	{"_adv_pause_cap", MAC_PROP_PERM_READ},
127 	{"_adv_asym_pause_cap", MAC_PROP_PERM_READ}
128 };
129 
130 #define	IXGBE_MAX_PRIV_PROPS \
131 	(sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))
132 
133 static struct cb_ops ixgbe_cb_ops = {
134 	nulldev,		/* cb_open */
135 	nulldev,		/* cb_close */
136 	nodev,			/* cb_strategy */
137 	nodev,			/* cb_print */
138 	nodev,			/* cb_dump */
139 	nodev,			/* cb_read */
140 	nodev,			/* cb_write */
141 	nodev,			/* cb_ioctl */
142 	nodev,			/* cb_devmap */
143 	nodev,			/* cb_mmap */
144 	nodev,			/* cb_segmap */
145 	nochpoll,		/* cb_chpoll */
146 	ddi_prop_op,		/* cb_prop_op */
147 	NULL,			/* cb_stream */
148 	D_MP | D_HOTPLUG,	/* cb_flag */
149 	CB_REV,			/* cb_rev */
150 	nodev,			/* cb_aread */
151 	nodev			/* cb_awrite */
152 };
153 
154 static struct dev_ops ixgbe_dev_ops = {
155 	DEVO_REV,		/* devo_rev */
156 	0,			/* devo_refcnt */
157 	NULL,			/* devo_getinfo */
158 	nulldev,		/* devo_identify */
159 	nulldev,		/* devo_probe */
160 	ixgbe_attach,		/* devo_attach */
161 	ixgbe_detach,		/* devo_detach */
162 	nodev,			/* devo_reset */
163 	&ixgbe_cb_ops,		/* devo_cb_ops */
164 	NULL,			/* devo_bus_ops */
165 	ddi_power,		/* devo_power */
166 	ddi_quiesce_not_supported,	/* devo_quiesce */
167 };
168 
169 static struct modldrv ixgbe_modldrv = {
170 	&mod_driverops,		/* Type of module.  This one is a driver */
171 	ixgbe_ident,		/* Description string */
172 	&ixgbe_dev_ops		/* driver ops */
173 };
174 
175 static struct modlinkage ixgbe_modlinkage = {
176 	MODREV_1, &ixgbe_modldrv, NULL
177 };
178 
179 /*
180  * Access attributes for register mapping
181  */
182 ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
183 	DDI_DEVICE_ATTR_V1,
184 	DDI_STRUCTURE_LE_ACC,
185 	DDI_STRICTORDER_ACC,
186 	DDI_FLAGERR_ACC
187 };
188 
189 /*
190  * Loopback property
191  */
192 static lb_property_t lb_normal = {
193 	normal,	"normal", IXGBE_LB_NONE
194 };
195 
196 static lb_property_t lb_mac = {
197 	internal, "MAC", IXGBE_LB_INTERNAL_MAC
198 };
199 
200 static lb_property_t lb_external = {
201 	external, "External", IXGBE_LB_EXTERNAL
202 };
203 
204 #define	IXGBE_M_CALLBACK_FLAGS \
205 	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
206 
207 static mac_callbacks_t ixgbe_m_callbacks = {
208 	IXGBE_M_CALLBACK_FLAGS,
209 	ixgbe_m_stat,
210 	ixgbe_m_start,
211 	ixgbe_m_stop,
212 	ixgbe_m_promisc,
213 	ixgbe_m_multicst,
214 	NULL,
215 	NULL,
216 	ixgbe_m_ioctl,
217 	ixgbe_m_getcapab,
218 	NULL,
219 	NULL,
220 	ixgbe_m_setprop,
221 	ixgbe_m_getprop
222 };
223 
224 /*
225  * Initialize capabilities of each supported adapter type
226  */
227 static adapter_info_t ixgbe_82598eb_cap = {
228 	64,		/* maximum number of rx queues */
229 	1,		/* minimum number of rx queues */
230 	8,		/* default number of rx queues */
231 	32,		/* maximum number of tx queues */
232 	1,		/* minimum number of tx queues */
233 	8,		/* default number of tx queues */
234 	16366,		/* maximum MTU size */
235 	0xFFFF,		/* maximum interrupt throttle rate */
236 	0,		/* minimum interrupt throttle rate */
237 	200,		/* default interrupt throttle rate */
238 	18,		/* maximum total msix vectors */
239 	16,		/* maximum number of ring vectors */
240 	2,		/* maximum number of other vectors */
241 	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
242 	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
243 	| IXGBE_FLAG_RSS_CAPABLE
244 	| IXGBE_FLAG_VMDQ_CAPABLE)
245 };
246 
247 static adapter_info_t ixgbe_82599eb_cap = {
248 	128,		/* maximum number of rx queues */
249 	1,		/* minimum number of rx queues */
250 	8,		/* default number of rx queues */
251 	128,		/* maximum number of tx queues */
252 	1,		/* minimum number of tx queues */
253 	8,		/* default number of tx queues */
254 	15500,		/* maximum MTU size */
255 	0xFF8,		/* maximum interrupt throttle rate */
256 	0,		/* minimum interrupt throttle rate */
257 	200,		/* default interrupt throttle rate */
258 	64,		/* maximum total msix vectors */
259 	16,		/* maximum number of ring vectors */
260 	2,		/* maximum number of other vectors */
261 	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
262 	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
263 	| IXGBE_FLAG_RSS_CAPABLE
264 	| IXGBE_FLAG_VMDQ_CAPABLE
265 	| IXGBE_FLAG_RSC_CAPABLE)
266 };
267 
268 /*
269  * Module Initialization Functions.
270  */
271 
272 int
273 _init(void)
274 {
275 	int status;
276 
277 	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
278 
279 	status = mod_install(&ixgbe_modlinkage);
280 
281 	if (status != DDI_SUCCESS) {
282 		mac_fini_ops(&ixgbe_dev_ops);
283 	}
284 
285 	return (status);
286 }
287 
288 int
289 _fini(void)
290 {
291 	int status;
292 
293 	status = mod_remove(&ixgbe_modlinkage);
294 
295 	if (status == DDI_SUCCESS) {
296 		mac_fini_ops(&ixgbe_dev_ops);
297 	}
298 
299 	return (status);
300 }
301 
302 int
303 _info(struct modinfo *modinfop)
304 {
305 	int status;
306 
307 	status = mod_info(&ixgbe_modlinkage, modinfop);
308 
309 	return (status);
310 }
311 
312 /*
313  * ixgbe_attach - Driver attach.
314  *
315  * This function is the device specific initialization entry
316  * point. This entry point is required and must be written.
317  * The DDI_ATTACH command must be provided in the attach entry
318  * point. When attach() is called with cmd set to DDI_ATTACH,
319  * all normal kernel services (such as kmem_alloc(9F)) are
320  * available for use by the driver.
321  *
322  * The attach() function will be called once for each instance
323  * of the device on the system with cmd set to DDI_ATTACH.
324  * Until attach() succeeds, the only driver entry points which
325  * may be called are open(9E) and getinfo(9E).
326  */
327 static int
328 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
329 {
330 	ixgbe_t *ixgbe;
331 	struct ixgbe_osdep *osdep;
332 	struct ixgbe_hw *hw;
333 	int instance;
334 	char taskqname[32];
335 
336 	/*
337 	 * Check the command and perform corresponding operations
338 	 */
339 	switch (cmd) {
340 	default:
341 		return (DDI_FAILURE);
342 
343 	case DDI_RESUME:
344 		return (ixgbe_resume(devinfo));
345 
346 	case DDI_ATTACH:
347 		break;
348 	}
349 
350 	/* Get the device instance */
351 	instance = ddi_get_instance(devinfo);
352 
353 	/* Allocate memory for the instance data structure */
354 	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
355 
356 	ixgbe->dip = devinfo;
357 	ixgbe->instance = instance;
358 
359 	hw = &ixgbe->hw;
360 	osdep = &ixgbe->osdep;
361 	hw->back = osdep;
362 	osdep->ixgbe = ixgbe;
363 
364 	/* Attach the instance pointer to the dev_info data structure */
365 	ddi_set_driver_private(devinfo, ixgbe);
366 
367 	/*
368 	 * Initialize for fma support
369 	 */
370 	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
371 	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
372 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
373 	ixgbe_fm_init(ixgbe);
374 	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
375 
376 	/*
377 	 * Map PCI config space registers
378 	 */
379 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
380 		ixgbe_error(ixgbe, "Failed to map PCI configurations");
381 		goto attach_fail;
382 	}
383 	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
384 
385 	/*
386 	 * Identify the chipset family
387 	 */
388 	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
389 		ixgbe_error(ixgbe, "Failed to identify hardware");
390 		goto attach_fail;
391 	}
392 
393 	/*
394 	 * Map device registers
395 	 */
396 	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
397 		ixgbe_error(ixgbe, "Failed to map device registers");
398 		goto attach_fail;
399 	}
400 	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
401 
402 	/*
403 	 * Initialize driver parameters
404 	 */
405 	ixgbe_init_properties(ixgbe);
406 	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
407 
408 	/*
409 	 * Allocate interrupts
410 	 */
411 	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
412 		ixgbe_error(ixgbe, "Failed to allocate interrupts");
413 		goto attach_fail;
414 	}
415 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
416 
417 	/*
418 	 * Allocate rx/tx rings based on the ring numbers.
419 	 * The actual numbers of rx/tx rings are decided by the number of
420 	 * allocated interrupt vectors, so we should allocate the rings after
421 	 * interrupts are allocated.
422 	 */
423 	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
424 		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
425 		goto attach_fail;
426 	}
427 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
428 
429 	/*
430 	 * Map rings to interrupt vectors
431 	 */
432 	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
433 		ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
434 		goto attach_fail;
435 	}
436 
437 	/*
438 	 * Add interrupt handlers
439 	 */
440 	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
441 		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
442 		goto attach_fail;
443 	}
444 	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
445 
446 	/*
447 	 * Create a taskq for sfp-change
448 	 */
449 	(void) sprintf(taskqname, "ixgbe%d_taskq", instance);
450 	if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
451 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
452 		ixgbe_error(ixgbe, "taskq_create failed");
453 		goto attach_fail;
454 	}
455 	ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;
456 
457 	/*
458 	 * Initialize driver parameters
459 	 */
460 	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
461 		ixgbe_error(ixgbe, "Failed to initialize driver settings");
462 		goto attach_fail;
463 	}
464 
465 	/*
466 	 * Initialize mutexes for this device.
467 	 * Do this before enabling the interrupt handler and
468 	 * registering the softint, to avoid the condition where an
469 	 * interrupt handler could try to use an uninitialized mutex.
470 	 */
471 	ixgbe_init_locks(ixgbe);
472 	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
473 
474 	/*
475 	 * Initialize chipset hardware
476 	 */
477 	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
478 		ixgbe_error(ixgbe, "Failed to initialize adapter");
479 		goto attach_fail;
480 	}
481 	ixgbe->link_check_complete = B_FALSE;
482 	ixgbe->link_check_hrtime = gethrtime() +
483 	    (IXGBE_LINK_UP_TIME * 100000000ULL);
484 	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
485 
486 	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
487 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
488 		goto attach_fail;
489 	}
490 
491 	/*
492 	 * Initialize statistics
493 	 */
494 	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
495 		ixgbe_error(ixgbe, "Failed to initialize statistics");
496 		goto attach_fail;
497 	}
498 	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
499 
500 	/*
501 	 * Register the driver to the MAC
502 	 */
503 	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
504 		ixgbe_error(ixgbe, "Failed to register MAC");
505 		goto attach_fail;
506 	}
507 	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
508 	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
509 
510 	ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
511 	    IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
512 	if (ixgbe->periodic_id == 0) {
513 		ixgbe_error(ixgbe, "Failed to add the link check timer");
514 		goto attach_fail;
515 	}
516 	ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;
517 
518 	/*
519 	 * Now that mutex locks are initialized, and the chip is also
520 	 * initialized, enable interrupts.
521 	 */
522 	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
523 		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
524 		goto attach_fail;
525 	}
526 	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
527 
528 	ixgbe_log(ixgbe, "%s, %s", ixgbe_ident, ixgbe_version);
529 	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);
530 
531 	return (DDI_SUCCESS);
532 
533 attach_fail:
534 	ixgbe_unconfigure(devinfo, ixgbe);
535 	return (DDI_FAILURE);
536 }
537 
538 /*
539  * ixgbe_detach - Driver detach.
540  *
541  * The detach() function is the complement of the attach routine.
542  * If cmd is set to DDI_DETACH, detach() is used to remove  the
543  * state  associated  with  a  given  instance of a device node
544  * prior to the removal of that instance from the system.
545  *
546  * The detach() function will be called once for each  instance
547  * of the device for which there has been a successful attach()
548  * once there are no longer  any  opens  on  the  device.
549  *
550  * Interrupts routine are disabled, All memory allocated by this
551  * driver are freed.
552  */
553 static int
554 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
555 {
556 	ixgbe_t *ixgbe;
557 
558 	/*
559 	 * Check detach command
560 	 */
561 	switch (cmd) {
562 	default:
563 		return (DDI_FAILURE);
564 
565 	case DDI_SUSPEND:
566 		return (ixgbe_suspend(devinfo));
567 
568 	case DDI_DETACH:
569 		break;
570 	}
571 
572 	/*
573 	 * Get the pointer to the driver private data structure
574 	 */
575 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
576 	if (ixgbe == NULL)
577 		return (DDI_FAILURE);
578 
579 	/*
580 	 * If the device is still running, it needs to be stopped first.
581 	 * This check is necessary because under some specific circumstances,
582 	 * the detach routine can be called without stopping the interface
583 	 * first.
584 	 */
585 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
586 		atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
587 		mutex_enter(&ixgbe->gen_lock);
588 		ixgbe_stop(ixgbe, B_TRUE);
589 		mutex_exit(&ixgbe->gen_lock);
590 		/* Disable and stop the watchdog timer */
591 		ixgbe_disable_watchdog_timer(ixgbe);
592 	}
593 
594 	/*
595 	 * Check if there are still rx buffers held by the upper layer.
596 	 * If so, fail the detach.
597 	 */
598 	if (!ixgbe_rx_drain(ixgbe))
599 		return (DDI_FAILURE);
600 
601 	/*
602 	 * Do the remaining unconfigure routines
603 	 */
604 	ixgbe_unconfigure(devinfo, ixgbe);
605 
606 	return (DDI_SUCCESS);
607 }
608 
609 static void
610 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
611 {
612 	/*
613 	 * Disable interrupt
614 	 */
615 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
616 		(void) ixgbe_disable_intrs(ixgbe);
617 	}
618 
619 	/*
620 	 * remove the link check timer
621 	 */
622 	if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
623 		if (ixgbe->periodic_id != NULL) {
624 			ddi_periodic_delete(ixgbe->periodic_id);
625 			ixgbe->periodic_id = NULL;
626 		}
627 	}
628 
629 	/*
630 	 * Unregister MAC
631 	 */
632 	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
633 		(void) mac_unregister(ixgbe->mac_hdl);
634 	}
635 
636 	/*
637 	 * Free statistics
638 	 */
639 	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
640 		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
641 	}
642 
643 	/*
644 	 * Remove interrupt handlers
645 	 */
646 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
647 		ixgbe_rem_intr_handlers(ixgbe);
648 	}
649 
650 	/*
651 	 * Remove taskq for sfp-status-change
652 	 */
653 	if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
654 		ddi_taskq_destroy(ixgbe->sfp_taskq);
655 	}
656 
657 	/*
658 	 * Remove interrupts
659 	 */
660 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
661 		ixgbe_rem_intrs(ixgbe);
662 	}
663 
664 	/*
665 	 * Remove driver properties
666 	 */
667 	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
668 		(void) ddi_prop_remove_all(devinfo);
669 	}
670 
671 	/*
672 	 * Stop the chipset
673 	 */
674 	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
675 		mutex_enter(&ixgbe->gen_lock);
676 		ixgbe_chip_stop(ixgbe);
677 		mutex_exit(&ixgbe->gen_lock);
678 	}
679 
680 	/*
681 	 * Free register handle
682 	 */
683 	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
684 		if (ixgbe->osdep.reg_handle != NULL)
685 			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
686 	}
687 
688 	/*
689 	 * Free PCI config handle
690 	 */
691 	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
692 		if (ixgbe->osdep.cfg_handle != NULL)
693 			pci_config_teardown(&ixgbe->osdep.cfg_handle);
694 	}
695 
696 	/*
697 	 * Free locks
698 	 */
699 	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
700 		ixgbe_destroy_locks(ixgbe);
701 	}
702 
703 	/*
704 	 * Free the rx/tx rings
705 	 */
706 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
707 		ixgbe_free_rings(ixgbe);
708 	}
709 
710 	/*
711 	 * Unregister FMA capabilities
712 	 */
713 	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
714 		ixgbe_fm_fini(ixgbe);
715 	}
716 
717 	/*
718 	 * Free the driver data structure
719 	 */
720 	kmem_free(ixgbe, sizeof (ixgbe_t));
721 
722 	ddi_set_driver_private(devinfo, NULL);
723 }
724 
725 /*
726  * ixgbe_register_mac - Register the driver and its function pointers with
727  * the GLD interface.
728  */
729 static int
730 ixgbe_register_mac(ixgbe_t *ixgbe)
731 {
732 	struct ixgbe_hw *hw = &ixgbe->hw;
733 	mac_register_t *mac;
734 	int status;
735 
736 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
737 		return (IXGBE_FAILURE);
738 
739 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
740 	mac->m_driver = ixgbe;
741 	mac->m_dip = ixgbe->dip;
742 	mac->m_src_addr = hw->mac.addr;
743 	mac->m_callbacks = &ixgbe_m_callbacks;
744 	mac->m_min_sdu = 0;
745 	mac->m_max_sdu = ixgbe->default_mtu;
746 	mac->m_margin = VLAN_TAGSZ;
747 	mac->m_priv_props = ixgbe_priv_props;
748 	mac->m_priv_prop_count = IXGBE_MAX_PRIV_PROPS;
749 	mac->m_v12n = MAC_VIRT_LEVEL1;
750 
751 	status = mac_register(mac, &ixgbe->mac_hdl);
752 
753 	mac_free(mac);
754 
755 	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
756 }
757 
758 /*
759  * ixgbe_identify_hardware - Identify the type of the chipset.
760  */
761 static int
762 ixgbe_identify_hardware(ixgbe_t *ixgbe)
763 {
764 	struct ixgbe_hw *hw = &ixgbe->hw;
765 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
766 
767 	/*
768 	 * Get the device id
769 	 */
770 	hw->vendor_id =
771 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
772 	hw->device_id =
773 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
774 	hw->revision_id =
775 	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
776 	hw->subsystem_device_id =
777 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
778 	hw->subsystem_vendor_id =
779 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
780 
781 	/*
782 	 * Set the mac type of the adapter based on the device id
783 	 */
784 	if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
785 		return (IXGBE_FAILURE);
786 	}
787 
788 	/*
789 	 * Install adapter capabilities
790 	 */
791 	switch (hw->mac.type) {
792 	case ixgbe_mac_82598EB:
793 		ixgbe_log(ixgbe, "identify 82598 adapter\n");
794 		ixgbe->capab = &ixgbe_82598eb_cap;
795 
796 		if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
797 			ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
798 			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
799 		}
800 		ixgbe->capab->other_intr |= IXGBE_EICR_LSC;
801 
802 		break;
803 	case ixgbe_mac_82599EB:
804 		ixgbe_log(ixgbe, "identify 82599 adapter\n");
805 		ixgbe->capab = &ixgbe_82599eb_cap;
806 
807 		ixgbe->capab->other_intr = (IXGBE_EICR_GPI_SDP1 |
808 		    IXGBE_EICR_GPI_SDP2 | IXGBE_EICR_LSC);
809 
810 		break;
811 	default:
812 		ixgbe_log(ixgbe,
813 		    "adapter not supported in ixgbe_identify_hardware(): %d\n",
814 		    hw->mac.type);
815 		return (IXGBE_FAILURE);
816 	}
817 
818 	return (IXGBE_SUCCESS);
819 }
820 
821 /*
822  * ixgbe_regs_map - Map the device registers.
823  *
824  */
825 static int
826 ixgbe_regs_map(ixgbe_t *ixgbe)
827 {
828 	dev_info_t *devinfo = ixgbe->dip;
829 	struct ixgbe_hw *hw = &ixgbe->hw;
830 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
831 	off_t mem_size;
832 
833 	/*
834 	 * First get the size of device registers to be mapped.
835 	 */
836 	if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
837 	    != DDI_SUCCESS) {
838 		return (IXGBE_FAILURE);
839 	}
840 
841 	/*
842 	 * Call ddi_regs_map_setup() to map registers
843 	 */
844 	if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
845 	    (caddr_t *)&hw->hw_addr, 0,
846 	    mem_size, &ixgbe_regs_acc_attr,
847 	    &osdep->reg_handle)) != DDI_SUCCESS) {
848 		return (IXGBE_FAILURE);
849 	}
850 
851 	return (IXGBE_SUCCESS);
852 }
853 
854 /*
855  * ixgbe_init_properties - Initialize driver properties.
856  */
857 static void
858 ixgbe_init_properties(ixgbe_t *ixgbe)
859 {
860 	/*
861 	 * Get conf file properties, including link settings,
862 	 * jumbo frames, ring number, descriptor number, etc.
863 	 */
864 	ixgbe_get_conf(ixgbe);
865 
866 	ixgbe_init_params(ixgbe);
867 }
868 
869 /*
870  * ixgbe_init_driver_settings - Initialize driver settings.
871  *
872  * The settings include hardware function pointers, bus information,
873  * rx/tx ring settings, link state, and any other parameters that
874  * need to be set up during driver initialization.
875  */
876 static int
877 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
878 {
879 	struct ixgbe_hw *hw = &ixgbe->hw;
880 	dev_info_t *devinfo = ixgbe->dip;
881 	ixgbe_rx_ring_t *rx_ring;
882 	ixgbe_tx_ring_t *tx_ring;
883 	uint32_t rx_size;
884 	uint32_t tx_size;
885 	int i;
886 
887 	/*
888 	 * Initialize chipset specific hardware function pointers
889 	 */
890 	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
891 		return (IXGBE_FAILURE);
892 	}
893 
894 	/*
895 	 * Get the system page size
896 	 */
897 	ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);
898 
899 	/*
900 	 * Set rx buffer size
901 	 *
902 	 * The IP header alignment room is counted in the calculation.
903 	 * The rx buffer size is in units of 1K, as required by the
904 	 * chipset hardware.
905 	 */
906 	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
907 	ixgbe->rx_buf_size = ((rx_size >> 10) +
908 	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
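	/*
	 * The expression above rounds rx_size up to the next 1 KB
	 * boundary: the low ten bits hold the remainder, and any
	 * non-zero remainder adds one more 1 KB unit.  The same
	 * rounding is applied to the tx buffer size below.
	 */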
909 
910 	/*
911 	 * Set tx buffer size
912 	 */
913 	tx_size = ixgbe->max_frame_size;
914 	ixgbe->tx_buf_size = ((tx_size >> 10) +
915 	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
916 
917 	/*
918 	 * Initialize rx/tx rings parameters
919 	 */
920 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
921 		rx_ring = &ixgbe->rx_rings[i];
922 		rx_ring->index = i;
923 		rx_ring->ixgbe = ixgbe;
924 	}
925 
926 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
927 		tx_ring = &ixgbe->tx_rings[i];
928 		tx_ring->index = i;
929 		tx_ring->ixgbe = ixgbe;
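		/*
		 * Select the tx recycle routine: with head write-back
		 * enabled, hardware reports its descriptor progress to
		 * a word in host memory; otherwise the legacy path
		 * checks the status of the descriptors themselves.
		 */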
930 		if (ixgbe->tx_head_wb_enable)
931 			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
932 		else
933 			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
934 
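		/*
		 * The tcb free list is sized at 1.5 times the tx
		 * descriptor ring, so spare control blocks remain
		 * available while others await recycling.
		 */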
935 		tx_ring->ring_size = ixgbe->tx_ring_size;
936 		tx_ring->free_list_size = ixgbe->tx_ring_size +
937 		    (ixgbe->tx_ring_size >> 1);
938 	}
939 
940 	/*
941 	 * Initialize values of interrupt throttling rate
942 	 */
943 	for (i = 1; i < MAX_INTR_VECTOR; i++)
944 		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
945 
946 	/*
947 	 * The initial link state should be "unknown"
948 	 */
949 	ixgbe->link_state = LINK_STATE_UNKNOWN;
950 
951 	return (IXGBE_SUCCESS);
952 }
953 
954 /*
955  * ixgbe_init_locks - Initialize locks.
956  */
957 static void
958 ixgbe_init_locks(ixgbe_t *ixgbe)
959 {
960 	ixgbe_rx_ring_t *rx_ring;
961 	ixgbe_tx_ring_t *tx_ring;
962 	int i;
963 
964 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
965 		rx_ring = &ixgbe->rx_rings[i];
966 		mutex_init(&rx_ring->rx_lock, NULL,
967 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
968 	}
969 
970 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
971 		tx_ring = &ixgbe->tx_rings[i];
972 		mutex_init(&tx_ring->tx_lock, NULL,
973 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
974 		mutex_init(&tx_ring->recycle_lock, NULL,
975 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
976 		mutex_init(&tx_ring->tcb_head_lock, NULL,
977 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
978 		mutex_init(&tx_ring->tcb_tail_lock, NULL,
979 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
980 	}
981 
982 	mutex_init(&ixgbe->gen_lock, NULL,
983 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
984 
985 	mutex_init(&ixgbe->watchdog_lock, NULL,
986 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
987 }
988 
989 /*
990  * ixgbe_destroy_locks - Destroy locks.
991  */
992 static void
993 ixgbe_destroy_locks(ixgbe_t *ixgbe)
994 {
995 	ixgbe_rx_ring_t *rx_ring;
996 	ixgbe_tx_ring_t *tx_ring;
997 	int i;
998 
999 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1000 		rx_ring = &ixgbe->rx_rings[i];
1001 		mutex_destroy(&rx_ring->rx_lock);
1002 	}
1003 
1004 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1005 		tx_ring = &ixgbe->tx_rings[i];
1006 		mutex_destroy(&tx_ring->tx_lock);
1007 		mutex_destroy(&tx_ring->recycle_lock);
1008 		mutex_destroy(&tx_ring->tcb_head_lock);
1009 		mutex_destroy(&tx_ring->tcb_tail_lock);
1010 	}
1011 
1012 	mutex_destroy(&ixgbe->gen_lock);
1013 	mutex_destroy(&ixgbe->watchdog_lock);
1014 }
1015 
1016 static int
1017 ixgbe_resume(dev_info_t *devinfo)
1018 {
1019 	ixgbe_t *ixgbe;
1020 	int i;
1021 
1022 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1023 	if (ixgbe == NULL)
1024 		return (DDI_FAILURE);
1025 
1026 	mutex_enter(&ixgbe->gen_lock);
1027 
1028 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1029 		if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1030 			mutex_exit(&ixgbe->gen_lock);
1031 			return (DDI_FAILURE);
1032 		}
1033 
1034 		/*
1035 		 * Enable and start the watchdog timer
1036 		 */
1037 		ixgbe_enable_watchdog_timer(ixgbe);
1038 	}
1039 
1040 	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);
1041 
1042 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1043 		for (i = 0; i < ixgbe->num_tx_rings; i++) {
1044 			mac_tx_ring_update(ixgbe->mac_hdl,
1045 			    ixgbe->tx_rings[i].ring_handle);
1046 		}
1047 	}
1048 
1049 	mutex_exit(&ixgbe->gen_lock);
1050 
1051 	return (DDI_SUCCESS);
1052 }
1053 
1054 static int
1055 ixgbe_suspend(dev_info_t *devinfo)
1056 {
1057 	ixgbe_t *ixgbe;
1058 
1059 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1060 	if (ixgbe == NULL)
1061 		return (DDI_FAILURE);
1062 
1063 	mutex_enter(&ixgbe->gen_lock);
1064 
1065 	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
1066 	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1067 		mutex_exit(&ixgbe->gen_lock);
1068 		return (DDI_SUCCESS);
1069 	}
1070 	ixgbe_stop(ixgbe, B_FALSE);
1071 
1072 	mutex_exit(&ixgbe->gen_lock);
1073 
1074 	/*
1075 	 * Disable and stop the watchdog timer
1076 	 */
1077 	ixgbe_disable_watchdog_timer(ixgbe);
1078 
1079 	return (DDI_SUCCESS);
1080 }
1081 
1082 /*
1083  * ixgbe_init - Initialize the device.
1084  */
1085 static int
1086 ixgbe_init(ixgbe_t *ixgbe)
1087 {
1088 	struct ixgbe_hw *hw = &ixgbe->hw;
1089 
1090 	mutex_enter(&ixgbe->gen_lock);
1091 
1092 	/*
1093 	 * Reset chipset to put the hardware in a known state
1094 	 * before we try to do anything with the eeprom.
1095 	 */
1096 	if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
1097 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1098 		goto init_fail;
1099 	}
1100 
1101 	/*
1102 	 * Need to init eeprom before validating the checksum.
1103 	 */
1104 	if (ixgbe_init_eeprom_params(hw) < 0) {
1105 		ixgbe_error(ixgbe,
1106 		    "Unable to intitialize the eeprom interface.");
1107 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1108 		goto init_fail;
1109 	}
1110 
1111 	/*
1112 	 * NVM validation
1113 	 */
1114 	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1115 		/*
1116 		 * Some PCI-E parts fail the first check due to
1117 		 * the link being in sleep state.  Call it again;
1118 		 * if it fails a second time, it's a real issue.
1119 		 */
1120 		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1121 			ixgbe_error(ixgbe,
1122 			    "Invalid NVM checksum. Please contact "
1123 			    "the vendor to update the NVM.");
1124 			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1125 			goto init_fail;
1126 		}
1127 	}
1128 
1129 	/*
1130 	 * Setup default flow control thresholds - enable/disable
1131 	 * & flow control type is controlled by ixgbe.conf
1132 	 */
1133 	hw->fc.high_water = DEFAULT_FCRTH;
1134 	hw->fc.low_water = DEFAULT_FCRTL;
1135 	hw->fc.pause_time = DEFAULT_FCPAUSE;
1136 	hw->fc.send_xon = B_TRUE;
1137 
1138 	/*
1139 	 * Initialize link settings
1140 	 */
1141 	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1142 
1143 	/*
1144 	 * Initialize the chipset hardware
1145 	 */
1146 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1147 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1148 		goto init_fail;
1149 	}
1150 
1151 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1152 		goto init_fail;
1153 	}
1154 
1155 	mutex_exit(&ixgbe->gen_lock);
1156 	return (IXGBE_SUCCESS);
1157 
1158 init_fail:
1159 	/*
1160 	 * Reset PHY
1161 	 */
1162 	(void) ixgbe_reset_phy(hw);
1163 
1164 	mutex_exit(&ixgbe->gen_lock);
1165 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1166 	return (IXGBE_FAILURE);
1167 }
1168 
1169 /*
1170  * ixgbe_chip_start - Initialize and start the chipset hardware.
1171  */
1172 static int
1173 ixgbe_chip_start(ixgbe_t *ixgbe)
1174 {
1175 	struct ixgbe_hw *hw = &ixgbe->hw;
1176 	int ret_val, i;
1177 
1178 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1179 
1180 	/*
1181 	 * Get the mac address
1182 	 * This function should handle the SPARC case correctly.
1183 	 */
1184 	if (!ixgbe_find_mac_address(ixgbe)) {
1185 		ixgbe_error(ixgbe, "Failed to get the mac address");
1186 		return (IXGBE_FAILURE);
1187 	}
1188 
1189 	/*
1190 	 * Validate the mac address
1191 	 */
1192 	(void) ixgbe_init_rx_addrs(hw);
1193 	if (!is_valid_mac_addr(hw->mac.addr)) {
1194 		ixgbe_error(ixgbe, "Invalid mac address");
1195 		return (IXGBE_FAILURE);
1196 	}
1197 
1198 	/*
1199 	 * Configure/Initialize hardware
1200 	 */
1201 	ret_val = ixgbe_init_hw(hw);
1202 	if (ret_val != IXGBE_SUCCESS) {
1203 		if (ret_val == IXGBE_ERR_EEPROM_VERSION) {
1204 			ixgbe_error(ixgbe,
1205 			    "This 82599 device is pre-release and contains"
1206 			    " outdated firmware, please contact your hardware"
1207 			    " vendor for a replacement.");
1208 		} else {
1209 			ixgbe_error(ixgbe, "Failed to initialize hardware");
1210 			return (IXGBE_FAILURE);
1211 		}
1212 	}
1213 
1214 	/*
1215 	 * Setup adapter interrupt vectors
1216 	 */
1217 	ixgbe_setup_adapter_vector(ixgbe);
1218 
1219 	/*
1220 	 * Initialize unicast addresses.
1221 	 */
1222 	ixgbe_init_unicst(ixgbe);
1223 
1224 	/*
1225 	 * Setup and initialize the mctable structures.
1226 	 */
1227 	ixgbe_setup_multicst(ixgbe);
1228 
1229 	/*
1230 	 * Set interrupt throttling rate
1231 	 */
1232 	for (i = 0; i < ixgbe->intr_cnt; i++) {
1233 		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
1234 	}
1235 
1236 	/*
1237 	 * Save the state of the phy
1238 	 */
1239 	ixgbe_get_hw_state(ixgbe);
1240 
1241 	/*
1242 	 * Make sure driver has control
1243 	 */
1244 	ixgbe_get_driver_control(hw);
1245 
1246 	return (IXGBE_SUCCESS);
1247 }
1248 
1249 /*
1250  * ixgbe_chip_stop - Stop the chipset hardware
1251  */
1252 static void
1253 ixgbe_chip_stop(ixgbe_t *ixgbe)
1254 {
1255 	struct ixgbe_hw *hw = &ixgbe->hw;
1256 
1257 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1258 
1259 	/*
1260 	 * Tell firmware driver is no longer in control
1261 	 */
1262 	ixgbe_release_driver_control(hw);
1263 
1264 	/*
1265 	 * Reset the chipset
1266 	 */
1267 	(void) ixgbe_reset_hw(hw);
1268 
1269 	/*
1270 	 * Reset PHY
1271 	 */
1272 	(void) ixgbe_reset_phy(hw);
1273 }
1274 
1275 /*
1276  * ixgbe_reset - Reset the chipset and re-start the driver.
1277  *
1278  * It involves stopping and re-starting the chipset,
1279  * and re-configuring the rx/tx rings.
1280  */
1281 static int
1282 ixgbe_reset(ixgbe_t *ixgbe)
1283 {
1284 	int i;
1285 
1286 	/*
1287 	 * Disable and stop the watchdog timer
1288 	 */
1289 	ixgbe_disable_watchdog_timer(ixgbe);
1290 
1291 	mutex_enter(&ixgbe->gen_lock);
1292 
1293 	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
1294 	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
1295 
1296 	ixgbe_stop(ixgbe, B_FALSE);
1297 
1298 	if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1299 		mutex_exit(&ixgbe->gen_lock);
1300 		return (IXGBE_FAILURE);
1301 	}
1302 
1303 	/*
1304 	 * After resetting, need to recheck the link status.
1305 	 */
1306 	ixgbe->link_check_complete = B_FALSE;
1307 	ixgbe->link_check_hrtime = gethrtime() +
1308 	    (IXGBE_LINK_UP_TIME * 100000000ULL);
1309 
1310 	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);
1311 
1312 	if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
1313 		for (i = 0; i < ixgbe->num_tx_rings; i++) {
1314 			mac_tx_ring_update(ixgbe->mac_hdl,
1315 			    ixgbe->tx_rings[i].ring_handle);
1316 		}
1317 	}
1318 
1319 	mutex_exit(&ixgbe->gen_lock);
1320 
1321 	/*
1322 	 * Enable and start the watchdog timer
1323 	 */
1324 	ixgbe_enable_watchdog_timer(ixgbe);
1325 
1326 	return (IXGBE_SUCCESS);
1327 }
1328 
1329 /*
1330  * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
1331  */
1332 static void
1333 ixgbe_tx_clean(ixgbe_t *ixgbe)
1334 {
1335 	ixgbe_tx_ring_t *tx_ring;
1336 	tx_control_block_t *tcb;
1337 	link_list_t pending_list;
1338 	uint32_t desc_num;
1339 	int i, j;
1340 
1341 	LINK_LIST_INIT(&pending_list);
1342 
1343 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1344 		tx_ring = &ixgbe->tx_rings[i];
1345 
1346 		mutex_enter(&tx_ring->recycle_lock);
1347 
1348 		/*
1349 		 * Clean the pending tx data - the pending packets in the
1350 		 * work_list that have no chance of being transmitted again.
1351 		 *
1352 		 * We must ensure the chipset is stopped or the link is down
1353 		 * before cleaning the transmit packets.
1354 		 */
1355 		desc_num = 0;
1356 		for (j = 0; j < tx_ring->ring_size; j++) {
1357 			tcb = tx_ring->work_list[j];
1358 			if (tcb != NULL) {
1359 				desc_num += tcb->desc_num;
1360 
1361 				tx_ring->work_list[j] = NULL;
1362 
1363 				ixgbe_free_tcb(tcb);
1364 
1365 				LIST_PUSH_TAIL(&pending_list, &tcb->link);
1366 			}
1367 		}
1368 
1369 		if (desc_num > 0) {
1370 			atomic_add_32(&tx_ring->tbd_free, desc_num);
1371 			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
1372 
1373 			/*
1374 			 * Reset the head and tail pointers of the tbd ring;
1375 			 * Reset the write-back head if it's enabled.
1376 			 */
1377 			tx_ring->tbd_head = 0;
1378 			tx_ring->tbd_tail = 0;
1379 			if (ixgbe->tx_head_wb_enable)
1380 				*tx_ring->tbd_head_wb = 0;
1381 
1382 			IXGBE_WRITE_REG(&ixgbe->hw,
1383 			    IXGBE_TDH(tx_ring->index), 0);
1384 			IXGBE_WRITE_REG(&ixgbe->hw,
1385 			    IXGBE_TDT(tx_ring->index), 0);
1386 		}
1387 
1388 		mutex_exit(&tx_ring->recycle_lock);
1389 
1390 		/*
1391 		 * Add the tx control blocks in the pending list to
1392 		 * the free list.
1393 		 */
1394 		ixgbe_put_free_list(tx_ring, &pending_list);
1395 	}
1396 }
1397 
1398 /*
1399  * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1400  * transmitted.
1401  */
1402 static boolean_t
1403 ixgbe_tx_drain(ixgbe_t *ixgbe)
1404 {
1405 	ixgbe_tx_ring_t *tx_ring;
1406 	boolean_t done;
1407 	int i, j;
1408 
1409 	/*
1410 	 * Wait for a specific time to allow pending tx packets
1411 	 * to be transmitted.
1412 	 *
1413 	 * Check the counter tbd_free to see if transmission is done.
1414 	 * No lock protection is needed here.
1415 	 *
1416 	 * Return B_TRUE if all pending packets have been transmitted;
1417 	 * otherwise, return B_FALSE.
1418 	 */
1419 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1420 
1421 		done = B_TRUE;
1422 		for (j = 0; j < ixgbe->num_tx_rings; j++) {
1423 			tx_ring = &ixgbe->tx_rings[j];
1424 			done = done &&
1425 			    (tx_ring->tbd_free == tx_ring->ring_size);
1426 		}
1427 
1428 		if (done)
1429 			break;
1430 
1431 		msec_delay(1);
1432 	}
1433 
1434 	return (done);
1435 }
1436 
1437 /*
1438  * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1439  */
1440 static boolean_t
1441 ixgbe_rx_drain(ixgbe_t *ixgbe)
1442 {
1443 	boolean_t done = B_TRUE;
1444 	int i;
1445 
1446 	/*
1447 	 * Poll the rx free list to check whether the rx buffers held by
1448 	 * the upper layer have been released.
1449 	 *
1450 	 * Check the counter rcb_pending to see if all pending buffers
1451 	 * have been released. No lock protection is needed here.
1452 	 *
1453 	 * Return B_TRUE if all pending buffers have been released;
1454 	 * otherwise, return B_FALSE.
1455 	 */
1456 	for (i = 0; i < RX_DRAIN_TIME; i++) {
1457 		done = (ixgbe->rcb_pending == 0);
1458 
1459 		if (done)
1460 			break;
1461 
1462 		msec_delay(1);
1463 	}
1464 
1465 	return (done);
1466 }
1467 
1468 /*
1469  * ixgbe_start - Start the driver/chipset.
1470  */
1471 int
1472 ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
1473 {
1474 	int i;
1475 
1476 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1477 
1478 	if (alloc_buffer) {
1479 		if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
1480 			ixgbe_error(ixgbe,
1481 			    "Failed to allocate software receive rings");
1482 			return (IXGBE_FAILURE);
1483 		}
1484 
1485 		/* Allocate buffers for all the rx/tx rings */
1486 		if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
1487 			ixgbe_error(ixgbe, "Failed to allocate DMA resource");
1488 			return (IXGBE_FAILURE);
1489 		}
1490 
1491 		ixgbe->tx_ring_init = B_TRUE;
1492 	} else {
1493 		ixgbe->tx_ring_init = B_FALSE;
1494 	}
1495 
1496 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1497 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1498 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1499 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1500 
1501 	/*
1502 	 * Start the chipset hardware
1503 	 */
1504 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1505 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1506 		goto start_failure;
1507 	}
1508 
1509 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1510 		goto start_failure;
1511 	}
1512 
1513 	/*
1514 	 * Setup the rx/tx rings
1515 	 */
1516 	ixgbe_setup_rings(ixgbe);
1517 
1518 	/*
1519 	 * ixgbe_start() will be called when resetting; if a reset
1520 	 * happens, we need to clear the ERROR and STALL flags before
1521 	 * enabling the interrupts.
1522 	 */
1523 	atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR | IXGBE_STALL));
1524 
1525 	/*
1526 	 * Enable adapter interrupts
1527 	 * The interrupts must be enabled after the driver state is START
1528 	 */
1529 	ixgbe_enable_adapter_interrupts(ixgbe);
1530 
1531 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1532 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1533 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1534 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1535 
1536 	return (IXGBE_SUCCESS);
1537 
1538 start_failure:
1539 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1540 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1541 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1542 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1543 
1544 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1545 
1546 	return (IXGBE_FAILURE);
1547 }
1548 
1549 /*
1550  * ixgbe_stop - Stop the driver/chipset.
1551  */
1552 void
1553 ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
1554 {
1555 	int i;
1556 
1557 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1558 
1559 	/*
1560 	 * Disable the adapter interrupts
1561 	 */
1562 	ixgbe_disable_adapter_interrupts(ixgbe);
1563 
1564 	/*
1565 	 * Drain the pending tx packets
1566 	 */
1567 	(void) ixgbe_tx_drain(ixgbe);
1568 
1569 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1570 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1571 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1572 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1573 
1574 	/*
1575 	 * Stop the chipset hardware
1576 	 */
1577 	ixgbe_chip_stop(ixgbe);
1578 
1579 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1580 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1581 	}
1582 
1583 	/*
1584 	 * Clean the pending tx data/resources
1585 	 */
1586 	ixgbe_tx_clean(ixgbe);
1587 
1588 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1589 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1590 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1591 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1592 
1593 	if (ixgbe->link_state == LINK_STATE_UP) {
1594 		ixgbe->link_state = LINK_STATE_UNKNOWN;
1595 		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
1596 	}
1597 
1598 	if (free_buffer) {
1599 		/*
1600 		 * Release the DMA/memory resources of rx/tx rings
1601 		 */
1602 		ixgbe_free_dma(ixgbe);
1603 		ixgbe_free_rx_data(ixgbe);
1604 	}
1605 }
1606 
1607 /*
1608  * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
1609  */
1610 static int
1611 ixgbe_alloc_rings(ixgbe_t *ixgbe)
1612 {
1613 	/*
1614 	 * Allocate memory space for rx rings
1615 	 */
1616 	ixgbe->rx_rings = kmem_zalloc(
1617 	    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
1618 	    KM_NOSLEEP);
1619 
1620 	if (ixgbe->rx_rings == NULL) {
1621 		return (IXGBE_FAILURE);
1622 	}
1623 
1624 	/*
1625 	 * Allocate memory space for tx rings
1626 	 */
1627 	ixgbe->tx_rings = kmem_zalloc(
1628 	    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
1629 	    KM_NOSLEEP);
1630 
1631 	if (ixgbe->tx_rings == NULL) {
1632 		kmem_free(ixgbe->rx_rings,
1633 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1634 		ixgbe->rx_rings = NULL;
1635 		return (IXGBE_FAILURE);
1636 	}
1637 
1638 	/*
1639 	 * Allocate memory space for rx ring groups
1640 	 */
1641 	ixgbe->rx_groups = kmem_zalloc(
1642 	    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
1643 	    KM_NOSLEEP);
1644 
1645 	if (ixgbe->rx_groups == NULL) {
1646 		kmem_free(ixgbe->rx_rings,
1647 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1648 		kmem_free(ixgbe->tx_rings,
1649 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1650 		ixgbe->rx_rings = NULL;
1651 		ixgbe->tx_rings = NULL;
1652 		return (IXGBE_FAILURE);
1653 	}
1654 
1655 	return (IXGBE_SUCCESS);
1656 }
1657 
1658 /*
1659  * ixgbe_free_rings - Free the memory space of rx/tx rings.
1660  */
1661 static void
1662 ixgbe_free_rings(ixgbe_t *ixgbe)
1663 {
1664 	if (ixgbe->rx_rings != NULL) {
1665 		kmem_free(ixgbe->rx_rings,
1666 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1667 		ixgbe->rx_rings = NULL;
1668 	}
1669 
1670 	if (ixgbe->tx_rings != NULL) {
1671 		kmem_free(ixgbe->tx_rings,
1672 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1673 		ixgbe->tx_rings = NULL;
1674 	}
1675 
1676 	if (ixgbe->rx_groups != NULL) {
1677 		kmem_free(ixgbe->rx_groups,
1678 		    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
1679 		ixgbe->rx_groups = NULL;
1680 	}
1681 }
1682 
1683 static int
1684 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
1685 {
1686 	ixgbe_rx_ring_t *rx_ring;
1687 	int i;
1688 
1689 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1690 		rx_ring = &ixgbe->rx_rings[i];
1691 		if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
1692 			goto alloc_rx_rings_failure;
1693 	}
1694 	return (IXGBE_SUCCESS);
1695 
1696 alloc_rx_rings_failure:
1697 	ixgbe_free_rx_data(ixgbe);
1698 	return (IXGBE_FAILURE);
1699 }
1700 
1701 static void
1702 ixgbe_free_rx_data(ixgbe_t *ixgbe)
1703 {
1704 	ixgbe_rx_ring_t *rx_ring;
1705 	ixgbe_rx_data_t *rx_data;
1706 	int i;
1707 
1708 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1709 		rx_ring = &ixgbe->rx_rings[i];
1710 
1711 		mutex_enter(&ixgbe->rx_pending_lock);
1712 		rx_data = rx_ring->rx_data;
1713 
1714 		if (rx_data != NULL) {
1715 			rx_data->flag |= IXGBE_RX_STOPPED;
1716 
1717 			if (rx_data->rcb_pending == 0) {
1718 				ixgbe_free_rx_ring_data(rx_data);
1719 				rx_ring->rx_data = NULL;
1720 			}
1721 		}
1722 
1723 		mutex_exit(&ixgbe->rx_pending_lock);
1724 	}
1725 }
1726 
1727 /*
1728  * ixgbe_setup_rings - Setup rx/tx rings.
1729  */
1730 static void
1731 ixgbe_setup_rings(ixgbe_t *ixgbe)
1732 {
1733 	/*
1734 	 * Setup the rx/tx rings, including the following:
1735 	 *
1736 	 * 1. Setup the descriptor ring and the control block buffers;
1737 	 * 2. Initialize necessary registers for receive/transmit;
1738 	 * 3. Initialize software pointers/parameters for receive/transmit;
1739 	 */
1740 	ixgbe_setup_rx(ixgbe);
1741 
1742 	ixgbe_setup_tx(ixgbe);
1743 }
1744 
1745 static void
1746 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
1747 {
1748 	ixgbe_t *ixgbe = rx_ring->ixgbe;
1749 	ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
1750 	struct ixgbe_hw *hw = &ixgbe->hw;
1751 	rx_control_block_t *rcb;
1752 	union ixgbe_adv_rx_desc	*rbd;
1753 	uint32_t size;
1754 	uint32_t buf_low;
1755 	uint32_t buf_high;
1756 	uint32_t reg_val;
1757 	int i;
1758 
1759 	ASSERT(mutex_owned(&rx_ring->rx_lock));
1760 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1761 
1762 	for (i = 0; i < ixgbe->rx_ring_size; i++) {
1763 		rcb = rx_data->work_list[i];
1764 		rbd = &rx_data->rbd_ring[i];
1765 
1766 		rbd->read.pkt_addr = rcb->rx_buf.dma_address;
1767 		rbd->read.hdr_addr = NULL;
1768 	}
1769 
1770 	/*
1771 	 * Initialize the length register
1772 	 */
1773 	size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
1774 	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->index), size);
1775 
1776 	/*
1777 	 * Initialize the base address registers
1778 	 */
1779 	buf_low = (uint32_t)rx_data->rbd_area.dma_address;
1780 	buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
1781 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->index), buf_high);
1782 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->index), buf_low);
1783 
1784 	/*
1785 	 * Setup head & tail pointers
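	 * (head starts at zero and the tail register is set to the last
	 * descriptor, making the initialized receive descriptors
	 * available to hardware)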
1786 	 */
1787 	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->index), rx_data->ring_size - 1);
1788 	IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->index), 0);
1789 
1790 	rx_data->rbd_next = 0;
1791 	rx_data->lro_first = 0;
1792 
1793 	/*
1794 	 * Setup the Receive Descriptor Control Register (RXDCTL)
1795 	 * PTHRESH=32 descriptors (half the internal cache)
1796 	 * HTHRESH=0 descriptors (to minimize latency on fetch)
1797 	 * WTHRESH defaults to 1 (writeback each descriptor)
1798 	 */
1799 	reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->index));
1800 	reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */
1801 
1802 	/* Not a valid value for 82599 */
1803 	if (hw->mac.type < ixgbe_mac_82599EB) {
1804 		reg_val |= 0x0020;	/* pthresh */
1805 	}
1806 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->index), reg_val);
1807 
1808 	if (hw->mac.type == ixgbe_mac_82599EB) {
1809 		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1810 		reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
1811 		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
1812 	}
1813 
1814 	/*
1815 	 * Setup the Split and Replication Receive Control Register.
1816 	 * Set the rx buffer size and the advanced descriptor type.
1817 	 */
1818 	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
1819 	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1820 	reg_val |= IXGBE_SRRCTL_DROP_EN;
1821 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->index), reg_val);
1822 }
1823 
1824 static void
1825 ixgbe_setup_rx(ixgbe_t *ixgbe)
1826 {
1827 	ixgbe_rx_ring_t *rx_ring;
1828 	struct ixgbe_hw *hw = &ixgbe->hw;
1829 	ixgbe_rx_group_t *rx_group;
1830 	uint32_t reg_val;
1831 	uint32_t ring_mapping;
1832 	int i;
1833 
1834 	/* PSRTYPE must be configured for 82599 */
1835 	reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1836 	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
1837 #define	IXGBE_PSRTYPE_L2_PKT	0x00001000
1838 	reg_val |= IXGBE_PSRTYPE_L2_PKT;
1839 	reg_val |= 0xE0000000;
1840 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
1841 
1842 	/*
1843 	 * Set filter control in FCTRL to accept broadcast packets and do
1844 	 * not pass pause frames to host.  Flow control settings are already
1845 	 * in this register, so preserve them.
1846 	 */
1847 	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1848 	reg_val |= IXGBE_FCTRL_BAM;	/* broadcast accept mode */
1849 	reg_val |= IXGBE_FCTRL_DPF;	/* discard pause frames */
1850 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
1851 
1852 	/*
1853 	 * Enable the receive unit.  This must be done after filter
1854 	 * control is set in FCTRL.
1855 	 */
1856 	reg_val = (IXGBE_RXCTRL_RXEN	/* Enable Receive Unit */
1857 	    | IXGBE_RXCTRL_DMBYPS);	/* descriptor monitor bypass */
1858 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
1859 
1860 	/*
1861 	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
1862 	 */
1863 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1864 		rx_ring = &ixgbe->rx_rings[i];
1865 		ixgbe_setup_rx_ring(rx_ring);
1866 	}
1867 
1868 	/*
1869 	 * Setup rx groups.
1870 	 */
1871 	for (i = 0; i < ixgbe->num_rx_groups; i++) {
1872 		rx_group = &ixgbe->rx_groups[i];
1873 		rx_group->index = i;
1874 		rx_group->ixgbe = ixgbe;
1875 	}
1876 
1877 	/*
1878 	 * Setup the per-ring statistics mapping.
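	 * Each 32-bit RQSMR register covers four rx queues, one byte
	 * per queue: byte (i & 0x3) of RQSMR(i >> 2) selects the
	 * statistics counter for queue i.  The accumulated word is
	 * written after every fourth queue, and once more at the end
	 * for any remainder.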
1879 	 */
1880 	ring_mapping = 0;
1881 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1882 		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
1883 		if ((i & 0x3) == 0x3) {
1884 			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i >> 2), ring_mapping);
1885 			ring_mapping = 0;
1886 		}
1887 	}
1888 	if ((i & 0x3) != 0x3)
1889 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i >> 2), ring_mapping);
1890 
1891 	/*
1892 	 * The Max Frame Size in MHADD/MAXFRS is internally increased by
1893 	 * four bytes if the packet has a VLAN field, so the value set here
1894 	 * includes only the MTU, ethernet header and frame check sequence.
1895 	 * Register is MAXFRS in 82599.
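	 * For the default 1500-byte MTU this works out to 1518 bytes
	 * (1500 + 14-byte ethernet header + 4-byte FCS).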
1896 	 */
1897 	reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
1898 	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
1899 	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
1900 
1901 	/*
1902 	 * Setup Jumbo Frame enable bit
1903 	 */
1904 	if (ixgbe->default_mtu > ETHERMTU) {
1905 		reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1906 		reg_val |= IXGBE_HLREG0_JUMBOEN;
1907 		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
1908 	}
1909 
1910 	/*
1911 	 * Hardware checksum settings
1912 	 */
1913 	if (ixgbe->rx_hcksum_enable) {
1914 		reg_val = IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
1915 		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
1916 	}
1917 
1918 	/*
1919 	 * Setup RSS for multiple receive queues
1920 	 */
1921 	if (ixgbe->num_rx_rings > 1)
1922 		ixgbe_setup_rss(ixgbe);
1923 
1924 	/*
1925 	 * Setup RSC for multiple receive queues.
1926 	 */
1927 	if (ixgbe->lro_enable) {
1928 		for (i = 0; i < ixgbe->num_rx_rings; i++) {
1929 			/*
1930 			 * Make sure rx_buf_size * MAXDESC is not greater
1931 			 * than 65535.
1932 			 * Intel recommends 4 for the MAXDESC field value.
1933 			 */
1934 			reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
1935 			reg_val |= IXGBE_RSCCTL_RSCEN;
1936 			if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
1937 				reg_val |= IXGBE_RSCCTL_MAXDESC_1;
1938 			else
1939 				reg_val |= IXGBE_RSCCTL_MAXDESC_4;
1940 			IXGBE_WRITE_REG(hw,  IXGBE_RSCCTL(i), reg_val);
1941 		}
1942 
1943 		reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
1944 		reg_val |= IXGBE_RSCDBU_RSCACKDIS;
1945 		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);
1946 
1947 		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1948 		reg_val |= IXGBE_RDRXCTL_RSCACKC;
1949 		reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
1950 
1951 		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
1952 	}
1953 }
1954 
1955 static void
1956 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
1957 {
1958 	ixgbe_t *ixgbe = tx_ring->ixgbe;
1959 	struct ixgbe_hw *hw = &ixgbe->hw;
1960 	uint32_t size;
1961 	uint32_t buf_low;
1962 	uint32_t buf_high;
1963 	uint32_t reg_val;
1964 
1965 	ASSERT(mutex_owned(&tx_ring->tx_lock));
1966 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1967 
1968 	/*
1969 	 * Initialize the length register
1970 	 */
1971 	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
1972 	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);
1973 
1974 	/*
1975 	 * Initialize the base address registers
1976 	 */
1977 	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
1978 	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
1979 	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
1980 	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);
1981 
1982 	/*
1983 	 * Setup head & tail pointers
1984 	 */
1985 	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
1986 	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);
1987 
1988 	/*
1989 	 * Setup head write-back
1990 	 */
1991 	if (ixgbe->tx_head_wb_enable) {
1992 		/*
1993 		 * The memory of the head write-back is allocated using
1994 		 * the extra tbd beyond the tail of the tbd ring.
1995 		 */
1996 		tx_ring->tbd_head_wb = (uint32_t *)
1997 		    ((uintptr_t)tx_ring->tbd_area.address + size);
1998 		*tx_ring->tbd_head_wb = 0;
1999 
2000 		buf_low = (uint32_t)
2001 		    (tx_ring->tbd_area.dma_address + size);
2002 		buf_high = (uint32_t)
2003 		    ((tx_ring->tbd_area.dma_address + size) >> 32);
2004 
2005 		/* Set the head write-back enable bit */
2006 		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2007 
2008 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2009 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
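		/*
		 * Layout note (with a hypothetical write-back address
		 * of 0x123456000): TDWBAH would get 0x1 and TDWBAL
		 * 0x23456000 | HEAD_WB_ENABLE.  Packing the enable
		 * flag into the low bits of TDWBAL is safe only
		 * because the descriptor area is well aligned.
		 */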
2010 
2011 		/*
2012 		 * Turn off relaxed ordering for head write back or it will
2013 		 * cause problems with the tx recycling
2014 		 */
2015 		reg_val = IXGBE_READ_REG(hw,
2016 		    IXGBE_DCA_TXCTRL(tx_ring->index));
2017 		reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2018 		IXGBE_WRITE_REG(hw,
2019 		    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2020 	} else {
2021 		tx_ring->tbd_head_wb = NULL;
2022 	}
2023 
2024 	tx_ring->tbd_head = 0;
2025 	tx_ring->tbd_tail = 0;
2026 	tx_ring->tbd_free = tx_ring->ring_size;
2027 
2028 	if (ixgbe->tx_ring_init == B_TRUE) {
2029 		tx_ring->tcb_head = 0;
2030 		tx_ring->tcb_tail = 0;
2031 		tx_ring->tcb_free = tx_ring->free_list_size;
2032 	}
2033 
2034 	/*
2035 	 * Initialize the s/w context structure
2036 	 */
2037 	bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2038 }
2039 
2040 static void
2041 ixgbe_setup_tx(ixgbe_t *ixgbe)
2042 {
2043 	struct ixgbe_hw *hw = &ixgbe->hw;
2044 	ixgbe_tx_ring_t *tx_ring;
2045 	uint32_t reg_val;
2046 	uint32_t ring_mapping;
2047 	int i;
2048 
2049 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2050 		tx_ring = &ixgbe->tx_rings[i];
2051 		ixgbe_setup_tx_ring(tx_ring);
2052 	}
2053 
2054 	/*
	 * Setup the per-ring statistics mapping, using the same
	 * byte-packing as the rx RQSMR mapping above.  The register
	 * is TQSM on 82599 and newer, TQSMR on older parts.
2056 	 */
2057 	ring_mapping = 0;
2058 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2059 		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2060 		if ((i & 0x3) == 0x3) {
2061 			if (hw->mac.type >= ixgbe_mac_82599EB) {
2062 				IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2063 				    ring_mapping);
2064 			} else {
2065 				IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2066 				    ring_mapping);
2067 			}
2068 			ring_mapping = 0;
2069 		}
2070 	}
	if ((i & 0x3) != 0x3) {
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
		}
	}
2077 
2078 	/*
2079 	 * Enable CRC appending and TX padding (for short tx frames)
2080 	 */
2081 	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2082 	reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2083 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2084 
2085 	/*
2086 	 * enable DMA for 82599 parts
2087 	 */
2088 	if (hw->mac.type == ixgbe_mac_82599EB) {
		/* DMATXCTL.TE must be set after all Tx config is complete */
2090 		reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2091 		reg_val |= IXGBE_DMATXCTL_TE;
2092 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2093 	}
2094 
2095 	/*
	 * Enable the tx queues.
	 * On 82599 this must be done after DMATXCTL.TE is set.
2098 	 */
2099 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2100 		tx_ring = &ixgbe->tx_rings[i];
2101 		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2102 		reg_val |= IXGBE_TXDCTL_ENABLE;
2103 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2104 	}
2105 }
2106 
2107 /*
2108  * ixgbe_setup_rss - Setup receive-side scaling feature.
2109  */
2110 static void
2111 ixgbe_setup_rss(ixgbe_t *ixgbe)
2112 {
2113 	struct ixgbe_hw *hw = &ixgbe->hw;
2114 	uint32_t i, mrqc, rxcsum;
2115 	uint32_t random;
2116 	uint32_t reta;
2117 
2118 	/*
2119 	 * Fill out redirection table
2120 	 */
2121 	reta = 0;
2122 	for (i = 0; i < 128; i++) {
2123 		reta = (reta << 8) | (i % ixgbe->num_rx_rings);
2124 		if ((i & 3) == 3)
2125 			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2126 	}
2127 
2128 	/*
	 * Fill out the hash function seeds with pseudo-random values
2130 	 */
2131 	for (i = 0; i < 10; i++) {
2132 		(void) random_get_pseudo_bytes((uint8_t *)&random,
2133 		    sizeof (uint32_t));
2134 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2135 	}
2136 
2137 	/*
2138 	 * Enable RSS & perform hash on these packet types
2139 	 */
2140 	mrqc = IXGBE_MRQC_RSSEN |
2141 	    IXGBE_MRQC_RSS_FIELD_IPV4 |
2142 	    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2143 	    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2144 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2145 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2146 	    IXGBE_MRQC_RSS_FIELD_IPV6 |
2147 	    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2148 	    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2149 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2150 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2151 
2152 	/*
2153 	 * Disable Packet Checksum to enable RSS for multiple receive queues.
2154 	 * It is an adapter hardware limitation that Packet Checksum is
2155 	 * mutually exclusive with RSS.
2156 	 */
2157 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2158 	rxcsum |= IXGBE_RXCSUM_PCSD;
2159 	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2160 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2161 }
2162 
2163 /*
2164  * ixgbe_init_unicst - Initialize the unicast addresses.
2165  */
2166 static void
2167 ixgbe_init_unicst(ixgbe_t *ixgbe)
2168 {
2169 	struct ixgbe_hw *hw = &ixgbe->hw;
2170 	uint8_t *mac_addr;
2171 	int slot;
2172 	/*
2173 	 * Here we should consider two situations:
2174 	 *
2175 	 * 1. Chipset is initialized at the first time,
2176 	 *    Clear all the multiple unicast addresses.
2177 	 *
2178 	 * 2. Chipset is reset
2179 	 *    Recover the multiple unicast addresses from the
2180 	 *    software data structure to the RAR registers.
2181 	 */
2182 	if (!ixgbe->unicst_init) {
2183 		/*
2184 		 * Initialize the multiple unicast addresses
2185 		 */
2186 		ixgbe->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
2187 		ixgbe->unicst_avail = ixgbe->unicst_total;
2188 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2189 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2190 			bzero(mac_addr, ETHERADDRL);
2191 			(void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL);
2192 			ixgbe->unicst_addr[slot].mac.set = 0;
2193 		}
2194 		ixgbe->unicst_init = B_TRUE;
2195 	} else {
2196 		/* Re-configure the RAR registers */
2197 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2198 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2199 			if (ixgbe->unicst_addr[slot].mac.set == 1) {
2200 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2201 				    NULL, IXGBE_RAH_AV);
2202 			} else {
2203 				bzero(mac_addr, ETHERADDRL);
2204 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2205 				    NULL, NULL);
2206 			}
2207 		}
2208 	}
2209 }
2210 
2211 /*
2212  * ixgbe_unicst_set - Set the unicast address to the specified slot.
2213  */
2214 int
2215 ixgbe_unicst_set(ixgbe_t *ixgbe, const uint8_t *mac_addr,
2216     int slot)
2217 {
2218 	struct ixgbe_hw *hw = &ixgbe->hw;
2219 
2220 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2221 
2222 	/*
2223 	 * Save the unicast address in the software data structure
2224 	 */
2225 	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
2226 
2227 	/*
	 * Set the unicast address in the RAR register and mark it
	 * valid (IXGBE_RAH_AV)
2229 	 */
2230 	(void) ixgbe_set_rar(hw, slot, (uint8_t *)mac_addr, NULL, IXGBE_RAH_AV);
2231 
2232 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2233 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2234 		return (EIO);
2235 	}
2236 
2237 	return (0);
2238 }
2239 
2240 /*
2241  * ixgbe_unicst_find - Find the slot for the specified unicast address
2242  */
2243 int
2244 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
2245 {
2246 	int slot;
2247 
2248 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2249 
2250 	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2251 		if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
2252 		    mac_addr, ETHERADDRL) == 0)
2253 			return (slot);
2254 	}
2255 
2256 	return (-1);
2257 }
2258 
2259 /*
 * ixgbe_multicst_add - Add a multicast address.
2261  */
2262 int
2263 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2264 {
2265 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2266 
2267 	if ((multiaddr[0] & 01) == 0) {
2268 		return (EINVAL);
2269 	}
2270 
2271 	if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2272 		return (ENOENT);
2273 	}
2274 
2275 	bcopy(multiaddr,
2276 	    &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2277 	ixgbe->mcast_count++;
2278 
2279 	/*
2280 	 * Update the multicast table in the hardware
2281 	 */
2282 	ixgbe_setup_multicst(ixgbe);
2283 
2284 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2285 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2286 		return (EIO);
2287 	}
2288 
2289 	return (0);
2290 }
2291 
2292 /*
 * ixgbe_multicst_remove - Remove a multicast address.
2294  */
2295 int
2296 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2297 {
2298 	int i;
2299 
2300 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2301 
2302 	for (i = 0; i < ixgbe->mcast_count; i++) {
2303 		if (bcmp(multiaddr, &ixgbe->mcast_table[i],
2304 		    ETHERADDRL) == 0) {
2305 			for (i++; i < ixgbe->mcast_count; i++) {
2306 				ixgbe->mcast_table[i - 1] =
2307 				    ixgbe->mcast_table[i];
2308 			}
2309 			ixgbe->mcast_count--;
2310 			break;
2311 		}
2312 	}
2313 
2314 	/*
2315 	 * Update the multicast table in the hardware
2316 	 */
2317 	ixgbe_setup_multicst(ixgbe);
2318 
2319 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2320 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2321 		return (EIO);
2322 	}
2323 
2324 	return (0);
2325 }
2326 
2327 /*
 * ixgbe_setup_multicst - Setup multicast data structures.
 *
 * This routine initializes all of the multicast related structures
 * and saves them in the hardware registers.
2332  */
2333 static void
2334 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2335 {
2336 	uint8_t *mc_addr_list;
2337 	uint32_t mc_addr_count;
2338 	struct ixgbe_hw *hw = &ixgbe->hw;
2339 
2340 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2341 
2342 	ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2343 
2344 	mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2345 	mc_addr_count = ixgbe->mcast_count;
2346 
2347 	/*
2348 	 * Update the multicast addresses to the MTA registers
2349 	 */
2350 	(void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2351 	    ixgbe_mc_table_itr);
2352 }
2353 
2354 /*
 * ixgbe_get_conf - Get driver configurations set in ixgbe.conf.
2356  *
2357  * This routine gets user-configured values out of the configuration
2358  * file ixgbe.conf.
2359  *
2360  * For each configurable value, there is a minimum, a maximum, and a
2361  * default.
2362  * If user does not configure a value, use the default.
 * If user configures below the minimum, use the minimum.
 * If user configures above the maximum, use the maximum.
2365  */
2366 static void
2367 ixgbe_get_conf(ixgbe_t *ixgbe)
2368 {
2369 	struct ixgbe_hw *hw = &ixgbe->hw;
2370 	uint32_t flow_control;
2371 
2372 	/*
2373 	 * ixgbe driver supports the following user configurations:
2374 	 *
2375 	 * Jumbo frame configuration:
2376 	 *    default_mtu
2377 	 *
2378 	 * Ethernet flow control configuration:
2379 	 *    flow_control
2380 	 *
2381 	 * Multiple rings configurations:
2382 	 *    tx_queue_number
2383 	 *    tx_ring_size
2384 	 *    rx_queue_number
2385 	 *    rx_ring_size
2386 	 *
2387 	 * Call ixgbe_get_prop() to get the value for a specific
2388 	 * configuration parameter.
2389 	 */
2390 
2391 	/*
2392 	 * Jumbo frame configuration - max_frame_size controls host buffer
2393 	 * allocation, so includes MTU, ethernet header, vlan tag and
2394 	 * frame check sequence.
2395 	 */
2396 	ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
2397 	    MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);
2398 
2399 	ixgbe->max_frame_size = ixgbe->default_mtu +
2400 	    sizeof (struct ether_vlan_header) + ETHERFCSL;
2401 
2402 	/*
2403 	 * Ethernet flow control configuration
2404 	 */
2405 	flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
2406 	    ixgbe_fc_none, 3, ixgbe_fc_none);
2407 	if (flow_control == 3)
2408 		flow_control = ixgbe_fc_default;
2409 
2410 	/*
	 * fc.requested_mode is what the user requests.  After autoneg,
2412 	 * fc.current_mode will be the flow_control mode that was negotiated.
2413 	 */
2414 	hw->fc.requested_mode = flow_control;
2415 
2416 	/*
2417 	 * Multiple rings configurations
2418 	 */
2419 	ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
2420 	    ixgbe->capab->min_tx_que_num,
2421 	    ixgbe->capab->max_tx_que_num,
2422 	    ixgbe->capab->def_tx_que_num);
2423 	ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
2424 	    MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
2425 
2426 	ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
2427 	    ixgbe->capab->min_rx_que_num,
2428 	    ixgbe->capab->max_rx_que_num,
2429 	    ixgbe->capab->def_rx_que_num);
2430 	ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
2431 	    MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
2432 
2433 	/*
2434 	 * Multiple groups configuration
2435 	 */
2436 	ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
2437 	    MIN_RX_GROUP_NUM, MAX_RX_GROUP_NUM, DEFAULT_RX_GROUP_NUM);
2438 
2439 	ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
2440 	    0, 1, DEFAULT_MR_ENABLE);
2441 
2442 	if (ixgbe->mr_enable == B_FALSE) {
2443 		ixgbe->num_tx_rings = 1;
2444 		ixgbe->num_rx_rings = 1;
2445 		ixgbe->num_rx_groups = 1;
2446 	}
2447 
2448 	/*
	 * Tunable used to force an interrupt type. Its only use is
	 * testing the lesser interrupt types.
2451 	 * 0 = don't force interrupt type
2452 	 * 1 = force interrupt type MSI-X
2453 	 * 2 = force interrupt type MSI
2454 	 * 3 = force interrupt type Legacy
2455 	 */
2456 	ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
2457 	    IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
2458 
2459 	ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
2460 	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
2461 	ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
2462 	    0, 1, DEFAULT_RX_HCKSUM_ENABLE);
2463 	ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
2464 	    0, 1, DEFAULT_LSO_ENABLE);
2465 	ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
2466 	    0, 1, DEFAULT_LRO_ENABLE);
2467 	ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
2468 	    0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
2469 
2470 	/* Head Write Back not recommended for 82599 */
2471 	if (hw->mac.type >= ixgbe_mac_82599EB) {
2472 		ixgbe->tx_head_wb_enable = B_FALSE;
2473 	}
2474 
2475 	/*
2476 	 * ixgbe LSO needs the tx h/w checksum support.
2477 	 * LSO will be disabled if tx h/w checksum is not
2478 	 * enabled.
2479 	 */
2480 	if (ixgbe->tx_hcksum_enable == B_FALSE) {
2481 		ixgbe->lso_enable = B_FALSE;
2482 	}
2483 
2484 	/*
2485 	 * ixgbe LRO needs the rx h/w checksum support.
2486 	 * LRO will be disabled if rx h/w checksum is not
2487 	 * enabled.
2488 	 */
2489 	if (ixgbe->rx_hcksum_enable == B_FALSE) {
2490 		ixgbe->lro_enable = B_FALSE;
2491 	}
2492 
2493 	/*
	 * ixgbe LRO is currently supported only on 82599.
2495 	 */
2496 	if (hw->mac.type != ixgbe_mac_82599EB) {
2497 		ixgbe->lro_enable = B_FALSE;
2498 	}
2499 	ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
2500 	    MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
2501 	    DEFAULT_TX_COPY_THRESHOLD);
2502 	ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
2503 	    PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
2504 	    MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
2505 	ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
2506 	    PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
2507 	    MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
2508 	ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
2509 	    PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
2510 	    MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
2511 
2512 	ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
2513 	    MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
2514 	    DEFAULT_RX_COPY_THRESHOLD);
2515 	ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
2516 	    MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
2517 	    DEFAULT_RX_LIMIT_PER_INTR);
2518 
2519 	ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
2520 	    ixgbe->capab->min_intr_throttle,
2521 	    ixgbe->capab->max_intr_throttle,
2522 	    ixgbe->capab->def_intr_throttle);
2523 	/*
	 * 82599 requires that the interrupt throttling rate be
	 * a multiple of 8.  This is enforced by the register
	 * definition.
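	 * For example, a configured value of 203 is masked down to
	 * 200 (0xCB & 0xFF8 == 0xC8).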
2527 	 */
2528 	if (hw->mac.type == ixgbe_mac_82599EB)
2529 		ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
2530 }
2531 
2532 static void
2533 ixgbe_init_params(ixgbe_t *ixgbe)
2534 {
2535 	ixgbe->param_en_10000fdx_cap = 1;
2536 	ixgbe->param_en_1000fdx_cap = 1;
2537 	ixgbe->param_en_100fdx_cap = 1;
2538 	ixgbe->param_adv_10000fdx_cap = 1;
2539 	ixgbe->param_adv_1000fdx_cap = 1;
2540 	ixgbe->param_adv_100fdx_cap = 1;
2541 
2542 	ixgbe->param_pause_cap = 1;
2543 	ixgbe->param_asym_pause_cap = 1;
2544 	ixgbe->param_rem_fault = 0;
2545 
2546 	ixgbe->param_adv_autoneg_cap = 1;
2547 	ixgbe->param_adv_pause_cap = 1;
2548 	ixgbe->param_adv_asym_pause_cap = 1;
2549 	ixgbe->param_adv_rem_fault = 0;
2550 
2551 	ixgbe->param_lp_10000fdx_cap = 0;
2552 	ixgbe->param_lp_1000fdx_cap = 0;
2553 	ixgbe->param_lp_100fdx_cap = 0;
2554 	ixgbe->param_lp_autoneg_cap = 0;
2555 	ixgbe->param_lp_pause_cap = 0;
2556 	ixgbe->param_lp_asym_pause_cap = 0;
2557 	ixgbe->param_lp_rem_fault = 0;
2558 }
2559 
2560 /*
2561  * ixgbe_get_prop - Get a property value out of the configuration file
2562  * ixgbe.conf.
2563  *
2564  * Caller provides the name of the property, a default value, a minimum
2565  * value, and a maximum value.
2566  *
2567  * Return configured value of the property, with default, minimum and
2568  * maximum properly applied.
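 *
 * For example, a PROP_TX_RING_SIZE value above MAX_TX_RING_SIZE is
 * silently clamped to MAX_TX_RING_SIZE, while an absent property
 * yields the default from ddi_prop_get_int().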
2569  */
2570 static int
2571 ixgbe_get_prop(ixgbe_t *ixgbe,
2572     char *propname,	/* name of the property */
2573     int minval,		/* minimum acceptable value */
    int maxval,		/* maximum acceptable value */
2575     int defval)		/* default value */
2576 {
2577 	int value;
2578 
2579 	/*
2580 	 * Call ddi_prop_get_int() to read the conf settings
2581 	 */
2582 	value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
2583 	    DDI_PROP_DONTPASS, propname, defval);
2584 	if (value > maxval)
2585 		value = maxval;
2586 
2587 	if (value < minval)
2588 		value = minval;
2589 
2590 	return (value);
2591 }
2592 
2593 /*
2594  * ixgbe_driver_setup_link - Using the link properties to setup the link.
2595  */
2596 int
2597 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
2598 {
2599 	u32 autoneg_advertised = 0;
2600 
2601 	/*
2602 	 * No half duplex support with 10Gb parts
2603 	 */
2604 	if (ixgbe->param_adv_10000fdx_cap == 1)
2605 		autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
2606 
2607 	if (ixgbe->param_adv_1000fdx_cap == 1)
2608 		autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
2609 
2610 	if (ixgbe->param_adv_100fdx_cap == 1)
2611 		autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
2612 
2613 	if (ixgbe->param_adv_autoneg_cap == 1 && autoneg_advertised == 0) {
2614 		ixgbe_notice(ixgbe, "Invalid link settings. Setup link "
2615 		    "to autonegotiation with full link capabilities.");
2616 
2617 		autoneg_advertised = IXGBE_LINK_SPEED_10GB_FULL |
2618 		    IXGBE_LINK_SPEED_1GB_FULL |
2619 		    IXGBE_LINK_SPEED_100_FULL;
2620 	}
2621 
2622 	if (setup_hw) {
2623 		if (ixgbe_setup_link(&ixgbe->hw, autoneg_advertised,
2624 		    ixgbe->param_adv_autoneg_cap, B_TRUE) != IXGBE_SUCCESS) {
2625 			ixgbe_notice(ixgbe, "Setup link failed on this "
2626 			    "device.");
2627 			return (IXGBE_FAILURE);
2628 		}
2629 	}
2630 
2631 	return (IXGBE_SUCCESS);
2632 }
2633 
2634 /*
2635  * ixgbe_driver_link_check - Link status processing.
2636  *
 * This function can be called in both kernel context and interrupt context.
2638  */
2639 static void
2640 ixgbe_driver_link_check(ixgbe_t *ixgbe)
2641 {
2642 	struct ixgbe_hw *hw = &ixgbe->hw;
2643 	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
2644 	boolean_t link_up = B_FALSE;
2645 	boolean_t link_changed = B_FALSE;
2646 
2647 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2648 
2649 	(void) ixgbe_check_link(hw, &speed, &link_up, false);
2650 	if (link_up) {
2651 		ixgbe->link_check_complete = B_TRUE;
2652 
2653 		/* Link is up, enable flow control settings */
2654 		(void) ixgbe_fc_enable(hw, 0);
2655 
2656 		/*
2657 		 * The Link is up, check whether it was marked as down earlier
2658 		 */
2659 		if (ixgbe->link_state != LINK_STATE_UP) {
2660 			switch (speed) {
2661 			case IXGBE_LINK_SPEED_10GB_FULL:
2662 				ixgbe->link_speed = SPEED_10GB;
2663 				break;
2664 			case IXGBE_LINK_SPEED_1GB_FULL:
2665 				ixgbe->link_speed = SPEED_1GB;
2666 				break;
2667 			case IXGBE_LINK_SPEED_100_FULL:
				ixgbe->link_speed = SPEED_100;
				break;
			}
2670 			ixgbe->link_duplex = LINK_DUPLEX_FULL;
2671 			ixgbe->link_state = LINK_STATE_UP;
2672 			link_changed = B_TRUE;
2673 		}
2674 	} else {
2675 		if (ixgbe->link_check_complete == B_TRUE ||
2676 		    (ixgbe->link_check_complete == B_FALSE &&
2677 		    gethrtime() >= ixgbe->link_check_hrtime)) {
2678 			/*
2679 			 * The link is really down
2680 			 */
2681 			ixgbe->link_check_complete = B_TRUE;
2682 
2683 			if (ixgbe->link_state != LINK_STATE_DOWN) {
2684 				ixgbe->link_speed = 0;
2685 				ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN;
2686 				ixgbe->link_state = LINK_STATE_DOWN;
2687 				link_changed = B_TRUE;
2688 			}
2689 		}
2690 	}
2691 
2692 	/*
	 * Whether we got here from an LSC interrupt, the link timer,
	 * or the SFP taskq, always refresh the PHY state.
2695 	 */
2696 	ixgbe_get_hw_state(ixgbe);
2697 
2698 	/*
	 * If we are in an interrupt context, we need to re-enable the
	 * interrupt, which was automasked.
2701 	 */
2702 	if (servicing_interrupt() != 0) {
2703 		ixgbe->eims |= IXGBE_EICR_LSC;
2704 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
2705 	}
2706 
2707 	if (link_changed) {
2708 		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
2709 	}
2710 }
2711 
2712 /*
 * ixgbe_sfp_check - SFP module processing done in taskq; 82599 only.
2714  */
2715 static void
2716 ixgbe_sfp_check(void *arg)
2717 {
2718 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2719 	uint32_t eicr = ixgbe->eicr;
2720 	struct ixgbe_hw *hw = &ixgbe->hw;
2721 
2722 	mutex_enter(&ixgbe->gen_lock);
2723 	if (eicr & IXGBE_EICR_GPI_SDP1) {
2724 		/* clear the interrupt */
2725 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
2726 
2727 		/* if link up, do multispeed fiber setup */
2728 		(void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
2729 		    B_TRUE, B_TRUE);
2730 		ixgbe_driver_link_check(ixgbe);
2731 	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
2732 		/* clear the interrupt */
2733 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
2734 
2735 		/* if link up, do sfp module setup */
2736 		(void) hw->mac.ops.setup_sfp(hw);
2737 
2738 		/* do multispeed fiber setup */
2739 		(void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
2740 		    B_TRUE, B_TRUE);
2741 		ixgbe_driver_link_check(ixgbe);
2742 	}
2743 	mutex_exit(&ixgbe->gen_lock);
2744 }
2745 
2746 /*
2747  * ixgbe_link_timer - timer for link status detection
2748  */
2749 static void
2750 ixgbe_link_timer(void *arg)
2751 {
2752 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2753 
2754 	mutex_enter(&ixgbe->gen_lock);
2755 	ixgbe_driver_link_check(ixgbe);
2756 	mutex_exit(&ixgbe->gen_lock);
2757 }
2758 
2759 /*
2760  * ixgbe_local_timer - Driver watchdog function.
2761  *
 * This function handles the transmit stall check and chip error recovery.
2763  */
2764 static void
2765 ixgbe_local_timer(void *arg)
2766 {
2767 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2768 
2769 	if (ixgbe->ixgbe_state & IXGBE_ERROR) {
2770 		ixgbe->reset_count++;
2771 		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
2772 			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
2773 		ixgbe_restart_watchdog_timer(ixgbe);
2774 		return;
2775 	}
2776 
2777 	if (ixgbe_stall_check(ixgbe)) {
2778 		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
2779 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2780 
2781 		ixgbe->reset_count++;
2782 		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
2783 			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
2784 	}
2785 
2786 	ixgbe_restart_watchdog_timer(ixgbe);
2787 }
2788 
2789 /*
2790  * ixgbe_stall_check - Check for transmit stall.
2791  *
2792  * This function checks if the adapter is stalled (in transmit).
2793  *
 * It is called each time the watchdog timeout is invoked.
 * If the transmit descriptor reclaim continuously fails,
 * the watchdog counter increments by 1.  If the counter
 * reaches the threshold, the ixgbe is assumed to have
 * stalled and needs to be reset.
2799  */
2800 static boolean_t
2801 ixgbe_stall_check(ixgbe_t *ixgbe)
2802 {
2803 	ixgbe_tx_ring_t *tx_ring;
2804 	boolean_t result;
2805 	int i;
2806 
2807 	if (ixgbe->link_state != LINK_STATE_UP)
2808 		return (B_FALSE);
2809 
2810 	/*
2811 	 * If any tx ring is stalled, we'll reset the chipset
2812 	 */
2813 	result = B_FALSE;
2814 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2815 		tx_ring = &ixgbe->tx_rings[i];
2816 		if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
2817 			tx_ring->tx_recycle(tx_ring);
2818 		}
2819 
2820 		if (tx_ring->recycle_fail > 0)
2821 			tx_ring->stall_watchdog++;
2822 		else
2823 			tx_ring->stall_watchdog = 0;
2824 
2825 		if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
2826 			result = B_TRUE;
2827 			break;
2828 		}
2829 	}
2830 
2831 	if (result) {
2832 		tx_ring->stall_watchdog = 0;
2833 		tx_ring->recycle_fail = 0;
2834 	}
2835 
2836 	return (result);
2837 }
2838 
2839 
2840 /*
2841  * is_valid_mac_addr - Check if the mac address is valid.
2842  */
2843 static boolean_t
2844 is_valid_mac_addr(uint8_t *mac_addr)
2845 {
2846 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
2847 	const uint8_t addr_test2[6] =
2848 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2849 
2850 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
2851 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
2852 		return (B_FALSE);
2853 
2854 	return (B_TRUE);
2855 }
2856 
2857 static boolean_t
2858 ixgbe_find_mac_address(ixgbe_t *ixgbe)
2859 {
2860 #ifdef __sparc
2861 	struct ixgbe_hw *hw = &ixgbe->hw;
2862 	uchar_t *bytes;
2863 	struct ether_addr sysaddr;
2864 	uint_t nelts;
2865 	int err;
2866 	boolean_t found = B_FALSE;
2867 
2868 	/*
2869 	 * The "vendor's factory-set address" may already have
2870 	 * been extracted from the chip, but if the property
2871 	 * "local-mac-address" is set we use that instead.
2872 	 *
2873 	 * We check whether it looks like an array of 6
2874 	 * bytes (which it should, if OBP set it).  If we can't
2875 	 * make sense of it this way, we'll ignore it.
2876 	 */
2877 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
2878 	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
2879 	if (err == DDI_PROP_SUCCESS) {
2880 		if (nelts == ETHERADDRL) {
2881 			while (nelts--)
2882 				hw->mac.addr[nelts] = bytes[nelts];
2883 			found = B_TRUE;
2884 		}
2885 		ddi_prop_free(bytes);
2886 	}
2887 
2888 	/*
2889 	 * Look up the OBP property "local-mac-address?". If the user has set
2890 	 * 'local-mac-address? = false', use "the system address" instead.
2891 	 */
2892 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
2893 	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
2894 		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
2895 			if (localetheraddr(NULL, &sysaddr) != 0) {
2896 				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
2897 				found = B_TRUE;
2898 			}
2899 		}
2900 		ddi_prop_free(bytes);
2901 	}
2902 
2903 	/*
2904 	 * Finally(!), if there's a valid "mac-address" property (created
2905 	 * if we netbooted from this interface), we must use this instead
2906 	 * of any of the above to ensure that the NFS/install server doesn't
2907 	 * get confused by the address changing as Solaris takes over!
2908 	 */
2909 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
2910 	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
2911 	if (err == DDI_PROP_SUCCESS) {
2912 		if (nelts == ETHERADDRL) {
2913 			while (nelts--)
2914 				hw->mac.addr[nelts] = bytes[nelts];
2915 			found = B_TRUE;
2916 		}
2917 		ddi_prop_free(bytes);
2918 	}
2919 
2920 	if (found) {
2921 		bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
2922 		return (B_TRUE);
2923 	}
2924 #else
2925 	_NOTE(ARGUNUSED(ixgbe));
2926 #endif
2927 
2928 	return (B_TRUE);
2929 }
2930 
2931 #pragma inline(ixgbe_arm_watchdog_timer)
2932 static void
2933 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
2934 {
2935 	/*
2936 	 * Fire a watchdog timer
2937 	 */
2938 	ixgbe->watchdog_tid =
2939 	    timeout(ixgbe_local_timer,
2940 	    (void *)ixgbe, 1 * drv_usectohz(1000000));
2942 }
2943 
2944 /*
2945  * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
2946  */
2947 void
2948 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
2949 {
2950 	mutex_enter(&ixgbe->watchdog_lock);
2951 
2952 	if (!ixgbe->watchdog_enable) {
2953 		ixgbe->watchdog_enable = B_TRUE;
2954 		ixgbe->watchdog_start = B_TRUE;
2955 		ixgbe_arm_watchdog_timer(ixgbe);
2956 	}
2957 
2958 	mutex_exit(&ixgbe->watchdog_lock);
2959 }
2960 
2961 /*
2962  * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
2963  */
2964 void
2965 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
2966 {
2967 	timeout_id_t tid;
2968 
2969 	mutex_enter(&ixgbe->watchdog_lock);
2970 
2971 	ixgbe->watchdog_enable = B_FALSE;
2972 	ixgbe->watchdog_start = B_FALSE;
2973 	tid = ixgbe->watchdog_tid;
2974 	ixgbe->watchdog_tid = 0;
2975 
2976 	mutex_exit(&ixgbe->watchdog_lock);
2977 
2978 	if (tid != 0)
2979 		(void) untimeout(tid);
2980 }
2981 
2982 /*
2983  * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
2984  */
2985 void
2986 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
2987 {
2988 	mutex_enter(&ixgbe->watchdog_lock);
2989 
2990 	if (ixgbe->watchdog_enable) {
2991 		if (!ixgbe->watchdog_start) {
2992 			ixgbe->watchdog_start = B_TRUE;
2993 			ixgbe_arm_watchdog_timer(ixgbe);
2994 		}
2995 	}
2996 
2997 	mutex_exit(&ixgbe->watchdog_lock);
2998 }
2999 
3000 /*
3001  * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
3002  */
3003 static void
3004 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
3005 {
3006 	mutex_enter(&ixgbe->watchdog_lock);
3007 
3008 	if (ixgbe->watchdog_start)
3009 		ixgbe_arm_watchdog_timer(ixgbe);
3010 
3011 	mutex_exit(&ixgbe->watchdog_lock);
3012 }
3013 
3014 /*
3015  * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
3016  */
3017 void
3018 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
3019 {
3020 	timeout_id_t tid;
3021 
3022 	mutex_enter(&ixgbe->watchdog_lock);
3023 
3024 	ixgbe->watchdog_start = B_FALSE;
3025 	tid = ixgbe->watchdog_tid;
3026 	ixgbe->watchdog_tid = 0;
3027 
3028 	mutex_exit(&ixgbe->watchdog_lock);
3029 
3030 	if (tid != 0)
3031 		(void) untimeout(tid);
3032 }
3033 
3034 /*
3035  * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
3036  */
3037 static void
3038 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
3039 {
3040 	struct ixgbe_hw *hw = &ixgbe->hw;
3041 
3042 	/*
3043 	 * mask all interrupts off
3044 	 */
3045 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
3046 
3047 	/*
3048 	 * for MSI-X, also disable autoclear
3049 	 */
3050 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3051 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
3052 	}
3053 
3054 	IXGBE_WRITE_FLUSH(hw);
3055 }
3056 
3057 /*
3058  * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
3059  */
3060 static void
3061 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
3062 {
3063 	struct ixgbe_hw *hw = &ixgbe->hw;
3064 	uint32_t eiac, eiam;
3065 	uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3066 
3067 	/* interrupt types to enable */
3068 	ixgbe->eims = IXGBE_EIMS_ENABLE_MASK;	/* shared code default */
3069 	ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER;	/* minus tcp timer */
3070 	ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */
3071 
3072 	/* enable automask on "other" causes that this adapter can generate */
3073 	eiam = ixgbe->capab->other_intr;
3074 
3075 	/*
3076 	 * msi-x mode
3077 	 */
3078 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3079 		/* enable autoclear but not on bits 29:20 */
3080 		eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
3081 
3082 		/* general purpose interrupt enable */
3083 		gpie |= (IXGBE_GPIE_MSIX_MODE
3084 		    | IXGBE_GPIE_PBA_SUPPORT
3085 		    | IXGBE_GPIE_OCD
3086 		    | IXGBE_GPIE_EIAME);
3087 	/*
3088 	 * non-msi-x mode
3089 	 */
3090 	} else {
3091 
3092 		/* disable autoclear, leave gpie at default */
3093 		eiac = 0;
3094 
3095 		/*
3096 		 * General purpose interrupt enable.
3097 		 * For 82599, extended interrupt automask enable
3098 		 * only in MSI or MSI-X mode
3099 		 */
3100 		if ((hw->mac.type < ixgbe_mac_82599EB) ||
3101 		    (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3102 			gpie |= IXGBE_GPIE_EIAME;
3103 		}
3104 	}
3105 	/* Enable specific interrupts for 82599  */
3106 	if (hw->mac.type == ixgbe_mac_82599EB) {
3107 		gpie |= IXGBE_SDP2_GPIEN; /* pluggable optics intr */
3108 		gpie |= IXGBE_SDP1_GPIEN; /* LSC interrupt */
3109 	}
	/* Enable RSC delay of 8us for 82599 */
3111 	if (ixgbe->lro_enable) {
3112 		gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3113 	}
3114 	/* write to interrupt control registers */
3115 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3116 	IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3117 	IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3118 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3119 	IXGBE_WRITE_FLUSH(hw);
3120 }
3121 
3122 /*
3123  * ixgbe_loopback_ioctl - Loopback support.
3124  */
3125 enum ioc_reply
3126 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
3127 {
3128 	lb_info_sz_t *lbsp;
3129 	lb_property_t *lbpp;
3130 	uint32_t *lbmp;
3131 	uint32_t size;
3132 	uint32_t value;
3133 
3134 	if (mp->b_cont == NULL)
3135 		return (IOC_INVAL);
3136 
3137 	switch (iocp->ioc_cmd) {
3138 	default:
3139 		return (IOC_INVAL);
3140 
3141 	case LB_GET_INFO_SIZE:
3142 		size = sizeof (lb_info_sz_t);
3143 		if (iocp->ioc_count != size)
3144 			return (IOC_INVAL);
3145 
3146 		value = sizeof (lb_normal);
3147 		value += sizeof (lb_mac);
3148 		value += sizeof (lb_external);
3149 
3150 		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
3151 		*lbsp = value;
3152 		break;
3153 
3154 	case LB_GET_INFO:
3155 		value = sizeof (lb_normal);
3156 		value += sizeof (lb_mac);
3157 		value += sizeof (lb_external);
3158 
3159 		size = value;
3160 		if (iocp->ioc_count != size)
3161 			return (IOC_INVAL);
3162 
3163 		value = 0;
3164 		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
3165 
3166 		lbpp[value++] = lb_normal;
3167 		lbpp[value++] = lb_mac;
3168 		lbpp[value++] = lb_external;
3169 		break;
3170 
3171 	case LB_GET_MODE:
3172 		size = sizeof (uint32_t);
3173 		if (iocp->ioc_count != size)
3174 			return (IOC_INVAL);
3175 
3176 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3177 		*lbmp = ixgbe->loopback_mode;
3178 		break;
3179 
3180 	case LB_SET_MODE:
3181 		size = 0;
3182 		if (iocp->ioc_count != sizeof (uint32_t))
3183 			return (IOC_INVAL);
3184 
3185 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3186 		if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
3187 			return (IOC_INVAL);
3188 		break;
3189 	}
3190 
3191 	iocp->ioc_count = size;
3192 	iocp->ioc_error = 0;
3193 
3194 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3195 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3196 		return (IOC_INVAL);
3197 	}
3198 
3199 	return (IOC_REPLY);
3200 }
3201 
3202 /*
3203  * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
3204  */
3205 static boolean_t
3206 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
3207 {
3208 	if (mode == ixgbe->loopback_mode)
3209 		return (B_TRUE);
3210 
3211 	ixgbe->loopback_mode = mode;
3212 
3213 	if (mode == IXGBE_LB_NONE) {
3214 		/*
3215 		 * Reset the chip
3216 		 */
3217 		(void) ixgbe_reset(ixgbe);
3218 		return (B_TRUE);
3219 	}
3220 
3221 	mutex_enter(&ixgbe->gen_lock);
3222 
3223 	switch (mode) {
3224 	default:
3225 		mutex_exit(&ixgbe->gen_lock);
3226 		return (B_FALSE);
3227 
3228 	case IXGBE_LB_EXTERNAL:
3229 		break;
3230 
3231 	case IXGBE_LB_INTERNAL_MAC:
3232 		ixgbe_set_internal_mac_loopback(ixgbe);
3233 		break;
3234 	}
3235 
3236 	mutex_exit(&ixgbe->gen_lock);
3237 
3238 	return (B_TRUE);
3239 }
3240 
3241 /*
3242  * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
3243  */
3244 static void
3245 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
3246 {
3247 	struct ixgbe_hw *hw;
3248 	uint32_t reg;
3249 	uint8_t atlas;
3250 
3251 	hw = &ixgbe->hw;
3252 
3253 	/*
3254 	 * Setup MAC loopback
3255 	 */
3256 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
3257 	reg |= IXGBE_HLREG0_LPBK;
3258 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
3259 
3260 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
3261 	reg &= ~IXGBE_AUTOC_LMS_MASK;
3262 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
3263 
3264 	/*
3265 	 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
3266 	 */
3267 	if (hw->mac.type == ixgbe_mac_82598EB) {
3268 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3269 		    &atlas);
3270 		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
3271 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3272 		    atlas);
3273 
3274 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3275 		    &atlas);
3276 		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
3277 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3278 		    atlas);
3279 
3280 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3281 		    &atlas);
3282 		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
3283 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3284 		    atlas);
3285 
3286 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3287 		    &atlas);
3288 		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
3289 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3290 		    atlas);
3291 	}
3292 }
3293 
3294 #pragma inline(ixgbe_intr_rx_work)
3295 /*
3296  * ixgbe_intr_rx_work - RX processing of ISR.
3297  */
3298 static void
3299 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
3300 {
3301 	mblk_t *mp;
3302 
3303 	mutex_enter(&rx_ring->rx_lock);
3304 
3305 	mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
3306 	mutex_exit(&rx_ring->rx_lock);
3307 
3308 	if (mp != NULL)
3309 		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
3310 		    rx_ring->ring_gen_num);
3311 }
3312 
3313 #pragma inline(ixgbe_intr_tx_work)
3314 /*
3315  * ixgbe_intr_tx_work - TX processing of ISR.
3316  */
3317 static void
3318 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
3319 {
3320 	ixgbe_t *ixgbe = tx_ring->ixgbe;
3321 
3322 	/*
3323 	 * Recycle the tx descriptors
3324 	 */
3325 	tx_ring->tx_recycle(tx_ring);
3326 
3327 	/*
3328 	 * Schedule the re-transmit
3329 	 */
3330 	if (tx_ring->reschedule &&
3331 	    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
3332 		tx_ring->reschedule = B_FALSE;
3333 		mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
3334 		    tx_ring->ring_handle);
3335 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
3336 	}
3337 }
3338 
3339 #pragma inline(ixgbe_intr_other_work)
3340 /*
3341  * ixgbe_intr_other_work - Process interrupt types other than tx/rx
3342  */
3343 static void
3344 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
3345 {
3346 	struct ixgbe_hw *hw = &ixgbe->hw;
3347 
3348 	ASSERT(mutex_owned(&ixgbe->gen_lock));
3349 
3350 	/*
3351 	 * handle link status change
3352 	 */
3353 	if (eicr & IXGBE_EICR_LSC) {
3354 		ixgbe_driver_link_check(ixgbe);
3355 	}
3356 
3357 	/*
3358 	 * check for fan failure on adapters with fans
3359 	 */
3360 	if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
3361 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3362 		if (hw->mac.type < ixgbe_mac_82599EB) {
3363 			ixgbe_log(ixgbe,
3364 			    "Fan has stopped, replace the adapter\n");
3365 
3366 			/* re-enable the interrupt, which was automasked */
3367 			ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
3368 		}
3369 	}
3370 
3371 	/*
3372 	 * Do SFP check for 82599
3373 	 */
3374 	if (hw->mac.type == ixgbe_mac_82599EB) {
3375 		if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
3376 		    ixgbe_sfp_check, (void *)ixgbe,
3377 		    DDI_NOSLEEP)) != DDI_SUCCESS) {
3378 			ixgbe_log(ixgbe, "No memory available to dispatch "
3379 			    "taskq for SFP check");
3380 		}
3381 
3382 		/*
3383 		 * We need to fully re-check the link later.
3384 		 */
3385 		ixgbe->link_check_complete = B_FALSE;
3386 		ixgbe->link_check_hrtime = gethrtime() +
3387 		    (IXGBE_LINK_UP_TIME * 100000000ULL);
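		/*
		 * gethrtime() returns nanoseconds, so each unit of
		 * IXGBE_LINK_UP_TIME extends the re-check deadline by
		 * 100000000 ns (0.1 s).
		 */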
3388 	}
3389 }
3390 
3391 /*
3392  * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
3393  */
3394 static uint_t
3395 ixgbe_intr_legacy(void *arg1, void *arg2)
3396 {
3397 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3398 	struct ixgbe_hw *hw = &ixgbe->hw;
3399 	ixgbe_tx_ring_t *tx_ring;
3400 	ixgbe_rx_ring_t *rx_ring;
3401 	uint32_t eicr;
3402 	mblk_t *mp;
3403 	boolean_t tx_reschedule;
3404 	uint_t result;
3405 
3406 	_NOTE(ARGUNUSED(arg2));
3407 
3408 	mutex_enter(&ixgbe->gen_lock);
3409 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
3410 		mutex_exit(&ixgbe->gen_lock);
3411 		return (DDI_INTR_UNCLAIMED);
3412 	}
3413 
3414 	mp = NULL;
3415 	tx_reschedule = B_FALSE;
3416 
3417 	/*
3418 	 * Any bit set in eicr: claim this interrupt
3419 	 */
3420 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3421 
3422 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3423 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3424 		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
3425 		return (DDI_INTR_CLAIMED);
3426 	}
3427 
3428 	if (eicr) {
3429 		/*
3430 		 * For legacy interrupt, we have only one interrupt,
3431 		 * so we have only one rx ring and one tx ring enabled.
3432 		 */
3433 		ASSERT(ixgbe->num_rx_rings == 1);
3434 		ASSERT(ixgbe->num_tx_rings == 1);
3435 
3436 		/*
3437 		 * For legacy interrupt, rx rings[0] will use RTxQ[0].
3438 		 */
3439 		if (eicr & 0x1) {
3440 			ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
3441 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
3442 			ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
3443 			/*
3444 			 * Clean the rx descriptors
3445 			 */
3446 			rx_ring = &ixgbe->rx_rings[0];
3447 			mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
3448 		}
3449 
3450 		/*
3451 		 * For legacy interrupt, tx rings[0] will use RTxQ[1].
3452 		 */
3453 		if (eicr & 0x2) {
3454 			/*
3455 			 * Recycle the tx descriptors
3456 			 */
3457 			tx_ring = &ixgbe->tx_rings[0];
3458 			tx_ring->tx_recycle(tx_ring);
3459 
3460 			/*
3461 			 * Schedule the re-transmit
3462 			 */
3463 			tx_reschedule = (tx_ring->reschedule &&
3464 			    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
3465 		}
3466 
3467 		/* any interrupt type other than tx/rx */
3468 		if (eicr & ixgbe->capab->other_intr) {
3469 			if (hw->mac.type < ixgbe_mac_82599EB) {
3470 				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3471 			}
3472 			if (hw->mac.type == ixgbe_mac_82599EB) {
3473 				ixgbe->eimc = IXGBE_82599_OTHER_INTR;
3474 				IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
3475 			}
3476 			ixgbe_intr_other_work(ixgbe, eicr);
3477 			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3478 		}
3479 
3480 		mutex_exit(&ixgbe->gen_lock);
3481 
3482 		result = DDI_INTR_CLAIMED;
3483 	} else {
3484 		mutex_exit(&ixgbe->gen_lock);
3485 
3486 		/*
3487 		 * No interrupt cause bits set: don't claim this interrupt.
3488 		 */
3489 		result = DDI_INTR_UNCLAIMED;
3490 	}
3491 
3492 	/* re-enable the interrupts which were automasked */
3493 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3494 
3495 	/*
3496 	 * Do the following work outside of the gen_lock
3497 	 */
3498 	if (mp != NULL) {
3499 		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
3500 		    rx_ring->ring_gen_num);
3501 	}
3502 
	if (tx_reschedule) {
3504 		tx_ring->reschedule = B_FALSE;
3505 		mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
3506 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
3507 	}
3508 
3509 	return (result);
3510 }
3511 
3512 /*
3513  * ixgbe_intr_msi - Interrupt handler for MSI.
3514  */
3515 static uint_t
3516 ixgbe_intr_msi(void *arg1, void *arg2)
3517 {
3518 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3519 	struct ixgbe_hw *hw = &ixgbe->hw;
3520 	uint32_t eicr;
3521 
3522 	_NOTE(ARGUNUSED(arg2));
3523 
3524 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3525 
3526 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3527 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3528 		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
3529 		return (DDI_INTR_CLAIMED);
3530 	}
3531 
3532 	/*
3533 	 * For MSI interrupt, we have only one vector,
3534 	 * so we have only one rx ring and one tx ring enabled.
3535 	 */
3536 	ASSERT(ixgbe->num_rx_rings == 1);
3537 	ASSERT(ixgbe->num_tx_rings == 1);
3538 
3539 	/*
3540 	 * For MSI interrupt, rx rings[0] will use RTxQ[0].
3541 	 */
3542 	if (eicr & 0x1) {
3543 		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
3544 	}
3545 
3546 	/*
3547 	 * For MSI interrupt, tx rings[0] will use RTxQ[1].
3548 	 */
3549 	if (eicr & 0x2) {
3550 		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
3551 	}
3552 
3553 	/* any interrupt type other than tx/rx */
3554 	if (eicr & ixgbe->capab->other_intr) {
3555 		mutex_enter(&ixgbe->gen_lock);
3556 		if (hw->mac.type < ixgbe_mac_82599EB) {
3557 			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3558 		}
3559 		if (hw->mac.type == ixgbe_mac_82599EB) {
3560 			ixgbe->eimc = IXGBE_82599_OTHER_INTR;
3561 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
3562 		}
3563 		ixgbe_intr_other_work(ixgbe, eicr);
3564 		ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3565 		mutex_exit(&ixgbe->gen_lock);
3566 	}
3567 
3568 	/* re-enable the interrupts which were automasked */
3569 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3570 
3571 	return (DDI_INTR_CLAIMED);
3572 }
3573 
3574 /*
3575  * ixgbe_intr_msix - Interrupt handler for MSI-X.
3576  */
3577 static uint_t
3578 ixgbe_intr_msix(void *arg1, void *arg2)
3579 {
3580 	ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
3581 	ixgbe_t *ixgbe = vect->ixgbe;
3582 	struct ixgbe_hw *hw = &ixgbe->hw;
3583 	uint32_t eicr;
3584 	int r_idx = 0;
3585 
3586 	_NOTE(ARGUNUSED(arg2));
3587 
3588 	/*
3589 	 * Clean each rx ring that has its bit set in the map
3590 	 */
3591 	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
3592 	while (r_idx >= 0) {
3593 		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
3594 		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
3595 		    (ixgbe->num_rx_rings - 1));
3596 	}
3597 
3598 	/*
3599 	 * Clean each tx ring that has its bit set in the map
3600 	 */
3601 	r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
3602 	while (r_idx >= 0) {
3603 		ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
3604 		r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
3605 		    (ixgbe->num_tx_rings - 1));
3606 	}
3609 	/*
3610 	 * Clean other interrupt (link change) that has its bit set in the map
3611 	 */
3612 	if (BT_TEST(vect->other_map, 0) == 1) {
3613 		eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3614 
3615 		if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
3616 		    DDI_FM_OK) {
3617 			ddi_fm_service_impact(ixgbe->dip,
3618 			    DDI_SERVICE_DEGRADED);
3619 			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
3620 			return (DDI_INTR_CLAIMED);
3621 		}
3622 
3623 		/*
3624 		 * Need check cause bits and only other causes will
3625 		 * be processed
3626 		 */
3627 		/* any interrupt type other than tx/rx */
3628 		if (eicr & ixgbe->capab->other_intr) {
3629 			if (hw->mac.type < ixgbe_mac_82599EB) {
3630 				mutex_enter(&ixgbe->gen_lock);
3631 				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3632 				ixgbe_intr_other_work(ixgbe, eicr);
3633 				mutex_exit(&ixgbe->gen_lock);
			} else if (hw->mac.type == ixgbe_mac_82599EB) {
				mutex_enter(&ixgbe->gen_lock);
				ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
				ixgbe_intr_other_work(ixgbe, eicr);
				mutex_exit(&ixgbe->gen_lock);
			}
3642 		}
3643 
3644 		/* re-enable the interrupts which were automasked */
3645 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3646 	}
3647 
3648 	return (DDI_INTR_CLAIMED);
3649 }
3650 
3651 /*
3652  * ixgbe_alloc_intrs - Allocate interrupts for the driver.
3653  *
 * Normal sequence is to try MSI-X; if not successful, try MSI;
3655  * if not successful, try Legacy.
3656  * ixgbe->intr_force can be used to force sequence to start with
3657  * any of the 3 types.
3658  * If MSI-X is not used, number of tx/rx rings is forced to 1.
3659  */
3660 static int
3661 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
3662 {
3663 	dev_info_t *devinfo;
3664 	int intr_types;
3665 	int rc;
3666 
3667 	devinfo = ixgbe->dip;
3668 
3669 	/*
3670 	 * Get supported interrupt types
3671 	 */
3672 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
3673 
3674 	if (rc != DDI_SUCCESS) {
3675 		ixgbe_log(ixgbe,
3676 		    "Get supported interrupt types failed: %d", rc);
3677 		return (IXGBE_FAILURE);
3678 	}
3679 	IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
3680 
3681 	ixgbe->intr_type = 0;
3682 
3683 	/*
3684 	 * Install MSI-X interrupts
3685 	 */
3686 	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
3687 	    (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
3688 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
3689 		if (rc == IXGBE_SUCCESS)
3690 			return (IXGBE_SUCCESS);
3691 
3692 		ixgbe_log(ixgbe,
3693 		    "Allocate MSI-X failed, trying MSI interrupts...");
3694 	}
3695 
3696 	/*
3697 	 * MSI-X not used, force rings and groups to 1
3698 	 */
3699 	ixgbe->num_rx_rings = 1;
3700 	ixgbe->num_rx_groups = 1;
3701 	ixgbe->num_tx_rings = 1;
3702 	ixgbe_log(ixgbe,
3703 	    "MSI-X not used, force rings and groups number to 1");
3704 
3705 	/*
3706 	 * Install MSI interrupts
3707 	 */
3708 	if ((intr_types & DDI_INTR_TYPE_MSI) &&
3709 	    (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
3710 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
3711 		if (rc == IXGBE_SUCCESS)
3712 			return (IXGBE_SUCCESS);
3713 
3714 		ixgbe_log(ixgbe,
3715 		    "Allocate MSI failed, trying Legacy interrupts...");
3716 	}
3717 
3718 	/*
3719 	 * Install legacy interrupts
3720 	 */
3721 	if (intr_types & DDI_INTR_TYPE_FIXED) {
3722 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
3723 		if (rc == IXGBE_SUCCESS)
3724 			return (IXGBE_SUCCESS);
3725 
3726 		ixgbe_log(ixgbe,
3727 		    "Allocate Legacy interrupts failed");
3728 	}
3729 
3730 	/*
3731 	 * If none of the 3 types succeeded, return failure
3732 	 */
3733 	return (IXGBE_FAILURE);
3734 }
3735 
3736 /*
3737  * ixgbe_alloc_intr_handles - Allocate interrupt handles.
3738  *
3739  * For legacy and MSI, only 1 handle is needed.  For MSI-X,
3740  * if fewer than 2 handles are available, return failure.
3741  * Upon success, this maps the vectors to rx and tx rings for
3742  * interrupts.
3743  */
3744 static int
3745 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
3746 {
3747 	dev_info_t *devinfo;
3748 	int request, count, avail, actual;
3749 	int minimum;
3750 	int rc;
3751 
3752 	devinfo = ixgbe->dip;
3753 
3754 	switch (intr_type) {
3755 	case DDI_INTR_TYPE_FIXED:
3756 		request = 1;	/* Request 1 legacy interrupt handle */
3757 		minimum = 1;
3758 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
3759 		break;
3760 
3761 	case DDI_INTR_TYPE_MSI:
3762 		request = 1;	/* Request 1 MSI interrupt handle */
3763 		minimum = 1;
3764 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
3765 		break;
3766 
3767 	case DDI_INTR_TYPE_MSIX:
3768 		/*
3769 		 * Best number of vectors for the adapter is
3770 		 * # rx rings + # tx rings.
3771 		 */
3772 		request = ixgbe->num_rx_rings + ixgbe->num_tx_rings;
3773 		if (request > ixgbe->capab->max_ring_vect)
3774 			request = ixgbe->capab->max_ring_vect;
3775 		minimum = 2;
3776 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
3777 		break;
3778 
3779 	default:
3780 		ixgbe_log(ixgbe,
3781 		    "invalid call to ixgbe_alloc_intr_handles(): %d\n",
3782 		    intr_type);
3783 		return (IXGBE_FAILURE);
3784 	}
3785 	IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d  minimum: %d",
3786 	    request, minimum);
3787 
3788 	/*
3789 	 * Get number of supported interrupts
3790 	 */
3791 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
3792 	if ((rc != DDI_SUCCESS) || (count < minimum)) {
3793 		ixgbe_log(ixgbe,
3794 		    "Get interrupt number failed. Return: %d, count: %d",
3795 		    rc, count);
3796 		return (IXGBE_FAILURE);
3797 	}
3798 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
3799 
3800 	/*
3801 	 * Get number of available interrupts
3802 	 */
3803 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
3804 	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
3805 		ixgbe_log(ixgbe,
3806 		    "Get interrupt available number failed. "
3807 		    "Return: %d, available: %d", rc, avail);
3808 		return (IXGBE_FAILURE);
3809 	}
3810 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts available: %d", avail);
3811 
3812 	if (avail < request) {
3813 		ixgbe_log(ixgbe, "Request %d handles, %d available",
3814 		    request, avail);
3815 		request = avail;
3816 	}
3817 
3818 	actual = 0;
3819 	ixgbe->intr_cnt = 0;
3820 
3821 	/*
3822 	 * Allocate an array of interrupt handles
3823 	 */
3824 	ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
3825 	ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
3826 
3827 	rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
3828 	    request, &actual, DDI_INTR_ALLOC_NORMAL);
3829 	if (rc != DDI_SUCCESS) {
3830 		ixgbe_log(ixgbe, "Allocate interrupts failed. "
3831 		    "return: %d, request: %d, actual: %d",
3832 		    rc, request, actual);
3833 		goto alloc_handle_fail;
3834 	}
3835 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
3836 
3837 	ixgbe->intr_cnt = actual;
3838 
3839 	/*
	 * Now we know the actual number of vectors.  These vectors are
	 * then mapped to the "other" cause and to the rx and tx rings.
3842 	 */
3843 	if (actual < minimum) {
3844 		ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
3845 		    actual);
3846 		goto alloc_handle_fail;
3847 	}
3848 
3849 	/*
3850 	 * Get priority for first vector, assume remaining are all the same
3851 	 */
3852 	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
3853 	if (rc != DDI_SUCCESS) {
3854 		ixgbe_log(ixgbe,
3855 		    "Get interrupt priority failed: %d", rc);
3856 		goto alloc_handle_fail;
3857 	}
3858 
3859 	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
3860 	if (rc != DDI_SUCCESS) {
3861 		ixgbe_log(ixgbe,
3862 		    "Get interrupt cap failed: %d", rc);
3863 		goto alloc_handle_fail;
3864 	}
3865 
3866 	ixgbe->intr_type = intr_type;
3867 
3868 	return (IXGBE_SUCCESS);
3869 
3870 alloc_handle_fail:
3871 	ixgbe_rem_intrs(ixgbe);
3872 
3873 	return (IXGBE_FAILURE);
3874 }
3875 
3876 /*
3877  * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
3878  *
3879  * Before adding the interrupt handlers, the interrupt vectors have
3880  * been allocated, and the rx/tx rings have also been allocated.
3881  */
3882 static int
3883 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
3884 {
3885 	int vector = 0;
3886 	int rc;
3887 
3888 	switch (ixgbe->intr_type) {
3889 	case DDI_INTR_TYPE_MSIX:
3890 		/*
3891 		 * Add interrupt handler for all vectors
3892 		 */
3893 		for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
3894 			/*
3895 			 * install pointer to vect_map[vector]
3896 			 */
3897 			rc = ddi_intr_add_handler(ixgbe->htable[vector],
3898 			    (ddi_intr_handler_t *)ixgbe_intr_msix,
3899 			    (void *)&ixgbe->vect_map[vector], NULL);
3900 
3901 			if (rc != DDI_SUCCESS) {
3902 				ixgbe_log(ixgbe,
3903 				    "Add MSI-X interrupt handler failed. "
3904 				    "return: %d, vector: %d", rc, vector);
3905 				for (vector--; vector >= 0; vector--) {
3906 					(void) ddi_intr_remove_handler(
3907 					    ixgbe->htable[vector]);
3908 				}
3909 				return (IXGBE_FAILURE);
3910 			}
3911 		}
3912 
3913 		break;
3914 
3915 	case DDI_INTR_TYPE_MSI:
3916 		/*
3917 		 * Add the interrupt handler for the single MSI vector
3918 		 */
3919 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
3920 		    (ddi_intr_handler_t *)ixgbe_intr_msi,
3921 		    (void *)ixgbe, NULL);
3922 
3923 		if (rc != DDI_SUCCESS) {
3924 			ixgbe_log(ixgbe,
3925 			    "Add MSI interrupt handler failed: %d", rc);
3926 			return (IXGBE_FAILURE);
3927 		}
3928 
3929 		break;
3930 
3931 	case DDI_INTR_TYPE_FIXED:
3932 		/*
3933 		 * Add the interrupt handler for the single legacy vector
3934 		 */
3935 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
3936 		    (ddi_intr_handler_t *)ixgbe_intr_legacy,
3937 		    (void *)ixgbe, NULL);
3938 
3939 		if (rc != DDI_SUCCESS) {
3940 			ixgbe_log(ixgbe,
3941 			    "Add legacy interrupt handler failed: %d", rc);
3942 			return (IXGBE_FAILURE);
3943 		}
3944 
3945 		break;
3946 
3947 	default:
3948 		return (IXGBE_FAILURE);
3949 	}
3950 
3951 	return (IXGBE_SUCCESS);
3952 }
3953 
3954 #pragma inline(ixgbe_map_rxring_to_vector)
3955 /*
3956  * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
3957  */
3958 static void
3959 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
3960 {
3961 	/*
3962 	 * Set bit in map
3963 	 */
3964 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
3965 
3966 	/*
3967 	 * Count bits set
3968 	 */
3969 	ixgbe->vect_map[v_idx].rxr_cnt++;
3970 
3971 	/*
3972 	 * Remember the vector and its bit position for this ring
3973 	 */
3974 	ixgbe->rx_rings[r_idx].intr_vector = v_idx;
3975 	ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
3976 }
3977 
3978 #pragma inline(ixgbe_map_txring_to_vector)
3979 /*
3980  * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
3981  */
3982 static void
3983 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
3984 {
3985 	/*
3986 	 * Set bit in map
3987 	 */
3988 	BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
3989 
3990 	/*
3991 	 * Count bits set
3992 	 */
3993 	ixgbe->vect_map[v_idx].txr_cnt++;
3994 
3995 	/*
3996 	 * Remember the vector and its bit position for this ring
3997 	 */
3998 	ixgbe->tx_rings[t_idx].intr_vector = v_idx;
3999 	ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
4000 }
4001 
4002 /*
4003  * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
4004  * allocation register (IVAR).
4005  * cause:
4006  *   -1 : other cause
4007  *    0 : rx
4008  *    1 : tx
4009  */
4010 static void
4011 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
4012     int8_t cause)
4013 {
4014 	struct ixgbe_hw *hw = &ixgbe->hw;
4015 	u32 ivar, index;
4016 
4017 	switch (hw->mac.type) {
4018 	case ixgbe_mac_82598EB:
4019 		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4020 		if (cause == -1) {
4021 			cause = 0;
4022 		}
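		/*
		 * 82598 IVAR layout, as encoded by the arithmetic below:
		 * entries are 8 bits wide and packed four per 32-bit
		 * register; tx entries follow the rx entries at offset
		 * 64 (hence cause * 64), and the low two bits of the
		 * entry index select the byte lane within the register.
		 */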
4023 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4024 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4025 		ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4026 		ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4027 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4028 		break;
4029 	case ixgbe_mac_82599EB:
4030 		if (cause == -1) {
4031 			/* other causes */
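			/*
			 * IVAR_MISC holds two 8-bit entries; the low bit
			 * of the entry index selects byte 0 or byte 1.
			 */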
4032 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4033 			index = (intr_alloc_entry & 1) * 8;
4034 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4035 			ivar &= ~(0xFF << index);
4036 			ivar |= (msix_vector << index);
4037 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4038 		} else {
4039 			/* tx or rx causes */
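			/*
			 * Each IVAR register covers two queues: the even
			 * queue uses the low 16 bits and the odd queue
			 * the high 16 bits; within each half, byte 0
			 * carries the rx cause and byte 1 the tx cause.
			 */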
4040 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4041 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4042 			ivar = IXGBE_READ_REG(hw,
4043 			    IXGBE_IVAR(intr_alloc_entry >> 1));
4044 			ivar &= ~(0xFF << index);
4045 			ivar |= (msix_vector << index);
4046 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4047 			    ivar);
4048 		}
4049 		break;
4050 	default:
4051 		break;
4052 	}
4053 }
4054 
4055 /*
4056  * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
4057  * the given interrupt vector allocation register (IVAR).
4058  * cause:
4059  *   -1 : other cause
4060  *    0 : rx
4061  *    1 : tx
4062  */
4063 static void
4064 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4065 {
4066 	struct ixgbe_hw *hw = &ixgbe->hw;
4067 	u32 ivar, index;
4068 
4069 	switch (hw->mac.type) {
4070 	case ixgbe_mac_82598EB:
4071 		if (cause == -1) {
4072 			cause = 0;
4073 		}
4074 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4075 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4076 		ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4077 		    (intr_alloc_entry & 0x3)));
4078 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4079 		break;
4080 	case ixgbe_mac_82599EB:
4081 		if (cause == -1) {
4082 			/* other causes */
4083 			index = (intr_alloc_entry & 1) * 8;
4084 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4085 			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4086 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4087 		} else {
4088 			/* tx or rx causes */
4089 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4090 			ivar = IXGBE_READ_REG(hw,
4091 			    IXGBE_IVAR(intr_alloc_entry >> 1));
4092 			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4093 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4094 			    ivar);
4095 		}
4096 		break;
4097 	default:
4098 		break;
4099 	}
4100 }
4101 
4102 /*
4103  * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
4104  * the given interrupt vector allocation register (IVAR).
4105  * cause:
4106  *   -1 : other cause
4107  *    0 : rx
4108  *    1 : tx
4109  */
4110 static void
4111 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4112 {
4113 	struct ixgbe_hw *hw = &ixgbe->hw;
4114 	u32 ivar, index;
4115 
4116 	switch (hw->mac.type) {
4117 	case ixgbe_mac_82598EB:
4118 		if (cause == -1) {
4119 			cause = 0;
4120 		}
4121 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4122 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4123 		ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
4124 		    (intr_alloc_entry & 0x3)));
4125 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4126 		break;
4127 	case ixgbe_mac_82599EB:
4128 		if (cause == -1) {
4129 			/* other causes */
4130 			index = (intr_alloc_entry & 1) * 8;
4131 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4132 			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4133 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4134 		} else {
4135 			/* tx or rx causes */
4136 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4137 			ivar = IXGBE_READ_REG(hw,
4138 			    IXGBE_IVAR(intr_alloc_entry >> 1));
4139 			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4140 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4141 			    ivar);
4142 		}
4143 		break;
4144 	default:
4145 		break;
4146 	}
4147 }
4148 
4149 /*
4150  * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
4151  *
4152  * For MSI-X, the rx ring interrupts, the tx ring interrupts and the
4153  * other interrupt are mapped to vectors [0 .. (intr_cnt - 1)].
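 *
 * For example (an illustration of the round-robin mapping below, not a
 * fixed assignment): with intr_cnt = 4, 4 rx rings and 4 tx rings, the
 * 'other' interrupt lands on vector 0, rx rings 0-3 on vectors 1, 2, 3
 * and 0, and tx rings 0-3 likewise on vectors 1, 2, 3 and 0.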
4154  */
4155 static int
4156 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
4157 {
4158 	int i, vector = 0;
4159 
4160 	/* initialize vector map */
4161 	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
4162 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4163 		ixgbe->vect_map[i].ixgbe = ixgbe;
4164 	}
4165 
4166 	/*
4167 	 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
4168 	 * tx rings[0] on RTxQ[1].
4169 	 */
4170 	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
4171 		ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
4172 		ixgbe_map_txring_to_vector(ixgbe, 0, 1);
4173 		return (IXGBE_SUCCESS);
4174 	}
4175 
4176 	/*
4177 	 * Interrupts/vectors mapping for MSI-X
4178 	 */
4179 
4180 	/*
4181 	 * Map the other interrupt to vector 0: set the bit in the map
4182 	 * and count the bits set.
4183 	 */
4184 	BT_SET(ixgbe->vect_map[vector].other_map, 0);
4185 	ixgbe->vect_map[vector].other_cnt++;
4186 	vector++;
4187 
4188 	/*
4189 	 * Map rx ring interrupts to vectors
4190 	 */
4191 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
4192 		ixgbe_map_rxring_to_vector(ixgbe, i, vector);
4193 		vector = (vector + 1) % ixgbe->intr_cnt;
4194 	}
4195 
4196 	/*
4197 	 * Map tx ring interrupts to vectors
4198 	 */
4199 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
4200 		ixgbe_map_txring_to_vector(ixgbe, i, vector);
4201 		vector = (vector + 1) % ixgbe->intr_cnt;
4202 	}
4203 
4204 	return (IXGBE_SUCCESS);
4205 }
4206 
4207 /*
4208  * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
4209  *
4210  * This relies on ring/vector mapping already set up in the
4211  * vect_map[] structures.
4212  */
4213 static void
4214 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
4215 {
4216 	struct ixgbe_hw *hw = &ixgbe->hw;
4217 	ixgbe_intr_vector_t *vect;	/* vector bitmap */
4218 	int r_idx;	/* ring index */
4219 	int v_idx;	/* vector index */
4220 
4221 	/*
4222 	 * Clear any previous entries
4223 	 */
4224 	switch (hw->mac.type) {
4225 	case ixgbe_mac_82598EB:
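		/* the 82598 exposes 25 IVAR registers */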
4226 		for (v_idx = 0; v_idx < 25; v_idx++)
4227 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4228 
4229 		break;
4230 	case ixgbe_mac_82599EB:
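		/* the 82599 has 64 IVAR registers plus the misc-cause IVAR */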
4231 		for (v_idx = 0; v_idx < 64; v_idx++)
4232 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4233 		IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
4234 
4235 		break;
4236 	default:
4237 		break;
4238 	}
4239 
4240 	/*
4241 	 * For non-MSI-X interrupts, rx rings[0] will use RTxQ[0], and
4242 	 * tx rings[0] will use RTxQ[1].
4243 	 */
4244 	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
4245 		ixgbe_setup_ivar(ixgbe, 0, 0, 0);
4246 		ixgbe_setup_ivar(ixgbe, 0, 1, 1);
4247 		return;
4248 	}
4249 
4250 	/*
4251 	 * For MSI-X interrupt, "Other" is always on vector[0].
4252 	 */
4253 	ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);
4254 
4255 	/*
4256 	 * For each interrupt vector, populate the IVAR table
4257 	 */
4258 	for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
4259 		vect = &ixgbe->vect_map[v_idx];
4260 
4261 		/*
4262 		 * For each rx ring bit set
4263 		 */
4264 		r_idx = bt_getlowbit(vect->rx_map, 0,
4265 		    (ixgbe->num_rx_rings - 1));
4266 
4267 		while (r_idx >= 0) {
4268 			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 0);
4269 			r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
4270 			    (ixgbe->num_rx_rings - 1));
4271 		}
4272 
4273 		/*
4274 		 * For each tx ring bit set
4275 		 */
4276 		r_idx = bt_getlowbit(vect->tx_map, 0,
4277 		    (ixgbe->num_tx_rings - 1));
4278 
4279 		while (r_idx >= 0) {
4280 			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
4281 			r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
4282 			    (ixgbe->num_tx_rings - 1));
4283 		}
4284 	}
4285 }
4286 
4287 /*
4288  * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
4289  */
4290 static void
4291 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
4292 {
4293 	int i;
4294 	int rc;
4295 
4296 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4297 		rc = ddi_intr_remove_handler(ixgbe->htable[i]);
4298 		if (rc != DDI_SUCCESS) {
4299 			IXGBE_DEBUGLOG_1(ixgbe,
4300 			    "Remove intr handler failed: %d", rc);
4301 		}
4302 	}
4303 }
4304 
4305 /*
4306  * ixgbe_rem_intrs - Remove the allocated interrupts.
4307  */
4308 static void
4309 ixgbe_rem_intrs(ixgbe_t *ixgbe)
4310 {
4311 	int i;
4312 	int rc;
4313 
4314 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4315 		rc = ddi_intr_free(ixgbe->htable[i]);
4316 		if (rc != DDI_SUCCESS) {
4317 			IXGBE_DEBUGLOG_1(ixgbe,
4318 			    "Free intr failed: %d", rc);
4319 		}
4320 	}
4321 
4322 	kmem_free(ixgbe->htable, ixgbe->intr_size);
4323 	ixgbe->htable = NULL;
4324 }
4325 
4326 /*
4327  * ixgbe_enable_intrs - Enable all the ddi interrupts.
4328  */
4329 static int
4330 ixgbe_enable_intrs(ixgbe_t *ixgbe)
4331 {
4332 	int i;
4333 	int rc;
4334 
4335 	/*
4336 	 * Enable interrupts
4337 	 */
4338 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
4339 		/*
4340 		 * Call ddi_intr_block_enable() when DDI_INTR_FLAG_BLOCK is set
4341 		 */
4342 		rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
4343 		if (rc != DDI_SUCCESS) {
4344 			ixgbe_log(ixgbe,
4345 			    "Enable block intr failed: %d", rc);
4346 			return (IXGBE_FAILURE);
4347 		}
4348 	} else {
4349 		/*
4350 		 * Call ddi_intr_enable() for legacy/MSI without block-enable
4351 		 */
4352 		for (i = 0; i < ixgbe->intr_cnt; i++) {
4353 			rc = ddi_intr_enable(ixgbe->htable[i]);
4354 			if (rc != DDI_SUCCESS) {
4355 				ixgbe_log(ixgbe,
4356 				    "Enable intr failed: %d", rc);
4357 				return (IXGBE_FAILURE);
4358 			}
4359 		}
4360 	}
4361 
4362 	return (IXGBE_SUCCESS);
4363 }
4364 
4365 /*
4366  * ixgbe_disable_intrs - Disable all the interrupts.
4367  */
4368 static int
4369 ixgbe_disable_intrs(ixgbe_t *ixgbe)
4370 {
4371 	int i;
4372 	int rc;
4373 
4374 	/*
4375 	 * Disable all interrupts
4376 	 */
4377 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
4378 		rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
4379 		if (rc != DDI_SUCCESS) {
4380 			ixgbe_log(ixgbe,
4381 			    "Disable block intr failed: %d", rc);
4382 			return (IXGBE_FAILURE);
4383 		}
4384 	} else {
4385 		for (i = 0; i < ixgbe->intr_cnt; i++) {
4386 			rc = ddi_intr_disable(ixgbe->htable[i]);
4387 			if (rc != DDI_SUCCESS) {
4388 				ixgbe_log(ixgbe,
4389 				    "Disable intr failed: %d", rc);
4390 				return (IXGBE_FAILURE);
4391 			}
4392 		}
4393 	}
4394 
4395 	return (IXGBE_SUCCESS);
4396 }
4397 
4398 /*
4399  * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
4400  */
4401 static void
4402 ixgbe_get_hw_state(ixgbe_t *ixgbe)
4403 {
4404 	struct ixgbe_hw *hw = &ixgbe->hw;
4405 	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
4406 	boolean_t link_up = B_FALSE;
4407 	uint32_t pcs1g_anlp = 0;
4408 	uint32_t pcs1g_ana = 0;
4409 
4410 	ASSERT(mutex_owned(&ixgbe->gen_lock));
4411 	ixgbe->param_lp_1000fdx_cap = 0;
4412 	ixgbe->param_lp_100fdx_cap  = 0;
4413 
4414 	/* check for link, don't wait */
4415 	(void) ixgbe_check_link(hw, &speed, &link_up, false);
4416 	if (link_up) {
4417 		pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
4418 		pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
4419 
4420 		ixgbe->param_lp_1000fdx_cap =
4421 		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
4422 		ixgbe->param_lp_100fdx_cap =
4423 		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
4424 	}
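	/*
	 * Note: the LP 1000fdx and 100fdx capabilities above are both
	 * derived from the same PCS1GANLP full-duplex bit, and the
	 * advertised capabilities below from the same PCS1GANA bit;
	 * the 1G PCS words apparently carry no separate speed bits.
	 */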
4425 
4426 	ixgbe->param_adv_1000fdx_cap =
4427 	    (pcs1g_ana & IXGBE_PCS1GANA_FDC)  ? 1 : 0;
4428 	ixgbe->param_adv_100fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC)  ? 1 : 0;
4429 }
4430 
4431 /*
4432  * ixgbe_get_driver_control - Notify that driver is in control of device.
4433  */
4434 static void
4435 ixgbe_get_driver_control(struct ixgbe_hw *hw)
4436 {
4437 	uint32_t ctrl_ext;
4438 
4439 	/*
4440 	 * Notify firmware that driver is in control of device
4441 	 */
4442 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4443 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
4444 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4445 }
4446 
4447 /*
4448  * ixgbe_release_driver_control - Notify that driver is no longer in control
4449  * of device.
4450  */
4451 static void
4452 ixgbe_release_driver_control(struct ixgbe_hw *hw)
4453 {
4454 	uint32_t ctrl_ext;
4455 
4456 	/*
4457 	 * Notify firmware that driver is no longer in control of device
4458 	 */
4459 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4460 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
4461 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4462 }
4463 
4464 /*
4465  * ixgbe_atomic_reserve - Atomically decrease *count_p by n without blocking.
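 *
 * Returns the new count on success, or -1 if fewer than n remain.  The
 * compare-and-swap loop retries whenever another thread has changed the
 * count between the read of *count_p and the atomic_cas_32().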
4466  */
4467 int
4468 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
4469 {
4470 	uint32_t oldval;
4471 	uint32_t newval;
4472 
4473 	/*
4474 	 * ATOMICALLY
4475 	 */
4476 	do {
4477 		oldval = *count_p;
4478 		if (oldval < n)
4479 			return (-1);
4480 		newval = oldval - n;
4481 	} while (atomic_cas_32(count_p, oldval, newval) != oldval);
4482 
4483 	return (newval);
4484 }
4485 
4486 /*
4487  * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
4488  */
4489 static uint8_t *
4490 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
4491 {
4492 	uint8_t *addr = *upd_ptr;
4493 	uint8_t *new_ptr;
4494 
4495 	_NOTE(ARGUNUSED(hw));
4496 	_NOTE(ARGUNUSED(vmdq));
4497 
4498 	new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
4499 	*upd_ptr = new_ptr;
4500 	return (addr);
4501 }
4502 
4503 /*
4504  * FMA support
4505  */
4506 int
4507 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
4508 {
4509 	ddi_fm_error_t de;
4510 
4511 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
4512 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
4513 	return (de.fme_status);
4514 }
4515 
4516 int
4517 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
4518 {
4519 	ddi_fm_error_t de;
4520 
4521 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
4522 	return (de.fme_status);
4523 }
4524 
4525 /*
4526  * ixgbe_fm_error_cb - The IO fault service error handling callback function.
4527  */
4528 static int
4529 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
4530 {
4531 	_NOTE(ARGUNUSED(impl_data));
4532 	/*
4533 	 * as the driver can always deal with an error in any dma or
4534 	 * access handle, we can just return the fme_status value.
4535 	 */
4536 	pci_ereport_post(dip, err, NULL);
4537 	return (err->fme_status);
4538 }
4539 
4540 static void
4541 ixgbe_fm_init(ixgbe_t *ixgbe)
4542 {
4543 	ddi_iblock_cookie_t iblk;
4544 	int fma_dma_flag;
4545 
4546 	/*
4547 	 * Only register with IO Fault Services if we have some capability
4548 	 */
4549 	if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
4550 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
4551 	} else {
4552 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4553 	}
4554 
4555 	if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
4556 		fma_dma_flag = 1;
4557 	} else {
4558 		fma_dma_flag = 0;
4559 	}
4560 
4561 	ixgbe_set_fma_flags(fma_dma_flag);
4562 
4563 	if (ixgbe->fm_capabilities) {
4564 
4565 		/*
4566 		 * Register capabilities with IO Fault Services
4567 		 */
4568 		ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
4569 
4570 		/*
4571 		 * Initialize pci ereport capabilities if ereport capable
4572 		 */
4573 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
4574 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4575 			pci_ereport_setup(ixgbe->dip);
4576 
4577 		/*
4578 		 * Register error callback if error callback capable
4579 		 */
4580 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4581 			ddi_fm_handler_register(ixgbe->dip,
4582 			    ixgbe_fm_error_cb, (void *)ixgbe);
4583 	}
4584 }
4585 
4586 static void
4587 ixgbe_fm_fini(ixgbe_t *ixgbe)
4588 {
4589 	/*
4590 	 * Only unregister FMA capabilities if they are registered
4591 	 */
4592 	if (ixgbe->fm_capabilities) {
4593 
4594 		/*
4595 		 * Release any resources allocated by pci_ereport_setup()
4596 		 */
4597 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
4598 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4599 			pci_ereport_teardown(ixgbe->dip);
4600 
4601 		/*
4602 		 * Un-register error callback if error callback capable
4603 		 */
4604 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4605 			ddi_fm_handler_unregister(ixgbe->dip);
4606 
4607 		/*
4608 		 * Unregister from IO Fault Service
4609 		 */
4610 		ddi_fm_fini(ixgbe->dip);
4611 	}
4612 }
4613 
4614 void
4615 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
4616 {
4617 	uint64_t ena;
4618 	char buf[FM_MAX_CLASS];
4619 
4620 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
4621 	ena = fm_ena_generate(0, FM_ENA_FMT1);
4622 	if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
4623 		ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
4624 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
4625 	}
4626 }
4627 
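/*
 * ixgbe_ring_start - Ring start callback registered with the MAC layer.
 *
 * Saves the MAC layer's generation number for this ring; it is later
 * handed back to mac (e.g. via mac_rx_ring()) with received packets.
 */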
4628 static int
4629 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
4630 {
4631 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
4632 
4633 	mutex_enter(&rx_ring->rx_lock);
4634 	rx_ring->ring_gen_num = mr_gen_num;
4635 	mutex_exit(&rx_ring->rx_lock);
4636 	return (0);
4637 }
4638 
4639 /*
4640  * Callback function for the MAC layer to register all rings.
4641  */
4642 /* ARGSUSED */
4643 void
4644 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
4645     const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
4646 {
4647 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
4648 	mac_intr_t *mintr = &infop->mri_intr;
4649 
4650 	switch (rtype) {
4651 	case MAC_RING_TYPE_RX: {
4652 		ASSERT(rg_index == 0);
4653 		ASSERT(ring_index < ixgbe->num_rx_rings);
4654 
4655 		ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[ring_index];
4656 		rx_ring->ring_handle = rh;
4657 
4658 		infop->mri_driver = (mac_ring_driver_t)rx_ring;
4659 		infop->mri_start = ixgbe_ring_start;
4660 		infop->mri_stop = NULL;
4661 		infop->mri_poll = ixgbe_ring_rx_poll;
4662 
4663 		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
4664 		mintr->mi_enable = ixgbe_rx_ring_intr_enable;
4665 		mintr->mi_disable = ixgbe_rx_ring_intr_disable;
4666 
4667 		break;
4668 	}
4669 	case MAC_RING_TYPE_TX: {
4670 		ASSERT(rg_index == -1);
4671 		ASSERT(ring_index < ixgbe->num_tx_rings);
4672 
4673 		ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
4674 		tx_ring->ring_handle = rh;
4675 
4676 		infop->mri_driver = (mac_ring_driver_t)tx_ring;
4677 		infop->mri_start = NULL;
4678 		infop->mri_stop = NULL;
4679 		infop->mri_tx = ixgbe_ring_tx;
4680 
4681 		break;
4682 	}
4683 	default:
4684 		break;
4685 	}
4686 }
4687 
4688 /*
4689  * Callback function for the MAC layer to register all groups.
4690  */
4691 void
4692 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
4693     mac_group_info_t *infop, mac_group_handle_t gh)
4694 {
4695 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
4696 
4697 	switch (rtype) {
4698 	case MAC_RING_TYPE_RX: {
4699 		ixgbe_rx_group_t *rx_group;
4700 
4701 		rx_group = &ixgbe->rx_groups[index];
4702 		rx_group->group_handle = gh;
4703 
4704 		infop->mgi_driver = (mac_group_driver_t)rx_group;
4705 		infop->mgi_start = NULL;
4706 		infop->mgi_stop = NULL;
4707 		infop->mgi_addmac = ixgbe_addmac;
4708 		infop->mgi_remmac = ixgbe_remmac;
4709 		infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
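		/* i.e. each group advertises an equal share of the rx rings */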
4710 
4711 		break;
4712 	}
4713 	case MAC_RING_TYPE_TX:
4714 		break;
4715 	default:
4716 		break;
4717 	}
4718 }
4719 
4720 /*
4721  * Enable the interrupt on the specified rx ring.
4722  */
4723 int
4724 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
4725 {
4726 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
4727 	ixgbe_t *ixgbe = rx_ring->ixgbe;
4728 	int r_idx = rx_ring->index;
4729 	int v_idx = rx_ring->intr_vector;
4730 
4731 	mutex_enter(&ixgbe->gen_lock);
4732 	ASSERT(BT_TEST(ixgbe->vect_map[v_idx].rx_map, r_idx) == 0);
4733 
4734 	/*
4735 	 * Enable the interrupt by setting the VAL bit of the given
4736 	 * interrupt vector allocation register (IVAR).
4737 	 */
4738 	ixgbe_enable_ivar(ixgbe, r_idx, 0);
4739 
4740 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
4741 
4742 	/*
4743 	 * Trigger an Rx interrupt on this ring to pick up pending packets
4744 	 */
4745 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
4746 	IXGBE_WRITE_FLUSH(&ixgbe->hw);
4747 
4748 	mutex_exit(&ixgbe->gen_lock);
4749 
4750 	return (0);
4751 }
4752 
4753 /*
4754  * Disable the interrupt on the specified rx ring.
4755  */
4756 int
4757 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
4758 {
4759 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
4760 	ixgbe_t *ixgbe = rx_ring->ixgbe;
4761 	int r_idx = rx_ring->index;
4762 	int v_idx = rx_ring->intr_vector;
4763 
4764 	mutex_enter(&ixgbe->gen_lock);
4765 	ASSERT(BT_TEST(ixgbe->vect_map[v_idx].rx_map, r_idx) == 1);
4766 
4767 	/*
4768 	 * Disable the interrupt by clearing the VAL bit of the given
4769 	 * interrupt vector allocation register (IVAR).
4770 	 */
4771 	ixgbe_disable_ivar(ixgbe, r_idx, 0);
4772 
4773 	BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
4774 
4775 	mutex_exit(&ixgbe->gen_lock);
4776 
4777 	return (0);
4778 }
4779 
4780 /*
4781  * Add a mac address.
4782  */
4783 static int
4784 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
4785 {
4786 	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
4787 	ixgbe_t *ixgbe = rx_group->ixgbe;
4788 	int slot;
4789 	int err;
4790 
4791 	mutex_enter(&ixgbe->gen_lock);
4792 
4793 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4794 		mutex_exit(&ixgbe->gen_lock);
4795 		return (ECANCELED);
4796 	}
4797 
4798 	if (ixgbe->unicst_avail == 0) {
4799 		/* no slots available */
4800 		mutex_exit(&ixgbe->gen_lock);
4801 		return (ENOSPC);
4802 	}
4803 
4804 	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
4805 		if (ixgbe->unicst_addr[slot].mac.set == 0)
4806 			break;
4807 	}
4808 
4809 	ASSERT((slot >= 0) && (slot < ixgbe->unicst_total));
4810 
4811 	if ((err = ixgbe_unicst_set(ixgbe, mac_addr, slot)) == 0) {
4812 		ixgbe->unicst_addr[slot].mac.set = 1;
4813 		ixgbe->unicst_avail--;
4814 	}
4815 
4816 	mutex_exit(&ixgbe->gen_lock);
4817 
4818 	return (err);
4819 }
4820 
4821 /*
4822  * Remove a mac address.
4823  */
4824 static int
4825 ixgbe_remmac(void *arg, const uint8_t *mac_addr)
4826 {
4827 	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
4828 	ixgbe_t *ixgbe = rx_group->ixgbe;
4829 	int slot;
4830 	int err;
4831 
4832 	mutex_enter(&ixgbe->gen_lock);
4833 
4834 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4835 		mutex_exit(&ixgbe->gen_lock);
4836 		return (ECANCELED);
4837 	}
4838 
4839 	slot = ixgbe_unicst_find(ixgbe, mac_addr);
4840 	if (slot == -1) {
4841 		mutex_exit(&ixgbe->gen_lock);
4842 		return (EINVAL);
4843 	}
4844 
4845 	if (ixgbe->unicst_addr[slot].mac.set == 0) {
4846 		mutex_exit(&ixgbe->gen_lock);
4847 		return (EINVAL);
4848 	}
4849 
4850 	bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
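	/*
	 * Push the zeroed address to hardware to clear the address
	 * out of the receive-address slot.
	 */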
4851 	if ((err = ixgbe_unicst_set(ixgbe,
4852 	    ixgbe->unicst_addr[slot].mac.addr, slot)) == 0) {
4853 		ixgbe->unicst_addr[slot].mac.set = 0;
4854 		ixgbe->unicst_avail++;
4855 	}
4856 
4857 	mutex_exit(&ixgbe->gen_lock);
4858 
4859 	return (err);
4860 }
4861