xref: /illumos-gate/usr/src/uts/common/io/ixgbe/ixgbe_main.c (revision 0d166b18feda26f6f45f5be1c0c8c5e539b90e6c)
1 /*
2  * CDDL HEADER START
3  *
4  * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
5  * The contents of this file are subject to the terms of the
6  * Common Development and Distribution License (the "License").
7  * You may not use this file except in compliance with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 
23 /*
24  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
25  * Use is subject to license terms.
26  */
27 
28 #include "ixgbe_sw.h"
29 
/* Device description string (used in modldrv) and driver version string */
static char ident[] = "Intel 10Gb Ethernet";
static char ixgbe_version[] = "ixgbe 1.1.3";
32 
33 /*
 * Local function prototypes
35  */
36 static int ixgbe_register_mac(ixgbe_t *);
37 static int ixgbe_identify_hardware(ixgbe_t *);
38 static int ixgbe_regs_map(ixgbe_t *);
39 static void ixgbe_init_properties(ixgbe_t *);
40 static int ixgbe_init_driver_settings(ixgbe_t *);
41 static void ixgbe_init_locks(ixgbe_t *);
42 static void ixgbe_destroy_locks(ixgbe_t *);
43 static int ixgbe_init(ixgbe_t *);
44 static int ixgbe_chip_start(ixgbe_t *);
45 static void ixgbe_chip_stop(ixgbe_t *);
46 static int ixgbe_reset(ixgbe_t *);
47 static void ixgbe_tx_clean(ixgbe_t *);
48 static boolean_t ixgbe_tx_drain(ixgbe_t *);
49 static boolean_t ixgbe_rx_drain(ixgbe_t *);
50 static int ixgbe_alloc_rings(ixgbe_t *);
51 static void ixgbe_free_rings(ixgbe_t *);
52 static int ixgbe_alloc_rx_data(ixgbe_t *);
53 static void ixgbe_free_rx_data(ixgbe_t *);
54 static void ixgbe_setup_rings(ixgbe_t *);
55 static void ixgbe_setup_rx(ixgbe_t *);
56 static void ixgbe_setup_tx(ixgbe_t *);
57 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
58 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
59 static void ixgbe_setup_rss(ixgbe_t *);
60 static void ixgbe_init_unicst(ixgbe_t *);
61 static int ixgbe_unicst_set(ixgbe_t *, const uint8_t *, int);
62 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
63 static void ixgbe_setup_multicst(ixgbe_t *);
64 static void ixgbe_get_hw_state(ixgbe_t *);
65 static void ixgbe_get_conf(ixgbe_t *);
66 static void ixgbe_init_params(ixgbe_t *);
67 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
68 static void ixgbe_driver_link_check(ixgbe_t *);
69 static void ixgbe_sfp_check(void *);
70 static void ixgbe_link_timer(void *);
71 static void ixgbe_local_timer(void *);
72 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
73 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
74 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
75 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
76 static boolean_t is_valid_mac_addr(uint8_t *);
77 static boolean_t ixgbe_stall_check(ixgbe_t *);
78 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
79 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
80 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
81 static int ixgbe_alloc_intrs(ixgbe_t *);
82 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
83 static int ixgbe_add_intr_handlers(ixgbe_t *);
84 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
85 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
86 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
87 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
88 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
89 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
90 static void ixgbe_setup_adapter_vector(ixgbe_t *);
91 static void ixgbe_rem_intr_handlers(ixgbe_t *);
92 static void ixgbe_rem_intrs(ixgbe_t *);
93 static int ixgbe_enable_intrs(ixgbe_t *);
94 static int ixgbe_disable_intrs(ixgbe_t *);
95 static uint_t ixgbe_intr_legacy(void *, void *);
96 static uint_t ixgbe_intr_msi(void *, void *);
97 static uint_t ixgbe_intr_msix(void *, void *);
98 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
99 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
100 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
101 static void ixgbe_get_driver_control(struct ixgbe_hw *);
102 static int ixgbe_addmac(void *, const uint8_t *);
103 static int ixgbe_remmac(void *, const uint8_t *);
104 static void ixgbe_release_driver_control(struct ixgbe_hw *);
105 
106 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
107 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
108 static int ixgbe_resume(dev_info_t *);
109 static int ixgbe_suspend(dev_info_t *);
110 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
111 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
112 
113 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
114     const void *impl_data);
115 static void ixgbe_fm_init(ixgbe_t *);
116 static void ixgbe_fm_fini(ixgbe_t *);
117 
/*
 * Driver-private properties exposed to the MAC framework (hooked up via
 * mac->m_priv_props in ixgbe_register_mac()); they are read and written
 * through the ixgbe_m_getprop()/ixgbe_m_setprop() callbacks.
 */
mac_priv_prop_t ixgbe_priv_props[] = {
	{"_tx_copy_thresh", MAC_PROP_PERM_RW},
	{"_tx_recycle_thresh", MAC_PROP_PERM_RW},
	{"_tx_overload_thresh", MAC_PROP_PERM_RW},
	{"_tx_resched_thresh", MAC_PROP_PERM_RW},
	{"_rx_copy_thresh", MAC_PROP_PERM_RW},
	{"_rx_limit_per_intr", MAC_PROP_PERM_RW},
	{"_intr_throttling", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_READ},
	{"_adv_asym_pause_cap", MAC_PROP_PERM_READ}
};

/* Number of entries in ixgbe_priv_props[] */
#define	IXGBE_MAX_PRIV_PROPS \
	(sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))
132 
/*
 * Character/block device entry points.  All are stubs (nulldev/nodev);
 * the functional entry points are installed by the MAC framework through
 * mac_init_ops() in _init().
 */
static struct cb_ops ixgbe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
153 
/*
 * Device operations.  Only attach/detach (plus ddi_power) are provided;
 * devo_getinfo is NULL and fast-reboot quiesce is not supported.
 */
static struct dev_ops ixgbe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ixgbe_attach,		/* devo_attach */
	ixgbe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ixgbe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};
168 
/*
 * Module linkage: a single loadable driver module built on
 * ixgbe_dev_ops, installed/removed via mod_install()/mod_remove().
 */
static struct modldrv ixgbe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	ident,			/* Description string */
	&ixgbe_dev_ops		/* driver ops */
};

static struct modlinkage ixgbe_modlinkage = {
	MODREV_1, &ixgbe_modldrv, NULL
};
178 
/*
 * Access attributes for register mapping: little-endian device
 * registers, strictly-ordered accesses, and access-error protection
 * (DDI_FLAGERR_ACC) so register access faults are reported to FMA.
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};
188 
/*
 * Loopback property structures: normal operation (no loopback),
 * internal MAC loopback, and external loopback.
 */
static lb_property_t lb_normal = {
	normal,	"normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
	internal, "MAC", IXGBE_LB_INTERNAL_MAC
};

static lb_property_t lb_external = {
	external, "External", IXGBE_LB_EXTERNAL
};
203 
/*
 * Callbacks registered with the MAC layer.  The flags word advertises
 * which of the optional callbacks (ioctl, getcapab, setprop, getprop)
 * are implemented.
 */
#define	IXGBE_M_CALLBACK_FLAGS \
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

static mac_callbacks_t ixgbe_m_callbacks = {
	IXGBE_M_CALLBACK_FLAGS,
	ixgbe_m_stat,		/* mc_getstat */
	ixgbe_m_start,		/* mc_start */
	ixgbe_m_stop,		/* mc_stop */
	ixgbe_m_promisc,	/* mc_setpromisc */
	ixgbe_m_multicst,	/* mc_multicst */
	NULL,			/* mc_unicst */
	NULL,			/* mc_tx */
	ixgbe_m_ioctl,		/* mc_ioctl */
	ixgbe_m_getcapab,	/* mc_getcapab */
	NULL,			/* mc_open */
	NULL,			/* mc_close */
	ixgbe_m_setprop,	/* mc_setprop */
	ixgbe_m_getprop		/* mc_getprop */
};
223 
/*
 * Initialize capabilities of each supported adapter type.
 *
 * NOTE(review): these adapter_info_t structures are file-scope and are
 * shared by every instance of the same MAC type, yet
 * ixgbe_identify_hardware() modifies their flags/other_intr fields
 * through the ixgbe->capab pointer -- confirm this is intended when
 * multiple adapters of the same family are present.
 */
static adapter_info_t ixgbe_82598eb_cap = {
	64,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	8,		/* default number of rx queues */
	32,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	16366,		/* maximum MTU size */
	0xFFFF,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	18,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE)
};

static adapter_info_t ixgbe_82599eb_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	8,		/* default number of rx queues */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE)
};
266 
267 /*
268  * Module Initialization Functions.
269  */
270 
271 int
272 _init(void)
273 {
274 	int status;
275 
276 	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
277 
278 	status = mod_install(&ixgbe_modlinkage);
279 
280 	if (status != DDI_SUCCESS) {
281 		mac_fini_ops(&ixgbe_dev_ops);
282 	}
283 
284 	return (status);
285 }
286 
287 int
288 _fini(void)
289 {
290 	int status;
291 
292 	status = mod_remove(&ixgbe_modlinkage);
293 
294 	if (status == DDI_SUCCESS) {
295 		mac_fini_ops(&ixgbe_dev_ops);
296 	}
297 
298 	return (status);
299 }
300 
301 int
302 _info(struct modinfo *modinfop)
303 {
304 	int status;
305 
306 	status = mod_info(&ixgbe_modlinkage, modinfop);
307 
308 	return (status);
309 }
310 
311 /*
312  * ixgbe_attach - Driver attach.
313  *
314  * This function is the device specific initialization entry
315  * point. This entry point is required and must be written.
316  * The DDI_ATTACH command must be provided in the attach entry
317  * point. When attach() is called with cmd set to DDI_ATTACH,
318  * all normal kernel services (such as kmem_alloc(9F)) are
319  * available for use by the driver.
320  *
321  * The attach() function will be called once for each instance
322  * of  the  device  on  the  system with cmd set to DDI_ATTACH.
323  * Until attach() succeeds, the only driver entry points which
324  * may be called are open(9E) and getinfo(9E).
325  */
326 static int
327 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
328 {
329 	ixgbe_t *ixgbe;
330 	struct ixgbe_osdep *osdep;
331 	struct ixgbe_hw *hw;
332 	int instance;
333 	char taskqname[32];
334 
335 	/*
336 	 * Check the command and perform corresponding operations
337 	 */
338 	switch (cmd) {
339 	default:
340 		return (DDI_FAILURE);
341 
342 	case DDI_RESUME:
343 		return (ixgbe_resume(devinfo));
344 
345 	case DDI_ATTACH:
346 		break;
347 	}
348 
349 	/* Get the device instance */
350 	instance = ddi_get_instance(devinfo);
351 
352 	/* Allocate memory for the instance data structure */
353 	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
354 
355 	ixgbe->dip = devinfo;
356 	ixgbe->instance = instance;
357 
358 	hw = &ixgbe->hw;
359 	osdep = &ixgbe->osdep;
360 	hw->back = osdep;
361 	osdep->ixgbe = ixgbe;
362 
363 	/* Attach the instance pointer to the dev_info data structure */
364 	ddi_set_driver_private(devinfo, ixgbe);
365 
366 	/*
367 	 * Initialize for fma support
368 	 */
369 	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
370 	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
371 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
372 	ixgbe_fm_init(ixgbe);
373 	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
374 
375 	/*
376 	 * Map PCI config space registers
377 	 */
378 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
379 		ixgbe_error(ixgbe, "Failed to map PCI configurations");
380 		goto attach_fail;
381 	}
382 	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
383 
384 	/*
385 	 * Identify the chipset family
386 	 */
387 	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
388 		ixgbe_error(ixgbe, "Failed to identify hardware");
389 		goto attach_fail;
390 	}
391 
392 	/*
393 	 * Map device registers
394 	 */
395 	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
396 		ixgbe_error(ixgbe, "Failed to map device registers");
397 		goto attach_fail;
398 	}
399 	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
400 
401 	/*
402 	 * Initialize driver parameters
403 	 */
404 	ixgbe_init_properties(ixgbe);
405 	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
406 
407 	/*
408 	 * Allocate interrupts
409 	 */
410 	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
411 		ixgbe_error(ixgbe, "Failed to allocate interrupts");
412 		goto attach_fail;
413 	}
414 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
415 
416 	/*
417 	 * Allocate rx/tx rings based on the ring numbers.
418 	 * The actual numbers of rx/tx rings are decided by the number of
419 	 * allocated interrupt vectors, so we should allocate the rings after
420 	 * interrupts are allocated.
421 	 */
422 	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
423 		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
424 		goto attach_fail;
425 	}
426 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
427 
428 	/*
429 	 * Map rings to interrupt vectors
430 	 */
431 	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
432 		ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
433 		goto attach_fail;
434 	}
435 
436 	/*
437 	 * Add interrupt handlers
438 	 */
439 	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
440 		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
441 		goto attach_fail;
442 	}
443 	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
444 
445 	/*
446 	 * Create a taskq for sfp-change
447 	 */
448 	(void) sprintf(taskqname, "ixgbe%d_taskq", instance);
449 	if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
450 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
451 		ixgbe_error(ixgbe, "taskq_create failed");
452 		goto attach_fail;
453 	}
454 	ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;
455 
456 	/*
457 	 * Initialize driver parameters
458 	 */
459 	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
460 		ixgbe_error(ixgbe, "Failed to initialize driver settings");
461 		goto attach_fail;
462 	}
463 
464 	/*
465 	 * Initialize mutexes for this device.
466 	 * Do this before enabling the interrupt handler and
467 	 * register the softint to avoid the condition where
468 	 * interrupt handler can try using uninitialized mutex.
469 	 */
470 	ixgbe_init_locks(ixgbe);
471 	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
472 
473 	/*
474 	 * Initialize chipset hardware
475 	 */
476 	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
477 		ixgbe_error(ixgbe, "Failed to initialize adapter");
478 		goto attach_fail;
479 	}
480 	ixgbe->link_check_complete = B_FALSE;
481 	ixgbe->link_check_hrtime = gethrtime() +
482 	    (IXGBE_LINK_UP_TIME * 100000000ULL);
483 	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
484 
485 	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
486 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
487 		goto attach_fail;
488 	}
489 
490 	/*
491 	 * Initialize statistics
492 	 */
493 	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
494 		ixgbe_error(ixgbe, "Failed to initialize statistics");
495 		goto attach_fail;
496 	}
497 	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
498 
499 	/*
500 	 * Register the driver to the MAC
501 	 */
502 	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
503 		ixgbe_error(ixgbe, "Failed to register MAC");
504 		goto attach_fail;
505 	}
506 	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
507 	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
508 
509 	ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
510 	    IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
511 	if (ixgbe->periodic_id == 0) {
512 		ixgbe_error(ixgbe, "Failed to add the link check timer");
513 		goto attach_fail;
514 	}
515 	ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;
516 
517 	/*
518 	 * Now that mutex locks are initialized, and the chip is also
519 	 * initialized, enable interrupts.
520 	 */
521 	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
522 		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
523 		goto attach_fail;
524 	}
525 	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
526 
527 	ixgbe_log(ixgbe, "%s", ixgbe_version);
528 	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);
529 
530 	return (DDI_SUCCESS);
531 
532 attach_fail:
533 	ixgbe_unconfigure(devinfo, ixgbe);
534 	return (DDI_FAILURE);
535 }
536 
537 /*
538  * ixgbe_detach - Driver detach.
539  *
540  * The detach() function is the complement of the attach routine.
541  * If cmd is set to DDI_DETACH, detach() is used to remove  the
542  * state  associated  with  a  given  instance of a device node
543  * prior to the removal of that instance from the system.
544  *
545  * The detach() function will be called once for each  instance
546  * of the device for which there has been a successful attach()
547  * once there are no longer  any  opens  on  the  device.
548  *
549  * Interrupts routine are disabled, All memory allocated by this
550  * driver are freed.
551  */
552 static int
553 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
554 {
555 	ixgbe_t *ixgbe;
556 
557 	/*
558 	 * Check detach command
559 	 */
560 	switch (cmd) {
561 	default:
562 		return (DDI_FAILURE);
563 
564 	case DDI_SUSPEND:
565 		return (ixgbe_suspend(devinfo));
566 
567 	case DDI_DETACH:
568 		break;
569 	}
570 
571 	/*
572 	 * Get the pointer to the driver private data structure
573 	 */
574 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
575 	if (ixgbe == NULL)
576 		return (DDI_FAILURE);
577 
578 	/*
579 	 * If the device is still running, it needs to be stopped first.
580 	 * This check is necessary because under some specific circumstances,
581 	 * the detach routine can be called without stopping the interface
582 	 * first.
583 	 */
584 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
585 		atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
586 		mutex_enter(&ixgbe->gen_lock);
587 		ixgbe_stop(ixgbe, B_TRUE);
588 		mutex_exit(&ixgbe->gen_lock);
589 		/* Disable and stop the watchdog timer */
590 		ixgbe_disable_watchdog_timer(ixgbe);
591 	}
592 
593 	/*
594 	 * Check if there are still rx buffers held by the upper layer.
595 	 * If so, fail the detach.
596 	 */
597 	if (!ixgbe_rx_drain(ixgbe))
598 		return (DDI_FAILURE);
599 
600 	/*
601 	 * Do the remaining unconfigure routines
602 	 */
603 	ixgbe_unconfigure(devinfo, ixgbe);
604 
605 	return (DDI_SUCCESS);
606 }
607 
/*
 * ixgbe_unconfigure - Release the resources acquired during attach.
 *
 * Teardown is driven by the bits recorded in ixgbe->attach_progress,
 * so this routine is safe to call both from ixgbe_detach() and from a
 * partially-completed ixgbe_attach().  Resources are released roughly
 * in the reverse order of acquisition, finishing by freeing the
 * ixgbe_t itself and clearing the dev_info private pointer.
 */
static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
	/*
	 * Disable interrupt
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		(void) ixgbe_disable_intrs(ixgbe);
	}

	/*
	 * remove the link check timer
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
		if (ixgbe->periodic_id != NULL) {
			ddi_periodic_delete(ixgbe->periodic_id);
			ixgbe->periodic_id = NULL;
		}
	}

	/*
	 * Unregister MAC
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
		(void) mac_unregister(ixgbe->mac_hdl);
	}

	/*
	 * Free statistics
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
	}

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}

	/*
	 * Remove taskq for sfp-status-change
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
		ddi_taskq_destroy(ixgbe->sfp_taskq);
	}

	/*
	 * Remove interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
		ixgbe_rem_intrs(ixgbe);
	}

	/*
	 * Remove driver properties
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
		(void) ddi_prop_remove_all(devinfo);
	}

	/*
	 * Stop the chipset.  gen_lock must still be valid here, so this
	 * runs before the locks are destroyed below.
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_chip_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
	}

	/*
	 * Free register handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
		if (ixgbe->osdep.reg_handle != NULL)
			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
	}

	/*
	 * Free PCI config handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
		if (ixgbe->osdep.cfg_handle != NULL)
			pci_config_teardown(&ixgbe->osdep.cfg_handle);
	}

	/*
	 * Free locks
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
		ixgbe_destroy_locks(ixgbe);
	}

	/*
	 * Free the rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
		ixgbe_free_rings(ixgbe);
	}

	/*
	 * Unregister FMA capabilities
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
		ixgbe_fm_fini(ixgbe);
	}

	/*
	 * Free the driver data structure
	 */
	kmem_free(ixgbe, sizeof (ixgbe_t));

	ddi_set_driver_private(devinfo, NULL);
}
723 
724 /*
725  * ixgbe_register_mac - Register the driver and its function pointers with
726  * the GLD interface.
727  */
728 static int
729 ixgbe_register_mac(ixgbe_t *ixgbe)
730 {
731 	struct ixgbe_hw *hw = &ixgbe->hw;
732 	mac_register_t *mac;
733 	int status;
734 
735 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
736 		return (IXGBE_FAILURE);
737 
738 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
739 	mac->m_driver = ixgbe;
740 	mac->m_dip = ixgbe->dip;
741 	mac->m_src_addr = hw->mac.addr;
742 	mac->m_callbacks = &ixgbe_m_callbacks;
743 	mac->m_min_sdu = 0;
744 	mac->m_max_sdu = ixgbe->default_mtu;
745 	mac->m_margin = VLAN_TAGSZ;
746 	mac->m_priv_props = ixgbe_priv_props;
747 	mac->m_priv_prop_count = IXGBE_MAX_PRIV_PROPS;
748 	mac->m_v12n = MAC_VIRT_LEVEL1;
749 
750 	status = mac_register(mac, &ixgbe->mac_hdl);
751 
752 	mac_free(mac);
753 
754 	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
755 }
756 
/*
 * ixgbe_identify_hardware - Identify the type of the chipset.
 *
 * Reads the PCI identification registers from config space, lets the
 * shared code derive hw->mac.type from the device id, and points
 * ixgbe->capab at the capability table for that MAC type.
 *
 * NOTE(review): ixgbe->capab points at a file-scope adapter_info_t
 * shared by all instances of the same MAC type, and the branches
 * below modify flags/other_intr through that pointer -- confirm this
 * is intended when multiple adapters are present.
 */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;

	/*
	 * Get the device id
	 */
	hw->vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
	hw->device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
	hw->revision_id =
	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
	hw->subsystem_device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

	/*
	 * Set the mac type of the adapter based on the device id
	 */
	if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Install adapter capabilities
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		ixgbe_log(ixgbe, "identify 82598 adapter\n");
		ixgbe->capab = &ixgbe_82598eb_cap;

		/* Copper 82598 parts also report fan failure via SDP1 */
		if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
			ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
		}
		ixgbe->capab->other_intr |= IXGBE_EICR_LSC;

		break;
	case ixgbe_mac_82599EB:
		ixgbe_log(ixgbe, "identify 82599 adapter\n");
		ixgbe->capab = &ixgbe_82599eb_cap;

		ixgbe->capab->other_intr = (IXGBE_EICR_GPI_SDP1 |
		    IXGBE_EICR_GPI_SDP2 | IXGBE_EICR_LSC);

		break;
	default:
		ixgbe_log(ixgbe,
		    "adapter not supported in ixgbe_identify_hardware(): %d\n",
		    hw->mac.type);
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}
819 
820 /*
821  * ixgbe_regs_map - Map the device registers.
822  *
823  */
824 static int
825 ixgbe_regs_map(ixgbe_t *ixgbe)
826 {
827 	dev_info_t *devinfo = ixgbe->dip;
828 	struct ixgbe_hw *hw = &ixgbe->hw;
829 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
830 	off_t mem_size;
831 
832 	/*
833 	 * First get the size of device registers to be mapped.
834 	 */
835 	if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
836 	    != DDI_SUCCESS) {
837 		return (IXGBE_FAILURE);
838 	}
839 
840 	/*
841 	 * Call ddi_regs_map_setup() to map registers
842 	 */
843 	if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
844 	    (caddr_t *)&hw->hw_addr, 0,
845 	    mem_size, &ixgbe_regs_acc_attr,
846 	    &osdep->reg_handle)) != DDI_SUCCESS) {
847 		return (IXGBE_FAILURE);
848 	}
849 
850 	return (IXGBE_SUCCESS);
851 }
852 
/*
 * ixgbe_init_properties - Initialize driver properties.
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
	/*
	 * Get conf file properties, including link settings
	 * jumbo frames, ring number, descriptor number, etc.
	 */
	ixgbe_get_conf(ixgbe);

	/* Initialize the remaining adapter parameters */
	ixgbe_init_params(ixgbe);
}
867 
868 /*
869  * ixgbe_init_driver_settings - Initialize driver settings.
870  *
871  * The settings include hardware function pointers, bus information,
872  * rx/tx rings settings, link state, and any other parameters that
873  * need to be setup during driver initialization.
874  */
875 static int
876 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
877 {
878 	struct ixgbe_hw *hw = &ixgbe->hw;
879 	dev_info_t *devinfo = ixgbe->dip;
880 	ixgbe_rx_ring_t *rx_ring;
881 	ixgbe_tx_ring_t *tx_ring;
882 	uint32_t rx_size;
883 	uint32_t tx_size;
884 	int i;
885 
886 	/*
887 	 * Initialize chipset specific hardware function pointers
888 	 */
889 	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
890 		return (IXGBE_FAILURE);
891 	}
892 
893 	/*
894 	 * Get the system page size
895 	 */
896 	ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);
897 
898 	/*
899 	 * Set rx buffer size
900 	 *
901 	 * The IP header alignment room is counted in the calculation.
902 	 * The rx buffer size is in unit of 1K that is required by the
903 	 * chipset hardware.
904 	 */
905 	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
906 	ixgbe->rx_buf_size = ((rx_size >> 10) +
907 	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
908 
909 	/*
910 	 * Set tx buffer size
911 	 */
912 	tx_size = ixgbe->max_frame_size;
913 	ixgbe->tx_buf_size = ((tx_size >> 10) +
914 	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
915 
916 	/*
917 	 * Initialize rx/tx rings parameters
918 	 */
919 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
920 		rx_ring = &ixgbe->rx_rings[i];
921 		rx_ring->index = i;
922 		rx_ring->ixgbe = ixgbe;
923 	}
924 
925 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
926 		tx_ring = &ixgbe->tx_rings[i];
927 		tx_ring->index = i;
928 		tx_ring->ixgbe = ixgbe;
929 		if (ixgbe->tx_head_wb_enable)
930 			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
931 		else
932 			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
933 
934 		tx_ring->ring_size = ixgbe->tx_ring_size;
935 		tx_ring->free_list_size = ixgbe->tx_ring_size +
936 		    (ixgbe->tx_ring_size >> 1);
937 	}
938 
939 	/*
940 	 * Initialize values of interrupt throttling rate
941 	 */
942 	for (i = 1; i < MAX_INTR_VECTOR; i++)
943 		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
944 
945 	/*
946 	 * The initial link state should be "unknown"
947 	 */
948 	ixgbe->link_state = LINK_STATE_UNKNOWN;
949 
950 	return (IXGBE_SUCCESS);
951 }
952 
953 /*
954  * ixgbe_init_locks - Initialize locks.
955  */
956 static void
957 ixgbe_init_locks(ixgbe_t *ixgbe)
958 {
959 	ixgbe_rx_ring_t *rx_ring;
960 	ixgbe_tx_ring_t *tx_ring;
961 	int i;
962 
963 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
964 		rx_ring = &ixgbe->rx_rings[i];
965 		mutex_init(&rx_ring->rx_lock, NULL,
966 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
967 	}
968 
969 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
970 		tx_ring = &ixgbe->tx_rings[i];
971 		mutex_init(&tx_ring->tx_lock, NULL,
972 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
973 		mutex_init(&tx_ring->recycle_lock, NULL,
974 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
975 		mutex_init(&tx_ring->tcb_head_lock, NULL,
976 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
977 		mutex_init(&tx_ring->tcb_tail_lock, NULL,
978 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
979 	}
980 
981 	mutex_init(&ixgbe->gen_lock, NULL,
982 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
983 
984 	mutex_init(&ixgbe->watchdog_lock, NULL,
985 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
986 }
987 
988 /*
989  * ixgbe_destroy_locks - Destroy locks.
990  */
991 static void
992 ixgbe_destroy_locks(ixgbe_t *ixgbe)
993 {
994 	ixgbe_rx_ring_t *rx_ring;
995 	ixgbe_tx_ring_t *tx_ring;
996 	int i;
997 
998 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
999 		rx_ring = &ixgbe->rx_rings[i];
1000 		mutex_destroy(&rx_ring->rx_lock);
1001 	}
1002 
1003 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1004 		tx_ring = &ixgbe->tx_rings[i];
1005 		mutex_destroy(&tx_ring->tx_lock);
1006 		mutex_destroy(&tx_ring->recycle_lock);
1007 		mutex_destroy(&tx_ring->tcb_head_lock);
1008 		mutex_destroy(&tx_ring->tcb_tail_lock);
1009 	}
1010 
1011 	mutex_destroy(&ixgbe->gen_lock);
1012 	mutex_destroy(&ixgbe->watchdog_lock);
1013 }
1014 
/*
 * ixgbe_resume - DDI_RESUME handler.
 *
 * Restarts the hardware if the interface was running when it was
 * suspended, clears IXGBE_SUSPENDED, and then wakes up the MAC
 * layer's transmit rings.
 */
static int
ixgbe_resume(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;
	int i;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
			mutex_exit(&ixgbe->gen_lock);
			return (DDI_FAILURE);
		}

		/*
		 * Enable and start the watchdog timer
		 */
		ixgbe_enable_watchdog_timer(ixgbe);
	}

	/* Clear the suspended flag before waking the tx rings */
	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);

	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		for (i = 0; i < ixgbe->num_tx_rings; i++) {
			mac_tx_ring_update(ixgbe->mac_hdl,
			    ixgbe->tx_rings[i].ring_handle);
		}
	}

	mutex_exit(&ixgbe->gen_lock);

	return (DDI_SUCCESS);
}
1052 
/*
 * ixgbe_suspend - DDI_SUSPEND handler.
 *
 * Marks the instance suspended and, if the interface is running,
 * stops the hardware and the watchdog timer.  The IXGBE_STARTED flag
 * is deliberately left set so ixgbe_resume() knows to restart.
 */
static int
ixgbe_suspend(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	/* Set the suspend flag first so new operations see it */
	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
	if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
		mutex_exit(&ixgbe->gen_lock);
		return (DDI_SUCCESS);
	}
	ixgbe_stop(ixgbe, B_FALSE);

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Disable and stop the watchdog timer
	 */
	ixgbe_disable_watchdog_timer(ixgbe);

	return (DDI_SUCCESS);
}
1080 
1081 /*
1082  * ixgbe_init - Initialize the device.
1083  */
1084 static int
1085 ixgbe_init(ixgbe_t *ixgbe)
1086 {
1087 	struct ixgbe_hw *hw = &ixgbe->hw;
1088 
1089 	mutex_enter(&ixgbe->gen_lock);
1090 
1091 	/*
1092 	 * Reset chipset to put the hardware in a known state
1093 	 * before we try to do anything with the eeprom.
1094 	 */
1095 	if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
1096 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1097 		goto init_fail;
1098 	}
1099 
1100 	/*
1101 	 * Need to init eeprom before validating the checksum.
1102 	 */
1103 	if (ixgbe_init_eeprom_params(hw) < 0) {
1104 		ixgbe_error(ixgbe,
1105 		    "Unable to intitialize the eeprom interface.");
1106 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1107 		goto init_fail;
1108 	}
1109 
1110 	/*
1111 	 * NVM validation
1112 	 */
1113 	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1114 		/*
1115 		 * Some PCI-E parts fail the first check due to
1116 		 * the link being in sleep state.  Call it again,
1117 		 * if it fails a second time it's a real issue.
1118 		 */
1119 		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1120 			ixgbe_error(ixgbe,
1121 			    "Invalid NVM checksum. Please contact "
1122 			    "the vendor to update the NVM.");
1123 			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1124 			goto init_fail;
1125 		}
1126 	}
1127 
1128 	/*
1129 	 * Setup default flow control thresholds - enable/disable
1130 	 * & flow control type is controlled by ixgbe.conf
1131 	 */
1132 	hw->fc.high_water = DEFAULT_FCRTH;
1133 	hw->fc.low_water = DEFAULT_FCRTL;
1134 	hw->fc.pause_time = DEFAULT_FCPAUSE;
1135 	hw->fc.send_xon = B_TRUE;
1136 
1137 	/*
1138 	 * Initialize link settings
1139 	 */
1140 	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1141 
1142 	/*
1143 	 * Initialize the chipset hardware
1144 	 */
1145 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1146 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1147 		goto init_fail;
1148 	}
1149 
1150 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1151 		goto init_fail;
1152 	}
1153 
1154 	mutex_exit(&ixgbe->gen_lock);
1155 	return (IXGBE_SUCCESS);
1156 
1157 init_fail:
1158 	/*
1159 	 * Reset PHY
1160 	 */
1161 	(void) ixgbe_reset_phy(hw);
1162 
1163 	mutex_exit(&ixgbe->gen_lock);
1164 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1165 	return (IXGBE_FAILURE);
1166 }
1167 
/*
 * ixgbe_chip_start - Initialize and start the chipset hardware.
 *
 * Called with gen_lock held.  Obtains and validates the mac address,
 * runs the shared-code hardware init, programs interrupt vectors,
 * unicast/multicast tables and per-vector interrupt throttling, and
 * finally asserts driver control of the device.
 */
static int
ixgbe_chip_start(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int ret_val, i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Get the mac address
	 * This function should handle SPARC case correctly.
	 */
	if (!ixgbe_find_mac_address(ixgbe)) {
		ixgbe_error(ixgbe, "Failed to get the mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Validate the mac address
	 */
	(void) ixgbe_init_rx_addrs(hw);
	if (!is_valid_mac_addr(hw->mac.addr)) {
		ixgbe_error(ixgbe, "Invalid mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Configure/Initialize hardware
	 */
	ret_val = ixgbe_init_hw(hw);
	if (ret_val != IXGBE_SUCCESS) {
		if (ret_val == IXGBE_ERR_EEPROM_VERSION) {
			/*
			 * Pre-release firmware only produces a warning;
			 * initialization deliberately continues.
			 */
			ixgbe_error(ixgbe,
			    "This 82599 device is pre-release and contains"
			    " outdated firmware, please contact your hardware"
			    " vendor for a replacement.");
		} else {
			ixgbe_error(ixgbe, "Failed to initialize hardware");
			return (IXGBE_FAILURE);
		}
	}

	/*
	 * Setup adapter interrupt vectors
	 */
	ixgbe_setup_adapter_vector(ixgbe);

	/*
	 * Initialize unicast addresses.
	 */
	ixgbe_init_unicst(ixgbe);

	/*
	 * Setup and initialize the mctable structures.
	 */
	ixgbe_setup_multicst(ixgbe);

	/*
	 * Set interrupt throttling rate
	 */
	for (i = 0; i < ixgbe->intr_cnt; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
	}

	/*
	 * Save the state of the phy
	 */
	ixgbe_get_hw_state(ixgbe);

	/*
	 * Make sure driver has control
	 */
	ixgbe_get_driver_control(hw);

	return (IXGBE_SUCCESS);
}
1247 
1248 /*
1249  * ixgbe_chip_stop - Stop the chipset hardware
1250  */
1251 static void
1252 ixgbe_chip_stop(ixgbe_t *ixgbe)
1253 {
1254 	struct ixgbe_hw *hw = &ixgbe->hw;
1255 
1256 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1257 
1258 	/*
1259 	 * Tell firmware driver is no longer in control
1260 	 */
1261 	ixgbe_release_driver_control(hw);
1262 
1263 	/*
1264 	 * Reset the chipset
1265 	 */
1266 	(void) ixgbe_reset_hw(hw);
1267 
1268 	/*
1269 	 * Reset PHY
1270 	 */
1271 	(void) ixgbe_reset_phy(hw);
1272 }
1273 
/*
 * ixgbe_reset - Reset the chipset and re-start the driver.
 *
 * It involves stopping and re-starting the chipset,
 * and re-configuring the rx/tx rings.
 */
static int
ixgbe_reset(ixgbe_t *ixgbe)
{
	int i;

	/*
	 * Disable and stop the watchdog timer
	 */
	ixgbe_disable_watchdog_timer(ixgbe);

	mutex_enter(&ixgbe->gen_lock);

	/* Only a started adapter may be reset; drop STARTED while down */
	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);

	/* B_FALSE: keep the DMA buffers across the stop/start cycle */
	ixgbe_stop(ixgbe, B_FALSE);

	if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
		mutex_exit(&ixgbe->gen_lock);
		return (IXGBE_FAILURE);
	}

	/*
	 * After resetting, need to recheck the link status.
	 * NOTE(review): the multiplier is 10^8 ns (0.1 s) per unit of
	 * IXGBE_LINK_UP_TIME -- confirm the constant's intended units.
	 */
	ixgbe->link_check_complete = B_FALSE;
	ixgbe->link_check_hrtime = gethrtime() +
	    (IXGBE_LINK_UP_TIME * 100000000ULL);

	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);

	/* Wake the MAC tx rings unless we are suspended */
	if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
		for (i = 0; i < ixgbe->num_tx_rings; i++) {
			mac_tx_ring_update(ixgbe->mac_hdl,
			    ixgbe->tx_rings[i].ring_handle);
		}
	}

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Enable and start the watchdog timer
	 */
	ixgbe_enable_watchdog_timer(ixgbe);

	return (IXGBE_SUCCESS);
}
1327 
/*
 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
 */
static void
ixgbe_tx_clean(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	tx_control_block_t *tcb;
	link_list_t pending_list;
	uint32_t desc_num;	/* descriptors reclaimed from a ring */
	int i, j;

	LINK_LIST_INIT(&pending_list);

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];

		mutex_enter(&tx_ring->recycle_lock);

		/*
		 * Clean the pending tx data - the pending packets in the
		 * work_list that have no chances to be transmitted again.
		 *
		 * We must ensure the chipset is stopped or the link is down
		 * before cleaning the transmit packets.
		 */
		desc_num = 0;
		for (j = 0; j < tx_ring->ring_size; j++) {
			tcb = tx_ring->work_list[j];
			if (tcb != NULL) {
				desc_num += tcb->desc_num;

				tx_ring->work_list[j] = NULL;

				/*
				 * Release the resources attached to the
				 * tcb; the tcb itself is handed back to
				 * the free list via pending_list below.
				 */
				ixgbe_free_tcb(tcb);

				LIST_PUSH_TAIL(&pending_list, &tcb->link);
			}
		}

		if (desc_num > 0) {
			atomic_add_32(&tx_ring->tbd_free, desc_num);
			/* Every descriptor must now be free */
			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);

			/*
			 * Reset the head and tail pointers of the tbd ring;
			 * Reset the writeback head if it's enable.
			 */
			tx_ring->tbd_head = 0;
			tx_ring->tbd_tail = 0;
			if (ixgbe->tx_head_wb_enable)
				*tx_ring->tbd_head_wb = 0;

			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDH(tx_ring->index), 0);
			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDT(tx_ring->index), 0);
		}

		mutex_exit(&tx_ring->recycle_lock);

		/*
		 * Add the tx control blocks in the pending list to
		 * the free list.
		 */
		ixgbe_put_free_list(tx_ring, &pending_list);
	}
}
1396 
1397 /*
1398  * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1399  * transmitted.
1400  */
1401 static boolean_t
1402 ixgbe_tx_drain(ixgbe_t *ixgbe)
1403 {
1404 	ixgbe_tx_ring_t *tx_ring;
1405 	boolean_t done;
1406 	int i, j;
1407 
1408 	/*
1409 	 * Wait for a specific time to allow pending tx packets
1410 	 * to be transmitted.
1411 	 *
1412 	 * Check the counter tbd_free to see if transmission is done.
1413 	 * No lock protection is needed here.
1414 	 *
1415 	 * Return B_TRUE if all pending packets have been transmitted;
1416 	 * Otherwise return B_FALSE;
1417 	 */
1418 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1419 
1420 		done = B_TRUE;
1421 		for (j = 0; j < ixgbe->num_tx_rings; j++) {
1422 			tx_ring = &ixgbe->tx_rings[j];
1423 			done = done &&
1424 			    (tx_ring->tbd_free == tx_ring->ring_size);
1425 		}
1426 
1427 		if (done)
1428 			break;
1429 
1430 		msec_delay(1);
1431 	}
1432 
1433 	return (done);
1434 }
1435 
1436 /*
1437  * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1438  */
1439 static boolean_t
1440 ixgbe_rx_drain(ixgbe_t *ixgbe)
1441 {
1442 	boolean_t done = B_TRUE;
1443 	int i;
1444 
1445 	/*
1446 	 * Polling the rx free list to check if those rx buffers held by
1447 	 * the upper layer are released.
1448 	 *
1449 	 * Check the counter rcb_free to see if all pending buffers are
1450 	 * released. No lock protection is needed here.
1451 	 *
1452 	 * Return B_TRUE if all pending buffers have been released;
1453 	 * Otherwise return B_FALSE;
1454 	 */
1455 	for (i = 0; i < RX_DRAIN_TIME; i++) {
1456 		done = (ixgbe->rcb_pending == 0);
1457 
1458 		if (done)
1459 			break;
1460 
1461 		msec_delay(1);
1462 	}
1463 
1464 	return (done);
1465 }
1466 
/*
 * ixgbe_start - Start the driver/chipset.
 *
 * alloc_buffer is B_TRUE on a cold start (allocate the software rx
 * rings and all rx/tx DMA buffers) and B_FALSE on reset/resume, when
 * the existing buffers are reused.  Called with gen_lock held.
 */
int
ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	if (alloc_buffer) {
		if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe,
			    "Failed to allocate software receive rings");
			return (IXGBE_FAILURE);
		}

		/* Allocate buffers for all the rx/tx rings */
		if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe, "Failed to allocate DMA resource");
			return (IXGBE_FAILURE);
		}

		ixgbe->tx_ring_init = B_TRUE;
	} else {
		ixgbe->tx_ring_init = B_FALSE;
	}

	/* Take every ring lock: all rx rings first, then all tx rings */
	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Start the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto start_failure;
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto start_failure;
	}

	/*
	 * Setup the rx/tx rings
	 */
	ixgbe_setup_rings(ixgbe);

	/*
	 * ixgbe_start() will be called when resetting, however if reset
	 * happens, we need to clear the ERROR and STALL flags before
	 * enabling the interrupts.
	 */
	atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR | IXGBE_STALL));

	/*
	 * Enable adapter interrupts
	 * The interrupts must be enabled after the driver state is START
	 */
	ixgbe_enable_adapter_interrupts(ixgbe);

	/* Release the ring locks in reverse order of acquisition */
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	return (IXGBE_SUCCESS);

start_failure:
	/* Drop all ring locks before reporting the service lost */
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

	return (IXGBE_FAILURE);
}
1547 
/*
 * ixgbe_stop - Stop the driver/chipset.
 *
 * Disables interrupts, drains outstanding transmits, stops the chipset
 * under all ring locks, reclaims pending tx resources, reports the
 * link as unknown if it was up, and optionally (free_buffer) releases
 * the rx/tx DMA memory.  Called with gen_lock held.
 */
void
ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Disable the adapter interrupts
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Drain the pending tx packets
	 */
	(void) ixgbe_tx_drain(ixgbe);

	/* Take every ring lock: all rx rings first, then all tx rings */
	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Stop the chipset hardware
	 */
	ixgbe_chip_stop(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clean the pending tx data/resources
	 */
	ixgbe_tx_clean(ixgbe);

	/* Release the ring locks in reverse order of acquisition */
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	/* The link is down once the chipset is stopped */
	if (ixgbe->link_state == LINK_STATE_UP) {
		ixgbe->link_state = LINK_STATE_UNKNOWN;
		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
	}

	if (free_buffer) {
		/*
		 * Release the DMA/memory resources of rx/tx rings
		 */
		ixgbe_free_dma(ixgbe);
		ixgbe_free_rx_data(ixgbe);
	}
}
1605 
1606 /*
1607  * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
1608  */
1609 static int
1610 ixgbe_alloc_rings(ixgbe_t *ixgbe)
1611 {
1612 	/*
1613 	 * Allocate memory space for rx rings
1614 	 */
1615 	ixgbe->rx_rings = kmem_zalloc(
1616 	    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
1617 	    KM_NOSLEEP);
1618 
1619 	if (ixgbe->rx_rings == NULL) {
1620 		return (IXGBE_FAILURE);
1621 	}
1622 
1623 	/*
1624 	 * Allocate memory space for tx rings
1625 	 */
1626 	ixgbe->tx_rings = kmem_zalloc(
1627 	    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
1628 	    KM_NOSLEEP);
1629 
1630 	if (ixgbe->tx_rings == NULL) {
1631 		kmem_free(ixgbe->rx_rings,
1632 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1633 		ixgbe->rx_rings = NULL;
1634 		return (IXGBE_FAILURE);
1635 	}
1636 
1637 	/*
1638 	 * Allocate memory space for rx ring groups
1639 	 */
1640 	ixgbe->rx_groups = kmem_zalloc(
1641 	    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
1642 	    KM_NOSLEEP);
1643 
1644 	if (ixgbe->rx_groups == NULL) {
1645 		kmem_free(ixgbe->rx_rings,
1646 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1647 		kmem_free(ixgbe->tx_rings,
1648 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1649 		ixgbe->rx_rings = NULL;
1650 		ixgbe->tx_rings = NULL;
1651 		return (IXGBE_FAILURE);
1652 	}
1653 
1654 	return (IXGBE_SUCCESS);
1655 }
1656 
1657 /*
1658  * ixgbe_free_rings - Free the memory space of rx/tx rings.
1659  */
1660 static void
1661 ixgbe_free_rings(ixgbe_t *ixgbe)
1662 {
1663 	if (ixgbe->rx_rings != NULL) {
1664 		kmem_free(ixgbe->rx_rings,
1665 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1666 		ixgbe->rx_rings = NULL;
1667 	}
1668 
1669 	if (ixgbe->tx_rings != NULL) {
1670 		kmem_free(ixgbe->tx_rings,
1671 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1672 		ixgbe->tx_rings = NULL;
1673 	}
1674 
1675 	if (ixgbe->rx_groups != NULL) {
1676 		kmem_free(ixgbe->rx_groups,
1677 		    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
1678 		ixgbe->rx_groups = NULL;
1679 	}
1680 }
1681 
1682 static int
1683 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
1684 {
1685 	ixgbe_rx_ring_t *rx_ring;
1686 	int i;
1687 
1688 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1689 		rx_ring = &ixgbe->rx_rings[i];
1690 		if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
1691 			goto alloc_rx_rings_failure;
1692 	}
1693 	return (IXGBE_SUCCESS);
1694 
1695 alloc_rx_rings_failure:
1696 	ixgbe_free_rx_data(ixgbe);
1697 	return (IXGBE_FAILURE);
1698 }
1699 
/*
 * ixgbe_free_rx_data - Free the software receive ring data.
 *
 * If the upper layer still holds loaned rx buffers for a ring
 * (rcb_pending != 0), that ring's data is only flagged
 * IXGBE_RX_STOPPED here; the final free is deferred until the last
 * buffer is returned, coordinated via rx_pending_lock.
 */
static void
ixgbe_free_rx_data(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_data_t *rx_data;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];

		mutex_enter(&ixgbe->rx_pending_lock);
		rx_data = rx_ring->rx_data;

		if (rx_data != NULL) {
			/* Mark stopped so returned buffers are not reused */
			rx_data->flag |= IXGBE_RX_STOPPED;

			if (rx_data->rcb_pending == 0) {
				ixgbe_free_rx_ring_data(rx_data);
				rx_ring->rx_data = NULL;
			}
		}

		mutex_exit(&ixgbe->rx_pending_lock);
	}
}
1725 
1726 /*
1727  * ixgbe_setup_rings - Setup rx/tx rings.
1728  */
1729 static void
1730 ixgbe_setup_rings(ixgbe_t *ixgbe)
1731 {
1732 	/*
1733 	 * Setup the rx/tx rings, including the following:
1734 	 *
1735 	 * 1. Setup the descriptor ring and the control block buffers;
1736 	 * 2. Initialize necessary registers for receive/transmit;
1737 	 * 3. Initialize software pointers/parameters for receive/transmit;
1738 	 */
1739 	ixgbe_setup_rx(ixgbe);
1740 
1741 	ixgbe_setup_tx(ixgbe);
1742 }
1743 
1744 static void
1745 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
1746 {
1747 	ixgbe_t *ixgbe = rx_ring->ixgbe;
1748 	ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
1749 	struct ixgbe_hw *hw = &ixgbe->hw;
1750 	rx_control_block_t *rcb;
1751 	union ixgbe_adv_rx_desc	*rbd;
1752 	uint32_t size;
1753 	uint32_t buf_low;
1754 	uint32_t buf_high;
1755 	uint32_t reg_val;
1756 	int i;
1757 
1758 	ASSERT(mutex_owned(&rx_ring->rx_lock));
1759 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1760 
1761 	for (i = 0; i < ixgbe->rx_ring_size; i++) {
1762 		rcb = rx_data->work_list[i];
1763 		rbd = &rx_data->rbd_ring[i];
1764 
1765 		rbd->read.pkt_addr = rcb->rx_buf.dma_address;
1766 		rbd->read.hdr_addr = NULL;
1767 	}
1768 
1769 	/*
1770 	 * Initialize the length register
1771 	 */
1772 	size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
1773 	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->index), size);
1774 
1775 	/*
1776 	 * Initialize the base address registers
1777 	 */
1778 	buf_low = (uint32_t)rx_data->rbd_area.dma_address;
1779 	buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
1780 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->index), buf_high);
1781 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->index), buf_low);
1782 
1783 	/*
1784 	 * Setup head & tail pointers
1785 	 */
1786 	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->index), rx_data->ring_size - 1);
1787 	IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->index), 0);
1788 
1789 	rx_data->rbd_next = 0;
1790 
1791 	/*
1792 	 * Setup the Receive Descriptor Control Register (RXDCTL)
1793 	 * PTHRESH=32 descriptors (half the internal cache)
1794 	 * HTHRESH=0 descriptors (to minimize latency on fetch)
1795 	 * WTHRESH defaults to 1 (writeback each descriptor)
1796 	 */
1797 	reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->index));
1798 	reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */
1799 
1800 	/* Not a valid value for 82599 */
1801 	if (hw->mac.type < ixgbe_mac_82599EB) {
1802 		reg_val |= 0x0020;	/* pthresh */
1803 	}
1804 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->index), reg_val);
1805 
1806 	if (hw->mac.type == ixgbe_mac_82599EB) {
1807 		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1808 		reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
1809 		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
1810 	}
1811 
1812 	/*
1813 	 * Setup the Split and Replication Receive Control Register.
1814 	 * Set the rx buffer size and the advanced descriptor type.
1815 	 */
1816 	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
1817 	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1818 	reg_val |= IXGBE_SRRCTL_DROP_EN;
1819 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->index), reg_val);
1820 }
1821 
/*
 * ixgbe_setup_rx - Program the global receive configuration, then each
 * rx ring, the rx groups, per-ring statistics mapping, max frame size,
 * jumbo frames, hardware checksum and (optionally) RSS.
 */
static void
ixgbe_setup_rx(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_rx_group_t *rx_group;
	uint32_t reg_val;
	uint32_t ring_mapping;
	int i;

	/* PSRTYPE must be configured for 82599 */
	reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
#define	IXGBE_PSRTYPE_L2_PKT	0x00001000
	reg_val |= IXGBE_PSRTYPE_L2_PKT;
	/*
	 * NOTE(review): 0xE0000000 sets the upper PSRTYPE bits; confirm
	 * the intended meaning against the 82599 datasheet.
	 */
	reg_val |= 0xE0000000;
	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);

	/*
	 * Set filter control in FCTRL to accept broadcast packets and do
	 * not pass pause frames to host.  Flow control settings are already
	 * in this register, so preserve them.
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_val |= IXGBE_FCTRL_BAM;	/* broadcast accept mode */
	reg_val |= IXGBE_FCTRL_DPF;	/* discard pause frames */
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);

	/*
	 * Enable the receive unit.  This must be done after filter
	 * control is set in FCTRL.
	 */
	reg_val = (IXGBE_RXCTRL_RXEN	/* Enable Receive Unit */
	    | IXGBE_RXCTRL_DMBYPS);	/* descriptor monitor bypass */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);

	/*
	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		ixgbe_setup_rx_ring(rx_ring);
	}

	/*
	 * Setup rx groups.
	 */
	for (i = 0; i < ixgbe->num_rx_groups; i++) {
		rx_group = &ixgbe->rx_groups[i];
		rx_group->index = i;
		rx_group->ixgbe = ixgbe;
	}

	/*
	 * Setup the per-ring statistics mapping.
	 * Each RQSMR register maps four rings, one byte per ring.
	 */
	ring_mapping = 0;
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
		if ((i & 0x3) == 0x3) {
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i >> 2), ring_mapping);
			ring_mapping = 0;
		}
	}
	/* Flush a partially-filled register (i == num_rx_rings here) */
	if ((i & 0x3) != 0x3)
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i >> 2), ring_mapping);

	/*
	 * The Max Frame Size in MHADD/MAXFRS will be internally increased
	 * by four bytes if the packet has a VLAN field, so includes MTU,
	 * ethernet header and frame check sequence.
	 * Register is MAXFRS in 82599.
	 */
	reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);

	/*
	 * Setup Jumbo Frame enable bit
	 */
	if (ixgbe->default_mtu > ETHERMTU) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg_val |= IXGBE_HLREG0_JUMBOEN;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
	}

	/*
	 * Hardware checksum settings
	 */
	if (ixgbe->rx_hcksum_enable) {
		reg_val = IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
	}

	/*
	 * Setup RSS for multiple receive queues
	 */
	if (ixgbe->num_rx_rings > 1)
		ixgbe_setup_rss(ixgbe);
}
1922 
/*
 * ixgbe_setup_tx_ring - Program one tx ring into the hardware.
 *
 * Programs the ring length, base address and head/tail pointers,
 * configures head write-back if enabled, and resets the software
 * descriptor/control-block state.  Called with the ring's tx_lock and
 * gen_lock both held.
 */
static void
ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
{
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;

	ASSERT(mutex_owned(&tx_ring->tx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Initialize the length register
	 */
	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);

	/*
	 * Initialize the base address registers
	 */
	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);

	/*
	 * Setup head & tail pointers
	 */
	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);

	/*
	 * Setup head write-back
	 */
	if (ixgbe->tx_head_wb_enable) {
		/*
		 * The memory of the head write-back is allocated using
		 * the extra tbd beyond the tail of the tbd ring.
		 */
		tx_ring->tbd_head_wb = (uint32_t *)
		    ((uintptr_t)tx_ring->tbd_area.address + size);
		*tx_ring->tbd_head_wb = 0;

		buf_low = (uint32_t)
		    (tx_ring->tbd_area.dma_address + size);
		buf_high = (uint32_t)
		    ((tx_ring->tbd_area.dma_address + size) >> 32);

		/* Set the head write-back enable bit */
		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;

		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);

		/*
		 * Turn off relaxed ordering for head write back or it will
		 * cause problems with the tx recycling
		 */
		reg_val = IXGBE_READ_REG(hw,
		    IXGBE_DCA_TXCTRL(tx_ring->index));
		reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw,
		    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
	} else {
		tx_ring->tbd_head_wb = NULL;
	}

	/* The ring starts empty: every descriptor is free */
	tx_ring->tbd_head = 0;
	tx_ring->tbd_tail = 0;
	tx_ring->tbd_free = tx_ring->ring_size;

	/* Only reset the tcb state on a cold start (see ixgbe_start()) */
	if (ixgbe->tx_ring_init == B_TRUE) {
		tx_ring->tcb_head = 0;
		tx_ring->tcb_tail = 0;
		tx_ring->tcb_free = tx_ring->free_list_size;
	}

	/*
	 * Initialize the s/w context structure
	 */
	bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
}
2007 
/*
 * ixgbe_setup_tx - Program every tx ring, the per-ring statistics
 * mapping, CRC/padding, transmit DMA (82599) and queue enables.
 */
static void
ixgbe_setup_tx(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_tx_ring_t *tx_ring;
	uint32_t reg_val;
	uint32_t ring_mapping;
	int i;

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		ixgbe_setup_tx_ring(tx_ring);
	}

	/*
	 * Setup the per-ring statistics mapping.
	 * Each register maps four rings, one byte per ring; the
	 * register is TQSM on 82599 and later, TQSMR before that.
	 */
	ring_mapping = 0;
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
		if ((i & 0x3) == 0x3) {
			if (hw->mac.type >= ixgbe_mac_82599EB) {
				IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
				    ring_mapping);
			} else {
				IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
				    ring_mapping);
			}
			ring_mapping = 0;
		}
	}
	/* Flush a partially-filled register (i == num_tx_rings here) */
	if ((i & 0x3) != 0x3)
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
		}

	/*
	 * Enable CRC appending and TX padding (for short tx frames)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);

	/*
	 * enable DMA for 82599 parts
	 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
	/* DMATXCTL.TE must be set after all Tx config is complete */
		reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		reg_val |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
	}

	/*
	 * Enabling tx queues ..
	 * For 82599 must be done after DMATXCTL.TE is set
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
		reg_val |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
	}
}
2074 
2075 /*
2076  * ixgbe_setup_rss - Setup receive-side scaling feature.
2077  */
2078 static void
2079 ixgbe_setup_rss(ixgbe_t *ixgbe)
2080 {
2081 	struct ixgbe_hw *hw = &ixgbe->hw;
2082 	uint32_t i, mrqc, rxcsum;
2083 	uint32_t random;
2084 	uint32_t reta;
2085 
2086 	/*
2087 	 * Fill out redirection table
2088 	 */
2089 	reta = 0;
2090 	for (i = 0; i < 128; i++) {
2091 		reta = (reta << 8) | (i % ixgbe->num_rx_rings);
2092 		if ((i & 3) == 3)
2093 			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2094 	}
2095 
2096 	/*
2097 	 * Fill out hash function seeds with a random constant
2098 	 */
2099 	for (i = 0; i < 10; i++) {
2100 		(void) random_get_pseudo_bytes((uint8_t *)&random,
2101 		    sizeof (uint32_t));
2102 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2103 	}
2104 
2105 	/*
2106 	 * Enable RSS & perform hash on these packet types
2107 	 */
2108 	mrqc = IXGBE_MRQC_RSSEN |
2109 	    IXGBE_MRQC_RSS_FIELD_IPV4 |
2110 	    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2111 	    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2112 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2113 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2114 	    IXGBE_MRQC_RSS_FIELD_IPV6 |
2115 	    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2116 	    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2117 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2118 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2119 
2120 	/*
2121 	 * Disable Packet Checksum to enable RSS for multiple receive queues.
2122 	 * It is an adapter hardware limitation that Packet Checksum is
2123 	 * mutually exclusive with RSS.
2124 	 */
2125 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2126 	rxcsum |= IXGBE_RXCSUM_PCSD;
2127 	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2128 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2129 }
2130 
2131 /*
2132  * ixgbe_init_unicst - Initialize the unicast addresses.
2133  */
2134 static void
2135 ixgbe_init_unicst(ixgbe_t *ixgbe)
2136 {
2137 	struct ixgbe_hw *hw = &ixgbe->hw;
2138 	uint8_t *mac_addr;
2139 	int slot;
2140 	/*
2141 	 * Here we should consider two situations:
2142 	 *
2143 	 * 1. Chipset is initialized at the first time,
2144 	 *    Clear all the multiple unicast addresses.
2145 	 *
2146 	 * 2. Chipset is reset
2147 	 *    Recover the multiple unicast addresses from the
2148 	 *    software data structure to the RAR registers.
2149 	 */
2150 	if (!ixgbe->unicst_init) {
2151 		/*
2152 		 * Initialize the multiple unicast addresses
2153 		 */
2154 		ixgbe->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
2155 		ixgbe->unicst_avail = ixgbe->unicst_total;
2156 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2157 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2158 			bzero(mac_addr, ETHERADDRL);
2159 			(void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL);
2160 			ixgbe->unicst_addr[slot].mac.set = 0;
2161 		}
2162 		ixgbe->unicst_init = B_TRUE;
2163 	} else {
2164 		/* Re-configure the RAR registers */
2165 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2166 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2167 			if (ixgbe->unicst_addr[slot].mac.set == 1) {
2168 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2169 				    NULL, IXGBE_RAH_AV);
2170 			} else {
2171 				bzero(mac_addr, ETHERADDRL);
2172 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2173 				    NULL, NULL);
2174 			}
2175 		}
2176 	}
2177 }
2178 
2179 /*
2180  * ixgbe_unicst_set - Set the unicast address to the specified slot.
2181  */
2182 int
2183 ixgbe_unicst_set(ixgbe_t *ixgbe, const uint8_t *mac_addr,
2184     int slot)
2185 {
2186 	struct ixgbe_hw *hw = &ixgbe->hw;
2187 
2188 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2189 
2190 	/*
2191 	 * Save the unicast address in the software data structure
2192 	 */
2193 	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
2194 
2195 	/*
2196 	 * Set the unicast address to the RAR register
2197 	 */
2198 	(void) ixgbe_set_rar(hw, slot, (uint8_t *)mac_addr, NULL, IXGBE_RAH_AV);
2199 
2200 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2201 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2202 		return (EIO);
2203 	}
2204 
2205 	return (0);
2206 }
2207 
2208 /*
2209  * ixgbe_unicst_find - Find the slot for the specified unicast address
2210  */
2211 int
2212 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
2213 {
2214 	int slot;
2215 
2216 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2217 
2218 	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2219 		if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
2220 		    mac_addr, ETHERADDRL) == 0)
2221 			return (slot);
2222 	}
2223 
2224 	return (-1);
2225 }
2226 
2227 /*
2228  * ixgbe_multicst_add - Add a multicst address.
2229  */
2230 int
2231 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2232 {
2233 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2234 
2235 	if ((multiaddr[0] & 01) == 0) {
2236 		return (EINVAL);
2237 	}
2238 
2239 	if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2240 		return (ENOENT);
2241 	}
2242 
2243 	bcopy(multiaddr,
2244 	    &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2245 	ixgbe->mcast_count++;
2246 
2247 	/*
2248 	 * Update the multicast table in the hardware
2249 	 */
2250 	ixgbe_setup_multicst(ixgbe);
2251 
2252 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2253 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2254 		return (EIO);
2255 	}
2256 
2257 	return (0);
2258 }
2259 
2260 /*
2261  * ixgbe_multicst_remove - Remove a multicst address.
2262  */
2263 int
2264 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2265 {
2266 	int i;
2267 
2268 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2269 
2270 	for (i = 0; i < ixgbe->mcast_count; i++) {
2271 		if (bcmp(multiaddr, &ixgbe->mcast_table[i],
2272 		    ETHERADDRL) == 0) {
2273 			for (i++; i < ixgbe->mcast_count; i++) {
2274 				ixgbe->mcast_table[i - 1] =
2275 				    ixgbe->mcast_table[i];
2276 			}
2277 			ixgbe->mcast_count--;
2278 			break;
2279 		}
2280 	}
2281 
2282 	/*
2283 	 * Update the multicast table in the hardware
2284 	 */
2285 	ixgbe_setup_multicst(ixgbe);
2286 
2287 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2288 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2289 		return (EIO);
2290 	}
2291 
2292 	return (0);
2293 }
2294 
2295 /*
2296  * ixgbe_setup_multicast - Setup multicast data structures.
2297  *
2298  * This routine initializes all of the multicast related structures
2299  * and save them in the hardware registers.
2300  */
2301 static void
2302 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2303 {
2304 	uint8_t *mc_addr_list;
2305 	uint32_t mc_addr_count;
2306 	struct ixgbe_hw *hw = &ixgbe->hw;
2307 
2308 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2309 
2310 	ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2311 
2312 	mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2313 	mc_addr_count = ixgbe->mcast_count;
2314 
2315 	/*
2316 	 * Update the multicast addresses to the MTA registers
2317 	 */
2318 	(void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2319 	    ixgbe_mc_table_itr);
2320 }
2321 
/*
 * ixgbe_get_conf - Get driver configurations set in driver.conf.
 *
 * This routine gets user-configured values out of the configuration
 * file ixgbe.conf.
 *
 * For each configurable value, there is a minimum, a maximum, and a
 * default.
 * If user does not configure a value, use the default.
 * If user configures below the minimum, use the minimum.
 * If user configures above the maximum, use the maximum.
 */
static void
ixgbe_get_conf(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t flow_control;

	/*
	 * ixgbe driver supports the following user configurations:
	 *
	 * Jumbo frame configuration:
	 *    default_mtu
	 *
	 * Ethernet flow control configuration:
	 *    flow_control
	 *
	 * Multiple rings configurations:
	 *    tx_queue_number
	 *    tx_ring_size
	 *    rx_queue_number
	 *    rx_ring_size
	 *
	 * Call ixgbe_get_prop() to get the value for a specific
	 * configuration parameter.
	 */

	/*
	 * Jumbo frame configuration - max_frame_size controls host buffer
	 * allocation, so includes MTU, ethernet header, vlan tag and
	 * frame check sequence.
	 */
	ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
	    MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);

	ixgbe->max_frame_size = ixgbe->default_mtu +
	    sizeof (struct ether_vlan_header) + ETHERFCSL;

	/*
	 * Ethernet flow control configuration.
	 * The property value 3 is outside the ixgbe_fc_none..ixgbe_fc_full
	 * range and is mapped to the hardware default mode.
	 */
	flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
	    ixgbe_fc_none, 3, ixgbe_fc_none);
	if (flow_control == 3)
		flow_control = ixgbe_fc_default;

	/*
	 * fc.requested mode is what the user requests.  After autoneg,
	 * fc.current_mode will be the flow_control mode that was negotiated.
	 */
	hw->fc.requested_mode = flow_control;

	/*
	 * Multiple rings configurations
	 */
	ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
	    ixgbe->capab->min_tx_que_num,
	    ixgbe->capab->max_tx_que_num,
	    ixgbe->capab->def_tx_que_num);
	ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
	    MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);

	ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
	    ixgbe->capab->min_rx_que_num,
	    ixgbe->capab->max_rx_que_num,
	    ixgbe->capab->def_rx_que_num);
	ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
	    MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);

	/*
	 * Multiple groups configuration
	 */
	ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
	    MIN_RX_GROUP_NUM, MAX_RX_GROUP_NUM, DEFAULT_RX_GROUP_NUM);

	ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
	    0, 1, DEFAULT_MR_ENABLE);

	/* With multiple rings disabled, collapse to one ring and group */
	if (ixgbe->mr_enable == B_FALSE) {
		ixgbe->num_tx_rings = 1;
		ixgbe->num_rx_rings = 1;
		ixgbe->num_rx_groups = 1;
	}

	/*
	 * Tunable used to force an interrupt type. The only use is
	 * for testing of the lesser interrupt types.
	 * 0 = don't force interrupt type
	 * 1 = force interrupt type MSI-X
	 * 2 = force interrupt type MSI
	 * 3 = force interrupt type Legacy
	 */
	ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
	    IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);

	ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
	ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
	    0, 1, DEFAULT_RX_HCKSUM_ENABLE);
	ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
	    0, 1, DEFAULT_LSO_ENABLE);
	ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
	    0, 1, DEFAULT_TX_HEAD_WB_ENABLE);

	/* Head Write Back not recommended for 82599 */
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		ixgbe->tx_head_wb_enable = B_FALSE;
	}

	/*
	 * ixgbe LSO needs the tx h/w checksum support.
	 * LSO will be disabled if tx h/w checksum is not
	 * enabled.
	 */
	if (ixgbe->tx_hcksum_enable == B_FALSE) {
		ixgbe->lso_enable = B_FALSE;
	}

	ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
	    MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
	    DEFAULT_TX_COPY_THRESHOLD);
	ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
	    MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
	ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
	    MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
	ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
	    MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);

	ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
	    MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
	    DEFAULT_RX_COPY_THRESHOLD);
	ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
	    MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
	    DEFAULT_RX_LIMIT_PER_INTR);

	ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
	    ixgbe->capab->min_intr_throttle,
	    ixgbe->capab->max_intr_throttle,
	    ixgbe->capab->def_intr_throttle);
	/*
	 * 82599 requires the interrupt throttling rate is
	 * a multiple of 8. This is enforced by the register
	 * definition.
	 */
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
}
2482 
2483 static void
2484 ixgbe_init_params(ixgbe_t *ixgbe)
2485 {
2486 	ixgbe->param_en_10000fdx_cap = 1;
2487 	ixgbe->param_en_1000fdx_cap = 1;
2488 	ixgbe->param_en_100fdx_cap = 1;
2489 	ixgbe->param_adv_10000fdx_cap = 1;
2490 	ixgbe->param_adv_1000fdx_cap = 1;
2491 	ixgbe->param_adv_100fdx_cap = 1;
2492 
2493 	ixgbe->param_pause_cap = 1;
2494 	ixgbe->param_asym_pause_cap = 1;
2495 	ixgbe->param_rem_fault = 0;
2496 
2497 	ixgbe->param_adv_autoneg_cap = 1;
2498 	ixgbe->param_adv_pause_cap = 1;
2499 	ixgbe->param_adv_asym_pause_cap = 1;
2500 	ixgbe->param_adv_rem_fault = 0;
2501 
2502 	ixgbe->param_lp_10000fdx_cap = 0;
2503 	ixgbe->param_lp_1000fdx_cap = 0;
2504 	ixgbe->param_lp_100fdx_cap = 0;
2505 	ixgbe->param_lp_autoneg_cap = 0;
2506 	ixgbe->param_lp_pause_cap = 0;
2507 	ixgbe->param_lp_asym_pause_cap = 0;
2508 	ixgbe->param_lp_rem_fault = 0;
2509 }
2510 
2511 /*
2512  * ixgbe_get_prop - Get a property value out of the configuration file
2513  * ixgbe.conf.
2514  *
2515  * Caller provides the name of the property, a default value, a minimum
2516  * value, and a maximum value.
2517  *
2518  * Return configured value of the property, with default, minimum and
2519  * maximum properly applied.
2520  */
2521 static int
2522 ixgbe_get_prop(ixgbe_t *ixgbe,
2523     char *propname,	/* name of the property */
2524     int minval,		/* minimum acceptable value */
2525     int maxval,		/* maximim acceptable value */
2526     int defval)		/* default value */
2527 {
2528 	int value;
2529 
2530 	/*
2531 	 * Call ddi_prop_get_int() to read the conf settings
2532 	 */
2533 	value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
2534 	    DDI_PROP_DONTPASS, propname, defval);
2535 	if (value > maxval)
2536 		value = maxval;
2537 
2538 	if (value < minval)
2539 		value = minval;
2540 
2541 	return (value);
2542 }
2543 
2544 /*
2545  * ixgbe_driver_setup_link - Using the link properties to setup the link.
2546  */
2547 int
2548 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
2549 {
2550 	u32 autoneg_advertised = 0;
2551 
2552 	/*
2553 	 * No half duplex support with 10Gb parts
2554 	 */
2555 	if (ixgbe->param_adv_10000fdx_cap == 1)
2556 		autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
2557 
2558 	if (ixgbe->param_adv_1000fdx_cap == 1)
2559 		autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
2560 
2561 	if (ixgbe->param_adv_100fdx_cap == 1)
2562 		autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
2563 
2564 	if (ixgbe->param_adv_autoneg_cap == 1 && autoneg_advertised == 0) {
2565 		ixgbe_notice(ixgbe, "Invalid link settings. Setup link "
2566 		    "to autonegotiation with full link capabilities.");
2567 
2568 		autoneg_advertised = IXGBE_LINK_SPEED_10GB_FULL |
2569 		    IXGBE_LINK_SPEED_1GB_FULL |
2570 		    IXGBE_LINK_SPEED_100_FULL;
2571 	}
2572 
2573 	if (setup_hw) {
2574 		if (ixgbe_setup_link(&ixgbe->hw, autoneg_advertised,
2575 		    ixgbe->param_adv_autoneg_cap, B_TRUE) != IXGBE_SUCCESS) {
2576 			ixgbe_notice(ixgbe, "Setup link failed on this "
2577 			    "device.");
2578 			return (IXGBE_FAILURE);
2579 		}
2580 	}
2581 
2582 	return (IXGBE_SUCCESS);
2583 }
2584 
/*
 * ixgbe_driver_link_check - Link status processing.
 *
 * This function can be called in both kernel context and interrupt context
 */
static void
ixgbe_driver_link_check(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
	boolean_t link_up = B_FALSE;
	boolean_t link_changed = B_FALSE;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	(void) ixgbe_check_link(hw, &speed, &link_up, false);
	if (link_up) {
		ixgbe->link_check_complete = B_TRUE;

		/* Link is up, enable flow control settings */
		(void) ixgbe_fc_enable(hw, 0);

		/*
		 * The Link is up, check whether it was marked as down earlier
		 */
		if (ixgbe->link_state != LINK_STATE_UP) {
			switch (speed) {
			case IXGBE_LINK_SPEED_10GB_FULL:
				ixgbe->link_speed = SPEED_10GB;
				break;
			case IXGBE_LINK_SPEED_1GB_FULL:
				ixgbe->link_speed = SPEED_1GB;
				break;
			case IXGBE_LINK_SPEED_100_FULL:
				/* last case; falls out of the switch */
				ixgbe->link_speed = SPEED_100;
			}
			ixgbe->link_duplex = LINK_DUPLEX_FULL;
			ixgbe->link_state = LINK_STATE_UP;
			ixgbe->link_down_timeout = 0;
			link_changed = B_TRUE;
		}
	} else {
		/*
		 * Only accept "down" once the startup grace period
		 * (link_check_hrtime) has expired or a prior check
		 * completed; avoids reporting down during early bring-up.
		 */
		if (ixgbe->link_check_complete == B_TRUE ||
		    (ixgbe->link_check_complete == B_FALSE &&
		    gethrtime() >= ixgbe->link_check_hrtime)) {
			/*
			 * The link is really down
			 */
			ixgbe->link_check_complete = B_TRUE;

			if (ixgbe->link_state != LINK_STATE_DOWN) {
				ixgbe->link_speed = 0;
				ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN;
				ixgbe->link_state = LINK_STATE_DOWN;
				link_changed = B_TRUE;
			}

			/*
			 * After MAX_LINK_DOWN_TIMEOUT consecutive down
			 * ticks, clean the tx rings exactly once (the
			 * counter is bumped past the threshold so the
			 * clean is not repeated).
			 */
			if (ixgbe->ixgbe_state & IXGBE_STARTED) {
				if (ixgbe->link_down_timeout <
				    MAX_LINK_DOWN_TIMEOUT) {
					ixgbe->link_down_timeout++;
				} else if (ixgbe->link_down_timeout ==
				    MAX_LINK_DOWN_TIMEOUT) {
					ixgbe_tx_clean(ixgbe);
					ixgbe->link_down_timeout++;
				}
			}
		}
	}

	/*
	 * this is only reached after a link-status-change interrupt
	 * so always get new phy state
	 */
	ixgbe_get_hw_state(ixgbe);

	/*
	 * If we are in an interrupt context, need to re-enable the
	 * interrupt, which was automasked
	 */
	if (servicing_interrupt() != 0) {
		ixgbe->eims |= IXGBE_EICR_LSC;
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
	}

	if (link_changed) {
		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
	}
}
2674 
/*
 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
 *
 * Runs from the sfp taskq; ixgbe->eicr holds the cause bits captured
 * by the interrupt handler that dispatched us.
 */
static void
ixgbe_sfp_check(void *arg)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	uint32_t eicr = ixgbe->eicr;
	struct ixgbe_hw *hw = &ixgbe->hw;

	mutex_enter(&ixgbe->gen_lock);
	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);

		/* if link up, do multispeed fiber setup */
		(void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
		    B_TRUE, B_TRUE);
		ixgbe_driver_link_check(ixgbe);
	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);

		/* if link up, do sfp module setup */
		(void) hw->mac.ops.setup_sfp(hw);

		/* do multispeed fiber setup */
		(void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
		    B_TRUE, B_TRUE);
		ixgbe_driver_link_check(ixgbe);
	}
	mutex_exit(&ixgbe->gen_lock);
}
2708 
2709 /*
2710  * ixgbe_link_timer - timer for link status detection
2711  */
2712 static void
2713 ixgbe_link_timer(void *arg)
2714 {
2715 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2716 
2717 	mutex_enter(&ixgbe->gen_lock);
2718 	ixgbe_driver_link_check(ixgbe);
2719 	mutex_exit(&ixgbe->gen_lock);
2720 }
2721 
2722 /*
2723  * ixgbe_local_timer - Driver watchdog function.
2724  *
2725  * This function will handle the transmit stall check and other routines.
2726  */
2727 static void
2728 ixgbe_local_timer(void *arg)
2729 {
2730 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2731 
2732 	if (ixgbe->ixgbe_state & IXGBE_ERROR) {
2733 		ixgbe->reset_count++;
2734 		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
2735 			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
2736 		ixgbe_restart_watchdog_timer(ixgbe);
2737 		return;
2738 	}
2739 
2740 	if (ixgbe_stall_check(ixgbe)) {
2741 		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
2742 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2743 
2744 		ixgbe->reset_count++;
2745 		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
2746 			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
2747 		ixgbe_restart_watchdog_timer(ixgbe);
2748 	}
2749 }
2750 
2751 /*
2752  * ixgbe_stall_check - Check for transmit stall.
2753  *
2754  * This function checks if the adapter is stalled (in transmit).
2755  *
2756  * It is called each time the watchdog timeout is invoked.
2757  * If the transmit descriptor reclaim continuously fails,
2758  * the watchdog value will increment by 1. If the watchdog
2759  * value exceeds the threshold, the ixgbe is assumed to
2760  * have stalled and need to be reset.
2761  */
2762 static boolean_t
2763 ixgbe_stall_check(ixgbe_t *ixgbe)
2764 {
2765 	ixgbe_tx_ring_t *tx_ring;
2766 	boolean_t result;
2767 	int i;
2768 
2769 	if (ixgbe->link_state != LINK_STATE_UP)
2770 		return (B_FALSE);
2771 
2772 	/*
2773 	 * If any tx ring is stalled, we'll reset the chipset
2774 	 */
2775 	result = B_FALSE;
2776 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2777 		tx_ring = &ixgbe->tx_rings[i];
2778 		if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
2779 			tx_ring->tx_recycle(tx_ring);
2780 		}
2781 
2782 		if (tx_ring->recycle_fail > 0)
2783 			tx_ring->stall_watchdog++;
2784 		else
2785 			tx_ring->stall_watchdog = 0;
2786 
2787 		if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
2788 			result = B_TRUE;
2789 			break;
2790 		}
2791 	}
2792 
2793 	if (result) {
2794 		tx_ring->stall_watchdog = 0;
2795 		tx_ring->recycle_fail = 0;
2796 	}
2797 
2798 	return (result);
2799 }
2800 
2801 
2802 /*
2803  * is_valid_mac_addr - Check if the mac address is valid.
2804  */
2805 static boolean_t
2806 is_valid_mac_addr(uint8_t *mac_addr)
2807 {
2808 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
2809 	const uint8_t addr_test2[6] =
2810 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2811 
2812 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
2813 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
2814 		return (B_FALSE);
2815 
2816 	return (B_TRUE);
2817 }
2818 
/*
 * ixgbe_find_mac_address - Override the chip MAC address from firmware
 * properties (SPARC/OBP only).
 *
 * Precedence (later wins): "local-mac-address" property, then the
 * system address when "local-mac-address?" is "false", then the
 * "mac-address" property (set when netbooted from this interface).
 * On non-SPARC platforms this is a no-op.  Note that B_TRUE is
 * returned even when no property overrides the address.
 */
static boolean_t
ixgbe_find_mac_address(ixgbe_t *ixgbe)
{
#ifdef __sparc
	struct ixgbe_hw *hw = &ixgbe->hw;
	uchar_t *bytes;
	struct ether_addr sysaddr;
	uint_t nelts;
	int err;
	boolean_t found = B_FALSE;

	/*
	 * The "vendor's factory-set address" may already have
	 * been extracted from the chip, but if the property
	 * "local-mac-address" is set we use that instead.
	 *
	 * We check whether it looks like an array of 6
	 * bytes (which it should, if OBP set it).  If we can't
	 * make sense of it this way, we'll ignore it.
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Look up the OBP property "local-mac-address?". If the user has set
	 * 'local-mac-address? = false', use "the system address" instead.
	 */
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
			if (localetheraddr(NULL, &sysaddr) != 0) {
				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
				found = B_TRUE;
			}
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Finally(!), if there's a valid "mac-address" property (created
	 * if we netbooted from this interface), we must use this instead
	 * of any of the above to ensure that the NFS/install server doesn't
	 * get confused by the address changing as Solaris takes over!
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	if (found) {
		/* Mirror the override into the permanent-address field */
		bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
		return (B_TRUE);
	}
#else
	_NOTE(ARGUNUSED(ixgbe));
#endif

	return (B_TRUE);
}
2892 
2893 #pragma inline(ixgbe_arm_watchdog_timer)
2894 static void
2895 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
2896 {
2897 	/*
2898 	 * Fire a watchdog timer
2899 	 */
2900 	ixgbe->watchdog_tid =
2901 	    timeout(ixgbe_local_timer,
2902 	    (void *)ixgbe, 1 * drv_usectohz(1000000));
2903 
2904 }
2905 
2906 /*
2907  * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
2908  */
2909 void
2910 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
2911 {
2912 	mutex_enter(&ixgbe->watchdog_lock);
2913 
2914 	if (!ixgbe->watchdog_enable) {
2915 		ixgbe->watchdog_enable = B_TRUE;
2916 		ixgbe->watchdog_start = B_TRUE;
2917 		ixgbe_arm_watchdog_timer(ixgbe);
2918 	}
2919 
2920 	mutex_exit(&ixgbe->watchdog_lock);
2921 }
2922 
2923 /*
2924  * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
2925  */
2926 void
2927 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
2928 {
2929 	timeout_id_t tid;
2930 
2931 	mutex_enter(&ixgbe->watchdog_lock);
2932 
2933 	ixgbe->watchdog_enable = B_FALSE;
2934 	ixgbe->watchdog_start = B_FALSE;
2935 	tid = ixgbe->watchdog_tid;
2936 	ixgbe->watchdog_tid = 0;
2937 
2938 	mutex_exit(&ixgbe->watchdog_lock);
2939 
2940 	if (tid != 0)
2941 		(void) untimeout(tid);
2942 }
2943 
2944 /*
2945  * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
2946  */
2947 void
2948 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
2949 {
2950 	mutex_enter(&ixgbe->watchdog_lock);
2951 
2952 	if (ixgbe->watchdog_enable) {
2953 		if (!ixgbe->watchdog_start) {
2954 			ixgbe->watchdog_start = B_TRUE;
2955 			ixgbe_arm_watchdog_timer(ixgbe);
2956 		}
2957 	}
2958 
2959 	mutex_exit(&ixgbe->watchdog_lock);
2960 }
2961 
2962 /*
2963  * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
2964  */
2965 static void
2966 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
2967 {
2968 	mutex_enter(&ixgbe->watchdog_lock);
2969 
2970 	if (ixgbe->watchdog_start)
2971 		ixgbe_arm_watchdog_timer(ixgbe);
2972 
2973 	mutex_exit(&ixgbe->watchdog_lock);
2974 }
2975 
2976 /*
2977  * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
2978  */
2979 void
2980 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
2981 {
2982 	timeout_id_t tid;
2983 
2984 	mutex_enter(&ixgbe->watchdog_lock);
2985 
2986 	ixgbe->watchdog_start = B_FALSE;
2987 	tid = ixgbe->watchdog_tid;
2988 	ixgbe->watchdog_tid = 0;
2989 
2990 	mutex_exit(&ixgbe->watchdog_lock);
2991 
2992 	if (tid != 0)
2993 		(void) untimeout(tid);
2994 }
2995 
/*
 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
 */
static void
ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	/*
	 * mask all interrupts off
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);

	/*
	 * for MSI-X, also disable autoclear
	 */
	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
	}

	/* Flush posted register writes before returning */
	IXGBE_WRITE_FLUSH(hw);
}
3018 
/*
 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
 *
 * Builds the EIMS/EIAC/EIAM/GPIE values from the interrupt type in
 * use and the per-chip capability flags, then writes them in one go.
 */
static void
ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eiac, eiam;
	uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* interrupt types to enable */
	ixgbe->eims = IXGBE_EIMS_ENABLE_MASK;	/* shared code default */
	ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER;	/* minus tcp timer */
	ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */

	/* enable automask on "other" causes that this adapter can generate */
	eiam = ixgbe->capab->other_intr;

	/*
	 * msi-x mode
	 */
	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
		/* enable autoclear but not on bits 29:20 */
		eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);

		/* general purpose interrupt enable */
		gpie |= (IXGBE_GPIE_MSIX_MODE
		    | IXGBE_GPIE_PBA_SUPPORT
		    | IXGBE_GPIE_OCD
		    | IXGBE_GPIE_EIAME);
	/*
	 * non-msi-x mode
	 */
	} else {

		/* disable autoclear, leave gpie at default */
		eiac = 0;

		/*
		 * General purpose interrupt enable.
		 * For 82599, extended interrupt automask enable
		 * only in MSI or MSI-X mode
		 */
		if ((hw->mac.type < ixgbe_mac_82599EB) ||
		    (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
			gpie |= IXGBE_GPIE_EIAME;
		}
	}
	/* Enable specific interrupts for 82599  */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		gpie |= IXGBE_SDP2_GPIEN; /* pluggable optics intr */
		/*
		 * NOTE(review): the SDP1 cause is handled as a
		 * multispeed-fiber event in ixgbe_sfp_check(), not as
		 * LSC (the old comment here said "LSC interrupt") --
		 * confirm against the 82599 datasheet.
		 */
		gpie |= IXGBE_SDP1_GPIEN;
	}

	/* write to interrupt control registers */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
	IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
	IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	IXGBE_WRITE_FLUSH(hw);
}
3080 
/*
 * ixgbe_loopback_ioctl - Loopback support.
 *
 * Handles the LB_* ioctls: report the size of the mode list, return
 * the list itself, and get/set the current loopback mode.  Each
 * command validates ioc_count against the expected payload size.
 */
enum ioc_reply
ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	uint32_t size;
	uint32_t value;

	/* The payload travels in the continuation block */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	switch (iocp->ioc_cmd) {
	default:
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		/* Report how many bytes LB_GET_INFO will need */
		size = sizeof (lb_info_sz_t);
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		value = sizeof (lb_normal);
		value += sizeof (lb_mac);
		value += sizeof (lb_external);

		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
		*lbsp = value;
		break;

	case LB_GET_INFO:
		/* Copy out the three supported loopback properties */
		value = sizeof (lb_normal);
		value += sizeof (lb_mac);
		value += sizeof (lb_external);

		size = value;
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		value = 0;
		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;

		lbpp[value++] = lb_normal;
		lbpp[value++] = lb_mac;
		lbpp[value++] = lb_external;
		break;

	case LB_GET_MODE:
		/* Return the currently active loopback mode */
		size = sizeof (uint32_t);
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		*lbmp = ixgbe->loopback_mode;
		break;

	case LB_SET_MODE:
		/* Switch loopback mode; replies with an empty payload */
		size = 0;
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);

		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
			return (IOC_INVAL);
		break;
	}

	iocp->ioc_count = size;
	iocp->ioc_error = 0;

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		return (IOC_INVAL);
	}

	return (IOC_REPLY);
}
3160 
3161 /*
3162  * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
3163  */
3164 static boolean_t
3165 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
3166 {
3167 	if (mode == ixgbe->loopback_mode)
3168 		return (B_TRUE);
3169 
3170 	ixgbe->loopback_mode = mode;
3171 
3172 	if (mode == IXGBE_LB_NONE) {
3173 		/*
3174 		 * Reset the chip
3175 		 */
3176 		(void) ixgbe_reset(ixgbe);
3177 		return (B_TRUE);
3178 	}
3179 
3180 	mutex_enter(&ixgbe->gen_lock);
3181 
3182 	switch (mode) {
3183 	default:
3184 		mutex_exit(&ixgbe->gen_lock);
3185 		return (B_FALSE);
3186 
3187 	case IXGBE_LB_EXTERNAL:
3188 		break;
3189 
3190 	case IXGBE_LB_INTERNAL_MAC:
3191 		ixgbe_set_internal_mac_loopback(ixgbe);
3192 		break;
3193 	}
3194 
3195 	mutex_exit(&ixgbe->gen_lock);
3196 
3197 	return (B_TRUE);
3198 }
3199 
3200 /*
3201  * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
3202  */
3203 static void
3204 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
3205 {
3206 	struct ixgbe_hw *hw;
3207 	uint32_t reg;
3208 	uint8_t atlas;
3209 
3210 	hw = &ixgbe->hw;
3211 
3212 	/*
3213 	 * Setup MAC loopback
3214 	 */
3215 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
3216 	reg |= IXGBE_HLREG0_LPBK;
3217 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
3218 
3219 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
3220 	reg &= ~IXGBE_AUTOC_LMS_MASK;
3221 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
3222 
3223 	/*
3224 	 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
3225 	 */
3226 	if (hw->mac.type == ixgbe_mac_82598EB) {
3227 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3228 		    &atlas);
3229 		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
3230 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3231 		    atlas);
3232 
3233 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3234 		    &atlas);
3235 		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
3236 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3237 		    atlas);
3238 
3239 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3240 		    &atlas);
3241 		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
3242 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3243 		    atlas);
3244 
3245 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3246 		    &atlas);
3247 		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
3248 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3249 		    atlas);
3250 	}
3251 }
3252 
3253 #pragma inline(ixgbe_intr_rx_work)
3254 /*
3255  * ixgbe_intr_rx_work - RX processing of ISR.
3256  */
3257 static void
3258 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
3259 {
3260 	mblk_t *mp;
3261 
3262 	mutex_enter(&rx_ring->rx_lock);
3263 
3264 	mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
3265 	mutex_exit(&rx_ring->rx_lock);
3266 
3267 	if (mp != NULL)
3268 		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
3269 		    rx_ring->ring_gen_num);
3270 }
3271 
3272 #pragma inline(ixgbe_intr_tx_work)
3273 /*
3274  * ixgbe_intr_tx_work - TX processing of ISR.
3275  */
3276 static void
3277 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
3278 {
3279 	ixgbe_t *ixgbe = tx_ring->ixgbe;
3280 
3281 	/*
3282 	 * Recycle the tx descriptors
3283 	 */
3284 	tx_ring->tx_recycle(tx_ring);
3285 
3286 	/*
3287 	 * Schedule the re-transmit
3288 	 */
3289 	if (tx_ring->reschedule &&
3290 	    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
3291 		tx_ring->reschedule = B_FALSE;
3292 		mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
3293 		    tx_ring->ring_handle);
3294 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
3295 	}
3296 }
3297 
3298 #pragma inline(ixgbe_intr_other_work)
3299 /*
3300  * ixgbe_intr_other_work - Process interrupt types other than tx/rx
3301  */
3302 static void
3303 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
3304 {
3305 	struct ixgbe_hw *hw = &ixgbe->hw;
3306 
3307 	ASSERT(mutex_owned(&ixgbe->gen_lock));
3308 
3309 	/*
3310 	 * handle link status change
3311 	 */
3312 	if (eicr & IXGBE_EICR_LSC) {
3313 		ixgbe_driver_link_check(ixgbe);
3314 	}
3315 
3316 	/*
3317 	 * check for fan failure on adapters with fans
3318 	 */
3319 	if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
3320 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3321 		if (hw->mac.type < ixgbe_mac_82599EB) {
3322 			ixgbe_log(ixgbe,
3323 			    "Fan has stopped, replace the adapter\n");
3324 
3325 			/* re-enable the interrupt, which was automasked */
3326 			ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
3327 		}
3328 	}
3329 
3330 	/*
3331 	 * Do SFP check for 82599
3332 	 */
3333 	if (hw->mac.type == ixgbe_mac_82599EB) {
3334 		if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
3335 		    ixgbe_sfp_check, (void *)ixgbe,
3336 		    DDI_NOSLEEP)) != DDI_SUCCESS) {
3337 			ixgbe_log(ixgbe, "No memory available to dispatch "
3338 			    "taskq for SFP check");
3339 		}
3340 
3341 		/*
3342 		 * We need to fully re-check the link later.
3343 		 */
3344 		ixgbe->link_check_complete = B_FALSE;
3345 		ixgbe->link_check_hrtime = gethrtime() +
3346 		    (IXGBE_LINK_UP_TIME * 100000000ULL);
3347 	}
3348 }
3349 
3350 /*
3351  * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
3352  */
3353 static uint_t
3354 ixgbe_intr_legacy(void *arg1, void *arg2)
3355 {
3356 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3357 	struct ixgbe_hw *hw = &ixgbe->hw;
3358 	ixgbe_tx_ring_t *tx_ring;
3359 	ixgbe_rx_ring_t *rx_ring;
3360 	uint32_t eicr;
3361 	mblk_t *mp;
3362 	boolean_t tx_reschedule;
3363 	uint_t result;
3364 
3365 	_NOTE(ARGUNUSED(arg2));
3366 
3367 	mutex_enter(&ixgbe->gen_lock);
3368 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
3369 		mutex_exit(&ixgbe->gen_lock);
3370 		return (DDI_INTR_UNCLAIMED);
3371 	}
3372 
3373 	mp = NULL;
3374 	tx_reschedule = B_FALSE;
3375 
3376 	/*
3377 	 * Any bit set in eicr: claim this interrupt
3378 	 */
3379 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3380 
3381 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3382 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3383 		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
3384 		return (DDI_INTR_CLAIMED);
3385 	}
3386 
3387 	if (eicr) {
3388 		/*
3389 		 * For legacy interrupt, we have only one interrupt,
3390 		 * so we have only one rx ring and one tx ring enabled.
3391 		 */
3392 		ASSERT(ixgbe->num_rx_rings == 1);
3393 		ASSERT(ixgbe->num_tx_rings == 1);
3394 
3395 		/*
3396 		 * For legacy interrupt, rx rings[0] will use RTxQ[0].
3397 		 */
3398 		if (eicr & 0x1) {
3399 			ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
3400 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
3401 			ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
3402 			/*
3403 			 * Clean the rx descriptors
3404 			 */
3405 			rx_ring = &ixgbe->rx_rings[0];
3406 			mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
3407 		}
3408 
3409 		/*
3410 		 * For legacy interrupt, tx rings[0] will use RTxQ[1].
3411 		 */
3412 		if (eicr & 0x2) {
3413 			/*
3414 			 * Recycle the tx descriptors
3415 			 */
3416 			tx_ring = &ixgbe->tx_rings[0];
3417 			tx_ring->tx_recycle(tx_ring);
3418 
3419 			/*
3420 			 * Schedule the re-transmit
3421 			 */
3422 			tx_reschedule = (tx_ring->reschedule &&
3423 			    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
3424 		}
3425 
3426 		/* any interrupt type other than tx/rx */
3427 		if (eicr & ixgbe->capab->other_intr) {
3428 			if (hw->mac.type < ixgbe_mac_82599EB) {
3429 				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3430 			}
3431 			if (hw->mac.type == ixgbe_mac_82599EB) {
3432 				ixgbe->eimc = IXGBE_82599_OTHER_INTR;
3433 				IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
3434 			}
3435 			ixgbe_intr_other_work(ixgbe, eicr);
3436 			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3437 		}
3438 
3439 		mutex_exit(&ixgbe->gen_lock);
3440 
3441 		result = DDI_INTR_CLAIMED;
3442 	} else {
3443 		mutex_exit(&ixgbe->gen_lock);
3444 
3445 		/*
3446 		 * No interrupt cause bits set: don't claim this interrupt.
3447 		 */
3448 		result = DDI_INTR_UNCLAIMED;
3449 	}
3450 
3451 	/* re-enable the interrupts which were automasked */
3452 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3453 
3454 	/*
3455 	 * Do the following work outside of the gen_lock
3456 	 */
3457 	if (mp != NULL) {
3458 		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
3459 		    rx_ring->ring_gen_num);
3460 	}
3461 
3462 	if (tx_reschedule)  {
3463 		tx_ring->reschedule = B_FALSE;
3464 		mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
3465 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
3466 	}
3467 
3468 	return (result);
3469 }
3470 
3471 /*
3472  * ixgbe_intr_msi - Interrupt handler for MSI.
3473  */
3474 static uint_t
3475 ixgbe_intr_msi(void *arg1, void *arg2)
3476 {
3477 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3478 	struct ixgbe_hw *hw = &ixgbe->hw;
3479 	uint32_t eicr;
3480 
3481 	_NOTE(ARGUNUSED(arg2));
3482 
3483 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3484 
3485 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3486 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3487 		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
3488 		return (DDI_INTR_CLAIMED);
3489 	}
3490 
3491 	/*
3492 	 * For MSI interrupt, we have only one vector,
3493 	 * so we have only one rx ring and one tx ring enabled.
3494 	 */
3495 	ASSERT(ixgbe->num_rx_rings == 1);
3496 	ASSERT(ixgbe->num_tx_rings == 1);
3497 
3498 	/*
3499 	 * For MSI interrupt, rx rings[0] will use RTxQ[0].
3500 	 */
3501 	if (eicr & 0x1) {
3502 		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
3503 	}
3504 
3505 	/*
3506 	 * For MSI interrupt, tx rings[0] will use RTxQ[1].
3507 	 */
3508 	if (eicr & 0x2) {
3509 		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
3510 	}
3511 
3512 	/* any interrupt type other than tx/rx */
3513 	if (eicr & ixgbe->capab->other_intr) {
3514 		mutex_enter(&ixgbe->gen_lock);
3515 		if (hw->mac.type < ixgbe_mac_82599EB) {
3516 			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3517 		}
3518 		if (hw->mac.type == ixgbe_mac_82599EB) {
3519 			ixgbe->eimc = IXGBE_82599_OTHER_INTR;
3520 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
3521 		}
3522 		ixgbe_intr_other_work(ixgbe, eicr);
3523 		ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3524 		mutex_exit(&ixgbe->gen_lock);
3525 	}
3526 
3527 	/* re-enable the interrupts which were automasked */
3528 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3529 
3530 	return (DDI_INTR_CLAIMED);
3531 }
3532 
3533 /*
3534  * ixgbe_intr_msix - Interrupt handler for MSI-X.
3535  */
3536 static uint_t
3537 ixgbe_intr_msix(void *arg1, void *arg2)
3538 {
3539 	ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
3540 	ixgbe_t *ixgbe = vect->ixgbe;
3541 	struct ixgbe_hw *hw = &ixgbe->hw;
3542 	uint32_t eicr;
3543 	int r_idx = 0;
3544 
3545 	_NOTE(ARGUNUSED(arg2));
3546 
3547 	/*
3548 	 * Clean each rx ring that has its bit set in the map
3549 	 */
3550 	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
3551 	while (r_idx >= 0) {
3552 		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
3553 		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
3554 		    (ixgbe->num_rx_rings - 1));
3555 	}
3556 
3557 	/*
3558 	 * Clean each tx ring that has its bit set in the map
3559 	 */
3560 	r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
3561 	while (r_idx >= 0) {
3562 		ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
3563 		r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
3564 		    (ixgbe->num_tx_rings - 1));
3565 	}
3566 
3567 
3568 	/*
3569 	 * Clean other interrupt (link change) that has its bit set in the map
3570 	 */
3571 	if (BT_TEST(vect->other_map, 0) == 1) {
3572 		eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3573 
3574 		if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
3575 		    DDI_FM_OK) {
3576 			ddi_fm_service_impact(ixgbe->dip,
3577 			    DDI_SERVICE_DEGRADED);
3578 			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
3579 			return (DDI_INTR_CLAIMED);
3580 		}
3581 
3582 		/*
3583 		 * Need check cause bits and only other causes will
3584 		 * be processed
3585 		 */
3586 		/* any interrupt type other than tx/rx */
3587 		if (eicr & ixgbe->capab->other_intr) {
3588 			if (hw->mac.type < ixgbe_mac_82599EB) {
3589 				mutex_enter(&ixgbe->gen_lock);
3590 				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3591 				ixgbe_intr_other_work(ixgbe, eicr);
3592 				mutex_exit(&ixgbe->gen_lock);
3593 			} else {
3594 				if (hw->mac.type == ixgbe_mac_82599EB) {
3595 					mutex_enter(&ixgbe->gen_lock);
3596 					ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
3597 					ixgbe_intr_other_work(ixgbe, eicr);
3598 					mutex_exit(&ixgbe->gen_lock);
3599 				}
3600 			}
3601 		}
3602 
3603 		/* re-enable the interrupts which were automasked */
3604 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3605 	}
3606 
3607 	return (DDI_INTR_CLAIMED);
3608 }
3609 
3610 /*
3611  * ixgbe_alloc_intrs - Allocate interrupts for the driver.
3612  *
3613  * Normal sequence is to try MSI-X; if not sucessful, try MSI;
3614  * if not successful, try Legacy.
3615  * ixgbe->intr_force can be used to force sequence to start with
3616  * any of the 3 types.
3617  * If MSI-X is not used, number of tx/rx rings is forced to 1.
3618  */
3619 static int
3620 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
3621 {
3622 	dev_info_t *devinfo;
3623 	int intr_types;
3624 	int rc;
3625 
3626 	devinfo = ixgbe->dip;
3627 
3628 	/*
3629 	 * Get supported interrupt types
3630 	 */
3631 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
3632 
3633 	if (rc != DDI_SUCCESS) {
3634 		ixgbe_log(ixgbe,
3635 		    "Get supported interrupt types failed: %d", rc);
3636 		return (IXGBE_FAILURE);
3637 	}
3638 	IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
3639 
3640 	ixgbe->intr_type = 0;
3641 
3642 	/*
3643 	 * Install MSI-X interrupts
3644 	 */
3645 	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
3646 	    (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
3647 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
3648 		if (rc == IXGBE_SUCCESS)
3649 			return (IXGBE_SUCCESS);
3650 
3651 		ixgbe_log(ixgbe,
3652 		    "Allocate MSI-X failed, trying MSI interrupts...");
3653 	}
3654 
3655 	/*
3656 	 * MSI-X not used, force rings and groups to 1
3657 	 */
3658 	ixgbe->num_rx_rings = 1;
3659 	ixgbe->num_rx_groups = 1;
3660 	ixgbe->num_tx_rings = 1;
3661 	ixgbe_log(ixgbe,
3662 	    "MSI-X not used, force rings and groups number to 1");
3663 
3664 	/*
3665 	 * Install MSI interrupts
3666 	 */
3667 	if ((intr_types & DDI_INTR_TYPE_MSI) &&
3668 	    (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
3669 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
3670 		if (rc == IXGBE_SUCCESS)
3671 			return (IXGBE_SUCCESS);
3672 
3673 		ixgbe_log(ixgbe,
3674 		    "Allocate MSI failed, trying Legacy interrupts...");
3675 	}
3676 
3677 	/*
3678 	 * Install legacy interrupts
3679 	 */
3680 	if (intr_types & DDI_INTR_TYPE_FIXED) {
3681 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
3682 		if (rc == IXGBE_SUCCESS)
3683 			return (IXGBE_SUCCESS);
3684 
3685 		ixgbe_log(ixgbe,
3686 		    "Allocate Legacy interrupts failed");
3687 	}
3688 
3689 	/*
3690 	 * If none of the 3 types succeeded, return failure
3691 	 */
3692 	return (IXGBE_FAILURE);
3693 }
3694 
3695 /*
3696  * ixgbe_alloc_intr_handles - Allocate interrupt handles.
3697  *
3698  * For legacy and MSI, only 1 handle is needed.  For MSI-X,
3699  * if fewer than 2 handles are available, return failure.
3700  * Upon success, this maps the vectors to rx and tx rings for
3701  * interrupts.
3702  */
3703 static int
3704 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
3705 {
3706 	dev_info_t *devinfo;
3707 	int request, count, avail, actual;
3708 	int minimum;
3709 	int rc;
3710 
3711 	devinfo = ixgbe->dip;
3712 
3713 	switch (intr_type) {
3714 	case DDI_INTR_TYPE_FIXED:
3715 		request = 1;	/* Request 1 legacy interrupt handle */
3716 		minimum = 1;
3717 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
3718 		break;
3719 
3720 	case DDI_INTR_TYPE_MSI:
3721 		request = 1;	/* Request 1 MSI interrupt handle */
3722 		minimum = 1;
3723 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
3724 		break;
3725 
3726 	case DDI_INTR_TYPE_MSIX:
3727 		/*
3728 		 * Best number of vectors for the adapter is
3729 		 * # rx rings + # tx rings.
3730 		 */
3731 		request = ixgbe->num_rx_rings + ixgbe->num_tx_rings;
3732 		if (request > ixgbe->capab->max_ring_vect)
3733 			request = ixgbe->capab->max_ring_vect;
3734 		minimum = 2;
3735 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
3736 		break;
3737 
3738 	default:
3739 		ixgbe_log(ixgbe,
3740 		    "invalid call to ixgbe_alloc_intr_handles(): %d\n",
3741 		    intr_type);
3742 		return (IXGBE_FAILURE);
3743 	}
3744 	IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d  minimum: %d",
3745 	    request, minimum);
3746 
3747 	/*
3748 	 * Get number of supported interrupts
3749 	 */
3750 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
3751 	if ((rc != DDI_SUCCESS) || (count < minimum)) {
3752 		ixgbe_log(ixgbe,
3753 		    "Get interrupt number failed. Return: %d, count: %d",
3754 		    rc, count);
3755 		return (IXGBE_FAILURE);
3756 	}
3757 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
3758 
3759 	/*
3760 	 * Get number of available interrupts
3761 	 */
3762 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
3763 	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
3764 		ixgbe_log(ixgbe,
3765 		    "Get interrupt available number failed. "
3766 		    "Return: %d, available: %d", rc, avail);
3767 		return (IXGBE_FAILURE);
3768 	}
3769 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts available: %d", avail);
3770 
3771 	if (avail < request) {
3772 		ixgbe_log(ixgbe, "Request %d handles, %d available",
3773 		    request, avail);
3774 		request = avail;
3775 	}
3776 
3777 	actual = 0;
3778 	ixgbe->intr_cnt = 0;
3779 
3780 	/*
3781 	 * Allocate an array of interrupt handles
3782 	 */
3783 	ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
3784 	ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
3785 
3786 	rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
3787 	    request, &actual, DDI_INTR_ALLOC_NORMAL);
3788 	if (rc != DDI_SUCCESS) {
3789 		ixgbe_log(ixgbe, "Allocate interrupts failed. "
3790 		    "return: %d, request: %d, actual: %d",
3791 		    rc, request, actual);
3792 		goto alloc_handle_fail;
3793 	}
3794 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
3795 
3796 	ixgbe->intr_cnt = actual;
3797 
3798 	/*
3799 	 * Now we know the actual number of vectors.  Here we map the vector
3800 	 * to other, rx rings and tx ring.
3801 	 */
3802 	if (actual < minimum) {
3803 		ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
3804 		    actual);
3805 		goto alloc_handle_fail;
3806 	}
3807 
3808 	/*
3809 	 * Get priority for first vector, assume remaining are all the same
3810 	 */
3811 	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
3812 	if (rc != DDI_SUCCESS) {
3813 		ixgbe_log(ixgbe,
3814 		    "Get interrupt priority failed: %d", rc);
3815 		goto alloc_handle_fail;
3816 	}
3817 
3818 	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
3819 	if (rc != DDI_SUCCESS) {
3820 		ixgbe_log(ixgbe,
3821 		    "Get interrupt cap failed: %d", rc);
3822 		goto alloc_handle_fail;
3823 	}
3824 
3825 	ixgbe->intr_type = intr_type;
3826 
3827 	return (IXGBE_SUCCESS);
3828 
3829 alloc_handle_fail:
3830 	ixgbe_rem_intrs(ixgbe);
3831 
3832 	return (IXGBE_FAILURE);
3833 }
3834 
3835 /*
3836  * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
3837  *
3838  * Before adding the interrupt handlers, the interrupt vectors have
3839  * been allocated, and the rx/tx rings have also been allocated.
3840  */
3841 static int
3842 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
3843 {
3844 	int vector = 0;
3845 	int rc;
3846 
3847 	switch (ixgbe->intr_type) {
3848 	case DDI_INTR_TYPE_MSIX:
3849 		/*
3850 		 * Add interrupt handler for all vectors
3851 		 */
3852 		for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
3853 			/*
3854 			 * install pointer to vect_map[vector]
3855 			 */
3856 			rc = ddi_intr_add_handler(ixgbe->htable[vector],
3857 			    (ddi_intr_handler_t *)ixgbe_intr_msix,
3858 			    (void *)&ixgbe->vect_map[vector], NULL);
3859 
3860 			if (rc != DDI_SUCCESS) {
3861 				ixgbe_log(ixgbe,
3862 				    "Add rx interrupt handler failed. "
3863 				    "return: %d, vector: %d", rc, vector);
3864 				for (vector--; vector >= 0; vector--) {
3865 					(void) ddi_intr_remove_handler(
3866 					    ixgbe->htable[vector]);
3867 				}
3868 				return (IXGBE_FAILURE);
3869 			}
3870 		}
3871 
3872 		break;
3873 
3874 	case DDI_INTR_TYPE_MSI:
3875 		/*
3876 		 * Add interrupt handlers for the only vector
3877 		 */
3878 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
3879 		    (ddi_intr_handler_t *)ixgbe_intr_msi,
3880 		    (void *)ixgbe, NULL);
3881 
3882 		if (rc != DDI_SUCCESS) {
3883 			ixgbe_log(ixgbe,
3884 			    "Add MSI interrupt handler failed: %d", rc);
3885 			return (IXGBE_FAILURE);
3886 		}
3887 
3888 		break;
3889 
3890 	case DDI_INTR_TYPE_FIXED:
3891 		/*
3892 		 * Add interrupt handlers for the only vector
3893 		 */
3894 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
3895 		    (ddi_intr_handler_t *)ixgbe_intr_legacy,
3896 		    (void *)ixgbe, NULL);
3897 
3898 		if (rc != DDI_SUCCESS) {
3899 			ixgbe_log(ixgbe,
3900 			    "Add legacy interrupt handler failed: %d", rc);
3901 			return (IXGBE_FAILURE);
3902 		}
3903 
3904 		break;
3905 
3906 	default:
3907 		return (IXGBE_FAILURE);
3908 	}
3909 
3910 	return (IXGBE_SUCCESS);
3911 }
3912 
3913 #pragma inline(ixgbe_map_rxring_to_vector)
3914 /*
3915  * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
3916  */
3917 static void
3918 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
3919 {
3920 	/*
3921 	 * Set bit in map
3922 	 */
3923 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
3924 
3925 	/*
3926 	 * Count bits set
3927 	 */
3928 	ixgbe->vect_map[v_idx].rxr_cnt++;
3929 
3930 	/*
3931 	 * Remember bit position
3932 	 */
3933 	ixgbe->rx_rings[r_idx].intr_vector = v_idx;
3934 	ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
3935 }
3936 
3937 #pragma inline(ixgbe_map_txring_to_vector)
3938 /*
3939  * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
3940  */
3941 static void
3942 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
3943 {
3944 	/*
3945 	 * Set bit in map
3946 	 */
3947 	BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
3948 
3949 	/*
3950 	 * Count bits set
3951 	 */
3952 	ixgbe->vect_map[v_idx].txr_cnt++;
3953 
3954 	/*
3955 	 * Remember bit position
3956 	 */
3957 	ixgbe->tx_rings[t_idx].intr_vector = v_idx;
3958 	ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
3959 }
3960 
3961 /*
3962  * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
3963  * allocation register (IVAR).
3964  * cause:
3965  *   -1 : other cause
3966  *    0 : rx
3967  *    1 : tx
3968  */
3969 static void
3970 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
3971     int8_t cause)
3972 {
3973 	struct ixgbe_hw *hw = &ixgbe->hw;
3974 	u32 ivar, index;
3975 
3976 	switch (hw->mac.type) {
3977 	case ixgbe_mac_82598EB:
3978 		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
3979 		if (cause == -1) {
3980 			cause = 0;
3981 		}
3982 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
3983 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3984 		ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
3985 		ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
3986 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3987 		break;
3988 	case ixgbe_mac_82599EB:
3989 		if (cause == -1) {
3990 			/* other causes */
3991 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
3992 			index = (intr_alloc_entry & 1) * 8;
3993 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3994 			ivar &= ~(0xFF << index);
3995 			ivar |= (msix_vector << index);
3996 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3997 		} else {
3998 			/* tx or rx causes */
3999 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4000 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4001 			ivar = IXGBE_READ_REG(hw,
4002 			    IXGBE_IVAR(intr_alloc_entry >> 1));
4003 			ivar &= ~(0xFF << index);
4004 			ivar |= (msix_vector << index);
4005 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4006 			    ivar);
4007 		}
4008 		break;
4009 	default:
4010 		break;
4011 	}
4012 }
4013 
4014 /*
4015  * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
4016  * given interrupt vector allocation register (IVAR).
4017  * cause:
4018  *   -1 : other cause
4019  *    0 : rx
4020  *    1 : tx
4021  */
4022 static void
4023 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4024 {
4025 	struct ixgbe_hw *hw = &ixgbe->hw;
4026 	u32 ivar, index;
4027 
4028 	switch (hw->mac.type) {
4029 	case ixgbe_mac_82598EB:
4030 		if (cause == -1) {
4031 			cause = 0;
4032 		}
4033 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4034 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4035 		ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4036 		    (intr_alloc_entry & 0x3)));
4037 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4038 		break;
4039 	case ixgbe_mac_82599EB:
4040 		if (cause == -1) {
4041 			/* other causes */
4042 			index = (intr_alloc_entry & 1) * 8;
4043 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4044 			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4045 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4046 		} else {
4047 			/* tx or rx causes */
4048 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4049 			ivar = IXGBE_READ_REG(hw,
4050 			    IXGBE_IVAR(intr_alloc_entry >> 1));
4051 			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4052 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4053 			    ivar);
4054 		}
4055 		break;
4056 	default:
4057 		break;
4058 	}
4059 }
4060 
4061 /*
4062  * ixgbe_disable_ivar - Disble the given entry by clearing the VAL bit of
4063  * given interrupt vector allocation register (IVAR).
4064  * cause:
4065  *   -1 : other cause
4066  *    0 : rx
4067  *    1 : tx
4068  */
4069 static void
4070 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4071 {
4072 	struct ixgbe_hw *hw = &ixgbe->hw;
4073 	u32 ivar, index;
4074 
4075 	switch (hw->mac.type) {
4076 	case ixgbe_mac_82598EB:
4077 		if (cause == -1) {
4078 			cause = 0;
4079 		}
4080 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4081 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4082 		ivar &= ~(IXGBE_IVAR_ALLOC_VAL<< (8 *
4083 		    (intr_alloc_entry & 0x3)));
4084 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4085 		break;
4086 	case ixgbe_mac_82599EB:
4087 		if (cause == -1) {
4088 			/* other causes */
4089 			index = (intr_alloc_entry & 1) * 8;
4090 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4091 			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4092 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4093 		} else {
4094 			/* tx or rx causes */
4095 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4096 			ivar = IXGBE_READ_REG(hw,
4097 			    IXGBE_IVAR(intr_alloc_entry >> 1));
4098 			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4099 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4100 			    ivar);
4101 		}
4102 		break;
4103 	default:
4104 		break;
4105 	}
4106 }
4107 
4108 /*
4109  * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
4110  *
4111  * For MSI-X, here will map rx interrupt, tx interrupt and other interrupt
4112  * to vector[0 - (intr_cnt -1)].
4113  */
4114 static int
4115 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
4116 {
4117 	int i, vector = 0;
4118 
4119 	/* initialize vector map */
4120 	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
4121 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4122 		ixgbe->vect_map[i].ixgbe = ixgbe;
4123 	}
4124 
4125 	/*
4126 	 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
4127 	 * tx rings[0] on RTxQ[1].
4128 	 */
4129 	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
4130 		ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
4131 		ixgbe_map_txring_to_vector(ixgbe, 0, 1);
4132 		return (IXGBE_SUCCESS);
4133 	}
4134 
4135 	/*
4136 	 * Interrupts/vectors mapping for MSI-X
4137 	 */
4138 
4139 	/*
4140 	 * Map other interrupt to vector 0,
4141 	 * Set bit in map and count the bits set.
4142 	 */
4143 	BT_SET(ixgbe->vect_map[vector].other_map, 0);
4144 	ixgbe->vect_map[vector].other_cnt++;
4145 	vector++;
4146 
4147 	/*
4148 	 * Map rx ring interrupts to vectors
4149 	 */
4150 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
4151 		ixgbe_map_rxring_to_vector(ixgbe, i, vector);
4152 		vector = (vector +1) % ixgbe->intr_cnt;
4153 	}
4154 
4155 	/*
4156 	 * Map tx ring interrupts to vectors
4157 	 */
4158 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
4159 		ixgbe_map_txring_to_vector(ixgbe, i, vector);
4160 		vector = (vector +1) % ixgbe->intr_cnt;
4161 	}
4162 
4163 	return (IXGBE_SUCCESS);
4164 }
4165 
4166 /*
4167  * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
4168  *
4169  * This relies on ring/vector mapping already set up in the
4170  * vect_map[] structures
4171  */
4172 static void
4173 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
4174 {
4175 	struct ixgbe_hw *hw = &ixgbe->hw;
4176 	ixgbe_intr_vector_t *vect;	/* vector bitmap */
4177 	int r_idx;	/* ring index */
4178 	int v_idx;	/* vector index */
4179 
4180 	/*
4181 	 * Clear any previous entries
4182 	 */
4183 	switch (hw->mac.type) {
4184 	case ixgbe_mac_82598EB:
4185 		for (v_idx = 0; v_idx < 25; v_idx++)
4186 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4187 
4188 		break;
4189 	case ixgbe_mac_82599EB:
4190 		for (v_idx = 0; v_idx < 64; v_idx++)
4191 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4192 		IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
4193 
4194 		break;
4195 	default:
4196 		break;
4197 	}
4198 
4199 	/*
4200 	 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
4201 	 * tx rings[0] will use RTxQ[1].
4202 	 */
4203 	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
4204 		ixgbe_setup_ivar(ixgbe, 0, 0, 0);
4205 		ixgbe_setup_ivar(ixgbe, 0, 1, 1);
4206 		return;
4207 	}
4208 
4209 	/*
4210 	 * For MSI-X interrupt, "Other" is always on vector[0].
4211 	 */
4212 	ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);
4213 
4214 	/*
4215 	 * For each interrupt vector, populate the IVAR table
4216 	 */
4217 	for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
4218 		vect = &ixgbe->vect_map[v_idx];
4219 
4220 		/*
4221 		 * For each rx ring bit set
4222 		 */
4223 		r_idx = bt_getlowbit(vect->rx_map, 0,
4224 		    (ixgbe->num_rx_rings - 1));
4225 
4226 		while (r_idx >= 0) {
4227 			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 0);
4228 			r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
4229 			    (ixgbe->num_rx_rings - 1));
4230 		}
4231 
4232 		/*
4233 		 * For each tx ring bit set
4234 		 */
4235 		r_idx = bt_getlowbit(vect->tx_map, 0,
4236 		    (ixgbe->num_tx_rings - 1));
4237 
4238 		while (r_idx >= 0) {
4239 			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
4240 			r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
4241 			    (ixgbe->num_tx_rings - 1));
4242 		}
4243 	}
4244 }
4245 
4246 /*
4247  * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
4248  */
4249 static void
4250 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
4251 {
4252 	int i;
4253 	int rc;
4254 
4255 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4256 		rc = ddi_intr_remove_handler(ixgbe->htable[i]);
4257 		if (rc != DDI_SUCCESS) {
4258 			IXGBE_DEBUGLOG_1(ixgbe,
4259 			    "Remove intr handler failed: %d", rc);
4260 		}
4261 	}
4262 }
4263 
4264 /*
4265  * ixgbe_rem_intrs - Remove the allocated interrupts.
4266  */
4267 static void
4268 ixgbe_rem_intrs(ixgbe_t *ixgbe)
4269 {
4270 	int i;
4271 	int rc;
4272 
4273 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4274 		rc = ddi_intr_free(ixgbe->htable[i]);
4275 		if (rc != DDI_SUCCESS) {
4276 			IXGBE_DEBUGLOG_1(ixgbe,
4277 			    "Free intr failed: %d", rc);
4278 		}
4279 	}
4280 
4281 	kmem_free(ixgbe->htable, ixgbe->intr_size);
4282 	ixgbe->htable = NULL;
4283 }
4284 
4285 /*
4286  * ixgbe_enable_intrs - Enable all the ddi interrupts.
4287  */
4288 static int
4289 ixgbe_enable_intrs(ixgbe_t *ixgbe)
4290 {
4291 	int i;
4292 	int rc;
4293 
4294 	/*
4295 	 * Enable interrupts
4296 	 */
4297 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
4298 		/*
4299 		 * Call ddi_intr_block_enable() for MSI
4300 		 */
4301 		rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
4302 		if (rc != DDI_SUCCESS) {
4303 			ixgbe_log(ixgbe,
4304 			    "Enable block intr failed: %d", rc);
4305 			return (IXGBE_FAILURE);
4306 		}
4307 	} else {
4308 		/*
4309 		 * Call ddi_intr_enable() for Legacy/MSI non block enable
4310 		 */
4311 		for (i = 0; i < ixgbe->intr_cnt; i++) {
4312 			rc = ddi_intr_enable(ixgbe->htable[i]);
4313 			if (rc != DDI_SUCCESS) {
4314 				ixgbe_log(ixgbe,
4315 				    "Enable intr failed: %d", rc);
4316 				return (IXGBE_FAILURE);
4317 			}
4318 		}
4319 	}
4320 
4321 	return (IXGBE_SUCCESS);
4322 }
4323 
4324 /*
4325  * ixgbe_disable_intrs - Disable all the interrupts.
4326  */
4327 static int
4328 ixgbe_disable_intrs(ixgbe_t *ixgbe)
4329 {
4330 	int i;
4331 	int rc;
4332 
4333 	/*
4334 	 * Disable all interrupts
4335 	 */
4336 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
4337 		rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
4338 		if (rc != DDI_SUCCESS) {
4339 			ixgbe_log(ixgbe,
4340 			    "Disable block intr failed: %d", rc);
4341 			return (IXGBE_FAILURE);
4342 		}
4343 	} else {
4344 		for (i = 0; i < ixgbe->intr_cnt; i++) {
4345 			rc = ddi_intr_disable(ixgbe->htable[i]);
4346 			if (rc != DDI_SUCCESS) {
4347 				ixgbe_log(ixgbe,
4348 				    "Disable intr failed: %d", rc);
4349 				return (IXGBE_FAILURE);
4350 			}
4351 		}
4352 	}
4353 
4354 	return (IXGBE_SUCCESS);
4355 }
4356 
4357 /*
4358  * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
4359  */
4360 static void
4361 ixgbe_get_hw_state(ixgbe_t *ixgbe)
4362 {
4363 	struct ixgbe_hw *hw = &ixgbe->hw;
4364 	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
4365 	boolean_t link_up = B_FALSE;
4366 	uint32_t pcs1g_anlp = 0;
4367 	uint32_t pcs1g_ana = 0;
4368 
4369 	ASSERT(mutex_owned(&ixgbe->gen_lock));
4370 	ixgbe->param_lp_1000fdx_cap = 0;
4371 	ixgbe->param_lp_100fdx_cap  = 0;
4372 
4373 	/* check for link, don't wait */
4374 	(void) ixgbe_check_link(hw, &speed, &link_up, false);
4375 	if (link_up) {
4376 		pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
4377 		pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
4378 
4379 		ixgbe->param_lp_1000fdx_cap =
4380 		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
4381 		ixgbe->param_lp_100fdx_cap =
4382 		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
4383 	}
4384 
4385 	ixgbe->param_adv_1000fdx_cap =
4386 	    (pcs1g_ana & IXGBE_PCS1GANA_FDC)  ? 1 : 0;
4387 	ixgbe->param_adv_100fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC)  ? 1 : 0;
4388 }
4389 
4390 /*
4391  * ixgbe_get_driver_control - Notify that driver is in control of device.
4392  */
4393 static void
4394 ixgbe_get_driver_control(struct ixgbe_hw *hw)
4395 {
4396 	uint32_t ctrl_ext;
4397 
4398 	/*
4399 	 * Notify firmware that driver is in control of device
4400 	 */
4401 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4402 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
4403 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4404 }
4405 
4406 /*
4407  * ixgbe_release_driver_control - Notify that driver is no longer in control
4408  * of device.
4409  */
4410 static void
4411 ixgbe_release_driver_control(struct ixgbe_hw *hw)
4412 {
4413 	uint32_t ctrl_ext;
4414 
4415 	/*
4416 	 * Notify firmware that driver is no longer in control of device
4417 	 */
4418 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4419 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
4420 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4421 }
4422 
4423 /*
4424  * ixgbe_atomic_reserve - Atomic decrease operation.
4425  */
4426 int
4427 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
4428 {
4429 	uint32_t oldval;
4430 	uint32_t newval;
4431 
4432 	/*
4433 	 * ATOMICALLY
4434 	 */
4435 	do {
4436 		oldval = *count_p;
4437 		if (oldval < n)
4438 			return (-1);
4439 		newval = oldval - n;
4440 	} while (atomic_cas_32(count_p, oldval, newval) != oldval);
4441 
4442 	return (newval);
4443 }
4444 
4445 /*
4446  * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
4447  */
4448 static uint8_t *
4449 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
4450 {
4451 	uint8_t *addr = *upd_ptr;
4452 	uint8_t *new_ptr;
4453 
4454 	_NOTE(ARGUNUSED(hw));
4455 	_NOTE(ARGUNUSED(vmdq));
4456 
4457 	new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
4458 	*upd_ptr = new_ptr;
4459 	return (addr);
4460 }
4461 
4462 /*
4463  * FMA support
4464  */
4465 int
4466 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
4467 {
4468 	ddi_fm_error_t de;
4469 
4470 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
4471 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
4472 	return (de.fme_status);
4473 }
4474 
4475 int
4476 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
4477 {
4478 	ddi_fm_error_t de;
4479 
4480 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
4481 	return (de.fme_status);
4482 }
4483 
/*
 * ixgbe_fm_error_cb - The IO fault service error handling callback function.
 *
 * Registered via ddi_fm_handler_register() in ixgbe_fm_init(); posts a PCI
 * ereport for the fault and returns the error status to the fault service.
 */
static int
ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	_NOTE(ARGUNUSED(impl_data));
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
4498 
4499 static void
4500 ixgbe_fm_init(ixgbe_t *ixgbe)
4501 {
4502 	ddi_iblock_cookie_t iblk;
4503 	int fma_dma_flag;
4504 
4505 	/*
4506 	 * Only register with IO Fault Services if we have some capability
4507 	 */
4508 	if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
4509 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
4510 	} else {
4511 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4512 	}
4513 
4514 	if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
4515 		fma_dma_flag = 1;
4516 	} else {
4517 		fma_dma_flag = 0;
4518 	}
4519 
4520 	ixgbe_set_fma_flags(fma_dma_flag);
4521 
4522 	if (ixgbe->fm_capabilities) {
4523 
4524 		/*
4525 		 * Register capabilities with IO Fault Services
4526 		 */
4527 		ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
4528 
4529 		/*
4530 		 * Initialize pci ereport capabilities if ereport capable
4531 		 */
4532 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
4533 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4534 			pci_ereport_setup(ixgbe->dip);
4535 
4536 		/*
4537 		 * Register error callback if error callback capable
4538 		 */
4539 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4540 			ddi_fm_handler_register(ixgbe->dip,
4541 			    ixgbe_fm_error_cb, (void*) ixgbe);
4542 	}
4543 }
4544 
4545 static void
4546 ixgbe_fm_fini(ixgbe_t *ixgbe)
4547 {
4548 	/*
4549 	 * Only unregister FMA capabilities if they are registered
4550 	 */
4551 	if (ixgbe->fm_capabilities) {
4552 
4553 		/*
4554 		 * Release any resources allocated by pci_ereport_setup()
4555 		 */
4556 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
4557 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4558 			pci_ereport_teardown(ixgbe->dip);
4559 
4560 		/*
4561 		 * Un-register error callback if error callback capable
4562 		 */
4563 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4564 			ddi_fm_handler_unregister(ixgbe->dip);
4565 
4566 		/*
4567 		 * Unregister from IO Fault Service
4568 		 */
4569 		ddi_fm_fini(ixgbe->dip);
4570 	}
4571 }
4572 
4573 void
4574 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
4575 {
4576 	uint64_t ena;
4577 	char buf[FM_MAX_CLASS];
4578 
4579 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
4580 	ena = fm_ena_generate(0, FM_ENA_FMT1);
4581 	if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
4582 		ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
4583 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
4584 	}
4585 }
4586 
4587 static int
4588 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
4589 {
4590 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
4591 
4592 	mutex_enter(&rx_ring->rx_lock);
4593 	rx_ring->ring_gen_num = mr_gen_num;
4594 	mutex_exit(&rx_ring->rx_lock);
4595 	return (0);
4596 }
4597 
4598 /*
4599  * Callback funtion for MAC layer to register all rings.
4600  */
4601 /* ARGSUSED */
4602 void
4603 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
4604     const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
4605 {
4606 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
4607 	mac_intr_t *mintr = &infop->mri_intr;
4608 
4609 	switch (rtype) {
4610 	case MAC_RING_TYPE_RX: {
4611 		ASSERT(rg_index == 0);
4612 		ASSERT(ring_index < ixgbe->num_rx_rings);
4613 
4614 		ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[ring_index];
4615 		rx_ring->ring_handle = rh;
4616 
4617 		infop->mri_driver = (mac_ring_driver_t)rx_ring;
4618 		infop->mri_start = ixgbe_ring_start;
4619 		infop->mri_stop = NULL;
4620 		infop->mri_poll = ixgbe_ring_rx_poll;
4621 
4622 		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
4623 		mintr->mi_enable = ixgbe_rx_ring_intr_enable;
4624 		mintr->mi_disable = ixgbe_rx_ring_intr_disable;
4625 
4626 		break;
4627 	}
4628 	case MAC_RING_TYPE_TX: {
4629 		ASSERT(rg_index == -1);
4630 		ASSERT(ring_index < ixgbe->num_tx_rings);
4631 
4632 		ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
4633 		tx_ring->ring_handle = rh;
4634 
4635 		infop->mri_driver = (mac_ring_driver_t)tx_ring;
4636 		infop->mri_start = NULL;
4637 		infop->mri_stop = NULL;
4638 		infop->mri_tx = ixgbe_ring_tx;
4639 
4640 		break;
4641 	}
4642 	default:
4643 		break;
4644 	}
4645 }
4646 
4647 /*
4648  * Callback funtion for MAC layer to register all groups.
4649  */
4650 void
4651 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
4652     mac_group_info_t *infop, mac_group_handle_t gh)
4653 {
4654 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
4655 
4656 	switch (rtype) {
4657 	case MAC_RING_TYPE_RX: {
4658 		ixgbe_rx_group_t *rx_group;
4659 
4660 		rx_group = &ixgbe->rx_groups[index];
4661 		rx_group->group_handle = gh;
4662 
4663 		infop->mgi_driver = (mac_group_driver_t)rx_group;
4664 		infop->mgi_start = NULL;
4665 		infop->mgi_stop = NULL;
4666 		infop->mgi_addmac = ixgbe_addmac;
4667 		infop->mgi_remmac = ixgbe_remmac;
4668 		infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
4669 
4670 		break;
4671 	}
4672 	case MAC_RING_TYPE_TX:
4673 		break;
4674 	default:
4675 		break;
4676 	}
4677 }
4678 
4679 /*
4680  * Enable interrupt on the specificed rx ring.
4681  */
4682 int
4683 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
4684 {
4685 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
4686 	ixgbe_t *ixgbe = rx_ring->ixgbe;
4687 	int r_idx = rx_ring->index;
4688 	int v_idx = rx_ring->intr_vector;
4689 
4690 	mutex_enter(&ixgbe->gen_lock);
4691 	ASSERT(BT_TEST(ixgbe->vect_map[v_idx].rx_map, r_idx) == 0);
4692 
4693 	/*
4694 	 * To enable interrupt by setting the VAL bit of given interrupt
4695 	 * vector allocation register (IVAR).
4696 	 */
4697 	ixgbe_enable_ivar(ixgbe, r_idx, 0);
4698 
4699 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
4700 
4701 	/*
4702 	 * To trigger a Rx interrupt to on this ring
4703 	 */
4704 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
4705 	IXGBE_WRITE_FLUSH(&ixgbe->hw);
4706 
4707 	mutex_exit(&ixgbe->gen_lock);
4708 
4709 	return (0);
4710 }
4711 
4712 /*
4713  * Disable interrupt on the specificed rx ring.
4714  */
4715 int
4716 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
4717 {
4718 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
4719 	ixgbe_t *ixgbe = rx_ring->ixgbe;
4720 	int r_idx = rx_ring->index;
4721 	int v_idx = rx_ring->intr_vector;
4722 
4723 	mutex_enter(&ixgbe->gen_lock);
4724 	ASSERT(BT_TEST(ixgbe->vect_map[v_idx].rx_map, r_idx) == 1);
4725 
4726 	/*
4727 	 * To disable interrupt by clearing the VAL bit of given interrupt
4728 	 * vector allocation register (IVAR).
4729 	 */
4730 	ixgbe_disable_ivar(ixgbe, r_idx, 0);
4731 
4732 	BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
4733 
4734 	mutex_exit(&ixgbe->gen_lock);
4735 
4736 	return (0);
4737 }
4738 
4739 /*
4740  * Add a mac address.
4741  */
4742 static int
4743 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
4744 {
4745 	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
4746 	ixgbe_t *ixgbe = rx_group->ixgbe;
4747 	int slot;
4748 	int err;
4749 
4750 	mutex_enter(&ixgbe->gen_lock);
4751 
4752 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4753 		mutex_exit(&ixgbe->gen_lock);
4754 		return (ECANCELED);
4755 	}
4756 
4757 	if (ixgbe->unicst_avail == 0) {
4758 		/* no slots available */
4759 		mutex_exit(&ixgbe->gen_lock);
4760 		return (ENOSPC);
4761 	}
4762 
4763 	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
4764 		if (ixgbe->unicst_addr[slot].mac.set == 0)
4765 			break;
4766 	}
4767 
4768 	ASSERT((slot >= 0) && (slot < ixgbe->unicst_total));
4769 
4770 	if ((err = ixgbe_unicst_set(ixgbe, mac_addr, slot)) == 0) {
4771 		ixgbe->unicst_addr[slot].mac.set = 1;
4772 		ixgbe->unicst_avail--;
4773 	}
4774 
4775 	mutex_exit(&ixgbe->gen_lock);
4776 
4777 	return (err);
4778 }
4779 
4780 /*
4781  * Remove a mac address.
4782  */
4783 static int
4784 ixgbe_remmac(void *arg, const uint8_t *mac_addr)
4785 {
4786 	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
4787 	ixgbe_t *ixgbe = rx_group->ixgbe;
4788 	int slot;
4789 	int err;
4790 
4791 	mutex_enter(&ixgbe->gen_lock);
4792 
4793 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4794 		mutex_exit(&ixgbe->gen_lock);
4795 		return (ECANCELED);
4796 	}
4797 
4798 	slot = ixgbe_unicst_find(ixgbe, mac_addr);
4799 	if (slot == -1) {
4800 		mutex_exit(&ixgbe->gen_lock);
4801 		return (EINVAL);
4802 	}
4803 
4804 	if (ixgbe->unicst_addr[slot].mac.set == 0) {
4805 		mutex_exit(&ixgbe->gen_lock);
4806 		return (EINVAL);
4807 	}
4808 
4809 	bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
4810 	if ((err = ixgbe_unicst_set(ixgbe,
4811 	    ixgbe->unicst_addr[slot].mac.addr, slot)) == 0) {
4812 		ixgbe->unicst_addr[slot].mac.set = 0;
4813 		ixgbe->unicst_avail++;
4814 	}
4815 
4816 	mutex_exit(&ixgbe->gen_lock);
4817 
4818 	return (err);
4819 }
4820