xref: /titanic_41/usr/src/uts/common/io/ixgbe/ixgbe_main.c (revision c0b6434526a837cec6e2665caf2f98240fe517c9)
1 /*
2  * CDDL HEADER START
3  *
4  * Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
5  * The contents of this file are subject to the terms of the
6  * Common Development and Distribution License (the "License").
7  * You may not use this file except in compliance with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 
23 /*
24  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
25  * Use is subject to license terms.
26  */
27 
28 #include "ixgbe_sw.h"
29 
30 static char ident[] = "Intel 10Gb Ethernet";
31 
32 /*
33  * Local function prototypes
34  */
35 static int ixgbe_register_mac(ixgbe_t *);
36 static int ixgbe_identify_hardware(ixgbe_t *);
37 static int ixgbe_regs_map(ixgbe_t *);
38 static void ixgbe_init_properties(ixgbe_t *);
39 static int ixgbe_init_driver_settings(ixgbe_t *);
40 static void ixgbe_init_locks(ixgbe_t *);
41 static void ixgbe_destroy_locks(ixgbe_t *);
42 static int ixgbe_init(ixgbe_t *);
43 static int ixgbe_chip_start(ixgbe_t *);
44 static void ixgbe_chip_stop(ixgbe_t *);
45 static int ixgbe_reset(ixgbe_t *);
46 static void ixgbe_tx_clean(ixgbe_t *);
47 static boolean_t ixgbe_tx_drain(ixgbe_t *);
48 static boolean_t ixgbe_rx_drain(ixgbe_t *);
49 static int ixgbe_alloc_rings(ixgbe_t *);
50 static int ixgbe_init_rings(ixgbe_t *);
51 static void ixgbe_free_rings(ixgbe_t *);
52 static void ixgbe_fini_rings(ixgbe_t *);
53 static void ixgbe_setup_rings(ixgbe_t *);
54 static void ixgbe_setup_rx(ixgbe_t *);
55 static void ixgbe_setup_tx(ixgbe_t *);
56 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
57 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
58 static void ixgbe_setup_rss(ixgbe_t *);
59 static void ixgbe_init_unicst(ixgbe_t *);
60 static int ixgbe_unicst_set(ixgbe_t *, const uint8_t *, int);
61 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
62 static void ixgbe_setup_multicst(ixgbe_t *);
63 static void ixgbe_get_hw_state(ixgbe_t *);
64 static void ixgbe_get_conf(ixgbe_t *);
65 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
66 static void ixgbe_driver_link_check(void *);
67 static void ixgbe_sfp_check(void *);
68 static void ixgbe_local_timer(void *);
69 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
70 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
71 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
72 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
73 static boolean_t is_valid_mac_addr(uint8_t *);
74 static boolean_t ixgbe_stall_check(ixgbe_t *);
75 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
76 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
77 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
78 static int ixgbe_alloc_intrs(ixgbe_t *);
79 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
80 static int ixgbe_add_intr_handlers(ixgbe_t *);
81 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
82 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
83 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
84 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
85 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
86 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
87 static void ixgbe_setup_adapter_vector(ixgbe_t *);
88 static void ixgbe_rem_intr_handlers(ixgbe_t *);
89 static void ixgbe_rem_intrs(ixgbe_t *);
90 static int ixgbe_enable_intrs(ixgbe_t *);
91 static int ixgbe_disable_intrs(ixgbe_t *);
92 static uint_t ixgbe_intr_legacy(void *, void *);
93 static uint_t ixgbe_intr_msi(void *, void *);
94 static uint_t ixgbe_intr_msix(void *, void *);
95 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
96 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
97 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
98 static void ixgbe_get_driver_control(struct ixgbe_hw *);
99 static int ixgbe_addmac(void *, const uint8_t *);
100 static int ixgbe_remmac(void *, const uint8_t *);
101 static void ixgbe_release_driver_control(struct ixgbe_hw *);
102 
103 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
104 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
105 static int ixgbe_resume(dev_info_t *);
106 static int ixgbe_suspend(dev_info_t *);
107 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
108 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
109 
110 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
111     const void *impl_data);
112 static void ixgbe_fm_init(ixgbe_t *);
113 static void ixgbe_fm_fini(ixgbe_t *);
114 
115 static struct cb_ops ixgbe_cb_ops = {
116 	nulldev,		/* cb_open */
117 	nulldev,		/* cb_close */
118 	nodev,			/* cb_strategy */
119 	nodev,			/* cb_print */
120 	nodev,			/* cb_dump */
121 	nodev,			/* cb_read */
122 	nodev,			/* cb_write */
123 	nodev,			/* cb_ioctl */
124 	nodev,			/* cb_devmap */
125 	nodev,			/* cb_mmap */
126 	nodev,			/* cb_segmap */
127 	nochpoll,		/* cb_chpoll */
128 	ddi_prop_op,		/* cb_prop_op */
129 	NULL,			/* cb_stream */
130 	D_MP | D_HOTPLUG,	/* cb_flag */
131 	CB_REV,			/* cb_rev */
132 	nodev,			/* cb_aread */
133 	nodev			/* cb_awrite */
134 };
135 
136 static struct dev_ops ixgbe_dev_ops = {
137 	DEVO_REV,		/* devo_rev */
138 	0,			/* devo_refcnt */
139 	NULL,			/* devo_getinfo */
140 	nulldev,		/* devo_identify */
141 	nulldev,		/* devo_probe */
142 	ixgbe_attach,		/* devo_attach */
143 	ixgbe_detach,		/* devo_detach */
144 	nodev,			/* devo_reset */
145 	&ixgbe_cb_ops,		/* devo_cb_ops */
146 	NULL,			/* devo_bus_ops */
147 	ddi_power,		/* devo_power */
148 	ddi_quiesce_not_supported,	/* devo_quiesce */
149 };
150 
151 static struct modldrv ixgbe_modldrv = {
152 	&mod_driverops,		/* Type of module.  This one is a driver */
152 	ident,			/* Description string */
154 	&ixgbe_dev_ops		/* driver ops */
155 };
156 
157 static struct modlinkage ixgbe_modlinkage = {
158 	MODREV_1, &ixgbe_modldrv, NULL
159 };
160 
161 /*
162  * Access attributes for register mapping
163  */
164 ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
165 	DDI_DEVICE_ATTR_V0,
166 	DDI_STRUCTURE_LE_ACC,
167 	DDI_STRICTORDER_ACC,
168 	DDI_FLAGERR_ACC
169 };
170 
171 /*
172  * Loopback property
173  */
174 static lb_property_t lb_normal = {
175 	normal,	"normal", IXGBE_LB_NONE
176 };
177 
178 static lb_property_t lb_mac = {
179 	internal, "MAC", IXGBE_LB_INTERNAL_MAC
180 };
181 
182 #define	IXGBE_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
183 
184 static mac_callbacks_t ixgbe_m_callbacks = {
185 	IXGBE_M_CALLBACK_FLAGS,
186 	ixgbe_m_stat,
187 	ixgbe_m_start,
188 	ixgbe_m_stop,
189 	ixgbe_m_promisc,
190 	ixgbe_m_multicst,
191 	NULL,
192 	NULL,
193 	ixgbe_m_ioctl,
194 	ixgbe_m_getcapab
195 };
196 
197 /*
198  * Initialize capabilities of each supported adapter type
199  */
200 static adapter_info_t ixgbe_82598eb_cap = {
201 	64,		/* maximum number of rx queues */
202 	1,		/* minimum number of rx queues */
203 	8,		/* default number of rx queues */
204 	32,		/* maximum number of tx queues */
205 	1,		/* minimum number of tx queues */
206 	8,		/* default number of tx queues */
207 	18,		/* maximum total msix vectors */
208 	16,		/* maximum number of ring vectors */
209 	2,		/* maximum number of other vectors */
210 	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
211 	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
212 	| IXGBE_FLAG_RSS_CAPABLE
213 	| IXGBE_FLAG_VMDQ_CAPABLE)
214 };
215 
216 static adapter_info_t ixgbe_82599eb_cap = {
217 	128,		/* maximum number of rx queues */
218 	1,		/* minimum number of rx queues */
219 	8,		/* default number of rx queues */
220 	128,		/* maximum number of tx queues */
221 	1,		/* minimum number of tx queues */
222 	8,		/* default number of tx queues */
223 	64,		/* maximum total msix vectors */
224 	16,		/* maximum number of ring vectors */
225 	2,		/* maximum number of other vectors */
226 	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
227 	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
228 	| IXGBE_FLAG_RSS_CAPABLE
229 	| IXGBE_FLAG_VMDQ_CAPABLE)
230 };
231 
232 /*
233  * Module Initialization Functions.
234  */
235 
236 int
237 _init(void)
238 {
239 	int status;
240 
241 	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
242 
243 	status = mod_install(&ixgbe_modlinkage);
244 
245 	if (status != DDI_SUCCESS) {
246 		mac_fini_ops(&ixgbe_dev_ops);
247 	}
248 
249 	return (status);
250 }
251 
252 int
253 _fini(void)
254 {
255 	int status;
256 
257 	status = mod_remove(&ixgbe_modlinkage);
258 
259 	if (status == DDI_SUCCESS) {
260 		mac_fini_ops(&ixgbe_dev_ops);
261 	}
262 
263 	return (status);
264 }
265 
266 int
267 _info(struct modinfo *modinfop)
268 {
269 	int status;
270 
271 	status = mod_info(&ixgbe_modlinkage, modinfop);
272 
273 	return (status);
274 }
275 
276 /*
277  * ixgbe_attach - Driver attach.
278  *
279  * This function is the device specific initialization entry
280  * point. This entry point is required and must be written.
281  * The DDI_ATTACH command must be provided in the attach entry
282  * point. When attach() is called with cmd set to DDI_ATTACH,
283  * all normal kernel services (such as kmem_alloc(9F)) are
284  * available for use by the driver.
285  *
286  * The attach() function will be called once for each instance
287  * of  the  device  on  the  system with cmd set to DDI_ATTACH.
288  * Until attach() succeeds, the only driver entry points which
289  * may be called are open(9E) and getinfo(9E).
290  */
291 static int
292 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
293 {
294 	ixgbe_t *ixgbe;
295 	struct ixgbe_osdep *osdep;
296 	struct ixgbe_hw *hw;
297 	int instance;
298 	char taskqname[32];
299 
300 	/*
301 	 * Check the command and perform corresponding operations
302 	 */
303 	switch (cmd) {
304 	default:
305 		return (DDI_FAILURE);
306 
307 	case DDI_RESUME:
308 		return (ixgbe_resume(devinfo));
309 
310 	case DDI_ATTACH:
311 		break;
312 	}
313 
314 	/* Get the device instance */
315 	instance = ddi_get_instance(devinfo);
316 
317 	/* Allocate memory for the instance data structure */
318 	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
319 
320 	ixgbe->dip = devinfo;
321 	ixgbe->instance = instance;
322 
323 	hw = &ixgbe->hw;
324 	osdep = &ixgbe->osdep;
325 	hw->back = osdep;
326 	osdep->ixgbe = ixgbe;
327 
328 	/* Attach the instance pointer to the dev_info data structure */
329 	ddi_set_driver_private(devinfo, ixgbe);
330 
331 	/*
332 	 * Initialize for fma support
333 	 */
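	/*
	 * ixgbe_get_prop() takes the property name followed by its
	 * minimum, maximum and default values; any ixgbe.conf setting
	 * is bounded to [0, 0x0f] here, and the default enables all
	 * four FM capabilities.
	 */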
334 	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
335 	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
336 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
337 	ixgbe_fm_init(ixgbe);
338 	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
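	/*
	 * Each ATTACH_PROGRESS_* bit records one completed attach step,
	 * so that ixgbe_unconfigure() can tear down exactly what was
	 * set up if attach fails midway.
	 */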
339 
340 	/*
341 	 * Map PCI config space registers
342 	 */
343 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
344 		ixgbe_error(ixgbe, "Failed to map PCI configurations");
345 		goto attach_fail;
346 	}
347 	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
348 
349 	/*
350 	 * Identify the chipset family
351 	 */
352 	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
353 		ixgbe_error(ixgbe, "Failed to identify hardware");
354 		goto attach_fail;
355 	}
356 
357 	/*
358 	 * Map device registers
359 	 */
360 	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
361 		ixgbe_error(ixgbe, "Failed to map device registers");
362 		goto attach_fail;
363 	}
364 	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
365 
366 	/*
367 	 * Initialize driver parameters
368 	 */
369 	ixgbe_init_properties(ixgbe);
370 	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
371 
372 	/*
373 	 * Allocate interrupts
374 	 */
375 	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
376 		ixgbe_error(ixgbe, "Failed to allocate interrupts");
377 		goto attach_fail;
378 	}
379 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
380 
381 	/*
382 	 * Allocate rx/tx rings based on the ring numbers.
383 	 * The actual number of rx/tx rings is decided by the number of
384 	 * allocated interrupt vectors, so the rings must be allocated
385 	 * after interrupts have been allocated.
386 	 */
387 	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
388 		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
389 		goto attach_fail;
390 	}
391 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
392 
393 	/*
394 	 * Map rings to interrupt vectors
395 	 */
396 	if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
397 		ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
398 		goto attach_fail;
399 	}
400 
401 	/*
402 	 * Add interrupt handlers
403 	 */
404 	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
405 		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
406 		goto attach_fail;
407 	}
408 	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
409 
410 	/*
411 	 * Create a taskq for link-status-change
412 	 */
413 	(void) sprintf(taskqname, "ixgbe%d_taskq", instance);
414 	if ((ixgbe->lsc_taskq = ddi_taskq_create(devinfo, taskqname,
415 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
416 		ixgbe_error(ixgbe, "taskq_create failed");
417 		goto attach_fail;
418 	}
419 	ixgbe->attach_progress |= ATTACH_PROGRESS_LSC_TASKQ;
420 
421 	/*
422 	 * Initialize driver parameters
423 	 */
424 	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
425 		ixgbe_error(ixgbe, "Failed to initialize driver settings");
426 		goto attach_fail;
427 	}
428 
429 	/*
430 	 * Initialize mutexes for this device.
431 	 * Do this before enabling the interrupt handler and
432 	 * registering the softint, to avoid the condition where an
433 	 * interrupt handler tries to use an uninitialized mutex.
434 	 */
435 	ixgbe_init_locks(ixgbe);
436 	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
437 
438 	/*
439 	 * Initialize chipset hardware
440 	 */
441 	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
442 		ixgbe_error(ixgbe, "Failed to initialize adapter");
443 		goto attach_fail;
444 	}
445 	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
446 
447 	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
448 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
449 		goto attach_fail;
450 	}
451 
452 	/*
453 	 * Initialize DMA and hardware settings for rx/tx rings
454 	 */
455 	if (ixgbe_init_rings(ixgbe) != IXGBE_SUCCESS) {
456 		ixgbe_error(ixgbe, "Failed to initialize rings");
457 		goto attach_fail;
458 	}
459 	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT_RINGS;
460 
461 	/*
462 	 * Initialize statistics
463 	 */
464 	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
465 		ixgbe_error(ixgbe, "Failed to initialize statistics");
466 		goto attach_fail;
467 	}
468 	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
469 
470 	/*
471 	 * Initialize NDD parameters
472 	 */
473 	if (ixgbe_nd_init(ixgbe) != IXGBE_SUCCESS) {
474 		ixgbe_error(ixgbe, "Failed to initialize ndd");
475 		goto attach_fail;
476 	}
477 	ixgbe->attach_progress |= ATTACH_PROGRESS_NDD;
478 
479 	/*
480 	 * Register the driver to the MAC
481 	 */
482 	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
483 		ixgbe_error(ixgbe, "Failed to register MAC");
484 		goto attach_fail;
485 	}
486 	mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
487 	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
488 
489 	/*
490 	 * Now that mutex locks are initialized, and the chip is also
491 	 * initialized, enable interrupts.
492 	 */
493 	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
494 		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
495 		goto attach_fail;
496 	}
497 	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
498 
499 	ixgbe->ixgbe_state |= IXGBE_INITIALIZED;
500 
501 	return (DDI_SUCCESS);
502 
503 attach_fail:
504 	ixgbe_unconfigure(devinfo, ixgbe);
505 	return (DDI_FAILURE);
506 }
507 
508 /*
509  * ixgbe_detach - Driver detach.
510  *
511  * The detach() function is the complement of the attach routine.
512  * If cmd is set to DDI_DETACH, detach() is used to remove  the
513  * state  associated  with  a  given  instance of a device node
514  * prior to the removal of that instance from the system.
515  *
516  * The detach() function will be called once for each  instance
517  * of the device for which there has been a successful attach()
518  * once there are no longer  any  opens  on  the  device.
519  *
520  * Interrupt routines are disabled and all memory allocated by this
521  * driver is freed.
522  */
523 static int
524 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
525 {
526 	ixgbe_t *ixgbe;
527 
528 	/*
529 	 * Check detach command
530 	 */
531 	switch (cmd) {
532 	default:
533 		return (DDI_FAILURE);
534 
535 	case DDI_SUSPEND:
536 		return (ixgbe_suspend(devinfo));
537 
538 	case DDI_DETACH:
539 		break;
540 	}
541 
542 
543 	/*
544 	 * Get the pointer to the driver private data structure
545 	 */
546 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
547 	if (ixgbe == NULL)
548 		return (DDI_FAILURE);
549 
550 	/*
551 	 * Unregister the MAC. If this fails, we must fail the detach.
552 	 */
553 	if (mac_unregister(ixgbe->mac_hdl) != 0) {
554 		ixgbe_error(ixgbe, "Failed to unregister MAC");
555 		return (DDI_FAILURE);
556 	}
557 	ixgbe->attach_progress &= ~ATTACH_PROGRESS_MAC;
558 
559 	/*
560 	 * If the device is still running, it needs to be stopped first.
561 	 * This check is necessary because under some specific circumstances,
562 	 * the detach routine can be called without stopping the interface
563 	 * first.
564 	 */
565 	mutex_enter(&ixgbe->gen_lock);
566 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
567 		ixgbe->ixgbe_state &= ~IXGBE_STARTED;
568 		ixgbe_stop(ixgbe);
569 		mutex_exit(&ixgbe->gen_lock);
570 		/* Disable and stop the watchdog timer */
571 		ixgbe_disable_watchdog_timer(ixgbe);
572 	} else
573 		mutex_exit(&ixgbe->gen_lock);
574 
575 	/*
576 	 * Check if there are still rx buffers held by the upper layer.
577 	 * If so, fail the detach.
578 	 */
579 	if (!ixgbe_rx_drain(ixgbe))
580 		return (DDI_FAILURE);
581 
582 	/*
583 	 * Do the remaining unconfigure routines
584 	 */
585 	ixgbe_unconfigure(devinfo, ixgbe);
586 
587 	return (DDI_SUCCESS);
588 }
589 
590 static void
591 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
592 {
593 	/*
594 	 * Disable interrupt
595 	 */
596 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
597 		(void) ixgbe_disable_intrs(ixgbe);
598 	}
599 
600 	/*
601 	 * Unregister MAC
602 	 */
603 	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
604 		(void) mac_unregister(ixgbe->mac_hdl);
605 	}
606 
607 	/*
608 	 * Free ndd parameters
609 	 */
610 	if (ixgbe->attach_progress & ATTACH_PROGRESS_NDD) {
611 		ixgbe_nd_cleanup(ixgbe);
612 	}
613 
614 	/*
615 	 * Free statistics
616 	 */
617 	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
618 		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
619 	}
620 
621 	/*
622 	 * Remove interrupt handlers
623 	 */
624 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
625 		ixgbe_rem_intr_handlers(ixgbe);
626 	}
627 
628 	/*
629 	 * Remove taskq for link-status-change
630 	 */
631 	if (ixgbe->attach_progress & ATTACH_PROGRESS_LSC_TASKQ) {
632 		ddi_taskq_destroy(ixgbe->lsc_taskq);
633 	}
634 
635 	/*
636 	 * Remove interrupts
637 	 */
638 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
639 		ixgbe_rem_intrs(ixgbe);
640 	}
641 
642 	/*
643 	 * Remove driver properties
644 	 */
645 	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
646 		(void) ddi_prop_remove_all(devinfo);
647 	}
648 
649 	/*
650 	 * Release the DMA resources of rx/tx rings
651 	 */
652 	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT_RINGS) {
653 		ixgbe_fini_rings(ixgbe);
654 	}
655 
656 	/*
657 	 * Stop the chipset
658 	 */
659 	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
660 		mutex_enter(&ixgbe->gen_lock);
661 		ixgbe_chip_stop(ixgbe);
662 		mutex_exit(&ixgbe->gen_lock);
663 	}
664 
665 	/*
666 	 * Free register handle
667 	 */
668 	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
669 		if (ixgbe->osdep.reg_handle != NULL)
670 			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
671 	}
672 
673 	/*
674 	 * Free PCI config handle
675 	 */
676 	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
677 		if (ixgbe->osdep.cfg_handle != NULL)
678 			pci_config_teardown(&ixgbe->osdep.cfg_handle);
679 	}
680 
681 	/*
682 	 * Free locks
683 	 */
684 	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
685 		ixgbe_destroy_locks(ixgbe);
686 	}
687 
688 	/*
689 	 * Free the rx/tx rings
690 	 */
691 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
692 		ixgbe_free_rings(ixgbe);
693 	}
694 
695 	/*
696 	 * Unregister FMA capabilities
697 	 */
698 	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
699 		ixgbe_fm_fini(ixgbe);
700 	}
701 
702 	/*
703 	 * Free the driver data structure
704 	 */
705 	kmem_free(ixgbe, sizeof (ixgbe_t));
706 
707 	ddi_set_driver_private(devinfo, NULL);
708 }
709 
710 /*
711  * ixgbe_register_mac - Register the driver and its function pointers with
712  * the GLD interface.
713  */
714 static int
715 ixgbe_register_mac(ixgbe_t *ixgbe)
716 {
717 	struct ixgbe_hw *hw = &ixgbe->hw;
718 	mac_register_t *mac;
719 	int status;
720 
721 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
722 		return (IXGBE_FAILURE);
723 
724 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
725 	mac->m_driver = ixgbe;
726 	mac->m_dip = ixgbe->dip;
727 	mac->m_src_addr = hw->mac.addr;
728 	mac->m_callbacks = &ixgbe_m_callbacks;
729 	mac->m_min_sdu = 0;
730 	mac->m_max_sdu = ixgbe->default_mtu;
731 	mac->m_margin = VLAN_TAGSZ;
732 	mac->m_v12n = MAC_VIRT_LEVEL1;
733 
734 	status = mac_register(mac, &ixgbe->mac_hdl);
735 
736 	mac_free(mac);
737 
738 	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
739 }
740 
741 /*
742  * ixgbe_identify_hardware - Identify the type of the chipset.
743  */
744 static int
745 ixgbe_identify_hardware(ixgbe_t *ixgbe)
746 {
747 	struct ixgbe_hw *hw = &ixgbe->hw;
748 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
749 
750 	/*
751 	 * Get the device id
752 	 */
753 	hw->vendor_id =
754 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
755 	hw->device_id =
756 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
757 	hw->revision_id =
758 	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
759 	hw->subsystem_device_id =
760 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
761 	hw->subsystem_vendor_id =
762 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
763 
764 	/*
765 	 * Set the mac type of the adapter based on the device id
766 	 */
767 	if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
768 		return (IXGBE_FAILURE);
769 	}
770 
771 	/*
772 	 * Install adapter capabilities
773 	 */
774 	switch (hw->mac.type) {
775 	case ixgbe_mac_82598EB:
776 		ixgbe_log(ixgbe, "identify 82598 adapter\n");
777 		ixgbe->capab = &ixgbe_82598eb_cap;
778 
779 		if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
780 			ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
781 			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
782 		}
783 		ixgbe->capab->other_intr |= IXGBE_EICR_LSC;
784 
785 		break;
786 	case ixgbe_mac_82599EB:
787 		ixgbe_log(ixgbe, "identify 82599 adapter\n");
788 		ixgbe->capab = &ixgbe_82599eb_cap;
789 
790 		ixgbe->capab->other_intr = (IXGBE_EICR_GPI_SDP1 |
791 		    IXGBE_EICR_GPI_SDP2 | IXGBE_EICR_LSC);
792 
793 		break;
794 	default:
795 		ixgbe_log(ixgbe,
796 		    "adapter not supported in ixgbe_identify_hardware(): %d\n",
797 		    hw->mac.type);
798 		return (IXGBE_FAILURE);
799 	}
800 
801 	return (IXGBE_SUCCESS);
802 }
803 
804 /*
805  * ixgbe_regs_map - Map the device registers.
806  */
808 static int
809 ixgbe_regs_map(ixgbe_t *ixgbe)
810 {
811 	dev_info_t *devinfo = ixgbe->dip;
812 	struct ixgbe_hw *hw = &ixgbe->hw;
813 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
814 	off_t mem_size;
815 
816 	/*
817 	 * First get the size of device registers to be mapped.
818 	 */
819 	if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
820 	    != DDI_SUCCESS) {
821 		return (IXGBE_FAILURE);
822 	}
823 
824 	/*
825 	 * Call ddi_regs_map_setup() to map registers
826 	 */
827 	if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
828 	    (caddr_t *)&hw->hw_addr, 0,
829 	    mem_size, &ixgbe_regs_acc_attr,
830 	    &osdep->reg_handle)) != DDI_SUCCESS) {
831 		return (IXGBE_FAILURE);
832 	}
833 
834 	return (IXGBE_SUCCESS);
835 }
836 
837 /*
838  * ixgbe_init_properties - Initialize driver properties.
839  */
840 static void
841 ixgbe_init_properties(ixgbe_t *ixgbe)
842 {
843 	/*
844 	 * Get conf file properties, including link settings,
845 	 * jumbo frames, ring number, descriptor number, etc.
846 	 */
847 	ixgbe_get_conf(ixgbe);
848 }
849 
850 /*
851  * ixgbe_init_driver_settings - Initialize driver settings.
852  *
853  * The settings include hardware function pointers, bus information,
854  * rx/tx rings settings, link state, and any other parameters that
855  * need to be setup during driver initialization.
856  */
857 static int
858 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
859 {
860 	struct ixgbe_hw *hw = &ixgbe->hw;
861 	dev_info_t *devinfo = ixgbe->dip;
862 	ixgbe_rx_ring_t *rx_ring;
863 	ixgbe_tx_ring_t *tx_ring;
864 	uint32_t rx_size;
865 	uint32_t tx_size;
866 	int i;
867 
868 	/*
869 	 * Initialize chipset specific hardware function pointers
870 	 */
871 	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
872 		return (IXGBE_FAILURE);
873 	}
874 
875 	/*
876 	 * Get the system page size
877 	 */
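	/* ddi_ptob(9F) converts a page count to bytes. */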
878 	ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);
879 
880 	/*
881 	 * Set rx buffer size
882 	 *
883 	 * The IP header alignment room is counted in the calculation.
884 	 * The rx buffer size is in unit of 1K that is required by the
885 	 * chipset hardware.
886 	 */
887 	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
888 	ixgbe->rx_buf_size = ((rx_size >> 10) +
889 	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
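	/*
	 * Example: the default 1500-byte MTU gives a max frame size of
	 * 1518 bytes; adding the IP header alignment room and rounding
	 * up to the next 1K boundary yields a 2K rx buffer.
	 */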
890 
891 	/*
892 	 * Set tx buffer size
893 	 */
894 	tx_size = ixgbe->max_frame_size;
895 	ixgbe->tx_buf_size = ((tx_size >> 10) +
896 	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
897 
898 	/*
899 	 * Initialize rx/tx rings parameters
900 	 */
901 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
902 		rx_ring = &ixgbe->rx_rings[i];
903 		rx_ring->index = i;
904 		rx_ring->ixgbe = ixgbe;
905 
906 		rx_ring->ring_size = ixgbe->rx_ring_size;
907 		rx_ring->free_list_size = ixgbe->rx_ring_size;
908 		rx_ring->copy_thresh = ixgbe->rx_copy_thresh;
909 		rx_ring->limit_per_intr = ixgbe->rx_limit_per_intr;
910 	}
911 
912 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
913 		tx_ring = &ixgbe->tx_rings[i];
914 		tx_ring->index = i;
915 		tx_ring->ixgbe = ixgbe;
916 		if (ixgbe->tx_head_wb_enable)
917 			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
918 		else
919 			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
920 
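		/*
		 * The free list below is sized to 1.5x the descriptor
		 * ring, giving headroom for control blocks still waiting
		 * to be recycled while new packets are being queued.
		 */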
921 		tx_ring->ring_size = ixgbe->tx_ring_size;
922 		tx_ring->free_list_size = ixgbe->tx_ring_size +
923 		    (ixgbe->tx_ring_size >> 1);
924 		tx_ring->copy_thresh = ixgbe->tx_copy_thresh;
925 		tx_ring->recycle_thresh = ixgbe->tx_recycle_thresh;
926 		tx_ring->overload_thresh = ixgbe->tx_overload_thresh;
927 		tx_ring->resched_thresh = ixgbe->tx_resched_thresh;
928 	}
929 
930 	/*
931 	 * Initialize values of interrupt throttling rate
932 	 */
933 	for (i = 1; i < MAX_INTR_VECTOR; i++)
934 		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
935 
936 	/*
937 	 * The initial link state should be "unknown"
938 	 */
939 	ixgbe->link_state = LINK_STATE_UNKNOWN;
940 
941 	return (IXGBE_SUCCESS);
942 }
943 
944 /*
945  * ixgbe_init_locks - Initialize locks.
946  */
947 static void
948 ixgbe_init_locks(ixgbe_t *ixgbe)
949 {
950 	ixgbe_rx_ring_t *rx_ring;
951 	ixgbe_tx_ring_t *tx_ring;
952 	int i;
953 
954 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
955 		rx_ring = &ixgbe->rx_rings[i];
956 		mutex_init(&rx_ring->rx_lock, NULL,
957 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
958 		mutex_init(&rx_ring->recycle_lock, NULL,
959 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
960 	}
961 
962 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
963 		tx_ring = &ixgbe->tx_rings[i];
964 		mutex_init(&tx_ring->tx_lock, NULL,
965 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
966 		mutex_init(&tx_ring->recycle_lock, NULL,
967 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
968 		mutex_init(&tx_ring->tcb_head_lock, NULL,
969 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
970 		mutex_init(&tx_ring->tcb_tail_lock, NULL,
971 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
972 	}
973 
974 	mutex_init(&ixgbe->gen_lock, NULL,
975 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
976 
977 	mutex_init(&ixgbe->watchdog_lock, NULL,
978 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
979 }
980 
981 /*
982  * ixgbe_destroy_locks - Destroy locks.
983  */
984 static void
985 ixgbe_destroy_locks(ixgbe_t *ixgbe)
986 {
987 	ixgbe_rx_ring_t *rx_ring;
988 	ixgbe_tx_ring_t *tx_ring;
989 	int i;
990 
991 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
992 		rx_ring = &ixgbe->rx_rings[i];
993 		mutex_destroy(&rx_ring->rx_lock);
994 		mutex_destroy(&rx_ring->recycle_lock);
995 	}
996 
997 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
998 		tx_ring = &ixgbe->tx_rings[i];
999 		mutex_destroy(&tx_ring->tx_lock);
1000 		mutex_destroy(&tx_ring->recycle_lock);
1001 		mutex_destroy(&tx_ring->tcb_head_lock);
1002 		mutex_destroy(&tx_ring->tcb_tail_lock);
1003 	}
1004 
1005 	mutex_destroy(&ixgbe->gen_lock);
1006 	mutex_destroy(&ixgbe->watchdog_lock);
1007 }
1008 
1009 static int
1010 ixgbe_resume(dev_info_t *devinfo)
1011 {
1012 	ixgbe_t *ixgbe;
1013 
1014 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1015 	if (ixgbe == NULL)
1016 		return (DDI_FAILURE);
1017 
1018 	mutex_enter(&ixgbe->gen_lock);
1019 
1020 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1021 		if (ixgbe_start(ixgbe) != IXGBE_SUCCESS) {
1022 			mutex_exit(&ixgbe->gen_lock);
1023 			return (DDI_FAILURE);
1024 		}
1025 
1026 		/*
1027 		 * Enable and start the watchdog timer
1028 		 */
1029 		ixgbe_enable_watchdog_timer(ixgbe);
1030 	}
1031 
1032 	ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
1033 
1034 	mutex_exit(&ixgbe->gen_lock);
1035 
1036 	return (DDI_SUCCESS);
1037 }
1038 
1039 static int
1040 ixgbe_suspend(dev_info_t *devinfo)
1041 {
1042 	ixgbe_t *ixgbe;
1043 
1044 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1045 	if (ixgbe == NULL)
1046 		return (DDI_FAILURE);
1047 
1048 	mutex_enter(&ixgbe->gen_lock);
1049 
1050 	ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
1051 
1052 	ixgbe_stop(ixgbe);
1053 
1054 	mutex_exit(&ixgbe->gen_lock);
1055 
1056 	/*
1057 	 * Disable and stop the watchdog timer
1058 	 */
1059 	ixgbe_disable_watchdog_timer(ixgbe);
1060 
1061 	return (DDI_SUCCESS);
1062 }
1063 
1064 /*
1065  * ixgbe_init - Initialize the device.
1066  */
1067 static int
1068 ixgbe_init(ixgbe_t *ixgbe)
1069 {
1070 	struct ixgbe_hw *hw = &ixgbe->hw;
1071 
1072 	mutex_enter(&ixgbe->gen_lock);
1073 
1074 	/*
1075 	 * Reset chipset to put the hardware in a known state
1076 	 * before we try to do anything with the eeprom.
1077 	 */
1078 	if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
1079 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1080 		goto init_fail;
1081 	}
1082 
1083 	/*
1084 	 * Need to init eeprom before validating the checksum.
1085 	 */
1086 	if (ixgbe_init_eeprom_params(hw) < 0) {
1087 		ixgbe_error(ixgbe,
1088 		    "Unable to initialize the eeprom interface.");
1089 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1090 		goto init_fail;
1091 	}
1092 
1093 	/*
1094 	 * NVM validation
1095 	 */
1096 	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1097 		/*
1098 		 * Some PCI-E parts fail the first check due to
1099 		 * the link being in sleep state.  Call it again;
1100 		 * if it fails a second time, it's a real issue.
1101 		 */
1102 		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1103 			ixgbe_error(ixgbe,
1104 			    "Invalid NVM checksum. Please contact "
1105 			    "the vendor to update the NVM.");
1106 			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1107 			goto init_fail;
1108 		}
1109 	}
1110 
1111 	/*
1112 	 * Setup default flow control thresholds - whether flow control
1113 	 * is enabled, and its type, are controlled by ixgbe.conf.
1114 	 */
1115 	hw->fc.high_water = DEFAULT_FCRTH;
1116 	hw->fc.low_water = DEFAULT_FCRTL;
1117 	hw->fc.pause_time = DEFAULT_FCPAUSE;
1118 	hw->fc.send_xon = B_TRUE;
1119 
1120 	/*
1121 	 * Don't wait for auto-negotiation to complete
1122 	 */
1123 	hw->phy.autoneg_wait_to_complete = B_FALSE;
1124 
1125 	/*
1126 	 * Initialize link settings
1127 	 */
1128 	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1129 
1130 	/*
1131 	 * Initialize the chipset hardware
1132 	 */
1133 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1134 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1135 		goto init_fail;
1136 	}
1137 
1138 	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
1139 		goto init_fail;
1140 	}
1141 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1142 		goto init_fail;
1143 	}
1144 
1145 	mutex_exit(&ixgbe->gen_lock);
1146 	return (IXGBE_SUCCESS);
1147 
1148 init_fail:
1149 	/*
1150 	 * Reset PHY
1151 	 */
1152 	(void) ixgbe_reset_phy(hw);
1153 
1154 	mutex_exit(&ixgbe->gen_lock);
1155 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1156 	return (IXGBE_FAILURE);
1157 }
1158 
1159 /*
1160  * ixgbe_init_rings - Allocate DMA resources for all rx/tx rings and
1161  * initialize relevant hardware settings.
1162  */
1163 static int
1164 ixgbe_init_rings(ixgbe_t *ixgbe)
1165 {
1166 	int i;
1167 
1168 	/*
1169 	 * Allocate buffers for all the rx/tx rings
1170 	 */
1171 	if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS)
1172 		return (IXGBE_FAILURE);
1173 
1174 	/*
1175 	 * Setup the rx/tx rings
1176 	 */
1177 	mutex_enter(&ixgbe->gen_lock);
1178 
1179 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1180 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1181 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1182 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1183 
1184 	ixgbe_setup_rings(ixgbe);
1185 
1186 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1187 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1188 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1189 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1190 
1191 	mutex_exit(&ixgbe->gen_lock);
1192 
1193 	return (IXGBE_SUCCESS);
1194 }
1195 
1196 /*
1197  * ixgbe_fini_rings - Release DMA resources of all rx/tx rings.
1198  */
1199 static void
1200 ixgbe_fini_rings(ixgbe_t *ixgbe)
1201 {
1202 	/*
1203 	 * Release the DMA/memory resources of rx/tx rings
1204 	 */
1205 	ixgbe_free_dma(ixgbe);
1206 }
1207 
1208 /*
1209  * ixgbe_chip_start - Initialize and start the chipset hardware.
1210  */
1211 static int
1212 ixgbe_chip_start(ixgbe_t *ixgbe)
1213 {
1214 	struct ixgbe_hw *hw = &ixgbe->hw;
1215 	int ret_val, i;
1216 
1217 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1218 
1219 	/*
1220 	 * Get the mac address
1221 	 * This function should handle the SPARC case correctly.
1222 	 */
1223 	if (!ixgbe_find_mac_address(ixgbe)) {
1224 		ixgbe_error(ixgbe, "Failed to get the mac address");
1225 		return (IXGBE_FAILURE);
1226 	}
1227 
1228 	/*
1229 	 * Validate the mac address
1230 	 */
1231 	(void) ixgbe_init_rx_addrs(hw);
1232 	if (!is_valid_mac_addr(hw->mac.addr)) {
1233 		ixgbe_error(ixgbe, "Invalid mac address");
1234 		return (IXGBE_FAILURE);
1235 	}
1236 
1237 	/*
1238 	 * Configure/Initialize hardware
1239 	 */
1240 	ret_val = ixgbe_init_hw(hw);
1241 	if (ret_val != IXGBE_SUCCESS) {
1242 		if (ret_val == IXGBE_ERR_EEPROM_VERSION) {
1243 			ixgbe_error(ixgbe,
1244 			    "This 82599 device is pre-release and contains"
1245 			    " outdated firmware, please contact your hardware"
1246 			    " vendor for a replacement.");
1247 		} else {
1248 			ixgbe_error(ixgbe, "Failed to initialize hardware");
1249 			return (IXGBE_FAILURE);
1250 		}
1251 	}
1252 
1253 	/*
1254 	 * Setup adapter interrupt vectors
1255 	 */
1256 	ixgbe_setup_adapter_vector(ixgbe);
1257 
1258 	/*
1259 	 * Initialize unicast addresses.
1260 	 */
1261 	ixgbe_init_unicst(ixgbe);
1262 
1263 	/*
1264 	 * Setup and initialize the mctable structures.
1265 	 */
1266 	ixgbe_setup_multicst(ixgbe);
1267 
1268 	/*
1269 	 * Set interrupt throttling rate
1270 	 */
1271 	for (i = 0; i < ixgbe->intr_cnt; i++) {
1272 		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
1273 	}
1274 
1275 	/*
1276 	 * Save the state of the phy
1277 	 */
1278 	ixgbe_get_hw_state(ixgbe);
1279 
1280 	/*
1281 	 * Make sure driver has control
1282 	 */
1283 	ixgbe_get_driver_control(hw);
1284 
1285 	return (IXGBE_SUCCESS);
1286 }
1287 
1288 /*
1289  * ixgbe_chip_stop - Stop the chipset hardware
1290  */
1291 static void
1292 ixgbe_chip_stop(ixgbe_t *ixgbe)
1293 {
1294 	struct ixgbe_hw *hw = &ixgbe->hw;
1295 
1296 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1297 
1298 	/*
1299 	 * Tell firmware driver is no longer in control
1300 	 */
1301 	ixgbe_release_driver_control(hw);
1302 
1303 	/*
1304 	 * Reset the chipset
1305 	 */
1306 	(void) ixgbe_reset_hw(hw);
1307 
1308 	/*
1309 	 * Reset PHY
1310 	 */
1311 	(void) ixgbe_reset_phy(hw);
1312 }
1313 
1314 /*
1315  * ixgbe_reset - Reset the chipset and re-start the driver.
1316  *
1317  * It involves stopping and re-starting the chipset,
1318  * and re-configuring the rx/tx rings.
1319  */
1320 static int
1321 ixgbe_reset(ixgbe_t *ixgbe)
1322 {
1323 	int i;
1324 
1325 	mutex_enter(&ixgbe->gen_lock);
1326 
1327 	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
1328 	ixgbe->ixgbe_state &= ~IXGBE_STARTED;
1329 
1330 	/*
1331 	 * Disable the adapter interrupts to stop any rx/tx activities
1332 	 * before draining pending data and resetting hardware.
1333 	 */
1334 	ixgbe_disable_adapter_interrupts(ixgbe);
1335 
1336 	/*
1337 	 * Drain the pending transmit packets
1338 	 */
1339 	(void) ixgbe_tx_drain(ixgbe);
1340 
1341 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1342 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1343 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1344 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1345 
1346 	/*
1347 	 * Stop the chipset hardware
1348 	 */
1349 	ixgbe_chip_stop(ixgbe);
1350 
1351 	/*
1352 	 * Clean the pending tx data/resources
1353 	 */
1354 	ixgbe_tx_clean(ixgbe);
1355 
1356 	/*
1357 	 * Start the chipset hardware
1358 	 */
1359 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1360 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1361 		goto reset_failure;
1362 	}
1363 
1364 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1365 		goto reset_failure;
1366 	}
1367 
1368 	/*
1369 	 * Setup the rx/tx rings
1370 	 */
1371 	ixgbe_setup_rings(ixgbe);
1372 
1373 	/*
1374 	 * Enable adapter interrupts
1375 	 * The interrupts must be enabled after the driver state is START
1376 	 */
1377 	ixgbe_enable_adapter_interrupts(ixgbe);
1378 
1379 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1380 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1381 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1382 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1383 
1384 	ixgbe->ixgbe_state |= IXGBE_STARTED;
1385 	mutex_exit(&ixgbe->gen_lock);
1386 
1387 	return (IXGBE_SUCCESS);
1388 
1389 reset_failure:
1390 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1391 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1392 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1393 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1394 
1395 	mutex_exit(&ixgbe->gen_lock);
1396 
1397 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1398 
1399 	return (IXGBE_FAILURE);
1400 }
1401 
1402 /*
1403  * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
1404  */
1405 static void
1406 ixgbe_tx_clean(ixgbe_t *ixgbe)
1407 {
1408 	ixgbe_tx_ring_t *tx_ring;
1409 	tx_control_block_t *tcb;
1410 	link_list_t pending_list;
1411 	uint32_t desc_num;
1412 	int i, j;
1413 
1414 	LINK_LIST_INIT(&pending_list);
1415 
1416 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1417 		tx_ring = &ixgbe->tx_rings[i];
1418 
1419 		mutex_enter(&tx_ring->recycle_lock);
1420 
1421 		/*
1422 		 * Clean the pending tx data - the packets left in the
1423 		 * work_list that will never be transmitted.
1424 		 *
1425 		 * We must ensure the chipset is stopped or the link is down
1426 		 * before cleaning the transmit packets.
1427 		 */
1428 		desc_num = 0;
1429 		for (j = 0; j < tx_ring->ring_size; j++) {
1430 			tcb = tx_ring->work_list[j];
1431 			if (tcb != NULL) {
1432 				desc_num += tcb->desc_num;
1433 
1434 				tx_ring->work_list[j] = NULL;
1435 
1436 				ixgbe_free_tcb(tcb);
1437 
1438 				LIST_PUSH_TAIL(&pending_list, &tcb->link);
1439 			}
1440 		}
1441 
1442 		if (desc_num > 0) {
1443 			atomic_add_32(&tx_ring->tbd_free, desc_num);
1444 			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
1445 
1446 			/*
1447 			 * Reset the head and tail pointers of the tbd ring;
1448 			 * Reset the writeback head if it's enabled.
1449 			 */
1450 			tx_ring->tbd_head = 0;
1451 			tx_ring->tbd_tail = 0;
1452 			if (ixgbe->tx_head_wb_enable)
1453 				*tx_ring->tbd_head_wb = 0;
1454 
1455 			IXGBE_WRITE_REG(&ixgbe->hw,
1456 			    IXGBE_TDH(tx_ring->index), 0);
1457 			IXGBE_WRITE_REG(&ixgbe->hw,
1458 			    IXGBE_TDT(tx_ring->index), 0);
1459 		}
1460 
1461 		mutex_exit(&tx_ring->recycle_lock);
1462 
1463 		/*
1464 		 * Add the tx control blocks in the pending list to
1465 		 * the free list.
1466 		 */
1467 		ixgbe_put_free_list(tx_ring, &pending_list);
1468 	}
1469 }
1470 
1471 /*
1472  * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1473  * transmitted.
1474  */
1475 static boolean_t
1476 ixgbe_tx_drain(ixgbe_t *ixgbe)
1477 {
1478 	ixgbe_tx_ring_t *tx_ring;
1479 	boolean_t done;
1480 	int i, j;
1481 
1482 	/*
1483 	 * Wait for a specific time to allow pending tx packets
1484 	 * to be transmitted.
1485 	 *
1486 	 * Check the counter tbd_free to see if transmission is done.
1487 	 * No lock protection is needed here.
1488 	 *
1489 	 * Return B_TRUE if all pending packets have been transmitted;
1490 	 * Otherwise return B_FALSE;
1491 	 */
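	/*
	 * Each unsuccessful pass below sleeps for 1 ms, so TX_DRAIN_TIME
	 * bounds the total wait in milliseconds.
	 */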
1492 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1493 
1494 		done = B_TRUE;
1495 		for (j = 0; j < ixgbe->num_tx_rings; j++) {
1496 			tx_ring = &ixgbe->tx_rings[j];
1497 			done = done &&
1498 			    (tx_ring->tbd_free == tx_ring->ring_size);
1499 		}
1500 
1501 		if (done)
1502 			break;
1503 
1504 		msec_delay(1);
1505 	}
1506 
1507 	return (done);
1508 }
1509 
1510 /*
1511  * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1512  */
1513 static boolean_t
1514 ixgbe_rx_drain(ixgbe_t *ixgbe)
1515 {
1516 	ixgbe_rx_ring_t *rx_ring;
1517 	boolean_t done;
1518 	int i, j;
1519 
1520 	/*
1521 	 * Poll the rx free list to check whether the rx buffers held by
1522 	 * the upper layer have been released.
1523 	 *
1524 	 * Check the counter rcb_free to see if all pending buffers are
1525 	 * released. No lock protection is needed here.
1526 	 *
1527 	 * Return B_TRUE if all pending buffers have been released;
1528 	 * Otherwise return B_FALSE;
1529 	 */
1530 	for (i = 0; i < RX_DRAIN_TIME; i++) {
1531 
1532 		done = B_TRUE;
1533 		for (j = 0; j < ixgbe->num_rx_rings; j++) {
1534 			rx_ring = &ixgbe->rx_rings[j];
1535 			done = done &&
1536 			    (rx_ring->rcb_free == rx_ring->free_list_size);
1537 		}
1538 
1539 		if (done)
1540 			break;
1541 
1542 		msec_delay(1);
1543 	}
1544 
1545 	return (done);
1546 }
1547 
1548 /*
1549  * ixgbe_start - Start the driver/chipset.
1550  */
1551 int
1552 ixgbe_start(ixgbe_t *ixgbe)
1553 {
1554 	int i;
1555 
1556 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1557 
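	/*
	 * Lock ordering: gen_lock is already held; all rx ring locks are
	 * taken before the tx ring locks, and both sets are released in
	 * reverse order below.
	 */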
1558 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1559 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1560 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1561 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1562 
1563 	/*
1564 	 * Start the chipset hardware
1565 	 */
1566 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1567 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1568 		goto start_failure;
1569 	}
1570 
1571 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1572 		goto start_failure;
1573 	}
1574 
1575 	/*
1576 	 * Setup the rx/tx rings
1577 	 */
1578 	ixgbe_setup_rings(ixgbe);
1579 
1580 	/*
1581 	 * Enable adapter interrupts
1582 	 * The interrupts must be enabled after the driver state is START
1583 	 */
1584 	ixgbe_enable_adapter_interrupts(ixgbe);
1585 
1586 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1587 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1588 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1589 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1590 
1591 	return (IXGBE_SUCCESS);
1592 
1593 start_failure:
1594 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1595 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1596 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1597 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1598 
1599 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1600 
1601 	return (IXGBE_FAILURE);
1602 }
1603 
1604 /*
1605  * ixgbe_stop - Stop the driver/chipset.
1606  */
1607 void
1608 ixgbe_stop(ixgbe_t *ixgbe)
1609 {
1610 	int i;
1611 
1612 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1613 
1614 	/*
1615 	 * Disable the adapter interrupts
1616 	 */
1617 	ixgbe_disable_adapter_interrupts(ixgbe);
1618 
1619 	/*
1620 	 * Drain the pending tx packets
1621 	 */
1622 	(void) ixgbe_tx_drain(ixgbe);
1623 
1624 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1625 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1626 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1627 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1628 
1629 	/*
1630 	 * Stop the chipset hardware
1631 	 */
1632 	ixgbe_chip_stop(ixgbe);
1633 
1634 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1635 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1636 	}
1637 
1638 	/*
1639 	 * Clean the pending tx data/resources
1640 	 */
1641 	ixgbe_tx_clean(ixgbe);
1642 
1643 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1644 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1645 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1646 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1647 }
1648 
1649 /*
1650  * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
1651  */
1652 static int
1653 ixgbe_alloc_rings(ixgbe_t *ixgbe)
1654 {
1655 	/*
1656 	 * Allocate memory space for rx rings
1657 	 */
1658 	ixgbe->rx_rings = kmem_zalloc(
1659 	    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
1660 	    KM_NOSLEEP);
1661 
1662 	if (ixgbe->rx_rings == NULL) {
1663 		return (IXGBE_FAILURE);
1664 	}
1665 
1666 	/*
1667 	 * Allocate memory space for tx rings
1668 	 */
1669 	ixgbe->tx_rings = kmem_zalloc(
1670 	    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
1671 	    KM_NOSLEEP);
1672 
1673 	if (ixgbe->tx_rings == NULL) {
1674 		kmem_free(ixgbe->rx_rings,
1675 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1676 		ixgbe->rx_rings = NULL;
1677 		return (IXGBE_FAILURE);
1678 	}
1679 
1680 	/*
1681 	 * Allocate memory space for rx ring groups
1682 	 */
1683 	ixgbe->rx_groups = kmem_zalloc(
1684 	    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
1685 	    KM_NOSLEEP);
1686 
1687 	if (ixgbe->rx_groups == NULL) {
1688 		kmem_free(ixgbe->rx_rings,
1689 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1690 		kmem_free(ixgbe->tx_rings,
1691 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1692 		ixgbe->rx_rings = NULL;
1693 		ixgbe->tx_rings = NULL;
1694 		return (IXGBE_FAILURE);
1695 	}
1696 
1697 	return (IXGBE_SUCCESS);
1698 }
1699 
1700 /*
1701  * ixgbe_free_rings - Free the memory space of rx/tx rings.
1702  */
1703 static void
1704 ixgbe_free_rings(ixgbe_t *ixgbe)
1705 {
1706 	if (ixgbe->rx_rings != NULL) {
1707 		kmem_free(ixgbe->rx_rings,
1708 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1709 		ixgbe->rx_rings = NULL;
1710 	}
1711 
1712 	if (ixgbe->tx_rings != NULL) {
1713 		kmem_free(ixgbe->tx_rings,
1714 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1715 		ixgbe->tx_rings = NULL;
1716 	}
1717 
1718 	if (ixgbe->rx_groups != NULL) {
1719 		kmem_free(ixgbe->rx_groups,
1720 		    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
1721 		ixgbe->rx_groups = NULL;
1722 	}
1723 }
1724 
1725 /*
1726  * ixgbe_setup_rings - Setup rx/tx rings.
1727  */
1728 static void
1729 ixgbe_setup_rings(ixgbe_t *ixgbe)
1730 {
1731 	/*
1732 	 * Setup the rx/tx rings, including the following:
1733 	 *
1734 	 * 1. Setup the descriptor ring and the control block buffers;
1735 	 * 2. Initialize necessary registers for receive/transmit;
1736 	 * 3. Initialize software pointers/parameters for receive/transmit;
1737 	 */
1738 	ixgbe_setup_rx(ixgbe);
1739 
1740 	ixgbe_setup_tx(ixgbe);
1741 }
1742 
1743 static void
1744 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
1745 {
1746 	ixgbe_t *ixgbe = rx_ring->ixgbe;
1747 	struct ixgbe_hw *hw = &ixgbe->hw;
1748 	rx_control_block_t *rcb;
1749 	union ixgbe_adv_rx_desc	*rbd;
1750 	uint32_t size;
1751 	uint32_t buf_low;
1752 	uint32_t buf_high;
1753 	uint32_t reg_val;
1754 	int i;
1755 
1756 	ASSERT(mutex_owned(&rx_ring->rx_lock));
1757 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1758 
1759 	for (i = 0; i < ixgbe->rx_ring_size; i++) {
1760 		rcb = rx_ring->work_list[i];
1761 		rbd = &rx_ring->rbd_ring[i];
1762 
1763 		rbd->read.pkt_addr = rcb->rx_buf.dma_address;
1764 		rbd->read.hdr_addr = NULL;
1765 	}
1766 
1767 	/*
1768 	 * Initialize the length register
1769 	 */
1770 	size = rx_ring->ring_size * sizeof (union ixgbe_adv_rx_desc);
1771 	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->index), size);
1772 
1773 	/*
1774 	 * Initialize the base address registers
1775 	 */
1776 	buf_low = (uint32_t)rx_ring->rbd_area.dma_address;
1777 	buf_high = (uint32_t)(rx_ring->rbd_area.dma_address >> 32);
1778 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->index), buf_high);
1779 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->index), buf_low);
1780 
1781 	/*
1782 	 * Setup head & tail pointers
1783 	 */
1784 	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->index), rx_ring->ring_size - 1);
1785 	IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->index), 0);
1786 
1787 	rx_ring->rbd_next = 0;
1788 
1789 	/*
1790 	 * Note: Considering the case that the chipset is being reset
1791 	 * and there are still some buffers held by the upper layer,
1792 	 * we should not reset the values of rcb_head, rcb_tail and
1793 	 * rcb_free if the state is not IXGBE_UNKNOWN.
1794 	 */
1795 	if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
1796 		rx_ring->rcb_head = 0;
1797 		rx_ring->rcb_tail = 0;
1798 		rx_ring->rcb_free = rx_ring->free_list_size;
1799 	}
1800 
1801 	/*
1802 	 * Setup the Receive Descriptor Control Register (RXDCTL)
1803 	 * PTHRESH=32 descriptors (half the internal cache)
1804 	 * HTHRESH=0 descriptors (to minimize latency on fetch)
1805 	 * WTHRESH defaults to 1 (writeback each descriptor)
1806 	 */
1807 	reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->index));
1808 	reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */
1809 
1810 	/* Not a valid value for 82599 */
1811 	if (hw->mac.type < ixgbe_mac_82599EB) {
1812 		reg_val |= 0x0020;	/* pthresh */
1813 	}
1814 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->index), reg_val);
1815 
1816 	if (hw->mac.type == ixgbe_mac_82599EB) {
1817 		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1818 		reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
1819 		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
1820 	}
1821 
1822 	/*
1823 	 * Setup the Split and Replication Receive Control Register.
1824 	 * Set the rx buffer size and the advanced descriptor type.
1825 	 */
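	/*
	 * BSIZEPKT is expressed in 1K units, so e.g. a 2K rx buffer
	 * programs a field value of 2 via the shift below.
	 */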
1826 	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
1827 	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1828 	reg_val |= IXGBE_SRRCTL_DROP_EN;
1829 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->index), reg_val);
1830 }
1831 
1832 static void
1833 ixgbe_setup_rx(ixgbe_t *ixgbe)
1834 {
1835 	ixgbe_rx_ring_t *rx_ring;
1836 	struct ixgbe_hw *hw = &ixgbe->hw;
1837 	ixgbe_rx_group_t *rx_group;
1838 	uint32_t reg_val;
1839 	uint32_t ring_mapping;
1840 	int i;
1841 
1842 	/* PSRTYPE must be configured for 82599 */
1843 	reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1844 	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
1845 #define	IXGBE_PSRTYPE_L2_PKT	0x00001000
1846 	reg_val |= IXGBE_PSRTYPE_L2_PKT;
1847 	reg_val |= 0xE0000000;
1848 	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
1849 
1850 	/*
1851 	 * Set filter control in FCTRL to accept broadcast packets and do
1852 	 * not pass pause frames to host.  Flow control settings are already
1853 	 * in this register, so preserve them.
1854 	 */
1855 	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1856 	reg_val |= IXGBE_FCTRL_BAM;	/* broadcast accept mode */
1857 	reg_val |= IXGBE_FCTRL_DPF;	/* discard pause frames */
1858 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
1859 
1860 	/*
1861 	 * Enable the receive unit.  This must be done after filter
1862 	 * control is set in FCTRL.
1863 	 */
1864 	reg_val = (IXGBE_RXCTRL_RXEN	/* Enable Receive Unit */
1865 	    | IXGBE_RXCTRL_DMBYPS);	/* descriptor monitor bypass */
1866 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
1867 
1868 	/*
1869 	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
1870 	 */
1871 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1872 		rx_ring = &ixgbe->rx_rings[i];
1873 		ixgbe_setup_rx_ring(rx_ring);
1874 	}
1875 
1876 	/*
1877 	 * Setup rx groups.
1878 	 */
1879 	for (i = 0; i < ixgbe->num_rx_groups; i++) {
1880 		rx_group = &ixgbe->rx_groups[i];
1881 		rx_group->index = i;
1882 		rx_group->ixgbe = ixgbe;
1883 	}
1884 
1885 	/*
1886 	 * Setup the per-ring statistics mapping.
1887 	 */
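	/*
	 * Each RQSMR register holds four mappings, one byte per ring;
	 * e.g. rings 0-3 pack into RQSMR(0) as 0x03020100.
	 */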
1888 	ring_mapping = 0;
1889 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1890 		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
1891 		if ((i & 0x3) == 0x3) {
1892 			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i >> 2), ring_mapping);
1893 			ring_mapping = 0;
1894 		}
1895 	}
1896 	if ((i & 0x3) != 0x3)
1897 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i >> 2), ring_mapping);
1898 
1899 	/*
1900 	 * The Max Frame Size in MHADD/MAXFRS is internally increased by
1901 	 * four bytes when the packet has a VLAN field, so the value set
1902 	 * here covers the MTU, ethernet header and frame check sequence.
1903 	 * The register is named MAXFRS on the 82599.
1904 	 */
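	/*
	 * E.g. the default 1500-byte MTU programs a max frame size of
	 * 1500 + 14 + 4 = 1518 bytes into the MFS field.
	 */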
1905 	reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
1906 	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
1907 	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
1908 
1909 	/*
1910 	 * Setup Jumbo Frame enable bit
1911 	 */
1912 	if (ixgbe->default_mtu > ETHERMTU) {
1913 		reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1914 		reg_val |= IXGBE_HLREG0_JUMBOEN;
1915 		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
1916 	}
1917 
1918 	/*
1919 	 * Hardware checksum settings
1920 	 */
1921 	if (ixgbe->rx_hcksum_enable) {
1922 		reg_val = IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
1923 		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
1924 	}
1925 
1926 	/*
1927 	 * Setup RSS for multiple receive queues
1928 	 */
1929 	if (ixgbe->num_rx_rings > 1)
1930 		ixgbe_setup_rss(ixgbe);
1931 }
1932 
1933 static void
1934 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
1935 {
1936 	ixgbe_t *ixgbe = tx_ring->ixgbe;
1937 	struct ixgbe_hw *hw = &ixgbe->hw;
1938 	uint32_t size;
1939 	uint32_t buf_low;
1940 	uint32_t buf_high;
1941 	uint32_t reg_val;
1942 
1943 	ASSERT(mutex_owned(&tx_ring->tx_lock));
1944 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1945 
1946 	/*
1947 	 * Initialize the length register
1948 	 */
1949 	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
1950 	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);
1951 
1952 	/*
1953 	 * Initialize the base address registers
1954 	 */
1955 	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
1956 	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
1957 	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
1958 	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);
1959 
1960 	/*
1961 	 * Setup head & tail pointers
1962 	 */
1963 	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
1964 	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);
1965 
1966 	/*
1967 	 * Setup head write-back
1968 	 */
1969 	if (ixgbe->tx_head_wb_enable) {
1970 		/*
1971 		 * The memory of the head write-back is allocated using
1972 		 * the extra tbd beyond the tail of the tbd ring.
1973 		 */
1974 		tx_ring->tbd_head_wb = (uint32_t *)
1975 		    ((uintptr_t)tx_ring->tbd_area.address + size);
1976 		*tx_ring->tbd_head_wb = 0;
1977 
1978 		buf_low = (uint32_t)
1979 		    (tx_ring->tbd_area.dma_address + size);
1980 		buf_high = (uint32_t)
1981 		    ((tx_ring->tbd_area.dma_address + size) >> 32);
1982 
1983 		/* Set the head write-back enable bit */
1984 		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
1985 
1986 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
1987 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
1988 
1989 		/*
1990 		 * Turn off relaxed ordering for head write back or it will
1991 		 * cause problems with the tx recycling
1992 		 */
1993 		reg_val = IXGBE_READ_REG(hw,
1994 		    IXGBE_DCA_TXCTRL(tx_ring->index));
1995 		reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1996 		IXGBE_WRITE_REG(hw,
1997 		    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
1998 	} else {
1999 		tx_ring->tbd_head_wb = NULL;
2000 	}
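	/*
	 * A sketch of the layout assumed above: the tbd DMA area holds
	 * ring_size descriptors plus one spare descriptor-sized slot at
	 * offset 'size'; the hardware DMA-writes the completed head index
	 * into that slot, so tx recycling can poll host memory through
	 * tbd_head_wb instead of reading the TDH register.
	 */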
2001 
2002 	tx_ring->tbd_head = 0;
2003 	tx_ring->tbd_tail = 0;
2004 	tx_ring->tbd_free = tx_ring->ring_size;
2005 
2006 	/*
2007 	 * Note: when the chipset is being reset, some tcbs may still be
2008 	 * on the pending list, so the values of tcb_head, tcb_tail and
2009 	 * tcb_free must not be reset unless the state is IXGBE_UNKNOWN
2010 	 * (first-time initialization).
2011 	 */
2012 	if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
2013 		tx_ring->tcb_head = 0;
2014 		tx_ring->tcb_tail = 0;
2015 		tx_ring->tcb_free = tx_ring->free_list_size;
2016 	}
2017 
2018 	/*
2019 	 * Initialize the s/w context structure
2020 	 */
2021 	bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2022 }
2023 
2024 static void
2025 ixgbe_setup_tx(ixgbe_t *ixgbe)
2026 {
2027 	struct ixgbe_hw *hw = &ixgbe->hw;
2028 	ixgbe_tx_ring_t *tx_ring;
2029 	uint32_t reg_val;
2030 	uint32_t ring_mapping;
2031 	int i;
2032 
2033 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2034 		tx_ring = &ixgbe->tx_rings[i];
2035 		ixgbe_setup_tx_ring(tx_ring);
2036 	}
2037 
2038 	/*
2039 	 * Setup the per-ring statistics mapping.
2040 	 */
2041 	ring_mapping = 0;
2042 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2043 		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2044 		if ((i & 0x3) == 0x3) {
2045 			if (hw->mac.type >= ixgbe_mac_82599EB) {
2046 				IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2047 				    ring_mapping);
2048 			} else {
2049 				IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2050 				    ring_mapping);
2051 			}
2052 			ring_mapping = 0;
2053 		}
2054 	}
2055 	if ((i & 0x3) != 0x3) {
2056 		if (hw->mac.type >= ixgbe_mac_82599EB) {
2057 			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2058 		} else {
2059 			IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2060 		}
2061 	}
2061 
2062 	/*
2063 	 * Enable CRC appending and TX padding (for short tx frames)
2064 	 */
2065 	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2066 	reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2067 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2068 
2069 	/*
2070 	 * enable DMA for 82599 parts
2071 	 */
2072 	if (hw->mac.type == ixgbe_mac_82599EB) {
2073 		/* DMATXCTL.TE must be set after all Tx config is complete */
2074 		reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2075 		reg_val |= IXGBE_DMATXCTL_TE;
2076 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2077 	}
2078 
2079 	/*
2080 	 * Enable the tx queues.
2081 	 * For 82599, this must be done after DMATXCTL.TE is set.
2082 	 */
2083 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2084 		tx_ring = &ixgbe->tx_rings[i];
2085 		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2086 		reg_val |= IXGBE_TXDCTL_ENABLE;
2087 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2088 	}
2089 }
2090 
2091 /*
2092  * ixgbe_setup_rss - Setup receive-side scaling feature.
2093  */
2094 static void
2095 ixgbe_setup_rss(ixgbe_t *ixgbe)
2096 {
2097 	struct ixgbe_hw *hw = &ixgbe->hw;
2098 	uint32_t i, mrqc, rxcsum;
2099 	uint32_t random;
2100 	uint32_t reta;
2101 
2102 	/*
2103 	 * Fill out redirection table
2104 	 */
2105 	reta = 0;
2106 	for (i = 0; i < 128; i++) {
2107 		reta = (reta << 8) | (i % ixgbe->num_rx_rings);
2108 		if ((i & 3) == 3)
2109 			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2110 	}
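	/*
	 * A worked example of the fill above (a sketch): with 4 rx rings,
	 * each group of four table entries packs most-significant-byte
	 * first, e.g. entries 0-3 become
	 * (((0 << 8 | 1) << 8 | 2) << 8 | 3) == 0x00010203 in RETA(0),
	 * so the 128-entry table cycles through rings 0,1,2,3.
	 */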
2111 
2112 	/*
2113 	 * Fill out the RSS hash function seed registers (RSSRK) with random values
2114 	 */
2115 	for (i = 0; i < 10; i++) {
2116 		(void) random_get_pseudo_bytes((uint8_t *)&random,
2117 		    sizeof (uint32_t));
2118 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2119 	}
2120 
2121 	/*
2122 	 * Enable RSS & perform hash on these packet types
2123 	 */
2124 	mrqc = IXGBE_MRQC_RSSEN |
2125 	    IXGBE_MRQC_RSS_FIELD_IPV4 |
2126 	    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2127 	    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2128 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2129 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2130 	    IXGBE_MRQC_RSS_FIELD_IPV6 |
2131 	    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2132 	    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2133 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2134 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2135 
2136 	/*
2137 	 * Disable Packet Checksum to enable RSS for multiple receive queues.
2138 	 * It is an adapter hardware limitation that Packet Checksum is
2139 	 * mutually exclusive with RSS.
2140 	 */
2141 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2142 	rxcsum |= IXGBE_RXCSUM_PCSD;
2143 	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2144 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2145 }
2146 
2147 /*
2148  * ixgbe_init_unicst - Initialize the unicast addresses.
2149  */
2150 static void
2151 ixgbe_init_unicst(ixgbe_t *ixgbe)
2152 {
2153 	struct ixgbe_hw *hw = &ixgbe->hw;
2154 	uint8_t *mac_addr;
2155 	int slot;
2156 	/*
2157 	 * Here we should consider two situations:
2158 	 *
2159 	 * 1. Chipset is initialized for the first time:
2160 	 *    clear all the multiple unicast addresses.
2161 	 *
2162 	 * 2. Chipset is reset:
2163 	 *    recover the multiple unicast addresses from the
2164 	 *    software data structure to the RAR registers.
2165 	 */
2166 	if (!ixgbe->unicst_init) {
2167 		/*
2168 		 * Initialize the multiple unicast addresses
2169 		 */
2170 		ixgbe->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
2171 		ixgbe->unicst_avail = ixgbe->unicst_total;
2172 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2173 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2174 			bzero(mac_addr, ETHERADDRL);
2175 			(void) ixgbe_set_rar(hw, slot, mac_addr, 0, 0);
2176 			ixgbe->unicst_addr[slot].mac.set = 0;
2177 		}
2178 		ixgbe->unicst_init = B_TRUE;
2179 	} else {
2180 		/* Re-configure the RAR registers */
2181 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2182 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2183 			if (ixgbe->unicst_addr[slot].mac.set == 1) {
2184 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2185 				    0, IXGBE_RAH_AV);
2186 			} else {
2187 				bzero(mac_addr, ETHERADDRL);
2188 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2189 				    0, 0);
2190 			}
2191 		}
2192 	}
2193 }
2194 
2195 /*
2196  * ixgbe_unicst_set - Set the unicast address to the specified slot.
2197  */
2198 int
2199 ixgbe_unicst_set(ixgbe_t *ixgbe, const uint8_t *mac_addr,
2200     int slot)
2201 {
2202 	struct ixgbe_hw *hw = &ixgbe->hw;
2203 
2204 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2205 
2206 	/*
2207 	 * Save the unicast address in the software data structure
2208 	 */
2209 	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
2210 
2211 	/*
2212 	 * Set the unicast address to the RAR register
2213 	 */
2214 	(void) ixgbe_set_rar(hw, slot, (uint8_t *)mac_addr, 0, IXGBE_RAH_AV);
2215 
2216 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2217 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2218 		return (EIO);
2219 	}
2220 
2221 	return (0);
2222 }
2223 
2224 /*
2225  * ixgbe_unicst_find - Find the slot for the specified unicast address
2226  */
2227 int
2228 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
2229 {
2230 	int slot;
2231 
2232 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2233 
2234 	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2235 		if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
2236 		    mac_addr, ETHERADDRL) == 0)
2237 			return (slot);
2238 	}
2239 
2240 	return (-1);
2241 }
2242 
2243 /*
2244  * ixgbe_multicst_add - Add a multicst address.
2245  */
2246 int
2247 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2248 {
2249 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2250 
2251 	if ((multiaddr[0] & 01) == 0) {
2252 		return (EINVAL);
2253 	}
2254 
2255 	if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2256 		return (ENOENT);
2257 	}
2258 
2259 	bcopy(multiaddr,
2260 	    &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2261 	ixgbe->mcast_count++;
2262 
2263 	/*
2264 	 * Update the multicast table in the hardware
2265 	 */
2266 	ixgbe_setup_multicst(ixgbe);
2267 
2268 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2269 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2270 		return (EIO);
2271 	}
2272 
2273 	return (0);
2274 }
2275 
2276 /*
2277  * ixgbe_multicst_remove - Remove a multicst address.
2278  */
2279 int
2280 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2281 {
2282 	int i;
2283 
2284 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2285 
2286 	for (i = 0; i < ixgbe->mcast_count; i++) {
2287 		if (bcmp(multiaddr, &ixgbe->mcast_table[i],
2288 		    ETHERADDRL) == 0) {
2289 			for (i++; i < ixgbe->mcast_count; i++) {
2290 				ixgbe->mcast_table[i - 1] =
2291 				    ixgbe->mcast_table[i];
2292 			}
2293 			ixgbe->mcast_count--;
2294 			break;
2295 		}
2296 	}
2297 
2298 	/*
2299 	 * Update the multicast table in the hardware
2300 	 */
2301 	ixgbe_setup_multicst(ixgbe);
2302 
2303 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2304 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2305 		return (EIO);
2306 	}
2307 
2308 	return (0);
2309 }
2310 
2311 /*
2312  * ixgbe_setup_multicst - Setup multicast data structures.
2313  *
2314  * This routine initializes all of the multicast related structures
2315  * and saves them in the hardware registers.
2316  */
2317 static void
2318 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2319 {
2320 	uint8_t *mc_addr_list;
2321 	uint32_t mc_addr_count;
2322 	struct ixgbe_hw *hw = &ixgbe->hw;
2323 
2324 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2325 
2326 	ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2327 
2328 	mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2329 	mc_addr_count = ixgbe->mcast_count;
2330 
2331 	/*
2332 	 * Update the multicast addresses to the MTA registers
2333 	 */
2334 	(void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2335 	    ixgbe_mc_table_itr);
2336 }
2337 
2338 /*
2339  * ixgbe_get_conf - Get driver configurations set in driver.conf.
2340  *
2341  * This routine gets user-configured values out of the configuration
2342  * file ixgbe.conf.
2343  *
2344  * For each configurable value, there is a minimum, a maximum, and a
2345  * default.
2346  * If user does not configure a value, use the default.
2347  * If user configures below the minimum, use the minimum.
2348  * If user configures above the maximum, use the maximum.
2349  */
2350 static void
2351 ixgbe_get_conf(ixgbe_t *ixgbe)
2352 {
2353 	struct ixgbe_hw *hw = &ixgbe->hw;
2354 	uint32_t flow_control;
2355 
2356 	/*
2357 	 * ixgbe driver supports the following user configurations:
2358 	 *
2359 	 * Jumbo frame configuration:
2360 	 *    default_mtu
2361 	 *
2362 	 * Ethernet flow control configuration:
2363 	 *    flow_control
2364 	 *
2365 	 * Multiple rings configurations:
2366 	 *    tx_queue_number
2367 	 *    tx_ring_size
2368 	 *    rx_queue_number
2369 	 *    rx_ring_size
2370 	 *
2371 	 * Call ixgbe_get_prop() to get the value for a specific
2372 	 * configuration parameter.
2373 	 */
2374 
2375 	/*
2376 	 * Jumbo frame configuration - max_frame_size controls host buffer
2377 	 * allocation, so includes MTU, ethernet header, vlan tag and
2378 	 * frame check sequence.
2379 	 */
2380 	ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
2381 	    MIN_MTU, MAX_MTU, DEFAULT_MTU);
2382 
2383 	ixgbe->max_frame_size = ixgbe->default_mtu +
2384 	    sizeof (struct ether_vlan_header) + ETHERFCSL;
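	/*
	 * For example, with the default 1500-byte MTU this works out to
	 * 1500 + 18 (VLAN-tagged ether header) + 4 (FCS) = 1522 bytes.
	 */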
2385 
2386 	/*
2387 	 * Ethernet flow control configuration
2388 	 */
2389 	flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
2390 	    ixgbe_fc_none, 3, ixgbe_fc_none);
2391 	if (flow_control == 3)
2392 		flow_control = ixgbe_fc_default;
2393 
2394 	/*
2395 	 * fc.requested_mode is what the user requests.  After autoneg,
2396 	 * fc.current_mode will be the flow_control mode that was negotiated.
2397 	 */
2398 	hw->fc.requested_mode = flow_control;
2399 
2400 	/*
2401 	 * Multiple rings configurations
2402 	 */
2403 	ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
2404 	    ixgbe->capab->min_tx_que_num,
2405 	    ixgbe->capab->max_tx_que_num,
2406 	    ixgbe->capab->def_tx_que_num);
2407 	ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
2408 	    MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
2409 
2410 	ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
2411 	    ixgbe->capab->min_rx_que_num,
2412 	    ixgbe->capab->max_rx_que_num,
2413 	    ixgbe->capab->def_rx_que_num);
2414 	ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
2415 	    MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
2416 
2417 	/*
2418 	 * Multiple groups configuration
2419 	 */
2420 	ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
2421 	    MIN_RX_GROUP_NUM, MAX_RX_GROUP_NUM, DEFAULT_RX_GROUP_NUM);
2422 
2423 	ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
2424 	    0, 1, DEFAULT_MR_ENABLE);
2425 
2426 	if (ixgbe->mr_enable == B_FALSE) {
2427 		ixgbe->num_tx_rings = 1;
2428 		ixgbe->num_rx_rings = 1;
2429 		ixgbe->num_rx_groups = 1;
2430 	}
2431 
2432 	/*
2433 	 * Tunable used to force an interrupt type. The only use is
2434 	 * for testing the lesser interrupt types.
2435 	 * 0 = don't force interrupt type
2436 	 * 1 = force interrupt type MSI-X
2437 	 * 2 = force interrupt type MSI
2438 	 * 3 = force interrupt type Legacy
2439 	 */
2440 	ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
2441 	    IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
2442 
2443 	ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
2444 	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
2445 	ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
2446 	    0, 1, DEFAULT_RX_HCKSUM_ENABLE);
2447 	ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
2448 	    0, 1, DEFAULT_LSO_ENABLE);
2449 	ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
2450 	    0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
2451 
2452 	/* Head Write Back not recommended for 82599 */
2453 	if (hw->mac.type >= ixgbe_mac_82599EB) {
2454 		ixgbe->tx_head_wb_enable = B_FALSE;
2455 	}
2456 
2457 	/*
2458 	 * ixgbe LSO needs the tx h/w checksum support.
2459 	 * LSO will be disabled if tx h/w checksum is not
2460 	 * enabled.
2461 	 */
2462 	if (ixgbe->tx_hcksum_enable == B_FALSE) {
2463 		ixgbe->lso_enable = B_FALSE;
2464 	}
2465 
2466 	ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
2467 	    MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
2468 	    DEFAULT_TX_COPY_THRESHOLD);
2469 	ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
2470 	    PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
2471 	    MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
2472 	ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
2473 	    PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
2474 	    MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
2475 	ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
2476 	    PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
2477 	    MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
2478 
2479 	ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
2480 	    MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
2481 	    DEFAULT_RX_COPY_THRESHOLD);
2482 	ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
2483 	    MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
2484 	    DEFAULT_RX_LIMIT_PER_INTR);
2485 
2486 	/*
2487 	 * Interrupt throttling is in increments of 256ns on the 82598
2488 	 * and 2.048usec (256ns * 8) on the 82599.
2489 	 */
2490 	switch (hw->mac.type) {
2491 	case ixgbe_mac_82598EB:
2492 		ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe,
2493 		    PROP_INTR_THROTTLING,
2494 		    MIN_INTR_THROTTLING, MAX_INTR_THROTTLING_82598,
2495 		    DEFAULT_INTR_THROTTLING_82598);
2496 		break;
2497 	case ixgbe_mac_82599EB:
2498 		ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe,
2499 		    PROP_INTR_THROTTLING,
2500 		    MIN_INTR_THROTTLING, MAX_INTR_THROTTLING_82599,
2501 		    DEFAULT_INTR_THROTTLING_82599);
2502 
2503 		/*
2504 		 * 82599 requires that the interrupt throttling rate be
2505 		 * a multiple of 8. This is enforced by the register
2506 		 * definition.
2507 		 */
2508 		ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] &
2509 		    0xFF8;
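		/*
		 * For example, a configured value of 203 becomes
		 * 203 & 0xFF8 == 200, the nearest lower multiple of 8.
		 */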
2510 		break;
2511 	}
2512 }
2513 
2514 /*
2515  * ixgbe_get_prop - Get a property value out of the configuration file
2516  * ixgbe.conf.
2517  *
2518  * Caller provides the name of the property, a default value, a minimum
2519  * value, and a maximum value.
2520  *
2521  * Return configured value of the property, with default, minimum and
2522  * maximum properly applied.
2523  */
2524 static int
2525 ixgbe_get_prop(ixgbe_t *ixgbe,
2526     char *propname,	/* name of the property */
2527     int minval,		/* minimum acceptable value */
2528     int maxval,		/* maximum acceptable value */
2529     int defval)		/* default value */
2530 {
2531 	int value;
2532 
2533 	/*
2534 	 * Call ddi_prop_get_int() to read the conf settings
2535 	 */
2536 	value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
2537 	    DDI_PROP_DONTPASS, propname, defval);
2538 	if (value > maxval)
2539 		value = maxval;
2540 
2541 	if (value < minval)
2542 		value = minval;
2543 
2544 	return (value);
2545 }
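
/*
 * For example (a sketch, assuming the conf property name "default_mtu"):
 * a driver.conf line such as
 *
 *	default_mtu = 9000;
 *
 * is read back by ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU, MIN_MTU,
 * MAX_MTU, DEFAULT_MTU) and clamped into [MIN_MTU, MAX_MTU] before use.
 */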
2546 
2547 /*
2548  * ixgbe_driver_setup_link - Use the link properties to set up the link.
2549  */
2550 int
2551 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
2552 {
2553 	struct ixgbe_mac_info *mac;
2554 	struct ixgbe_phy_info *phy;
2555 	boolean_t invalid;
2556 
2557 	mac = &ixgbe->hw.mac;
2558 	phy = &ixgbe->hw.phy;
2559 	invalid = B_FALSE;
2560 
2561 	if (ixgbe->param_adv_autoneg_cap == 1) {
2562 		mac->autoneg = B_TRUE;
2563 		phy->autoneg_advertised = 0;
2564 
2565 		/*
2566 		 * No half duplex support with 10Gb parts
2567 		 */
2568 		if (ixgbe->param_adv_10000fdx_cap == 1)
2569 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
2570 
2571 		if (ixgbe->param_adv_1000fdx_cap == 1)
2572 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
2573 
2574 		if (ixgbe->param_adv_100fdx_cap == 1)
2575 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
2576 
2577 		if (phy->autoneg_advertised == 0)
2578 			invalid = B_TRUE;
2579 	} else {
2580 		ixgbe->hw.mac.autoneg = B_FALSE;
2581 	}
2582 
2583 	if (invalid) {
2584 		ixgbe_notice(ixgbe, "Invalid link settings. Setting link to "
2585 		    "autonegotiation with full link capabilities.");
2586 		ixgbe->hw.mac.autoneg = B_TRUE;
2587 	}
2588 
2589 	if (setup_hw) {
2590 		if (ixgbe_setup_link(&ixgbe->hw) != IXGBE_SUCCESS) {
2591 			ixgbe_notice(ixgbe, "Setup link failed on this "
2592 			    "device.");
2593 			return (IXGBE_FAILURE);
2594 		}
2595 	}
2596 
2597 	return (IXGBE_SUCCESS);
2598 }
2599 
2600 /*
2601  * ixgbe_driver_link_check - Link status processing done in taskq.
2602  */
2603 static void
2604 ixgbe_driver_link_check(void *arg)
2605 {
2606 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2607 	struct ixgbe_hw *hw = &ixgbe->hw;
2608 	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
2609 	boolean_t link_up = B_FALSE;
2610 	boolean_t link_changed = B_FALSE;
2611 
2612 	mutex_enter(&ixgbe->gen_lock);
2613 
2614 	/* check for link, wait the full time */
2615 	(void) ixgbe_check_link(hw, &speed, &link_up, true);
2616 	if (link_up) {
2617 		/* Link is up, enable flow control settings */
2618 		(void) ixgbe_fc_enable(hw, 0);
2619 
2620 		/*
2621 		 * The Link is up, check whether it was marked as down earlier
2622 		 */
2623 		if (ixgbe->link_state != LINK_STATE_UP) {
2624 			switch (speed) {
2625 			case IXGBE_LINK_SPEED_10GB_FULL:
2626 				ixgbe->link_speed = SPEED_10GB;
2627 				break;
2628 			case IXGBE_LINK_SPEED_1GB_FULL:
2629 				ixgbe->link_speed = SPEED_1GB;
2630 				break;
2631 			case IXGBE_LINK_SPEED_100_FULL:
2632 				ixgbe->link_speed = SPEED_100;
2633 				break;
2633 			}
2634 			ixgbe->link_duplex = LINK_DUPLEX_FULL;
2635 			ixgbe->link_state = LINK_STATE_UP;
2636 			ixgbe->link_down_timeout = 0;
2637 			link_changed = B_TRUE;
2638 		}
2639 	} else {
2640 		if (ixgbe->link_state != LINK_STATE_DOWN) {
2641 			ixgbe->link_speed = 0;
2642 			ixgbe->link_duplex = 0;
2643 			ixgbe->link_state = LINK_STATE_DOWN;
2644 			link_changed = B_TRUE;
2645 		}
2646 
2647 		if (ixgbe->ixgbe_state & IXGBE_STARTED) {
2648 			if (ixgbe->link_down_timeout < MAX_LINK_DOWN_TIMEOUT) {
2649 				ixgbe->link_down_timeout++;
2650 			} else if (ixgbe->link_down_timeout ==
2651 			    MAX_LINK_DOWN_TIMEOUT) {
2652 				ixgbe_tx_clean(ixgbe);
2653 				ixgbe->link_down_timeout++;
2654 			}
2655 		}
2656 	}
2657 
2658 	/*
2659 	 * this is only reached after a link-status-change interrupt
2660 	 * so always get new phy state
2661 	 */
2662 	ixgbe_get_hw_state(ixgbe);
2663 
2664 	/* re-enable the interrupt, which was automasked */
2665 	ixgbe->eims |= IXGBE_EICR_LSC;
2666 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
2667 
2668 	mutex_exit(&ixgbe->gen_lock);
2669 
2670 	/* outside the gen_lock */
2671 	if (link_changed) {
2672 		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
2673 	}
2674 }
2675 
2676 /*
2677  * ixgbe_sfp_check - SFP module processing, done in taskq (82599 only).
2678  */
2679 static void
2680 ixgbe_sfp_check(void *arg)
2681 {
2682 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2683 	uint32_t eicr = ixgbe->eicr;
2684 	struct ixgbe_hw *hw = &ixgbe->hw;
2685 	uint32_t autoneg;
2686 
2687 	if (eicr & IXGBE_EICR_GPI_SDP1) {
2688 		/* clear the interrupt */
2689 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
2690 
2691 		/* if link up, do multispeed fiber setup */
2692 		(void) ixgbe_get_link_capabilities(hw, &autoneg,
2693 		    &hw->mac.autoneg);
2694 		(void) ixgbe_setup_link_speed(hw, autoneg, B_TRUE, B_TRUE);
2695 		ixgbe_driver_link_check(ixgbe);
2696 	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
2697 		/* clear the interrupt */
2698 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
2699 
2700 		/* if link up, do sfp module setup */
2701 		(void) hw->mac.ops.setup_sfp(hw);
2702 
2703 		/* do multispeed fiber setup */
2704 		(void) ixgbe_get_link_capabilities(hw, &autoneg,
2705 		    &hw->mac.autoneg);
2706 		(void) ixgbe_setup_link_speed(hw, autoneg, B_TRUE, B_TRUE);
2707 		ixgbe_driver_link_check(ixgbe);
2708 	}
2709 }
2710 
2711 /*
2712  * ixgbe_local_timer - Driver watchdog function.
2713  *
2714  * This function will handle the transmit stall check, link status check and
2715  * other routines.
2716  */
2717 static void
2718 ixgbe_local_timer(void *arg)
2719 {
2720 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2721 
2722 	if (ixgbe_stall_check(ixgbe)) {
2723 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2724 		ixgbe->reset_count++;
2725 		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
2726 			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
2727 	}
2728 
2729 	ixgbe_restart_watchdog_timer(ixgbe);
2730 }
2731 
2732 /*
2733  * ixgbe_stall_check - Check for transmit stall.
2734  *
2735  * This function checks if the adapter is stalled (in transmit).
2736  *
2737  * It is called each time the watchdog timeout is invoked.
2738  * If the transmit descriptor reclaim continuously fails,
2739  * the watchdog value will increment by 1. If the watchdog
2740  * value exceeds the threshold, the ixgbe is assumed to
2741  * have stalled and needs to be reset.
2742  */
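/*
 * For example (a sketch): with the one-second watchdog tick armed by
 * ixgbe_arm_watchdog_timer(), a ring whose recycle_fail count stays
 * non-zero for STALL_WATCHDOG_TIMEOUT consecutive ticks is declared
 * stalled, and ixgbe_local_timer() then resets the chip.
 */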
2743 static boolean_t
2744 ixgbe_stall_check(ixgbe_t *ixgbe)
2745 {
2746 	ixgbe_tx_ring_t *tx_ring;
2747 	boolean_t result;
2748 	int i;
2749 
2750 	if (ixgbe->link_state != LINK_STATE_UP)
2751 		return (B_FALSE);
2752 
2753 	/*
2754 	 * If any tx ring is stalled, we'll reset the chipset
2755 	 */
2756 	result = B_FALSE;
2757 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2758 		tx_ring = &ixgbe->tx_rings[i];
2759 		if (tx_ring->tbd_free <= tx_ring->recycle_thresh) {
2760 			tx_ring->tx_recycle(tx_ring);
2761 		}
2762 
2763 		if (tx_ring->recycle_fail > 0)
2764 			tx_ring->stall_watchdog++;
2765 		else
2766 			tx_ring->stall_watchdog = 0;
2767 
2768 		if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
2769 			result = B_TRUE;
2770 			break;
2771 		}
2772 	}
2773 
2774 	if (result) {
2775 		tx_ring->stall_watchdog = 0;
2776 		tx_ring->recycle_fail = 0;
2777 	}
2778 
2779 	return (result);
2780 }
2781 
2783 /*
2784  * is_valid_mac_addr - Check if the mac address is valid.
2785  */
2786 static boolean_t
2787 is_valid_mac_addr(uint8_t *mac_addr)
2788 {
2789 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
2790 	const uint8_t addr_test2[6] =
2791 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2792 
2793 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
2794 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
2795 		return (B_FALSE);
2796 
2797 	return (B_TRUE);
2798 }
2799 
2800 static boolean_t
2801 ixgbe_find_mac_address(ixgbe_t *ixgbe)
2802 {
2803 #ifdef __sparc
2804 	struct ixgbe_hw *hw = &ixgbe->hw;
2805 	uchar_t *bytes;
2806 	struct ether_addr sysaddr;
2807 	uint_t nelts;
2808 	int err;
2809 	boolean_t found = B_FALSE;
2810 
2811 	/*
2812 	 * The "vendor's factory-set address" may already have
2813 	 * been extracted from the chip, but if the property
2814 	 * "local-mac-address" is set we use that instead.
2815 	 *
2816 	 * We check whether it looks like an array of 6
2817 	 * bytes (which it should, if OBP set it).  If we can't
2818 	 * make sense of it this way, we'll ignore it.
2819 	 */
2820 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
2821 	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
2822 	if (err == DDI_PROP_SUCCESS) {
2823 		if (nelts == ETHERADDRL) {
2824 			while (nelts--)
2825 				hw->mac.addr[nelts] = bytes[nelts];
2826 			found = B_TRUE;
2827 		}
2828 		ddi_prop_free(bytes);
2829 	}
2830 
2831 	/*
2832 	 * Look up the OBP property "local-mac-address?". If the user has set
2833 	 * 'local-mac-address? = false', use "the system address" instead.
2834 	 */
2835 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
2836 	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
2837 		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
2838 			if (localetheraddr(NULL, &sysaddr) != 0) {
2839 				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
2840 				found = B_TRUE;
2841 			}
2842 		}
2843 		ddi_prop_free(bytes);
2844 	}
2845 
2846 	/*
2847 	 * Finally(!), if there's a valid "mac-address" property (created
2848 	 * if we netbooted from this interface), we must use this instead
2849 	 * of any of the above to ensure that the NFS/install server doesn't
2850 	 * get confused by the address changing as Solaris takes over!
2851 	 */
2852 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
2853 	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
2854 	if (err == DDI_PROP_SUCCESS) {
2855 		if (nelts == ETHERADDRL) {
2856 			while (nelts--)
2857 				hw->mac.addr[nelts] = bytes[nelts];
2858 			found = B_TRUE;
2859 		}
2860 		ddi_prop_free(bytes);
2861 	}
2862 
2863 	if (found) {
2864 		bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
2865 		return (B_TRUE);
2866 	}
2867 #else
2868 	_NOTE(ARGUNUSED(ixgbe));
2869 #endif
2870 
2871 	return (B_TRUE);
2872 }
2873 
2874 #pragma inline(ixgbe_arm_watchdog_timer)
2875 static void
2876 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
2877 {
2878 	/*
2879 	 * Arm a one-second watchdog timer
2880 	 */
2881 	ixgbe->watchdog_tid =
2882 	    timeout(ixgbe_local_timer,
2883 	    (void *)ixgbe, 1 * drv_usectohz(1000000));
2884 }
2886 
2887 /*
2888  * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
2889  */
2890 void
2891 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
2892 {
2893 	mutex_enter(&ixgbe->watchdog_lock);
2894 
2895 	if (!ixgbe->watchdog_enable) {
2896 		ixgbe->watchdog_enable = B_TRUE;
2897 		ixgbe->watchdog_start = B_TRUE;
2898 		ixgbe_arm_watchdog_timer(ixgbe);
2899 	}
2900 
2901 	mutex_exit(&ixgbe->watchdog_lock);
2902 }
2903 
2904 /*
2905  * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
2906  */
2907 void
2908 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
2909 {
2910 	timeout_id_t tid;
2911 
2912 	mutex_enter(&ixgbe->watchdog_lock);
2913 
2914 	ixgbe->watchdog_enable = B_FALSE;
2915 	ixgbe->watchdog_start = B_FALSE;
2916 	tid = ixgbe->watchdog_tid;
2917 	ixgbe->watchdog_tid = 0;
2918 
2919 	mutex_exit(&ixgbe->watchdog_lock);
2920 
2921 	if (tid != 0)
2922 		(void) untimeout(tid);
2923 }
2924 
2925 /*
2926  * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
2927  */
2928 void
2929 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
2930 {
2931 	mutex_enter(&ixgbe->watchdog_lock);
2932 
2933 	if (ixgbe->watchdog_enable) {
2934 		if (!ixgbe->watchdog_start) {
2935 			ixgbe->watchdog_start = B_TRUE;
2936 			ixgbe_arm_watchdog_timer(ixgbe);
2937 		}
2938 	}
2939 
2940 	mutex_exit(&ixgbe->watchdog_lock);
2941 }
2942 
2943 /*
2944  * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
2945  */
2946 static void
2947 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
2948 {
2949 	mutex_enter(&ixgbe->watchdog_lock);
2950 
2951 	if (ixgbe->watchdog_start)
2952 		ixgbe_arm_watchdog_timer(ixgbe);
2953 
2954 	mutex_exit(&ixgbe->watchdog_lock);
2955 }
2956 
2957 /*
2958  * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
2959  */
2960 void
2961 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
2962 {
2963 	timeout_id_t tid;
2964 
2965 	mutex_enter(&ixgbe->watchdog_lock);
2966 
2967 	ixgbe->watchdog_start = B_FALSE;
2968 	tid = ixgbe->watchdog_tid;
2969 	ixgbe->watchdog_tid = 0;
2970 
2971 	mutex_exit(&ixgbe->watchdog_lock);
2972 
2973 	if (tid != 0)
2974 		(void) untimeout(tid);
2975 }
2976 
2977 /*
2978  * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
2979  */
2980 static void
2981 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
2982 {
2983 	struct ixgbe_hw *hw = &ixgbe->hw;
2984 
2985 	/*
2986 	 * mask all interrupts off
2987 	 */
2988 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
2989 
2990 	/*
2991 	 * for MSI-X, also disable autoclear
2992 	 */
2993 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
2994 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
2995 	}
2996 
2997 	IXGBE_WRITE_FLUSH(hw);
2998 }
2999 
3000 /*
3001  * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
3002  */
3003 static void
3004 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
3005 {
3006 	struct ixgbe_hw *hw = &ixgbe->hw;
3007 	uint32_t eiac, eiam;
3008 	uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3009 
3010 	/* interrupt types to enable */
3011 	ixgbe->eims = IXGBE_EIMS_ENABLE_MASK;	/* shared code default */
3012 	ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER;	/* minus tcp timer */
3013 	ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */
3014 
3015 	/* enable automask on "other" causes that this adapter can generate */
3016 	eiam = ixgbe->capab->other_intr;
3017 
3018 	/*
3019 	 * msi-x mode
3020 	 */
3021 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3022 		/* enable autoclear but not on bits 29:20 */
3023 		eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
3024 
3025 		/* general purpose interrupt enable */
3026 		gpie |= (IXGBE_GPIE_MSIX_MODE
3027 		    | IXGBE_GPIE_PBA_SUPPORT
3028 		    | IXGBE_GPIE_OCD
3029 		    | IXGBE_GPIE_EIAME);
3030 	} else {
3031 		/*
3032 		 * non-msi-x mode
3033 		 */
3035 		/* disable autoclear, leave gpie at default */
3036 		eiac = 0;
3037 
3038 		/*
3039 		 * General purpose interrupt enable.
3040 		 * For 82599, extended interrupt automask enable
3041 		 * only in MSI or MSI-X mode
3042 		 */
3043 		if ((hw->mac.type < ixgbe_mac_82599EB) ||
3044 		    (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3045 			gpie |= IXGBE_GPIE_EIAME;
3046 		}
3047 	}
3048 	/* Enable specific interrupts for 82599 */
3049 	if (hw->mac.type == ixgbe_mac_82599EB) {
3050 		gpie |= IXGBE_SDP2_GPIEN; /* pluggable optics intr */
3051 		gpie |= IXGBE_SDP1_GPIEN; /* LSC interrupt */
3052 	}
3053 
3054 	/* write to interrupt control registers */
3055 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3056 	IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3057 	IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3058 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3059 	IXGBE_WRITE_FLUSH(hw);
3060 }
3061 
3062 /*
3063  * ixgbe_loopback_ioctl - Loopback support.
3064  */
3065 enum ioc_reply
3066 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
3067 {
3068 	lb_info_sz_t *lbsp;
3069 	lb_property_t *lbpp;
3070 	uint32_t *lbmp;
3071 	uint32_t size;
3072 	uint32_t value;
3073 
3074 	if (mp->b_cont == NULL)
3075 		return (IOC_INVAL);
3076 
3077 	switch (iocp->ioc_cmd) {
3078 	default:
3079 		return (IOC_INVAL);
3080 
3081 	case LB_GET_INFO_SIZE:
3082 		size = sizeof (lb_info_sz_t);
3083 		if (iocp->ioc_count != size)
3084 			return (IOC_INVAL);
3085 
3086 		value = sizeof (lb_normal);
3087 		value += sizeof (lb_mac);
3088 
3089 		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
3090 		*lbsp = value;
3091 		break;
3092 
3093 	case LB_GET_INFO:
3094 		value = sizeof (lb_normal);
3095 		value += sizeof (lb_mac);
3096 
3097 		size = value;
3098 		if (iocp->ioc_count != size)
3099 			return (IOC_INVAL);
3100 
3101 		value = 0;
3102 		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
3103 
3104 		lbpp[value++] = lb_normal;
3105 		lbpp[value++] = lb_mac;
3106 		break;
3107 
3108 	case LB_GET_MODE:
3109 		size = sizeof (uint32_t);
3110 		if (iocp->ioc_count != size)
3111 			return (IOC_INVAL);
3112 
3113 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3114 		*lbmp = ixgbe->loopback_mode;
3115 		break;
3116 
3117 	case LB_SET_MODE:
3118 		size = 0;
3119 		if (iocp->ioc_count != sizeof (uint32_t))
3120 			return (IOC_INVAL);
3121 
3122 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3123 		if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
3124 			return (IOC_INVAL);
3125 		break;
3126 	}
3127 
3128 	iocp->ioc_count = size;
3129 	iocp->ioc_error = 0;
3130 
3131 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3132 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3133 		return (IOC_INVAL);
3134 	}
3135 
3136 	return (IOC_REPLY);
3137 }
3138 
3139 /*
3140  * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
3141  */
3142 static boolean_t
3143 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
3144 {
3145 	struct ixgbe_hw *hw;
3146 
3147 	if (mode == ixgbe->loopback_mode)
3148 		return (B_TRUE);
3149 
3150 	hw = &ixgbe->hw;
3151 
3152 	ixgbe->loopback_mode = mode;
3153 
3154 	if (mode == IXGBE_LB_NONE) {
3155 		/*
3156 		 * Reset the chip
3157 		 */
3158 		hw->phy.autoneg_wait_to_complete = B_TRUE;
3159 		(void) ixgbe_reset(ixgbe);
3160 		hw->phy.autoneg_wait_to_complete = B_FALSE;
3161 		return (B_TRUE);
3162 	}
3163 
3164 	mutex_enter(&ixgbe->gen_lock);
3165 
3166 	switch (mode) {
3167 	default:
3168 		mutex_exit(&ixgbe->gen_lock);
3169 		return (B_FALSE);
3170 
3171 	case IXGBE_LB_INTERNAL_MAC:
3172 		ixgbe_set_internal_mac_loopback(ixgbe);
3173 		break;
3174 	}
3175 
3176 	mutex_exit(&ixgbe->gen_lock);
3177 
3178 	return (B_TRUE);
3179 }
3180 
3181 /*
3182  * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
3183  */
3184 static void
3185 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
3186 {
3187 	struct ixgbe_hw *hw;
3188 	uint32_t reg;
3189 	uint8_t atlas;
3190 
3191 	hw = &ixgbe->hw;
3192 
3193 	/*
3194 	 * Setup MAC loopback
3195 	 */
3196 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
3197 	reg |= IXGBE_HLREG0_LPBK;
3198 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
3199 
3200 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
3201 	reg &= ~IXGBE_AUTOC_LMS_MASK;
3202 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
3203 
3204 	/*
3205 	 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
3206 	 */
3207 	if (hw->mac.type == ixgbe_mac_82598EB) {
3208 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3209 		    &atlas);
3210 		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
3211 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3212 		    atlas);
3213 
3214 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3215 		    &atlas);
3216 		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
3217 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3218 		    atlas);
3219 
3220 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3221 		    &atlas);
3222 		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
3223 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3224 		    atlas);
3225 
3226 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3227 		    &atlas);
3228 		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
3229 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3230 		    atlas);
3231 	}
3232 }
3233 
3234 #pragma inline(ixgbe_intr_rx_work)
3235 /*
3236  * ixgbe_intr_rx_work - RX processing of ISR.
3237  */
3238 static void
3239 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
3240 {
3241 	mblk_t *mp;
3242 
3243 	mutex_enter(&rx_ring->rx_lock);
3244 
3245 	mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
3246 	mutex_exit(&rx_ring->rx_lock);
3247 
3248 	if (mp != NULL)
3249 		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
3250 		    rx_ring->ring_gen_num);
3251 }
3252 
3253 #pragma inline(ixgbe_intr_tx_work)
3254 /*
3255  * ixgbe_intr_tx_work - TX processing of ISR.
3256  */
3257 static void
3258 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
3259 {
3260 	/*
3261 	 * Recycle the tx descriptors
3262 	 */
3263 	tx_ring->tx_recycle(tx_ring);
3264 
3265 	/*
3266 	 * Schedule the re-transmit
3267 	 */
3268 	if (tx_ring->reschedule &&
3269 	    (tx_ring->tbd_free >= tx_ring->resched_thresh)) {
3270 		tx_ring->reschedule = B_FALSE;
3271 		mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
3272 		    tx_ring->ring_handle);
3273 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
3274 	}
3275 }
3276 
3277 #pragma inline(ixgbe_intr_other_work)
3278 /*
3279  * ixgbe_intr_other_work - Process interrupt types other than tx/rx
3280  */
3281 static void
3282 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
3283 {
3284 	struct ixgbe_hw *hw = &ixgbe->hw;
3285 	/*
3286 	 * dispatch taskq to handle link status change
3287 	 */
3288 	if (eicr & IXGBE_EICR_LSC) {
3289 		if ((ddi_taskq_dispatch(ixgbe->lsc_taskq,
3290 		    ixgbe_driver_link_check, (void *)ixgbe, DDI_NOSLEEP))
3291 		    != DDI_SUCCESS) {
3292 			ixgbe_log(ixgbe, "Fail to dispatch taskq");
3293 		}
3294 	}
3295 
3296 	/*
3297 	 * check for fan failure on adapters with fans
3298 	 */
3299 	if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
3300 	    (eicr & IXGBE_EICR_GPI_SDP1)) {
3301 		if (hw->mac.type < ixgbe_mac_82599EB) {
3302 			ixgbe_log(ixgbe,
3303 			    "Fan has stopped, replace the adapter\n");
3304 
3305 			/* re-enable the interrupt, which was automasked */
3306 			ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
3307 		}
3308 	}
3309 
3310 	/*
3311 	 * Do SFP check for 82599
3312 	 */
3313 	if (hw->mac.type == ixgbe_mac_82599EB) {
3314 		if ((ddi_taskq_dispatch(ixgbe->lsc_taskq,
3315 		    ixgbe_sfp_check, (void *)ixgbe,
3316 		    DDI_NOSLEEP)) != DDI_SUCCESS) {
3317 			ixgbe_log(ixgbe, "No memory available to dispatch "
3318 			    "taskq for SFP check");
3319 		}
3320 	}
3321 }
3322 
3323 /*
3324  * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
3325  */
3326 static uint_t
3327 ixgbe_intr_legacy(void *arg1, void *arg2)
3328 {
3329 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3330 	struct ixgbe_hw *hw = &ixgbe->hw;
3331 	ixgbe_tx_ring_t *tx_ring;
3332 	ixgbe_rx_ring_t *rx_ring;
3333 	uint32_t eicr;
3334 	mblk_t *mp;
3335 	boolean_t tx_reschedule;
3336 	uint_t result;
3337 
3338 	_NOTE(ARGUNUSED(arg2));
3339 
3340 	mutex_enter(&ixgbe->gen_lock);
3341 
3342 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
3343 		mutex_exit(&ixgbe->gen_lock);
3344 		return (DDI_INTR_UNCLAIMED);
3345 	}
3346 
3347 	mp = NULL;
3348 	tx_reschedule = B_FALSE;
3349 
3350 	/*
3351 	 * Any bit set in eicr: claim this interrupt
3352 	 */
3353 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3354 	if (eicr) {
3355 		/*
3356 		 * For legacy interrupt, we have only one interrupt,
3357 		 * so we have only one rx ring and one tx ring enabled.
3358 		 */
3359 		ASSERT(ixgbe->num_rx_rings == 1);
3360 		ASSERT(ixgbe->num_tx_rings == 1);
3361 
3362 		/*
3363 		 * For legacy interrupt, rx rings[0] will use RTxQ[0].
3364 		 */
3365 		if (eicr & 0x1) {
3366 			ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
3367 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
3368 			ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
3369 			/*
3370 			 * Clean the rx descriptors
3371 			 */
3372 			rx_ring = &ixgbe->rx_rings[0];
3373 			mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
3374 		}
3375 
3376 		/*
3377 		 * For legacy interrupt, tx rings[0] will use RTxQ[1].
3378 		 */
3379 		if (eicr & 0x2) {
3380 			/*
3381 			 * Recycle the tx descriptors
3382 			 */
3383 			tx_ring = &ixgbe->tx_rings[0];
3384 			tx_ring->tx_recycle(tx_ring);
3385 
3386 			/*
3387 			 * Schedule the re-transmit
3388 			 */
3389 			tx_reschedule = (tx_ring->reschedule &&
3390 			    (tx_ring->tbd_free >= tx_ring->resched_thresh));
3391 		}
3392 
3393 		/* any interrupt type other than tx/rx */
3394 		if (eicr & ixgbe->capab->other_intr) {
3395 			if (hw->mac.type < ixgbe_mac_82599EB) {
3396 				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3397 			}
3398 			if (hw->mac.type == ixgbe_mac_82599EB) {
3399 				ixgbe->eimc = IXGBE_82599_OTHER_INTR;
3400 				IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
3401 			}
3402 			ixgbe_intr_other_work(ixgbe, eicr);
3403 			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3404 		}
3405 
3406 		mutex_exit(&ixgbe->gen_lock);
3407 
3408 		result = DDI_INTR_CLAIMED;
3409 	} else {
3410 		mutex_exit(&ixgbe->gen_lock);
3411 
3412 		/*
3413 		 * No interrupt cause bits set: don't claim this interrupt.
3414 		 */
3415 		result = DDI_INTR_UNCLAIMED;
3416 	}
3417 
3418 	/* re-enable the interrupts which were automasked */
3419 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3420 
3421 	/*
3422 	 * Do the following work outside of the gen_lock
3423 	 */
3424 	if (mp != NULL) {
3425 		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
3426 		    rx_ring->ring_gen_num);
3427 	}
3428 
3429 	if (tx_reschedule)  {
3430 		tx_ring->reschedule = B_FALSE;
3431 		mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
3432 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
3433 	}
3434 
3435 	return (result);
3436 }
3437 
3438 /*
3439  * ixgbe_intr_msi - Interrupt handler for MSI.
3440  */
3441 static uint_t
3442 ixgbe_intr_msi(void *arg1, void *arg2)
3443 {
3444 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3445 	struct ixgbe_hw *hw = &ixgbe->hw;
3446 	uint32_t eicr;
3447 
3448 	_NOTE(ARGUNUSED(arg2));
3449 
3450 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3451 
3452 	/*
3453 	 * For MSI interrupt, we have only one vector,
3454 	 * so we have only one rx ring and one tx ring enabled.
3455 	 */
3456 	ASSERT(ixgbe->num_rx_rings == 1);
3457 	ASSERT(ixgbe->num_tx_rings == 1);
3458 
3459 	/*
3460 	 * For MSI interrupt, rx rings[0] will use RTxQ[0].
3461 	 */
3462 	if (eicr & 0x1) {
3463 		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
3464 	}
3465 
3466 	/*
3467 	 * For MSI interrupt, tx rings[0] will use RTxQ[1].
3468 	 */
3469 	if (eicr & 0x2) {
3470 		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
3471 	}
3472 
3473 	/* any interrupt type other than tx/rx */
3474 	if (eicr & ixgbe->capab->other_intr) {
3475 		mutex_enter(&ixgbe->gen_lock);
3476 		if (hw->mac.type < ixgbe_mac_82599EB) {
3477 			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3478 		}
3479 		if (hw->mac.type == ixgbe_mac_82599EB) {
3480 			ixgbe->eimc = IXGBE_82599_OTHER_INTR;
3481 			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
3482 		}
3483 		ixgbe_intr_other_work(ixgbe, eicr);
3484 		ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3485 		mutex_exit(&ixgbe->gen_lock);
3486 	}
3487 
3488 	/* re-enable the interrupts which were automasked */
3489 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3490 
3491 	return (DDI_INTR_CLAIMED);
3492 }
3493 
3494 /*
3495  * ixgbe_intr_msix - Interrupt handler for MSI-X.
3496  */
3497 static uint_t
3498 ixgbe_intr_msix(void *arg1, void *arg2)
3499 {
3500 	ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
3501 	ixgbe_t *ixgbe = vect->ixgbe;
3502 	struct ixgbe_hw *hw = &ixgbe->hw;
3503 	uint32_t eicr;
3504 	int r_idx = 0;
3505 
3506 	_NOTE(ARGUNUSED(arg2));
3507 
3508 	/*
3509 	 * Clean each rx ring that has its bit set in the map
3510 	 */
3511 	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
3512 	while (r_idx >= 0) {
3513 		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
3514 		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
3515 		    (ixgbe->num_rx_rings - 1));
3516 	}
3517 
3518 	/*
3519 	 * Clean each tx ring that has its bit set in the map
3520 	 */
3521 	r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
3522 	while (r_idx >= 0) {
3523 		ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
3524 		r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
3525 		    (ixgbe->num_tx_rings - 1));
3526 	}
3527 
3529 	/*
3530 	 * Clean other interrupt (link change) that has its bit set in the map
3531 	 */
3532 	if (BT_TEST(vect->other_map, 0) == 1) {
3533 		eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3534 
3535 		/*
3536 		 * Check the cause bits; only the "other" causes are
3537 		 * processed here.
3538 		 */
3539 		/* any interrupt type other than tx/rx */
3540 		if (eicr & ixgbe->capab->other_intr) {
3541 			if (hw->mac.type < ixgbe_mac_82599EB) {
3542 				mutex_enter(&ixgbe->gen_lock);
3543 				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
3544 				ixgbe_intr_other_work(ixgbe, eicr);
3545 				mutex_exit(&ixgbe->gen_lock);
3546 			} else if (hw->mac.type == ixgbe_mac_82599EB) {
3547 				mutex_enter(&ixgbe->gen_lock);
3548 				ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
3549 				ixgbe_intr_other_work(ixgbe, eicr);
3550 				mutex_exit(&ixgbe->gen_lock);
3551 			}
3554 		}
3555 
3556 		/* re-enable the interrupts which were automasked */
3557 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3558 	}
3559 
3560 	return (DDI_INTR_CLAIMED);
3561 }
3562 
3563 /*
3564  * ixgbe_alloc_intrs - Allocate interrupts for the driver.
3565  *
3566  * Normal sequence is to try MSI-X; if not successful, try MSI;
3567  * if not successful, try Legacy.
3568  * ixgbe->intr_force can be used to force sequence to start with
3569  * any of the 3 types.
3570  * If MSI-X is not used, number of tx/rx rings is forced to 1.
3571  */
3572 static int
3573 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
3574 {
3575 	dev_info_t *devinfo;
3576 	int intr_types;
3577 	int rc;
3578 
3579 	devinfo = ixgbe->dip;
3580 
3581 	/*
3582 	 * Get supported interrupt types
3583 	 */
3584 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
3585 
3586 	if (rc != DDI_SUCCESS) {
3587 		ixgbe_log(ixgbe,
3588 		    "Get supported interrupt types failed: %d", rc);
3589 		return (IXGBE_FAILURE);
3590 	}
3591 	IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
3592 
3593 	ixgbe->intr_type = 0;
3594 
3595 	/*
3596 	 * Install MSI-X interrupts
3597 	 */
3598 	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
3599 	    (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
3600 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
3601 		if (rc == IXGBE_SUCCESS)
3602 			return (IXGBE_SUCCESS);
3603 
3604 		ixgbe_log(ixgbe,
3605 		    "Allocate MSI-X failed, trying MSI interrupts...");
3606 	}
3607 
3608 	/*
3609 	 * MSI-X not used, force rings and groups to 1
3610 	 */
3611 	ixgbe->num_rx_rings = 1;
3612 	ixgbe->num_rx_groups = 1;
3613 	ixgbe->num_tx_rings = 1;
3614 	ixgbe_log(ixgbe,
3615 	    "MSI-X not used, force rings and groups number to 1");
3616 
3617 	/*
3618 	 * Install MSI interrupts
3619 	 */
3620 	if ((intr_types & DDI_INTR_TYPE_MSI) &&
3621 	    (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
3622 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
3623 		if (rc == IXGBE_SUCCESS)
3624 			return (IXGBE_SUCCESS);
3625 
3626 		ixgbe_log(ixgbe,
3627 		    "Allocate MSI failed, trying Legacy interrupts...");
3628 	}
3629 
3630 	/*
3631 	 * Install legacy interrupts
3632 	 */
3633 	if (intr_types & DDI_INTR_TYPE_FIXED) {
3634 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
3635 		if (rc == IXGBE_SUCCESS)
3636 			return (IXGBE_SUCCESS);
3637 
3638 		ixgbe_log(ixgbe,
3639 		    "Allocate Legacy interrupts failed");
3640 	}
3641 
3642 	/*
3643 	 * If none of the 3 types succeeded, return failure
3644 	 */
3645 	return (IXGBE_FAILURE);
3646 }
3647 
3648 /*
3649  * ixgbe_alloc_intr_handles - Allocate interrupt handles.
3650  *
3651  * For legacy and MSI, only 1 handle is needed.  For MSI-X,
3652  * if fewer than 2 handles are available, return failure.
3653  * Upon success, this maps the vectors to rx and tx rings for
3654  * interrupts.
3655  */
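/*
 * For example (a sketch): with 4 rx rings and 2 tx rings, the MSI-X path
 * requests min(4 + 2, capab->max_ring_vect) handles, and the attempt
 * fails if fewer than the minimum of 2 can be allocated (the caller then
 * falls back to MSI).
 */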
3656 static int
3657 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
3658 {
3659 	dev_info_t *devinfo;
3660 	int request, count, avail, actual;
3661 	int minimum;
3662 	int rc;
3663 
3664 	devinfo = ixgbe->dip;
3665 
3666 	switch (intr_type) {
3667 	case DDI_INTR_TYPE_FIXED:
3668 		request = 1;	/* Request 1 legacy interrupt handle */
3669 		minimum = 1;
3670 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
3671 		break;
3672 
3673 	case DDI_INTR_TYPE_MSI:
3674 		request = 1;	/* Request 1 MSI interrupt handle */
3675 		minimum = 1;
3676 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
3677 		break;
3678 
3679 	case DDI_INTR_TYPE_MSIX:
3680 		/*
3681 		 * Best number of vectors for the adapter is
3682 		 * # rx rings + # tx rings.
3683 		 */
3684 		request = ixgbe->num_rx_rings + ixgbe->num_tx_rings;
3685 		if (request > ixgbe->capab->max_ring_vect)
3686 			request = ixgbe->capab->max_ring_vect;
3687 		minimum = 2;
3688 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
3689 		break;
3690 
3691 	default:
3692 		ixgbe_log(ixgbe,
3693 		    "invalid call to ixgbe_alloc_intr_handles(): %d\n",
3694 		    intr_type);
3695 		return (IXGBE_FAILURE);
3696 	}
3697 	IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d  minimum: %d",
3698 	    request, minimum);
3699 
3700 	/*
3701 	 * Get number of supported interrupts
3702 	 */
3703 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
3704 	if ((rc != DDI_SUCCESS) || (count < minimum)) {
3705 		ixgbe_log(ixgbe,
3706 		    "Get interrupt number failed. Return: %d, count: %d",
3707 		    rc, count);
3708 		return (IXGBE_FAILURE);
3709 	}
3710 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
3711 
3712 	/*
3713 	 * Get number of available interrupts
3714 	 */
3715 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
3716 	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
3717 		ixgbe_log(ixgbe,
3718 		    "Get interrupt available number failed. "
3719 		    "Return: %d, available: %d", rc, avail);
3720 		return (IXGBE_FAILURE);
3721 	}
3722 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts available: %d", avail);
3723 
3724 	if (avail < request) {
3725 		ixgbe_log(ixgbe, "Request %d handles, %d available",
3726 		    request, avail);
3727 		request = avail;
3728 	}
3729 
3730 	actual = 0;
3731 	ixgbe->intr_cnt = 0;
3732 
3733 	/*
3734 	 * Allocate an array of interrupt handles
3735 	 */
3736 	ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
3737 	ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
3738 
3739 	rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
3740 	    request, &actual, DDI_INTR_ALLOC_NORMAL);
3741 	if (rc != DDI_SUCCESS) {
3742 		ixgbe_log(ixgbe, "Allocate interrupts failed. "
3743 		    "return: %d, request: %d, actual: %d",
3744 		    rc, request, actual);
3745 		goto alloc_handle_fail;
3746 	}
3747 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
3748 
3749 	ixgbe->intr_cnt = actual;
3750 
3751 	/*
3752 	 * Now we know the actual number of vectors.  These vectors will be
3753 	 * mapped to the "other" cause and to the rx and tx rings.
3754 	 */
3755 	if (actual < minimum) {
3756 		ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
3757 		    actual);
3758 		goto alloc_handle_fail;
3759 	}
3760 
3761 	/*
3762 	 * Get priority for first vector, assume remaining are all the same
3763 	 */
3764 	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
3765 	if (rc != DDI_SUCCESS) {
3766 		ixgbe_log(ixgbe,
3767 		    "Get interrupt priority failed: %d", rc);
3768 		goto alloc_handle_fail;
3769 	}
3770 
3771 	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
3772 	if (rc != DDI_SUCCESS) {
3773 		ixgbe_log(ixgbe,
3774 		    "Get interrupt cap failed: %d", rc);
3775 		goto alloc_handle_fail;
3776 	}
3777 
3778 	ixgbe->intr_type = intr_type;
3779 
3780 	return (IXGBE_SUCCESS);
3781 
3782 alloc_handle_fail:
3783 	ixgbe_rem_intrs(ixgbe);
3784 
3785 	return (IXGBE_FAILURE);
3786 }
3787 
3788 /*
3789  * ixgbe_add_intr_handlers - Add interrupt handlers based on the
3790  * interrupt type.
3790  *
3791  * Before adding the interrupt handlers, the interrupt vectors have
3792  * been allocated, and the rx/tx rings have also been allocated.
3793  */
3794 static int
3795 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
3796 {
3797 	int vector = 0;
3798 	int rc;
3799 
3800 	switch (ixgbe->intr_type) {
3801 	case DDI_INTR_TYPE_MSIX:
3802 		/*
3803 		 * Add interrupt handler for all vectors
3804 		 */
3805 		for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
3806 			/*
3807 			 * install pointer to vect_map[vector]
3808 			 */
3809 			rc = ddi_intr_add_handler(ixgbe->htable[vector],
3810 			    (ddi_intr_handler_t *)ixgbe_intr_msix,
3811 			    (void *)&ixgbe->vect_map[vector], NULL);
3812 
3813 			if (rc != DDI_SUCCESS) {
3814 				ixgbe_log(ixgbe,
3815 				    "Add rx interrupt handler failed. "
3816 				    "return: %d, vector: %d", rc, vector);
3817 				for (vector--; vector >= 0; vector--) {
3818 					(void) ddi_intr_remove_handler(
3819 					    ixgbe->htable[vector]);
3820 				}
3821 				return (IXGBE_FAILURE);
3822 			}
3823 		}
3824 
3825 		break;
3826 
3827 	case DDI_INTR_TYPE_MSI:
3828 		/*
3829 		 * Add interrupt handlers for the only vector
3830 		 */
3831 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
3832 		    (ddi_intr_handler_t *)ixgbe_intr_msi,
3833 		    (void *)ixgbe, NULL);
3834 
3835 		if (rc != DDI_SUCCESS) {
3836 			ixgbe_log(ixgbe,
3837 			    "Add MSI interrupt handler failed: %d", rc);
3838 			return (IXGBE_FAILURE);
3839 		}
3840 
3841 		break;
3842 
3843 	case DDI_INTR_TYPE_FIXED:
3844 		/*
3845 		 * Add interrupt handlers for the only vector
3846 		 */
3847 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
3848 		    (ddi_intr_handler_t *)ixgbe_intr_legacy,
3849 		    (void *)ixgbe, NULL);
3850 
3851 		if (rc != DDI_SUCCESS) {
3852 			ixgbe_log(ixgbe,
3853 			    "Add legacy interrupt handler failed: %d", rc);
3854 			return (IXGBE_FAILURE);
3855 		}
3856 
3857 		break;
3858 
3859 	default:
3860 		return (IXGBE_FAILURE);
3861 	}
3862 
3863 	return (IXGBE_SUCCESS);
3864 }
3865 
3866 #pragma inline(ixgbe_map_rxring_to_vector)
3867 /*
3868  * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
3869  */
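/*
 * For example (a sketch): mapping rx ring 2 to vector 1 sets bit 2 in
 * vect_map[1].rx_map, increments vect_map[1].rxr_cnt, and records
 * intr_vector = 1 and vect_bit = 0x2 in rx_rings[2].
 */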
3870 static void
3871 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
3872 {
3873 	/*
3874 	 * Set bit in map
3875 	 */
3876 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
3877 
3878 	/*
3879 	 * Count bits set
3880 	 */
3881 	ixgbe->vect_map[v_idx].rxr_cnt++;
3882 
3883 	/*
3884 	 * Remember bit position
3885 	 */
3886 	ixgbe->rx_rings[r_idx].intr_vector = v_idx;
3887 	ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
3888 }
3889 
3890 #pragma inline(ixgbe_map_txring_to_vector)
3891 /*
3892  * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
3893  */
3894 static void
3895 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
3896 {
3897 	/*
3898 	 * Set bit in map
3899 	 */
3900 	BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
3901 
3902 	/*
3903 	 * Count bits set
3904 	 */
3905 	ixgbe->vect_map[v_idx].txr_cnt++;
3906 
3907 	/*
3908 	 * Remember bit position
3909 	 */
3910 	ixgbe->tx_rings[t_idx].intr_vector = v_idx;
3911 	ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
3912 }
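
/*
 * Worked example: ixgbe_map_txring_to_vector(ixgbe, 2, 1) sets bit 2 of
 * vect_map[1].tx_map, bumps vect_map[1].txr_cnt accordingly, and records
 * tx_rings[2].intr_vector = 1 and tx_rings[2].vect_bit = (1 << 1) = 0x2,
 * so the ring can later be matched against per-vector interrupt cause
 * bits.
 */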
3913 
3914 /*
3915  * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
3916  * allocation register (IVAR).
3917  * cause:
3918  *   -1 : other cause
3919  *    0 : rx
3920  *    1 : tx
3921  */
3922 static void
3923 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
3924     int8_t cause)
3925 {
3926 	struct ixgbe_hw *hw = &ixgbe->hw;
3927 	u32 ivar, index;
3928 
3929 	switch (hw->mac.type) {
3930 	case ixgbe_mac_82598EB:
3931 		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
3932 		if (cause == -1) {
3933 			cause = 0;
3934 		}
3935 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
3936 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3937 		ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
3938 		ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
3939 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3940 		break;
3941 	case ixgbe_mac_82599EB:
3942 		if (cause == -1) {
3943 			/* other causes */
3944 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
3945 			index = (intr_alloc_entry & 1) * 8;
3946 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3947 			ivar &= ~(0xFF << index);
3948 			ivar |= (msix_vector << index);
3949 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3950 		} else {
3951 			/* tx or rx causes */
3952 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
3953 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
3954 			ivar = IXGBE_READ_REG(hw,
3955 			    IXGBE_IVAR(intr_alloc_entry >> 1));
3956 			ivar &= ~(0xFF << index);
3957 			ivar |= (msix_vector << index);
3958 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
3959 			    ivar);
3960 		}
3961 		break;
3962 	default:
3963 		break;
3964 	}
3965 }
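
/*
 * Worked example of the IVAR indexing above.  On 82598, rx entry 5
 * (cause 0) gives index = ((0 * 64 + 5) >> 2) & 0x1F = 1 with byte lane
 * (5 & 0x3) = 1, so the vector lands in bits 15:8 of IVAR(1); tx entry 5
 * (cause 1) gives index = ((64 + 5) >> 2) & 0x1F = 17, same byte lane,
 * hence bits 15:8 of IVAR(17).  On 82599, rx entry 5 maps to bits 23:16
 * of IVAR(2) (shift = 16 * (5 & 1) + 8 * 0) and tx entry 5 to bits 31:24
 * of IVAR(2) (shift = 16 + 8).
 */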
3966 
3967 /*
3968  * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
3969  * the given interrupt vector allocation register (IVAR).
3970  * cause:
3971  *   -1 : other cause
3972  *    0 : rx
3973  *    1 : tx
3974  */
3975 static void
3976 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
3977 {
3978 	struct ixgbe_hw *hw = &ixgbe->hw;
3979 	u32 ivar, index;
3980 
3981 	switch (hw->mac.type) {
3982 	case ixgbe_mac_82598EB:
3983 		if (cause == -1) {
3984 			cause = 0;
3985 		}
3986 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
3987 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3988 		ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
3989 		    (intr_alloc_entry & 0x3)));
3990 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3991 		break;
3992 	case ixgbe_mac_82599EB:
3993 		if (cause == -1) {
3994 			/* other causes */
3995 			index = (intr_alloc_entry & 1) * 8;
3996 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3997 			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
3998 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3999 		} else {
4000 			/* tx or rx causes */
4001 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4002 			ivar = IXGBE_READ_REG(hw,
4003 			    IXGBE_IVAR(intr_alloc_entry >> 1));
4004 			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4005 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4006 			    ivar);
4007 		}
4008 		break;
4009 	default:
4010 		break;
4011 	}
4012 }
4013 
4014 /*
4015  * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
4016  * the given interrupt vector allocation register (IVAR).
4017  * cause:
4018  *   -1 : other cause
4019  *    0 : rx
4020  *    1 : tx
4021  */
4022 static void
4023 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4024 {
4025 	struct ixgbe_hw *hw = &ixgbe->hw;
4026 	u32 ivar, index;
4027 
4028 	switch (hw->mac.type) {
4029 	case ixgbe_mac_82598EB:
4030 		if (cause == -1) {
4031 			cause = 0;
4032 		}
4033 		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4034 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4035 		ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
4036 		    (intr_alloc_entry & 0x3)));
4037 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4038 		break;
4039 	case ixgbe_mac_82599EB:
4040 		if (cause == -1) {
4041 			/* other causes */
4042 			index = (intr_alloc_entry & 1) * 8;
4043 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4044 			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4045 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4046 		} else {
4047 			/* tx or rx causes */
4048 			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4049 			ivar = IXGBE_READ_REG(hw,
4050 			    IXGBE_IVAR(intr_alloc_entry >> 1));
4051 			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4052 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4053 			    ivar);
4054 		}
4055 		break;
4056 	default:
4057 		break;
4058 	}
4059 }
4060 
4061 /*
4062  * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
4063  *
4064  * For MSI-X, map the rx interrupts, the tx interrupts and the other
4065  * interrupt to vectors 0 .. (intr_cnt - 1).
4066  */
4067 static int
4068 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
4069 {
4070 	int i, vector = 0;
4071 
4072 	/* initialize vector map */
4073 	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
4074 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4075 		ixgbe->vect_map[i].ixgbe = ixgbe;
4076 	}
4077 
4078 	/*
4079 	 * The non-MSI-X case is simple: rx rings[0] is on RTxQ[0] and
4080 	 * tx rings[0] is on RTxQ[1].
4081 	 */
4082 	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
4083 		ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
4084 		ixgbe_map_txring_to_vector(ixgbe, 0, 1);
4085 		return (IXGBE_SUCCESS);
4086 	}
4087 
4088 	/*
4089 	 * Interrupts/vectors mapping for MSI-X
4090 	 */
4091 
4092 	/*
4093 	 * Map the other interrupt to vector 0; set the bit in the map and
4094 	 * count the bits set.
4095 	 */
4096 	BT_SET(ixgbe->vect_map[vector].other_map, 0);
4097 	ixgbe->vect_map[vector].other_cnt++;
4098 	vector++;
4099 
4100 	/*
4101 	 * Map rx ring interrupts to vectors
4102 	 */
4103 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
4104 		ixgbe_map_rxring_to_vector(ixgbe, i, vector);
4105 		vector = (vector + 1) % ixgbe->intr_cnt;
4106 	}
4107 
4108 	/*
4109 	 * Map tx ring interrupts to vectors
4110 	 */
4111 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
4112 		ixgbe_map_txring_to_vector(ixgbe, i, vector);
4113 		vector = (vector + 1) % ixgbe->intr_cnt;
4114 	}
4115 
4116 	return (IXGBE_SUCCESS);
4117 }
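
/*
 * Worked example, assuming intr_cnt = 4 with 4 rx and 4 tx rings:
 * "other" takes vector 0; the rx loop then assigns rx0->v1, rx1->v2,
 * rx2->v3, rx3->v0; the tx loop continues round-robin from where the
 * rx loop stopped, so tx0->v1, tx1->v2, tx2->v3, tx3->v0.
 */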
4118 
4119 /*
4120  * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
4121  *
4122  * This relies on ring/vector mapping already set up in the
4123  * vect_map[] structures.
4124  */
4125 static void
4126 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
4127 {
4128 	struct ixgbe_hw *hw = &ixgbe->hw;
4129 	ixgbe_intr_vector_t *vect;	/* vector bitmap */
4130 	int r_idx;	/* ring index */
4131 	int v_idx;	/* vector index */
4132 
4133 	/*
4134 	 * Clear any previous entries
4135 	 */
4136 	switch (hw->mac.type) {
4137 	case ixgbe_mac_82598EB:
4138 		for (v_idx = 0; v_idx < 25; v_idx++)
4139 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4140 
4141 		break;
4142 	case ixgbe_mac_82599EB:
4143 		for (v_idx = 0; v_idx < 64; v_idx++)
4144 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4145 		IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
4146 
4147 		break;
4148 	default:
4149 		break;
4150 	}
4151 
4152 	/*
4153 	 * For a non-MSI-X interrupt, rx rings[0] will use RTxQ[0], and
4154 	 * tx rings[0] will use RTxQ[1].
4155 	 */
4156 	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
4157 		ixgbe_setup_ivar(ixgbe, 0, 0, 0);
4158 		ixgbe_setup_ivar(ixgbe, 0, 1, 1);
4159 		return;
4160 	}
4161 
4162 	/*
4163 	 * For MSI-X interrupt, "Other" is always on vector[0].
4164 	 */
4165 	ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);
4166 
4167 	/*
4168 	 * For each interrupt vector, populate the IVAR table
4169 	 */
4170 	for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
4171 		vect = &ixgbe->vect_map[v_idx];
4172 
4173 		/*
4174 		 * For each rx ring bit set
4175 		 */
4176 		r_idx = bt_getlowbit(vect->rx_map, 0,
4177 		    (ixgbe->num_rx_rings - 1));
4178 
4179 		while (r_idx >= 0) {
4180 			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 0);
4181 			r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
4182 			    (ixgbe->num_rx_rings - 1));
4183 		}
4184 
4185 		/*
4186 		 * For each tx ring bit set
4187 		 */
4188 		r_idx = bt_getlowbit(vect->tx_map, 0,
4189 		    (ixgbe->num_tx_rings - 1));
4190 
4191 		while (r_idx >= 0) {
4192 			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
4193 			r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
4194 			    (ixgbe->num_tx_rings - 1));
4195 		}
4196 	}
4197 }
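
/*
 * A note on the bitmap walk above: bt_getlowbit(map, low, high) returns
 * the index of the lowest bit set in [low, high], or -1 when none is
 * left, so each loop visits every mapped ring exactly once.  For
 * example, with rx_map = 0101b and num_rx_rings = 4, the walk yields
 * r_idx 0, then 2, then -1 and terminates.
 */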
4198 
4199 /*
4200  * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
4201  */
4202 static void
4203 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
4204 {
4205 	int i;
4206 	int rc;
4207 
4208 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4209 		rc = ddi_intr_remove_handler(ixgbe->htable[i]);
4210 		if (rc != DDI_SUCCESS) {
4211 			IXGBE_DEBUGLOG_1(ixgbe,
4212 			    "Remove intr handler failed: %d", rc);
4213 		}
4214 	}
4215 }
4216 
4217 /*
4218  * ixgbe_rem_intrs - Remove the allocated interrupts.
4219  */
4220 static void
4221 ixgbe_rem_intrs(ixgbe_t *ixgbe)
4222 {
4223 	int i;
4224 	int rc;
4225 
4226 	for (i = 0; i < ixgbe->intr_cnt; i++) {
4227 		rc = ddi_intr_free(ixgbe->htable[i]);
4228 		if (rc != DDI_SUCCESS) {
4229 			IXGBE_DEBUGLOG_1(ixgbe,
4230 			    "Free intr failed: %d", rc);
4231 		}
4232 	}
4233 
4234 	kmem_free(ixgbe->htable, ixgbe->intr_size);
4235 	ixgbe->htable = NULL;
4236 }
4237 
4238 /*
4239  * ixgbe_enable_intrs - Enable all the ddi interrupts.
4240  */
4241 static int
4242 ixgbe_enable_intrs(ixgbe_t *ixgbe)
4243 {
4244 	int i;
4245 	int rc;
4246 
4247 	/*
4248 	 * Enable interrupts
4249 	 */
4250 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
4251 		/*
4252 		 * Call ddi_intr_block_enable() for MSI(-X) with block capability
4253 		 */
4254 		rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
4255 		if (rc != DDI_SUCCESS) {
4256 			ixgbe_log(ixgbe,
4257 			    "Enable block intr failed: %d", rc);
4258 			return (IXGBE_FAILURE);
4259 		}
4260 	} else {
4261 		/*
4262 		 * Call ddi_intr_enable() for legacy/MSI when block enable is
		 * not supported
4263 		 */
4264 		for (i = 0; i < ixgbe->intr_cnt; i++) {
4265 			rc = ddi_intr_enable(ixgbe->htable[i]);
4266 			if (rc != DDI_SUCCESS) {
4267 				ixgbe_log(ixgbe,
4268 				    "Enable intr failed: %d", rc);
4269 				return (IXGBE_FAILURE);
4270 			}
4271 		}
4272 	}
4273 
4274 	return (IXGBE_SUCCESS);
4275 }
4276 
4277 /*
4278  * ixgbe_disable_intrs - Disable all the interrupts.
4279  */
4280 static int
4281 ixgbe_disable_intrs(ixgbe_t *ixgbe)
4282 {
4283 	int i;
4284 	int rc;
4285 
4286 	/*
4287 	 * Disable all interrupts
4288 	 */
4289 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
4290 		rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
4291 		if (rc != DDI_SUCCESS) {
4292 			ixgbe_log(ixgbe,
4293 			    "Disable block intr failed: %d", rc);
4294 			return (IXGBE_FAILURE);
4295 		}
4296 	} else {
4297 		for (i = 0; i < ixgbe->intr_cnt; i++) {
4298 			rc = ddi_intr_disable(ixgbe->htable[i]);
4299 			if (rc != DDI_SUCCESS) {
4300 				ixgbe_log(ixgbe,
4301 				    "Disable intr failed: %d", rc);
4302 				return (IXGBE_FAILURE);
4303 			}
4304 		}
4305 	}
4306 
4307 	return (IXGBE_SUCCESS);
4308 }
4309 
4310 /*
4311  * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
4312  */
4313 static void
4314 ixgbe_get_hw_state(ixgbe_t *ixgbe)
4315 {
4316 	struct ixgbe_hw *hw = &ixgbe->hw;
4317 	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
4318 	boolean_t link_up = B_FALSE;
4319 	uint32_t pcs1g_anlp = 0;
4320 	uint32_t pcs1g_ana = 0;
4321 
4322 	ASSERT(mutex_owned(&ixgbe->gen_lock));
4323 	ixgbe->param_lp_1000fdx_cap = 0;
4324 	ixgbe->param_lp_100fdx_cap  = 0;
4325 
4326 	/* check for link, don't wait */
4327 	(void) ixgbe_check_link(hw, &speed, &link_up, false);
4328 	if (link_up) {
4329 		pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
4330 		pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
4331 
4332 		ixgbe->param_lp_1000fdx_cap =
4333 		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
4334 		ixgbe->param_lp_100fdx_cap =
4335 		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
4336 	}
4337 
4338 	ixgbe->param_1000fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0;
4339 	ixgbe->param_100fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0;
4340 }
4341 
4342 /*
4343  * ixgbe_get_driver_control - Notify that driver is in control of device.
4344  */
4345 static void
4346 ixgbe_get_driver_control(struct ixgbe_hw *hw)
4347 {
4348 	uint32_t ctrl_ext;
4349 
4350 	/*
4351 	 * Notify firmware that driver is in control of device
4352 	 */
4353 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4354 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
4355 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4356 }
4357 
4358 /*
4359  * ixgbe_release_driver_control - Notify that driver is no longer in control
4360  * of device.
4361  */
4362 static void
4363 ixgbe_release_driver_control(struct ixgbe_hw *hw)
4364 {
4365 	uint32_t ctrl_ext;
4366 
4367 	/*
4368 	 * Notify firmware that driver is no longer in control of device
4369 	 */
4370 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
4371 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
4372 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
4373 }
4374 
4375 /*
4376  * ixgbe_atomic_reserve - Atomically subtract n from *count_p; fail if *count_p < n.
4377  */
4378 int
4379 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
4380 {
4381 	uint32_t oldval;
4382 	uint32_t newval;
4383 
4384 	/*
4385 	 * ATOMICALLY
4386 	 */
4387 	do {
4388 		oldval = *count_p;
4389 		if (oldval < n)
4390 			return (-1);
4391 		newval = oldval - n;
4392 	} while (atomic_cas_32(count_p, oldval, newval) != oldval);
4393 
4394 	return (newval);
4395 }
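
/*
 * A hedged usage sketch (the caller and field names are illustrative):
 * the compare-and-swap loop above lets a consumer reserve n items from a
 * shared counter without taking a lock, e.g. reserving tx descriptors
 * before building a packet:
 *
 *	if (ixgbe_atomic_reserve(&tx_ring->tbd_free, desc_num) < 0)
 *		return (B_FALSE);	(not enough free descriptors)
 *
 * On success the new, decreased count is returned; on failure the
 * counter is left untouched.
 */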
4396 
4397 /*
4398  * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
4399  */
4400 static uint8_t *
4401 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
4402 {
4403 	uint8_t *addr = *upd_ptr;
4404 	uint8_t *new_ptr;
4405 
4406 	_NOTE(ARGUNUSED(hw));
4407 	_NOTE(ARGUNUSED(vmdq));
4408 
4409 	new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
4410 	*upd_ptr = new_ptr;
4411 	return (addr);
4412 }
4413 
4414 /*
4415  * FMA support
4416  */
4417 int
4418 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
4419 {
4420 	ddi_fm_error_t de;
4421 
4422 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
4423 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
4424 	return (de.fme_status);
4425 }
4426 
4427 int
4428 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
4429 {
4430 	ddi_fm_error_t de;
4431 
4432 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
4433 	return (de.fme_status);
4434 }
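
/*
 * A hedged sketch of the intended call pattern (the handle name is
 * illustrative): after touching the hardware, callers check the handle
 * and report a service impact on failure:
 *
 *	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK)
 *		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
 */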
4435 
4436 /*
4437  * ixgbe_fm_error_cb - The IO fault service error handling callback function.
4438  */
4439 static int
4440 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
4441 {
4442 	_NOTE(ARGUNUSED(impl_data));
4443 	/*
4444 	 * As the driver can always deal with an error in any DMA or
4445 	 * access handle, we can just return the fme_status value.
4446 	 */
4447 	pci_ereport_post(dip, err, NULL);
4448 	return (err->fme_status);
4449 }
4450 
4451 static void
4452 ixgbe_fm_init(ixgbe_t *ixgbe)
4453 {
4454 	ddi_iblock_cookie_t iblk;
4455 	int fma_acc_flag, fma_dma_flag;
4456 
4457 	/*
4458 	 * Only register with IO Fault Services if we have some capability
4459 	 */
4460 	if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
4461 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
4462 		fma_acc_flag = 1;
4463 	} else {
4464 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4465 		fma_acc_flag = 0;
4466 	}
4467 
4468 	if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
4469 		fma_dma_flag = 1;
4470 	} else {
4471 		fma_dma_flag = 0;
4472 	}
4473 
4474 	ixgbe_set_fma_flags(fma_acc_flag, fma_dma_flag);
4475 
4476 	if (ixgbe->fm_capabilities) {
4477 
4478 		/*
4479 		 * Register capabilities with IO Fault Services
4480 		 */
4481 		ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
4482 
4483 		/*
4484 		 * Initialize pci ereport capabilities if ereport capable
4485 		 */
4486 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
4487 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4488 			pci_ereport_setup(ixgbe->dip);
4489 
4490 		/*
4491 		 * Register error callback if error callback capable
4492 		 */
4493 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4494 			ddi_fm_handler_register(ixgbe->dip,
4495 			    ixgbe_fm_error_cb, (void*) ixgbe);
4496 	}
4497 }
4498 
4499 static void
4500 ixgbe_fm_fini(ixgbe_t *ixgbe)
4501 {
4502 	/*
4503 	 * Only unregister FMA capabilities if they are registered
4504 	 */
4505 	if (ixgbe->fm_capabilities) {
4506 
4507 		/*
4508 		 * Release any resources allocated by pci_ereport_setup()
4509 		 */
4510 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
4511 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4512 			pci_ereport_teardown(ixgbe->dip);
4513 
4514 		/*
4515 		 * Un-register error callback if error callback capable
4516 		 */
4517 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4518 			ddi_fm_handler_unregister(ixgbe->dip);
4519 
4520 		/*
4521 		 * Unregister from IO Fault Service
4522 		 */
4523 		ddi_fm_fini(ixgbe->dip);
4524 	}
4525 }
4526 
4527 void
4528 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
4529 {
4530 	uint64_t ena;
4531 	char buf[FM_MAX_CLASS];
4532 
4533 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
4534 	ena = fm_ena_generate(0, FM_ENA_FMT1);
4535 	if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
4536 		ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
4537 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
4538 	}
4539 }
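
/*
 * Typical (illustrative) use: callers pass a DDI_FM_DEVICE_* detail
 * string, which is composed with DDI_FM_DEVICE into the ereport class,
 * and then report the service impact, e.g.:
 *
 *	ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
 *	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
 */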
4540 
4541 static int
4542 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
4543 {
4544 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
4545 
4546 	mutex_enter(&rx_ring->rx_lock);
4547 	rx_ring->ring_gen_num = mr_gen_num;
4548 	mutex_exit(&rx_ring->rx_lock);
4549 	return (0);
4550 }
4551 
4552 /*
4553  * Callback function for the MAC layer to register all rings.
4554  */
4555 /* ARGSUSED */
4556 void
4557 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
4558     const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
4559 {
4560 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
4561 	mac_intr_t *mintr = &infop->mri_intr;
4562 
4563 	switch (rtype) {
4564 	case MAC_RING_TYPE_RX: {
4565 		ASSERT(rg_index == 0);
4566 		ASSERT(ring_index < ixgbe->num_rx_rings);
4567 
4568 		ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[ring_index];
4569 		rx_ring->ring_handle = rh;
4570 
4571 		infop->mri_driver = (mac_ring_driver_t)rx_ring;
4572 		infop->mri_start = ixgbe_ring_start;
4573 		infop->mri_stop = NULL;
4574 		infop->mri_poll = ixgbe_ring_rx_poll;
4575 
4576 		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
4577 		mintr->mi_enable = ixgbe_rx_ring_intr_enable;
4578 		mintr->mi_disable = ixgbe_rx_ring_intr_disable;
4579 
4580 		break;
4581 	}
4582 	case MAC_RING_TYPE_TX: {
4583 		ASSERT(rg_index == -1);
4584 		ASSERT(ring_index < ixgbe->num_tx_rings);
4585 
4586 		ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
4587 		tx_ring->ring_handle = rh;
4588 
4589 		infop->mri_driver = (mac_ring_driver_t)tx_ring;
4590 		infop->mri_start = NULL;
4591 		infop->mri_stop = NULL;
4592 		infop->mri_tx = ixgbe_ring_tx;
4593 
4594 		break;
4595 	}
4596 	default:
4597 		break;
4598 	}
4599 }
4600 
4601 /*
4602  * Callback function for the MAC layer to register all groups.
4603  */
4604 void
4605 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
4606     mac_group_info_t *infop, mac_group_handle_t gh)
4607 {
4608 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
4609 
4610 	switch (rtype) {
4611 	case MAC_RING_TYPE_RX: {
4612 		ixgbe_rx_group_t *rx_group;
4613 
4614 		rx_group = &ixgbe->rx_groups[index];
4615 		rx_group->group_handle = gh;
4616 
4617 		infop->mgi_driver = (mac_group_driver_t)rx_group;
4618 		infop->mgi_start = NULL;
4619 		infop->mgi_stop = NULL;
4620 		infop->mgi_addmac = ixgbe_addmac;
4621 		infop->mgi_remmac = ixgbe_remmac;
4622 		infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
4623 
4624 		break;
4625 	}
4626 	case MAC_RING_TYPE_TX:
4627 		break;
4628 	default:
4629 		break;
4630 	}
4631 }
4632 
4633 /*
4634  * Enable the interrupt on the specified rx ring.
4635  */
4636 int
4637 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
4638 {
4639 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
4640 	ixgbe_t *ixgbe = rx_ring->ixgbe;
4641 	int r_idx = rx_ring->index;
4642 	int v_idx = rx_ring->intr_vector;
4643 
4644 	mutex_enter(&ixgbe->gen_lock);
4645 	ASSERT(BT_TEST(ixgbe->vect_map[v_idx].rx_map, r_idx) == 0);
4646 
4647 	/*
4648 	 * Enable the interrupt by setting the VAL bit of the given
4649 	 * interrupt vector allocation register (IVAR).
4650 	 */
4651 	ixgbe_enable_ivar(ixgbe, r_idx, 0);
4652 
4653 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
4654 
4655 	/*
4656 	 * Trigger an Rx interrupt on this ring
4657 	 */
4658 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
4659 	IXGBE_WRITE_FLUSH(&ixgbe->hw);
4660 
4661 	mutex_exit(&ixgbe->gen_lock);
4662 
4663 	return (0);
4664 }
4665 
4666 /*
4667  * Disable the interrupt on the specified rx ring.
4668  */
4669 int
4670 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
4671 {
4672 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
4673 	ixgbe_t *ixgbe = rx_ring->ixgbe;
4674 	int r_idx = rx_ring->index;
4675 	int v_idx = rx_ring->intr_vector;
4676 
4677 	mutex_enter(&ixgbe->gen_lock);
4678 
4679 	ASSERT(BT_TEST(ixgbe->vect_map[v_idx].rx_map, r_idx) == 1);
4680 
4681 	/*
4682 	 * Disable the interrupt by clearing the VAL bit of the given
4683 	 * interrupt vector allocation register (IVAR).
4684 	 */
4685 	ixgbe_disable_ivar(ixgbe, r_idx, 0);
4686 
4687 	BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
4688 
4689 	mutex_exit(&ixgbe->gen_lock);
4690 
4691 	return (0);
4692 }
4693 
4694 /*
4695  * Add a mac address.
4696  */
4697 static int
4698 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
4699 {
4700 	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
4701 	ixgbe_t *ixgbe = rx_group->ixgbe;
4702 	int slot;
4703 	int err;
4704 
4705 	mutex_enter(&ixgbe->gen_lock);
4706 
4707 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4708 		mutex_exit(&ixgbe->gen_lock);
4709 		return (ECANCELED);
4710 	}
4711 
4712 	if (ixgbe->unicst_avail == 0) {
4713 		/* no slots available */
4714 		mutex_exit(&ixgbe->gen_lock);
4715 		return (ENOSPC);
4716 	}
4717 
4718 	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
4719 		if (ixgbe->unicst_addr[slot].mac.set == 0)
4720 			break;
4721 	}
4722 
4723 	ASSERT((slot >= 0) && (slot < ixgbe->unicst_total));
4724 
4725 	if ((err = ixgbe_unicst_set(ixgbe, mac_addr, slot)) == 0) {
4726 		ixgbe->unicst_addr[slot].mac.set = 1;
4727 		ixgbe->unicst_avail--;
4728 	}
4729 
4730 	mutex_exit(&ixgbe->gen_lock);
4731 
4732 	return (err);
4733 }
4734 
4735 /*
4736  * Remove a mac address.
4737  */
4738 static int
4739 ixgbe_remmac(void *arg, const uint8_t *mac_addr)
4740 {
4741 	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
4742 	ixgbe_t *ixgbe = rx_group->ixgbe;
4743 	int slot;
4744 	int err;
4745 
4746 	mutex_enter(&ixgbe->gen_lock);
4747 
4748 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4749 		mutex_exit(&ixgbe->gen_lock);
4750 		return (ECANCELED);
4751 	}
4752 
4753 	slot = ixgbe_unicst_find(ixgbe, mac_addr);
4754 	if (slot == -1) {
4755 		mutex_exit(&ixgbe->gen_lock);
4756 		return (EINVAL);
4757 	}
4758 
4759 	if (ixgbe->unicst_addr[slot].mac.set == 0) {
4760 		mutex_exit(&ixgbe->gen_lock);
4761 		return (EINVAL);
4762 	}
4763 
4764 	bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
4765 	if ((err = ixgbe_unicst_set(ixgbe,
4766 	    ixgbe->unicst_addr[slot].mac.addr, slot)) == 0) {
4767 		ixgbe->unicst_addr[slot].mac.set = 0;
4768 		ixgbe->unicst_avail++;
4769 	}
4770 
4771 	mutex_exit(&ixgbe->gen_lock);
4772 
4773 	return (err);
4774 }
4775