1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
24  */
25 
26 /*
27  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
28  * Use is subject to license terms.
29  */
30 
31 
32 #include "ixgbe_sw.h"
33 
34 static char ident[] = "Intel 10Gb Ethernet";
35 
36 /*
37  * Local function prototypes
38  */
39 static int ixgbe_register_mac(ixgbe_t *);
40 static int ixgbe_identify_hardware(ixgbe_t *);
41 static int ixgbe_regs_map(ixgbe_t *);
42 static void ixgbe_init_properties(ixgbe_t *);
43 static int ixgbe_init_driver_settings(ixgbe_t *);
44 static void ixgbe_init_locks(ixgbe_t *);
45 static void ixgbe_destroy_locks(ixgbe_t *);
46 static int ixgbe_init(ixgbe_t *);
47 static int ixgbe_chip_start(ixgbe_t *);
48 static void ixgbe_chip_stop(ixgbe_t *);
49 static int ixgbe_reset(ixgbe_t *);
50 static void ixgbe_tx_clean(ixgbe_t *);
51 static boolean_t ixgbe_tx_drain(ixgbe_t *);
52 static boolean_t ixgbe_rx_drain(ixgbe_t *);
53 static int ixgbe_alloc_rings(ixgbe_t *);
54 static int ixgbe_init_rings(ixgbe_t *);
55 static void ixgbe_free_rings(ixgbe_t *);
56 static void ixgbe_fini_rings(ixgbe_t *);
57 static void ixgbe_setup_rings(ixgbe_t *);
58 static void ixgbe_setup_rx(ixgbe_t *);
59 static void ixgbe_setup_tx(ixgbe_t *);
60 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
61 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
62 static void ixgbe_setup_rss(ixgbe_t *);
63 static void ixgbe_init_unicst(ixgbe_t *);
64 static int ixgbe_unicst_set(ixgbe_t *, const uint8_t *, int);
65 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
66 static void ixgbe_setup_multicst(ixgbe_t *);
67 static void ixgbe_get_hw_state(ixgbe_t *);
68 static void ixgbe_get_conf(ixgbe_t *);
69 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
70 static boolean_t ixgbe_driver_link_check(ixgbe_t *);
71 static void ixgbe_local_timer(void *);
72 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
73 static void ixgbe_start_watchdog_timer(ixgbe_t *);
74 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
75 static void ixgbe_stop_watchdog_timer(ixgbe_t *);
76 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
77 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
78 static boolean_t is_valid_mac_addr(uint8_t *);
79 static boolean_t ixgbe_stall_check(ixgbe_t *);
80 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
81 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
82 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
83 static int ixgbe_alloc_intrs(ixgbe_t *);
84 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
85 static int ixgbe_add_intr_handlers(ixgbe_t *);
86 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
87 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
88 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t);
89 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t);
90 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t);
91 static int ixgbe_map_rings_to_vectors(ixgbe_t *);
92 static void ixgbe_setup_adapter_vector(ixgbe_t *);
93 static void ixgbe_rem_intr_handlers(ixgbe_t *);
94 static void ixgbe_rem_intrs(ixgbe_t *);
95 static int ixgbe_enable_intrs(ixgbe_t *);
96 static int ixgbe_disable_intrs(ixgbe_t *);
97 static uint_t ixgbe_intr_legacy(void *, void *);
98 static uint_t ixgbe_intr_msi(void *, void *);
99 static uint_t ixgbe_intr_rx_tx(void *, void *);
100 static uint_t ixgbe_intr_other(void *, void *);
101 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
102 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
103 static void ixgbe_intr_other_work(ixgbe_t *);
104 static void ixgbe_get_driver_control(struct ixgbe_hw *);
105 static int ixgbe_addmac(void *, const uint8_t *);
106 static int ixgbe_remmac(void *, const uint8_t *);
107 static void ixgbe_release_driver_control(struct ixgbe_hw *);
108 
109 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
110 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
111 static int ixgbe_resume(dev_info_t *);
112 static int ixgbe_suspend(dev_info_t *);
113 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
114 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
115 
116 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
117     const void *impl_data);
118 static void ixgbe_fm_init(ixgbe_t *);
119 static void ixgbe_fm_fini(ixgbe_t *);
120 
121 static struct cb_ops ixgbe_cb_ops = {
122 	nulldev,		/* cb_open */
123 	nulldev,		/* cb_close */
124 	nodev,			/* cb_strategy */
125 	nodev,			/* cb_print */
126 	nodev,			/* cb_dump */
127 	nodev,			/* cb_read */
128 	nodev,			/* cb_write */
129 	nodev,			/* cb_ioctl */
130 	nodev,			/* cb_devmap */
131 	nodev,			/* cb_mmap */
132 	nodev,			/* cb_segmap */
133 	nochpoll,		/* cb_chpoll */
134 	ddi_prop_op,		/* cb_prop_op */
135 	NULL,			/* cb_stream */
136 	D_MP | D_HOTPLUG,	/* cb_flag */
137 	CB_REV,			/* cb_rev */
138 	nodev,			/* cb_aread */
139 	nodev			/* cb_awrite */
140 };
141 
142 static struct dev_ops ixgbe_dev_ops = {
143 	DEVO_REV,		/* devo_rev */
144 	0,			/* devo_refcnt */
145 	NULL,			/* devo_getinfo */
146 	nulldev,		/* devo_identify */
147 	nulldev,		/* devo_probe */
148 	ixgbe_attach,		/* devo_attach */
149 	ixgbe_detach,		/* devo_detach */
150 	nodev,			/* devo_reset */
151 	&ixgbe_cb_ops,		/* devo_cb_ops */
152 	NULL,			/* devo_bus_ops */
153 	ddi_power,		/* devo_power */
154 	ddi_quiesce_not_supported,	/* devo_quiesce */
155 };
156 
157 static struct modldrv ixgbe_modldrv = {
158 	&mod_driverops,		/* Type of module.  This one is a driver */
159 	ident,			/* Description string */
160 	&ixgbe_dev_ops		/* driver ops */
161 };
162 
163 static struct modlinkage ixgbe_modlinkage = {
164 	MODREV_1, &ixgbe_modldrv, NULL
165 };
166 
167 /*
168  * Access attributes for register mapping
169  */
170 ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
171 	DDI_DEVICE_ATTR_V0,
172 	DDI_STRUCTURE_LE_ACC,
173 	DDI_STRICTORDER_ACC,
174 	DDI_FLAGERR_ACC
175 };
176 
177 /*
178  * Loopback property
179  */
180 static lb_property_t lb_normal = {
181 	normal,	"normal", IXGBE_LB_NONE
182 };
183 
184 static lb_property_t lb_mac = {
185 	internal, "MAC", IXGBE_LB_INTERNAL_MAC
186 };
187 
188 #define	IXGBE_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
189 
190 static mac_callbacks_t ixgbe_m_callbacks = {
191 	IXGBE_M_CALLBACK_FLAGS,
192 	ixgbe_m_stat,
193 	ixgbe_m_start,
194 	ixgbe_m_stop,
195 	ixgbe_m_promisc,
196 	ixgbe_m_multicst,
197 	NULL,
198 	NULL,
199 	ixgbe_m_ioctl,
200 	ixgbe_m_getcapab
201 };
202 
203 /*
204  * Module Initialization Functions.
205  */
206 
207 int
208 _init(void)
209 {
210 	int status;
211 
212 	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
213 
214 	status = mod_install(&ixgbe_modlinkage);
215 
216 	if (status != DDI_SUCCESS) {
217 		mac_fini_ops(&ixgbe_dev_ops);
218 	}
219 
220 	return (status);
221 }
222 
223 int
224 _fini(void)
225 {
226 	int status;
227 
228 	status = mod_remove(&ixgbe_modlinkage);
229 
230 	if (status == DDI_SUCCESS) {
231 		mac_fini_ops(&ixgbe_dev_ops);
232 	}
233 
234 	return (status);
235 }
236 
237 int
238 _info(struct modinfo *modinfop)
239 {
240 	int status;
241 
242 	status = mod_info(&ixgbe_modlinkage, modinfop);
243 
244 	return (status);
245 }
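
/*
 * Illustrative note (not part of the original source): mac_init_ops()
 * must be undone by mac_fini_ops() on every path where the module does
 * not stay loaded.  That is why _init() above calls it when
 * mod_install() fails, and _fini() calls it only after mod_remove()
 * succeeds.
 */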
246 
247 /*
248  * ixgbe_attach - Driver attach.
249  *
250  * This function is the device specific initialization entry
251  * point. This entry point is required and must be written.
252  * The DDI_ATTACH command must be provided in the attach entry
253  * point. When attach() is called with cmd set to DDI_ATTACH,
254  * all normal kernel services (such as kmem_alloc(9F)) are
255  * available for use by the driver.
256  *
257  * The attach() function will be called once for each instance
258  * of  the  device  on  the  system with cmd set to DDI_ATTACH.
259  * Until attach() succeeds, the only driver entry points which
260  * may be called are open(9E) and getinfo(9E).
261  */
262 static int
263 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
264 {
265 	ixgbe_t *ixgbe;
266 	struct ixgbe_osdep *osdep;
267 	struct ixgbe_hw *hw;
268 	int instance;
269 
270 	/*
271 	 * Check the command and perform corresponding operations
272 	 */
273 	switch (cmd) {
274 	default:
275 		return (DDI_FAILURE);
276 
277 	case DDI_RESUME:
278 		return (ixgbe_resume(devinfo));
279 
280 	case DDI_ATTACH:
281 		break;
282 	}
283 
284 	/* Get the device instance */
285 	instance = ddi_get_instance(devinfo);
286 
287 	/* Allocate memory for the instance data structure */
288 	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
289 
290 	ixgbe->dip = devinfo;
291 	ixgbe->instance = instance;
292 
293 	hw = &ixgbe->hw;
294 	osdep = &ixgbe->osdep;
295 	hw->back = osdep;
296 	osdep->ixgbe = ixgbe;
297 
298 	/* Attach the instance pointer to the dev_info data structure */
299 	ddi_set_driver_private(devinfo, ixgbe);
300 
301 	/*
302 	 * Initialize for fma support
303 	 */
304 	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
305 	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
306 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
307 	ixgbe_fm_init(ixgbe);
308 	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
309 
310 	/*
311 	 * Map PCI config space registers
312 	 */
313 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
314 		ixgbe_error(ixgbe, "Failed to map PCI configuration space");
315 		goto attach_fail;
316 	}
317 	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
318 
319 	/*
320 	 * Identify the chipset family
321 	 */
322 	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
323 		ixgbe_error(ixgbe, "Failed to identify hardware");
324 		goto attach_fail;
325 	}
326 
327 	/*
328 	 * Map device registers
329 	 */
330 	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
331 		ixgbe_error(ixgbe, "Failed to map device registers");
332 		goto attach_fail;
333 	}
334 	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
335 
336 	/*
337 	 * Initialize driver parameters
338 	 */
339 	ixgbe_init_properties(ixgbe);
340 	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
341 
342 	/*
343 	 * Allocate interrupts
344 	 */
345 	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
346 		ixgbe_error(ixgbe, "Failed to allocate interrupts");
347 		goto attach_fail;
348 	}
349 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
350 
351 	/*
352 	 * Allocate rx/tx rings based on the ring numbers.
353 	 * The actual numbers of rx/tx rings are decided by the number of
354 	 * allocated interrupt vectors, so we should allocate the rings after
355 	 * interrupts are allocated.
356 	 */
357 	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
358 		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
359 		goto attach_fail;
360 	}
361 	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
362 
363 	/*
364 	 * Map rings to interrupt vectors
365 	 */
366 	if (ixgbe_map_rings_to_vectors(ixgbe) != IXGBE_SUCCESS) {
367 		ixgbe_error(ixgbe, "Failed to map rings to vectors");
368 		goto attach_fail;
369 	}
370 
371 	/*
372 	 * Add interrupt handlers
373 	 */
374 	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
375 		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
376 		goto attach_fail;
377 	}
378 	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
379 
380 	/*
381 	 * Initialize driver parameters
382 	 */
383 	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
384 		ixgbe_error(ixgbe, "Failed to initialize driver settings");
385 		goto attach_fail;
386 	}
387 
388 	/*
389 	 * Initialize mutexes for this device.
390 	 * Do this before enabling the interrupt handlers and
391 	 * registering the softint, to avoid a condition where an
392 	 * interrupt handler could try to use an uninitialized mutex.
393 	 */
394 	ixgbe_init_locks(ixgbe);
395 	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
396 
397 	/*
398 	 * Initialize chipset hardware
399 	 */
400 	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
401 		ixgbe_error(ixgbe, "Failed to initialize adapter");
402 		goto attach_fail;
403 	}
404 	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
405 
406 	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
407 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
408 		goto attach_fail;
409 	}
410 
411 	/*
412 	 * Initialize DMA and hardware settings for rx/tx rings
413 	 */
414 	if (ixgbe_init_rings(ixgbe) != IXGBE_SUCCESS) {
415 		ixgbe_error(ixgbe, "Failed to initialize rings");
416 		goto attach_fail;
417 	}
418 	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT_RINGS;
419 
420 	/*
421 	 * Initialize statistics
422 	 */
423 	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
424 		ixgbe_error(ixgbe, "Failed to initialize statistics");
425 		goto attach_fail;
426 	}
427 	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
428 
429 	/*
430 	 * Initialize NDD parameters
431 	 */
432 	if (ixgbe_nd_init(ixgbe) != IXGBE_SUCCESS) {
433 		ixgbe_error(ixgbe, "Failed to initialize ndd");
434 		goto attach_fail;
435 	}
436 	ixgbe->attach_progress |= ATTACH_PROGRESS_NDD;
437 
438 	/*
439 	 * Register the driver to the MAC
440 	 */
441 	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
442 		ixgbe_error(ixgbe, "Failed to register MAC");
443 		goto attach_fail;
444 	}
445 	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
446 
447 	/*
448 	 * Now that the mutex locks and the chip are both initialized,
449 	 * enable interrupts.
450 	 */
451 	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
452 		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
453 		goto attach_fail;
454 	}
455 	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
456 
457 	ixgbe->ixgbe_state |= IXGBE_INITIALIZED;
458 
459 	return (DDI_SUCCESS);
460 
461 attach_fail:
462 	ixgbe_unconfigure(devinfo, ixgbe);
463 	return (DDI_FAILURE);
464 }
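
/*
 * Illustrative sketch (not part of the original source) of the
 * attach_progress pattern used above.  Each completed step sets one
 * bit, and ixgbe_unconfigure() tears down only the steps whose bits
 * are set, so a failure at any point of attach unwinds exactly what
 * was done.  do_step(), undo_step() and ATTACH_PROGRESS_STEP below are
 * hypothetical names:
 *
 *	if (do_step(ixgbe) != IXGBE_SUCCESS)
 *		goto attach_fail;
 *	ixgbe->attach_progress |= ATTACH_PROGRESS_STEP;
 *	...
 *	// later, in ixgbe_unconfigure():
 *	if (ixgbe->attach_progress & ATTACH_PROGRESS_STEP)
 *		undo_step(ixgbe);
 */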
465 
466 /*
467  * ixgbe_detach - Driver detach.
468  *
469  * The detach() function is the complement of the attach routine.
470  * If cmd is set to DDI_DETACH, detach() is used to remove  the
471  * state  associated  with  a  given  instance of a device node
472  * prior to the removal of that instance from the system.
473  *
474  * The detach() function will be called once for each  instance
475  * of the device for which there has been a successful attach()
476  * once there are no longer  any  opens  on  the  device.
477  *
478  * Interrupt routines are disabled, and all memory allocated by this
479  * driver is freed.
480  */
481 static int
482 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
483 {
484 	ixgbe_t *ixgbe;
485 
486 	/*
487 	 * Check detach command
488 	 */
489 	switch (cmd) {
490 	default:
491 		return (DDI_FAILURE);
492 
493 	case DDI_SUSPEND:
494 		return (ixgbe_suspend(devinfo));
495 
496 	case DDI_DETACH:
497 		break;
498 	}
499 
500 
501 	/*
502 	 * Get the pointer to the driver private data structure
503 	 */
504 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
505 	if (ixgbe == NULL)
506 		return (DDI_FAILURE);
507 
508 	/*
509 	 * Unregister the MAC. If this fails, we have to fail the detach
510 	 */
511 	if (mac_unregister(ixgbe->mac_hdl) != 0) {
512 		ixgbe_error(ixgbe, "Failed to unregister MAC");
513 		return (DDI_FAILURE);
514 	}
515 	ixgbe->attach_progress &= ~ATTACH_PROGRESS_MAC;
516 
517 	/*
518 	 * If the device is still running, it needs to be stopped first.
519 	 * This check is necessary because under some specific circumstances,
520 	 * the detach routine can be called without stopping the interface
521 	 * first.
522 	 */
523 	mutex_enter(&ixgbe->gen_lock);
524 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
525 		ixgbe->ixgbe_state &= ~IXGBE_STARTED;
526 		ixgbe_stop(ixgbe);
527 		mutex_exit(&ixgbe->gen_lock);
528 		/* Disable and stop the watchdog timer */
529 		ixgbe_disable_watchdog_timer(ixgbe);
530 	} else
531 		mutex_exit(&ixgbe->gen_lock);
532 
533 	/*
534 	 * Check if there are still rx buffers held by the upper layer.
535 	 * If so, fail the detach.
536 	 */
537 	if (!ixgbe_rx_drain(ixgbe))
538 		return (DDI_FAILURE);
539 
540 	/*
541 	 * Do the remaining unconfigure routines
542 	 */
543 	ixgbe_unconfigure(devinfo, ixgbe);
544 
545 	return (DDI_SUCCESS);
546 }
547 
548 static void
549 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
550 {
551 	/*
552 	 * Disable interrupt
553 	 */
554 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
555 		(void) ixgbe_disable_intrs(ixgbe);
556 	}
557 
558 	/*
559 	 * Unregister MAC
560 	 */
561 	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
562 		(void) mac_unregister(ixgbe->mac_hdl);
563 	}
564 
565 	/*
566 	 * Free ndd parameters
567 	 */
568 	if (ixgbe->attach_progress & ATTACH_PROGRESS_NDD) {
569 		ixgbe_nd_cleanup(ixgbe);
570 	}
571 
572 	/*
573 	 * Free statistics
574 	 */
575 	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
576 		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
577 	}
578 
579 	/*
580 	 * Remove interrupt handlers
581 	 */
582 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
583 		ixgbe_rem_intr_handlers(ixgbe);
584 	}
585 
586 	/*
587 	 * Remove interrupts
588 	 */
589 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
590 		ixgbe_rem_intrs(ixgbe);
591 	}
592 
593 	/*
594 	 * Remove driver properties
595 	 */
596 	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
597 		(void) ddi_prop_remove_all(devinfo);
598 	}
599 
600 	/*
601 	 * Release the DMA resources of rx/tx rings
602 	 */
603 	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT_RINGS) {
604 		ixgbe_fini_rings(ixgbe);
605 	}
606 
607 	/*
608 	 * Stop the chipset
609 	 */
610 	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
611 		mutex_enter(&ixgbe->gen_lock);
612 		ixgbe_chip_stop(ixgbe);
613 		mutex_exit(&ixgbe->gen_lock);
614 	}
615 
616 	/*
617 	 * Free register handle
618 	 */
619 	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
620 		if (ixgbe->osdep.reg_handle != NULL)
621 			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
622 	}
623 
624 	/*
625 	 * Free PCI config handle
626 	 */
627 	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
628 		if (ixgbe->osdep.cfg_handle != NULL)
629 			pci_config_teardown(&ixgbe->osdep.cfg_handle);
630 	}
631 
632 	/*
633 	 * Free locks
634 	 */
635 	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
636 		ixgbe_destroy_locks(ixgbe);
637 	}
638 
639 	/*
640 	 * Free the rx/tx rings
641 	 */
642 	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
643 		ixgbe_free_rings(ixgbe);
644 	}
645 
646 	/*
647 	 * Unregister FMA capabilities
648 	 */
649 	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
650 		ixgbe_fm_fini(ixgbe);
651 	}
652 
653 	/*
654 	 * Free the driver data structure
655 	 */
656 	kmem_free(ixgbe, sizeof (ixgbe_t));
657 
658 	ddi_set_driver_private(devinfo, NULL);
659 }
660 
661 /*
662  * ixgbe_register_mac - Register the driver and its function pointers with
663  * the GLD interface.
664  */
665 static int
666 ixgbe_register_mac(ixgbe_t *ixgbe)
667 {
668 	struct ixgbe_hw *hw = &ixgbe->hw;
669 	mac_register_t *mac;
670 	int status;
671 
672 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
673 		return (IXGBE_FAILURE);
674 
675 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
676 	mac->m_driver = ixgbe;
677 	mac->m_dip = ixgbe->dip;
678 	mac->m_src_addr = hw->mac.addr;
679 	mac->m_callbacks = &ixgbe_m_callbacks;
680 	mac->m_min_sdu = 0;
681 	mac->m_max_sdu = ixgbe->default_mtu;
682 	mac->m_margin = VLAN_TAGSZ;
683 	mac->m_v12n = MAC_VIRT_LEVEL1;
684 
685 	status = mac_register(mac, &ixgbe->mac_hdl);
686 
687 	mac_free(mac);
688 
689 	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
690 }
691 
692 /*
693  * ixgbe_identify_hardware - Identify the type of the chipset.
694  */
695 static int
696 ixgbe_identify_hardware(ixgbe_t *ixgbe)
697 {
698 	struct ixgbe_hw *hw = &ixgbe->hw;
699 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
700 
701 	/*
702 	 * Get the device id
703 	 */
704 	hw->vendor_id =
705 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
706 	hw->device_id =
707 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
708 	hw->revision_id =
709 	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
710 	hw->subsystem_device_id =
711 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
712 	hw->subsystem_vendor_id =
713 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
714 
715 	return (IXGBE_SUCCESS);
716 }
717 
718 /*
719  * ixgbe_regs_map - Map the device registers.
720  *
721  */
722 static int
723 ixgbe_regs_map(ixgbe_t *ixgbe)
724 {
725 	dev_info_t *devinfo = ixgbe->dip;
726 	struct ixgbe_hw *hw = &ixgbe->hw;
727 	struct ixgbe_osdep *osdep = &ixgbe->osdep;
728 	off_t mem_size;
729 
730 	/*
731 	 * First get the size of device registers to be mapped.
732 	 */
733 	if (ddi_dev_regsize(devinfo, 1, &mem_size) != DDI_SUCCESS) {
734 		return (IXGBE_FAILURE);
735 	}
736 
737 	/*
738 	 * Call ddi_regs_map_setup() to map registers
739 	 */
740 	if ((ddi_regs_map_setup(devinfo, 1,
741 	    (caddr_t *)&hw->hw_addr, 0,
742 	    mem_size, &ixgbe_regs_acc_attr,
743 	    &osdep->reg_handle)) != DDI_SUCCESS) {
744 		return (IXGBE_FAILURE);
745 	}
746 
747 	return (IXGBE_SUCCESS);
748 }
749 
750 /*
751  * ixgbe_init_properties - Initialize driver properties.
752  */
753 static void
754 ixgbe_init_properties(ixgbe_t *ixgbe)
755 {
756 	/*
757 	 * Get conf file properties, including link settings,
758 	 * jumbo frames, ring number, descriptor number, etc.
759 	 */
760 	ixgbe_get_conf(ixgbe);
761 }
762 
763 /*
764  * ixgbe_init_driver_settings - Initialize driver settings.
765  *
766  * The settings include hardware function pointers, bus information,
767  * rx/tx rings settings, link state, and any other parameters that
768  * need to be setup during driver initialization.
769  */
770 static int
771 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
772 {
773 	struct ixgbe_hw *hw = &ixgbe->hw;
774 	dev_info_t *devinfo = ixgbe->dip;
775 	ixgbe_rx_ring_t *rx_ring;
776 	ixgbe_tx_ring_t *tx_ring;
777 	uint32_t rx_size;
778 	uint32_t tx_size;
779 	int i;
780 
781 	/*
782 	 * Initialize chipset specific hardware function pointers
783 	 */
784 	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
785 		return (IXGBE_FAILURE);
786 	}
787 
788 	/*
789 	 * Get the system page size
790 	 */
791 	ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);
792 
793 	/*
794 	 * Set rx buffer size
795 	 *
796 	 * The IP header alignment room is counted in the calculation.
797 	 * The rx buffer size is in units of 1K, as required by the
798 	 * chipset hardware.
799 	 */
800 	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
801 	ixgbe->rx_buf_size = ((rx_size >> 10) +
802 	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
803 
804 	/*
805 	 * Set tx buffer size
806 	 */
807 	tx_size = ixgbe->max_frame_size;
808 	ixgbe->tx_buf_size = ((tx_size >> 10) +
809 	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
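
	/*
	 * Worked example of the 1K rounding above (illustrative; assumes
	 * a 1518-byte max frame and 2 bytes of IP header alignment room):
	 *
	 *	rx_size     = 1518 + 2 = 1520
	 *	rx_buf_size = ((1520 >> 10) + ((1520 & 1023) ? 1 : 0)) << 10
	 *	            = (1 + 1) << 10 = 2048
	 *
	 * i.e. the buffer size is rounded up to the next multiple of 1K.
	 */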
810 
811 	/*
812 	 * Initialize rx/tx rings parameters
813 	 */
814 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
815 		rx_ring = &ixgbe->rx_rings[i];
816 		rx_ring->index = i;
817 		rx_ring->ixgbe = ixgbe;
818 
819 		rx_ring->ring_size = ixgbe->rx_ring_size;
820 		rx_ring->free_list_size = ixgbe->rx_ring_size;
821 		rx_ring->copy_thresh = ixgbe->rx_copy_thresh;
822 		rx_ring->limit_per_intr = ixgbe->rx_limit_per_intr;
823 	}
824 
825 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
826 		tx_ring = &ixgbe->tx_rings[i];
827 		tx_ring->index = i;
828 		tx_ring->ixgbe = ixgbe;
829 		if (ixgbe->tx_head_wb_enable)
830 			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
831 		else
832 			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
833 
834 		tx_ring->ring_size = ixgbe->tx_ring_size;
835 		tx_ring->free_list_size = ixgbe->tx_ring_size +
836 		    (ixgbe->tx_ring_size >> 1);
837 		tx_ring->copy_thresh = ixgbe->tx_copy_thresh;
838 		tx_ring->recycle_thresh = ixgbe->tx_recycle_thresh;
839 		tx_ring->overload_thresh = ixgbe->tx_overload_thresh;
840 		tx_ring->resched_thresh = ixgbe->tx_resched_thresh;
841 	}
842 
843 	/*
844 	 * Initialize values of interrupt throttling rate
845 	 */
846 	for (i = 1; i < IXGBE_MAX_RING_VECTOR; i++)
847 		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
848 
849 	/*
850 	 * The initial link state should be "unknown"
851 	 */
852 	ixgbe->link_state = LINK_STATE_UNKNOWN;
853 	return (IXGBE_SUCCESS);
854 }
855 
856 /*
857  * ixgbe_init_locks - Initialize locks.
858  */
859 static void
860 ixgbe_init_locks(ixgbe_t *ixgbe)
861 {
862 	ixgbe_rx_ring_t *rx_ring;
863 	ixgbe_tx_ring_t *tx_ring;
864 	int i;
865 
866 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
867 		rx_ring = &ixgbe->rx_rings[i];
868 		mutex_init(&rx_ring->rx_lock, NULL,
869 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
870 		mutex_init(&rx_ring->recycle_lock, NULL,
871 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
872 	}
873 
874 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
875 		tx_ring = &ixgbe->tx_rings[i];
876 		mutex_init(&tx_ring->tx_lock, NULL,
877 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
878 		mutex_init(&tx_ring->recycle_lock, NULL,
879 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
880 		mutex_init(&tx_ring->tcb_head_lock, NULL,
881 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
882 		mutex_init(&tx_ring->tcb_tail_lock, NULL,
883 		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
884 	}
885 
886 	mutex_init(&ixgbe->gen_lock, NULL,
887 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
888 
889 	mutex_init(&ixgbe->watchdog_lock, NULL,
890 	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
891 }
892 
893 /*
894  * ixgbe_destroy_locks - Destroy locks.
895  */
896 static void
897 ixgbe_destroy_locks(ixgbe_t *ixgbe)
898 {
899 	ixgbe_rx_ring_t *rx_ring;
900 	ixgbe_tx_ring_t *tx_ring;
901 	int i;
902 
903 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
904 		rx_ring = &ixgbe->rx_rings[i];
905 		mutex_destroy(&rx_ring->rx_lock);
906 		mutex_destroy(&rx_ring->recycle_lock);
907 	}
908 
909 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
910 		tx_ring = &ixgbe->tx_rings[i];
911 		mutex_destroy(&tx_ring->tx_lock);
912 		mutex_destroy(&tx_ring->recycle_lock);
913 		mutex_destroy(&tx_ring->tcb_head_lock);
914 		mutex_destroy(&tx_ring->tcb_tail_lock);
915 	}
916 
917 	mutex_destroy(&ixgbe->gen_lock);
918 	mutex_destroy(&ixgbe->watchdog_lock);
919 }
920 
921 static int
922 ixgbe_resume(dev_info_t *devinfo)
923 {
924 	ixgbe_t *ixgbe;
925 
926 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
927 	if (ixgbe == NULL)
928 		return (DDI_FAILURE);
929 
930 	mutex_enter(&ixgbe->gen_lock);
931 
932 	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
933 		if (ixgbe_start(ixgbe) != IXGBE_SUCCESS) {
934 			mutex_exit(&ixgbe->gen_lock);
935 			return (DDI_FAILURE);
936 		}
937 
938 		/*
939 		 * Enable and start the watchdog timer
940 		 */
941 		ixgbe_enable_watchdog_timer(ixgbe);
942 	}
943 
944 	ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
945 
946 	mutex_exit(&ixgbe->gen_lock);
947 
948 	return (DDI_SUCCESS);
949 }
950 
951 static int
952 ixgbe_suspend(dev_info_t *devinfo)
953 {
954 	ixgbe_t *ixgbe;
955 
956 	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
957 	if (ixgbe == NULL)
958 		return (DDI_FAILURE);
959 
960 	mutex_enter(&ixgbe->gen_lock);
961 
962 	ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
963 
964 	ixgbe_stop(ixgbe);
965 
966 	mutex_exit(&ixgbe->gen_lock);
967 
968 	/*
969 	 * Disable and stop the watchdog timer
970 	 */
971 	ixgbe_disable_watchdog_timer(ixgbe);
972 
973 	return (DDI_SUCCESS);
974 }
975 
976 /*
977  * ixgbe_init - Initialize the device.
978  */
979 static int
980 ixgbe_init(ixgbe_t *ixgbe)
981 {
982 	struct ixgbe_hw *hw = &ixgbe->hw;
983 
984 	mutex_enter(&ixgbe->gen_lock);
985 
986 	/*
987 	 * Reset chipset to put the hardware in a known state
988 	 * before we try to do anything with the eeprom.
989 	 */
990 	if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
991 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
992 		goto init_fail;
993 	}
994 
995 	/*
996 	 * Need to init eeprom before validating the checksum.
997 	 */
998 	if (ixgbe_init_eeprom_params(hw) < 0) {
999 		ixgbe_error(ixgbe,
1000 		    "Unable to initialize the eeprom interface.");
1001 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1002 		goto init_fail;
1003 	}
1004 
1005 	/*
1006 	 * NVM validation
1007 	 */
1008 	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1009 		/*
1010 		 * Some PCI-E parts fail the first check due to
1011 		 * the link being in sleep state.  Call it again,
1012 		 * if it fails a second time it's a real issue.
1013 		 */
1014 		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1015 			ixgbe_error(ixgbe,
1016 			    "Invalid NVM checksum. Please contact "
1017 			    "the vendor to update the NVM.");
1018 			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1019 			goto init_fail;
1020 		}
1021 	}
1022 
1023 	/*
1024 	 * Setup default flow control thresholds - enable/disable
1025 	 * & flow control type is controlled by ixgbe.conf
1026 	 */
1027 	hw->fc.high_water = DEFAULT_FCRTH;
1028 	hw->fc.low_water = DEFAULT_FCRTL;
1029 	hw->fc.pause_time = DEFAULT_FCPAUSE;
1030 	hw->fc.send_xon = B_TRUE;
1031 
1032 	/*
1033 	 * Don't wait for auto-negotiation to complete
1034 	 */
1035 	hw->phy.autoneg_wait_to_complete = B_FALSE;
1036 
1037 	/*
1038 	 * Initialize link settings
1039 	 */
1040 	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1041 
1042 	/*
1043 	 * Initialize the chipset hardware
1044 	 */
1045 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1046 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1047 		goto init_fail;
1048 	}
1049 
1050 	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
1051 		goto init_fail;
1052 	}
1053 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1054 		goto init_fail;
1055 	}
1056 
1057 	mutex_exit(&ixgbe->gen_lock);
1058 	return (IXGBE_SUCCESS);
1059 
1060 init_fail:
1061 	/*
1062 	 * Reset PHY
1063 	 */
1064 	(void) ixgbe_reset_phy(hw);
1065 
1066 	mutex_exit(&ixgbe->gen_lock);
1067 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1068 	return (IXGBE_FAILURE);
1069 }
1070 
1071 /*
1072  * ixgbe_init_rings - Allocate DMA resources for all rx/tx rings and
1073  * initialize relevant hardware settings.
1074  */
1075 static int
1076 ixgbe_init_rings(ixgbe_t *ixgbe)
1077 {
1078 	int i;
1079 
1080 	/*
1081 	 * Allocate buffers for all the rx/tx rings
1082 	 */
1083 	if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS)
1084 		return (IXGBE_FAILURE);
1085 
1086 	/*
1087 	 * Setup the rx/tx rings
1088 	 */
1089 	mutex_enter(&ixgbe->gen_lock);
1090 
1091 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1092 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1093 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1094 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1095 
1096 	ixgbe_setup_rings(ixgbe);
1097 
1098 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1099 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1100 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1101 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1102 
1103 	mutex_exit(&ixgbe->gen_lock);
1104 
1105 	return (IXGBE_SUCCESS);
1106 }
1107 
1108 /*
1109  * ixgbe_fini_rings - Release DMA resources of all rx/tx rings.
1110  */
1111 static void
1112 ixgbe_fini_rings(ixgbe_t *ixgbe)
1113 {
1114 	/*
1115 	 * Release the DMA/memory resources of rx/tx rings
1116 	 */
1117 	ixgbe_free_dma(ixgbe);
1118 }
1119 
1120 /*
1121  * ixgbe_chip_start - Initialize and start the chipset hardware.
1122  */
1123 static int
1124 ixgbe_chip_start(ixgbe_t *ixgbe)
1125 {
1126 	struct ixgbe_hw *hw = &ixgbe->hw;
1127 	int i;
1128 
1129 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1130 
1131 	/*
1132 	 * Get the mac address
1133 	 * This function should handle the SPARC case correctly.
1134 	 */
1135 	if (!ixgbe_find_mac_address(ixgbe)) {
1136 		ixgbe_error(ixgbe, "Failed to get the mac address");
1137 		return (IXGBE_FAILURE);
1138 	}
1139 
1140 	/*
1141 	 * Validate the mac address
1142 	 */
1143 	(void) ixgbe_init_rx_addrs(hw);
1144 	if (!is_valid_mac_addr(hw->mac.addr)) {
1145 		ixgbe_error(ixgbe, "Invalid mac address");
1146 		return (IXGBE_FAILURE);
1147 	}
1148 
1149 	/*
1150 	 * Configure/Initialize hardware
1151 	 */
1152 	if (ixgbe_init_hw(hw) != IXGBE_SUCCESS) {
1153 		ixgbe_error(ixgbe, "Failed to initialize hardware");
1154 		return (IXGBE_FAILURE);
1155 	}
1156 
1157 	/*
1158 	 * Setup adapter interrupt vectors
1159 	 */
1160 	ixgbe_setup_adapter_vector(ixgbe);
1161 
1162 	/*
1163 	 * Initialize unicast addresses.
1164 	 */
1165 	ixgbe_init_unicst(ixgbe);
1166 
1167 	/*
1168 	 * Setup and initialize the mctable structures.
1169 	 */
1170 	ixgbe_setup_multicst(ixgbe);
1171 
1172 	/*
1173 	 * Set interrupt throttling rate
1174 	 */
1175 	for (i = 0; i < ixgbe->intr_cnt; i++)
1176 		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
1177 
1178 	/*
1179 	 * Save the state of the phy
1180 	 */
1181 	ixgbe_get_hw_state(ixgbe);
1182 
1183 	/*
1184 	 * Make sure driver has control
1185 	 */
1186 	ixgbe_get_driver_control(hw);
1187 
1188 	return (IXGBE_SUCCESS);
1189 }
1190 
1191 /*
1192  * ixgbe_chip_stop - Stop the chipset hardware
1193  */
1194 static void
1195 ixgbe_chip_stop(ixgbe_t *ixgbe)
1196 {
1197 	struct ixgbe_hw *hw = &ixgbe->hw;
1198 
1199 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1200 
1201 	/*
1202 	 * Tell firmware driver is no longer in control
1203 	 */
1204 	ixgbe_release_driver_control(hw);
1205 
1206 	/*
1207 	 * Reset the chipset
1208 	 */
1209 	(void) ixgbe_reset_hw(hw);
1210 
1211 	/*
1212 	 * Reset PHY
1213 	 */
1214 	(void) ixgbe_reset_phy(hw);
1215 }
1216 
1217 /*
1218  * ixgbe_reset - Reset the chipset and re-start the driver.
1219  *
1220  * It involves stopping and re-starting the chipset,
1221  * and re-configuring the rx/tx rings.
1222  */
1223 static int
1224 ixgbe_reset(ixgbe_t *ixgbe)
1225 {
1226 	int i;
1227 
1228 	mutex_enter(&ixgbe->gen_lock);
1229 
1230 	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
1231 	ixgbe->ixgbe_state &= ~IXGBE_STARTED;
1232 
1233 	/*
1234 	 * Disable the adapter interrupts to stop any rx/tx activities
1235 	 * before draining pending data and resetting hardware.
1236 	 */
1237 	ixgbe_disable_adapter_interrupts(ixgbe);
1238 
1239 	/*
1240 	 * Drain the pending transmit packets
1241 	 */
1242 	(void) ixgbe_tx_drain(ixgbe);
1243 
1244 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1245 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1246 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1247 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1248 
1249 	/*
1250 	 * Stop the chipset hardware
1251 	 */
1252 	ixgbe_chip_stop(ixgbe);
1253 
1254 	/*
1255 	 * Clean the pending tx data/resources
1256 	 */
1257 	ixgbe_tx_clean(ixgbe);
1258 
1259 	/*
1260 	 * Start the chipset hardware
1261 	 */
1262 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1263 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1264 		goto reset_failure;
1265 	}
1266 
1267 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1268 		goto reset_failure;
1269 	}
1270 
1271 	/*
1272 	 * Setup the rx/tx rings
1273 	 */
1274 	ixgbe_setup_rings(ixgbe);
1275 
1276 	/*
1277 	 * Enable adapter interrupts
1278 	 * The interrupts must be enabled after the driver state is STARTED
1279 	 */
1280 	ixgbe_enable_adapter_interrupts(ixgbe);
1281 
1282 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1283 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1284 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1285 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1286 
1287 	ixgbe->ixgbe_state |= IXGBE_STARTED;
1288 	mutex_exit(&ixgbe->gen_lock);
1289 
1290 	return (IXGBE_SUCCESS);
1291 
1292 reset_failure:
1293 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1294 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1295 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1296 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1297 
1298 	mutex_exit(&ixgbe->gen_lock);
1299 
1300 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1301 
1302 	return (IXGBE_FAILURE);
1303 }
1304 
1305 /*
1306  * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
1307  */
1308 static void
1309 ixgbe_tx_clean(ixgbe_t *ixgbe)
1310 {
1311 	ixgbe_tx_ring_t *tx_ring;
1312 	tx_control_block_t *tcb;
1313 	link_list_t pending_list;
1314 	uint32_t desc_num;
1315 	struct ixgbe_hw *hw = &ixgbe->hw;
1316 	int i, j;
1317 
1318 	LINK_LIST_INIT(&pending_list);
1319 
1320 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1321 		tx_ring = &ixgbe->tx_rings[i];
1322 
1323 		mutex_enter(&tx_ring->recycle_lock);
1324 
1325 		/*
1326 		 * Clean the pending tx data - the pending packets in the
1327 		 * work_list that have no chance of being transmitted again.
1328 		 *
1329 		 * We must ensure the chipset is stopped or the link is down
1330 		 * before cleaning the transmit packets.
1331 		 */
1332 		desc_num = 0;
1333 		for (j = 0; j < tx_ring->ring_size; j++) {
1334 			tcb = tx_ring->work_list[j];
1335 			if (tcb != NULL) {
1336 				desc_num += tcb->desc_num;
1337 
1338 				tx_ring->work_list[j] = NULL;
1339 
1340 				ixgbe_free_tcb(tcb);
1341 
1342 				LIST_PUSH_TAIL(&pending_list, &tcb->link);
1343 			}
1344 		}
1345 
1346 		if (desc_num > 0) {
1347 			atomic_add_32(&tx_ring->tbd_free, desc_num);
1348 			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
1349 
1350 			/*
1351 			 * Reset the head and tail pointers of the tbd ring;
1352 			 * Reset the writeback head if it's enabled.
1353 			 */
1354 			tx_ring->tbd_head = 0;
1355 			tx_ring->tbd_tail = 0;
1356 			if (ixgbe->tx_head_wb_enable)
1357 				*tx_ring->tbd_head_wb = 0;
1358 
1359 			IXGBE_WRITE_REG(&ixgbe->hw,
1360 			    IXGBE_TDH(tx_ring->index), 0);
1361 			IXGBE_WRITE_REG(&ixgbe->hw,
1362 			    IXGBE_TDT(tx_ring->index), 0);
1363 		}
1364 
1365 		mutex_exit(&tx_ring->recycle_lock);
1366 
1367 		/*
1368 		 * Add the tx control blocks in the pending list to
1369 		 * the free list.
1370 		 */
1371 		ixgbe_put_free_list(tx_ring, &pending_list);
1372 	}
1373 }
1374 
1375 /*
1376  * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1377  * transmitted.
1378  */
1379 static boolean_t
1380 ixgbe_tx_drain(ixgbe_t *ixgbe)
1381 {
1382 	ixgbe_tx_ring_t *tx_ring;
1383 	boolean_t done;
1384 	int i, j;
1385 
1386 	/*
1387 	 * Wait for a specific time to allow pending tx packets
1388 	 * to be transmitted.
1389 	 *
1390 	 * Check the counter tbd_free to see if transmission is done.
1391 	 * No lock protection is needed here.
1392 	 *
1393 	 * Return B_TRUE if all pending packets have been transmitted;
1394 	 * otherwise return B_FALSE.
1395 	 */
1396 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1397 
1398 		done = B_TRUE;
1399 		for (j = 0; j < ixgbe->num_tx_rings; j++) {
1400 			tx_ring = &ixgbe->tx_rings[j];
1401 			done = done &&
1402 			    (tx_ring->tbd_free == tx_ring->ring_size);
1403 		}
1404 
1405 		if (done)
1406 			break;
1407 
1408 		msec_delay(1);
1409 	}
1410 
1411 	return (done);
1412 }
1413 
1414 /*
1415  * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1416  */
1417 static boolean_t
1418 ixgbe_rx_drain(ixgbe_t *ixgbe)
1419 {
1420 	ixgbe_rx_ring_t *rx_ring;
1421 	boolean_t done;
1422 	int i, j;
1423 
1424 	/*
1425 	 * Poll the rx free list to check whether the rx buffers held by
1426 	 * the upper layer have been released.
1427 	 *
1428 	 * Check the counter rcb_free to see if all pending buffers are
1429 	 * released. No lock protection is needed here.
1430 	 *
1431 	 * Return B_TRUE if all pending buffers have been released;
1432 	 * otherwise return B_FALSE.
1433 	 */
1434 	for (i = 0; i < RX_DRAIN_TIME; i++) {
1435 
1436 		done = B_TRUE;
1437 		for (j = 0; j < ixgbe->num_rx_rings; j++) {
1438 			rx_ring = &ixgbe->rx_rings[j];
1439 			done = done &&
1440 			    (rx_ring->rcb_free == rx_ring->free_list_size);
1441 		}
1442 
1443 		if (done)
1444 			break;
1445 
1446 		msec_delay(1);
1447 	}
1448 
1449 	return (done);
1450 }
1451 
1452 /*
1453  * ixgbe_start - Start the driver/chipset.
1454  */
1455 int
1456 ixgbe_start(ixgbe_t *ixgbe)
1457 {
1458 	int i;
1459 
1460 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1461 
1462 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1463 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1464 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1465 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1466 
1467 	/*
1468 	 * Start the chipset hardware
1469 	 */
1470 	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1471 		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1472 		goto start_failure;
1473 	}
1474 
1475 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1476 		goto start_failure;
1477 	}
1478 
1479 	/*
1480 	 * Setup the rx/tx rings
1481 	 */
1482 	ixgbe_setup_rings(ixgbe);
1483 
1484 	/*
1485 	 * Enable adapter interrupts
1486 	 * The interrupts must be enabled after the driver state is STARTED
1487 	 */
1488 	ixgbe_enable_adapter_interrupts(ixgbe);
1489 
1490 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1491 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1492 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1493 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1494 
1495 	return (IXGBE_SUCCESS);
1496 
1497 start_failure:
1498 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1499 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1500 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1501 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1502 
1503 	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1504 
1505 	return (IXGBE_FAILURE);
1506 }
1507 
1508 /*
1509  * ixgbe_stop - Stop the driver/chipset.
1510  */
1511 void
1512 ixgbe_stop(ixgbe_t *ixgbe)
1513 {
1514 	int i;
1515 
1516 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1517 
1518 	/*
1519 	 * Disable the adapter interrupts
1520 	 */
1521 	ixgbe_disable_adapter_interrupts(ixgbe);
1522 
1523 	/*
1524 	 * Drain the pending tx packets
1525 	 */
1526 	(void) ixgbe_tx_drain(ixgbe);
1527 
1528 	for (i = 0; i < ixgbe->num_rx_rings; i++)
1529 		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1530 	for (i = 0; i < ixgbe->num_tx_rings; i++)
1531 		mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1532 
1533 	/*
1534 	 * Stop the chipset hardware
1535 	 */
1536 	ixgbe_chip_stop(ixgbe);
1537 
1538 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1539 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1540 	}
1541 
1542 	/*
1543 	 * Clean the pending tx data/resources
1544 	 */
1545 	ixgbe_tx_clean(ixgbe);
1546 
1547 	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1548 		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1549 	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1550 		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1551 }
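
/*
 * Note on lock ordering (descriptive comment, derived from the code
 * above): ixgbe_start(), ixgbe_stop() and ixgbe_reset() acquire gen_lock
 * first (for start/stop it is held by the caller), then every rx ring
 * lock in ascending index order, then every tx ring lock, and release
 * them in the reverse order.  New code that takes multiple ring locks
 * should follow the same order to avoid deadlocks.
 */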
1552 
1553 /*
1554  * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
1555  */
1556 static int
1557 ixgbe_alloc_rings(ixgbe_t *ixgbe)
1558 {
1559 	/*
1560 	 * Allocate memory space for rx rings
1561 	 */
1562 	ixgbe->rx_rings = kmem_zalloc(
1563 	    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
1564 	    KM_NOSLEEP);
1565 
1566 	if (ixgbe->rx_rings == NULL) {
1567 		return (IXGBE_FAILURE);
1568 	}
1569 
1570 	/*
1571 	 * Allocate memory space for tx rings
1572 	 */
1573 	ixgbe->tx_rings = kmem_zalloc(
1574 	    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
1575 	    KM_NOSLEEP);
1576 
1577 	if (ixgbe->tx_rings == NULL) {
1578 		kmem_free(ixgbe->rx_rings,
1579 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1580 		ixgbe->rx_rings = NULL;
1581 		return (IXGBE_FAILURE);
1582 	}
1583 
1584 	/*
1585 	 * Allocate memory space for rx ring groups
1586 	 */
1587 	ixgbe->rx_groups = kmem_zalloc(
1588 	    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
1589 	    KM_NOSLEEP);
1590 
1591 	if (ixgbe->rx_groups == NULL) {
1592 		kmem_free(ixgbe->rx_rings,
1593 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1594 		kmem_free(ixgbe->tx_rings,
1595 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1596 		ixgbe->rx_rings = NULL;
1597 		ixgbe->tx_rings = NULL;
1598 		return (IXGBE_FAILURE);
1599 	}
1600 
1601 	return (IXGBE_SUCCESS);
1602 }
1603 
1604 /*
1605  * ixgbe_free_rings - Free the memory space of rx/tx rings.
1606  */
1607 static void
1608 ixgbe_free_rings(ixgbe_t *ixgbe)
1609 {
1610 	if (ixgbe->rx_rings != NULL) {
1611 		kmem_free(ixgbe->rx_rings,
1612 		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1613 		ixgbe->rx_rings = NULL;
1614 	}
1615 
1616 	if (ixgbe->tx_rings != NULL) {
1617 		kmem_free(ixgbe->tx_rings,
1618 		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1619 		ixgbe->tx_rings = NULL;
1620 	}
1621 
1622 	if (ixgbe->rx_groups != NULL) {
1623 		kmem_free(ixgbe->rx_groups,
1624 		    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
1625 		ixgbe->rx_groups = NULL;
1626 	}
1627 }
1628 
1629 /*
1630  * ixgbe_setup_rings - Setup rx/tx rings.
1631  */
1632 static void
1633 ixgbe_setup_rings(ixgbe_t *ixgbe)
1634 {
1635 	/*
1636 	 * Setup the rx/tx rings, including the following:
1637 	 *
1638 	 * 1. Setup the descriptor ring and the control block buffers;
1639 	 * 2. Initialize necessary registers for receive/transmit;
1640 	 * 3. Initialize software pointers/parameters for receive/transmit;
1641 	 */
1642 	ixgbe_setup_rx(ixgbe);
1643 
1644 	ixgbe_setup_tx(ixgbe);
1645 }
1646 
1647 static void
1648 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
1649 {
1650 	ixgbe_t *ixgbe = rx_ring->ixgbe;
1651 	struct ixgbe_hw *hw = &ixgbe->hw;
1652 	rx_control_block_t *rcb;
1653 	union ixgbe_adv_rx_desc	*rbd;
1654 	uint32_t size;
1655 	uint32_t buf_low;
1656 	uint32_t buf_high;
1657 	uint32_t reg_val;
1658 	int i;
1659 
1660 	ASSERT(mutex_owned(&rx_ring->rx_lock));
1661 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1662 
1663 	for (i = 0; i < ixgbe->rx_ring_size; i++) {
1664 		rcb = rx_ring->work_list[i];
1665 		rbd = &rx_ring->rbd_ring[i];
1666 
1667 		rbd->read.pkt_addr = rcb->rx_buf.dma_address;
1668 		rbd->read.hdr_addr = NULL;
1669 	}
1670 
1671 	/*
1672 	 * Initialize the length register
1673 	 */
1674 	size = rx_ring->ring_size * sizeof (union ixgbe_adv_rx_desc);
1675 	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->index), size);
1676 
1677 	/*
1678 	 * Initialize the base address registers
1679 	 */
1680 	buf_low = (uint32_t)rx_ring->rbd_area.dma_address;
1681 	buf_high = (uint32_t)(rx_ring->rbd_area.dma_address >> 32);
1682 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->index), buf_high);
1683 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->index), buf_low);
1684 
1685 	/*
1686 	 * Setup head & tail pointers
1687 	 */
1688 	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->index), rx_ring->ring_size - 1);
1689 	IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->index), 0);
1690 
1691 	rx_ring->rbd_next = 0;
1692 
1693 	/*
1694 	 * Note: Considering the case that the chipset is being reset
1695 	 * and there are still some buffers held by the upper layer,
1696 	 * we should not reset the values of rcb_head, rcb_tail and
1697 	 * rcb_free if the state is not IXGBE_UNKNOWN.
1698 	 */
1699 	if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
1700 		rx_ring->rcb_head = 0;
1701 		rx_ring->rcb_tail = 0;
1702 		rx_ring->rcb_free = rx_ring->free_list_size;
1703 	}
1704 
1705 	/*
1706 	 * Setup the Receive Descriptor Control Register (RXDCTL)
1707 	 * PTHRESH=32 descriptors (half the internal cache)
1708 	 * HTHRESH=0 descriptors (to minimize latency on fetch)
1709 	 * WTHRESH defaults to 1 (writeback each descriptor)
1710 	 */
1711 	reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->index));
1712 	reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */
1713 	reg_val |= 0x0020;		/* pthresh */
1714 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->index), reg_val);
1715 
1716 	/*
1717 	 * Setup the Split and Replication Receive Control Register.
1718 	 * Set the rx buffer size and the advanced descriptor type.
1719 	 */
1720 	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
1721 	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1722 
1723 	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->index), reg_val);
1724 }
1725 
1726 static void
1727 ixgbe_setup_rx(ixgbe_t *ixgbe)
1728 {
1729 	ixgbe_rx_ring_t *rx_ring;
1730 	struct ixgbe_hw *hw = &ixgbe->hw;
1731 	ixgbe_rx_group_t *rx_group;
1732 	uint32_t reg_val;
1733 	uint32_t ring_mapping;
1734 	int i;
1735 
1736 	/*
1737 	 * Set filter control in FCTRL to accept broadcast packets and do
1738 	 * not pass pause frames to host.  Flow control settings are already
1739 	 * in this register, so preserve them.
1740 	 */
1741 	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1742 	reg_val |= IXGBE_FCTRL_BAM;	/* broadcast accept mode */
1743 	reg_val |= IXGBE_FCTRL_DPF;	/* discard pause frames */
1744 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
1745 
1746 	/*
1747 	 * Enable the receive unit.  This must be done after filter
1748 	 * control is set in FCTRL.
1749 	 */
1750 	reg_val = (IXGBE_RXCTRL_RXEN	/* Enable Receive Unit */
1751 	    | IXGBE_RXCTRL_DMBYPS);	/* descriptor monitor bypass */
1752 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
1753 
1754 	/*
1755 	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
1756 	 */
1757 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1758 		rx_ring = &ixgbe->rx_rings[i];
1759 		ixgbe_setup_rx_ring(rx_ring);
1760 	}
1761 
1762 	/*
1763 	 * Setup rx groups.
1764 	 */
1765 	for (i = 0; i < ixgbe->num_rx_groups; i++) {
1766 		rx_group = &ixgbe->rx_groups[i];
1767 		rx_group->index = i;
1768 		rx_group->ixgbe = ixgbe;
1769 	}
1770 
1771 	/*
1772 	 * Setup the per-ring statistics mapping.
1773 	 */
1774 	ring_mapping = 0;
1775 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
1776 		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
1777 		if ((i & 0x3) == 0x3) {
1778 			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i >> 2), ring_mapping);
1779 			ring_mapping = 0;
1780 		}
1781 	}
1782 	if ((i & 0x3) != 0x3)
1783 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i >> 2), ring_mapping);
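
	/*
	 * Worked example of the packing above (illustrative): each RQSMR
	 * register maps four rings, one byte per ring.  With four rx
	 * rings, i = 0..3 accumulates
	 *
	 *	ring_mapping = (0 << 0) | (1 << 8) | (2 << 16) | (3 << 24)
	 *	             = 0x03020100
	 *
	 * which is written to RQSMR(0) when i == 3.
	 */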
1784 
1785 	/*
1786 	 * The Max Frame Size in MHADD will be internally increased by four
1787 	 * bytes if the packet has a VLAN field, so it includes the MTU,
1788 	 * Ethernet header and frame check sequence.
1789 	 */
1790 	reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
1791 	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
1792 	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
1793 
1794 	/*
1795 	 * Setup Jumbo Frame enable bit
1796 	 */
1797 	if (ixgbe->default_mtu > ETHERMTU) {
1798 		reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1799 		reg_val |= IXGBE_HLREG0_JUMBOEN;
1800 		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
1801 	}
1802 
1803 	/*
1804 	 * Hardware checksum settings
1805 	 */
1806 	if (ixgbe->rx_hcksum_enable) {
1807 		reg_val = IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
1808 		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
1809 	}
1810 
1811 	/*
1812 	 * Setup RSS for multiple receive queues
1813 	 */
1814 	if (ixgbe->num_rx_rings > 1)
1815 		ixgbe_setup_rss(ixgbe);
1816 }
1817 
1818 static void
1819 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
1820 {
1821 	ixgbe_t *ixgbe = tx_ring->ixgbe;
1822 	struct ixgbe_hw *hw = &ixgbe->hw;
1823 	uint32_t size;
1824 	uint32_t buf_low;
1825 	uint32_t buf_high;
1826 	uint32_t reg_val;
1827 
1828 	ASSERT(mutex_owned(&tx_ring->tx_lock));
1829 	ASSERT(mutex_owned(&ixgbe->gen_lock));
1830 
1831 	/*
1832 	 * Initialize the length register
1833 	 */
1834 	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
1835 	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);
1836 
1837 	/*
1838 	 * Initialize the base address registers
1839 	 */
1840 	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
1841 	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
1842 	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
1843 	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);
1844 
1845 	/*
1846 	 * setup TXDCTL(tx_ring->index)
1847 	 */
1848 	reg_val = IXGBE_TXDCTL_ENABLE;
1849 	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
1850 
1851 	/*
1852 	 * Setup head & tail pointers
1853 	 */
1854 	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
1855 	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);
1856 
1857 	/*
1858 	 * Setup head write-back
1859 	 */
1860 	if (ixgbe->tx_head_wb_enable) {
1861 		/*
1862 		 * The memory of the head write-back is allocated using
1863 		 * the extra tbd beyond the tail of the tbd ring.
1864 		 */
1865 		tx_ring->tbd_head_wb = (uint32_t *)
1866 		    ((uintptr_t)tx_ring->tbd_area.address + size);
1867 		*tx_ring->tbd_head_wb = 0;
1868 
1869 		buf_low = (uint32_t)
1870 		    (tx_ring->tbd_area.dma_address + size);
1871 		buf_high = (uint32_t)
1872 		    ((tx_ring->tbd_area.dma_address + size) >> 32);
1873 
1874 		/* Set the head write-back enable bit */
1875 		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
1876 
1877 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
1878 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
1879 
1880 		/*
1881 		 * Turn off relaxed ordering for head write back or it will
1882 		 * cause problems with the tx recycling
1883 		 */
1884 		reg_val = IXGBE_READ_REG(hw,
1885 		    IXGBE_DCA_TXCTRL(tx_ring->index));
1886 		reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1887 		IXGBE_WRITE_REG(hw,
1888 		    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
1889 	} else {
1890 		tx_ring->tbd_head_wb = NULL;
1891 	}
1892 
1893 	tx_ring->tbd_head = 0;
1894 	tx_ring->tbd_tail = 0;
1895 	tx_ring->tbd_free = tx_ring->ring_size;
1896 
1897 	/*
1898 	 * Note: Considering the case that the chipset is being reset,
1899 	 * and there are still some tcb in the pending list,
1900 	 * we should not reset the values of tcb_head, tcb_tail and
1901 	 * tcb_free if the state is not IXGBE_UNKNOWN.
1902 	 */
1903 	if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
1904 		tx_ring->tcb_head = 0;
1905 		tx_ring->tcb_tail = 0;
1906 		tx_ring->tcb_free = tx_ring->free_list_size;
1907 	}
1908 
1909 	/*
1910 	 * Initialize the s/w context structure
1911 	 */
1912 	bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
1913 }
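
/*
 * Layout sketch (illustrative) of the head write-back area configured
 * above: the descriptor area carries one extra descriptor's worth of
 * space past the end of the ring, and the first 4 bytes of that space
 * hold the hardware-updated head index:
 *
 *	tbd_area: [ desc 0 | desc 1 | ... | desc N-1 | head WB (uint32_t) ]
 *	                                               ^ dma_address + size
 *
 * IXGBE_TDWBAL_HEAD_WB_ENABLE is OR-ed into the low address register to
 * turn the feature on.
 */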
1914 
1915 static void
1916 ixgbe_setup_tx(ixgbe_t *ixgbe)
1917 {
1918 	struct ixgbe_hw *hw = &ixgbe->hw;
1919 	ixgbe_tx_ring_t *tx_ring;
1920 	uint32_t reg_val;
1921 	uint32_t ring_mapping;
1922 	int i;
1923 
1924 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1925 		tx_ring = &ixgbe->tx_rings[i];
1926 		ixgbe_setup_tx_ring(tx_ring);
1927 	}
1928 
1929 	/*
1930 	 * Setup the per-ring statistics mapping.
1931 	 */
1932 	ring_mapping = 0;
1933 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
1934 		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
1935 		if ((i & 0x3) == 0x3) {
1936 			IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
1937 			ring_mapping = 0;
1938 		}
1939 	}
1940 	if ((i & 0x3) != 0x3)
1941 		IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
1942 
1943 	/*
1944 	 * Enable CRC appending and TX padding (for short tx frames)
1945 	 */
1946 	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1947 	reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
1948 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
1949 }
1950 
1951 /*
1952  * ixgbe_setup_rss - Setup receive-side scaling feature.
1953  */
1954 static void
1955 ixgbe_setup_rss(ixgbe_t *ixgbe)
1956 {
1957 	struct ixgbe_hw *hw = &ixgbe->hw;
1958 	uint32_t i, mrqc, rxcsum;
1959 	uint32_t random;
1960 	uint32_t reta;
1961 
1962 	/*
1963 	 * Fill out redirection table
1964 	 */
1965 	reta = 0;
1966 	for (i = 0; i < 128; i++) {
1967 		reta = (reta << 8) | (i % ixgbe->num_rx_rings);
1968 		if ((i & 3) == 3)
1969 			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
1970 	}
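
	/*
	 * Worked example of the packing above (illustrative): four table
	 * entries accumulate into each 32-bit RETA register.  With
	 * num_rx_rings = 4, iterations i = 0..3 build reta = 0x00010203,
	 * which is written to RETA(0) when i == 3; the same byte pattern
	 * then repeats for all 128 entries.
	 */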
1971 
1972 	/*
1973 	 * Fill out hash function seeds with a random constant
1974 	 */
1975 	for (i = 0; i < 10; i++) {
1976 		(void) random_get_pseudo_bytes((uint8_t *)&random,
1977 		    sizeof (uint32_t));
1978 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
1979 	}
1980 
1981 	/*
1982 	 * Enable RSS & perform hash on these packet types
1983 	 */
1984 	mrqc = IXGBE_MRQC_RSSEN |
1985 	    IXGBE_MRQC_RSS_FIELD_IPV4 |
1986 	    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
1987 	    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
1988 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
1989 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
1990 	    IXGBE_MRQC_RSS_FIELD_IPV6 |
1991 	    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
1992 	    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
1993 	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
1994 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
1995 
1996 	/*
1997 	 * Disable Packet Checksum to enable RSS for multiple receive queues.
1998 	 * It is an adapter hardware limitation that Packet Checksum is
1999 	 * mutually exclusive with RSS.
2000 	 */
2001 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2002 	rxcsum |= IXGBE_RXCSUM_PCSD;
2003 	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2004 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2005 }
2006 
2007 /*
2008  * ixgbe_init_unicst - Initialize the unicast addresses.
2009  */
2010 static void
2011 ixgbe_init_unicst(ixgbe_t *ixgbe)
2012 {
2013 	struct ixgbe_hw *hw = &ixgbe->hw;
2014 	uint8_t *mac_addr;
2015 	int slot;
2016 	/*
2017 	 * Here we should consider two situations:
2018 	 *
2019 	 * 1. Chipset is initialized for the first time:
2020 	 *    Clear all the multiple unicast addresses.
2021 	 *
2022 	 * 2. Chipset is reset:
2023 	 *    Recover the multiple unicast addresses from the
2024 	 *    software data structure to the RAR registers.
2025 	 */
2026 	if (!ixgbe->unicst_init) {
2027 		/*
2028 		 * Initialize the multiple unicast addresses
2029 		 */
2030 		ixgbe->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
2031 		ixgbe->unicst_avail = ixgbe->unicst_total;
2032 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2033 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2034 			bzero(mac_addr, ETHERADDRL);
2035 			(void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL);
2036 			ixgbe->unicst_addr[slot].mac.set = 0;
2037 		}
2038 		ixgbe->unicst_init = B_TRUE;
2039 	} else {
2040 		/* Re-configure the RAR registers */
2041 		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2042 			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2043 			if (ixgbe->unicst_addr[slot].mac.set == 1) {
2044 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2045 				    NULL, IXGBE_RAH_AV);
2046 			} else {
2047 				bzero(mac_addr, ETHERADDRL);
2048 				(void) ixgbe_set_rar(hw, slot, mac_addr,
2049 				    NULL, NULL);
2050 			}
2051 		}
2052 	}
2053 }
2054 
2055 /*
2056  * ixgbe_unicst_set - Set the unicast address to the specified slot.
2057  */
2058 int
2059 ixgbe_unicst_set(ixgbe_t *ixgbe, const uint8_t *mac_addr,
2060     int slot)
2061 {
2062 	struct ixgbe_hw *hw = &ixgbe->hw;
2063 
2064 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2065 
2066 	/*
2067 	 * Save the unicast address in the software data structure
2068 	 */
2069 	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
2070 
2071 	/*
2072 	 * Set the unicast address to the RAR register
2073 	 */
2074 	(void) ixgbe_set_rar(hw, slot, (uint8_t *)mac_addr, NULL, IXGBE_RAH_AV);
2075 
2076 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2077 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2078 		return (EIO);
2079 	}
2080 
2081 	return (0);
2082 }
2083 
2084 /*
2085  * ixgbe_unicst_find - Find the slot for the specified unicast address
2086  */
2087 int
2088 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
2089 {
2090 	int slot;
2091 
2092 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2093 
2094 	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2095 		if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
2096 		    mac_addr, ETHERADDRL) == 0)
2097 			return (slot);
2098 	}
2099 
2100 	return (-1);
2101 }
2102 
2103 /*
2104  * ixgbe_multicst_add - Add a multicast address.
2105  */
2106 int
2107 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2108 {
2109 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2110 
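	/*
	 * A valid multicast address must have the I/G bit (the least
	 * significant bit of the first octet) set.
	 */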
2111 	if ((multiaddr[0] & 01) == 0) {
2112 		return (EINVAL);
2113 	}
2114 
2115 	if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2116 		return (ENOENT);
2117 	}
2118 
2119 	bcopy(multiaddr,
2120 	    &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2121 	ixgbe->mcast_count++;
2122 
2123 	/*
2124 	 * Update the multicast table in the hardware
2125 	 */
2126 	ixgbe_setup_multicst(ixgbe);
2127 
2128 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2129 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2130 		return (EIO);
2131 	}
2132 
2133 	return (0);
2134 }
2135 
2136 /*
2137  * ixgbe_multicst_remove - Remove a multicast address.
2138  */
2139 int
2140 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2141 {
2142 	int i;
2143 
2144 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2145 
2146 	for (i = 0; i < ixgbe->mcast_count; i++) {
2147 		if (bcmp(multiaddr, &ixgbe->mcast_table[i],
2148 		    ETHERADDRL) == 0) {
2149 			for (i++; i < ixgbe->mcast_count; i++) {
2150 				ixgbe->mcast_table[i - 1] =
2151 				    ixgbe->mcast_table[i];
2152 			}
2153 			ixgbe->mcast_count--;
2154 			break;
2155 		}
2156 	}
2157 
2158 	/*
2159 	 * Update the multicast table in the hardware
2160 	 */
2161 	ixgbe_setup_multicst(ixgbe);
2162 
2163 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2164 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2165 		return (EIO);
2166 	}
2167 
2168 	return (0);
2169 }
2170 
2171 /*
2172  * ixgbe_setup_multicast - Setup multicast data structures.
2173  * ixgbe_setup_multicst - Setup multicast data structures.
2174  *
2175  * This routine initializes all of the multicast related structures
2176  * and saves them in the hardware registers.
2177 static void
2178 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2179 {
2180 	uint8_t *mc_addr_list;
2181 	uint32_t mc_addr_count;
2182 	struct ixgbe_hw *hw = &ixgbe->hw;
2183 
2184 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2185 
2186 	ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2187 
2188 	mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2189 	mc_addr_count = ixgbe->mcast_count;
2190 
2191 	/*
2192 	 * Update the multicast addresses to the MTA registers
2193 	 */
2194 	(void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2195 	    ixgbe_mc_table_itr);
2196 }
2197 
2198 /*
2199  * ixgbe_get_conf - Get driver configurations set in driver.conf.
2200  *
2201  * This routine gets user-configured values out of the configuration
2202  * file ixgbe.conf.
2203  *
2204  * For each configurable value, there is a minimum, a maximum, and a
2205  * default.
2206  * If the user does not configure a value, use the default.
2207  * If the user configures below the minimum, use the minimum.
2208  * If the user configures above the maximum, use the maximum.
2209  */
2210 static void
2211 ixgbe_get_conf(ixgbe_t *ixgbe)
2212 {
2213 	struct ixgbe_hw *hw = &ixgbe->hw;
2214 	uint32_t flow_control;
2215 
2216 	/*
2217 	 * ixgbe driver supports the following user configurations:
2218 	 *
2219 	 * Jumbo frame configuration:
2220 	 *    default_mtu
2221 	 *
2222 	 * Ethernet flow control configuration:
2223 	 *    flow_control
2224 	 *
2225 	 * Multiple rings configurations:
2226 	 *    tx_queue_number
2227 	 *    tx_ring_size
2228 	 *    rx_queue_number
2229 	 *    rx_ring_size
2230 	 *
2231 	 * Call ixgbe_get_prop() to get the value for a specific
2232 	 * configuration parameter.
2233 	 */
2234 
2235 	/*
2236 	 * Jumbo frame configuration - max_frame_size controls host buffer
2237 	 * allocation, so includes MTU, ethernet header, vlan tag and
2238 	 * frame check sequence.
2239 	 */
2240 	ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
2241 	    MIN_MTU, MAX_MTU, DEFAULT_MTU);
2242 
2243 	ixgbe->max_frame_size = ixgbe->default_mtu +
2244 	    sizeof (struct ether_vlan_header) + ETHERFCSL;
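
	/*
	 * Example (for illustration): with the standard 1500-byte MTU,
	 * max_frame_size works out to 1500 + 18 (VLAN-tagged ethernet
	 * header) + 4 (FCS) = 1522 bytes.
	 */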
2245 
2246 	/*
2247 	 * Ethernet flow control configuration
2248 	 */
2249 	flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
2250 	    ixgbe_fc_none, 3, ixgbe_fc_none);
2251 	if (flow_control == 3)
2252 		flow_control = ixgbe_fc_default;
2253 
2254 	hw->fc.type = flow_control;
2255 
2256 	/*
2257 	 * Multiple rings configurations
2258 	 */
2259 	ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
2260 	    MIN_TX_QUEUE_NUM, MAX_TX_QUEUE_NUM, DEFAULT_TX_QUEUE_NUM);
2261 	ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
2262 	    MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
2263 
2264 	ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
2265 	    MIN_RX_QUEUE_NUM, MAX_RX_QUEUE_NUM, DEFAULT_RX_QUEUE_NUM);
2266 	ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
2267 	    MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
2268 
2269 	/*
2270 	 * Multiple groups configuration
2271 	 */
2272 	ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
2273 	    MIN_RX_GROUP_NUM, MAX_RX_GROUP_NUM, DEFAULT_RX_GROUP_NUM);
2274 
2275 	ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
2276 	    0, 1, DEFAULT_MR_ENABLE);
2277 
2278 	if (ixgbe->mr_enable == B_FALSE) {
2279 		ixgbe->num_tx_rings = 1;
2280 		ixgbe->num_rx_rings = 1;
2281 		ixgbe->num_rx_groups = 1;
2282 	}
2283 
2284 	/*
2285 	 * Tunable used to force an interrupt type. The only use is
2286 	 * for testing of the lesser interrupt types.
2287 	 * 0 = don't force interrupt type
2288 	 * 1 = force interrupt type MSI-X
2289 	 * 2 = force interrupt type MSI
2290 	 * 3 = force interrupt type Legacy
2291 	 */
2292 	ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
2293 	    IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
2294 	ixgbe_log(ixgbe, "interrupt force: %d", ixgbe->intr_force);
2295 
2296 	ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
2297 	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
2298 	ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
2299 	    0, 1, DEFAULT_RX_HCKSUM_ENABLE);
2300 	ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
2301 	    0, 1, DEFAULT_LSO_ENABLE);
2302 	ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
2303 	    0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
2304 
2305 	/*
2306 	 * ixgbe LSO needs the tx h/w checksum support.
2307 	 * LSO will be disabled if tx h/w checksum is not
2308 	 * enabled.
2309 	 */
2310 	if (ixgbe->tx_hcksum_enable == B_FALSE) {
2311 		ixgbe->lso_enable = B_FALSE;
2312 	}
2313 
2314 	ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
2315 	    MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
2316 	    DEFAULT_TX_COPY_THRESHOLD);
2317 	ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
2318 	    PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
2319 	    MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
2320 	ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
2321 	    PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
2322 	    MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
2323 	ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
2324 	    PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
2325 	    MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
2326 
2327 	ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
2328 	    MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
2329 	    DEFAULT_RX_COPY_THRESHOLD);
2330 	ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
2331 	    MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
2332 	    DEFAULT_RX_LIMIT_PER_INTR);
2333 
2334 	ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
2335 	    MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
2336 	    DEFAULT_INTR_THROTTLING);
2337 }
2338 
2339 /*
2340  * ixgbe_get_prop - Get a property value out of the configuration file
2341  * ixgbe.conf.
2342  *
2343  * Caller provides the name of the property, a default value, a minimum
2344  * value, and a maximum value.
2345  *
2346  * Return configured value of the property, with default, minimum and
2347  * maximum properly applied.
2348  */
2349 static int
2350 ixgbe_get_prop(ixgbe_t *ixgbe,
2351     char *propname,	/* name of the property */
2352     int minval,		/* minimum acceptable value */
2353     int maxval,		/* maximum acceptable value */
2354     int defval)		/* default value */
2355 {
2356 	int value;
2357 
2358 	/*
2359 	 * Call ddi_prop_get_int() to read the conf settings
2360 	 */
2361 	value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
2362 	    DDI_PROP_DONTPASS, propname, defval);
2363 	if (value > maxval)
2364 		value = maxval;
2365 
2366 	if (value < minval)
2367 		value = minval;
2368 
2369 	return (value);
2370 }
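
/*
 * Usage sketch (illustrative; assuming PROP_DEFAULT_MTU names the
 * "default_mtu" property): a driver.conf entry such as
 *
 *	default_mtu = 9000;
 *
 * is picked up by ixgbe_get_conf() above, and any value outside the
 * [minval, maxval] range passed to ixgbe_get_prop() is silently
 * clamped to the nearest bound.
 */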
2371 
2372 /*
2373  * ixgbe_driver_setup_link - Use the link properties to set up the link.
2374  */
2375 int
2376 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
2377 {
2378 	struct ixgbe_mac_info *mac;
2379 	struct ixgbe_phy_info *phy;
2380 	boolean_t invalid;
2381 
2382 	mac = &ixgbe->hw.mac;
2383 	phy = &ixgbe->hw.phy;
2384 	invalid = B_FALSE;
2385 
2386 	if (ixgbe->param_adv_autoneg_cap == 1) {
2387 		mac->autoneg = B_TRUE;
2388 		phy->autoneg_advertised = 0;
2389 
2390 		/*
2391 		 * No half duplex support with 10Gb parts
2392 		 */
2393 		if (ixgbe->param_adv_10000fdx_cap == 1)
2394 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
2395 
2396 		if (ixgbe->param_adv_1000fdx_cap == 1)
2397 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
2398 
2399 		if (ixgbe->param_adv_100fdx_cap == 1)
2400 			phy->autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
2401 
2402 		if (phy->autoneg_advertised == 0)
2403 			invalid = B_TRUE;
2404 	} else {
2405 		ixgbe->hw.mac.autoneg = B_FALSE;
2406 	}
2407 
2408 	if (invalid) {
2409 		ixgbe_notice(ixgbe, "Invalid link settings. Setting link to "
2410 		    "autonegotiation with full link capabilities.");
2411 		ixgbe->hw.mac.autoneg = B_TRUE;
2412 	}
2413 
2414 	if (setup_hw) {
2415 		if (ixgbe_setup_link(&ixgbe->hw) != IXGBE_SUCCESS)
2416 			return (IXGBE_FAILURE);
2417 	}
2418 
2419 	return (IXGBE_SUCCESS);
2420 }
2421 
2422 /*
2423  * ixgbe_driver_link_check - Link status processing.
2424  */
2425 static boolean_t
2426 ixgbe_driver_link_check(ixgbe_t *ixgbe)
2427 {
2428 	struct ixgbe_hw *hw = &ixgbe->hw;
2429 	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
2430 	boolean_t link_up = B_FALSE;
2431 	boolean_t link_changed = B_FALSE;
2432 
2433 	ASSERT(mutex_owned(&ixgbe->gen_lock));
2434 
2435 	(void) ixgbe_check_link(hw, &speed, &link_up);
2436 	if (link_up) {
2437 		/*
2438 		 * The link is up; check whether it was marked as down earlier.
2439 		 */
2440 		if (ixgbe->link_state != LINK_STATE_UP) {
2441 			switch (speed) {
2442 			case IXGBE_LINK_SPEED_10GB_FULL:
2443 				ixgbe->link_speed = SPEED_10GB;
2444 				break;
2445 			case IXGBE_LINK_SPEED_1GB_FULL:
2446 				ixgbe->link_speed = SPEED_1GB;
2447 				break;
2448 			case IXGBE_LINK_SPEED_100_FULL:
2449 				ixgbe->link_speed = SPEED_100;
2450 			}
2451 			ixgbe->link_duplex = LINK_DUPLEX_FULL;
2452 			ixgbe->link_state = LINK_STATE_UP;
2453 			ixgbe->link_down_timeout = 0;
2454 			link_changed = B_TRUE;
2455 		}
2456 	} else {
2457 		if (ixgbe->link_state != LINK_STATE_DOWN) {
2458 			ixgbe->link_speed = 0;
2459 			ixgbe->link_duplex = 0;
2460 			ixgbe->link_state = LINK_STATE_DOWN;
2461 			link_changed = B_TRUE;
2462 		}
2463 
2464 		if (ixgbe->ixgbe_state & IXGBE_STARTED) {
2465 			if (ixgbe->link_down_timeout < MAX_LINK_DOWN_TIMEOUT) {
2466 				ixgbe->link_down_timeout++;
2467 			} else if (ixgbe->link_down_timeout ==
2468 			    MAX_LINK_DOWN_TIMEOUT) {
2469 				ixgbe_tx_clean(ixgbe);
2470 				ixgbe->link_down_timeout++;
2471 			}
2472 		}
2473 	}
2474 
2475 	return (link_changed);
2476 }
2477 
2478 /*
2479  * ixgbe_local_timer - Driver watchdog function.
2480  *
2481  * This function handles the transmit stall check and restarts the
2482  * watchdog timer.
2483  */
2484 static void
2485 ixgbe_local_timer(void *arg)
2486 {
2487 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
2488 
2489 	if (ixgbe_stall_check(ixgbe)) {
2490 		ixgbe->reset_count++;
2491 		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
2492 			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
2493 	}
2494 
2495 	ixgbe_restart_watchdog_timer(ixgbe);
2496 }
2497 
2498 /*
2499  * ixgbe_stall_check - Check for transmit stall.
2500  *
2501  * This function checks if the adapter is stalled (in transmit).
2502  *
2503  * It is called each time the watchdog timeout is invoked.
2504  * If the transmit descriptor reclaim continuously fails,
2505  * the watchdog value will increment by 1. If the watchdog
2506  * value exceeds the threshold, the ixgbe is assumed to
2507  * value exceeds the threshold, the adapter is assumed to
2508  * have stalled and needs to be reset.
2509 static boolean_t
2510 ixgbe_stall_check(ixgbe_t *ixgbe)
2511 {
2512 	ixgbe_tx_ring_t *tx_ring;
2513 	boolean_t result;
2514 	int i;
2515 
2516 	if (ixgbe->link_state != LINK_STATE_UP)
2517 		return (B_FALSE);
2518 
2519 	/*
2520 	 * If any tx ring is stalled, we'll reset the chipset
2521 	 */
2522 	result = B_FALSE;
2523 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
2524 		tx_ring = &ixgbe->tx_rings[i];
2525 		tx_ring->tx_recycle(tx_ring);
2526 
2527 		if (tx_ring->recycle_fail > 0)
2528 			tx_ring->stall_watchdog++;
2529 		else
2530 			tx_ring->stall_watchdog = 0;
2531 
2532 		if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
2533 			result = B_TRUE;
2534 			break;
2535 		}
2536 	}
2537 
2538 	if (result) {
2539 		tx_ring->stall_watchdog = 0;
2540 		tx_ring->recycle_fail = 0;
2541 	}
2542 
2543 	return (result);
2544 }
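
/*
 * Example (for illustration): the watchdog fires once per second (see
 * ixgbe_arm_watchdog_timer()), so a ring whose recycle_fail stays
 * non-zero for STALL_WATCHDOG_TIMEOUT consecutive ticks is declared
 * stalled, and ixgbe_local_timer() then resets the chip.
 */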
2545 
2546 
2547 /*
2548  * is_valid_mac_addr - Check if the mac address is valid.
2549  */
2550 static boolean_t
2551 is_valid_mac_addr(uint8_t *mac_addr)
2552 {
2553 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
2554 	const uint8_t addr_test2[6] =
2555 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2556 
2557 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
2558 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
2559 		return (B_FALSE);
2560 
2561 	return (B_TRUE);
2562 }
2563 
2564 static boolean_t
2565 ixgbe_find_mac_address(ixgbe_t *ixgbe)
2566 {
2567 #ifdef __sparc
2568 	struct ixgbe_hw *hw = &ixgbe->hw;
2569 	uchar_t *bytes;
2570 	struct ether_addr sysaddr;
2571 	uint_t nelts;
2572 	int err;
2573 	boolean_t found = B_FALSE;
2574 
2575 	/*
2576 	 * The "vendor's factory-set address" may already have
2577 	 * been extracted from the chip, but if the property
2578 	 * "local-mac-address" is set we use that instead.
2579 	 *
2580 	 * We check whether it looks like an array of 6
2581 	 * bytes (which it should, if OBP set it).  If we can't
2582 	 * make sense of it this way, we'll ignore it.
2583 	 */
2584 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
2585 	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
2586 	if (err == DDI_PROP_SUCCESS) {
2587 		if (nelts == ETHERADDRL) {
2588 			while (nelts--)
2589 				hw->mac.addr[nelts] = bytes[nelts];
2590 			found = B_TRUE;
2591 		}
2592 		ddi_prop_free(bytes);
2593 	}
2594 
2595 	/*
2596 	 * Look up the OBP property "local-mac-address?". If the user has set
2597 	 * 'local-mac-address? = false', use "the system address" instead.
2598 	 */
2599 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
2600 	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
2601 		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
2602 			if (localetheraddr(NULL, &sysaddr) != 0) {
2603 				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
2604 				found = B_TRUE;
2605 			}
2606 		}
2607 		ddi_prop_free(bytes);
2608 	}
2609 
2610 	/*
2611 	 * Finally(!), if there's a valid "mac-address" property (created
2612 	 * if we netbooted from this interface), we must use this instead
2613 	 * of any of the above to ensure that the NFS/install server doesn't
2614 	 * get confused by the address changing as Solaris takes over!
2615 	 */
2616 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
2617 	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
2618 	if (err == DDI_PROP_SUCCESS) {
2619 		if (nelts == ETHERADDRL) {
2620 			while (nelts--)
2621 				hw->mac.addr[nelts] = bytes[nelts];
2622 			found = B_TRUE;
2623 		}
2624 		ddi_prop_free(bytes);
2625 	}
2626 
2627 	if (found) {
2628 		bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
2629 		return (B_TRUE);
2630 	}
2631 #else
2632 	_NOTE(ARGUNUSED(ixgbe));
2633 #endif
2634 
2635 	return (B_TRUE);
2636 }
2637 
2638 #pragma inline(ixgbe_arm_watchdog_timer)
2639 static void
2640 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
2641 {
2642 	/*
2643 	 * Fire a watchdog timer that invokes ixgbe_local_timer() in one second
2644 	 */
2645 	ixgbe->watchdog_tid =
2646 	    timeout(ixgbe_local_timer,
2647 	    (void *)ixgbe, 1 * drv_usectohz(1000000));
2648 
2649 }
2650 
2651 /*
2652  * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
2653  */
2654 void
2655 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
2656 {
2657 	mutex_enter(&ixgbe->watchdog_lock);
2658 
2659 	if (!ixgbe->watchdog_enable) {
2660 		ixgbe->watchdog_enable = B_TRUE;
2661 		ixgbe->watchdog_start = B_TRUE;
2662 		ixgbe_arm_watchdog_timer(ixgbe);
2663 	}
2664 
2665 	mutex_exit(&ixgbe->watchdog_lock);
2666 }
2667 
2668 /*
2669  * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
2670  */
2671 void
2672 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
2673 {
2674 	timeout_id_t tid;
2675 
2676 	mutex_enter(&ixgbe->watchdog_lock);
2677 
2678 	ixgbe->watchdog_enable = B_FALSE;
2679 	ixgbe->watchdog_start = B_FALSE;
2680 	tid = ixgbe->watchdog_tid;
2681 	ixgbe->watchdog_tid = 0;
2682 
2683 	mutex_exit(&ixgbe->watchdog_lock);
2684 
2685 	if (tid != 0)
2686 		(void) untimeout(tid);
2687 }
2688 
2689 /*
2690  * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
2691  */
2692 static void
2693 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
2694 {
2695 	mutex_enter(&ixgbe->watchdog_lock);
2696 
2697 	if (ixgbe->watchdog_enable) {
2698 		if (!ixgbe->watchdog_start) {
2699 			ixgbe->watchdog_start = B_TRUE;
2700 			ixgbe_arm_watchdog_timer(ixgbe);
2701 		}
2702 	}
2703 
2704 	mutex_exit(&ixgbe->watchdog_lock);
2705 }
2706 
2707 /*
2708  * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
2709  */
2710 static void
2711 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
2712 {
2713 	mutex_enter(&ixgbe->watchdog_lock);
2714 
2715 	if (ixgbe->watchdog_start)
2716 		ixgbe_arm_watchdog_timer(ixgbe);
2717 
2718 	mutex_exit(&ixgbe->watchdog_lock);
2719 }
2720 
2721 /*
2722  * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
2723  */
2724 static void
2725 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
2726 {
2727 	timeout_id_t tid;
2728 
2729 	mutex_enter(&ixgbe->watchdog_lock);
2730 
2731 	ixgbe->watchdog_start = B_FALSE;
2732 	tid = ixgbe->watchdog_tid;
2733 	ixgbe->watchdog_tid = 0;
2734 
2735 	mutex_exit(&ixgbe->watchdog_lock);
2736 
2737 	if (tid != 0)
2738 		(void) untimeout(tid);
2739 }
2740 
2741 /*
2742  * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
2743  */
2744 static void
2745 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
2746 {
2747 	struct ixgbe_hw *hw = &ixgbe->hw;
2748 
2749 	/*
2750 	 * mask all interrupts off
2751 	 */
2752 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
2753 
2754 	/*
2755 	 * for MSI-X, also disable autoclear
2756 	 */
2757 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
2758 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
2759 	}
2760 
2761 	IXGBE_WRITE_FLUSH(hw);
2762 }
2763 
2764 /*
2765  * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
2766  */
2767 static void
2768 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
2769 {
2770 	struct ixgbe_hw *hw = &ixgbe->hw;
2771 	uint32_t eims, eiac, gpie;
2772 
2773 	gpie = 0;
2774 	eims = IXGBE_EIMS_ENABLE_MASK;	/* shared code default */
2775 	eims &= ~IXGBE_EIMS_TCP_TIMER;	/* minus tcp timer */
2776 
2777 	/*
2778 	 * msi-x mode
2779 	 */
2780 	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
2781 		/* enable autoclear but not on bits 29:20 */
2782 		eiac = (eims & ~0x3ff00000);
2783 
2784 		/* general purpose interrupt enable */
2785 		gpie |= (IXGBE_GPIE_MSIX_MODE |
2786 		    IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
2787 	/*
2788 	 * non-msi-x mode
2789 	 */
2790 	} else {
2791 
2792 		/* disable autoclear, leave gpie at default */
2793 		eiac = 0;
2794 	}
2795 
2796 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims);
2797 	IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
2798 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2799 	IXGBE_WRITE_FLUSH(hw);
2800 }
2801 
2802 /*
2803  * ixgbe_loopback_ioctl - Loopback support.
2804  */
2805 enum ioc_reply
2806 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
2807 {
2808 	lb_info_sz_t *lbsp;
2809 	lb_property_t *lbpp;
2810 	uint32_t *lbmp;
2811 	uint32_t size;
2812 	uint32_t value;
2813 
2814 	if (mp->b_cont == NULL)
2815 		return (IOC_INVAL);
2816 
2817 	switch (iocp->ioc_cmd) {
2818 	default:
2819 		return (IOC_INVAL);
2820 
2821 	case LB_GET_INFO_SIZE:
2822 		size = sizeof (lb_info_sz_t);
2823 		if (iocp->ioc_count != size)
2824 			return (IOC_INVAL);
2825 
2826 		value = sizeof (lb_normal);
2827 		value += sizeof (lb_mac);
2828 
2829 		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
2830 		*lbsp = value;
2831 		break;
2832 
2833 	case LB_GET_INFO:
2834 		value = sizeof (lb_normal);
2835 		value += sizeof (lb_mac);
2836 
2837 		size = value;
2838 		if (iocp->ioc_count != size)
2839 			return (IOC_INVAL);
2840 
2841 		value = 0;
2842 		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
2843 
2844 		lbpp[value++] = lb_normal;
2845 		lbpp[value++] = lb_mac;
2846 		break;
2847 
2848 	case LB_GET_MODE:
2849 		size = sizeof (uint32_t);
2850 		if (iocp->ioc_count != size)
2851 			return (IOC_INVAL);
2852 
2853 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
2854 		*lbmp = ixgbe->loopback_mode;
2855 		break;
2856 
2857 	case LB_SET_MODE:
2858 		size = 0;
2859 		if (iocp->ioc_count != sizeof (uint32_t))
2860 			return (IOC_INVAL);
2861 
2862 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
2863 		if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
2864 			return (IOC_INVAL);
2865 		break;
2866 	}
2867 
2868 	iocp->ioc_count = size;
2869 	iocp->ioc_error = 0;
2870 
2871 	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2872 		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2873 		return (IOC_INVAL);
2874 	}
2875 
2876 	return (IOC_REPLY);
2877 }
2878 
2879 /*
2880  * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
2881  */
2882 static boolean_t
2883 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
2884 {
2885 	struct ixgbe_hw *hw;
2886 
2887 	if (mode == ixgbe->loopback_mode)
2888 		return (B_TRUE);
2889 
2890 	hw = &ixgbe->hw;
2891 
2892 	ixgbe->loopback_mode = mode;
2893 
2894 	if (mode == IXGBE_LB_NONE) {
2895 		/*
2896 		 * Reset the chip
2897 		 */
2898 		hw->phy.autoneg_wait_to_complete = B_TRUE;
2899 		(void) ixgbe_reset(ixgbe);
2900 		hw->phy.autoneg_wait_to_complete = B_FALSE;
2901 		return (B_TRUE);
2902 	}
2903 
2904 	mutex_enter(&ixgbe->gen_lock);
2905 
2906 	switch (mode) {
2907 	default:
2908 		mutex_exit(&ixgbe->gen_lock);
2909 		return (B_FALSE);
2910 
2911 	case IXGBE_LB_INTERNAL_MAC:
2912 		ixgbe_set_internal_mac_loopback(ixgbe);
2913 		break;
2914 	}
2915 
2916 	mutex_exit(&ixgbe->gen_lock);
2917 
2918 	return (B_TRUE);
2919 }
2920 
2921 /*
2922  * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
2923  */
2924 static void
2925 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
2926 {
2927 	struct ixgbe_hw *hw;
2928 	uint32_t reg;
2929 	uint8_t atlas;
2930 
2931 	hw = &ixgbe->hw;
2932 
2933 	/*
2934 	 * Setup MAC loopback
2935 	 */
2936 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
2937 	reg |= IXGBE_HLREG0_LPBK;
2938 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
2939 
2940 	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
2941 	reg &= ~IXGBE_AUTOC_LMS_MASK;
2942 	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
2943 
2944 	/*
2945 	 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
2946 	 */
2947 	if (hw->mac.type == ixgbe_mac_82598EB) {
2948 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
2949 		    &atlas);
2950 		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
2951 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
2952 		    atlas);
2953 
2954 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
2955 		    &atlas);
2956 		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
2957 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
2958 		    atlas);
2959 
2960 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
2961 		    &atlas);
2962 		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
2963 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
2964 		    atlas);
2965 
2966 		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
2967 		    &atlas);
2968 		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
2969 		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
2970 		    atlas);
2971 	}
2972 }
2973 
2974 #pragma inline(ixgbe_intr_rx_work)
2975 /*
2976  * ixgbe_intr_rx_work - RX processing of ISR.
2977  */
2978 static void
2979 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
2980 {
2981 	mblk_t *mp;
2982 
2983 	mutex_enter(&rx_ring->rx_lock);
2984 
2985 	mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
2986 	mutex_exit(&rx_ring->rx_lock);
2987 
2988 	if (mp != NULL)
2989 		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
2990 		    rx_ring->ring_gen_num);
2991 }
2992 
2993 #pragma inline(ixgbe_intr_tx_work)
2994 /*
2995  * ixgbe_intr_tx_work - TX processing of ISR.
2996  */
2997 static void
2998 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
2999 {
3000 	/*
3001 	 * Recycle the tx descriptors
3002 	 */
3003 	tx_ring->tx_recycle(tx_ring);
3004 
3005 	/*
3006 	 * Schedule the re-transmit
3007 	 */
3008 	if (tx_ring->reschedule &&
3009 	    (tx_ring->tbd_free >= tx_ring->resched_thresh)) {
3010 		tx_ring->reschedule = B_FALSE;
3011 		mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
3012 		    tx_ring->ring_handle);
3013 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
3014 	}
3015 }
3016 
3017 #pragma inline(ixgbe_intr_other_work)
3018 /*
3019  * ixgbe_intr_other_work - Other processing of ISR.
3020  */
3021 static void
3022 ixgbe_intr_other_work(ixgbe_t *ixgbe)
3023 {
3024 	boolean_t link_changed;
3025 
3026 	ixgbe_stop_watchdog_timer(ixgbe);
3027 
3028 	mutex_enter(&ixgbe->gen_lock);
3029 
3030 	/*
3031 	 * Take care of link status change
3032 	 */
3033 	link_changed = ixgbe_driver_link_check(ixgbe);
3034 
3035 	/*
3036 	 * Get new phy state
3037 	 */
3038 	ixgbe_get_hw_state(ixgbe);
3039 
3040 	mutex_exit(&ixgbe->gen_lock);
3041 
3042 	if (link_changed)
3043 		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
3044 
3045 	ixgbe_start_watchdog_timer(ixgbe);
3046 }
3047 
3048 /*
3049  * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
3050  */
3051 static uint_t
3052 ixgbe_intr_legacy(void *arg1, void *arg2)
3053 {
3054 	_NOTE(ARGUNUSED(arg2));
3055 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3056 	struct ixgbe_hw *hw = &ixgbe->hw;
3057 	ixgbe_tx_ring_t *tx_ring;
3058 	ixgbe_rx_ring_t *rx_ring;
3059 	uint32_t eicr;
3060 	mblk_t *mp;
3061 	boolean_t tx_reschedule;
3062 	boolean_t link_changed;
3063 	uint_t result;
3064 
3065 
3066 	mutex_enter(&ixgbe->gen_lock);
3067 
3068 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
3069 		mutex_exit(&ixgbe->gen_lock);
3070 		return (DDI_INTR_UNCLAIMED);
3071 	}
3072 
3073 	mp = NULL;
3074 	tx_reschedule = B_FALSE;
3075 	link_changed = B_FALSE;
3076 
3077 	/*
3078 	 * Any bit set in eicr: claim this interrupt
3079 	 */
3080 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3081 	if (eicr) {
3082 		/*
3083 		 * For legacy interrupt, we have only one interrupt,
3084 		 * so we have only one rx ring and one tx ring enabled.
3085 		 */
3086 		ASSERT(ixgbe->num_rx_rings == 1);
3087 		ASSERT(ixgbe->num_tx_rings == 1);
3088 
3089 		/*
3090 		 * For legacy interrupt, rx rings[0] will use RTxQ[0].
3091 		 */
3092 		if (eicr & 0x1) {
3093 			/*
3094 			 * Clean the rx descriptors
3095 			 */
3096 			rx_ring = &ixgbe->rx_rings[0];
3097 			mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
3098 		}
3099 
3100 		/*
3101 		 * For legacy interrupt, tx rings[0] will use RTxQ[1].
3102 		 */
3103 		if (eicr & 0x2) {
3104 			/*
3105 			 * Recycle the tx descriptors
3106 			 */
3107 			tx_ring = &ixgbe->tx_rings[0];
3108 			tx_ring->tx_recycle(tx_ring);
3109 
3110 			/*
3111 			 * Schedule the re-transmit
3112 			 */
3113 			tx_reschedule = (tx_ring->reschedule &&
3114 			    (tx_ring->tbd_free >= tx_ring->resched_thresh));
3115 		}
3116 
3117 		if (eicr & IXGBE_EICR_LSC) {
3118 
3119 			/* take care of link status change */
3120 			link_changed = ixgbe_driver_link_check(ixgbe);
3121 
3122 			/* Get new phy state */
3123 			ixgbe_get_hw_state(ixgbe);
3124 		}
3125 
3126 		result = DDI_INTR_CLAIMED;
3127 	} else {
3128 		/*
3129 		 * No interrupt cause bits set: don't claim this interrupt.
3130 		 */
3131 		result = DDI_INTR_UNCLAIMED;
3132 	}
3133 
3134 	mutex_exit(&ixgbe->gen_lock);
3135 
3136 	/*
3137 	 * Do the following work outside of the gen_lock
3138 	 */
3139 	if (mp != NULL)
3140 		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
3141 		    rx_ring->ring_gen_num);
3142 
3143 	if (tx_reschedule)  {
3144 		tx_ring->reschedule = B_FALSE;
3145 		mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
3146 		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
3147 	}
3148 
3149 	if (link_changed)
3150 		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
3151 
3152 	return (result);
3153 }
3154 
3155 /*
3156  * ixgbe_intr_msi - Interrupt handler for MSI.
3157  */
3158 static uint_t
3159 ixgbe_intr_msi(void *arg1, void *arg2)
3160 {
3161 	_NOTE(ARGUNUSED(arg2));
3162 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3163 	struct ixgbe_hw *hw = &ixgbe->hw;
3164 	uint32_t eicr;
3165 
3166 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3167 
3168 	/*
3169 	 * For MSI interrupt, we have only one vector,
3170 	 * so we have only one rx ring and one tx ring enabled.
3171 	 */
3172 	ASSERT(ixgbe->num_rx_rings == 1);
3173 	ASSERT(ixgbe->num_tx_rings == 1);
3174 
3175 	/*
3176 	 * For MSI interrupt, rx rings[0] will use RTxQ[0].
3177 	 */
3178 	if (eicr & 0x1) {
3179 		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
3180 	}
3181 
3182 	/*
3183 	 * For MSI interrupt, tx rings[0] will use RTxQ[1].
3184 	 */
3185 	if (eicr & 0x2) {
3186 		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
3187 	}
3188 
3189 	if (eicr & IXGBE_EICR_LSC) {
3190 		ixgbe_intr_other_work(ixgbe);
3191 	}
3192 
3193 	return (DDI_INTR_CLAIMED);
3194 }
3195 
3196 /*
3197  * ixgbe_intr_rx_tx - Interrupt handler for rx and tx.
3198  */
3199 static uint_t
3200 ixgbe_intr_rx_tx(void *arg1, void *arg2)
3201 {
3202 	_NOTE(ARGUNUSED(arg2));
3203 	ixgbe_ring_vector_t *vect = (ixgbe_ring_vector_t *)arg1;
3204 	ixgbe_t *ixgbe = vect->ixgbe;
3205 	int r_idx = 0;
3206 
3207 	/*
3208 	 * Clean each rx ring that has its bit set in the map
3209 	 */
3210 	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
3211 	while (r_idx >= 0) {
3212 		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
3213 		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
3214 		    (ixgbe->num_rx_rings - 1));
3215 	}
3216 
3217 	/*
3218 	 * Clean each tx ring that has its bit set in the map
3219 	 */
3220 	r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
3221 	while (r_idx >= 0) {
3222 		ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
3223 		r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
3224 		    (ixgbe->num_tx_rings - 1));
3225 	}
3226 
3227 	return (DDI_INTR_CLAIMED);
3228 }
3229 
3230 /*
3231  * ixgbe_intr_other - Interrupt handler for other.
3232  *
3233  * Only look for other work if the right bits are set in the
3234  * Interrupt Cause Register.
3235  */
3236 static uint_t
3237 ixgbe_intr_other(void *arg1, void *arg2)
3238 {
3239 	_NOTE(ARGUNUSED(arg2));
3240 	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3241 	struct ixgbe_hw *hw = &ixgbe->hw;
3242 	uint32_t eicr;
3243 
3244 	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3245 
3246 	/*
3247 	 * Check the interrupt cause bits; only a link status
3248 	 * change is processed here.
3249 	 */
3250 	if (eicr & IXGBE_EICR_LSC) {
3251 		ixgbe_intr_other_work(ixgbe);
3252 	}
3253 
3254 	return (DDI_INTR_CLAIMED);
3255 }
3256 
3257 /*
3258  * ixgbe_alloc_intrs - Allocate interrupts for the driver.
3259  *
3260  * Normal sequence is to try MSI-X; if not successful, try MSI;
3261  * if not successful, try Legacy.
3262  * ixgbe->intr_force can be used to force sequence to start with
3263  * any of the 3 types.
3264  * If MSI-X is not used, number of tx/rx rings is forced to 1.
3265  */
3266 static int
3267 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
3268 {
3269 	dev_info_t *devinfo;
3270 	int intr_types;
3271 	int rc;
3272 
3273 	devinfo = ixgbe->dip;
3274 
3275 	/*
3276 	 * Get supported interrupt types
3277 	 */
3278 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
3279 
3280 	if (rc != DDI_SUCCESS) {
3281 		ixgbe_log(ixgbe,
3282 		    "Get supported interrupt types failed: %d", rc);
3283 		return (IXGBE_FAILURE);
3284 	}
3285 	IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
3286 
3287 	ixgbe->intr_type = 0;
3288 
3289 	/*
3290 	 * Install MSI-X interrupts
3291 	 */
3292 	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
3293 	    (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
3294 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
3295 		if (rc == IXGBE_SUCCESS)
3296 			return (IXGBE_SUCCESS);
3297 
3298 		ixgbe_log(ixgbe,
3299 		    "Allocate MSI-X failed, trying MSI interrupts...");
3300 	}
3301 
3302 	/*
3303 	 * MSI-X not used, force rings and groups to 1
3304 	 */
3305 	ixgbe->num_rx_rings = 1;
3306 	ixgbe->num_rx_groups = 1;
3307 	ixgbe->num_tx_rings = 1;
3308 	ixgbe_log(ixgbe,
3309 	    "MSI-X not used, forcing rings and groups to 1");
3310 
3311 	/*
3312 	 * Install MSI interrupts
3313 	 */
3314 	if ((intr_types & DDI_INTR_TYPE_MSI) &&
3315 	    (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
3316 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
3317 		if (rc == IXGBE_SUCCESS)
3318 			return (IXGBE_SUCCESS);
3319 
3320 		ixgbe_log(ixgbe,
3321 		    "Allocate MSI failed, trying Legacy interrupts...");
3322 	}
3323 
3324 	/*
3325 	 * Install legacy interrupts
3326 	 */
3327 	if (intr_types & DDI_INTR_TYPE_FIXED) {
3328 		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
3329 		if (rc == IXGBE_SUCCESS)
3330 			return (IXGBE_SUCCESS);
3331 
3332 		ixgbe_log(ixgbe,
3333 		    "Allocate Legacy interrupts failed");
3334 	}
3335 
3336 	/*
3337 	 * If none of the 3 types succeeded, return failure
3338 	 */
3339 	return (IXGBE_FAILURE);
3340 }
3341 
3342 /*
3343  * ixgbe_alloc_intr_handles - Allocate interrupt handles.
3344  *
3345  * For legacy and MSI, only 1 handle is needed.  For MSI-X,
3346  * if fewer than 2 handles are available, return failure.
3347  * Upon success, this maps the vectors to rx and tx rings for
3348  * interrupts.
3349  */
3350 static int
3351 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
3352 {
3353 	dev_info_t *devinfo;
3354 	int request, count, avail, actual;
3355 	int minimum;
3356 	int rc;
3357 
3358 	devinfo = ixgbe->dip;
3359 
3360 	switch (intr_type) {
3361 	case DDI_INTR_TYPE_FIXED:
3362 		request = 1;	/* Request 1 legacy interrupt handle */
3363 		minimum = 1;
3364 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
3365 		break;
3366 
3367 	case DDI_INTR_TYPE_MSI:
3368 		request = 1;	/* Request 1 MSI interrupt handle */
3369 		minimum = 1;
3370 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
3371 		break;
3372 
3373 	case DDI_INTR_TYPE_MSIX:
3374 		/*
3375 		 * Best number of vectors for the adapter is
3376 		 * # rx rings + # tx rings + 1 for other.
3377 		 */
3378 		request = ixgbe->num_rx_rings + ixgbe->num_tx_rings + 1;
3379 		if (request > (IXGBE_MAX_RING_VECTOR + 1))
3380 			request = IXGBE_MAX_RING_VECTOR + 1;
3381 		minimum = 2;
3382 		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
3383 		break;
3384 
3385 	default:
3386 		ixgbe_log(ixgbe,
3387 		    "invalid call to ixgbe_alloc_intr_handles(): %d\n",
3388 		    intr_type);
3389 		return (IXGBE_FAILURE);
3390 	}
3391 	IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d  minimum: %d",
3392 	    request, minimum);
3393 
3394 	/*
3395 	 * Get number of supported interrupts
3396 	 */
3397 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
3398 	if ((rc != DDI_SUCCESS) || (count < minimum)) {
3399 		ixgbe_log(ixgbe,
3400 		    "Get interrupt number failed. Return: %d, count: %d",
3401 		    rc, count);
3402 		return (IXGBE_FAILURE);
3403 	}
3404 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
3405 
3406 	/*
3407 	 * Get number of available interrupts
3408 	 */
3409 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
3410 	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
3411 		ixgbe_log(ixgbe,
3412 		    "Get interrupt available number failed. "
3413 		    "Return: %d, available: %d", rc, avail);
3414 		return (IXGBE_FAILURE);
3415 	}
3416 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts available: %d", avail);
3417 
3418 	if (avail < request) {
3419 		ixgbe_log(ixgbe, "Request %d handles, %d available",
3420 		    request, avail);
3421 		request = avail;
3422 	}
3423 
3424 	actual = 0;
3425 	ixgbe->intr_cnt = 0;
3426 
3427 	/*
3428 	 * Allocate an array of interrupt handles
3429 	 */
3430 	ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
3431 	ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
3432 
3433 	rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
3434 	    request, &actual, DDI_INTR_ALLOC_NORMAL);
3435 	if (rc != DDI_SUCCESS) {
3436 		ixgbe_log(ixgbe, "Allocate interrupts failed. "
3437 		    "return: %d, request: %d, actual: %d",
3438 		    rc, request, actual);
3439 		goto alloc_handle_fail;
3440 	}
3441 	IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
3442 
3443 	ixgbe->intr_cnt = actual;
3444 
3445 	/*
3446 	 * Now we know the actual number of vectors allocated; fail
3447 	 * if it is below the required minimum.
3448 	 */
3449 	if (actual < minimum) {
3450 		ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
3451 		    actual);
3452 		goto alloc_handle_fail;
3453 	}
3454 
3455 	/*
3456 	 * Get priority for first vector, assume remaining are all the same
3457 	 */
3458 	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
3459 	if (rc != DDI_SUCCESS) {
3460 		ixgbe_log(ixgbe,
3461 		    "Get interrupt priority failed: %d", rc);
3462 		goto alloc_handle_fail;
3463 	}
3464 
3465 	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
3466 	if (rc != DDI_SUCCESS) {
3467 		ixgbe_log(ixgbe,
3468 		    "Get interrupt cap failed: %d", rc);
3469 		goto alloc_handle_fail;
3470 	}
3471 
3472 	ixgbe->intr_type = intr_type;
3473 
3474 	return (IXGBE_SUCCESS);
3475 
3476 alloc_handle_fail:
3477 	ixgbe_rem_intrs(ixgbe);
3478 
3479 	return (IXGBE_FAILURE);
3480 }
3481 
3482 /*
3483  * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
3484  *
3485  * Before adding the interrupt handlers, the interrupt vectors have
3486  * been allocated, and the rx/tx rings have also been allocated.
3487  */
3488 static int
3489 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
3490 {
3491 	int vector = 0;
3492 	int rc;
3493 
3494 	switch (ixgbe->intr_type) {
3495 	case DDI_INTR_TYPE_MSIX:
3496 		/*
3497 		 * Add interrupt handlers for the rx and tx rings, on
3498 		 * vectors [0 .. (ixgbe->intr_cnt - 2)].
3499 		 */
3500 		for (vector = 0; vector < (ixgbe->intr_cnt - 1); vector++) {
3501 			/*
3502 			 * install pointer to vect_map[vector]
3503 			 */
3504 			rc = ddi_intr_add_handler(ixgbe->htable[vector],
3505 			    (ddi_intr_handler_t *)ixgbe_intr_rx_tx,
3506 			    (void *)&ixgbe->vect_map[vector], NULL);
3507 
3508 			if (rc != DDI_SUCCESS) {
3509 				ixgbe_log(ixgbe,
3510 				    "Add rx interrupt handler failed. "
3511 				    "return: %d, vector: %d", rc, vector);
3512 				for (vector--; vector >= 0; vector--) {
3513 					(void) ddi_intr_remove_handler(
3514 					    ixgbe->htable[vector]);
3515 				}
3516 				return (IXGBE_FAILURE);
3517 			}
3518 		}
3519 
3520 		/*
3521 		 * Add interrupt handler for other: vector[ixgbe->intr_cnt - 1]
3522 		 */
3523 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
3524 		    (ddi_intr_handler_t *)ixgbe_intr_other,
3525 		    (void *)ixgbe, NULL);
3526 		if (rc != DDI_SUCCESS) {
3527 			ixgbe_log(ixgbe,
3528 			    "Add other interrupt handler failed: %d", rc);
3529 			return (IXGBE_FAILURE);
3530 		}
3531 
3532 		break;
3533 
3534 	case DDI_INTR_TYPE_MSI:
3535 		/*
3536 		 * Add interrupt handlers for the only vector
3537 		 */
3538 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
3539 		    (ddi_intr_handler_t *)ixgbe_intr_msi,
3540 		    (void *)ixgbe, NULL);
3541 
3542 		if (rc != DDI_SUCCESS) {
3543 			ixgbe_log(ixgbe,
3544 			    "Add MSI interrupt handler failed: %d", rc);
3545 			return (IXGBE_FAILURE);
3546 		}
3547 
3548 		break;
3549 
3550 	case DDI_INTR_TYPE_FIXED:
3551 		/*
3552 		 * Add interrupt handlers for the only vector
3553 		 */
3554 		rc = ddi_intr_add_handler(ixgbe->htable[vector],
3555 		    (ddi_intr_handler_t *)ixgbe_intr_legacy,
3556 		    (void *)ixgbe, NULL);
3557 
3558 		if (rc != DDI_SUCCESS) {
3559 			ixgbe_log(ixgbe,
3560 			    "Add legacy interrupt handler failed: %d", rc);
3561 			return (IXGBE_FAILURE);
3562 		}
3563 
3564 		break;
3565 
3566 	default:
3567 		return (IXGBE_FAILURE);
3568 	}
3569 
3570 	ASSERT(vector == (ixgbe->intr_cnt - 1));
3571 
3572 	return (IXGBE_SUCCESS);
3573 }
3574 
3575 #pragma inline(ixgbe_map_rxring_to_vector)
3576 /*
3577  * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
3578  */
3579 static void
3580 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
3581 {
3582 	ixgbe->vect_map[v_idx].ixgbe = ixgbe;
3583 
3584 	/*
3585 	 * Set bit in map
3586 	 */
3587 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
3588 
3589 	/*
3590 	 * Count bits set
3591 	 */
3592 	ixgbe->vect_map[v_idx].rxr_cnt++;
3593 
3594 	/*
3595 	 * Remember bit position
3596 	 */
3597 	ixgbe->rx_rings[r_idx].intr_vector = v_idx;
3598 	ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
3599 }
3600 
3601 #pragma inline(ixgbe_map_txring_to_vector)
3602 /*
3603  * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
3604  */
3605 static void
3606 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
3607 {
3608 	ixgbe->vect_map[v_idx].ixgbe = ixgbe;
3609 
3610 	/*
3611 	 * Set bit in map
3612 	 */
3613 	BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
3614 
3615 	/*
3616 	 * Count bits set
3617 	 */
3618 	ixgbe->vect_map[v_idx].txr_cnt++;
3619 
3620 	/*
3621 	 * Remember bit position
3622 	 */
3623 	ixgbe->tx_rings[t_idx].intr_vector = v_idx;
3624 	ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
3625 }
3626 
3627 /*
3628  * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
3629  * allocation register (IVAR).
3630  */
3631 static void
3632 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector)
3633 {
3634 	struct ixgbe_hw *hw = &ixgbe->hw;
3635 	u32 ivar, index;
3636 
3637 	msix_vector |= IXGBE_IVAR_ALLOC_VAL;
3638 	index = (intr_alloc_entry >> 2) & 0x1F;
3639 	ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3640 	ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
3641 	ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
3642 	IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3643 }
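
/*
 * Example (for illustration): each 32-bit IVAR register holds four
 * 8-bit allocation entries.  For intr_alloc_entry == 5, index is
 * (5 >> 2) & 0x1F == 1 and the byte offset is (5 & 0x3) == 1, so the
 * vector is written to bits 15:8 of IVAR(1).
 */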
3644 
3645 /*
3646  * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
3647  * given interrupt vector allocation register (IVAR).
3648  */
3649 static void
3650 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry)
3651 {
3652 	struct ixgbe_hw *hw = &ixgbe->hw;
3653 	u32 ivar, index;
3654 
3655 	index = (intr_alloc_entry >> 2) & 0x1F;
3656 	ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3657 	ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 * (intr_alloc_entry & 0x3)));
3658 	IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3659 }
3660 
3661 /*
3662  * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
3663  * given interrupt vector allocation register (IVAR).
3664  */
3665 static void
3666 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry)
3667 {
3668 	struct ixgbe_hw *hw = &ixgbe->hw;
3669 	u32 ivar, index;
3670 
3671 	index = (intr_alloc_entry >> 2) & 0x1F;
3672 	ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3673 	ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 * (intr_alloc_entry & 0x3)));
3674 	IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
3675 }
3676 
3677 /*
3678  * ixgbe_map_rings_to_vectors - Map descriptor rings to interrupt vectors.
3679  *
3680  * For MSI-X, this maps the rx and tx rings to vectors [0 .. (intr_cnt - 2)];
3681  * the last vector is used for the other interrupt.
3682  */
3683 static int
3684 ixgbe_map_rings_to_vectors(ixgbe_t *ixgbe)
3685 {
3686 	int i, vector = 0;
3687 
3688 	/* initialize vector map */
3689 	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
3690 
3691 	/*
3692 	 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
3693 	 * tx rings[0] on RTxQ[1].
3694 	 */
3695 	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
3696 		ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
3697 		ixgbe_map_txring_to_vector(ixgbe, 0, 1);
3698 		return (IXGBE_SUCCESS);
3699 	}
3700 
3701 	/*
3702 	 * Ring/vector mapping for MSI-X
3703 	 */
3704 
3705 	/*
3706 	 * Map vectors to rx rings
3707 	 */
3708 	for (i = 0; i < ixgbe->num_rx_rings; i++) {
3709 		ixgbe_map_rxring_to_vector(ixgbe, i, vector);
3710 		vector = (vector + 1) % (ixgbe->intr_cnt - 1);
3711 	}
3712 
3713 	/*
3714 	 * Map vectors to tx rings
3715 	 */
3716 	for (i = 0; i < ixgbe->num_tx_rings; i++) {
3717 		ixgbe_map_txring_to_vector(ixgbe, i, vector);
3718 		vector = (vector + 1) % (ixgbe->intr_cnt - 1);
3719 	}
3720 
3721 	return (IXGBE_SUCCESS);
3722 }
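
/*
 * Example (for illustration): with 4 rx rings, 2 tx rings and 5
 * allocated MSI-X vectors, the round-robin above maps rx rings 0-3 to
 * vectors 0-3 and tx rings 0-1 to vectors 0-1; vector 4 is left for
 * the "other" interrupt.
 */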
3723 
3724 /*
3725  * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
3726  *
3727  * This relies on ring/vector mapping already set up in the
3728  * vect_map[] structures.
3729  */
3730 static void
3731 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
3732 {
3733 	struct ixgbe_hw *hw = &ixgbe->hw;
3734 	ixgbe_ring_vector_t *vect;	/* vector bitmap */
3735 	int r_idx;	/* ring index */
3736 	int v_idx;	/* vector index */
3737 
3738 	/*
3739 	 * Clear any previous entries
3740 	 */
3741 	for (v_idx = 0; v_idx < IXGBE_IVAR_REG_NUM; v_idx++)
3742 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
3743 
3744 	/*
3745 	 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
3746 	 * tx rings[0] will use RTxQ[1].
3747 	 */
3748 	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
3749 		ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_RX_QUEUE(0), 0);
3750 		ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_TX_QUEUE(0), 1);
3751 		return;
3752 	}
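
	/*
	 * Note: this fixed mapping matches the eicr checks in
	 * ixgbe_intr_legacy() and ixgbe_intr_msi() above, which test
	 * bit 0 (0x1) for the rx ring and bit 1 (0x2) for the tx ring.
	 */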
3753 
3754 	/*
3755 	 * For MSI-X interrupt, "Other" is always on last vector.
3756 	 */
3757 	ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX,
3758 	    (ixgbe->intr_cnt - 1));
3759 
3760 	/*
3761 	 * For each interrupt vector, populate the IVAR table
3762 	 */
3763 	for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
3764 		vect = &ixgbe->vect_map[v_idx];
3765 
3766 		/*
3767 		 * For each rx ring bit set
3768 		 */
3769 		r_idx = bt_getlowbit(vect->rx_map, 0,
3770 		    (ixgbe->num_rx_rings - 1));
3771 
3772 		while (r_idx >= 0) {
3773 			ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_RX_QUEUE(r_idx),
3774 			    v_idx);
3775 			r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
3776 			    (ixgbe->num_rx_rings - 1));
3777 		}
3778 
3779 		/*
3780 		 * For each tx ring bit set
3781 		 */
3782 		r_idx = bt_getlowbit(vect->tx_map, 0,
3783 		    (ixgbe->num_tx_rings - 1));
3784 
3785 		while (r_idx >= 0) {
3786 			ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_TX_QUEUE(r_idx),
3787 			    v_idx);
3788 			r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
3789 			    (ixgbe->num_tx_rings - 1));
3790 		}
3791 	}
3792 }
3793 
3794 /*
3795  * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
3796  */
3797 static void
3798 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
3799 {
3800 	int i;
3801 	int rc;
3802 
3803 	for (i = 0; i < ixgbe->intr_cnt; i++) {
3804 		rc = ddi_intr_remove_handler(ixgbe->htable[i]);
3805 		if (rc != DDI_SUCCESS) {
3806 			IXGBE_DEBUGLOG_1(ixgbe,
3807 			    "Remove intr handler failed: %d", rc);
3808 		}
3809 	}
3810 }
3811 
3812 /*
3813  * ixgbe_rem_intrs - Remove the allocated interrupts.
3814  */
3815 static void
3816 ixgbe_rem_intrs(ixgbe_t *ixgbe)
3817 {
3818 	int i;
3819 	int rc;
3820 
3821 	for (i = 0; i < ixgbe->intr_cnt; i++) {
3822 		rc = ddi_intr_free(ixgbe->htable[i]);
3823 		if (rc != DDI_SUCCESS) {
3824 			IXGBE_DEBUGLOG_1(ixgbe,
3825 			    "Free intr failed: %d", rc);
3826 		}
3827 	}
3828 
3829 	kmem_free(ixgbe->htable, ixgbe->intr_size);
3830 	ixgbe->htable = NULL;
3831 }
3832 
3833 /*
3834  * ixgbe_enable_intrs - Enable all the ddi interrupts.
3835  */
3836 static int
3837 ixgbe_enable_intrs(ixgbe_t *ixgbe)
3838 {
3839 	int i;
3840 	int rc;
3841 
3842 	/*
3843 	 * Enable interrupts
3844 	 */
3845 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
3846 		/*
3847 		 * Call ddi_intr_block_enable() for MSI
3848 		 */
3849 		rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
3850 		if (rc != DDI_SUCCESS) {
3851 			ixgbe_log(ixgbe,
3852 			    "Enable block intr failed: %d", rc);
3853 			return (IXGBE_FAILURE);
3854 		}
3855 	} else {
3856 		/*
3857 		 * Call ddi_intr_enable() for Legacy/MSI non block enable
3858 		 */
3859 		for (i = 0; i < ixgbe->intr_cnt; i++) {
3860 			rc = ddi_intr_enable(ixgbe->htable[i]);
3861 			if (rc != DDI_SUCCESS) {
3862 				ixgbe_log(ixgbe,
3863 				    "Enable intr failed: %d", rc);
3864 				return (IXGBE_FAILURE);
3865 			}
3866 		}
3867 	}
3868 
3869 	return (IXGBE_SUCCESS);
3870 }
3871 
3872 /*
3873  * ixgbe_disable_intrs - Disable all the interrupts.
3874  */
3875 static int
3876 ixgbe_disable_intrs(ixgbe_t *ixgbe)
3877 {
3878 	int i;
3879 	int rc;
3880 
3881 	/*
3882 	 * Disable all interrupts
3883 	 */
3884 	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
3885 		rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
3886 		if (rc != DDI_SUCCESS) {
3887 			ixgbe_log(ixgbe,
3888 			    "Disable block intr failed: %d", rc);
3889 			return (IXGBE_FAILURE);
3890 		}
3891 	} else {
3892 		for (i = 0; i < ixgbe->intr_cnt; i++) {
3893 			rc = ddi_intr_disable(ixgbe->htable[i]);
3894 			if (rc != DDI_SUCCESS) {
3895 				ixgbe_log(ixgbe,
3896 				    "Disable intr failed: %d", rc);
3897 				return (IXGBE_FAILURE);
3898 			}
3899 		}
3900 	}
3901 
3902 	return (IXGBE_SUCCESS);
3903 }
3904 
3905 /*
3906  * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
3907  */
3908 static void
3909 ixgbe_get_hw_state(ixgbe_t *ixgbe)
3910 {
3911 	struct ixgbe_hw *hw = &ixgbe->hw;
3912 	uint32_t links;
3913 	uint32_t pcs1g_anlp = 0;
3914 	uint32_t pcs1g_ana = 0;
3915 
3916 	ASSERT(mutex_owned(&ixgbe->gen_lock));
3917 	ixgbe->param_lp_1000fdx_cap = 0;
3918 	ixgbe->param_lp_100fdx_cap  = 0;
3919 
3920 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
3921 	if (links & IXGBE_LINKS_PCS_1G_EN) {
3922 		pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
3923 		pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
3924 
3925 		ixgbe->param_lp_1000fdx_cap =
3926 		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
3927 		ixgbe->param_lp_100fdx_cap =
3928 		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
3929 	}
3930 
3931 	ixgbe->param_1000fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC)  ? 1 : 0;
3932 	ixgbe->param_100fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC)  ? 1 : 0;
3933 }
3934 
3935 /*
3936  * ixgbe_get_driver_control - Notify that driver is in control of device.
3937  */
3938 static void
3939 ixgbe_get_driver_control(struct ixgbe_hw *hw)
3940 {
3941 	uint32_t ctrl_ext;
3942 
3943 	/*
3944 	 * Notify firmware that driver is in control of device
3945 	 */
3946 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3947 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
3948 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3949 }
3950 
3951 /*
3952  * ixgbe_release_driver_control - Notify that driver is no longer in control
3953  * of device.
3954  */
3955 static void
3956 ixgbe_release_driver_control(struct ixgbe_hw *hw)
3957 {
3958 	uint32_t ctrl_ext;
3959 
3960 	/*
3961 	 * Notify firmware that driver is no longer in control of device
3962 	 */
3963 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3964 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
3965 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3966 }
3967 
3968 /*
3969  * ixgbe_atomic_reserve - Atomic decrease operation.
3970  */
3971 int
3972 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
3973 {
3974 	uint32_t oldval;
3975 	uint32_t newval;
3976 
3977 	/*
3978 	 * ATOMICALLY
3979 	 */
3980 	do {
3981 		oldval = *count_p;
3982 		if (oldval < n)
3983 			return (-1);
3984 		newval = oldval - n;
3985 	} while (atomic_cas_32(count_p, oldval, newval) != oldval);
3986 
3987 	return (newval);
3988 }
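
/*
 * Usage sketch (illustrative, assuming the tx ring keeps its free
 * descriptor count in tbd_free):
 *
 *	if (ixgbe_atomic_reserve(&tx_ring->tbd_free, desc_num) < 0)
 *		return (NULL);
 *
 * The compare-and-swap loop retries until the decrement applies to an
 * unmodified count, and fails without side effects when fewer than n
 * are available.
 */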
3989 
3990 /*
3991  * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
3992  */
3993 static uint8_t *
3994 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
3995 {
3996 	_NOTE(ARGUNUSED(hw));
3997 	_NOTE(ARGUNUSED(vmdq));
3998 	uint8_t *addr = *upd_ptr;
3999 	uint8_t *new_ptr;
4000 
4001 	new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
4002 	*upd_ptr = new_ptr;
4003 	return (addr);
4004 }
4005 
4006 /*
4007  * FMA support
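 *
 * A typical caller pattern (illustrative) is to check a handle after
 * register access or DMA and report any service impact:
 *
 *	if (ixgbe_check_acc_handle(handle) != DDI_FM_OK)
 *		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);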
4008  */
4009 int
4010 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
4011 {
4012 	ddi_fm_error_t de;
4013 
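	/* Retrieve the access error status, then clear it for reuse. */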
4014 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
4015 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
4016 	return (de.fme_status);
4017 }
4018 
4019 int
4020 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
4021 {
4022 	ddi_fm_error_t de;
4023 
4024 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
4025 	return (de.fme_status);
4026 }
4027 
4028 /*
4029  * ixgbe_fm_error_cb - The IO fault service error handling callback function.
4030  */
4031 static int
4032 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
4033 {
4034 	_NOTE(ARGUNUSED(impl_data));
4035 	/*
4036 	 * As the driver can always deal with an error in any DMA or
4037 	 * access handle, we can just return the fme_status value.
4038 	 */
4039 	pci_ereport_post(dip, err, NULL);
4040 	return (err->fme_status);
4041 }
4042 
4043 static void
4044 ixgbe_fm_init(ixgbe_t *ixgbe)
4045 {
4046 	ddi_iblock_cookie_t iblk;
4047 	int fma_acc_flag, fma_dma_flag;
4048 
4049 	/*
4050 	 * Only register with IO Fault Services if we have some capability
4051 	 */
4052 	if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
4053 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
4054 		fma_acc_flag = 1;
4055 	} else {
4056 		ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4057 		fma_acc_flag = 0;
4058 	}
4059 
4060 	if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
4061 		fma_dma_flag = 1;
4062 	} else {
4063 		fma_dma_flag = 0;
4064 	}
4065 
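	/*
	 * Propagate the flags so that DMA bindings and register mappings
	 * made later can request error-checked (FLAGERR) attributes.
	 */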
4066 	ixgbe_set_fma_flags(fma_acc_flag, fma_dma_flag);
4067 
4068 	if (ixgbe->fm_capabilities) {
4069 
4070 		/*
4071 		 * Register capabilities with IO Fault Services
4072 		 */
4073 		ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
4074 
4075 		/*
4076 		 * Initialize pci ereport capabilities if ereport capable
4077 		 */
4078 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
4079 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4080 			pci_ereport_setup(ixgbe->dip);
4081 
4082 		/*
4083 		 * Register error callback if error callback capable
4084 		 */
4085 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4086 			ddi_fm_handler_register(ixgbe->dip,
4087 			    ixgbe_fm_error_cb, (void*) ixgbe);
4088 	}
4089 }
4090 
4091 static void
4092 ixgbe_fm_fini(ixgbe_t *ixgbe)
4093 {
4094 	/*
4095 	 * Only unregister FMA capabilities if they are registered
4096 	 */
4097 	if (ixgbe->fm_capabilities) {
4098 
4099 		/*
4100 		 * Release any resources allocated by pci_ereport_setup()
4101 		 */
4102 		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
4103 		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4104 			pci_ereport_teardown(ixgbe->dip);
4105 
4106 		/*
4107 		 * Un-register error callback if error callback capable
4108 		 */
4109 		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
4110 			ddi_fm_handler_unregister(ixgbe->dip);
4111 
4112 		/*
4113 		 * Unregister from IO Fault Service
4114 		 */
4115 		ddi_fm_fini(ixgbe->dip);
4116 	}
4117 }
4118 
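/*
 * ixgbe_fm_ereport - Post an ereport for the given error class.
 *
 * The detail string is a class suffix such as DDI_FM_DEVICE_INVAL_STATE;
 * callers typically follow up with ddi_fm_service_impact().
 */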
4119 void
4120 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
4121 {
4122 	uint64_t ena;
4123 	char buf[FM_MAX_CLASS];
4124 
4125 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
4126 	ena = fm_ena_generate(0, FM_ENA_FMT1);
4127 	if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
4128 		ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
4129 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
4130 	}
4131 }
4132 
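/*
 * ixgbe_ring_start - Record the mac ring generation number for an rx ring.
 */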
4133 static int
4134 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
4135 {
4136 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
4137 
4138 	mutex_enter(&rx_ring->rx_lock);
4139 	rx_ring->ring_gen_num = mr_gen_num;
4140 	mutex_exit(&rx_ring->rx_lock);
4141 	return (0);
4142 }
4143 
4144 /*
4145  * Callback function for the MAC layer to register all rings.
4146  */
4147 /* ARGSUSED */
4148 void
4149 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
4150     const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
4151 {
4152 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
4153 	mac_intr_t *mintr = &infop->mri_intr;
4154 
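	/*
	 * Rx rings export poll and per-ring interrupt enable/disable
	 * entry points so the MAC layer can switch a ring between
	 * interrupt and polling mode; tx rings only need the transmit
	 * entry point.
	 */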
4155 	switch (rtype) {
4156 	case MAC_RING_TYPE_RX: {
4157 		ASSERT(rg_index == 0);
4158 		ASSERT(ring_index < ixgbe->num_rx_rings);
4159 
4160 		ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[ring_index];
4161 		rx_ring->ring_handle = rh;
4162 
4163 		infop->mri_driver = (mac_ring_driver_t)rx_ring;
4164 		infop->mri_start = ixgbe_ring_start;
4165 		infop->mri_stop = NULL;
4166 		infop->mri_poll = ixgbe_ring_rx_poll;
4167 
4168 		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
4169 		mintr->mi_enable = ixgbe_rx_ring_intr_enable;
4170 		mintr->mi_disable = ixgbe_rx_ring_intr_disable;
4171 
4172 		break;
4173 	}
4174 	case MAC_RING_TYPE_TX: {
4175 		ASSERT(rg_index == -1);
4176 		ASSERT(ring_index < ixgbe->num_tx_rings);
4177 
4178 		ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
4179 		tx_ring->ring_handle = rh;
4180 
4181 		infop->mri_driver = (mac_ring_driver_t)tx_ring;
4182 		infop->mri_start = NULL;
4183 		infop->mri_stop = NULL;
4184 		infop->mri_tx = ixgbe_ring_tx;
4185 
4186 		break;
4187 	}
4188 	default:
4189 		break;
4190 	}
4191 }
4192 
4193 /*
4194  * Callback function for the MAC layer to register all groups.
4195  */
4196 void
4197 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
4198     mac_group_info_t *infop, mac_group_handle_t gh)
4199 {
4200 	ixgbe_t *ixgbe = (ixgbe_t *)arg;
4201 
4202 	switch (rtype) {
4203 	case MAC_RING_TYPE_RX: {
4204 		ixgbe_rx_group_t *rx_group;
4205 
4206 		rx_group = &ixgbe->rx_groups[index];
4207 		rx_group->group_handle = gh;
4208 
4209 		infop->mgi_driver = (mac_group_driver_t)rx_group;
4210 		infop->mgi_start = NULL;
4211 		infop->mgi_stop = NULL;
4212 		infop->mgi_addmac = ixgbe_addmac;
4213 		infop->mgi_remmac = ixgbe_remmac;
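		/* Rings are distributed evenly across the rx groups. */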
4214 		infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
4215 
4216 		break;
4217 	}
4218 	case MAC_RING_TYPE_TX:
4219 		break;
4220 	default:
4221 		break;
4222 	}
4223 }
4224 
4225 /*
4226  * Enable interrupt on the specified rx ring.
4227  */
4228 int
4229 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
4230 {
4231 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
4232 	ixgbe_t *ixgbe = rx_ring->ixgbe;
4233 	int r_idx = rx_ring->index;
4234 	int v_idx = rx_ring->intr_vector;
4235 
4236 	mutex_enter(&ixgbe->gen_lock);
4237 	ASSERT(BT_TEST(ixgbe->vect_map[v_idx].rx_map, r_idx) == 0);
4238 
4239 	/*
4240 	 * Enable the interrupt by setting the VAL bit of the given
4241 	 * interrupt vector allocation register (IVAR).
4242 	 */
4243 	ixgbe_enable_ivar(ixgbe, IXGBE_IVAR_RX_QUEUE(r_idx));
4244 
4245 	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
4246 	mutex_exit(&ixgbe->gen_lock);
4247 
4248 	return (0);
4249 }
4250 
4251 /*
4252  * Disable interrupt on the specified rx ring.
4253  */
4254 int
4255 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
4256 {
4257 	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
4258 	ixgbe_t *ixgbe = rx_ring->ixgbe;
4259 	int r_idx = rx_ring->index;
4260 	int v_idx = rx_ring->intr_vector;
4261 
4262 	mutex_enter(&ixgbe->gen_lock);
4263 
4264 	ASSERT(BT_TEST(ixgbe->vect_map[v_idx].rx_map, r_idx) == 1);
4265 
4266 	/*
4267 	 * Disable the interrupt by clearing the VAL bit of the given
4268 	 * interrupt vector allocation register (IVAR).
4269 	 */
4270 	ixgbe_disable_ivar(ixgbe, IXGBE_IVAR_RX_QUEUE(r_idx));
4271 
4272 	BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
4273 
4274 	mutex_exit(&ixgbe->gen_lock);
4275 
4276 	return (0);
4277 }
4278 
4279 /*
4280  * Add a mac address.
4281  */
4282 static int
4283 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
4284 {
4285 	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
4286 	ixgbe_t *ixgbe = rx_group->ixgbe;
4287 	int slot;
4288 	int err;
4289 
4290 	mutex_enter(&ixgbe->gen_lock);
4291 
4292 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4293 		mutex_exit(&ixgbe->gen_lock);
4294 		return (ECANCELED);
4295 	}
4296 
4297 	if (ixgbe->unicst_avail == 0) {
4298 		/* no slots available */
4299 		mutex_exit(&ixgbe->gen_lock);
4300 		return (ENOSPC);
4301 	}
4302 
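	/*
	 * Find the first unused slot; the unicst_avail check above
	 * guarantees that one exists.
	 */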
4303 	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
4304 		if (ixgbe->unicst_addr[slot].mac.set == 0)
4305 			break;
4306 	}
4307 
4308 	ASSERT((slot >= 0) && (slot < ixgbe->unicst_total));
4309 
4310 	if ((err = ixgbe_unicst_set(ixgbe, mac_addr, slot)) == 0) {
4311 		ixgbe->unicst_addr[slot].mac.set = 1;
4312 		ixgbe->unicst_avail--;
4313 	}
4314 
4315 	mutex_exit(&ixgbe->gen_lock);
4316 
4317 	return (err);
4318 }
4319 
4320 /*
4321  * Remove a mac address.
4322  */
4323 static int
4324 ixgbe_remmac(void *arg, const uint8_t *mac_addr)
4325 {
4326 	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
4327 	ixgbe_t *ixgbe = rx_group->ixgbe;
4328 	int slot;
4329 	int err;
4330 
4331 	mutex_enter(&ixgbe->gen_lock);
4332 
4333 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4334 		mutex_exit(&ixgbe->gen_lock);
4335 		return (ECANCELED);
4336 	}
4337 
4338 	slot = ixgbe_unicst_find(ixgbe, mac_addr);
4339 	if (slot == -1) {
4340 		mutex_exit(&ixgbe->gen_lock);
4341 		return (EINVAL);
4342 	}
4343 
4344 	if (ixgbe->unicst_addr[slot].mac.set == 0) {
4345 		mutex_exit(&ixgbe->gen_lock);
4346 		return (EINVAL);
4347 	}
4348 
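	/*
	 * Clear the slot and push the zeroed address to hardware so the
	 * old address is no longer filtered.
	 */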
4349 	bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
4350 	if ((err = ixgbe_unicst_set(ixgbe,
4351 	    ixgbe->unicst_addr[slot].mac.addr, slot)) == 0) {
4352 		ixgbe->unicst_addr[slot].mac.set = 0;
4353 		ixgbe->unicst_avail++;
4354 	}
4355 
4356 	mutex_exit(&ixgbe->gen_lock);
4357 
4358 	return (err);
4359 }
4360