xref: /illumos-gate/usr/src/uts/common/io/e1000g/e1000g_main.c (revision b8052df9f609edb713f6828c9eecc3d7be19dfb3)
1 /*
2  * This file is provided under a CDDLv1 license.  When using or
3  * redistributing this file, you may do so under this license.
4  * In redistributing this file this license must be included
5  * and no other modification of this header file is permitted.
6  *
7  * CDDL LICENSE SUMMARY
8  *
9  * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
10  *
11  * The contents of this file are subject to the terms of Version
12  * 1.0 of the Common Development and Distribution License (the "License").
13  *
14  * You should have received a copy of the License with this software.
15  * You can obtain a copy of the License at
16  *	http://www.opensolaris.org/os/licensing.
17  * See the License for the specific language governing permissions
18  * and limitations under the License.
19  */
20 
21 /*
22  * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 /*
26  * Copyright 2012 DEY Storage Systems, Inc.  All rights reserved.
27  * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
28  * Copyright (c) 2018, Joyent, Inc.
29  * Copyright 2022 Oxide Computer Company
30  */
31 
32 /*
33  * **********************************************************************
34  *									*
35  * Module Name:								*
36  *   e1000g_main.c							*
37  *									*
38  * Abstract:								*
39  *   This file contains the interface routines for the Solaris OS.	*
40  *   It has all DDI entry point routines and GLD entry point routines.	*
41  *									*
42  *   This file also contains the initialization and uninitialization	*
43  *   routines and the interrupt routine.				*
44  *									*
45  * **********************************************************************
46  */
47 
48 #include <sys/dlpi.h>
49 #include <sys/mac.h>
50 #include "e1000g_sw.h"
51 #include "e1000g_debug.h"
52 
53 static char ident[] = "Intel PRO/1000 Ethernet";
54 /* LINTED E_STATIC_UNUSED */
55 static char e1000g_version[] = "Driver Ver. 5.4.00";
56 
57 /*
58  * Prototypes for DDI entry points
59  */
60 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
61 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);
62 static int e1000g_quiesce(dev_info_t *);
63 
64 /*
65  * Prototypes for the init and interrupt routines
66  */
67 static int e1000g_resume(dev_info_t *);
68 static int e1000g_suspend(dev_info_t *);
69 static uint_t e1000g_intr_pciexpress(caddr_t, caddr_t);
70 static uint_t e1000g_intr(caddr_t, caddr_t);
71 static void e1000g_intr_work(struct e1000g *, uint32_t);
72 #pragma inline(e1000g_intr_work)
73 static int e1000g_init(struct e1000g *);
74 static int e1000g_start(struct e1000g *, boolean_t);
75 static void e1000g_stop(struct e1000g *, boolean_t);
76 static int e1000g_m_start(void *);
77 static void e1000g_m_stop(void *);
78 static int e1000g_m_promisc(void *, boolean_t);
79 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
80 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
81 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
82 static int e1000g_m_setprop(void *, const char *, mac_prop_id_t,
83     uint_t, const void *);
84 static int e1000g_m_getprop(void *, const char *, mac_prop_id_t,
85 			    uint_t, void *);
86 static void e1000g_m_propinfo(void *, const char *, mac_prop_id_t,
87     mac_prop_info_handle_t);
88 static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t,
89     const void *);
90 static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t, void *);
91 static void e1000g_init_locks(struct e1000g *);
92 static void e1000g_destroy_locks(struct e1000g *);
93 static int e1000g_identify_hardware(struct e1000g *);
94 static int e1000g_regs_map(struct e1000g *);
95 static int e1000g_set_driver_params(struct e1000g *);
96 static void e1000g_set_bufsize(struct e1000g *);
97 static int e1000g_register_mac(struct e1000g *);
98 static boolean_t e1000g_rx_drain(struct e1000g *);
99 static boolean_t e1000g_tx_drain(struct e1000g *);
100 static void e1000g_init_unicst(struct e1000g *);
101 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int);
102 static int e1000g_alloc_rx_data(struct e1000g *);
103 static void e1000g_release_multicast(struct e1000g *);
104 static void e1000g_pch_limits(struct e1000g *);
105 static uint32_t e1000g_mtu2maxframe(uint32_t);
106 
107 /*
108  * Local routines
109  */
110 static boolean_t e1000g_reset_adapter(struct e1000g *);
111 static void e1000g_tx_clean(struct e1000g *);
112 static void e1000g_rx_clean(struct e1000g *);
113 static void e1000g_link_timer(void *);
114 static void e1000g_local_timer(void *);
115 static boolean_t e1000g_link_check(struct e1000g *);
116 static boolean_t e1000g_stall_check(struct e1000g *);
117 static void e1000g_smartspeed(struct e1000g *);
118 static void e1000g_get_conf(struct e1000g *);
119 static boolean_t e1000g_get_prop(struct e1000g *, char *, int, int, int,
120     int *);
121 static void enable_watchdog_timer(struct e1000g *);
122 static void disable_watchdog_timer(struct e1000g *);
123 static void start_watchdog_timer(struct e1000g *);
124 static void restart_watchdog_timer(struct e1000g *);
125 static void stop_watchdog_timer(struct e1000g *);
126 static void stop_link_timer(struct e1000g *);
127 static void stop_82547_timer(e1000g_tx_ring_t *);
128 static void e1000g_force_speed_duplex(struct e1000g *);
129 static void e1000g_setup_max_mtu(struct e1000g *);
130 static void e1000g_get_max_frame_size(struct e1000g *);
131 static boolean_t is_valid_mac_addr(uint8_t *);
132 static void e1000g_unattach(dev_info_t *, struct e1000g *);
133 static int e1000g_get_bar_info(dev_info_t *, int, bar_info_t *);
134 #ifdef E1000G_DEBUG
135 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *);
136 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *);
137 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *);
138 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *);
139 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *,
140     struct iocblk *, mblk_t *);
141 #endif
142 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *,
143     struct iocblk *, mblk_t *);
144 static boolean_t e1000g_check_loopback_support(struct e1000_hw *);
145 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t);
146 static void e1000g_set_internal_loopback(struct e1000g *);
147 static void e1000g_set_external_loopback_1000(struct e1000g *);
148 static void e1000g_set_external_loopback_100(struct e1000g *);
149 static void e1000g_set_external_loopback_10(struct e1000g *);
150 static int e1000g_add_intrs(struct e1000g *);
151 static int e1000g_intr_add(struct e1000g *, int);
152 static int e1000g_rem_intrs(struct e1000g *);
153 static int e1000g_enable_intrs(struct e1000g *);
154 static int e1000g_disable_intrs(struct e1000g *);
155 static boolean_t e1000g_link_up(struct e1000g *);
156 #ifdef __sparc
157 static boolean_t e1000g_find_mac_address(struct e1000g *);
158 #endif
159 static void e1000g_get_phy_state(struct e1000g *);
160 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
161     const void *impl_data);
162 static void e1000g_fm_init(struct e1000g *Adapter);
163 static void e1000g_fm_fini(struct e1000g *Adapter);
164 static void e1000g_param_sync(struct e1000g *);
165 static void e1000g_get_driver_control(struct e1000_hw *);
166 static void e1000g_release_driver_control(struct e1000_hw *);
167 static void e1000g_restore_promisc(struct e1000g *Adapter);
168 
169 char *e1000g_priv_props[] = {
170 	"_tx_bcopy_threshold",
171 	"_tx_interrupt_enable",
172 	"_tx_intr_delay",
173 	"_tx_intr_abs_delay",
174 	"_rx_bcopy_threshold",
175 	"_max_num_rcv_packets",
176 	"_rx_intr_delay",
177 	"_rx_intr_abs_delay",
178 	"_intr_throttling_rate",
179 	"_intr_adaptive",
180 	"_adv_pause_cap",
181 	"_adv_asym_pause_cap",
182 	NULL
183 };
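/*
 * These private properties are typically tuned through dladm(8) like any
 * other link property; for example (illustrative only, exact syntax and
 * valid values may vary by release):
 *	# dladm show-linkprop -p _intr_throttling_rate e1000g0
 *	# dladm set-linkprop -p _tx_intr_delay=64 e1000g0
 */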
184 
185 static struct cb_ops cb_ws_ops = {
186 	nulldev,		/* cb_open */
187 	nulldev,		/* cb_close */
188 	nodev,			/* cb_strategy */
189 	nodev,			/* cb_print */
190 	nodev,			/* cb_dump */
191 	nodev,			/* cb_read */
192 	nodev,			/* cb_write */
193 	nodev,			/* cb_ioctl */
194 	nodev,			/* cb_devmap */
195 	nodev,			/* cb_mmap */
196 	nodev,			/* cb_segmap */
197 	nochpoll,		/* cb_chpoll */
198 	ddi_prop_op,		/* cb_prop_op */
199 	NULL,			/* cb_stream */
200 	D_MP | D_HOTPLUG,	/* cb_flag */
201 	CB_REV,			/* cb_rev */
202 	nodev,			/* cb_aread */
203 	nodev			/* cb_awrite */
204 };
205 
206 static struct dev_ops ws_ops = {
207 	DEVO_REV,		/* devo_rev */
208 	0,			/* devo_refcnt */
209 	NULL,			/* devo_getinfo */
210 	nulldev,		/* devo_identify */
211 	nulldev,		/* devo_probe */
212 	e1000g_attach,		/* devo_attach */
213 	e1000g_detach,		/* devo_detach */
214 	nodev,			/* devo_reset */
215 	&cb_ws_ops,		/* devo_cb_ops */
216 	NULL,			/* devo_bus_ops */
217 	ddi_power,		/* devo_power */
218 	e1000g_quiesce		/* devo_quiesce */
219 };
220 
221 static struct modldrv modldrv = {
222 	&mod_driverops,		/* Type of module.  This one is a driver */
223 	ident,			/* Description string */
224 	&ws_ops,		/* driver ops */
225 };
226 
227 static struct modlinkage modlinkage = {
228 	MODREV_1, &modldrv, NULL
229 };
230 
231 /* Access attributes for register mapping */
232 static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
233 	DDI_DEVICE_ATTR_V1,
234 	DDI_STRUCTURE_LE_ACC,
235 	DDI_STRICTORDER_ACC,
236 	DDI_FLAGERR_ACC
237 };
238 
239 #define	E1000G_M_CALLBACK_FLAGS \
240 	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
241 
242 static mac_callbacks_t e1000g_m_callbacks = {
243 	E1000G_M_CALLBACK_FLAGS,
244 	e1000g_m_stat,
245 	e1000g_m_start,
246 	e1000g_m_stop,
247 	e1000g_m_promisc,
248 	e1000g_m_multicst,
249 	NULL,
250 	e1000g_m_tx,
251 	NULL,
252 	e1000g_m_ioctl,
253 	e1000g_m_getcapab,
254 	NULL,
255 	NULL,
256 	e1000g_m_setprop,
257 	e1000g_m_getprop,
258 	e1000g_m_propinfo
259 };
260 
261 /*
262  * Global variables
263  */
264 uint32_t e1000g_jumbo_mtu = MAXIMUM_MTU_9K;
265 uint32_t e1000g_mblks_pending = 0;
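/*
 * e1000g_jumbo_mtu caps the MTU used for the adapter types given a "special
 * limit" in e1000g_setup_max_mtu() below.  e1000g_mblks_pending tracks rx
 * buffers that are still held by the upper layers; _fini() refuses to unload
 * the module (EBUSY) while this count is non-zero.
 */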
266 /*
267  * Workaround for Dynamic Reconfiguration support, for the x86 platform only.
268  * Here we maintain a private dev_info list if e1000g_force_detach is
269  * enabled. If we force the driver to detach while there are still some
270  * rx buffers retained in the upper layer, we have to keep a copy of the
271  * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
272  * structure will be freed after the driver is detached. However, when we
273  * finally free those rx buffers released by the upper layer, we need to
274  * refer to the dev_info to free the dma buffers. So we save a copy of
275  * the dev_info for this purpose. On the x86 platform, we assume this copy
276  * of dev_info is always valid, but on the SPARC platform, it could become
277  * invalid after a system-board-level DR operation. For this reason, the
278  * global variable e1000g_force_detach must be B_FALSE on SPARC.
279  */
280 #ifdef __sparc
281 boolean_t e1000g_force_detach = B_FALSE;
282 #else
283 boolean_t e1000g_force_detach = B_TRUE;
284 #endif
285 private_devi_list_t *e1000g_private_devi_list = NULL;
286 
287 /*
288  * The mutex e1000g_rx_detach_lock is defined to protect the processing of
289  * the private dev_info list, and to serialize the processing of rx buffer
290  * freeing and rx buffer recycling.
291  */
292 kmutex_t e1000g_rx_detach_lock;
293 /*
294  * The rwlock e1000g_dma_type_lock is defined to protect the global flag
295  * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
296  * If there are many e1000g instances, the system may run out of DVMA
297  * resources during the initialization of the instances; the flag will then
298  * be changed to "USE_DMA". Because different e1000g instances are initialized
299  * in parallel, we need to use this lock to protect the flag.
300  */
301 krwlock_t e1000g_dma_type_lock;
302 
303 /*
304  * The 82546 chipset is a dual-port device; both ports share one eeprom.
305  * Based on the information from Intel, the 82546 chipset has a hardware
306  * problem. When one port is being reset and the other port is trying to
307  * access the eeprom, it could cause a system hang or panic. To work around
308  * this hardware problem, we use a global mutex to prevent such operations from
309  * happening simultaneously on different instances. This workaround is applied
310  * to all the devices supported by this driver.
311  */
312 kmutex_t e1000g_nvm_lock;
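/*
 * A minimal sketch of the serialization pattern used with this lock (the
 * same pattern appears in e1000g_init() and e1000g_unattach() below):
 *
 *	mutex_enter(&e1000g_nvm_lock);
 *	result = e1000_reset_hw(hw);
 *	mutex_exit(&e1000g_nvm_lock);
 */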
313 
314 /*
315  * Loadable module configuration entry points for the driver
316  */
317 
318 /*
319  * _init - module initialization
320  */
321 int
322 _init(void)
323 {
324 	int status;
325 
326 	mac_init_ops(&ws_ops, WSNAME);
327 	status = mod_install(&modlinkage);
328 	if (status != DDI_SUCCESS)
329 		mac_fini_ops(&ws_ops);
330 	else {
331 		mutex_init(&e1000g_rx_detach_lock, NULL, MUTEX_DRIVER, NULL);
332 		rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL);
333 		mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL);
334 	}
335 
336 	return (status);
337 }
338 
339 /*
340  * _fini - module finalization
341  */
342 int
343 _fini(void)
344 {
345 	int status;
346 
347 	if (e1000g_mblks_pending != 0)
348 		return (EBUSY);
349 
350 	status = mod_remove(&modlinkage);
351 	if (status == DDI_SUCCESS) {
352 		mac_fini_ops(&ws_ops);
353 
354 		if (e1000g_force_detach) {
355 			private_devi_list_t *devi_node;
356 
357 			mutex_enter(&e1000g_rx_detach_lock);
358 			while (e1000g_private_devi_list != NULL) {
359 				devi_node = e1000g_private_devi_list;
360 				e1000g_private_devi_list =
361 				    e1000g_private_devi_list->next;
362 
363 				kmem_free(devi_node->priv_dip,
364 				    sizeof (struct dev_info));
365 				kmem_free(devi_node,
366 				    sizeof (private_devi_list_t));
367 			}
368 			mutex_exit(&e1000g_rx_detach_lock);
369 		}
370 
371 		mutex_destroy(&e1000g_rx_detach_lock);
372 		rw_destroy(&e1000g_dma_type_lock);
373 		mutex_destroy(&e1000g_nvm_lock);
374 	}
375 
376 	return (status);
377 }
378 
379 /*
380  * _info - module information
381  */
382 int
383 _info(struct modinfo *modinfop)
384 {
385 	return (mod_info(&modlinkage, modinfop));
386 }
387 
388 /*
389  * e1000g_attach - driver attach
390  *
391  * This function is the device-specific initialization entry
392  * point. This entry point is required and must be written.
393  * The DDI_ATTACH command must be provided in the attach entry
394  * point. When attach() is called with cmd set to DDI_ATTACH,
395  * all normal kernel services (such as kmem_alloc(9F)) are
396  * available for use by the driver.
397  *
398  * The attach() function will be called once for each instance
399  * of  the  device  on  the  system with cmd set to DDI_ATTACH.
400  * Until attach() succeeds, the only driver entry points which
401  * may be called are open(9E) and getinfo(9E).
402  */
403 static int
404 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
405 {
406 	struct e1000g *Adapter;
407 	struct e1000_hw *hw;
408 	struct e1000g_osdep *osdep;
409 	int instance;
410 
411 	switch (cmd) {
412 	default:
413 		e1000g_log(NULL, CE_WARN,
414 		    "Unsupported command sent to e1000g_attach... ");
415 		return (DDI_FAILURE);
416 
417 	case DDI_RESUME:
418 		return (e1000g_resume(devinfo));
419 
420 	case DDI_ATTACH:
421 		break;
422 	}
423 
424 	/*
425 	 * get device instance number
426 	 */
427 	instance = ddi_get_instance(devinfo);
428 
429 	/*
430 	 * Allocate soft data structure
431 	 */
432 	Adapter =
433 	    (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP);
434 
435 	Adapter->dip = devinfo;
436 	Adapter->instance = instance;
437 	Adapter->tx_ring->adapter = Adapter;
438 	Adapter->rx_ring->adapter = Adapter;
439 
440 	hw = &Adapter->shared;
441 	osdep = &Adapter->osdep;
442 	hw->back = osdep;
443 	osdep->adapter = Adapter;
444 
445 	ddi_set_driver_private(devinfo, (caddr_t)Adapter);
446 
447 	/*
448 	 * Initialize for fma support
449 	 */
450 	(void) e1000g_get_prop(Adapter, "fm-capable",
451 	    0, 0x0f,
452 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
453 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE,
454 	    &Adapter->fm_capabilities);
455 	e1000g_fm_init(Adapter);
456 	Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT;
457 
458 	/*
459 	 * PCI Configure
460 	 */
461 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
462 		e1000g_log(Adapter, CE_WARN, "PCI configuration failed");
463 		goto attach_fail;
464 	}
465 	Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
466 
467 	/*
468 	 * Setup hardware
469 	 */
470 	if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) {
471 		e1000g_log(Adapter, CE_WARN, "Identify hardware failed");
472 		goto attach_fail;
473 	}
474 
475 	/*
476 	 * Map in the device registers.
477 	 */
478 	if (e1000g_regs_map(Adapter) != DDI_SUCCESS) {
479 		e1000g_log(Adapter, CE_WARN, "Mapping registers failed");
480 		goto attach_fail;
481 	}
482 	Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
483 
484 	/*
485 	 * Initialize driver parameters
486 	 */
487 	if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) {
488 		goto attach_fail;
489 	}
490 	Adapter->attach_progress |= ATTACH_PROGRESS_SETUP;
491 
492 	if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
493 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
494 		goto attach_fail;
495 	}
496 
497 	/*
498 	 * Disable ULP support
499 	 */
500 	(void) e1000_disable_ulp_lpt_lp(hw, TRUE);
501 
502 	/*
503 	 * Initialize interrupts
504 	 */
505 	if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) {
506 		e1000g_log(Adapter, CE_WARN, "Add interrupts failed");
507 		goto attach_fail;
508 	}
509 	Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
510 
511 	/*
512 	 * Initialize mutexes for this device.
513 	 * Do this before enabling the interrupt handler and
514 	 * registering the softint, to avoid the condition where the
515 	 * interrupt handler could try to use an uninitialized mutex.
516 	 */
517 	e1000g_init_locks(Adapter);
518 	Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS;
519 
520 	/*
521 	 * Initialize Driver Counters
522 	 */
523 	if (e1000g_init_stats(Adapter) != DDI_SUCCESS) {
524 		e1000g_log(Adapter, CE_WARN, "Init stats failed");
525 		goto attach_fail;
526 	}
527 	Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS;
528 
529 	/*
530 	 * Initialize chip hardware and software structures
531 	 */
532 	rw_enter(&Adapter->chip_lock, RW_WRITER);
533 	if (e1000g_init(Adapter) != DDI_SUCCESS) {
534 		rw_exit(&Adapter->chip_lock);
535 		e1000g_log(Adapter, CE_WARN, "Adapter initialization failed");
536 		goto attach_fail;
537 	}
538 	rw_exit(&Adapter->chip_lock);
539 	Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
540 
541 	/*
542 	 * Register the driver to the MAC
543 	 */
544 	if (e1000g_register_mac(Adapter) != DDI_SUCCESS) {
545 		e1000g_log(Adapter, CE_WARN, "Register MAC failed");
546 		goto attach_fail;
547 	}
548 	Adapter->attach_progress |= ATTACH_PROGRESS_MAC;
549 
550 	/*
551 	 * Now that mutex locks are initialized, and the chip is also
552 	 * initialized, enable interrupts.
553 	 */
554 	if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) {
555 		e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed");
556 		goto attach_fail;
557 	}
558 	Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
559 
560 	/*
561 	 * If e1000g_force_detach is enabled, create a new entry in the
562 	 * global private dip list that maintains the priv_dip for DR
563 	 * support after the driver is detached.
564 	 */
565 	if (e1000g_force_detach) {
566 		private_devi_list_t *devi_node;
567 
568 		Adapter->priv_dip =
569 		    kmem_zalloc(sizeof (struct dev_info), KM_SLEEP);
570 		bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip),
571 		    sizeof (struct dev_info));
572 
573 		devi_node =
574 		    kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP);
575 
576 		mutex_enter(&e1000g_rx_detach_lock);
577 		devi_node->priv_dip = Adapter->priv_dip;
578 		devi_node->flag = E1000G_PRIV_DEVI_ATTACH;
579 		devi_node->pending_rx_count = 0;
580 
581 		Adapter->priv_devi_node = devi_node;
582 
583 		if (e1000g_private_devi_list == NULL) {
584 			devi_node->prev = NULL;
585 			devi_node->next = NULL;
586 			e1000g_private_devi_list = devi_node;
587 		} else {
588 			devi_node->prev = NULL;
589 			devi_node->next = e1000g_private_devi_list;
590 			e1000g_private_devi_list->prev = devi_node;
591 			e1000g_private_devi_list = devi_node;
592 		}
593 		mutex_exit(&e1000g_rx_detach_lock);
594 	}
595 
596 	Adapter->e1000g_state = E1000G_INITIALIZED;
597 	return (DDI_SUCCESS);
598 
599 attach_fail:
600 	e1000g_unattach(devinfo, Adapter);
601 	return (DDI_FAILURE);
602 }
603 
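/*
 * e1000g_register_mac - register this instance with the GLDv3 MAC layer
 *
 * Fills in a mac_register_t with the driver handle, MAC address, callback
 * vector, SDU range and private properties, then calls mac_register(9F).
 */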
604 static int
605 e1000g_register_mac(struct e1000g *Adapter)
606 {
607 	struct e1000_hw *hw = &Adapter->shared;
608 	mac_register_t *mac;
609 	int err;
610 
611 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
612 		return (DDI_FAILURE);
613 
614 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
615 	mac->m_driver = Adapter;
616 	mac->m_dip = Adapter->dip;
617 	mac->m_src_addr = hw->mac.addr;
618 	mac->m_callbacks = &e1000g_m_callbacks;
619 	mac->m_min_sdu = 0;
620 	mac->m_max_sdu = Adapter->default_mtu;
621 	mac->m_margin = VLAN_TAGSZ;
622 	mac->m_priv_props = e1000g_priv_props;
623 	mac->m_v12n = MAC_VIRT_LEVEL1;
624 
625 	err = mac_register(mac, &Adapter->mh);
626 	mac_free(mac);
627 
628 	return (err == 0 ? DDI_SUCCESS : DDI_FAILURE);
629 }
630 
631 static int
632 e1000g_identify_hardware(struct e1000g *Adapter)
633 {
634 	struct e1000_hw *hw = &Adapter->shared;
635 	struct e1000g_osdep *osdep = &Adapter->osdep;
636 
637 	/* Get the device id */
638 	hw->vendor_id =
639 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
640 	hw->device_id =
641 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
642 	hw->revision_id =
643 	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
644 	hw->subsystem_device_id =
645 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
646 	hw->subsystem_vendor_id =
647 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
648 
649 	if (e1000_set_mac_type(hw) != E1000_SUCCESS) {
650 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
651 		    "MAC type could not be set properly.");
652 		return (DDI_FAILURE);
653 	}
654 
655 	return (DDI_SUCCESS);
656 }
657 
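/*
 * e1000g_regs_map - map the device register sets
 *
 * Maps the main register BAR for all adapters, the separate flash BAR for
 * the ICH/PCH parts that have one (on SPT and newer the flash lives in BAR0,
 * so the register mapping is reused), and the I/O BAR for the older adapters
 * that require it.
 */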
658 static int
659 e1000g_regs_map(struct e1000g *Adapter)
660 {
661 	dev_info_t *devinfo = Adapter->dip;
662 	struct e1000_hw *hw = &Adapter->shared;
663 	struct e1000g_osdep *osdep = &Adapter->osdep;
664 	off_t mem_size;
665 	bar_info_t bar_info;
666 	int offset, rnumber;
667 
668 	rnumber = ADAPTER_REG_SET;
669 	/* Get size of adapter register memory */
670 	if (ddi_dev_regsize(devinfo, rnumber, &mem_size) !=
671 	    DDI_SUCCESS) {
672 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
673 		    "ddi_dev_regsize for registers failed");
674 		return (DDI_FAILURE);
675 	}
676 
677 	/* Map adapter register memory */
678 	if ((ddi_regs_map_setup(devinfo, rnumber,
679 	    (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr,
680 	    &osdep->reg_handle)) != DDI_SUCCESS) {
681 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
682 		    "ddi_regs_map_setup for registers failed");
683 		goto regs_map_fail;
684 	}
685 
686 	/* ICH needs to map flash memory */
687 	switch (hw->mac.type) {
688 	case e1000_ich8lan:
689 	case e1000_ich9lan:
690 	case e1000_ich10lan:
691 	case e1000_pchlan:
692 	case e1000_pch2lan:
693 	case e1000_pch_lpt:
694 		rnumber = ICH_FLASH_REG_SET;
695 
696 		/* get flash size */
697 		if (ddi_dev_regsize(devinfo, rnumber,
698 		    &mem_size) != DDI_SUCCESS) {
699 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
700 			    "ddi_dev_regsize for ICH flash failed");
701 			goto regs_map_fail;
702 		}
703 
704 		/* map flash in */
705 		if (ddi_regs_map_setup(devinfo, rnumber,
706 		    (caddr_t *)&hw->flash_address, 0,
707 		    mem_size, &e1000g_regs_acc_attr,
708 		    &osdep->ich_flash_handle) != DDI_SUCCESS) {
709 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
710 			    "ddi_regs_map_setup for ICH flash failed");
711 			goto regs_map_fail;
712 		}
713 		break;
714 	case e1000_pch_spt:
715 	case e1000_pch_cnp:
716 	case e1000_pch_tgp:
717 	case e1000_pch_adp:
718 	case e1000_pch_mtp:
719 	case e1000_pch_lnp:
720 	case e1000_pch_rpl:
721 		/*
722 		 * On the SPT, the device flash is actually in BAR0, not a
723 		 * separate BAR. Therefore we end up setting the
724 		 * ich_flash_handle to be the same as the register handle.
725 		 * We mark it the same to reduce confusion in the other
726 		 * functions and macros, though this does make the setup and
727 		 * tear-down paths slightly more complicated.
728 		 */
729 		osdep->ich_flash_handle = osdep->reg_handle;
730 		hw->flash_address = hw->hw_addr;
731 	default:
732 		break;
733 	}
734 
735 	/* map io space */
736 	switch (hw->mac.type) {
737 	case e1000_82544:
738 	case e1000_82540:
739 	case e1000_82545:
740 	case e1000_82546:
741 	case e1000_82541:
742 	case e1000_82541_rev_2:
743 		/* find the IO bar */
744 		rnumber = -1;
745 		for (offset = PCI_CONF_BASE1;
746 		    offset <= PCI_CONF_BASE5; offset += 4) {
747 			if (e1000g_get_bar_info(devinfo, offset, &bar_info)
748 			    != DDI_SUCCESS)
749 				continue;
750 			if (bar_info.type == E1000G_BAR_IO) {
751 				rnumber = bar_info.rnumber;
752 				break;
753 			}
754 		}
755 
756 		if (rnumber < 0) {
757 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
758 			    "No io space is found");
759 			goto regs_map_fail;
760 		}
761 
762 		/* get io space size */
763 		if (ddi_dev_regsize(devinfo, rnumber,
764 		    &mem_size) != DDI_SUCCESS) {
765 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
766 			    "ddi_dev_regsize for io space failed");
767 			goto regs_map_fail;
768 		}
769 
770 		/* map io space */
771 		if ((ddi_regs_map_setup(devinfo, rnumber,
772 		    (caddr_t *)&hw->io_base, 0, mem_size,
773 		    &e1000g_regs_acc_attr,
774 		    &osdep->io_reg_handle)) != DDI_SUCCESS) {
775 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
776 			    "ddi_regs_map_setup for io space failed");
777 			goto regs_map_fail;
778 		}
779 		break;
780 	default:
781 		hw->io_base = 0;
782 		break;
783 	}
784 
785 	return (DDI_SUCCESS);
786 
787 regs_map_fail:
788 	if (osdep->reg_handle != NULL)
789 		ddi_regs_map_free(&osdep->reg_handle);
790 	if (osdep->ich_flash_handle != NULL && hw->mac.type < e1000_pch_spt)
791 		ddi_regs_map_free(&osdep->ich_flash_handle);
792 	return (DDI_FAILURE);
793 }
794 
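/*
 * e1000g_set_driver_params - initialize driver and hardware parameters
 *
 * Sets up the shared-code function pointers, reads bus information, applies
 * chip-specific workarounds, and derives the MTU limit, frame sizes, buffer
 * sizes and default rx/tx interrupt tunables from the configuration file.
 */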
795 static int
796 e1000g_set_driver_params(struct e1000g *Adapter)
797 {
798 	struct e1000_hw *hw;
799 
800 	hw = &Adapter->shared;
801 
802 	/* Set MAC type and initialize hardware functions */
803 	if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) {
804 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
805 		    "Could not setup hardware functions");
806 		return (DDI_FAILURE);
807 	}
808 
809 	/* Get bus information */
810 	if (e1000_get_bus_info(hw) != E1000_SUCCESS) {
811 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
812 		    "Could not get bus information");
813 		return (DDI_FAILURE);
814 	}
815 
816 	e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word);
817 
818 	hw->mac.autoneg_failed = B_TRUE;
819 
820 	/* Set the autoneg_wait_to_complete flag to B_FALSE */
821 	hw->phy.autoneg_wait_to_complete = B_FALSE;
822 
823 	/* Adaptive IFS related changes */
824 	hw->mac.adaptive_ifs = B_TRUE;
825 
826 	/* Enable phy init script for IGP phy of 82541/82547 */
827 	if ((hw->mac.type == e1000_82547) ||
828 	    (hw->mac.type == e1000_82541) ||
829 	    (hw->mac.type == e1000_82547_rev_2) ||
830 	    (hw->mac.type == e1000_82541_rev_2))
831 		e1000_init_script_state_82541(hw, B_TRUE);
832 
833 	/* Enable the TTL workaround for 82541/82547 */
834 	e1000_set_ttl_workaround_state_82541(hw, B_TRUE);
835 
836 #ifdef __sparc
837 	Adapter->strip_crc = B_TRUE;
838 #else
839 	Adapter->strip_crc = B_FALSE;
840 #endif
841 
842 	/* setup the maximum MTU size of the chip */
843 	e1000g_setup_max_mtu(Adapter);
844 
845 	/* Get speed/duplex settings in conf file */
846 	hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
847 	hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
848 	e1000g_force_speed_duplex(Adapter);
849 
850 	/* Get Jumbo Frames settings in conf file */
851 	e1000g_get_max_frame_size(Adapter);
852 
853 	/* Get conf file properties */
854 	e1000g_get_conf(Adapter);
855 
856 	/* enforce PCH limits */
857 	e1000g_pch_limits(Adapter);
858 
859 	/* Set Rx/Tx buffer size */
860 	e1000g_set_bufsize(Adapter);
861 
862 	/* Master Latency Timer */
863 	Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER;
864 
865 	/* copper options */
866 	if (hw->phy.media_type == e1000_media_type_copper) {
867 		hw->phy.mdix = 0;	/* AUTO_ALL_MODES */
868 		hw->phy.disable_polarity_correction = B_FALSE;
869 		hw->phy.ms_type = e1000_ms_hw_default;	/* E1000_MASTER_SLAVE */
870 	}
871 
872 	/* The initial link state should be "unknown" */
873 	Adapter->link_state = LINK_STATE_UNKNOWN;
874 
875 	/* Initialize rx parameters */
876 	Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY;
877 	Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY;
878 
879 	/* Initialize tx parameters */
880 	Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE;
881 	Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD;
882 	Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY;
883 	Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY;
884 
885 	/* Initialize rx parameters */
886 	Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD;
887 
888 	return (DDI_SUCCESS);
889 }
890 
891 static void
892 e1000g_setup_max_mtu(struct e1000g *Adapter)
893 {
894 	struct e1000_mac_info *mac = &Adapter->shared.mac;
895 	struct e1000_phy_info *phy = &Adapter->shared.phy;
896 
897 	switch (mac->type) {
898 	/* types that do not support jumbo frames */
899 	case e1000_ich8lan:
900 	case e1000_82573:
901 	case e1000_82583:
902 		Adapter->max_mtu = ETHERMTU;
903 		break;
904 	/* ich9 supports jumbo frames except on one phy type */
905 	case e1000_ich9lan:
906 		if (phy->type == e1000_phy_ife)
907 			Adapter->max_mtu = ETHERMTU;
908 		else
909 			Adapter->max_mtu = MAXIMUM_MTU_9K;
910 		break;
911 	/* pch can do jumbo frames up to 4K */
912 	case e1000_pchlan:
913 		Adapter->max_mtu = MAXIMUM_MTU_4K;
914 		break;
915 	/* pch2 can do jumbo frames up to 9K */
916 	case e1000_pch2lan:
917 	case e1000_pch_lpt:
918 	case e1000_pch_spt:
919 	case e1000_pch_cnp:
920 	case e1000_pch_tgp:
921 	case e1000_pch_adp:
922 	case e1000_pch_mtp:
923 	case e1000_pch_lnp:
924 	case e1000_pch_rpl:
925 		Adapter->max_mtu = MAXIMUM_MTU_9K;
926 		break;
927 	/* types with a special limit */
928 	case e1000_82571:
929 	case e1000_82572:
930 	case e1000_82574:
931 	case e1000_80003es2lan:
932 	case e1000_ich10lan:
933 		if (e1000g_jumbo_mtu >= ETHERMTU &&
934 		    e1000g_jumbo_mtu <= MAXIMUM_MTU_9K) {
935 			Adapter->max_mtu = e1000g_jumbo_mtu;
936 		} else {
937 			Adapter->max_mtu = MAXIMUM_MTU_9K;
938 		}
939 		break;
940 	/* default limit is 16K */
941 	default:
942 		Adapter->max_mtu = FRAME_SIZE_UPTO_16K -
943 		    sizeof (struct ether_vlan_header) - ETHERFCSL;
944 		break;
945 	}
946 }
947 
948 static void
949 e1000g_set_bufsize(struct e1000g *Adapter)
950 {
951 	struct e1000_mac_info *mac = &Adapter->shared.mac;
952 	uint64_t rx_size;
953 	uint64_t tx_size;
954 
955 	dev_info_t *devinfo = Adapter->dip;
956 #ifdef __sparc
957 	ulong_t iommu_pagesize;
958 #endif
959 	/* Get the system page size */
960 	Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1);
961 
962 #ifdef __sparc
963 	iommu_pagesize = dvma_pagesize(devinfo);
964 	if (iommu_pagesize != 0) {
965 		if (Adapter->sys_page_sz == iommu_pagesize) {
966 			if (iommu_pagesize > 0x4000)
967 				Adapter->sys_page_sz = 0x4000;
968 		} else {
969 			if (Adapter->sys_page_sz > iommu_pagesize)
970 				Adapter->sys_page_sz = iommu_pagesize;
971 		}
972 	}
973 	if (Adapter->lso_enable) {
974 		Adapter->dvma_page_num = E1000_LSO_MAXLEN /
975 		    Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
976 	} else {
977 		Adapter->dvma_page_num = Adapter->max_frame_size /
978 		    Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
979 	}
980 	ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM);
981 #endif
982 
983 	Adapter->min_frame_size = ETHERMIN + ETHERFCSL;
984 
985 	if (Adapter->mem_workaround_82546 &&
986 	    ((mac->type == e1000_82545) ||
987 	    (mac->type == e1000_82546) ||
988 	    (mac->type == e1000_82546_rev_3))) {
989 		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
990 	} else {
991 		rx_size = Adapter->max_frame_size;
992 		if ((rx_size > FRAME_SIZE_UPTO_2K) &&
993 		    (rx_size <= FRAME_SIZE_UPTO_4K))
994 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
995 		else if ((rx_size > FRAME_SIZE_UPTO_4K) &&
996 		    (rx_size <= FRAME_SIZE_UPTO_8K))
997 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
998 		else if ((rx_size > FRAME_SIZE_UPTO_8K) &&
999 		    (rx_size <= FRAME_SIZE_UPTO_16K))
1000 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
1001 		else
1002 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
1003 	}
1004 	Adapter->rx_buffer_size += E1000G_IPALIGNROOM;
1005 
1006 	tx_size = Adapter->max_frame_size;
1007 	if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K))
1008 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
1009 	else if ((tx_size > FRAME_SIZE_UPTO_4K) &&
1010 	    (tx_size <= FRAME_SIZE_UPTO_8K))
1011 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
1012 	else if ((tx_size > FRAME_SIZE_UPTO_8K) &&
1013 	    (tx_size <= FRAME_SIZE_UPTO_16K))
1014 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
1015 	else
1016 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;
1017 
1018 	/*
1019 	 * For Wiseman adapters we have a requirement that receive buffers
1020 	 * be aligned on a 256-byte boundary. Since Livengood does not
1021 	 * require this, and forcing the alignment for all hardware would
1022 	 * have performance implications, it is made applicable only to
1023 	 * Wiseman adapters and to jumbo-frames mode; the rest of the time
1024 	 * it is okay to have normal frames. However, there is a potential
1025 	 * risk of losing data if a buffer is not aligned, so all Wiseman
1026 	 * boards (all pre-82543 MACs, per the check below) are given
1027 	 * 256-byte aligned buffers.
1028 	 */
1029 	if (mac->type < e1000_82543)
1030 		Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
1031 	else
1032 		Adapter->rx_buf_align = 1;
1033 }
1034 
1035 /*
1036  * e1000g_detach - driver detach
1037  *
1038  * The detach() function is the complement of the attach routine.
1039  * If cmd is set to DDI_DETACH, detach() is used to remove  the
1040  * state  associated  with  a  given  instance of a device node
1041  * prior to the removal of that instance from the system.
1042  *
1043  * The detach() function will be called once for each  instance
1044  * of the device for which there has been a successful attach()
1045  * once there are no longer  any  opens  on  the  device.
1046  *
1047  * Interrupt routines are disabled, and all memory allocated by this
1048  * driver is freed.
1049  */
1050 static int
1051 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
1052 {
1053 	struct e1000g *Adapter;
1054 	boolean_t rx_drain;
1055 
1056 	switch (cmd) {
1057 	default:
1058 		return (DDI_FAILURE);
1059 
1060 	case DDI_SUSPEND:
1061 		return (e1000g_suspend(devinfo));
1062 
1063 	case DDI_DETACH:
1064 		break;
1065 	}
1066 
1067 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1068 	if (Adapter == NULL)
1069 		return (DDI_FAILURE);
1070 
1071 	rx_drain = e1000g_rx_drain(Adapter);
1072 	if (!rx_drain && !e1000g_force_detach)
1073 		return (DDI_FAILURE);
1074 
1075 	if (mac_unregister(Adapter->mh) != 0) {
1076 		e1000g_log(Adapter, CE_WARN, "Unregister MAC failed");
1077 		return (DDI_FAILURE);
1078 	}
1079 	Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC;
1080 
1081 	ASSERT(!(Adapter->e1000g_state & E1000G_STARTED));
1082 
1083 	if (!e1000g_force_detach && !rx_drain)
1084 		return (DDI_FAILURE);
1085 
1086 	e1000g_unattach(devinfo, Adapter);
1087 
1088 	return (DDI_SUCCESS);
1089 }
1090 
1091 /*
1092  * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance
1093  */
1094 void
1095 e1000g_free_priv_devi_node(private_devi_list_t *devi_node)
1096 {
1097 	ASSERT(e1000g_private_devi_list != NULL);
1098 	ASSERT(devi_node != NULL);
1099 
1100 	if (devi_node->prev != NULL)
1101 		devi_node->prev->next = devi_node->next;
1102 	if (devi_node->next != NULL)
1103 		devi_node->next->prev = devi_node->prev;
1104 	if (devi_node == e1000g_private_devi_list)
1105 		e1000g_private_devi_list = devi_node->next;
1106 
1107 	kmem_free(devi_node->priv_dip,
1108 	    sizeof (struct dev_info));
1109 	kmem_free(devi_node,
1110 	    sizeof (private_devi_list_t));
1111 }
1112 
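/*
 * e1000g_unattach - release the resources recorded in attach_progress
 *
 * Each ATTACH_PROGRESS_* bit set during e1000g_attach() is checked here, so
 * this routine can safely unwind both a failed attach and a normal detach.
 */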
1113 static void
1114 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter)
1115 {
1116 	private_devi_list_t *devi_node;
1117 	int result;
1118 
1119 	if (Adapter->e1000g_blink != NULL) {
1120 		ddi_periodic_delete(Adapter->e1000g_blink);
1121 		Adapter->e1000g_blink = NULL;
1122 	}
1123 
1124 	if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1125 		(void) e1000g_disable_intrs(Adapter);
1126 	}
1127 
1128 	if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) {
1129 		(void) mac_unregister(Adapter->mh);
1130 	}
1131 
1132 	if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1133 		(void) e1000g_rem_intrs(Adapter);
1134 	}
1135 
1136 	if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) {
1137 		(void) ddi_prop_remove_all(devinfo);
1138 	}
1139 
1140 	if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) {
1141 		kstat_delete((kstat_t *)Adapter->e1000g_ksp);
1142 	}
1143 
1144 	if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) {
1145 		stop_link_timer(Adapter);
1146 
1147 		mutex_enter(&e1000g_nvm_lock);
1148 		result = e1000_reset_hw(&Adapter->shared);
1149 		mutex_exit(&e1000g_nvm_lock);
1150 
1151 		if (result != E1000_SUCCESS) {
1152 			e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1153 			ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1154 		}
1155 	}
1156 
1157 	e1000g_release_multicast(Adapter);
1158 
1159 	if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
1160 		if (Adapter->osdep.reg_handle != NULL)
1161 			ddi_regs_map_free(&Adapter->osdep.reg_handle);
1162 		if (Adapter->osdep.ich_flash_handle != NULL &&
1163 		    Adapter->shared.mac.type < e1000_pch_spt)
1164 			ddi_regs_map_free(&Adapter->osdep.ich_flash_handle);
1165 		if (Adapter->osdep.io_reg_handle != NULL)
1166 			ddi_regs_map_free(&Adapter->osdep.io_reg_handle);
1167 	}
1168 
1169 	if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
1170 		if (Adapter->osdep.cfg_handle != NULL)
1171 			pci_config_teardown(&Adapter->osdep.cfg_handle);
1172 	}
1173 
1174 	if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) {
1175 		e1000g_destroy_locks(Adapter);
1176 	}
1177 
1178 	if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) {
1179 		e1000g_fm_fini(Adapter);
1180 	}
1181 
1182 	mutex_enter(&e1000g_rx_detach_lock);
1183 	if (e1000g_force_detach && (Adapter->priv_devi_node != NULL)) {
1184 		devi_node = Adapter->priv_devi_node;
1185 		devi_node->flag |= E1000G_PRIV_DEVI_DETACH;
1186 
1187 		if (devi_node->pending_rx_count == 0) {
1188 			e1000g_free_priv_devi_node(devi_node);
1189 		}
1190 	}
1191 	mutex_exit(&e1000g_rx_detach_lock);
1192 
1193 	kmem_free((caddr_t)Adapter, sizeof (struct e1000g));
1194 
1195 	/*
1196 	 * Another hotplug spec requirement:
1197 	 * run ddi_set_driver_private(devinfo, NULL);
1198 	 */
1199 	ddi_set_driver_private(devinfo, NULL);
1200 }
1201 
1202 /*
1203  * Get the BAR type and rnumber for a given PCI BAR offset
1204  */
1205 static int
1206 e1000g_get_bar_info(dev_info_t *dip, int bar_offset, bar_info_t *bar_info)
1207 {
1208 	pci_regspec_t *regs;
1209 	uint_t regs_length;
1210 	int type, rnumber, rcount;
1211 
1212 	ASSERT((bar_offset >= PCI_CONF_BASE0) &&
1213 	    (bar_offset <= PCI_CONF_BASE5));
1214 
1215 	/*
1216 	 * Get the DDI "reg" property
1217 	 */
1218 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
1219 	    DDI_PROP_DONTPASS, "reg", (int **)&regs,
1220 	    &regs_length) != DDI_PROP_SUCCESS) {
1221 		return (DDI_FAILURE);
1222 	}
1223 
1224 	rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t);
1225 	/*
1226 	 * Check the BAR offset
1227 	 */
1228 	for (rnumber = 0; rnumber < rcount; ++rnumber) {
1229 		if (PCI_REG_REG_G(regs[rnumber].pci_phys_hi) == bar_offset) {
1230 			type = regs[rnumber].pci_phys_hi & PCI_ADDR_MASK;
1231 			break;
1232 		}
1233 	}
1234 
1235 	ddi_prop_free(regs);
1236 
1237 	if (rnumber >= rcount)
1238 		return (DDI_FAILURE);
1239 
1240 	switch (type) {
1241 	case PCI_ADDR_CONFIG:
1242 		bar_info->type = E1000G_BAR_CONFIG;
1243 		break;
1244 	case PCI_ADDR_IO:
1245 		bar_info->type = E1000G_BAR_IO;
1246 		break;
1247 	case PCI_ADDR_MEM32:
1248 		bar_info->type = E1000G_BAR_MEM32;
1249 		break;
1250 	case PCI_ADDR_MEM64:
1251 		bar_info->type = E1000G_BAR_MEM64;
1252 		break;
1253 	default:
1254 		return (DDI_FAILURE);
1255 	}
1256 	bar_info->rnumber = rnumber;
1257 	return (DDI_SUCCESS);
1258 }
1259 
1260 static void
1261 e1000g_init_locks(struct e1000g *Adapter)
1262 {
1263 	e1000g_tx_ring_t *tx_ring;
1264 	e1000g_rx_ring_t *rx_ring;
1265 
1266 	rw_init(&Adapter->chip_lock, NULL,
1267 	    RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1268 	mutex_init(&Adapter->link_lock, NULL,
1269 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1270 	mutex_init(&Adapter->watchdog_lock, NULL,
1271 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1272 
1273 	tx_ring = Adapter->tx_ring;
1274 
1275 	mutex_init(&tx_ring->tx_lock, NULL,
1276 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1277 	mutex_init(&tx_ring->usedlist_lock, NULL,
1278 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1279 	mutex_init(&tx_ring->freelist_lock, NULL,
1280 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1281 
1282 	rx_ring = Adapter->rx_ring;
1283 
1284 	mutex_init(&rx_ring->rx_lock, NULL,
1285 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1286 
1287 	mutex_init(&Adapter->e1000g_led_lock, NULL,
1288 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1289 }
1290 
1291 static void
1292 e1000g_destroy_locks(struct e1000g *Adapter)
1293 {
1294 	e1000g_tx_ring_t *tx_ring;
1295 	e1000g_rx_ring_t *rx_ring;
1296 
1297 	mutex_destroy(&Adapter->e1000g_led_lock);
1298 
1299 	tx_ring = Adapter->tx_ring;
1300 	mutex_destroy(&tx_ring->tx_lock);
1301 	mutex_destroy(&tx_ring->usedlist_lock);
1302 	mutex_destroy(&tx_ring->freelist_lock);
1303 
1304 	rx_ring = Adapter->rx_ring;
1305 	mutex_destroy(&rx_ring->rx_lock);
1306 
1307 	mutex_destroy(&Adapter->link_lock);
1308 	mutex_destroy(&Adapter->watchdog_lock);
1309 	rw_destroy(&Adapter->chip_lock);
1310 
1311 	/* destroy mutexes initialized in shared code */
1312 	e1000_destroy_hw_mutex(&Adapter->shared);
1313 }
1314 
1315 static int
1316 e1000g_resume(dev_info_t *devinfo)
1317 {
1318 	struct e1000g *Adapter;
1319 
1320 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1321 	if (Adapter == NULL)
1322 		e1000g_log(Adapter, CE_PANIC,
1323 		    "Instance pointer is null\n");
1324 
1325 	if (Adapter->dip != devinfo)
1326 		e1000g_log(Adapter, CE_PANIC,
1327 		    "Devinfo is not the same as saved devinfo\n");
1328 
1329 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1330 
1331 	if (Adapter->e1000g_state & E1000G_STARTED) {
1332 		if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
1333 			rw_exit(&Adapter->chip_lock);
1334 			/*
1335 			 * We note the failure, but return success, as the
1336 			 * system is still usable without this controller.
1337 			 */
1338 			e1000g_log(Adapter, CE_WARN,
1339 			    "e1000g_resume: failed to restart controller\n");
1340 			return (DDI_SUCCESS);
1341 		}
1342 		/* Enable and start the watchdog timer */
1343 		enable_watchdog_timer(Adapter);
1344 	}
1345 
1346 	Adapter->e1000g_state &= ~E1000G_SUSPENDED;
1347 
1348 	rw_exit(&Adapter->chip_lock);
1349 
1350 	return (DDI_SUCCESS);
1351 }
1352 
1353 static int
1354 e1000g_suspend(dev_info_t *devinfo)
1355 {
1356 	struct e1000g *Adapter;
1357 
1358 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1359 	if (Adapter == NULL)
1360 		return (DDI_FAILURE);
1361 
1362 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1363 
1364 	Adapter->e1000g_state |= E1000G_SUSPENDED;
1365 
1366 	/* if the port isn't plumbed, we can simply return */
1367 	if (!(Adapter->e1000g_state & E1000G_STARTED)) {
1368 		rw_exit(&Adapter->chip_lock);
1369 		return (DDI_SUCCESS);
1370 	}
1371 
1372 	e1000g_stop(Adapter, B_FALSE);
1373 
1374 	rw_exit(&Adapter->chip_lock);
1375 
1376 	/* Disable and stop all the timers */
1377 	disable_watchdog_timer(Adapter);
1378 	stop_link_timer(Adapter);
1379 	stop_82547_timer(Adapter->tx_ring);
1380 
1381 	return (DDI_SUCCESS);
1382 }
1383 
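/*
 * e1000g_init - bring the adapter to an initialized state
 *
 * Resets the hardware, validates the NVM checksum, reads and validates the
 * MAC address, programs the packet buffer allocation and flow-control
 * thresholds, initializes the hardware via the shared code, and starts the
 * link setup timer.
 */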
1384 static int
1385 e1000g_init(struct e1000g *Adapter)
1386 {
1387 	uint32_t pba;
1388 	uint32_t high_water;
1389 	struct e1000_hw *hw;
1390 	clock_t link_timeout;
1391 	int result;
1392 
1393 	hw = &Adapter->shared;
1394 
1395 	/*
1396 	 * reset to put the hardware in a known state
1397 	 * before we try to do anything with the eeprom
1398 	 */
1399 	mutex_enter(&e1000g_nvm_lock);
1400 	result = e1000_reset_hw(hw);
1401 	mutex_exit(&e1000g_nvm_lock);
1402 
1403 	if (result != E1000_SUCCESS) {
1404 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1405 		goto init_fail;
1406 	}
1407 
1408 	mutex_enter(&e1000g_nvm_lock);
1409 	result = e1000_validate_nvm_checksum(hw);
1410 	if (result < E1000_SUCCESS) {
1411 		/*
1412 		 * Some PCI-E parts fail the first check due to
1413 		 * the link being in a sleep state.  Call it again;
1414 		 * if it fails a second time it's a real issue.
1415 		 */
1416 		result = e1000_validate_nvm_checksum(hw);
1417 	}
1418 	mutex_exit(&e1000g_nvm_lock);
1419 
1420 	if (result < E1000_SUCCESS) {
1421 		e1000g_log(Adapter, CE_WARN,
1422 		    "Invalid NVM checksum. Please contact "
1423 		    "the vendor to update the NVM.");
1424 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1425 		goto init_fail;
1426 	}
1427 
1428 	result = 0;
1429 #ifdef __sparc
1430 	/*
1431 	 * First, we try to get the local ethernet address from OBP. If
1432 	 * that fails, we get it from the EEPROM of the NIC card.
1433 	 */
1434 	result = e1000g_find_mac_address(Adapter);
1435 #endif
1436 	/* Get the local ethernet address. */
1437 	if (!result) {
1438 		mutex_enter(&e1000g_nvm_lock);
1439 		result = e1000_read_mac_addr(hw);
1440 		mutex_exit(&e1000g_nvm_lock);
1441 	}
1442 
1443 	if (result < E1000_SUCCESS) {
1444 		e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
1445 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1446 		goto init_fail;
1447 	}
1448 
1449 	/* check for valid mac address */
1450 	if (!is_valid_mac_addr(hw->mac.addr)) {
1451 		e1000g_log(Adapter, CE_WARN, "Invalid mac addr");
1452 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1453 		goto init_fail;
1454 	}
1455 
1456 	/* Set LAA state for 82571 chipset */
1457 	e1000_set_laa_state_82571(hw, B_TRUE);
1458 
1459 	/* Master Latency Timer implementation */
1460 	if (Adapter->master_latency_timer) {
1461 		pci_config_put8(Adapter->osdep.cfg_handle,
1462 		    PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer);
1463 	}
1464 
1465 	if (hw->mac.type < e1000_82547) {
1466 		/*
1467 		 * Total FIFO is 64K
1468 		 */
1469 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1470 			pba = E1000_PBA_40K;	/* 40K for Rx, 24K for Tx */
1471 		else
1472 			pba = E1000_PBA_48K;	/* 48K for Rx, 16K for Tx */
1473 	} else if ((hw->mac.type == e1000_82571) ||
1474 	    (hw->mac.type == e1000_82572) ||
1475 	    (hw->mac.type == e1000_80003es2lan)) {
1476 		/*
1477 		 * Total FIFO is 48K
1478 		 */
1479 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1480 			pba = E1000_PBA_30K;	/* 30K for Rx, 18K for Tx */
1481 		else
1482 			pba = E1000_PBA_38K;	/* 38K for Rx, 10K for Tx */
1483 	} else if (hw->mac.type == e1000_82573) {
1484 		pba = E1000_PBA_20K;		/* 20K for Rx, 12K for Tx */
1485 	} else if (hw->mac.type == e1000_82574) {
1486 		/* Keep adapter default: 20K for Rx, 20K for Tx */
1487 		pba = E1000_READ_REG(hw, E1000_PBA);
1488 	} else if (hw->mac.type == e1000_ich8lan) {
1489 		pba = E1000_PBA_8K;		/* 8K for Rx, 12K for Tx */
1490 	} else if (hw->mac.type == e1000_ich9lan) {
1491 		pba = E1000_PBA_10K;
1492 	} else if (hw->mac.type == e1000_ich10lan) {
1493 		pba = E1000_PBA_10K;
1494 	} else if (hw->mac.type == e1000_pchlan) {
1495 		pba = E1000_PBA_26K;
1496 	} else if (hw->mac.type == e1000_pch2lan) {
1497 		pba = E1000_PBA_26K;
1498 	} else if (hw->mac.type == e1000_pch_lpt) {
1499 		pba = E1000_PBA_26K;
1500 	} else if (hw->mac.type == e1000_pch_spt) {
1501 		pba = E1000_PBA_26K;
1502 	} else if (hw->mac.type == e1000_pch_cnp) {
1503 		pba = E1000_PBA_26K;
1504 	} else if (hw->mac.type == e1000_pch_tgp) {
1505 		pba = E1000_PBA_26K;
1506 	} else if (hw->mac.type == e1000_pch_adp) {
1507 		pba = E1000_PBA_26K;
1508 	} else if (hw->mac.type == e1000_pch_mtp) {
1509 		pba = E1000_PBA_26K;
1510 	} else if (hw->mac.type == e1000_pch_lnp) {
1511 		pba = E1000_PBA_26K;
1512 	} else if (hw->mac.type == e1000_pch_rpl) {
1513 		pba = E1000_PBA_26K;
1514 	} else {
1515 		/*
1516 		 * Total FIFO is 40K
1517 		 */
1518 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1519 			pba = E1000_PBA_22K;	/* 22K for Rx, 18K for Tx */
1520 		else
1521 			pba = E1000_PBA_30K;	/* 30K for Rx, 10K for Tx */
1522 	}
1523 	E1000_WRITE_REG(hw, E1000_PBA, pba);
1524 
1525 	/*
1526 	 * These parameters set thresholds for the adapter's generation (Tx)
1527 	 * and response (Rx) to Ethernet PAUSE frames.  These are just threshold
1528 	 * settings.  Flow control is enabled or disabled in the configuration
1529 	 * file.
1530 	 * High-water mark is set down from the top of the rx fifo (not
1531 	 * sensitive to max_frame_size) and low-water is set just below
1532 	 * high-water mark.
1533 	 * The high water mark must be low enough to fit one full frame above
1534 	 * it in the rx FIFO.  Should be the lower of:
1535 	 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early
1536 	 * receive size (assuming ERT set to E1000_ERT_2048), or the full
1537 	 * Rx FIFO size minus one full frame.
1538 	 */
1539 	high_water = min(((pba << 10) * 9 / 10),
1540 	    ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574 ||
1541 	    hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_ich10lan) ?
1542 	    ((pba << 10) - (E1000_ERT_2048 << 3)) :
1543 	    ((pba << 10) - Adapter->max_frame_size)));
1544 
1545 	hw->fc.high_water = high_water & 0xFFF8;
1546 	hw->fc.low_water = hw->fc.high_water - 8;
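	/*
	 * Worked example (assuming E1000_PBA_26K means 26 KB of rx FIFO and a
	 * default ~1522-byte maximum frame on a PCH-class adapter, i.e. the
	 * non-ERT branch above):
	 *	pba << 10             = 26624 bytes
	 *	90% of the FIFO       = 23961
	 *	FIFO - max_frame_size = 25102
	 *	high_water            = min(23961, 25102) & 0xFFF8 = 23960
	 *	low_water             = 23960 - 8 = 23952
	 */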
1547 
1548 	if (hw->mac.type == e1000_80003es2lan)
1549 		hw->fc.pause_time = 0xFFFF;
1550 	else
1551 		hw->fc.pause_time = E1000_FC_PAUSE_TIME;
1552 	hw->fc.send_xon = B_TRUE;
1553 
1554 	/*
1555 	 * Reset the adapter hardware a second time.
1556 	 */
1557 	mutex_enter(&e1000g_nvm_lock);
1558 	result = e1000_reset_hw(hw);
1559 	mutex_exit(&e1000g_nvm_lock);
1560 
1561 	if (result != E1000_SUCCESS) {
1562 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1563 		goto init_fail;
1564 	}
1565 
1566 	/* disable wakeup control by default */
1567 	if (hw->mac.type >= e1000_82544)
1568 		E1000_WRITE_REG(hw, E1000_WUC, 0);
1569 
1570 	/*
1571 	 * MWI should be disabled on 82546.
1572 	 */
1573 	if (hw->mac.type == e1000_82546)
1574 		e1000_pci_clear_mwi(hw);
1575 	else
1576 		e1000_pci_set_mwi(hw);
1577 
1578 	/*
1579 	 * Configure/Initialize hardware
1580 	 */
1581 	mutex_enter(&e1000g_nvm_lock);
1582 	result = e1000_init_hw(hw);
1583 	mutex_exit(&e1000g_nvm_lock);
1584 
1585 	if (result < E1000_SUCCESS) {
1586 		e1000g_log(Adapter, CE_WARN, "Initialize hw failed");
1587 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1588 		goto init_fail;
1589 	}
1590 
1591 	/*
1592 	 * Restore LED settings to the default from EEPROM
1593 	 * to meet the standard for Sun platforms.
1594 	 */
1595 	(void) e1000_cleanup_led(hw);
1596 
1597 	/* Disable Smart Power Down */
1598 	phy_spd_state(hw, B_FALSE);
1599 
1600 	/* Make sure driver has control */
1601 	e1000g_get_driver_control(hw);
1602 
1603 	/*
1604 	 * Initialize unicast addresses.
1605 	 */
1606 	e1000g_init_unicst(Adapter);
1607 
1608 	/*
1609 	 * Set up and initialize the mctable structures.  After this routine
1610 	 * completes, the multicast table will be set.
1611 	 */
1612 	e1000_update_mc_addr_list(hw,
1613 	    (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
1614 	msec_delay(5);
1615 
1616 	/*
1617 	 * Implement Adaptive IFS
1618 	 */
1619 	e1000_reset_adaptive(hw);
1620 
1621 	/* Setup Interrupt Throttling Register */
1622 	if (hw->mac.type >= e1000_82540) {
1623 		E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate);
1624 	} else
1625 		Adapter->intr_adaptive = B_FALSE;
1626 
1627 	/* Start the timer for link setup */
1628 	if (hw->mac.autoneg)
1629 		link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000);
1630 	else
1631 		link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000);
1632 
1633 	mutex_enter(&Adapter->link_lock);
1634 	if (hw->phy.autoneg_wait_to_complete) {
1635 		Adapter->link_complete = B_TRUE;
1636 	} else {
1637 		Adapter->link_complete = B_FALSE;
1638 		Adapter->link_tid = timeout(e1000g_link_timer,
1639 		    (void *)Adapter, link_timeout);
1640 	}
1641 	mutex_exit(&Adapter->link_lock);
1642 
1643 	/* Save the state of the phy */
1644 	e1000g_get_phy_state(Adapter);
1645 
1646 	e1000g_param_sync(Adapter);
1647 
1648 	Adapter->init_count++;
1649 
1650 	if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
1651 		goto init_fail;
1652 	}
1653 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1654 		goto init_fail;
1655 	}
1656 
1657 	Adapter->poll_mode = e1000g_poll_mode;
1658 
1659 	return (DDI_SUCCESS);
1660 
1661 init_fail:
1662 	ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1663 	return (DDI_FAILURE);
1664 }
1665 
1666 static int
1667 e1000g_alloc_rx_data(struct e1000g *Adapter)
1668 {
1669 	e1000g_rx_ring_t *rx_ring;
1670 	e1000g_rx_data_t *rx_data;
1671 
1672 	rx_ring = Adapter->rx_ring;
1673 
1674 	rx_data = kmem_zalloc(sizeof (e1000g_rx_data_t), KM_NOSLEEP);
1675 
1676 	if (rx_data == NULL)
1677 		return (DDI_FAILURE);
1678 
1679 	rx_data->priv_devi_node = Adapter->priv_devi_node;
1680 	rx_data->rx_ring = rx_ring;
1681 
1682 	mutex_init(&rx_data->freelist_lock, NULL,
1683 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1684 	mutex_init(&rx_data->recycle_lock, NULL,
1685 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1686 
1687 	rx_ring->rx_data = rx_data;
1688 
1689 	return (DDI_SUCCESS);
1690 }
1691 
1692 void
1693 e1000g_free_rx_pending_buffers(e1000g_rx_data_t *rx_data)
1694 {
1695 	rx_sw_packet_t *packet, *next_packet;
1696 
1697 	if (rx_data == NULL)
1698 		return;
1699 
1700 	packet = rx_data->packet_area;
1701 	while (packet != NULL) {
1702 		next_packet = packet->next;
1703 		e1000g_free_rx_sw_packet(packet, B_TRUE);
1704 		packet = next_packet;
1705 	}
1706 	rx_data->packet_area = NULL;
1707 }
1708 
1709 void
1710 e1000g_free_rx_data(e1000g_rx_data_t *rx_data)
1711 {
1712 	if (rx_data == NULL)
1713 		return;
1714 
1715 	mutex_destroy(&rx_data->freelist_lock);
1716 	mutex_destroy(&rx_data->recycle_lock);
1717 
1718 	kmem_free(rx_data, sizeof (e1000g_rx_data_t));
1719 }
1720 
1721 /*
1722  * Check if the link is up
1723  */
1724 static boolean_t
1725 e1000g_link_up(struct e1000g *Adapter)
1726 {
1727 	struct e1000_hw *hw = &Adapter->shared;
1728 	boolean_t link_up = B_FALSE;
1729 
1730 	/*
1731 	 * get_link_status is set in the interrupt handler on a link-status-change
1732 	 * or rx sequence error interrupt.  get_link_status will stay
1733 	 * false until e1000_check_for_link establishes link; this applies
1734 	 * to copper adapters only.
1735 	 */
1736 	switch (hw->phy.media_type) {
1737 	case e1000_media_type_copper:
1738 		if (hw->mac.get_link_status) {
1739 			/*
1740 			 * SPT and newer devices need a bit of extra time before
1741 			 * we ask them.
1742 			 */
1743 			if (hw->mac.type >= e1000_pch_spt)
1744 				msec_delay(50);
1745 			(void) e1000_check_for_link(hw);
1746 			if ((E1000_READ_REG(hw, E1000_STATUS) &
1747 			    E1000_STATUS_LU)) {
1748 				link_up = B_TRUE;
1749 			} else {
1750 				link_up = !hw->mac.get_link_status;
1751 			}
1752 		} else {
1753 			link_up = B_TRUE;
1754 		}
1755 		break;
1756 	case e1000_media_type_fiber:
1757 		(void) e1000_check_for_link(hw);
1758 		link_up = (E1000_READ_REG(hw, E1000_STATUS) &
1759 		    E1000_STATUS_LU);
1760 		break;
1761 	case e1000_media_type_internal_serdes:
1762 		(void) e1000_check_for_link(hw);
1763 		link_up = hw->mac.serdes_has_link;
1764 		break;
1765 	}
1766 
1767 	return (link_up);
1768 }
1769 
1770 static void
1771 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
1772 {
1773 	struct iocblk *iocp;
1774 	struct e1000g *e1000gp;
1775 	enum ioc_reply status;
1776 
1777 	iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
1778 	iocp->ioc_error = 0;
1779 	e1000gp = (struct e1000g *)arg;
1780 
1781 	ASSERT(e1000gp);
1782 	if (e1000gp == NULL) {
1783 		miocnak(q, mp, 0, EINVAL);
1784 		return;
1785 	}
1786 
1787 	rw_enter(&e1000gp->chip_lock, RW_READER);
1788 	if (e1000gp->e1000g_state & E1000G_SUSPENDED) {
1789 		rw_exit(&e1000gp->chip_lock);
1790 		miocnak(q, mp, 0, EINVAL);
1791 		return;
1792 	}
1793 	rw_exit(&e1000gp->chip_lock);
1794 
1795 	switch (iocp->ioc_cmd) {
1796 
1797 	case LB_GET_INFO_SIZE:
1798 	case LB_GET_INFO:
1799 	case LB_GET_MODE:
1800 	case LB_SET_MODE:
1801 		status = e1000g_loopback_ioctl(e1000gp, iocp, mp);
1802 		break;
1803 
1805 #ifdef E1000G_DEBUG
1806 	case E1000G_IOC_REG_PEEK:
1807 	case E1000G_IOC_REG_POKE:
1808 		status = e1000g_pp_ioctl(e1000gp, iocp, mp);
1809 		break;
1810 	case E1000G_IOC_CHIP_RESET:
1811 		e1000gp->reset_count++;
1812 		if (e1000g_reset_adapter(e1000gp))
1813 			status = IOC_ACK;
1814 		else
1815 			status = IOC_INVAL;
1816 		break;
1817 #endif
1818 	default:
1819 		status = IOC_INVAL;
1820 		break;
1821 	}
1822 
1823 	/*
1824 	 * Decide how to reply
1825 	 */
1826 	switch (status) {
1827 	default:
1828 	case IOC_INVAL:
1829 		/*
1830 		 * Error, reply with a NAK and EINVAL or the specified error
1831 		 */
1832 		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
1833 		    EINVAL : iocp->ioc_error);
1834 		break;
1835 
1836 	case IOC_DONE:
1837 		/*
1838 		 * OK, reply already sent
1839 		 */
1840 		break;
1841 
1842 	case IOC_ACK:
1843 		/*
1844 		 * OK, reply with an ACK
1845 		 */
1846 		miocack(q, mp, 0, 0);
1847 		break;
1848 
1849 	case IOC_REPLY:
1850 		/*
1851 		 * OK, send prepared reply as ACK or NAK
1852 		 */
1853 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
1854 		    M_IOCACK : M_IOCNAK;
1855 		qreply(q, mp);
1856 		break;
1857 	}
1858 }
1859 
1860 /*
1861  * The default value of e1000g_poll_mode == 0 assumes that the NIC is
1862  * capable of supporting only one interrupt and we shouldn't disable
1863  * the physical interrupt. In this case we let the interrupt come and
1864  * we queue the packets in the rx ring itself in case we are in polling
1865  * mode (better latency but slightly lower performance and a very
1866  * high interrupt count in mpstat which is harmless).
1867  *
1868  * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt
1869  * which can be disabled in poll mode. This gives better overall
1870  * throughput (compared to the mode above), shows very low interrupt
1871  * count but has slightly higher latency since we pick the packets when
1872  * the poll thread does polling.
1873  *
1874  * Currently, this flag should be enabled only while doing performance
1875  * measurement or when it can be guaranteed that putting the entire NIC
1876  * into poll mode will not harm any traffic like cluster heartbeat etc.
1877  */
1878 int e1000g_poll_mode = 0;
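
/*
 * Since e1000g_poll_mode is an ordinary global in this module, it can
 * typically be tuned without a rebuild, for example by adding a line
 * such as
 *	set e1000g:e1000g_poll_mode = 1
 * to /etc/system and rebooting, or by patching the variable on a live
 * system with mdb -kw.
 */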
1879 
1880 /*
1881  * Called from the upper layers when driver is in polling mode to
1882  * pick up any queued packets. Care should be taken to not block
1883  * this thread.
1884  */
1885 static mblk_t *
e1000g_poll_ring(void *arg, int bytes_to_pickup)
1886 {
1887 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)arg;
1888 	mblk_t			*mp = NULL;
1889 	mblk_t			*tail;
1890 	struct e1000g		*adapter;
1891 
1892 	adapter = rx_ring->adapter;
1893 
1894 	rw_enter(&adapter->chip_lock, RW_READER);
1895 
1896 	if (adapter->e1000g_state & E1000G_SUSPENDED) {
1897 		rw_exit(&adapter->chip_lock);
1898 		return (NULL);
1899 	}
1900 
1901 	mutex_enter(&rx_ring->rx_lock);
1902 	mp = e1000g_receive(rx_ring, &tail, bytes_to_pickup);
1903 	mutex_exit(&rx_ring->rx_lock);
1904 	rw_exit(&adapter->chip_lock);
1905 	return (mp);
1906 }
1907 
1908 static int
1909 e1000g_m_start(void *arg)
1910 {
1911 	struct e1000g *Adapter = (struct e1000g *)arg;
1912 
1913 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1914 
1915 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
1916 		rw_exit(&Adapter->chip_lock);
1917 		return (ECANCELED);
1918 	}
1919 
1920 	if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
1921 		rw_exit(&Adapter->chip_lock);
1922 		return (ENOTACTIVE);
1923 	}
1924 
1925 	Adapter->e1000g_state |= E1000G_STARTED;
1926 
1927 	rw_exit(&Adapter->chip_lock);
1928 
1929 	/* Enable and start the watchdog timer */
1930 	enable_watchdog_timer(Adapter);
1931 
1932 	return (0);
1933 }
1934 
1935 static int
1936 e1000g_start(struct e1000g *Adapter, boolean_t global)
1937 {
1938 	e1000g_rx_data_t *rx_data;
1939 
1940 	if (global) {
1941 		if (e1000g_alloc_rx_data(Adapter) != DDI_SUCCESS) {
1942 			e1000g_log(Adapter, CE_WARN, "Allocate rx data failed");
1943 			goto start_fail;
1944 		}
1945 
1946 		/* Allocate dma resources for descriptors and buffers */
1947 		if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) {
1948 			e1000g_log(Adapter, CE_WARN,
1949 			    "Alloc DMA resources failed");
1950 			goto start_fail;
1951 		}
1952 		Adapter->rx_buffer_setup = B_FALSE;
1953 	}
1954 
1955 	if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) {
1956 		if (e1000g_init(Adapter) != DDI_SUCCESS) {
1957 			e1000g_log(Adapter, CE_WARN,
1958 			    "Adapter initialization failed");
1959 			goto start_fail;
1960 		}
1961 	}
1962 
1963 	/* Setup and initialize the transmit structures */
1964 	e1000g_tx_setup(Adapter);
1965 	msec_delay(5);
1966 
1967 	/* Setup and initialize the receive structures */
1968 	e1000g_rx_setup(Adapter);
1969 	msec_delay(5);
1970 
1971 	/* Restore the e1000g promiscuous mode */
1972 	e1000g_restore_promisc(Adapter);
1973 
1974 	e1000g_mask_interrupt(Adapter);
1975 
1976 	Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
1977 
1978 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1979 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1980 		goto start_fail;
1981 	}
1982 
1983 	return (DDI_SUCCESS);
1984 
1985 start_fail:
1986 	rx_data = Adapter->rx_ring->rx_data;
1987 
1988 	if (global) {
1989 		e1000g_release_dma_resources(Adapter);
1990 		e1000g_free_rx_pending_buffers(rx_data);
1991 		e1000g_free_rx_data(rx_data);
1992 	}
1993 
1994 	mutex_enter(&e1000g_nvm_lock);
1995 	(void) e1000_reset_hw(&Adapter->shared);
1996 	mutex_exit(&e1000g_nvm_lock);
1997 
1998 	return (DDI_FAILURE);
1999 }
2000 
2001 /*
2002  * The I219 has the curious property that if the descriptor rings are not
2003  * emptied before resetting the hardware or before changing the device state
2004  * based on runtime power management, it'll cause the card to hang. This can
2005  * then only be fixed by a PCI reset. As such, for the I219 and it alone, we
2006  * have to flush the rings if we're in this state.
2007  */
2008 static void
2009 e1000g_flush_desc_rings(struct e1000g *Adapter)
2010 {
2011 	struct e1000_hw	*hw = &Adapter->shared;
2012 	u16		hang_state;
2013 	u32		fext_nvm11, tdlen;
2014 
2015 	/* First, disable MULR fix in FEXTNVM11 */
2016 	fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
2017 	fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
2018 	E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11);
2019 
2020 	/* do nothing if not in a faulty state, or if the queue is empty */
2021 	tdlen = E1000_READ_REG(hw, E1000_TDLEN(0));
2022 	hang_state = pci_config_get16(Adapter->osdep.cfg_handle,
2023 	    PCICFG_DESC_RING_STATUS);
2024 	if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
2025 		return;
2026 	e1000g_flush_tx_ring(Adapter);
2027 
2028 	/* recheck, maybe the fault is caused by the rx ring */
2029 	hang_state = pci_config_get16(Adapter->osdep.cfg_handle,
2030 	    PCICFG_DESC_RING_STATUS);
2031 	if (hang_state & FLUSH_DESC_REQUIRED)
2032 		e1000g_flush_rx_ring(Adapter);
2033 
2034 }
2035 
2036 static void
2037 e1000g_m_stop(void *arg)
2038 {
2039 	struct e1000g *Adapter = (struct e1000g *)arg;
2040 
2041 	/* Drain tx sessions */
2042 	(void) e1000g_tx_drain(Adapter);
2043 
2044 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2045 
2046 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2047 		rw_exit(&Adapter->chip_lock);
2048 		return;
2049 	}
2050 	Adapter->e1000g_state &= ~E1000G_STARTED;
2051 	e1000g_stop(Adapter, B_TRUE);
2052 
2053 	rw_exit(&Adapter->chip_lock);
2054 
2055 	/* Disable and stop all the timers */
2056 	disable_watchdog_timer(Adapter);
2057 	stop_link_timer(Adapter);
2058 	stop_82547_timer(Adapter->tx_ring);
2059 }
2060 
2061 static void
2062 e1000g_stop(struct e1000g *Adapter, boolean_t global)
2063 {
2064 	private_devi_list_t *devi_node;
2065 	e1000g_rx_data_t *rx_data;
2066 	int result;
2067 
2068 	Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT;
2069 
2070 	/* Stop the chip and release pending resources */
2071 
2072 	/* Tell firmware driver is no longer in control */
2073 	e1000g_release_driver_control(&Adapter->shared);
2074 
2075 	e1000g_clear_all_interrupts(Adapter);
2076 
2077 	mutex_enter(&e1000g_nvm_lock);
2078 	result = e1000_reset_hw(&Adapter->shared);
2079 	mutex_exit(&e1000g_nvm_lock);
2080 
2081 	if (result != E1000_SUCCESS) {
2082 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
2083 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
2084 	}
2085 
2086 	mutex_enter(&Adapter->link_lock);
2087 	Adapter->link_complete = B_FALSE;
2088 	mutex_exit(&Adapter->link_lock);
2089 
2090 	/* Release resources still held by the TX descriptors */
2091 	e1000g_tx_clean(Adapter);
2092 
2093 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2094 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
2095 
2096 	/* Clean the pending rx jumbo packet fragment */
2097 	e1000g_rx_clean(Adapter);
2098 
2099 	/*
2100 	 * The I219, e.g. the pch_spt, has bugs such that we must ensure that
2101 	 * rings are flushed before we do anything else. This must be done
2102 	 * before we release DMA resources.
2103 	 */
2104 	if (Adapter->shared.mac.type >= e1000_pch_spt)
2105 		e1000g_flush_desc_rings(Adapter);
2106 
2107 	if (global) {
2108 		e1000g_release_dma_resources(Adapter);
2109 
2110 		mutex_enter(&e1000g_rx_detach_lock);
2111 		rx_data = Adapter->rx_ring->rx_data;
2112 		rx_data->flag |= E1000G_RX_STOPPED;
2113 
2114 		if (rx_data->pending_count == 0) {
2115 			e1000g_free_rx_pending_buffers(rx_data);
2116 			e1000g_free_rx_data(rx_data);
2117 		} else {
2118 			devi_node = rx_data->priv_devi_node;
2119 			if (devi_node != NULL)
2120 				atomic_inc_32(&devi_node->pending_rx_count);
2121 			else
2122 				atomic_inc_32(&Adapter->pending_rx_count);
2123 		}
2124 		mutex_exit(&e1000g_rx_detach_lock);
2125 	}
2126 
2127 	if (Adapter->link_state != LINK_STATE_UNKNOWN) {
2128 		Adapter->link_state = LINK_STATE_UNKNOWN;
2129 		if (!Adapter->reset_flag)
2130 			mac_link_update(Adapter->mh, Adapter->link_state);
2131 	}
2132 }
2133 
2134 static void
2135 e1000g_rx_clean(struct e1000g *Adapter)
2136 {
2137 	e1000g_rx_data_t *rx_data = Adapter->rx_ring->rx_data;
2138 
2139 	if (rx_data == NULL)
2140 		return;
2141 
2142 	if (rx_data->rx_mblk != NULL) {
2143 		freemsg(rx_data->rx_mblk);
2144 		rx_data->rx_mblk = NULL;
2145 		rx_data->rx_mblk_tail = NULL;
2146 		rx_data->rx_mblk_len = 0;
2147 	}
2148 }
2149 
2150 static void
2151 e1000g_tx_clean(struct e1000g *Adapter)
2152 {
2153 	e1000g_tx_ring_t *tx_ring;
2154 	p_tx_sw_packet_t packet;
2155 	mblk_t *mp;
2156 	mblk_t *nmp;
2157 	uint32_t packet_count;
2158 
2159 	tx_ring = Adapter->tx_ring;
2160 
2161 	/*
2162 	 * Here we don't need to protect the lists using the
2163 	 * usedlist_lock and freelist_lock, since they are already
2164 	 * protected by the chip_lock.
2165 	 */
2166 	mp = NULL;
2167 	nmp = NULL;
2168 	packet_count = 0;
2169 	packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list);
2170 	while (packet != NULL) {
2171 		if (packet->mp != NULL) {
2172 			/* Assemble the message chain */
2173 			if (mp == NULL) {
2174 				mp = packet->mp;
2175 				nmp = packet->mp;
2176 			} else {
2177 				nmp->b_next = packet->mp;
2178 				nmp = packet->mp;
2179 			}
2180 			/* Disconnect the message from the sw packet */
2181 			packet->mp = NULL;
2182 		}
2183 
2184 		e1000g_free_tx_swpkt(packet);
2185 		packet_count++;
2186 
2187 		packet = (p_tx_sw_packet_t)
2188 		    QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link);
2189 	}
2190 
2191 	if (mp != NULL)
2192 		freemsgchain(mp);
2193 
2194 	if (packet_count > 0) {
2195 		QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list);
2196 		QUEUE_INIT_LIST(&tx_ring->used_list);
2197 
2198 		/* Setup TX descriptor pointers */
2199 		tx_ring->tbd_next = tx_ring->tbd_first;
2200 		tx_ring->tbd_oldest = tx_ring->tbd_first;
2201 
2202 		/* Setup our HW Tx Head & Tail descriptor pointers */
2203 		E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
2204 		E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
2205 	}
2206 }
2207 
2208 static boolean_t
2209 e1000g_tx_drain(struct e1000g *Adapter)
2210 {
2211 	int i;
2212 	boolean_t done;
2213 	e1000g_tx_ring_t *tx_ring;
2214 
2215 	tx_ring = Adapter->tx_ring;
2216 
2217 	/* Allow up to 'wsdraintime' for pending xmit's to complete. */
2218 	for (i = 0; i < TX_DRAIN_TIME; i++) {
2219 		mutex_enter(&tx_ring->usedlist_lock);
2220 		done = IS_QUEUE_EMPTY(&tx_ring->used_list);
2221 		mutex_exit(&tx_ring->usedlist_lock);
2222 
2223 		if (done)
2224 			break;
2225 
2226 		msec_delay(1);
2227 	}
2228 
2229 	return (done);
2230 }
2231 
2232 static boolean_t
2233 e1000g_rx_drain(struct e1000g *Adapter)
2234 {
2235 	int i;
2236 	boolean_t done;
2237 
2238 	/*
2239 	 * Allow up to RX_DRAIN_TIME for pending received packets to complete.
2240 	 */
2241 	for (i = 0; i < RX_DRAIN_TIME; i++) {
2242 		done = (Adapter->pending_rx_count == 0);
2243 
2244 		if (done)
2245 			break;
2246 
2247 		msec_delay(1);
2248 	}
2249 
2250 	return (done);
2251 }
2252 
2253 static boolean_t
2254 e1000g_reset_adapter(struct e1000g *Adapter)
2255 {
2256 	/* Disable and stop all the timers */
2257 	disable_watchdog_timer(Adapter);
2258 	stop_link_timer(Adapter);
2259 	stop_82547_timer(Adapter->tx_ring);
2260 
2261 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2262 
2263 	if (Adapter->stall_flag) {
2264 		Adapter->stall_flag = B_FALSE;
2265 		Adapter->reset_flag = B_TRUE;
2266 	}
2267 
2268 	if (!(Adapter->e1000g_state & E1000G_STARTED)) {
2269 		rw_exit(&Adapter->chip_lock);
2270 		return (B_TRUE);
2271 	}
2272 
2273 	e1000g_stop(Adapter, B_FALSE);
2274 
2275 	if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
2276 		rw_exit(&Adapter->chip_lock);
2277 		e1000g_log(Adapter, CE_WARN, "Reset failed");
2278 			return (B_FALSE);
2279 	}
2280 
2281 	rw_exit(&Adapter->chip_lock);
2282 
2283 	/* Enable and start the watchdog timer */
2284 	enable_watchdog_timer(Adapter);
2285 
2286 	return (B_TRUE);
2287 }
2288 
2289 boolean_t
2290 e1000g_global_reset(struct e1000g *Adapter)
2291 {
2292 	/* Disable and stop all the timers */
2293 	disable_watchdog_timer(Adapter);
2294 	stop_link_timer(Adapter);
2295 	stop_82547_timer(Adapter->tx_ring);
2296 
2297 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2298 
2299 	e1000g_stop(Adapter, B_TRUE);
2300 
2301 	Adapter->init_count = 0;
2302 
2303 	if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
2304 		rw_exit(&Adapter->chip_lock);
2305 		e1000g_log(Adapter, CE_WARN, "Reset failed");
2306 		return (B_FALSE);
2307 	}
2308 
2309 	rw_exit(&Adapter->chip_lock);
2310 
2311 	/* Enable and start the watchdog timer */
2312 	enable_watchdog_timer(Adapter);
2313 
2314 	return (B_TRUE);
2315 }
2316 
2317 /*
2318  * e1000g_intr_pciexpress - ISR for PCI Express chipsets
2319  *
2320  * This interrupt service routine is for PCI-Express adapters.
2321  * The ICR contents is valid only when the E1000_ICR_INT_ASSERTED
2322  * bit is set.
2323  */
2324 static uint_t
2325 e1000g_intr_pciexpress(caddr_t arg, caddr_t arg1 __unused)
2326 {
2327 	struct e1000g *Adapter;
2328 	uint32_t icr;
2329 
2330 	Adapter = (struct e1000g *)(uintptr_t)arg;
2331 	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2332 
2333 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2334 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2335 		return (DDI_INTR_CLAIMED);
2336 	}
2337 
2338 	if (icr & E1000_ICR_INT_ASSERTED) {
2339 		/*
2340 		 * E1000_ICR_INT_ASSERTED bit was set:
2341 		 * Read(Clear) the ICR, claim this interrupt,
2342 		 * look for work to do.
2343 		 */
2344 		e1000g_intr_work(Adapter, icr);
2345 		return (DDI_INTR_CLAIMED);
2346 	} else {
2347 		/*
2348 		 * E1000_ICR_INT_ASSERTED bit was not set:
2349 		 * Don't claim this interrupt, return immediately.
2350 		 */
2351 		return (DDI_INTR_UNCLAIMED);
2352 	}
2353 }
2354 
2355 /*
2356  * e1000g_intr - ISR for PCI/PCI-X chipsets
2357  *
2358  * This interrupt service routine is for PCI/PCI-X adapters.
2359  * We check the ICR contents no matter the E1000_ICR_INT_ASSERTED
2360  * bit is set or not.
2361  */
2362 static uint_t
2363 e1000g_intr(caddr_t arg, caddr_t arg1 __unused)
2364 {
2365 	struct e1000g *Adapter;
2366 	uint32_t icr;
2367 
2368 	Adapter = (struct e1000g *)(uintptr_t)arg;
2369 	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2370 
2371 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2372 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2373 		return (DDI_INTR_CLAIMED);
2374 	}
2375 
2376 	if (icr) {
2377 		/*
2378 		 * Any bit was set in ICR:
2379 		 * Read(Clear) the ICR, claim this interrupt,
2380 		 * look for work to do.
2381 		 */
2382 		e1000g_intr_work(Adapter, icr);
2383 		return (DDI_INTR_CLAIMED);
2384 	} else {
2385 		/*
2386 		 * No bit was set in ICR:
2387 		 * Don't claim this interrupt, return immediately.
2388 		 */
2389 		return (DDI_INTR_UNCLAIMED);
2390 	}
2391 }
2392 
2393 /*
2394  * e1000g_intr_work - actual processing of ISR
2395  *
2396  * Read(clear) the ICR contents and call appropriate interrupt
2397  * processing routines.
2398  */
2399 static void
2400 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr)
2401 {
2402 	struct e1000_hw *hw = &Adapter->shared;
2403 	e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
2405 
2406 	Adapter->rx_pkt_cnt = 0;
2407 	Adapter->tx_pkt_cnt = 0;
2408 
2409 	rw_enter(&Adapter->chip_lock, RW_READER);
2410 
2411 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2412 		rw_exit(&Adapter->chip_lock);
2413 		return;
2414 	}
2415 	/*
2416 	 * Here we need to check the "e1000g_state" flag within the chip_lock to
2417 	 * ensure the receive routine will not execute when the adapter is
2418 	 * being reset.
2419 	 */
2420 	if (!(Adapter->e1000g_state & E1000G_STARTED)) {
2421 		rw_exit(&Adapter->chip_lock);
2422 		return;
2423 	}
2424 
2425 	if (icr & E1000_ICR_RXT0) {
2426 		mblk_t			*mp = NULL;
2427 		mblk_t			*tail = NULL;
2428 		e1000g_rx_ring_t	*rx_ring;
2429 
2430 		rx_ring = Adapter->rx_ring;
2431 		mutex_enter(&rx_ring->rx_lock);
2432 		/*
2433 		 * Sometimes with legacy interrupts, it is possible that
2434 		 * there is a single interrupt for Rx/Tx. In that
2435 		 * case, if the poll flag is set, we shouldn't really
2436 		 * be doing Rx processing.
2437 		 */
2438 		if (!rx_ring->poll_flag)
2439 			mp = e1000g_receive(rx_ring, &tail,
2440 			    E1000G_CHAIN_NO_LIMIT);
2441 		mutex_exit(&rx_ring->rx_lock);
2442 		rw_exit(&Adapter->chip_lock);
2443 		if (mp != NULL)
2444 			mac_rx_ring(Adapter->mh, rx_ring->mrh,
2445 			    mp, rx_ring->ring_gen_num);
2446 	} else
2447 		rw_exit(&Adapter->chip_lock);
2448 
2449 	if (icr & E1000_ICR_TXDW) {
2450 		if (!Adapter->tx_intr_enable)
2451 			e1000g_clear_tx_interrupt(Adapter);
2452 
2453 		/* Recycle the tx descriptors */
2454 		rw_enter(&Adapter->chip_lock, RW_READER);
2455 		(void) e1000g_recycle(tx_ring);
2456 		E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr);
2457 		rw_exit(&Adapter->chip_lock);
2458 
2459 		if (tx_ring->resched_needed &&
2460 		    (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) {
2461 			tx_ring->resched_needed = B_FALSE;
2462 			mac_tx_update(Adapter->mh);
2463 			E1000G_STAT(tx_ring->stat_reschedule);
2464 		}
2465 	}
2466 
2467 	/*
2468 	 * The receive sequence error (RXSEQ) and link-status-change (LSC)
2469 	 * interrupts are checked to detect that the cable has been pulled
2470 	 * out. For the Wiseman 2.0 silicon, the receive sequence error
2471 	 * interrupt is an indication that the cable is not connected.
2472 	 */
2473 	if ((icr & E1000_ICR_RXSEQ) ||
2474 	    (icr & E1000_ICR_LSC) ||
2475 	    (icr & E1000_ICR_GPI_EN1)) {
2476 		boolean_t link_changed;
2477 		timeout_id_t tid = 0;
2478 
2479 		stop_watchdog_timer(Adapter);
2480 
2481 		rw_enter(&Adapter->chip_lock, RW_WRITER);
2482 
2483 		/*
2484 		 * Because we got a link-status-change interrupt, force
2485 		 * e1000_check_for_link() to look at phy
2486 		 */
2487 		Adapter->shared.mac.get_link_status = B_TRUE;
2488 
2489 		/* e1000g_link_check takes care of link status change */
2490 		link_changed = e1000g_link_check(Adapter);
2491 
2492 		/* Get new phy state */
2493 		e1000g_get_phy_state(Adapter);
2494 
2495 		/*
2496 		 * If the link timer has not timed out, we'll not notify
2497 		 * the upper layer with any link state until the link is up.
2498 		 */
2499 		if (link_changed && !Adapter->link_complete) {
2500 			if (Adapter->link_state == LINK_STATE_UP) {
2501 				mutex_enter(&Adapter->link_lock);
2502 				Adapter->link_complete = B_TRUE;
2503 				tid = Adapter->link_tid;
2504 				Adapter->link_tid = 0;
2505 				mutex_exit(&Adapter->link_lock);
2506 			} else {
2507 				link_changed = B_FALSE;
2508 			}
2509 		}
2510 		rw_exit(&Adapter->chip_lock);
2511 
2512 		if (link_changed) {
2513 			if (tid != 0)
2514 				(void) untimeout(tid);
2515 
2516 			/*
2517 			 * Workaround for esb2. Data stuck in fifo on a link
2518 			 * down event. Stop receiver here and reset in watchdog.
2519 			 */
2520 			if ((Adapter->link_state == LINK_STATE_DOWN) &&
2521 			    (Adapter->shared.mac.type == e1000_80003es2lan)) {
2522 				uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
2523 				E1000_WRITE_REG(hw, E1000_RCTL,
2524 				    rctl & ~E1000_RCTL_EN);
2525 				e1000g_log(Adapter, CE_WARN,
2526 				    "ESB2 receiver disabled");
2527 				Adapter->esb2_workaround = B_TRUE;
2528 			}
2529 			if (!Adapter->reset_flag)
2530 				mac_link_update(Adapter->mh,
2531 				    Adapter->link_state);
2532 			if (Adapter->link_state == LINK_STATE_UP)
2533 				Adapter->reset_flag = B_FALSE;
2534 		}
2535 
2536 		start_watchdog_timer(Adapter);
2537 	}
2538 }
2539 
2540 static void
2541 e1000g_init_unicst(struct e1000g *Adapter)
2542 {
2543 	struct e1000_hw *hw;
2544 	int slot;
2545 
2546 	hw = &Adapter->shared;
2547 
2548 	if (Adapter->init_count == 0) {
2549 		/* Initialize the multiple unicast addresses */
2550 		Adapter->unicst_total = min(hw->mac.rar_entry_count,
2551 		    MAX_NUM_UNICAST_ADDRESSES);
2552 
2553 		/*
2554 		 * The common code does not correctly calculate the number of
2555 		 * RARs that could be reserved by firmware for the pch_lpt and
2556 		 * pch_spt macs. The interface has one primary RAR and 11
2557 		 * additional ones; those 11 additional ones are not always
2558 		 * available.  According to the datasheet, we need to check a
2559 		 * few of the bits in the FWSM register. If the value is
2560 		 * zero, everything is available. If the value is 1, none of the
2561 		 * additional registers are available. If the value is 2-7, only
2562 		 * that many additional registers are available.
2563 		 */
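		/*
		 * For example, per the computation below: a WLOCK_MAC value
		 * of 0 yields 12 usable RARs (the primary plus all 11
		 * additional ones), a value of 1 leaves only the primary RAR,
		 * and a value of 3 yields 4 usable RARs (the primary plus 3
		 * additional ones).
		 */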
2564 		if (hw->mac.type >= e1000_pch_lpt) {
2565 			uint32_t locked, rar;
2566 
2567 			locked = E1000_READ_REG(hw, E1000_FWSM) &
2568 			    E1000_FWSM_WLOCK_MAC_MASK;
2569 			locked >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2570 			rar = 1;
2571 			if (locked == 0)
2572 				rar += 11;
2573 			else if (locked == 1)
2574 				rar += 0;
2575 			else
2576 				rar += locked;
2577 			Adapter->unicst_total = min(rar,
2578 			    MAX_NUM_UNICAST_ADDRESSES);
2579 		}
2580 
2581 		/* Workaround for an erratum of the 82571 chipset */
2582 		if ((hw->mac.type == e1000_82571) &&
2583 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2584 			Adapter->unicst_total--;
2585 
2586 		/* VMware doesn't support multiple mac addresses properly */
2587 		if (hw->subsystem_vendor_id == 0x15ad)
2588 			Adapter->unicst_total = 1;
2589 
2590 		Adapter->unicst_avail = Adapter->unicst_total;
2591 
2592 		for (slot = 0; slot < Adapter->unicst_total; slot++) {
2593 			/* Clear both the flag and MAC address */
2594 			Adapter->unicst_addr[slot].reg.high = 0;
2595 			Adapter->unicst_addr[slot].reg.low = 0;
2596 		}
2597 	} else {
2598 		/* Workaround for an erratum of the 82571 chipset */
2599 		if ((hw->mac.type == e1000_82571) &&
2600 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2601 			(void) e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY);
2602 
2603 		/* Re-configure the RAR registers */
2604 		for (slot = 0; slot < Adapter->unicst_total; slot++)
2605 			if (Adapter->unicst_addr[slot].mac.set == 1)
2606 				(void) e1000_rar_set(hw,
2607 				    Adapter->unicst_addr[slot].mac.addr, slot);
2608 	}
2609 
2610 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2611 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2612 }
2613 
2614 static int
2615 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr,
2616     int slot)
2617 {
2618 	struct e1000_hw *hw;
2619 
2620 	hw = &Adapter->shared;
2621 
2622 	/*
2623 	 * The first revision of Wiseman silicon (rev 2.0) has an errata
2624 	 * that requires the receiver to be in reset when any of the
2625 	 * receive address registers (RAR regs) are accessed.  The first
2626 	 * rev of Wiseman silicon also requires MWI to be disabled when
2627 	 * a global reset or a receive reset is issued.  So before we
2628 	 * initialize the RARs, we check the rev of the Wiseman controller
2629 	 * and work around any necessary HW errata.
2630 	 */
2631 	if ((hw->mac.type == e1000_82542) &&
2632 	    (hw->revision_id == E1000_REVISION_2)) {
2633 		e1000_pci_clear_mwi(hw);
2634 		E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2635 		msec_delay(5);
2636 	}
2637 	if (mac_addr == NULL) {
2638 		E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0);
2639 		E1000_WRITE_FLUSH(hw);
2640 		E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0);
2641 		E1000_WRITE_FLUSH(hw);
2642 		/* Clear both the flag and MAC address */
2643 		Adapter->unicst_addr[slot].reg.high = 0;
2644 		Adapter->unicst_addr[slot].reg.low = 0;
2645 	} else {
2646 		bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr,
2647 		    ETHERADDRL);
2648 		(void) e1000_rar_set(hw, (uint8_t *)mac_addr, slot);
2649 		Adapter->unicst_addr[slot].mac.set = 1;
2650 	}
2651 
2652 	/* Workaround for an erratum of the 82571 chipset */
2653 	if (slot == 0) {
2654 		if ((hw->mac.type == e1000_82571) &&
2655 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2656 			if (mac_addr == NULL) {
2657 				E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2658 				    slot << 1, 0);
2659 				E1000_WRITE_FLUSH(hw);
2660 				E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2661 				    (slot << 1) + 1, 0);
2662 				E1000_WRITE_FLUSH(hw);
2663 			} else {
2664 				(void) e1000_rar_set(hw, (uint8_t *)mac_addr,
2665 				    LAST_RAR_ENTRY);
2666 			}
2667 	}
2668 
2669 	/*
2670 	 * If we are using Wiseman rev 2.0 silicon, we will have previously
2671 	 * put the receiver in reset, and disabled MWI, to work around some
2672 	 * HW errata.  Now we should take the receiver out of reset, and
2673 	 * re-enable MWI if it was previously enabled by the PCI BIOS.
2674 	 */
2675 	if ((hw->mac.type == e1000_82542) &&
2676 	    (hw->revision_id == E1000_REVISION_2)) {
2677 		E1000_WRITE_REG(hw, E1000_RCTL, 0);
2678 		msec_delay(1);
2679 		if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2680 			e1000_pci_set_mwi(hw);
2681 		e1000g_rx_setup(Adapter);
2682 	}
2683 
2684 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2685 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2686 		return (EIO);
2687 	}
2688 
2689 	return (0);
2690 }
2691 
2692 static int
2693 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr)
2694 {
2695 	struct e1000_hw *hw = &Adapter->shared;
2696 	struct ether_addr *newtable;
2697 	size_t new_len;
2698 	size_t old_len;
2699 	int res = 0;
2700 
2701 	if ((multiaddr[0] & 01) == 0) {
2702 		res = EINVAL;
2703 		e1000g_log(Adapter, CE_WARN, "Illegal multicast address");
2704 		goto done;
2705 	}
2706 
2707 	if (Adapter->mcast_count >= Adapter->mcast_max_num) {
2708 		res = ENOENT;
2709 		e1000g_log(Adapter, CE_WARN,
2710 		    "Adapter requested more than %d mcast addresses",
2711 		    Adapter->mcast_max_num);
2712 		goto done;
2713 	}
2714 
2716 	if (Adapter->mcast_count == Adapter->mcast_alloc_count) {
2717 		old_len = Adapter->mcast_alloc_count *
2718 		    sizeof (struct ether_addr);
2719 		new_len = (Adapter->mcast_alloc_count + MCAST_ALLOC_SIZE) *
2720 		    sizeof (struct ether_addr);
2721 
2722 		newtable = kmem_alloc(new_len, KM_NOSLEEP);
2723 		if (newtable == NULL) {
2724 			res = ENOMEM;
2725 			e1000g_log(Adapter, CE_WARN,
2726 			    "Not enough memory to alloc mcast table");
2727 			goto done;
2728 		}
2729 
2730 		if (Adapter->mcast_table != NULL) {
2731 			bcopy(Adapter->mcast_table, newtable, old_len);
2732 			kmem_free(Adapter->mcast_table, old_len);
2733 		}
2734 		Adapter->mcast_alloc_count += MCAST_ALLOC_SIZE;
2735 		Adapter->mcast_table = newtable;
2736 	}
2737 
2738 	bcopy(multiaddr,
2739 	    &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL);
2740 	Adapter->mcast_count++;
2741 
2742 	/*
2743 	 * Update the MC table in the hardware
2744 	 */
2745 	e1000g_clear_interrupt(Adapter);
2746 
2747 	e1000_update_mc_addr_list(hw,
2748 	    (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
2749 
2750 	e1000g_mask_interrupt(Adapter);
2751 
2752 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2753 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2754 		res = EIO;
2755 	}
2756 
2757 done:
2758 	return (res);
2759 }
2760 
2761 static int
2762 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr)
2763 {
2764 	struct e1000_hw *hw = &Adapter->shared;
2765 	struct ether_addr *newtable;
2766 	size_t new_len;
2767 	size_t old_len;
2768 	unsigned i;
2769 
2770 	for (i = 0; i < Adapter->mcast_count; i++) {
2771 		if (bcmp(multiaddr, &Adapter->mcast_table[i],
2772 		    ETHERADDRL) == 0) {
2773 			for (i++; i < Adapter->mcast_count; i++) {
2774 				Adapter->mcast_table[i - 1] =
2775 				    Adapter->mcast_table[i];
2776 			}
2777 			Adapter->mcast_count--;
2778 			break;
2779 		}
2780 	}
2781 
2782 	if ((Adapter->mcast_alloc_count - Adapter->mcast_count) >
2783 	    MCAST_ALLOC_SIZE) {
2784 		old_len = Adapter->mcast_alloc_count *
2785 		    sizeof (struct ether_addr);
2786 		new_len = (Adapter->mcast_alloc_count - MCAST_ALLOC_SIZE) *
2787 		    sizeof (struct ether_addr);
2788 
2789 		newtable = kmem_alloc(new_len, KM_NOSLEEP);
2790 		if (newtable != NULL) {
2791 			bcopy(Adapter->mcast_table, newtable, new_len);
2792 			kmem_free(Adapter->mcast_table, old_len);
2793 
2794 			Adapter->mcast_alloc_count -= MCAST_ALLOC_SIZE;
2795 			Adapter->mcast_table = newtable;
2796 		}
2797 	}
2798 
2799 	/*
2800 	 * Update the MC table in the hardware
2801 	 */
2802 	e1000g_clear_interrupt(Adapter);
2803 
2804 	e1000_update_mc_addr_list(hw,
2805 	    (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
2806 
2807 	e1000g_mask_interrupt(Adapter);
2808 
2809 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2810 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2811 		return (EIO);
2812 	}
2813 
2814 	return (0);
2815 }
2816 
2817 static void
2818 e1000g_release_multicast(struct e1000g *Adapter)
2819 {
2820 	if (Adapter->mcast_table != NULL) {
2821 		kmem_free(Adapter->mcast_table,
2822 		    Adapter->mcast_alloc_count * sizeof (struct ether_addr));
2823 		Adapter->mcast_table = NULL;
2824 	}
2825 }
2826 
2827 int
2828 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
2829 {
2830 	struct e1000g *Adapter = (struct e1000g *)arg;
2831 	int result;
2832 
2833 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2834 
2835 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2836 		result = ECANCELED;
2837 		goto done;
2838 	}
2839 
2840 	result = (add) ? multicst_add(Adapter, addr)
2841 	    : multicst_remove(Adapter, addr);
2842 
2843 done:
2844 	rw_exit(&Adapter->chip_lock);
2845 	return (result);
2847 }
2848 
2849 int
2850 e1000g_m_promisc(void *arg, boolean_t on)
2851 {
2852 	struct e1000g *Adapter = (struct e1000g *)arg;
2853 	uint32_t rctl;
2854 
2855 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2856 
2857 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2858 		rw_exit(&Adapter->chip_lock);
2859 		return (ECANCELED);
2860 	}
2861 
2862 	rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
2863 
2864 	if (on)
2865 		rctl |=
2866 		    (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
2867 	else
2868 		rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
2869 
2870 	E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
2871 
2872 	Adapter->e1000g_promisc = on;
2873 
2874 	rw_exit(&Adapter->chip_lock);
2875 
2876 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2877 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2878 		return (EIO);
2879 	}
2880 
2881 	return (0);
2882 }
2883 
2884 /*
2885  * Entry points to enable and disable interrupts at the granularity of
2886  * a group.
2887  * Turns the poll_mode for the whole adapter on and off to enable or
2888  * override the ring level polling control over the hardware interrupts.
2889  */
2890 static int
2891 e1000g_rx_group_intr_enable(mac_intr_handle_t arg)
2892 {
2893 	struct e1000g		*adapter = (struct e1000g *)arg;
2894 	e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2895 
2896 	/*
2897 	 * Later interrupts at the granularity of this ring will
2898 	 * invoke mac_rx() with NULL, indicating the need for another
2899 	 * software classification.
2900 	 * We have a single ring usable per adapter now, so we only need to
2901 	 * reset the rx handle for that one.
2902 	 * When more RX rings can be used, we should update each one of them.
2903 	 */
2904 	mutex_enter(&rx_ring->rx_lock);
2905 	rx_ring->mrh = NULL;
2906 	adapter->poll_mode = B_FALSE;
2907 	mutex_exit(&rx_ring->rx_lock);
2908 	return (0);
2909 }
2910 
2911 static int
2912 e1000g_rx_group_intr_disable(mac_intr_handle_t arg)
2913 {
2914 	struct e1000g *adapter = (struct e1000g *)arg;
2915 	e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2916 
2917 	mutex_enter(&rx_ring->rx_lock);
2918 
2919 	/*
2920 	 * Later interrupts at the granularity of this ring will
2921 	 * invoke mac_rx() with the handle for this ring.
2922 	 */
2923 	adapter->poll_mode = B_TRUE;
2924 	rx_ring->mrh = rx_ring->mrh_init;
2925 	mutex_exit(&rx_ring->rx_lock);
2926 	return (0);
2927 }
2928 
2929 /*
2930  * Entry points to enable and disable interrupts at the granularity of
2931  * a ring.
2932  * adapter poll_mode controls whether we actually proceed with hardware
2933  * interrupt toggling.
2934  */
2935 static int
2936 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh)
2937 {
2938 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)intrh;
2939 	struct e1000g		*adapter = rx_ring->adapter;
2940 	struct e1000_hw		*hw = &adapter->shared;
2941 	uint32_t		intr_mask;
2942 
2943 	rw_enter(&adapter->chip_lock, RW_READER);
2944 
2945 	if (adapter->e1000g_state & E1000G_SUSPENDED) {
2946 		rw_exit(&adapter->chip_lock);
2947 		return (0);
2948 	}
2949 
2950 	mutex_enter(&rx_ring->rx_lock);
2951 	rx_ring->poll_flag = 0;
2952 	mutex_exit(&rx_ring->rx_lock);
2953 
2954 	/* Rx interrupt enabling for MSI and legacy */
2955 	intr_mask = E1000_READ_REG(hw, E1000_IMS);
2956 	intr_mask |= E1000_IMS_RXT0;
2957 	E1000_WRITE_REG(hw, E1000_IMS, intr_mask);
2958 	E1000_WRITE_FLUSH(hw);
2959 
2960 	/* Trigger a Rx interrupt to check Rx ring */
2961 	E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
2962 	E1000_WRITE_FLUSH(hw);
2963 
2964 	rw_exit(&adapter->chip_lock);
2965 	return (0);
2966 }
2967 
2968 static int
2969 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh)
2970 {
2971 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)intrh;
2972 	struct e1000g		*adapter = rx_ring->adapter;
2973 	struct e1000_hw		*hw = &adapter->shared;
2974 
2975 	rw_enter(&adapter->chip_lock, RW_READER);
2976 
2977 	if (adapter->e1000g_state & E1000G_SUSPENDED) {
2978 		rw_exit(&adapter->chip_lock);
2979 		return (0);
2980 	}
2981 	mutex_enter(&rx_ring->rx_lock);
2982 	rx_ring->poll_flag = 1;
2983 	mutex_exit(&rx_ring->rx_lock);
2984 
2985 	/* Rx interrupt disabling for MSI and legacy */
2986 	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
2987 	E1000_WRITE_FLUSH(hw);
2988 
2989 	rw_exit(&adapter->chip_lock);
2990 	return (0);
2991 }
2992 
2993 /*
2994  * e1000g_unicst_find - Find the slot for the specified unicast address
2995  */
2996 static int
2997 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr)
2998 {
2999 	int slot;
3000 
3001 	for (slot = 0; slot < Adapter->unicst_total; slot++) {
3002 		if ((Adapter->unicst_addr[slot].mac.set == 1) &&
3003 		    (bcmp(Adapter->unicst_addr[slot].mac.addr,
3004 		    mac_addr, ETHERADDRL) == 0))
3005 			return (slot);
3006 	}
3007 
3008 	return (-1);
3009 }
3010 
3011 /*
3012  * Entry points to add and remove a MAC address to a ring group.
3013  * The caller takes care of adding and removing the MAC addresses
3014  * to the filter via these two routines.
3015  */
3016 
3017 static int
3018 e1000g_addmac(void *arg, const uint8_t *mac_addr)
3019 {
3020 	struct e1000g *Adapter = (struct e1000g *)arg;
3021 	int slot, err;
3022 
3023 	rw_enter(&Adapter->chip_lock, RW_WRITER);
3024 
3025 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3026 		rw_exit(&Adapter->chip_lock);
3027 		return (ECANCELED);
3028 	}
3029 
3030 	if (e1000g_unicst_find(Adapter, mac_addr) != -1) {
3031 		/* The same address is already in slot */
3032 		rw_exit(&Adapter->chip_lock);
3033 		return (0);
3034 	}
3035 
3036 	if (Adapter->unicst_avail == 0) {
3037 		/* no slots available */
3038 		rw_exit(&Adapter->chip_lock);
3039 		return (ENOSPC);
3040 	}
3041 
3042 	/* Search for a free slot */
3043 	for (slot = 0; slot < Adapter->unicst_total; slot++) {
3044 		if (Adapter->unicst_addr[slot].mac.set == 0)
3045 			break;
3046 	}
3047 	ASSERT(slot < Adapter->unicst_total);
3048 
3049 	err = e1000g_unicst_set(Adapter, mac_addr, slot);
3050 	if (err == 0)
3051 		Adapter->unicst_avail--;
3052 
3053 	rw_exit(&Adapter->chip_lock);
3054 
3055 	return (err);
3056 }
3057 
3058 static int
3059 e1000g_remmac(void *arg, const uint8_t *mac_addr)
3060 {
3061 	struct e1000g *Adapter = (struct e1000g *)arg;
3062 	int slot, err;
3063 
3064 	rw_enter(&Adapter->chip_lock, RW_WRITER);
3065 
3066 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3067 		rw_exit(&Adapter->chip_lock);
3068 		return (ECANCELED);
3069 	}
3070 
3071 	slot = e1000g_unicst_find(Adapter, mac_addr);
3072 	if (slot == -1) {
3073 		rw_exit(&Adapter->chip_lock);
3074 		return (EINVAL);
3075 	}
3076 
3077 	ASSERT(Adapter->unicst_addr[slot].mac.set);
3078 
3079 	/* Clear this slot */
3080 	err = e1000g_unicst_set(Adapter, NULL, slot);
3081 	if (err == 0)
3082 		Adapter->unicst_avail++;
3083 
3084 	rw_exit(&Adapter->chip_lock);
3085 
3086 	return (err);
3087 }
3088 
3089 static int
3090 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
3091 {
3092 	e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh;
3093 
3094 	mutex_enter(&rx_ring->rx_lock);
3095 	rx_ring->ring_gen_num = mr_gen_num;
3096 	mutex_exit(&rx_ring->rx_lock);
3097 	return (0);
3098 }
3099 
3100 /*
3101  * Callback function for the MAC layer to register all rings.
3102  *
3103  * The hardware supports a single group with currently only one ring
3104  * available.
3105  * Though not offering virtualization ability per se, exposing the
3106  * group/ring still enables the polling and interrupt toggling.
3107  */
3108 /* ARGSUSED */
3109 void
3110 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index,
3111     const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
3112 {
3113 	struct e1000g *Adapter = (struct e1000g *)arg;
3114 	e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
3115 	mac_intr_t *mintr;
3116 
3117 	/*
3118 	 * We advertised only RX group/rings, so the MAC framework shouldn't
3119 	 * ask for anything else.
3120 	 */
3121 	ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0);
3122 
3123 	rx_ring->mrh = rx_ring->mrh_init = rh;
3124 	infop->mri_driver = (mac_ring_driver_t)rx_ring;
3125 	infop->mri_start = e1000g_ring_start;
3126 	infop->mri_stop = NULL;
3127 	infop->mri_poll = e1000g_poll_ring;
3128 	infop->mri_stat = e1000g_rx_ring_stat;
3129 
3130 	/* Ring level interrupts */
3131 	mintr = &infop->mri_intr;
3132 	mintr->mi_handle = (mac_intr_handle_t)rx_ring;
3133 	mintr->mi_enable = e1000g_rx_ring_intr_enable;
3134 	mintr->mi_disable = e1000g_rx_ring_intr_disable;
3135 	if (Adapter->msi_enable)
3136 		mintr->mi_ddi_handle = Adapter->htable[0];
3137 }
3138 
3139 /* ARGSUSED */
3140 static void
3141 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index,
3142     mac_group_info_t *infop, mac_group_handle_t gh)
3143 {
3144 	struct e1000g *Adapter = (struct e1000g *)arg;
3145 	mac_intr_t *mintr;
3146 
3147 	/*
3148 	 * We advertised a single RX ring. Getting a request for anything else
3149 	 * signifies a bug in the MAC framework.
3150 	 */
3151 	ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0);
3152 
3153 	Adapter->rx_group = gh;
3154 
3155 	infop->mgi_driver = (mac_group_driver_t)Adapter;
3156 	infop->mgi_start = NULL;
3157 	infop->mgi_stop = NULL;
3158 	infop->mgi_addmac = e1000g_addmac;
3159 	infop->mgi_remmac = e1000g_remmac;
3160 	infop->mgi_count = 1;
3161 
3162 	/* Group level interrupts */
3163 	mintr = &infop->mgi_intr;
3164 	mintr->mi_handle = (mac_intr_handle_t)Adapter;
3165 	mintr->mi_enable = e1000g_rx_group_intr_enable;
3166 	mintr->mi_disable = e1000g_rx_group_intr_disable;
3167 }
3168 
3169 static void
3170 e1000g_led_blink(void *arg)
3171 {
3172 	e1000g_t *e1000g = arg;
3173 
3174 	mutex_enter(&e1000g->e1000g_led_lock);
3175 	VERIFY(e1000g->e1000g_emul_blink);
3176 	if (e1000g->e1000g_emul_state) {
3177 		(void) e1000_led_on(&e1000g->shared);
3178 	} else {
3179 		(void) e1000_led_off(&e1000g->shared);
3180 	}
3181 	e1000g->e1000g_emul_state = !e1000g->e1000g_emul_state;
3182 	mutex_exit(&e1000g->e1000g_led_lock);
3183 }
3184 
3185 static int
3186 e1000g_led_set(void *arg, mac_led_mode_t mode, uint_t flags)
3187 {
3188 	e1000g_t *e1000g = arg;
3189 
3190 	if (flags != 0)
3191 		return (EINVAL);
3192 
3193 	if (mode != MAC_LED_DEFAULT &&
3194 	    mode != MAC_LED_IDENT &&
3195 	    mode != MAC_LED_OFF &&
3196 	    mode != MAC_LED_ON)
3197 		return (ENOTSUP);
3198 
3199 	mutex_enter(&e1000g->e1000g_led_lock);
3200 
3201 	if ((mode == MAC_LED_IDENT || mode == MAC_LED_OFF ||
3202 	    mode == MAC_LED_ON) &&
3203 	    !e1000g->e1000g_led_setup) {
3204 		if (e1000_setup_led(&e1000g->shared) != E1000_SUCCESS) {
3205 			mutex_exit(&e1000g->e1000g_led_lock);
3206 			return (EIO);
3207 		}
3208 
3209 		e1000g->e1000g_led_setup = B_TRUE;
3210 	}
3211 
3212 	if (mode != MAC_LED_IDENT && e1000g->e1000g_blink != NULL) {
3213 		ddi_periodic_t id = e1000g->e1000g_blink;
3214 		e1000g->e1000g_blink = NULL;
3215 		mutex_exit(&e1000g->e1000g_led_lock);
3216 		ddi_periodic_delete(id);
3217 		mutex_enter(&e1000g->e1000g_led_lock);
3218 	}
3219 
3220 	switch (mode) {
3221 	case MAC_LED_DEFAULT:
3222 		if (e1000g->e1000g_led_setup) {
3223 			if (e1000_cleanup_led(&e1000g->shared) !=
3224 			    E1000_SUCCESS) {
3225 				mutex_exit(&e1000g->e1000g_led_lock);
3226 				return (EIO);
3227 			}
3228 			e1000g->e1000g_led_setup = B_FALSE;
3229 		}
3230 		break;
3231 	case MAC_LED_IDENT:
3232 		if (e1000g->e1000g_emul_blink) {
3233 			if (e1000g->e1000g_blink != NULL)
3234 				break;
3235 
3236 			/*
3237 			 * Note, we use a 200 ms period here because that is
3238 			 * the rate that section 10.1.3 of the 8254x manual
3239 			 * (PCI/PCI-X Family of Gigabit Ethernet Controllers
3240 			 * Software Developer's Manual) indicates the optional
3241 			 * blink hardware operates at.
3242 			 */
3243 			e1000g->e1000g_blink =
3244 			    ddi_periodic_add(e1000g_led_blink, e1000g,
3245 			    200ULL * (NANOSEC / MILLISEC), DDI_IPL_0);
3246 		} else if (e1000_blink_led(&e1000g->shared) != E1000_SUCCESS) {
3247 			mutex_exit(&e1000g->e1000g_led_lock);
3248 			return (EIO);
3249 		}
3250 		break;
3251 	case MAC_LED_OFF:
3252 		if (e1000_led_off(&e1000g->shared) != E1000_SUCCESS) {
3253 			mutex_exit(&e1000g->e1000g_led_lock);
3254 			return (EIO);
3255 		}
3256 		break;
3257 	case MAC_LED_ON:
3258 		if (e1000_led_on(&e1000g->shared) != E1000_SUCCESS) {
3259 			mutex_exit(&e1000g->e1000g_led_lock);
3260 			return (EIO);
3261 		}
3262 		break;
3263 	default:
3264 		mutex_exit(&e1000g->e1000g_led_lock);
3265 		return (ENOTSUP);
3266 	}
3267 
3268 	mutex_exit(&e1000g->e1000g_led_lock);
3269 	return (0);
3271 }
3272 
3273 static boolean_t
3274 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3275 {
3276 	struct e1000g *Adapter = (struct e1000g *)arg;
3277 
3278 	switch (cap) {
3279 	case MAC_CAPAB_HCKSUM: {
3280 		uint32_t *txflags = cap_data;
3281 
3282 		if (Adapter->tx_hcksum_enable)
3283 			*txflags = HCKSUM_IPHDRCKSUM |
3284 			    HCKSUM_INET_PARTIAL;
3285 		else
3286 			return (B_FALSE);
3287 		break;
3288 	}
3289 
3290 	case MAC_CAPAB_LSO: {
3291 		mac_capab_lso_t *cap_lso = cap_data;
3292 
3293 		if (Adapter->lso_enable) {
3294 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
3295 			cap_lso->lso_basic_tcp_ipv4.lso_max =
3296 			    E1000_LSO_MAXLEN;
3297 		} else
3298 			return (B_FALSE);
3299 		break;
3300 	}
3301 	case MAC_CAPAB_RINGS: {
3302 		mac_capab_rings_t *cap_rings = cap_data;
3303 
3304 		/* No TX rings exposed yet */
3305 		if (cap_rings->mr_type != MAC_RING_TYPE_RX)
3306 			return (B_FALSE);
3307 
3308 		cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3309 		cap_rings->mr_rnum = 1;
3310 		cap_rings->mr_gnum = 1;
3311 		cap_rings->mr_rget = e1000g_fill_ring;
3312 		cap_rings->mr_gget = e1000g_fill_group;
3313 		break;
3314 	}
3315 	case MAC_CAPAB_LED: {
3316 		mac_capab_led_t *cap_led = cap_data;
3317 
3318 		cap_led->mcl_flags = 0;
3319 		cap_led->mcl_modes = MAC_LED_DEFAULT;
3320 		if (Adapter->shared.mac.ops.blink_led != NULL &&
3321 		    Adapter->shared.mac.ops.blink_led !=
3322 		    e1000_null_ops_generic) {
3323 			cap_led->mcl_modes |= MAC_LED_IDENT;
3324 		}
3325 
3326 		if (Adapter->shared.mac.ops.led_off != NULL &&
3327 		    Adapter->shared.mac.ops.led_off !=
3328 		    e1000_null_ops_generic) {
3329 			cap_led->mcl_modes |= MAC_LED_OFF;
3330 		}
3331 
3332 		if (Adapter->shared.mac.ops.led_on != NULL &&
3333 		    Adapter->shared.mac.ops.led_on !=
3334 		    e1000_null_ops_generic) {
3335 			cap_led->mcl_modes |= MAC_LED_ON;
3336 		}
3337 
3338 		/*
3339 		 * Some hardware doesn't support blinking natively because it
3340 		 * lacks the optional blink circuit. If it supports both off
3341 		 * and on, then we'll emulate blinking ourselves.
3342 		 */
3343 		if (((cap_led->mcl_modes & MAC_LED_IDENT) == 0) &&
3344 		    ((cap_led->mcl_modes & MAC_LED_OFF) != 0) &&
3345 		    ((cap_led->mcl_modes & MAC_LED_ON) != 0)) {
3346 			cap_led->mcl_modes |= MAC_LED_IDENT;
3347 			Adapter->e1000g_emul_blink = B_TRUE;
3348 		}
3349 
3350 		cap_led->mcl_set = e1000g_led_set;
3351 		break;
3352 	}
3353 	default:
3354 		return (B_FALSE);
3355 	}
3356 	return (B_TRUE);
3357 }
3358 
3359 static boolean_t
3360 e1000g_param_locked(mac_prop_id_t pr_num)
3361 {
3362 	/*
3363 	 * All en_* parameters are locked (read-only) while
3364 	 * the device is in any sort of loopback mode ...
3365 	 */
3366 	switch (pr_num) {
3367 		case MAC_PROP_EN_1000FDX_CAP:
3368 		case MAC_PROP_EN_1000HDX_CAP:
3369 		case MAC_PROP_EN_100FDX_CAP:
3370 		case MAC_PROP_EN_100HDX_CAP:
3371 		case MAC_PROP_EN_10FDX_CAP:
3372 		case MAC_PROP_EN_10HDX_CAP:
3373 		case MAC_PROP_AUTONEG:
3374 		case MAC_PROP_FLOWCTRL:
3375 			return (B_TRUE);
3376 	}
3377 	return (B_FALSE);
3378 }
3379 
3380 /*
3381  * callback function for set/get of properties
3382  */
3383 static int
3384 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3385     uint_t pr_valsize, const void *pr_val)
3386 {
3387 	struct e1000g *Adapter = arg;
3388 	struct e1000_hw *hw = &Adapter->shared;
3389 	struct e1000_fc_info *fc = &Adapter->shared.fc;
3390 	int err = 0;
3391 	link_flowctrl_t flowctrl;
3392 	uint32_t cur_mtu, new_mtu;
3393 
3394 	rw_enter(&Adapter->chip_lock, RW_WRITER);
3395 
3396 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3397 		rw_exit(&Adapter->chip_lock);
3398 		return (ECANCELED);
3399 	}
3400 
3401 	if (Adapter->loopback_mode != E1000G_LB_NONE &&
3402 	    e1000g_param_locked(pr_num)) {
3403 		/*
3404 		 * All en_* parameters are locked (read-only)
3405 		 * while the device is in any sort of loopback mode.
3406 		 */
3407 		rw_exit(&Adapter->chip_lock);
3408 		return (EBUSY);
3409 	}
3410 
3411 	switch (pr_num) {
3412 		case MAC_PROP_EN_1000FDX_CAP:
3413 			if (hw->phy.media_type != e1000_media_type_copper) {
3414 				err = ENOTSUP;
3415 				break;
3416 			}
3417 			Adapter->param_en_1000fdx = *(uint8_t *)pr_val;
3418 			Adapter->param_adv_1000fdx = *(uint8_t *)pr_val;
3419 			goto reset;
3420 		case MAC_PROP_EN_100FDX_CAP:
3421 			if (hw->phy.media_type != e1000_media_type_copper) {
3422 				err = ENOTSUP;
3423 				break;
3424 			}
3425 			Adapter->param_en_100fdx = *(uint8_t *)pr_val;
3426 			Adapter->param_adv_100fdx = *(uint8_t *)pr_val;
3427 			goto reset;
3428 		case MAC_PROP_EN_100HDX_CAP:
3429 			if (hw->phy.media_type != e1000_media_type_copper) {
3430 				err = ENOTSUP;
3431 				break;
3432 			}
3433 			Adapter->param_en_100hdx = *(uint8_t *)pr_val;
3434 			Adapter->param_adv_100hdx = *(uint8_t *)pr_val;
3435 			goto reset;
3436 		case MAC_PROP_EN_10FDX_CAP:
3437 			if (hw->phy.media_type != e1000_media_type_copper) {
3438 				err = ENOTSUP;
3439 				break;
3440 			}
3441 			Adapter->param_en_10fdx = *(uint8_t *)pr_val;
3442 			Adapter->param_adv_10fdx = *(uint8_t *)pr_val;
3443 			goto reset;
3444 		case MAC_PROP_EN_10HDX_CAP:
3445 			if (hw->phy.media_type != e1000_media_type_copper) {
3446 				err = ENOTSUP;
3447 				break;
3448 			}
3449 			Adapter->param_en_10hdx = *(uint8_t *)pr_val;
3450 			Adapter->param_adv_10hdx = *(uint8_t *)pr_val;
3451 			goto reset;
3452 		case MAC_PROP_AUTONEG:
3453 			if (hw->phy.media_type != e1000_media_type_copper) {
3454 				err = ENOTSUP;
3455 				break;
3456 			}
3457 			Adapter->param_adv_autoneg = *(uint8_t *)pr_val;
3458 			goto reset;
3459 		case MAC_PROP_FLOWCTRL:
3460 			fc->send_xon = B_TRUE;
3461 			bcopy(pr_val, &flowctrl, sizeof (flowctrl));
3462 
3463 			switch (flowctrl) {
3464 			default:
3465 				err = EINVAL;
3466 				break;
3467 			case LINK_FLOWCTRL_NONE:
3468 				fc->requested_mode = e1000_fc_none;
3469 				break;
3470 			case LINK_FLOWCTRL_RX:
3471 				fc->requested_mode = e1000_fc_rx_pause;
3472 				break;
3473 			case LINK_FLOWCTRL_TX:
3474 				fc->requested_mode = e1000_fc_tx_pause;
3475 				break;
3476 			case LINK_FLOWCTRL_BI:
3477 				fc->requested_mode = e1000_fc_full;
3478 				break;
3479 			}
3480 reset:
3481 			if (err == 0) {
3482 				/* check PCH limits & reset the link */
3483 				e1000g_pch_limits(Adapter);
3484 				if (e1000g_reset_link(Adapter) != DDI_SUCCESS)
3485 					err = EINVAL;
3486 			}
3487 			break;
3488 		case MAC_PROP_ADV_1000FDX_CAP:
3489 		case MAC_PROP_ADV_1000HDX_CAP:
3490 		case MAC_PROP_ADV_100FDX_CAP:
3491 		case MAC_PROP_ADV_100HDX_CAP:
3492 		case MAC_PROP_ADV_10FDX_CAP:
3493 		case MAC_PROP_ADV_10HDX_CAP:
3494 		case MAC_PROP_EN_1000HDX_CAP:
3495 		case MAC_PROP_STATUS:
3496 		case MAC_PROP_SPEED:
3497 		case MAC_PROP_DUPLEX:
3498 			err = ENOTSUP; /* read-only prop. Can't set this. */
3499 			break;
3500 		case MAC_PROP_MTU:
3501 			/* adapter must be stopped for an MTU change */
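			/*
			 * From userland this path is normally reached via
			 * something like
			 *	dladm set-linkprop -p mtu=9000 e1000g0
			 * (instance name illustrative); the EBUSY below means
			 * the link must be stopped before the MTU can change.
			 */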
3502 			if (Adapter->e1000g_state & E1000G_STARTED) {
3503 				err = EBUSY;
3504 				break;
3505 			}
3506 
3507 			cur_mtu = Adapter->default_mtu;
3508 
3509 			/* get new requested MTU */
3510 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3511 			if (new_mtu == cur_mtu) {
3512 				err = 0;
3513 				break;
3514 			}
3515 
3516 			if ((new_mtu < DEFAULT_MTU) ||
3517 			    (new_mtu > Adapter->max_mtu)) {
3518 				err = EINVAL;
3519 				break;
3520 			}
3521 
3522 			/* inform MAC framework of new MTU */
3523 			err = mac_maxsdu_update(Adapter->mh, new_mtu);
3524 
3525 			if (err == 0) {
3526 				Adapter->default_mtu = new_mtu;
3527 				Adapter->max_frame_size =
3528 				    e1000g_mtu2maxframe(new_mtu);
3529 
3530 				/*
3531 				 * check PCH limits & set buffer sizes to
3532 				 * match new MTU
3533 				 */
3534 				e1000g_pch_limits(Adapter);
3535 				e1000g_set_bufsize(Adapter);
3536 
3537 				/*
3538 				 * decrease the number of descriptors and free
3539 				 * packets for jumbo frames to reduce tx/rx
3540 				 * resource consumption
3541 				 */
3542 				if (Adapter->max_frame_size >=
3543 				    (FRAME_SIZE_UPTO_4K)) {
3544 					if (Adapter->tx_desc_num_flag == 0)
3545 						Adapter->tx_desc_num =
3546 						    DEFAULT_JUMBO_NUM_TX_DESC;
3547 
3548 					if (Adapter->rx_desc_num_flag == 0)
3549 						Adapter->rx_desc_num =
3550 						    DEFAULT_JUMBO_NUM_RX_DESC;
3551 
3552 					if (Adapter->tx_buf_num_flag == 0)
3553 						Adapter->tx_freelist_num =
3554 						    DEFAULT_JUMBO_NUM_TX_BUF;
3555 
3556 					if (Adapter->rx_buf_num_flag == 0)
3557 						Adapter->rx_freelist_limit =
3558 						    DEFAULT_JUMBO_NUM_RX_BUF;
3559 				} else {
3560 					if (Adapter->tx_desc_num_flag == 0)
3561 						Adapter->tx_desc_num =
3562 						    DEFAULT_NUM_TX_DESCRIPTOR;
3563 
3564 					if (Adapter->rx_desc_num_flag == 0)
3565 						Adapter->rx_desc_num =
3566 						    DEFAULT_NUM_RX_DESCRIPTOR;
3567 
3568 					if (Adapter->tx_buf_num_flag == 0)
3569 						Adapter->tx_freelist_num =
3570 						    DEFAULT_NUM_TX_FREELIST;
3571 
3572 					if (Adapter->rx_buf_num_flag == 0)
3573 						Adapter->rx_freelist_limit =
3574 						    DEFAULT_NUM_RX_FREELIST;
3575 				}
3576 			}
3577 			break;
3578 		case MAC_PROP_PRIVATE:
3579 			err = e1000g_set_priv_prop(Adapter, pr_name,
3580 			    pr_valsize, pr_val);
3581 			break;
3582 		default:
3583 			err = ENOTSUP;
3584 			break;
3585 	}
3586 	rw_exit(&Adapter->chip_lock);
3587 	return (err);
3588 }
3589 
3590 static int
3591 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3592     uint_t pr_valsize, void *pr_val)
3593 {
3594 	struct e1000g *Adapter = arg;
3595 	struct e1000_fc_info *fc = &Adapter->shared.fc;
3596 	int err = 0;
3597 	link_flowctrl_t flowctrl;
3598 	uint64_t tmp = 0;
3599 
3600 	switch (pr_num) {
3601 		case MAC_PROP_DUPLEX:
3602 			ASSERT(pr_valsize >= sizeof (link_duplex_t));
3603 			bcopy(&Adapter->link_duplex, pr_val,
3604 			    sizeof (link_duplex_t));
3605 			break;
3606 		case MAC_PROP_SPEED:
3607 			ASSERT(pr_valsize >= sizeof (uint64_t));
3608 			tmp = Adapter->link_speed * 1000000ull;
3609 			bcopy(&tmp, pr_val, sizeof (tmp));
3610 			break;
3611 		case MAC_PROP_AUTONEG:
3612 			*(uint8_t *)pr_val = Adapter->param_adv_autoneg;
3613 			break;
3614 		case MAC_PROP_FLOWCTRL:
3615 			ASSERT(pr_valsize >= sizeof (link_flowctrl_t));
3616 			switch (fc->current_mode) {
3617 				case e1000_fc_none:
3618 					flowctrl = LINK_FLOWCTRL_NONE;
3619 					break;
3620 				case e1000_fc_rx_pause:
3621 					flowctrl = LINK_FLOWCTRL_RX;
3622 					break;
3623 				case e1000_fc_tx_pause:
3624 					flowctrl = LINK_FLOWCTRL_TX;
3625 					break;
3626 				case e1000_fc_full:
3627 					flowctrl = LINK_FLOWCTRL_BI;
3628 					break;
3629 			}
3630 			bcopy(&flowctrl, pr_val, sizeof (flowctrl));
3631 			break;
3632 		case MAC_PROP_ADV_1000FDX_CAP:
3633 			*(uint8_t *)pr_val = Adapter->param_adv_1000fdx;
3634 			break;
3635 		case MAC_PROP_EN_1000FDX_CAP:
3636 			*(uint8_t *)pr_val = Adapter->param_en_1000fdx;
3637 			break;
3638 		case MAC_PROP_ADV_1000HDX_CAP:
3639 			*(uint8_t *)pr_val = Adapter->param_adv_1000hdx;
3640 			break;
3641 		case MAC_PROP_EN_1000HDX_CAP:
3642 			*(uint8_t *)pr_val = Adapter->param_en_1000hdx;
3643 			break;
3644 		case MAC_PROP_ADV_100FDX_CAP:
3645 			*(uint8_t *)pr_val = Adapter->param_adv_100fdx;
3646 			break;
3647 		case MAC_PROP_EN_100FDX_CAP:
3648 			*(uint8_t *)pr_val = Adapter->param_en_100fdx;
3649 			break;
3650 		case MAC_PROP_ADV_100HDX_CAP:
3651 			*(uint8_t *)pr_val = Adapter->param_adv_100hdx;
3652 			break;
3653 		case MAC_PROP_EN_100HDX_CAP:
3654 			*(uint8_t *)pr_val = Adapter->param_en_100hdx;
3655 			break;
3656 		case MAC_PROP_ADV_10FDX_CAP:
3657 			*(uint8_t *)pr_val = Adapter->param_adv_10fdx;
3658 			break;
3659 		case MAC_PROP_EN_10FDX_CAP:
3660 			*(uint8_t *)pr_val = Adapter->param_en_10fdx;
3661 			break;
3662 		case MAC_PROP_ADV_10HDX_CAP:
3663 			*(uint8_t *)pr_val = Adapter->param_adv_10hdx;
3664 			break;
3665 		case MAC_PROP_EN_10HDX_CAP:
3666 			*(uint8_t *)pr_val = Adapter->param_en_10hdx;
3667 			break;
3668 		case MAC_PROP_ADV_100T4_CAP:
3669 		case MAC_PROP_EN_100T4_CAP:
3670 			*(uint8_t *)pr_val = Adapter->param_adv_100t4;
3671 			break;
3672 		case MAC_PROP_PRIVATE:
3673 			err = e1000g_get_priv_prop(Adapter, pr_name,
3674 			    pr_valsize, pr_val);
3675 			break;
3676 		default:
3677 			err = ENOTSUP;
3678 			break;
3679 	}
3680 
3681 	return (err);
3682 }
3683 
3684 static void
3685 e1000g_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3686     mac_prop_info_handle_t prh)
3687 {
3688 	struct e1000g *Adapter = arg;
3689 	struct e1000_hw *hw = &Adapter->shared;
3690 
3691 	switch (pr_num) {
3692 	case MAC_PROP_DUPLEX:
3693 	case MAC_PROP_SPEED:
3694 	case MAC_PROP_ADV_1000FDX_CAP:
3695 	case MAC_PROP_ADV_1000HDX_CAP:
3696 	case MAC_PROP_ADV_100FDX_CAP:
3697 	case MAC_PROP_ADV_100HDX_CAP:
3698 	case MAC_PROP_ADV_10FDX_CAP:
3699 	case MAC_PROP_ADV_10HDX_CAP:
3700 	case MAC_PROP_ADV_100T4_CAP:
3701 	case MAC_PROP_EN_100T4_CAP:
3702 		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3703 		break;
3704 
3705 	case MAC_PROP_EN_1000FDX_CAP:
3706 		if (hw->phy.media_type != e1000_media_type_copper) {
3707 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3708 		} else {
3709 			mac_prop_info_set_default_uint8(prh,
3710 			    ((Adapter->phy_ext_status &
3711 			    IEEE_ESR_1000T_FD_CAPS) ||
3712 			    (Adapter->phy_ext_status &
3713 			    IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0);
3714 		}
3715 		break;
3716 
3717 	case MAC_PROP_EN_100FDX_CAP:
3718 		if (hw->phy.media_type != e1000_media_type_copper) {
3719 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3720 		} else {
3721 			mac_prop_info_set_default_uint8(prh,
3722 			    ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
3723 			    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
3724 			    ? 1 : 0);
3725 		}
3726 		break;
3727 
3728 	case MAC_PROP_EN_100HDX_CAP:
3729 		if (hw->phy.media_type != e1000_media_type_copper) {
3730 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3731 		} else {
3732 			mac_prop_info_set_default_uint8(prh,
3733 			    ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
3734 			    (Adapter->phy_status & MII_SR_100T2_HD_CAPS))
3735 			    ? 1 : 0);
3736 		}
3737 		break;
3738 
3739 	case MAC_PROP_EN_10FDX_CAP:
3740 		if (hw->phy.media_type != e1000_media_type_copper) {
3741 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3742 		} else {
3743 			mac_prop_info_set_default_uint8(prh,
3744 			    (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0);
3745 		}
3746 		break;
3747 
3748 	case MAC_PROP_EN_10HDX_CAP:
3749 		if (hw->phy.media_type != e1000_media_type_copper) {
3750 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3751 		} else {
3752 			mac_prop_info_set_default_uint8(prh,
3753 			    (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0);
3754 		}
3755 		break;
3756 
3757 	case MAC_PROP_EN_1000HDX_CAP:
3758 		if (hw->phy.media_type != e1000_media_type_copper)
3759 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3760 		break;
3761 
3762 	case MAC_PROP_AUTONEG:
3763 		if (hw->phy.media_type != e1000_media_type_copper) {
3764 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3765 		} else {
3766 			mac_prop_info_set_default_uint8(prh,
3767 			    (Adapter->phy_status & MII_SR_AUTONEG_CAPS)
3768 			    ? 1 : 0);
3769 		}
3770 		break;
3771 
3772 	case MAC_PROP_FLOWCTRL:
3773 		mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI);
3774 		break;
3775 
3776 	case MAC_PROP_MTU: {
3777 		struct e1000_mac_info *mac = &Adapter->shared.mac;
3778 		struct e1000_phy_info *phy = &Adapter->shared.phy;
3779 		uint32_t max;
3780 
3781 		/* some MAC types do not support jumbo frames */
3782 		if ((mac->type == e1000_ich8lan) ||
3783 		    ((mac->type == e1000_ich9lan) && (phy->type ==
3784 		    e1000_phy_ife))) {
3785 			max = DEFAULT_MTU;
3786 		} else {
3787 			max = Adapter->max_mtu;
3788 		}
3789 
3790 		mac_prop_info_set_range_uint32(prh, DEFAULT_MTU, max);
3791 		break;
3792 	}
3793 	case MAC_PROP_PRIVATE: {
3794 		char valstr[64];
3795 		int value;
3796 
3797 		if (strcmp(pr_name, "_adv_pause_cap") == 0 ||
3798 		    strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
3799 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3800 			return;
3801 		} else if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3802 			value = DEFAULT_TX_BCOPY_THRESHOLD;
3803 		} else if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3804 			value = DEFAULT_TX_INTR_ENABLE;
3805 		} else if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3806 			value = DEFAULT_TX_INTR_DELAY;
3807 		} else if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3808 			value = DEFAULT_TX_INTR_ABS_DELAY;
3809 		} else if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3810 			value = DEFAULT_RX_BCOPY_THRESHOLD;
3811 		} else if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3812 			value = DEFAULT_RX_LIMIT_ON_INTR;
3813 		} else if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3814 			value = DEFAULT_RX_INTR_DELAY;
3815 		} else if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3816 			value = DEFAULT_RX_INTR_ABS_DELAY;
3817 		} else if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3818 			value = DEFAULT_INTR_THROTTLING;
3819 		} else if (strcmp(pr_name, "_intr_adaptive") == 0) {
3820 			value = 1;
3821 		} else {
3822 			return;
3823 		}
3824 
3825 		(void) snprintf(valstr, sizeof (valstr), "%d", value);
3826 		mac_prop_info_set_default_str(prh, valstr);
3827 		break;
3828 	}
3829 	}
3830 }
3831 
3832 /* ARGSUSED2 */
3833 static int
3834 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name,
3835     uint_t pr_valsize, const void *pr_val)
3836 {
3837 	int err = 0;
3838 	long result;
3839 	struct e1000_hw *hw = &Adapter->shared;
3840 
3841 	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3842 		if (pr_val == NULL) {
3843 			err = EINVAL;
3844 			return (err);
3845 		}
3846 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3847 		if (result < MIN_TX_BCOPY_THRESHOLD ||
3848 		    result > MAX_TX_BCOPY_THRESHOLD)
3849 			err = EINVAL;
3850 		else {
3851 			Adapter->tx_bcopy_thresh = (uint32_t)result;
3852 		}
3853 		return (err);
3854 	}
3855 	if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3856 		if (pr_val == NULL) {
3857 			err = EINVAL;
3858 			return (err);
3859 		}
3860 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3861 		if (result < 0 || result > 1)
3862 			err = EINVAL;
3863 		else {
3864 			Adapter->tx_intr_enable = (result == 1) ?
3865 			    B_TRUE: B_FALSE;
3866 			if (Adapter->tx_intr_enable)
3867 				e1000g_mask_tx_interrupt(Adapter);
3868 			else
3869 				e1000g_clear_tx_interrupt(Adapter);
3870 			if (e1000g_check_acc_handle(
3871 			    Adapter->osdep.reg_handle) != DDI_FM_OK) {
3872 				ddi_fm_service_impact(Adapter->dip,
3873 				    DDI_SERVICE_DEGRADED);
3874 				err = EIO;
3875 			}
3876 		}
3877 		return (err);
3878 	}
3879 	if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3880 		if (pr_val == NULL) {
3881 			err = EINVAL;
3882 			return (err);
3883 		}
3884 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3885 		if (result < MIN_TX_INTR_DELAY ||
3886 		    result > MAX_TX_INTR_DELAY)
3887 			err = EINVAL;
3888 		else {
3889 			Adapter->tx_intr_delay = (uint32_t)result;
3890 			E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay);
3891 			if (e1000g_check_acc_handle(
3892 			    Adapter->osdep.reg_handle) != DDI_FM_OK) {
3893 				ddi_fm_service_impact(Adapter->dip,
3894 				    DDI_SERVICE_DEGRADED);
3895 				err = EIO;
3896 			}
3897 		}
3898 		return (err);
3899 	}
3900 	if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3901 		if (pr_val == NULL) {
3902 			err = EINVAL;
3903 			return (err);
3904 		}
3905 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3906 		if (result < MIN_TX_INTR_ABS_DELAY ||
3907 		    result > MAX_TX_INTR_ABS_DELAY)
3908 			err = EINVAL;
3909 		else {
3910 			Adapter->tx_intr_abs_delay = (uint32_t)result;
3911 			E1000_WRITE_REG(hw, E1000_TADV,
3912 			    Adapter->tx_intr_abs_delay);
3913 			if (e1000g_check_acc_handle(
3914 			    Adapter->osdep.reg_handle) != DDI_FM_OK) {
3915 				ddi_fm_service_impact(Adapter->dip,
3916 				    DDI_SERVICE_DEGRADED);
3917 				err = EIO;
3918 			}
3919 		}
3920 		return (err);
3921 	}
3922 	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3923 		if (pr_val == NULL) {
3924 			err = EINVAL;
3925 			return (err);
3926 		}
3927 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3928 		if (result < MIN_RX_BCOPY_THRESHOLD ||
3929 		    result > MAX_RX_BCOPY_THRESHOLD)
3930 			err = EINVAL;
3931 		else
3932 			Adapter->rx_bcopy_thresh = (uint32_t)result;
3933 		return (err);
3934 	}
3935 	if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3936 		if (pr_val == NULL) {
3937 			err = EINVAL;
3938 			return (err);
3939 		}
3940 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3941 		if (result < MIN_RX_LIMIT_ON_INTR ||
3942 		    result > MAX_RX_LIMIT_ON_INTR)
3943 			err = EINVAL;
3944 		else
3945 			Adapter->rx_limit_onintr = (uint32_t)result;
3946 		return (err);
3947 	}
3948 	if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3949 		if (pr_val == NULL) {
3950 			err = EINVAL;
3951 			return (err);
3952 		}
3953 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3954 		if (result < MIN_RX_INTR_DELAY ||
3955 		    result > MAX_RX_INTR_DELAY)
3956 			err = EINVAL;
3957 		else {
3958 			Adapter->rx_intr_delay = (uint32_t)result;
3959 			E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay);
3960 			if (e1000g_check_acc_handle(
3961 			    Adapter->osdep.reg_handle) != DDI_FM_OK) {
3962 				ddi_fm_service_impact(Adapter->dip,
3963 				    DDI_SERVICE_DEGRADED);
3964 				err = EIO;
3965 			}
3966 		}
3967 		return (err);
3968 	}
3969 	if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3970 		if (pr_val == NULL) {
3971 			err = EINVAL;
3972 			return (err);
3973 		}
3974 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3975 		if (result < MIN_RX_INTR_ABS_DELAY ||
3976 		    result > MAX_RX_INTR_ABS_DELAY)
3977 			err = EINVAL;
3978 		else {
3979 			Adapter->rx_intr_abs_delay = (uint32_t)result;
3980 			E1000_WRITE_REG(hw, E1000_RADV,
3981 			    Adapter->rx_intr_abs_delay);
3982 			if (e1000g_check_acc_handle(
3983 			    Adapter->osdep.reg_handle) != DDI_FM_OK) {
3984 				ddi_fm_service_impact(Adapter->dip,
3985 				    DDI_SERVICE_DEGRADED);
3986 				err = EIO;
3987 			}
3988 		}
3989 		return (err);
3990 	}
3991 	if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3992 		if (pr_val == NULL) {
3993 			err = EINVAL;
3994 			return (err);
3995 		}
3996 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3997 		if (result < MIN_INTR_THROTTLING ||
3998 		    result > MAX_INTR_THROTTLING)
3999 			err = EINVAL;
4000 		else {
4001 			if (hw->mac.type >= e1000_82540) {
4002 				Adapter->intr_throttling_rate =
4003 				    (uint32_t)result;
4004 				E1000_WRITE_REG(hw, E1000_ITR,
4005 				    Adapter->intr_throttling_rate);
4006 				if (e1000g_check_acc_handle(
4007 				    Adapter->osdep.reg_handle) != DDI_FM_OK) {
4008 					ddi_fm_service_impact(Adapter->dip,
4009 					    DDI_SERVICE_DEGRADED);
4010 					err = EIO;
4011 				}
4012 			} else
4013 				err = EINVAL;
4014 		}
4015 		return (err);
4016 	}
4017 	if (strcmp(pr_name, "_intr_adaptive") == 0) {
4018 		if (pr_val == NULL) {
4019 			err = EINVAL;
4020 			return (err);
4021 		}
4022 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4023 		if (result < 0 || result > 1)
4024 			err = EINVAL;
4025 		else {
4026 			if (hw->mac.type >= e1000_82540) {
4027 				Adapter->intr_adaptive = (result == 1) ?
4028 				    B_TRUE : B_FALSE;
4029 			} else {
4030 				err = EINVAL;
4031 			}
4032 		}
4033 		return (err);
4034 	}
4035 	return (ENOTSUP);
4036 }
4037 
4038 static int
4039 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name,
4040     uint_t pr_valsize, void *pr_val)
4041 {
4042 	int err = ENOTSUP;
4043 	int value;
4044 
4045 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
4046 		value = Adapter->param_adv_pause;
4047 		err = 0;
4048 		goto done;
4049 	}
4050 	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
4051 		value = Adapter->param_adv_asym_pause;
4052 		err = 0;
4053 		goto done;
4054 	}
4055 	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
4056 		value = Adapter->tx_bcopy_thresh;
4057 		err = 0;
4058 		goto done;
4059 	}
4060 	if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
4061 		value = Adapter->tx_intr_enable;
4062 		err = 0;
4063 		goto done;
4064 	}
4065 	if (strcmp(pr_name, "_tx_intr_delay") == 0) {
4066 		value = Adapter->tx_intr_delay;
4067 		err = 0;
4068 		goto done;
4069 	}
4070 	if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
4071 		value = Adapter->tx_intr_abs_delay;
4072 		err = 0;
4073 		goto done;
4074 	}
4075 	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
4076 		value = Adapter->rx_bcopy_thresh;
4077 		err = 0;
4078 		goto done;
4079 	}
4080 	if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
4081 		value = Adapter->rx_limit_onintr;
4082 		err = 0;
4083 		goto done;
4084 	}
4085 	if (strcmp(pr_name, "_rx_intr_delay") == 0) {
4086 		value = Adapter->rx_intr_delay;
4087 		err = 0;
4088 		goto done;
4089 	}
4090 	if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
4091 		value = Adapter->rx_intr_abs_delay;
4092 		err = 0;
4093 		goto done;
4094 	}
4095 	if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
4096 		value = Adapter->intr_throttling_rate;
4097 		err = 0;
4098 		goto done;
4099 	}
4100 	if (strcmp(pr_name, "_intr_adaptive") == 0) {
4101 		value = Adapter->intr_adaptive;
4102 		err = 0;
4103 		goto done;
4104 	}
4105 done:
4106 	if (err == 0) {
4107 		(void) snprintf(pr_val, pr_valsize, "%d", value);
4108 	}
4109 	return (err);
4110 }
4111 
4112 /*
4113  * e1000g_get_conf - get configurations set in e1000g.conf
4114  * This routine gets user-configured values out of the configuration
4115  * file e1000g.conf.
4116  *
4117  * For each configurable value, there is a minimum, a maximum, and a
4118  * default.
4119  * If the user does not configure a value, use the default.
4120  * If the user configures below the minimum, use the minimum.
4121  * If the user configures above the maximum, use the maximum.
4122  */
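/*
 * A minimal sketch of that clamping rule, using hypothetical numbers:
 * with a minimum of 80, a maximum of 4096 and a default of 2048, a
 * configured value of 10 is raised to 80, a value of 100000 is lowered
 * to 4096, and an unconfigured value yields 2048.
 */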
4123 static void
4124 e1000g_get_conf(struct e1000g *Adapter)
4125 {
4126 	struct e1000_hw *hw = &Adapter->shared;
4127 	boolean_t tbi_compatibility = B_FALSE;
4128 	boolean_t is_jumbo = B_FALSE;
4129 	int propval;
4130 	/*
4131 	 * decrease the number of descriptors and free packets
4132 	 * for jumbo frames to reduce tx/rx resource consumption
4133 	 */
4134 	if (Adapter->max_frame_size >= FRAME_SIZE_UPTO_4K) {
4135 		is_jumbo = B_TRUE;
4136 	}
4137 
4138 	/*
4139 	 * get each configurable property from e1000g.conf
4140 	 */
4141 
4142 	/*
4143 	 * NumTxDescriptors
4144 	 */
4145 	Adapter->tx_desc_num_flag =
4146 	    e1000g_get_prop(Adapter, "NumTxDescriptors",
4147 	    MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR,
4148 	    is_jumbo ? DEFAULT_JUMBO_NUM_TX_DESC
4149 	    : DEFAULT_NUM_TX_DESCRIPTOR, &propval);
4150 	Adapter->tx_desc_num = propval;
4151 
4152 	/*
4153 	 * NumRxDescriptors
4154 	 */
4155 	Adapter->rx_desc_num_flag =
4156 	    e1000g_get_prop(Adapter, "NumRxDescriptors",
4157 	    MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR,
4158 	    is_jumbo ? DEFAULT_JUMBO_NUM_RX_DESC
4159 	    : DEFAULT_NUM_RX_DESCRIPTOR, &propval);
4160 	Adapter->rx_desc_num = propval;
4161 
4162 	/*
4163 	 * NumRxFreeList
4164 	 */
4165 	Adapter->rx_buf_num_flag =
4166 	    e1000g_get_prop(Adapter, "NumRxFreeList",
4167 	    MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST,
4168 	    is_jumbo ? DEFAULT_JUMBO_NUM_RX_BUF
4169 	    : DEFAULT_NUM_RX_FREELIST, &propval);
4170 	Adapter->rx_freelist_limit = propval;
4171 
4172 	/*
4173 	 * NumTxPacketList
4174 	 */
4175 	Adapter->tx_buf_num_flag =
4176 	    e1000g_get_prop(Adapter, "NumTxPacketList",
4177 	    MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST,
4178 	    is_jumbo ? DEFAULT_JUMBO_NUM_TX_BUF
4179 	    : DEFAULT_NUM_TX_FREELIST, &propval);
4180 	Adapter->tx_freelist_num = propval;
4181 
4182 	/*
4183 	 * FlowControl
4184 	 */
4185 	hw->fc.send_xon = B_TRUE;
4186 	(void) e1000g_get_prop(Adapter, "FlowControl",
4187 	    e1000_fc_none, 4, DEFAULT_FLOW_CONTROL, &propval);
4188 	hw->fc.requested_mode = propval;
4189 	/* 4 is the setting that says "let the eeprom decide" */
4190 	if (hw->fc.requested_mode == 4)
4191 		hw->fc.requested_mode = e1000_fc_default;
4192 
4193 	/*
4194 	 * Max Num Receive Packets on Interrupt
4195 	 */
4196 	(void) e1000g_get_prop(Adapter, "MaxNumReceivePackets",
4197 	    MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR,
4198 	    DEFAULT_RX_LIMIT_ON_INTR, &propval);
4199 	Adapter->rx_limit_onintr = propval;
4200 
4201 	/*
4202 	 * PHY master slave setting
4203 	 */
4204 	(void) e1000g_get_prop(Adapter, "SetMasterSlave",
4205 	    e1000_ms_hw_default, e1000_ms_auto,
4206 	    e1000_ms_hw_default, &propval);
4207 	hw->phy.ms_type = propval;
4208 
4209 	/*
4210 	 * Parameter that controls the TBI mode workaround, which is only
4211 	 * needed with certain switches such as Cisco 6500/Foundry.
4212 	 */
4213 	(void) e1000g_get_prop(Adapter, "TbiCompatibilityEnable",
4214 	    0, 1, DEFAULT_TBI_COMPAT_ENABLE, &propval);
4215 	tbi_compatibility = (propval == 1);
4216 	e1000_set_tbi_compatibility_82543(hw, tbi_compatibility);
4217 
4218 	/*
4219 	 * MSI Enable
4220 	 */
4221 	(void) e1000g_get_prop(Adapter, "MSIEnable",
4222 	    0, 1, DEFAULT_MSI_ENABLE, &propval);
4223 	Adapter->msi_enable = (propval == 1);
4224 
4225 	/*
4226 	 * Interrupt Throttling Rate
4227 	 */
4228 	(void) e1000g_get_prop(Adapter, "intr_throttling_rate",
4229 	    MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
4230 	    DEFAULT_INTR_THROTTLING, &propval);
4231 	Adapter->intr_throttling_rate = propval;
4232 
4233 	/*
4234 	 * Adaptive Interrupt Blanking Enable/Disable
4235 	 * It is enabled by default
4236 	 */
4237 	(void) e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1,
4238 	    &propval);
4239 	Adapter->intr_adaptive = (propval == 1);
4240 
4241 	/*
4242 	 * Hardware checksum enable/disable parameter
4243 	 */
4244 	(void) e1000g_get_prop(Adapter, "tx_hcksum_enable",
4245 	    0, 1, DEFAULT_TX_HCKSUM_ENABLE, &propval);
4246 	Adapter->tx_hcksum_enable = (propval == 1);
4247 	/*
4248 	 * Checksum on/off selection via global parameters.
4249 	 *
4250 	 * If the chip is flagged as not capable of (correctly)
4251 	 * handling checksumming, we don't enable it on either
4252 	 * Rx or Tx side.  Otherwise, we take this chip's settings
4253 	 * from the patchable global defaults.
4254 	 *
4255 	 * We advertise our capabilities only if TX offload is
4256 	 * enabled.  On receive, the stack will accept checksummed
4257 	 * packets anyway, even if we haven't said we can deliver
4258 	 * them.
4259 	 */
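	/*
	 * As an illustration (the entry below is hypothetical), hardware
	 * TX checksum offload could be disabled for the first two
	 * instances with the following line in e1000g.conf:
	 *
	 *	tx_hcksum_enable=0,0;
	 */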
4260 	switch (hw->mac.type) {
4261 		case e1000_82540:
4262 		case e1000_82544:
4263 		case e1000_82545:
4264 		case e1000_82545_rev_3:
4265 		case e1000_82546:
4266 		case e1000_82546_rev_3:
4267 		case e1000_82571:
4268 		case e1000_82572:
4269 		case e1000_82573:
4270 		case e1000_80003es2lan:
4271 			break;
4272 		/*
4273 		 * For the following Intel PRO/1000 chipsets, we have not
4274 		 * tested the hardware checksum offload capability, so we
4275 		 * disable the capability for them.
4276 		 *	e1000_82542,
4277 		 *	e1000_82543,
4278 		 *	e1000_82541,
4279 		 *	e1000_82541_rev_2,
4280 		 *	e1000_82547,
4281 		 *	e1000_82547_rev_2,
4282 		 */
4283 		default:
4284 			Adapter->tx_hcksum_enable = B_FALSE;
4285 	}
4286 
4287 	/*
4288  * Large Send Offload (LSO) Enable/Disable
4289 	 * If the tx hardware checksum is not enabled, LSO should be
4290 	 * disabled.
4291 	 */
4292 	(void) e1000g_get_prop(Adapter, "lso_enable",
4293 	    0, 1, DEFAULT_LSO_ENABLE, &propval);
4294 	Adapter->lso_enable = (propval == 1);
4295 
4296 	switch (hw->mac.type) {
4297 		case e1000_82546:
4298 		case e1000_82546_rev_3:
4299 			if (Adapter->lso_enable)
4300 				Adapter->lso_premature_issue = B_TRUE;
4301 			/* FALLTHRU */
4302 		case e1000_82571:
4303 		case e1000_82572:
4304 		case e1000_82573:
4305 		case e1000_80003es2lan:
4306 			break;
4307 		default:
4308 			Adapter->lso_enable = B_FALSE;
4309 	}
4310 
4311 	if (!Adapter->tx_hcksum_enable) {
4312 		Adapter->lso_premature_issue = B_FALSE;
4313 		Adapter->lso_enable = B_FALSE;
4314 	}
4315 
4316 	/*
4317 	 * If mem_workaround_82546 is enabled, the rx buffers allocated for
4318 	 * e1000_82545, e1000_82546 and e1000_82546_rev_3
4319 	 * will not cross a 64k boundary.
4320 	 */
4321 	(void) e1000g_get_prop(Adapter, "mem_workaround_82546",
4322 	    0, 1, DEFAULT_MEM_WORKAROUND_82546, &propval);
4323 	Adapter->mem_workaround_82546 = (propval == 1);
4324 
4325 	/*
4326 	 * Max number of multicast addresses
4327 	 */
4328 	(void) e1000g_get_prop(Adapter, "mcast_max_num",
4329 	    MIN_MCAST_NUM, MAX_MCAST_NUM, hw->mac.mta_reg_count * 32,
4330 	    &propval);
4331 	Adapter->mcast_max_num = propval;
4332 }
4333 
4334 /*
4335  * e1000g_get_prop - routine to read properties
4336  *
4337  * Get a user-configured property value out of the configuration
4338  * file e1000g.conf.
4339  *
4340  * The caller provides the name of the property, a minimum value, a
4341  * maximum value, a default value and a pointer to the returned
4342  * property value.
4343  *
4344  * Return B_TRUE if the configured value of the property is not a default
4345  * value, otherwise return B_FALSE.
4346  */
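/*
 * Each property is read from e1000g.conf as a per-instance integer
 * array; element N applies to driver instance N.  A sketch of such an
 * entry (the values shown are hypothetical) is:
 *
 *	NumTxDescriptors=2048,2048;
 */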
4347 static boolean_t
4348 e1000g_get_prop(struct e1000g *Adapter,	/* pointer to per-adapter structure */
4349     char *propname,		/* name of the property */
4350     int minval,			/* minimum acceptable value */
4351     int maxval,			/* maximum acceptable value */
4352     int defval,			/* default value */
4353     int *propvalue)		/* property value return to caller */
4354 {
4355 	int propval;		/* value returned for requested property */
4356 	int *props;		/* pointer to array of properties returned */
4357 	uint_t nprops;		/* number of property values returned */
4358 	boolean_t ret = B_TRUE;
4359 
4360 	/*
4361 	 * get the array of properties from the config file
4362 	 */
4363 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip,
4364 	    DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) {
4365 		/* got some properties, test if we got enough */
4366 		if (Adapter->instance < nprops) {
4367 			propval = props[Adapter->instance];
4368 		} else {
4369 			/* not enough properties configured */
4370 			propval = defval;
4371 			E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4372 			    "Not Enough %s values found in e1000g.conf"
4373 			    " - set to %d\n",
4374 			    propname, propval);
4375 			ret = B_FALSE;
4376 		}
4377 
4378 		/* free memory allocated for properties */
4379 		ddi_prop_free(props);
4380 
4381 	} else {
4382 		propval = defval;
4383 		ret = B_FALSE;
4384 	}
4385 
4386 	/*
4387 	 * enforce limits
4388 	 */
4389 	if (propval > maxval) {
4390 		propval = maxval;
4391 		E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4392 		    "Too High %s value in e1000g.conf - set to %d\n",
4393 		    propname, propval);
4394 	}
4395 
4396 	if (propval < minval) {
4397 		propval = minval;
4398 		E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4399 		    "Too Low %s value in e1000g.conf - set to %d\n",
4400 		    propname, propval);
4401 	}
4402 
4403 	*propvalue = propval;
4404 	return (ret);
4405 }
4406 
4407 static boolean_t
4408 e1000g_link_check(struct e1000g *Adapter)
4409 {
4410 	uint16_t speed, duplex, phydata;
4411 	boolean_t link_changed = B_FALSE;
4412 	struct e1000_hw *hw;
4413 	uint32_t reg_tarc;
4414 
4415 	hw = &Adapter->shared;
4416 
4417 	if (e1000g_link_up(Adapter)) {
4418 		/*
4419 		 * The link is up; check whether it was marked as down earlier
4420 		 */
4421 		if (Adapter->link_state != LINK_STATE_UP) {
4422 			(void) e1000_get_speed_and_duplex(hw, &speed, &duplex);
4423 			Adapter->link_speed = speed;
4424 			Adapter->link_duplex = duplex;
4425 			Adapter->link_state = LINK_STATE_UP;
4426 			link_changed = B_TRUE;
4427 
4428 			if (Adapter->link_speed == SPEED_1000)
4429 				Adapter->stall_threshold = TX_STALL_TIME_2S;
4430 			else
4431 				Adapter->stall_threshold = TX_STALL_TIME_8S;
4432 
4433 			Adapter->tx_link_down_timeout = 0;
4434 
4435 			if ((hw->mac.type == e1000_82571) ||
4436 			    (hw->mac.type == e1000_82572)) {
4437 				reg_tarc = E1000_READ_REG(hw, E1000_TARC(0));
4438 				if (speed == SPEED_1000)
4439 					reg_tarc |= (1 << 21);
4440 				else
4441 					reg_tarc &= ~(1 << 21);
4442 				E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc);
4443 			}
4444 		}
4445 		Adapter->smartspeed = 0;
4446 	} else {
4447 		if (Adapter->link_state != LINK_STATE_DOWN) {
4448 			Adapter->link_speed = 0;
4449 			Adapter->link_duplex = 0;
4450 			Adapter->link_state = LINK_STATE_DOWN;
4451 			link_changed = B_TRUE;
4452 
4453 			/*
4454 			 * SmartSpeed workaround for Tabor/TanaX: when the
4455 			 * driver loses link, disable auto master/slave
4456 			 * resolution.
4457 			 */
4458 			if (hw->phy.type == e1000_phy_igp) {
4459 				(void) e1000_read_phy_reg(hw,
4460 				    PHY_1000T_CTRL, &phydata);
4461 				phydata |= CR_1000T_MS_ENABLE;
4462 				(void) e1000_write_phy_reg(hw,
4463 				    PHY_1000T_CTRL, phydata);
4464 			}
4465 		} else {
4466 			e1000g_smartspeed(Adapter);
4467 		}
4468 
4469 		if (Adapter->e1000g_state & E1000G_STARTED) {
4470 			if (Adapter->tx_link_down_timeout <
4471 			    MAX_TX_LINK_DOWN_TIMEOUT) {
4472 				Adapter->tx_link_down_timeout++;
4473 			} else if (Adapter->tx_link_down_timeout ==
4474 			    MAX_TX_LINK_DOWN_TIMEOUT) {
4475 				e1000g_tx_clean(Adapter);
4476 				Adapter->tx_link_down_timeout++;
4477 			}
4478 		}
4479 	}
4480 
4481 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
4482 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4483 
4484 	return (link_changed);
4485 }
4486 
4487 /*
4488  * e1000g_reset_link - Using the link properties to setup the link
4489  */
4490 int
4491 e1000g_reset_link(struct e1000g *Adapter)
4492 {
4493 	struct e1000_mac_info *mac;
4494 	struct e1000_phy_info *phy;
4495 	struct e1000_hw *hw;
4496 	boolean_t invalid;
4497 
4498 	mac = &Adapter->shared.mac;
4499 	phy = &Adapter->shared.phy;
4500 	hw = &Adapter->shared;
4501 	invalid = B_FALSE;
4502 
4503 	if (hw->phy.media_type != e1000_media_type_copper)
4504 		goto out;
4505 
4506 	if (Adapter->param_adv_autoneg == 1) {
4507 		mac->autoneg = B_TRUE;
4508 		phy->autoneg_advertised = 0;
4509 
4510 		/*
4511 		 * 1000hdx is not supported for autonegotiation
4512 		 */
4513 		if (Adapter->param_adv_1000fdx == 1)
4514 			phy->autoneg_advertised |= ADVERTISE_1000_FULL;
4515 
4516 		if (Adapter->param_adv_100fdx == 1)
4517 			phy->autoneg_advertised |= ADVERTISE_100_FULL;
4518 
4519 		if (Adapter->param_adv_100hdx == 1)
4520 			phy->autoneg_advertised |= ADVERTISE_100_HALF;
4521 
4522 		if (Adapter->param_adv_10fdx == 1)
4523 			phy->autoneg_advertised |= ADVERTISE_10_FULL;
4524 
4525 		if (Adapter->param_adv_10hdx == 1)
4526 			phy->autoneg_advertised |= ADVERTISE_10_HALF;
4527 
4528 		if (phy->autoneg_advertised == 0)
4529 			invalid = B_TRUE;
4530 	} else {
4531 		mac->autoneg = B_FALSE;
4532 
4533 		/*
4534 		 * For Intel copper cards, 1000fdx and 1000hdx are not
4535 		 * supported for forced link
4536 		 */
4537 		if (Adapter->param_adv_100fdx == 1)
4538 			mac->forced_speed_duplex = ADVERTISE_100_FULL;
4539 		else if (Adapter->param_adv_100hdx == 1)
4540 			mac->forced_speed_duplex = ADVERTISE_100_HALF;
4541 		else if (Adapter->param_adv_10fdx == 1)
4542 			mac->forced_speed_duplex = ADVERTISE_10_FULL;
4543 		else if (Adapter->param_adv_10hdx == 1)
4544 			mac->forced_speed_duplex = ADVERTISE_10_HALF;
4545 		else
4546 			invalid = B_TRUE;
4547 
4548 	}
4549 
4550 	if (invalid) {
4551 		e1000g_log(Adapter, CE_WARN,
4552 		    "Invalid link settings. Setup link to "
4553 		    "support autonegotiation with all link capabilities.");
4554 		mac->autoneg = B_TRUE;
4555 		phy->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
4556 	}
4557 
4558 out:
4559 	return (e1000_setup_link(&Adapter->shared));
4560 }
4561 
4562 static void
4563 e1000g_timer_tx_resched(struct e1000g *Adapter)
4564 {
4565 	e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
4566 
4567 	rw_enter(&Adapter->chip_lock, RW_READER);
4568 
4569 	if (tx_ring->resched_needed &&
4570 	    ((ddi_get_lbolt() - tx_ring->resched_timestamp) >
4571 	    drv_usectohz(1000000)) &&
4572 	    (Adapter->e1000g_state & E1000G_STARTED) &&
4573 	    (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) {
4574 		tx_ring->resched_needed = B_FALSE;
4575 		mac_tx_update(Adapter->mh);
4576 		E1000G_STAT(tx_ring->stat_reschedule);
4577 		E1000G_STAT(tx_ring->stat_timer_reschedule);
4578 	}
4579 
4580 	rw_exit(&Adapter->chip_lock);
4581 }
4582 
4583 static void
4584 e1000g_local_timer(void *ws)
4585 {
4586 	struct e1000g *Adapter = (struct e1000g *)ws;
4587 	struct e1000_hw *hw;
4588 	e1000g_ether_addr_t ether_addr;
4589 	boolean_t link_changed;
4590 
4591 	hw = &Adapter->shared;
4592 
4593 	if (Adapter->e1000g_state & E1000G_ERROR) {
4594 		rw_enter(&Adapter->chip_lock, RW_WRITER);
4595 		Adapter->e1000g_state &= ~E1000G_ERROR;
4596 		rw_exit(&Adapter->chip_lock);
4597 
4598 		Adapter->reset_count++;
4599 		if (e1000g_global_reset(Adapter)) {
4600 			ddi_fm_service_impact(Adapter->dip,
4601 			    DDI_SERVICE_RESTORED);
4602 			e1000g_timer_tx_resched(Adapter);
4603 		} else
4604 			ddi_fm_service_impact(Adapter->dip,
4605 			    DDI_SERVICE_LOST);
4606 		return;
4607 	}
4608 
4609 	if (e1000g_stall_check(Adapter)) {
4610 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
4611 		    "Tx stall detected. Activate automatic recovery.\n");
4612 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL);
4613 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
4614 		Adapter->reset_count++;
4615 		if (e1000g_reset_adapter(Adapter)) {
4616 			ddi_fm_service_impact(Adapter->dip,
4617 			    DDI_SERVICE_RESTORED);
4618 			e1000g_timer_tx_resched(Adapter);
4619 		}
4620 		return;
4621 	}
4622 
4623 	link_changed = B_FALSE;
4624 	rw_enter(&Adapter->chip_lock, RW_READER);
4625 	if (Adapter->link_complete)
4626 		link_changed = e1000g_link_check(Adapter);
4627 	rw_exit(&Adapter->chip_lock);
4628 
4629 	if (link_changed) {
4630 		if (!Adapter->reset_flag &&
4631 		    (Adapter->e1000g_state & E1000G_STARTED) &&
4632 		    !(Adapter->e1000g_state & E1000G_SUSPENDED))
4633 			mac_link_update(Adapter->mh, Adapter->link_state);
4634 		if (Adapter->link_state == LINK_STATE_UP)
4635 			Adapter->reset_flag = B_FALSE;
4636 	}
4637 	/*
4638 	 * Workaround for esb2. Data stuck in fifo on a link
4639 	 * down event. Reset the adapter to recover it.
4640 	 */
4641 	if (Adapter->esb2_workaround) {
4642 		Adapter->esb2_workaround = B_FALSE;
4643 		(void) e1000g_reset_adapter(Adapter);
4644 		return;
4645 	}
4646 
4647 	/*
4648 	 * With 82571 controllers, any locally administered address will
4649 	 * be overwritten when there is a reset on the other port.
4650 	 * Detect this circumstance and correct it.
4651 	 */
4652 	if ((hw->mac.type == e1000_82571) &&
4653 	    (e1000_get_laa_state_82571(hw) == B_TRUE)) {
4654 		ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0);
4655 		ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1);
4656 
4657 		ether_addr.reg.low = ntohl(ether_addr.reg.low);
4658 		ether_addr.reg.high = ntohl(ether_addr.reg.high);
4659 
4660 		if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) ||
4661 		    (ether_addr.mac.addr[4] != hw->mac.addr[1]) ||
4662 		    (ether_addr.mac.addr[3] != hw->mac.addr[2]) ||
4663 		    (ether_addr.mac.addr[2] != hw->mac.addr[3]) ||
4664 		    (ether_addr.mac.addr[1] != hw->mac.addr[4]) ||
4665 		    (ether_addr.mac.addr[0] != hw->mac.addr[5])) {
4666 			(void) e1000_rar_set(hw, hw->mac.addr, 0);
4667 		}
4668 	}
4669 
4670 	/*
4671 	 * Long TTL workaround for 82541/82547
4672 	 */
4673 	(void) e1000_igp_ttl_workaround_82547(hw);
4674 
4675 	/*
4676 	 * Check for Adaptive IFS settings.  If there are lots of collisions,
4677 	 * change the value in steps...
4678 	 * These properties should only be set for 10/100 links.
4679 	 */
4680 	if ((hw->phy.media_type == e1000_media_type_copper) &&
4681 	    ((Adapter->link_speed == SPEED_100) ||
4682 	    (Adapter->link_speed == SPEED_10))) {
4683 		e1000_update_adaptive(hw);
4684 	}
4685 	/*
4686 	 * Set Timer Interrupts
4687 	 */
4688 	E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
4689 
4690 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
4691 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4692 	else
4693 		e1000g_timer_tx_resched(Adapter);
4694 
4695 	restart_watchdog_timer(Adapter);
4696 }
4697 
4698 /*
4699  * e1000g_link_timer() is called when the timer for link setup expires,
4700  * which indicates that link setup has completed. The link state is not
4701  * updated until link setup is complete, and it is not sent to the upper
4702  * layer through mac_link_update() in this function. It is updated in the
4703  * local timer routine or the interrupt service routine after the
4704  * interface has been started (plumbed).
4705  */
4706 static void
4707 e1000g_link_timer(void *arg)
4708 {
4709 	struct e1000g *Adapter = (struct e1000g *)arg;
4710 
4711 	mutex_enter(&Adapter->link_lock);
4712 	Adapter->link_complete = B_TRUE;
4713 	Adapter->link_tid = 0;
4714 	mutex_exit(&Adapter->link_lock);
4715 }
4716 
4717 /*
4718  * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf
4719  *
4720  * This function reads the forced speed and duplex settings for 10/100
4721  * Mbps and 1000 Mbps operation from the e1000g.conf file.
4722  */
4723 static void
4724 e1000g_force_speed_duplex(struct e1000g *Adapter)
4725 {
4726 	int forced;
4727 	int propval;
4728 	struct e1000_mac_info *mac = &Adapter->shared.mac;
4729 	struct e1000_phy_info *phy = &Adapter->shared.phy;
4730 
4731 	/*
4732 	 * get value out of config file
4733 	 */
4734 	(void) e1000g_get_prop(Adapter, "ForceSpeedDuplex",
4735 	    GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY, &forced);
4736 
4737 	switch (forced) {
4738 	case GDIAG_10_HALF:
4739 		/*
4740 		 * Disable Auto Negotiation
4741 		 */
4742 		mac->autoneg = B_FALSE;
4743 		mac->forced_speed_duplex = ADVERTISE_10_HALF;
4744 		break;
4745 	case GDIAG_10_FULL:
4746 		/*
4747 		 * Disable Auto Negotiation
4748 		 */
4749 		mac->autoneg = B_FALSE;
4750 		mac->forced_speed_duplex = ADVERTISE_10_FULL;
4751 		break;
4752 	case GDIAG_100_HALF:
4753 		/*
4754 		 * Disable Auto Negotiation
4755 		 */
4756 		mac->autoneg = B_FALSE;
4757 		mac->forced_speed_duplex = ADVERTISE_100_HALF;
4758 		break;
4759 	case GDIAG_100_FULL:
4760 		/*
4761 		 * Disable Auto Negotiation
4762 		 */
4763 		mac->autoneg = B_FALSE;
4764 		mac->forced_speed_duplex = ADVERTISE_100_FULL;
4765 		break;
4766 	case GDIAG_1000_FULL:
4767 		/*
4768 		 * The gigabit spec requires autonegotiation.  Therefore,
4769 		 * when the user wants to force the speed to 1000Mbps, we
4770 		 * enable AutoNeg, but only allow the hardware to advertise
4771 		 * 1000Mbps.  This is different from 10/100 operation, where
4772 		 * we are allowed to link without any negotiation.
4773 		 */
4774 		mac->autoneg = B_TRUE;
4775 		phy->autoneg_advertised = ADVERTISE_1000_FULL;
4776 		break;
4777 	default:	/* obey the setting of AutoNegAdvertised */
4778 		mac->autoneg = B_TRUE;
4779 		(void) e1000g_get_prop(Adapter, "AutoNegAdvertised",
4780 		    0, AUTONEG_ADVERTISE_SPEED_DEFAULT,
4781 		    AUTONEG_ADVERTISE_SPEED_DEFAULT, &propval);
4782 		phy->autoneg_advertised = (uint16_t)propval;
4783 		break;
4784 	}	/* switch */
4785 }
4786 
4787 /*
4788  * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf
4789  *
4790  * This function reads MaxFrameSize from e1000g.conf
4791  */
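/*
 * MaxFrameSize takes a value of 0 through 3, selecting the standard
 * 1500-byte MTU or a jumbo MTU derived from the 4K, 8K or 16K frame
 * sizes respectively (see the switch below).  A hypothetical
 * e1000g.conf entry enabling roughly 8K frames on the first two
 * instances would be:
 *
 *	MaxFrameSize=2,2;
 */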
4792 static void
4793 e1000g_get_max_frame_size(struct e1000g *Adapter)
4794 {
4795 	int max_frame;
4796 
4797 	/*
4798 	 * get value out of config file
4799 	 */
4800 	(void) e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0,
4801 	    &max_frame);
4802 
4803 	switch (max_frame) {
4804 	case 0:
4805 		Adapter->default_mtu = ETHERMTU;
4806 		break;
4807 	case 1:
4808 		Adapter->default_mtu = FRAME_SIZE_UPTO_4K -
4809 		    sizeof (struct ether_vlan_header) - ETHERFCSL;
4810 		break;
4811 	case 2:
4812 		Adapter->default_mtu = FRAME_SIZE_UPTO_8K -
4813 		    sizeof (struct ether_vlan_header) - ETHERFCSL;
4814 		break;
4815 	case 3:
4816 		Adapter->default_mtu = FRAME_SIZE_UPTO_16K -
4817 		    sizeof (struct ether_vlan_header) - ETHERFCSL;
4818 		break;
4819 	default:
4820 		Adapter->default_mtu = ETHERMTU;
4821 		break;
4822 	}	/* switch */
4823 
4824 	/*
4825 	 * If the user-configured MTU is larger than the device's maximum MTU,
4826 	 * the MTU is set to the device's maximum value.
4827 	 */
4828 	if (Adapter->default_mtu > Adapter->max_mtu)
4829 		Adapter->default_mtu = Adapter->max_mtu;
4830 
4831 	Adapter->max_frame_size = e1000g_mtu2maxframe(Adapter->default_mtu);
4832 }
4833 
4834 /*
4835  * e1000g_pch_limits - Apply limits of the PCH silicon type
4836  *
4837  * At any frame size larger than the ethernet default,
4838  * prevent linking at 10/100 speeds.
4839  */
4840 static void
4841 e1000g_pch_limits(struct e1000g *Adapter)
4842 {
4843 	struct e1000_hw *hw = &Adapter->shared;
4844 
4845 	/* only applies to PCH silicon type */
4846 	if (hw->mac.type != e1000_pchlan && hw->mac.type != e1000_pch2lan)
4847 		return;
4848 
4849 	/* only applies to frames larger than ethernet default */
4850 	if (Adapter->max_frame_size > DEFAULT_FRAME_SIZE) {
4851 		hw->mac.autoneg = B_TRUE;
4852 		hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
4853 
4854 		Adapter->param_adv_autoneg = 1;
4855 		Adapter->param_adv_1000fdx = 1;
4856 
4857 		Adapter->param_adv_100fdx = 0;
4858 		Adapter->param_adv_100hdx = 0;
4859 		Adapter->param_adv_10fdx = 0;
4860 		Adapter->param_adv_10hdx = 0;
4861 
4862 		e1000g_param_sync(Adapter);
4863 	}
4864 }
4865 
4866 /*
4867  * e1000g_mtu2maxframe - convert given MTU to maximum frame size
4868  */
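/*
 * For example, assuming an 18-byte VLAN-tagged Ethernet header and a
 * 4-byte FCS, the default 1500-byte MTU maps to a maximum frame size
 * of 1500 + 18 + 4 = 1522 bytes.
 */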
4869 static uint32_t
4870 e1000g_mtu2maxframe(uint32_t mtu)
4871 {
4872 	uint32_t maxframe;
4873 
4874 	maxframe = mtu + sizeof (struct ether_vlan_header) + ETHERFCSL;
4875 
4876 	return (maxframe);
4877 }
4878 
4879 static void
4880 arm_watchdog_timer(struct e1000g *Adapter)
4881 {
4882 	Adapter->watchdog_tid =
4883 	    timeout(e1000g_local_timer,
4884 	    (void *)Adapter, 1 * drv_usectohz(1000000));
4885 }
4886 #pragma inline(arm_watchdog_timer)
4887 
4888 static void
4889 enable_watchdog_timer(struct e1000g *Adapter)
4890 {
4891 	mutex_enter(&Adapter->watchdog_lock);
4892 
4893 	if (!Adapter->watchdog_timer_enabled) {
4894 		Adapter->watchdog_timer_enabled = B_TRUE;
4895 		Adapter->watchdog_timer_started = B_TRUE;
4896 		arm_watchdog_timer(Adapter);
4897 	}
4898 
4899 	mutex_exit(&Adapter->watchdog_lock);
4900 }
4901 
4902 static void
4903 disable_watchdog_timer(struct e1000g *Adapter)
4904 {
4905 	timeout_id_t tid;
4906 
4907 	mutex_enter(&Adapter->watchdog_lock);
4908 
4909 	Adapter->watchdog_timer_enabled = B_FALSE;
4910 	Adapter->watchdog_timer_started = B_FALSE;
4911 	tid = Adapter->watchdog_tid;
4912 	Adapter->watchdog_tid = 0;
4913 
4914 	mutex_exit(&Adapter->watchdog_lock);
4915 
4916 	if (tid != 0)
4917 		(void) untimeout(tid);
4918 }
4919 
4920 static void
4921 start_watchdog_timer(struct e1000g *Adapter)
4922 {
4923 	mutex_enter(&Adapter->watchdog_lock);
4924 
4925 	if (Adapter->watchdog_timer_enabled) {
4926 		if (!Adapter->watchdog_timer_started) {
4927 			Adapter->watchdog_timer_started = B_TRUE;
4928 			arm_watchdog_timer(Adapter);
4929 		}
4930 	}
4931 
4932 	mutex_exit(&Adapter->watchdog_lock);
4933 }
4934 
4935 static void
4936 restart_watchdog_timer(struct e1000g *Adapter)
4937 {
4938 	mutex_enter(&Adapter->watchdog_lock);
4939 
4940 	if (Adapter->watchdog_timer_started)
4941 		arm_watchdog_timer(Adapter);
4942 
4943 	mutex_exit(&Adapter->watchdog_lock);
4944 }
4945 
4946 static void
4947 stop_watchdog_timer(struct e1000g *Adapter)
4948 {
4949 	timeout_id_t tid;
4950 
4951 	mutex_enter(&Adapter->watchdog_lock);
4952 
4953 	Adapter->watchdog_timer_started = B_FALSE;
4954 	tid = Adapter->watchdog_tid;
4955 	Adapter->watchdog_tid = 0;
4956 
4957 	mutex_exit(&Adapter->watchdog_lock);
4958 
4959 	if (tid != 0)
4960 		(void) untimeout(tid);
4961 }
4962 
4963 static void
4964 stop_link_timer(struct e1000g *Adapter)
4965 {
4966 	timeout_id_t tid;
4967 
4968 	/* Disable the link timer */
4969 	mutex_enter(&Adapter->link_lock);
4970 
4971 	tid = Adapter->link_tid;
4972 	Adapter->link_tid = 0;
4973 
4974 	mutex_exit(&Adapter->link_lock);
4975 
4976 	if (tid != 0)
4977 		(void) untimeout(tid);
4978 }
4979 
4980 static void
4981 stop_82547_timer(e1000g_tx_ring_t *tx_ring)
4982 {
4983 	timeout_id_t tid;
4984 
4985 	/* Disable the tx timer for 82547 chipset */
4986 	mutex_enter(&tx_ring->tx_lock);
4987 
4988 	tx_ring->timer_enable_82547 = B_FALSE;
4989 	tid = tx_ring->timer_id_82547;
4990 	tx_ring->timer_id_82547 = 0;
4991 
4992 	mutex_exit(&tx_ring->tx_lock);
4993 
4994 	if (tid != 0)
4995 		(void) untimeout(tid);
4996 }
4997 
4998 void
4999 e1000g_clear_interrupt(struct e1000g *Adapter)
5000 {
5001 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC,
5002 	    0xffffffff & ~E1000_IMS_RXSEQ);
5003 }
5004 
5005 void
5006 e1000g_mask_interrupt(struct e1000g *Adapter)
5007 {
5008 	E1000_WRITE_REG(&Adapter->shared, E1000_IMS,
5009 	    IMS_ENABLE_MASK & ~E1000_IMS_TXDW);
5010 
5011 	if (Adapter->tx_intr_enable)
5012 		e1000g_mask_tx_interrupt(Adapter);
5013 }
5014 
5015 /*
5016  * This routine is called by e1000g_quiesce(), therefore must not block.
5017  */
5018 void
5019 e1000g_clear_all_interrupts(struct e1000g *Adapter)
5020 {
5021 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff);
5022 }
5023 
5024 void
5025 e1000g_mask_tx_interrupt(struct e1000g *Adapter)
5026 {
5027 	E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW);
5028 }
5029 
5030 void
5031 e1000g_clear_tx_interrupt(struct e1000g *Adapter)
5032 {
5033 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW);
5034 }
5035 
5036 static void
5037 e1000g_smartspeed(struct e1000g *Adapter)
5038 {
5039 	struct e1000_hw *hw = &Adapter->shared;
5040 	uint16_t phy_status;
5041 	uint16_t phy_ctrl;
5042 
5043 	/*
5044 	 * If we're not T-or-T, or we're not autoneg'ing, or we're not
5045 	 * advertising 1000Full, we don't even use the workaround
5046 	 */
5047 	if ((hw->phy.type != e1000_phy_igp) ||
5048 	    !hw->mac.autoneg ||
5049 	    !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL))
5050 		return;
5051 
5052 	/*
5053 	 * True if this is the first call of this function or after every
5054 	 * 30 seconds of not having link
5055 	 */
5056 	if (Adapter->smartspeed == 0) {
5057 		/*
5058 		 * If the Master/Slave config fault is asserted twice, we
5059 		 * assume back-to-back faults
5060 		 */
5061 		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
5062 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
5063 			return;
5064 
5065 		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
5066 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
5067 			return;
5068 		/*
5069 		 * We're assuming back-to-back faults because our status
5070 		 * register insists there's a fault in the master/slave
5071 		 * relationship that was "negotiated".
5072 		 */
5073 		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
5074 		/*
5075 		 * Is the phy configured for manual configuration of
5076 		 * master/slave?
5077 		 */
5078 		if (phy_ctrl & CR_1000T_MS_ENABLE) {
5079 			/*
5080 			 * Yes.  Then disable manual configuration (enable
5081 			 * auto configuration) of master/slave
5082 			 */
5083 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
5084 			(void) e1000_write_phy_reg(hw,
5085 			    PHY_1000T_CTRL, phy_ctrl);
5086 			/*
5087 			 * Effectively starting the clock
5088 			 */
5089 			Adapter->smartspeed++;
5090 			/*
5091 			 * Restart autonegotiation
5092 			 */
5093 			if (!e1000_phy_setup_autoneg(hw) &&
5094 			    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
5095 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
5096 				    MII_CR_RESTART_AUTO_NEG);
5097 				(void) e1000_write_phy_reg(hw,
5098 				    PHY_CONTROL, phy_ctrl);
5099 			}
5100 		}
5101 		return;
5102 		/*
5103 		 * Has 6 seconds transpired still without link? Remember,
5104 		 * you should reset the smartspeed counter once you obtain
5105 		 * link
5106 		 */
5107 	} else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
5108 		/*
5109 		 * Yes.  Remember, we did at the start determine that
5110 		 * there's a master/slave configuration fault, so we're
5111 		 * still assuming there's someone on the other end, but we
5112 		 * just haven't yet been able to talk to it. We then
5113 		 * re-enable auto configuration of master/slave to see if
5114 		 * we're running 2/3 pair cables.
5115 		 */
5116 		/*
5117 		 * If still no link, perhaps using 2/3 pair cable
5118 		 */
5119 		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
5120 		phy_ctrl |= CR_1000T_MS_ENABLE;
5121 		(void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
5122 		/*
5123 		 * Restart autoneg with phy enabled for manual
5124 		 * configuration of master/slave
5125 		 */
5126 		if (!e1000_phy_setup_autoneg(hw) &&
5127 		    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
5128 			phy_ctrl |=
5129 			    (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
5130 			(void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
5131 		}
5132 		/*
5133 		 * Hopefully, there are no more faults and we've obtained
5134 		 * link as a result.
5135 		 */
5136 	}
5137 	/*
5138 	 * Restart process after E1000_SMARTSPEED_MAX iterations (30
5139 	 * seconds)
5140 	 */
5141 	if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
5142 		Adapter->smartspeed = 0;
5143 }
5144 
5145 static boolean_t
5146 is_valid_mac_addr(uint8_t *mac_addr)
5147 {
5148 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
5149 	const uint8_t addr_test2[6] =
5150 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5151 
5152 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
5153 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
5154 		return (B_FALSE);
5155 
5156 	return (B_TRUE);
5157 }
5158 
5159 /*
5160  * e1000g_stall_check - check for tx stall
5161  *
5162  * This function checks if the adapter is stalled (in transmit).
5163  *
5164  * It is called each time the watchdog timer fires.
5165  * If the transmit descriptor reclaim continuously fails,
5166  * the watchdog value is incremented by 1. If the watchdog
5167  * value exceeds the threshold, the adapter is assumed to
5168  * have stalled and needs to be reset.
5169  */
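/*
 * Note that the stall determination itself is expected to be made
 * during descriptor reclaim (e1000g_recycle() sets Adapter->stall_flag
 * once a transmit has been outstanding longer than stall_threshold);
 * this routine only reports the result to the watchdog.
 */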
5170 static boolean_t
5171 e1000g_stall_check(struct e1000g *Adapter)
5172 {
5173 	e1000g_tx_ring_t *tx_ring;
5174 
5175 	tx_ring = Adapter->tx_ring;
5176 
5177 	if (Adapter->link_state != LINK_STATE_UP)
5178 		return (B_FALSE);
5179 
5180 	(void) e1000g_recycle(tx_ring);
5181 
5182 	if (Adapter->stall_flag)
5183 		return (B_TRUE);
5184 
5185 	return (B_FALSE);
5186 }
5187 
5188 #ifdef E1000G_DEBUG
5189 static enum ioc_reply
5190 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp)
5191 {
5192 	void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd);
5193 	e1000g_peekpoke_t *ppd;
5194 	uint64_t mem_va;
5195 	uint64_t maxoff;
5196 	boolean_t peek;
5197 
5198 	switch (iocp->ioc_cmd) {
5199 
5200 	case E1000G_IOC_REG_PEEK:
5201 		peek = B_TRUE;
5202 		break;
5203 
5204 	case E1000G_IOC_REG_POKE:
5205 		peek = B_FALSE;
5206 		break;
5207 
5208 	default:
5209 		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
5210 		    "e1000g_diag_ioctl: invalid ioctl command 0x%X\n",
5211 		    iocp->ioc_cmd);
5212 		return (IOC_INVAL);
5213 	}
5214 
5215 	/*
5216 	 * Validate format of ioctl
5217 	 */
5218 	if (iocp->ioc_count != sizeof (e1000g_peekpoke_t))
5219 		return (IOC_INVAL);
5220 	if (mp->b_cont == NULL)
5221 		return (IOC_INVAL);
5222 
5223 	ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr;
5224 
5225 	/*
5226 	 * Validate request parameters
5227 	 */
5228 	switch (ppd->pp_acc_space) {
5229 
5230 	default:
5231 		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
5232 		    "e1000g_diag_ioctl: invalid access space 0x%X\n",
5233 		    ppd->pp_acc_space);
5234 		return (IOC_INVAL);
5235 
5236 	case E1000G_PP_SPACE_REG:
5237 		/*
5238 		 * Memory-mapped I/O space
5239 		 */
5240 		ASSERT(ppd->pp_acc_size == 4);
5241 		if (ppd->pp_acc_size != 4)
5242 			return (IOC_INVAL);
5243 
5244 		if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
5245 			return (IOC_INVAL);
5246 
5247 		mem_va = 0;
5248 		maxoff = 0x10000;
5249 		ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg;
5250 		break;
5251 
5252 	case E1000G_PP_SPACE_E1000G:
5253 		/*
5254 		 * E1000g data structure!
5255 		 */
5256 		mem_va = (uintptr_t)e1000gp;
5257 		maxoff = sizeof (struct e1000g);
5258 		ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem;
5259 		break;
5260 
5261 	}
5262 
5263 	if (ppd->pp_acc_offset >= maxoff)
5264 		return (IOC_INVAL);
5265 
5266 	if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff)
5267 		return (IOC_INVAL);
5268 
5269 	/*
5270 	 * All OK - go!
5271 	 */
5272 	ppd->pp_acc_offset += mem_va;
5273 	(*ppfn)(e1000gp, ppd);
5274 	return (peek ? IOC_REPLY : IOC_ACK);
5275 }
5276 
5277 static void
5278 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5279 {
5280 	ddi_acc_handle_t handle;
5281 	uint32_t *regaddr;
5282 
5283 	handle = e1000gp->osdep.reg_handle;
5284 	regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
5285 	    (uintptr_t)ppd->pp_acc_offset);
5286 
5287 	ppd->pp_acc_data = ddi_get32(handle, regaddr);
5288 }
5289 
5290 static void
5291 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5292 {
5293 	ddi_acc_handle_t handle;
5294 	uint32_t *regaddr;
5295 	uint32_t value;
5296 
5297 	handle = e1000gp->osdep.reg_handle;
5298 	regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
5299 	    (uintptr_t)ppd->pp_acc_offset);
5300 	value = (uint32_t)ppd->pp_acc_data;
5301 
5302 	ddi_put32(handle, regaddr, value);
5303 }
5304 
5305 static void
5306 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5307 {
5308 	uint64_t value;
5309 	void *vaddr;
5310 
5311 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5312 
5313 	switch (ppd->pp_acc_size) {
5314 	case 1:
5315 		value = *(uint8_t *)vaddr;
5316 		break;
5317 
5318 	case 2:
5319 		value = *(uint16_t *)vaddr;
5320 		break;
5321 
5322 	case 4:
5323 		value = *(uint32_t *)vaddr;
5324 		break;
5325 
5326 	case 8:
5327 		value = *(uint64_t *)vaddr;
5328 		break;
5329 	}
5330 
5331 	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
5332 	    "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n",
5333 	    (void *)e1000gp, (void *)ppd, value, vaddr);
5334 
5335 	ppd->pp_acc_data = value;
5336 }
5337 
5338 static void
5339 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5340 {
5341 	uint64_t value;
5342 	void *vaddr;
5343 
5344 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5345 	value = ppd->pp_acc_data;
5346 
5347 	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
5348 	    "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n",
5349 	    (void *)e1000gp, (void *)ppd, value, vaddr);
5350 
5351 	switch (ppd->pp_acc_size) {
5352 	case 1:
5353 		*(uint8_t *)vaddr = (uint8_t)value;
5354 		break;
5355 
5356 	case 2:
5357 		*(uint16_t *)vaddr = (uint16_t)value;
5358 		break;
5359 
5360 	case 4:
5361 		*(uint32_t *)vaddr = (uint32_t)value;
5362 		break;
5363 
5364 	case 8:
5365 		*(uint64_t *)vaddr = (uint64_t)value;
5366 		break;
5367 	}
5368 }
5369 #endif
5370 
5371 /*
5372  * Loopback Support
5373  */
5374 static lb_property_t lb_normal =
5375 	{ normal,	"normal",	E1000G_LB_NONE		};
5376 static lb_property_t lb_external1000 =
5377 	{ external,	"1000Mbps",	E1000G_LB_EXTERNAL_1000	};
5378 static lb_property_t lb_external100 =
5379 	{ external,	"100Mbps",	E1000G_LB_EXTERNAL_100	};
5380 static lb_property_t lb_external10 =
5381 	{ external,	"10Mbps",	E1000G_LB_EXTERNAL_10	};
5382 static lb_property_t lb_phy =
5383 	{ internal,	"PHY",		E1000G_LB_INTERNAL_PHY	};
5384 
5385 static enum ioc_reply
5386 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp)
5387 {
5388 	lb_info_sz_t *lbsp;
5389 	lb_property_t *lbpp;
5390 	struct e1000_hw *hw;
5391 	uint32_t *lbmp;
5392 	uint32_t size;
5393 	uint32_t value;
5394 
5395 	hw = &Adapter->shared;
5396 
5397 	if (mp->b_cont == NULL)
5398 		return (IOC_INVAL);
5399 
5400 	if (!e1000g_check_loopback_support(hw)) {
5401 		e1000g_log(NULL, CE_WARN,
5402 		    "Loopback is not supported on e1000g%d", Adapter->instance);
5403 		return (IOC_INVAL);
5404 	}
5405 
5406 	switch (iocp->ioc_cmd) {
5407 	default:
5408 		return (IOC_INVAL);
5409 
5410 	case LB_GET_INFO_SIZE:
5411 		size = sizeof (lb_info_sz_t);
5412 		if (iocp->ioc_count != size)
5413 			return (IOC_INVAL);
5414 
5415 		rw_enter(&Adapter->chip_lock, RW_WRITER);
5416 		e1000g_get_phy_state(Adapter);
5417 
5418 		/*
5419 		 * Workaround for hardware faults. In order to get a stable
5420 		 * state of the PHY, we wait for a specific interval and
5421 		 * try again. The time delay is an empirical value based
5422 		 * on our testing.
5423 		 */
5424 		msec_delay(100);
5425 		e1000g_get_phy_state(Adapter);
5426 		rw_exit(&Adapter->chip_lock);
5427 
5428 		value = sizeof (lb_normal);
5429 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5430 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5431 		    (hw->phy.media_type == e1000_media_type_fiber) ||
5432 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5433 			value += sizeof (lb_phy);
5434 			switch (hw->mac.type) {
5435 			case e1000_82571:
5436 			case e1000_82572:
5437 			case e1000_80003es2lan:
5438 				value += sizeof (lb_external1000);
5439 				break;
5440 			}
5441 		}
5442 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5443 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5444 			value += sizeof (lb_external100);
5445 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5446 			value += sizeof (lb_external10);
5447 
5448 		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
5449 		*lbsp = value;
5450 		break;
5451 
5452 	case LB_GET_INFO:
5453 		value = sizeof (lb_normal);
5454 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5455 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5456 		    (hw->phy.media_type == e1000_media_type_fiber) ||
5457 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5458 			value += sizeof (lb_phy);
5459 			switch (hw->mac.type) {
5460 			case e1000_82571:
5461 			case e1000_82572:
5462 			case e1000_80003es2lan:
5463 				value += sizeof (lb_external1000);
5464 				break;
5465 			}
5466 		}
5467 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5468 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5469 			value += sizeof (lb_external100);
5470 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5471 			value += sizeof (lb_external10);
5472 
5473 		size = value;
5474 		if (iocp->ioc_count != size)
5475 			return (IOC_INVAL);
5476 
5477 		value = 0;
5478 		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
5479 		lbpp[value++] = lb_normal;
5480 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5481 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5482 		    (hw->phy.media_type == e1000_media_type_fiber) ||
5483 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5484 			lbpp[value++] = lb_phy;
5485 			switch (hw->mac.type) {
5486 			case e1000_82571:
5487 			case e1000_82572:
5488 			case e1000_80003es2lan:
5489 				lbpp[value++] = lb_external1000;
5490 				break;
5491 			}
5492 		}
5493 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5494 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5495 			lbpp[value++] = lb_external100;
5496 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5497 			lbpp[value++] = lb_external10;
5498 		break;
5499 
5500 	case LB_GET_MODE:
5501 		size = sizeof (uint32_t);
5502 		if (iocp->ioc_count != size)
5503 			return (IOC_INVAL);
5504 
5505 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
5506 		*lbmp = Adapter->loopback_mode;
5507 		break;
5508 
5509 	case LB_SET_MODE:
5510 		size = 0;
5511 		if (iocp->ioc_count != sizeof (uint32_t))
5512 			return (IOC_INVAL);
5513 
5514 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
5515 		if (!e1000g_set_loopback_mode(Adapter, *lbmp))
5516 			return (IOC_INVAL);
5517 		break;
5518 	}
5519 
5520 	iocp->ioc_count = size;
5521 	iocp->ioc_error = 0;
5522 
5523 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
5524 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
5525 		return (IOC_INVAL);
5526 	}
5527 
5528 	return (IOC_REPLY);
5529 }
5530 
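/*
 * Illustrative sketch (not part of the driver): a userland consumer of the
 * LB_* ioctls handled above would normally size the property list first and
 * then select a mode, using the STREAMS I_STR pattern with the lb_* types
 * (typically from <sys/netlb.h>). The device path and the open/plumbing
 * steps shown here are assumptions for illustration only.
 *
 *	int fd = open("/dev/e1000g0", O_RDWR);	(assumed device node)
 *	lb_info_sz_t sz;
 *	uint32_t mode = E1000G_LB_INTERNAL_PHY;
 *	struct strioctl ic;
 *
 *	ic.ic_cmd = LB_GET_INFO_SIZE;
 *	ic.ic_timout = -1;
 *	ic.ic_len = sizeof (sz);
 *	ic.ic_dp = (char *)&sz;
 *	(void) ioctl(fd, I_STR, &ic);	sz now holds the LB_GET_INFO size
 *
 *	ic.ic_cmd = LB_SET_MODE;
 *	ic.ic_len = sizeof (mode);
 *	ic.ic_dp = (char *)&mode;
 *	(void) ioctl(fd, I_STR, &ic);	switch to internal PHY loopback
 */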
5531 static boolean_t
5532 e1000g_check_loopback_support(struct e1000_hw *hw)
5533 {
5534 	switch (hw->mac.type) {
5535 	case e1000_82540:
5536 	case e1000_82545:
5537 	case e1000_82545_rev_3:
5538 	case e1000_82546:
5539 	case e1000_82546_rev_3:
5540 	case e1000_82541:
5541 	case e1000_82541_rev_2:
5542 	case e1000_82547:
5543 	case e1000_82547_rev_2:
5544 	case e1000_82571:
5545 	case e1000_82572:
5546 	case e1000_82573:
5547 	case e1000_82574:
5548 	case e1000_80003es2lan:
5549 	case e1000_ich9lan:
5550 	case e1000_ich10lan:
5551 		return (B_TRUE);
5552 	}
5553 	return (B_FALSE);
5554 }
5555 
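/*
 * Note: MAC types not listed above (for example the PCH families handled
 * elsewhere in this file) fall through to B_FALSE, in which case the ioctl
 * handler above logs "Loopback is not supported" and returns IOC_INVAL.
 */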
5556 static boolean_t
5557 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode)
5558 {
5559 	struct e1000_hw *hw;
5560 	int i, times;
5561 	boolean_t link_up;
5562 
5563 	if (mode == Adapter->loopback_mode)
5564 		return (B_TRUE);
5565 
5566 	hw = &Adapter->shared;
5567 	times = 0;
5568 
5569 	Adapter->loopback_mode = mode;
5570 
5571 	if (mode == E1000G_LB_NONE) {
5572 		/* Reset the chip */
5573 		hw->phy.autoneg_wait_to_complete = B_TRUE;
5574 		(void) e1000g_reset_adapter(Adapter);
5575 		hw->phy.autoneg_wait_to_complete = B_FALSE;
5576 		return (B_TRUE);
5577 	}
5578 
5579 again:
5580 
5581 	rw_enter(&Adapter->chip_lock, RW_WRITER);
5582 
5583 	switch (mode) {
5584 	default:
5585 		rw_exit(&Adapter->chip_lock);
5586 		return (B_FALSE);
5587 
5588 	case E1000G_LB_EXTERNAL_1000:
5589 		e1000g_set_external_loopback_1000(Adapter);
5590 		break;
5591 
5592 	case E1000G_LB_EXTERNAL_100:
5593 		e1000g_set_external_loopback_100(Adapter);
5594 		break;
5595 
5596 	case E1000G_LB_EXTERNAL_10:
5597 		e1000g_set_external_loopback_10(Adapter);
5598 		break;
5599 
5600 	case E1000G_LB_INTERNAL_PHY:
5601 		e1000g_set_internal_loopback(Adapter);
5602 		break;
5603 	}
5604 
5605 	times++;
5606 
5607 	rw_exit(&Adapter->chip_lock);
5608 
5609 	/* Wait for link up */
5610 	for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--)
5611 		msec_delay(100);
5612 
5613 	rw_enter(&Adapter->chip_lock, RW_WRITER);
5614 
5615 	link_up = e1000g_link_up(Adapter);
5616 
5617 	rw_exit(&Adapter->chip_lock);
5618 
5619 	if (!link_up) {
5620 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5621 		    "Failed to get the link up");
5622 		if (times < 2) {
5623 			/* Reset the link */
5624 			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5625 			    "Reset the link ...");
5626 			(void) e1000g_reset_adapter(Adapter);
5627 			goto again;
5628 		}
5629 
5630 		/*
5631 		 * Reset the driver to loopback-none when setting loopback
5632 		 * has failed for the second time.
5633 		 */
5634 		Adapter->loopback_mode = E1000G_LB_NONE;
5635 
5636 		/* Reset the chip */
5637 		hw->phy.autoneg_wait_to_complete = B_TRUE;
5638 		(void) e1000g_reset_adapter(Adapter);
5639 		hw->phy.autoneg_wait_to_complete = B_FALSE;
5640 
5641 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5642 		    "Set loopback mode failed, reset to loopback none");
5643 
5644 		return (B_FALSE);
5645 	}
5646 
5647 	return (B_TRUE);
5648 }
5649 
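/*
 * Timing note for the routine above: after programming a loopback mode it
 * sleeps for (PHY_FORCE_LIMIT * 2) intervals of 100 ms before sampling the
 * link state, retries the whole sequence once (times < 2), and only then
 * falls back to E1000G_LB_NONE and resets the chip.
 */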
5650 /*
5651  * The following loopback settings are from Intel's technical
5652  * document - "How To Loopback". All the register settings and
5653  * time delay values are directly inherited from the document
5654  * without more explanations available.
5655  */
5656 static void
5657 e1000g_set_internal_loopback(struct e1000g *Adapter)
5658 {
5659 	struct e1000_hw *hw;
5660 	uint32_t ctrl;
5661 	uint32_t status;
5662 	uint16_t phy_ctrl;
5663 	uint16_t phy_reg;
5664 	uint32_t txcw;
5665 
5666 	hw = &Adapter->shared;
5667 
5668 	/* Disable Smart Power Down */
5669 	phy_spd_state(hw, B_FALSE);
5670 
5671 	(void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
5672 	phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10);
5673 	phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000;
5674 
5675 	switch (hw->mac.type) {
5676 	case e1000_82540:
5677 	case e1000_82545:
5678 	case e1000_82545_rev_3:
5679 	case e1000_82546:
5680 	case e1000_82546_rev_3:
5681 	case e1000_82573:
5682 		/* Auto-MDI/MDIX off */
5683 		(void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
5684 		/* Reset PHY to update Auto-MDI/MDIX */
5685 		(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5686 		    phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN);
5687 		/* Reset PHY to auto-neg off and force 1000 */
5688 		(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5689 		    phy_ctrl | MII_CR_RESET);
5690 		/*
5691 		 * Disable PHY receiver for 82540/545/546 and 82573 Family.
5692 		 * See comments above e1000g_set_internal_loopback() for the
5693 		 * background.
5694 		 */
5695 		(void) e1000_write_phy_reg(hw, 29, 0x001F);
5696 		(void) e1000_write_phy_reg(hw, 30, 0x8FFC);
5697 		(void) e1000_write_phy_reg(hw, 29, 0x001A);
5698 		(void) e1000_write_phy_reg(hw, 30, 0x8FF0);
5699 		break;
5700 	case e1000_80003es2lan:
5701 		/* Force Link Up */
5702 		(void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
5703 		    0x1CC);
5704 		/* Sets PCS loopback at 1Gbs */
5705 		(void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
5706 		    0x1046);
5707 		break;
5708 	}
5709 
5710 	/*
5711 	 * The following registers must be set for the e1000_phy_bm PHY type,
5712 	 * used by e1000_82574, e1000_ich10lan and some e1000_ich9lan parts.
5713 	 * Other PHY types do not need these settings.
5714 	 */
5715 	if (hw->phy.type == e1000_phy_bm) {
5716 		/* Set Default MAC Interface speed to 1GB */
5717 		(void) e1000_read_phy_reg(hw, PHY_REG(2, 21), &phy_reg);
5718 		phy_reg &= ~0x0007;
5719 		phy_reg |= 0x006;
5720 		(void) e1000_write_phy_reg(hw, PHY_REG(2, 21), phy_reg);
5721 		/* Assert SW reset for above settings to take effect */
5722 		(void) e1000_phy_commit(hw);
5723 		msec_delay(1);
5724 		/* Force Full Duplex */
5725 		(void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5726 		(void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5727 		    phy_reg | 0x000C);
5728 		/* Set Link Up (in force link) */
5729 		(void) e1000_read_phy_reg(hw, PHY_REG(776, 16), &phy_reg);
5730 		(void) e1000_write_phy_reg(hw, PHY_REG(776, 16),
5731 		    phy_reg | 0x0040);
5732 		/* Force Link */
5733 		(void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5734 		(void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5735 		    phy_reg | 0x0040);
5736 		/* Set Early Link Enable */
5737 		(void) e1000_read_phy_reg(hw, PHY_REG(769, 20), &phy_reg);
5738 		(void) e1000_write_phy_reg(hw, PHY_REG(769, 20),
5739 		    phy_reg | 0x0400);
5740 	}
5741 
5742 	/* Set loopback */
5743 	(void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK);
5744 
5745 	msec_delay(250);
5746 
5747 	/* Now set up the MAC to the same speed/duplex as the PHY. */
5748 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5749 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
5750 	ctrl |= (E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
5751 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
5752 	    E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
5753 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
5754 
5755 	switch (hw->mac.type) {
5756 	case e1000_82540:
5757 	case e1000_82545:
5758 	case e1000_82545_rev_3:
5759 	case e1000_82546:
5760 	case e1000_82546_rev_3:
5761 		/*
5762 		 * For some SerDes devices we need to commit the writes now
5763 		 * so that the status is updated on link.
5764 		 */
5765 		if (hw->phy.media_type == e1000_media_type_internal_serdes) {
5766 			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5767 			msec_delay(100);
5768 			ctrl = E1000_READ_REG(hw, E1000_CTRL);
5769 		}
5770 
5771 		if (hw->phy.media_type == e1000_media_type_copper) {
5772 			/* Invert Loss of Signal */
5773 			ctrl |= E1000_CTRL_ILOS;
5774 		} else {
5775 			/* Set ILOS on fiber nic if half duplex is detected */
5776 			status = E1000_READ_REG(hw, E1000_STATUS);
5777 			if ((status & E1000_STATUS_FD) == 0)
5778 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5779 		}
5780 		break;
5781 
5782 	case e1000_82571:
5783 	case e1000_82572:
5784 		/*
5785 		 * The fiber/SerDes versions of this adapter do not contain an
5786 		 * accessible PHY. Therefore, loopback beyond MAC must be done
5787 		 * using SerDes analog loopback.
5788 		 */
5789 		if (hw->phy.media_type != e1000_media_type_copper) {
5790 			/* Disable autoneg by setting bit 31 of TXCW to zero */
5791 			txcw = E1000_READ_REG(hw, E1000_TXCW);
5792 			txcw &= ~((uint32_t)1 << 31);
5793 			E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5794 
5795 			/*
5796 			 * Write 0x410 to Serdes Control register
5797 			 * to enable Serdes analog loopback
5798 			 */
5799 			E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5800 			msec_delay(10);
5801 		}
5802 
5803 		status = E1000_READ_REG(hw, E1000_STATUS);
5804 		/* Set ILOS on fiber nic if half duplex is detected */
5805 		if ((hw->phy.media_type == e1000_media_type_fiber) &&
5806 		    ((status & E1000_STATUS_FD) == 0 ||
5807 		    (status & E1000_STATUS_LU) == 0))
5808 			ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5809 		else if (hw->phy.media_type == e1000_media_type_internal_serdes)
5810 			ctrl |= E1000_CTRL_SLU;
5811 		break;
5812 
5813 	case e1000_82573:
5814 		ctrl |= E1000_CTRL_ILOS;
5815 		break;
5816 	case e1000_ich9lan:
5817 	case e1000_ich10lan:
5818 		ctrl |= E1000_CTRL_SLU;
5819 		break;
5820 	}
5821 	if (hw->phy.type == e1000_phy_bm)
5822 		ctrl |= E1000_CTRL_SLU | E1000_CTRL_ILOS;
5823 
5824 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5825 }
5826 
5827 static void
5828 e1000g_set_external_loopback_1000(struct e1000g *Adapter)
5829 {
5830 	struct e1000_hw *hw;
5831 	uint32_t rctl;
5832 	uint32_t ctrl_ext;
5833 	uint32_t ctrl;
5834 	uint32_t status;
5835 	uint32_t txcw;
5836 	uint16_t phydata;
5837 
5838 	hw = &Adapter->shared;
5839 
5840 	/* Disable Smart Power Down */
5841 	phy_spd_state(hw, B_FALSE);
5842 
5843 	switch (hw->mac.type) {
5844 	case e1000_82571:
5845 	case e1000_82572:
5846 		switch (hw->phy.media_type) {
5847 		case e1000_media_type_copper:
5848 			/* Force link up (Must be done before the PHY writes) */
5849 			ctrl = E1000_READ_REG(hw, E1000_CTRL);
5850 			ctrl |= E1000_CTRL_SLU;	/* Force Link Up */
5851 			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5852 
5853 			rctl = E1000_READ_REG(hw, E1000_RCTL);
5854 			rctl |= (E1000_RCTL_EN |
5855 			    E1000_RCTL_SBP |
5856 			    E1000_RCTL_UPE |
5857 			    E1000_RCTL_MPE |
5858 			    E1000_RCTL_LPE |
5859 			    E1000_RCTL_BAM);		/* 0x803E */
5860 			E1000_WRITE_REG(hw, E1000_RCTL, rctl);
5861 
5862 			ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5863 			ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA |
5864 			    E1000_CTRL_EXT_SDP6_DATA |
5865 			    E1000_CTRL_EXT_SDP3_DATA |
5866 			    E1000_CTRL_EXT_SDP4_DIR |
5867 			    E1000_CTRL_EXT_SDP6_DIR |
5868 			    E1000_CTRL_EXT_SDP3_DIR);	/* 0x0DD0 */
5869 			E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5870 
5871 			/*
5872 			 * This sequence tunes the PHY's SDP and contains no
5873 			 * customer-settable values. For background, see the
5874 			 * comments above e1000g_set_internal_loopback().
5875 			 */
5876 			(void) e1000_write_phy_reg(hw, 0x0, 0x140);
5877 			msec_delay(10);
5878 			(void) e1000_write_phy_reg(hw, 0x9, 0x1A00);
5879 			(void) e1000_write_phy_reg(hw, 0x12, 0xC10);
5880 			(void) e1000_write_phy_reg(hw, 0x12, 0x1C10);
5881 			(void) e1000_write_phy_reg(hw, 0x1F37, 0x76);
5882 			(void) e1000_write_phy_reg(hw, 0x1F33, 0x1);
5883 			(void) e1000_write_phy_reg(hw, 0x1F33, 0x0);
5884 
5885 			(void) e1000_write_phy_reg(hw, 0x1F35, 0x65);
5886 			(void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C);
5887 			(void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC);
5888 			(void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C);
5889 			(void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC);
5890 
5891 			msec_delay(50);
5892 			break;
5893 		case e1000_media_type_fiber:
5894 		case e1000_media_type_internal_serdes:
5895 			status = E1000_READ_REG(hw, E1000_STATUS);
5896 			if (((status & E1000_STATUS_LU) == 0) ||
5897 			    (hw->phy.media_type ==
5898 			    e1000_media_type_internal_serdes)) {
5899 				ctrl = E1000_READ_REG(hw, E1000_CTRL);
5900 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5901 				E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5902 			}
5903 
5904 			/* Disable autoneg by setting bit 31 of TXCW to zero */
5905 			txcw = E1000_READ_REG(hw, E1000_TXCW);
5906 			txcw &= ~((uint32_t)1 << 31);
5907 			E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5908 
5909 			/*
5910 			 * Write 0x410 to Serdes Control register
5911 			 * to enable Serdes analog loopback
5912 			 */
5913 			E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5914 			msec_delay(10);
5915 			break;
5916 		default:
5917 			break;
5918 		}
5919 		break;
5920 	case e1000_82574:
5921 	case e1000_80003es2lan:
5922 	case e1000_ich9lan:
5923 	case e1000_ich10lan:
5924 		(void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata);
5925 		(void) e1000_write_phy_reg(hw, GG82563_REG(6, 16),
5926 		    phydata | (1 << 5));
5927 		Adapter->param_adv_autoneg = 1;
5928 		Adapter->param_adv_1000fdx = 1;
5929 		(void) e1000g_reset_link(Adapter);
5930 		break;
5931 	}
5932 }
5933 
5934 static void
5935 e1000g_set_external_loopback_100(struct e1000g *Adapter)
5936 {
5937 	struct e1000_hw *hw;
5938 	uint32_t ctrl;
5939 	uint16_t phy_ctrl;
5940 
5941 	hw = &Adapter->shared;
5942 
5943 	/* Disable Smart Power Down */
5944 	phy_spd_state(hw, B_FALSE);
5945 
5946 	phy_ctrl = (MII_CR_FULL_DUPLEX |
5947 	    MII_CR_SPEED_100);
5948 
5949 	/* Force 100/FD, reset PHY */
5950 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5951 	    phy_ctrl | MII_CR_RESET);	/* 0xA100 */
5952 	msec_delay(10);
5953 
5954 	/* Force 100/FD */
5955 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5956 	    phy_ctrl);			/* 0x2100 */
5957 	msec_delay(10);
5958 
5959 	/* Now set up the MAC to the same speed/duplex as the PHY. */
5960 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5961 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
5962 	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
5963 	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
5964 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
5965 	    E1000_CTRL_SPD_100 |	/* Force Speed to 100 */
5966 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
5967 
5968 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5969 }
5970 
5971 static void
5972 e1000g_set_external_loopback_10(struct e1000g *Adapter)
5973 {
5974 	struct e1000_hw *hw;
5975 	uint32_t ctrl;
5976 	uint16_t phy_ctrl;
5977 
5978 	hw = &Adapter->shared;
5979 
5980 	/* Disable Smart Power Down */
5981 	phy_spd_state(hw, B_FALSE);
5982 
5983 	phy_ctrl = (MII_CR_FULL_DUPLEX |
5984 	    MII_CR_SPEED_10);
5985 
5986 	/* Force 10/FD, reset PHY */
5987 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5988 	    phy_ctrl | MII_CR_RESET);	/* 0x8100 */
5989 	msec_delay(10);
5990 
5991 	/* Force 10/FD */
5992 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5993 	    phy_ctrl);			/* 0x0100 */
5994 	msec_delay(10);
5995 
5996 	/* Now set up the MAC to the same speed/duplex as the PHY. */
5997 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5998 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
5999 	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
6000 	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
6001 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
6002 	    E1000_CTRL_SPD_10 |		/* Force Speed to 10 */
6003 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
6004 
6005 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
6006 }
6007 
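/*
 * The two helpers above follow the same pattern: force the PHY to the
 * desired speed at full duplex (first with MII_CR_RESET, then without),
 * then mirror that speed/duplex in E1000_CTRL with SLU/FRCSPD/FRCDPX so
 * the MAC and PHY agree while autonegotiation is bypassed.
 */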
6008 #ifdef __sparc
6009 static boolean_t
6010 e1000g_find_mac_address(struct e1000g *Adapter)
6011 {
6012 	struct e1000_hw *hw = &Adapter->shared;
6013 	uchar_t *bytes;
6014 	struct ether_addr sysaddr;
6015 	uint_t nelts;
6016 	int err;
6017 	boolean_t found = B_FALSE;
6018 
6019 	/*
6020 	 * The "vendor's factory-set address" may already have
6021 	 * been extracted from the chip, but if the property
6022 	 * "local-mac-address" is set we use that instead.
6023 	 *
6024 	 * We check whether it looks like an array of 6
6025 	 * bytes (which it should, if OBP set it).  If we can't
6026 	 * make sense of it this way, we'll ignore it.
6027 	 */
6028 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
6029 	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
6030 	if (err == DDI_PROP_SUCCESS) {
6031 		if (nelts == ETHERADDRL) {
6032 			while (nelts--)
6033 				hw->mac.addr[nelts] = bytes[nelts];
6034 			found = B_TRUE;
6035 		}
6036 		ddi_prop_free(bytes);
6037 	}
6038 
6039 	/*
6040 	 * Look up the OBP property "local-mac-address?". If the user has set
6041 	 * 'local-mac-address? = false', use "the system address" instead.
6042 	 */
6043 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0,
6044 	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
6045 		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
6046 			if (localetheraddr(NULL, &sysaddr) != 0) {
6047 				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
6048 				found = B_TRUE;
6049 			}
6050 		}
6051 		ddi_prop_free(bytes);
6052 	}
6053 
6054 	/*
6055 	 * Finally(!), if there's a valid "mac-address" property (created
6056 	 * if we netbooted from this interface), we must use this instead
6057 	 * of any of the above to ensure that the NFS/install server doesn't
6058 	 * get confused by the address changing as Solaris takes over!
6059 	 */
6060 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
6061 	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
6062 	if (err == DDI_PROP_SUCCESS) {
6063 		if (nelts == ETHERADDRL) {
6064 			while (nelts--)
6065 				hw->mac.addr[nelts] = bytes[nelts];
6066 			found = B_TRUE;
6067 		}
6068 		ddi_prop_free(bytes);
6069 	}
6070 
6071 	if (found) {
6072 		bcopy(hw->mac.addr, hw->mac.perm_addr,
6073 		    ETHERADDRL);
6074 	}
6075 
6076 	return (found);
6077 }
6078 #endif
6079 
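/*
 * Summary of the SPARC address selection above, in increasing precedence:
 * "local-mac-address" (factory/OBP value), then the system address when
 * "local-mac-address?" is set to "false", then "mac-address" (created when
 * netbooted from this interface). Whichever wins is also copied into
 * hw->mac.perm_addr.
 */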
6080 static int
6081 e1000g_add_intrs(struct e1000g *Adapter)
6082 {
6083 	dev_info_t *devinfo;
6084 	int intr_types;
6085 	int rc;
6086 
6087 	devinfo = Adapter->dip;
6088 
6089 	/* Get supported interrupt types */
6090 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
6091 
6092 	if (rc != DDI_SUCCESS) {
6093 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6094 		    "Get supported interrupt types failed: %d\n", rc);
6095 		return (DDI_FAILURE);
6096 	}
6097 
6098 	/*
6099 	 * Based on Intel Technical Advisory document (TA-160), some older
6100 	 * Intel PCI-X NICs may "advertise" to the OS that they support MSI,
6101 	 * but in fact have problems with it.
6102 	 * So we only enable MSI for PCI-E NICs and disable MSI for older
6103 	 * PCI/PCI-X NICs.
6104 	 */
6105 	if (Adapter->shared.mac.type < e1000_82571)
6106 		Adapter->msi_enable = B_FALSE;
6107 
6108 	if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) {
6109 		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI);
6110 
6111 		if (rc != DDI_SUCCESS) {
6112 			/* EMPTY */
6113 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
6114 			    "Add MSI failed, trying Legacy interrupts\n");
6115 		} else {
6116 			Adapter->intr_type = DDI_INTR_TYPE_MSI;
6117 		}
6118 	}
6119 
6120 	if ((Adapter->intr_type == 0) &&
6121 	    (intr_types & DDI_INTR_TYPE_FIXED)) {
6122 		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED);
6123 
6124 		if (rc != DDI_SUCCESS) {
6125 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
6126 			    "Add Legacy interrupts failed\n");
6127 			return (DDI_FAILURE);
6128 		}
6129 
6130 		Adapter->intr_type = DDI_INTR_TYPE_FIXED;
6131 	}
6132 
6133 	if (Adapter->intr_type == 0) {
6134 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
6135 		    "No interrupts registered\n");
6136 		return (DDI_FAILURE);
6137 	}
6138 
6139 	return (DDI_SUCCESS);
6140 }
6141 
6142 /*
6143  * e1000g_intr_add() handles MSI/Legacy interrupts
6144  */
6145 static int
6146 e1000g_intr_add(struct e1000g *Adapter, int intr_type)
6147 {
6148 	dev_info_t *devinfo;
6149 	int count, avail, actual;
6150 	int x, y, rc, inum = 0;
6151 	int flag;
6152 	ddi_intr_handler_t *intr_handler;
6153 
6154 	devinfo = Adapter->dip;
6155 
6156 	/* get number of interrupts */
6157 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
6158 	if ((rc != DDI_SUCCESS) || (count == 0)) {
6159 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6160 		    "Get interrupt number failed. Return: %d, count: %d\n",
6161 		    rc, count);
6162 		return (DDI_FAILURE);
6163 	}
6164 
6165 	/* get number of available interrupts */
6166 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
6167 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
6168 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6169 		    "Get interrupt available number failed. "
6170 		    "Return: %d, available: %d\n", rc, avail);
6171 		return (DDI_FAILURE);
6172 	}
6173 
6174 	if (avail < count) {
6175 		/* EMPTY */
6176 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6177 		    "Interrupts count: %d, available: %d\n",
6178 		    count, avail);
6179 	}
6180 
6181 	/* Allocate an array of interrupt handles */
6182 	Adapter->intr_size = count * sizeof (ddi_intr_handle_t);
6183 	Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP);
6184 
6185 	/* Set NORMAL behavior for both MSI and FIXED interrupt */
6186 	flag = DDI_INTR_ALLOC_NORMAL;
6187 
6188 	/* call ddi_intr_alloc() */
6189 	rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum,
6190 	    count, &actual, flag);
6191 
6192 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
6193 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6194 		    "Allocate interrupts failed: %d\n", rc);
6195 
6196 		kmem_free(Adapter->htable, Adapter->intr_size);
6197 		return (DDI_FAILURE);
6198 	}
6199 
6200 	if (actual < count) {
6201 		/* EMPTY */
6202 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6203 		    "Interrupts requested: %d, received: %d\n",
6204 		    count, actual);
6205 	}
6206 
6207 	Adapter->intr_cnt = actual;
6208 
6209 	/* Get priority for first msi, assume remaining are all the same */
6210 	rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri);
6211 
6212 	if (rc != DDI_SUCCESS) {
6213 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6214 		    "Get interrupt priority failed: %d\n", rc);
6215 
6216 		/* Free already allocated intr */
6217 		for (y = 0; y < actual; y++)
6218 			(void) ddi_intr_free(Adapter->htable[y]);
6219 
6220 		kmem_free(Adapter->htable, Adapter->intr_size);
6221 		return (DDI_FAILURE);
6222 	}
6223 
6224 	/*
6225 	 * In Legacy Interrupt mode, for PCI-Express adapters, we should
6226 	 * use the interrupt service routine e1000g_intr_pciexpress()
6227 	 * to avoid interrupt stealing when sharing an interrupt with other
6228 	 * devices.
6229 	 */
6230 	if (Adapter->shared.mac.type < e1000_82571)
6231 		intr_handler = e1000g_intr;
6232 	else
6233 		intr_handler = e1000g_intr_pciexpress;
6234 
6235 	/* Call ddi_intr_add_handler() */
6236 	for (x = 0; x < actual; x++) {
6237 		rc = ddi_intr_add_handler(Adapter->htable[x],
6238 		    intr_handler, (caddr_t)Adapter, NULL);
6239 
6240 		if (rc != DDI_SUCCESS) {
6241 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6242 			    "Add interrupt handler failed: %d\n", rc);
6243 
6244 			/* Remove already added handler */
6245 			for (y = 0; y < x; y++)
6246 				(void) ddi_intr_remove_handler(
6247 				    Adapter->htable[y]);
6248 
6249 			/* Free already allocated intr */
6250 			for (y = 0; y < actual; y++)
6251 				(void) ddi_intr_free(Adapter->htable[y]);
6252 
6253 			kmem_free(Adapter->htable, Adapter->intr_size);
6254 			return (DDI_FAILURE);
6255 		}
6256 	}
6257 
6258 	rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap);
6259 
6260 	if (rc != DDI_SUCCESS) {
6261 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6262 		    "Get interrupt cap failed: %d\n", rc);
6263 
6264 		/* Free already allocated intr */
6265 		for (y = 0; y < actual; y++) {
6266 			(void) ddi_intr_remove_handler(Adapter->htable[y]);
6267 			(void) ddi_intr_free(Adapter->htable[y]);
6268 		}
6269 
6270 		kmem_free(Adapter->htable, Adapter->intr_size);
6271 		return (DDI_FAILURE);
6272 	}
6273 
6274 	return (DDI_SUCCESS);
6275 }
6276 
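/*
 * The routine above follows the standard ddi_intr_*(9F) registration
 * sequence: ddi_intr_get_nintrs() and ddi_intr_get_navail() to size the
 * request, ddi_intr_alloc() into Adapter->htable, ddi_intr_get_pri() for
 * the handler priority, ddi_intr_add_handler() per vector, and finally
 * ddi_intr_get_cap() to learn whether block enable is available. Each
 * failure path unwinds only what has already been set up.
 */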
6277 static int
6278 e1000g_rem_intrs(struct e1000g *Adapter)
6279 {
6280 	int x;
6281 	int rc;
6282 
6283 	for (x = 0; x < Adapter->intr_cnt; x++) {
6284 		rc = ddi_intr_remove_handler(Adapter->htable[x]);
6285 		if (rc != DDI_SUCCESS) {
6286 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6287 			    "Remove intr handler failed: %d\n", rc);
6288 			return (DDI_FAILURE);
6289 		}
6290 
6291 		rc = ddi_intr_free(Adapter->htable[x]);
6292 		if (rc != DDI_SUCCESS) {
6293 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6294 			    "Free intr failed: %d\n", rc);
6295 			return (DDI_FAILURE);
6296 		}
6297 	}
6298 
6299 	kmem_free(Adapter->htable, Adapter->intr_size);
6300 
6301 	return (DDI_SUCCESS);
6302 }
6303 
6304 static int
6305 e1000g_enable_intrs(struct e1000g *Adapter)
6306 {
6307 	int x;
6308 	int rc;
6309 
6310 	/* Enable interrupts */
6311 	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
6312 		/* Call ddi_intr_block_enable() for MSI */
6313 		rc = ddi_intr_block_enable(Adapter->htable,
6314 		    Adapter->intr_cnt);
6315 		if (rc != DDI_SUCCESS) {
6316 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6317 			    "Enable block intr failed: %d\n", rc);
6318 			return (DDI_FAILURE);
6319 		}
6320 	} else {
6321 		/* Call ddi_intr_enable() for Legacy/MSI non block enable */
6322 		for (x = 0; x < Adapter->intr_cnt; x++) {
6323 			rc = ddi_intr_enable(Adapter->htable[x]);
6324 			if (rc != DDI_SUCCESS) {
6325 				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6326 				    "Enable intr failed: %d\n", rc);
6327 				return (DDI_FAILURE);
6328 			}
6329 		}
6330 	}
6331 
6332 	return (DDI_SUCCESS);
6333 }
6334 
6335 static int
6336 e1000g_disable_intrs(struct e1000g *Adapter)
6337 {
6338 	int x;
6339 	int rc;
6340 
6341 	/* Disable all interrupts */
6342 	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
6343 		rc = ddi_intr_block_disable(Adapter->htable,
6344 		    Adapter->intr_cnt);
6345 		if (rc != DDI_SUCCESS) {
6346 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6347 			    "Disable block intr failed: %d\n", rc);
6348 			return (DDI_FAILURE);
6349 		}
6350 	} else {
6351 		for (x = 0; x < Adapter->intr_cnt; x++) {
6352 			rc = ddi_intr_disable(Adapter->htable[x]);
6353 			if (rc != DDI_SUCCESS) {
6354 				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6355 				    "Disable intr failed: %d\n", rc);
6356 				return (DDI_FAILURE);
6357 			}
6358 		}
6359 	}
6360 
6361 	return (DDI_SUCCESS);
6362 }
6363 
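/*
 * The enable/disable routines above honor DDI_INTR_FLAG_BLOCK: when that
 * capability is present (typical for MSI), ddi_intr_block_enable() and
 * ddi_intr_block_disable() act on the whole handle array at once;
 * otherwise each vector is toggled individually with ddi_intr_enable()
 * and ddi_intr_disable().
 */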
6364 /*
6365  * e1000g_get_phy_state - get the state of PHY registers, save in the adapter
6366  */
6367 static void
6368 e1000g_get_phy_state(struct e1000g *Adapter)
6369 {
6370 	struct e1000_hw *hw = &Adapter->shared;
6371 
6372 	if (hw->phy.media_type == e1000_media_type_copper) {
6373 		(void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl);
6374 		(void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status);
6375 		(void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
6376 		    &Adapter->phy_an_adv);
6377 		(void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP,
6378 		    &Adapter->phy_an_exp);
6379 		(void) e1000_read_phy_reg(hw, PHY_EXT_STATUS,
6380 		    &Adapter->phy_ext_status);
6381 		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL,
6382 		    &Adapter->phy_1000t_ctrl);
6383 		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS,
6384 		    &Adapter->phy_1000t_status);
6385 		(void) e1000_read_phy_reg(hw, PHY_LP_ABILITY,
6386 		    &Adapter->phy_lp_able);
6387 
6388 		Adapter->param_autoneg_cap =
6389 		    (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
6390 		Adapter->param_pause_cap =
6391 		    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
6392 		Adapter->param_asym_pause_cap =
6393 		    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
6394 		Adapter->param_1000fdx_cap =
6395 		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
6396 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
6397 		Adapter->param_1000hdx_cap =
6398 		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
6399 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
6400 		Adapter->param_100t4_cap =
6401 		    (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
6402 		Adapter->param_100fdx_cap =
6403 		    ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
6404 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
6405 		Adapter->param_100hdx_cap =
6406 		    ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
6407 		    (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
6408 		Adapter->param_10fdx_cap =
6409 		    (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
6410 		Adapter->param_10hdx_cap =
6411 		    (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
6412 
6413 		Adapter->param_adv_autoneg = hw->mac.autoneg;
6414 		Adapter->param_adv_pause =
6415 		    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
6416 		Adapter->param_adv_asym_pause =
6417 		    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
6418 		Adapter->param_adv_1000hdx =
6419 		    (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
6420 		Adapter->param_adv_100t4 =
6421 		    (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
6422 		if (Adapter->param_adv_autoneg == 1) {
6423 			Adapter->param_adv_1000fdx =
6424 			    (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS)
6425 			    ? 1 : 0;
6426 			Adapter->param_adv_100fdx =
6427 			    (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS)
6428 			    ? 1 : 0;
6429 			Adapter->param_adv_100hdx =
6430 			    (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS)
6431 			    ? 1 : 0;
6432 			Adapter->param_adv_10fdx =
6433 			    (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
6434 			Adapter->param_adv_10hdx =
6435 			    (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
6436 		}
6437 
6438 		Adapter->param_lp_autoneg =
6439 		    (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
6440 		Adapter->param_lp_pause =
6441 		    (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
6442 		Adapter->param_lp_asym_pause =
6443 		    (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
6444 		Adapter->param_lp_1000fdx =
6445 		    (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
6446 		Adapter->param_lp_1000hdx =
6447 		    (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
6448 		Adapter->param_lp_100t4 =
6449 		    (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
6450 		Adapter->param_lp_100fdx =
6451 		    (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
6452 		Adapter->param_lp_100hdx =
6453 		    (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
6454 		Adapter->param_lp_10fdx =
6455 		    (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
6456 		Adapter->param_lp_10hdx =
6457 		    (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
6458 	} else {
6459 		/*
6460 		 * A 1Gig fiber adapter only offers 1Gig full duplex, meaning
6461 		 * it can only work with a 1Gig full-duplex link partner.
6462 		 */
6463 		Adapter->param_autoneg_cap = 0;
6464 		Adapter->param_pause_cap = 1;
6465 		Adapter->param_asym_pause_cap = 1;
6466 		Adapter->param_1000fdx_cap = 1;
6467 		Adapter->param_1000hdx_cap = 0;
6468 		Adapter->param_100t4_cap = 0;
6469 		Adapter->param_100fdx_cap = 0;
6470 		Adapter->param_100hdx_cap = 0;
6471 		Adapter->param_10fdx_cap = 0;
6472 		Adapter->param_10hdx_cap = 0;
6473 
6474 		Adapter->param_adv_autoneg = 0;
6475 		Adapter->param_adv_pause = 1;
6476 		Adapter->param_adv_asym_pause = 1;
6477 		Adapter->param_adv_1000fdx = 1;
6478 		Adapter->param_adv_1000hdx = 0;
6479 		Adapter->param_adv_100t4 = 0;
6480 		Adapter->param_adv_100fdx = 0;
6481 		Adapter->param_adv_100hdx = 0;
6482 		Adapter->param_adv_10fdx = 0;
6483 		Adapter->param_adv_10hdx = 0;
6484 
6485 		Adapter->param_lp_autoneg = 0;
6486 		Adapter->param_lp_pause = 0;
6487 		Adapter->param_lp_asym_pause = 0;
6488 		Adapter->param_lp_1000fdx = 0;
6489 		Adapter->param_lp_1000hdx = 0;
6490 		Adapter->param_lp_100t4 = 0;
6491 		Adapter->param_lp_100fdx = 0;
6492 		Adapter->param_lp_100hdx = 0;
6493 		Adapter->param_lp_10fdx = 0;
6494 		Adapter->param_lp_10hdx = 0;
6495 	}
6496 }
6497 
6498 /*
6499  * FMA support
6500  */
6501 
6502 int
6503 e1000g_check_acc_handle(ddi_acc_handle_t handle)
6504 {
6505 	ddi_fm_error_t de;
6506 
6507 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
6508 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
6509 	return (de.fme_status);
6510 }
6511 
6512 int
6513 e1000g_check_dma_handle(ddi_dma_handle_t handle)
6514 {
6515 	ddi_fm_error_t de;
6516 
6517 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
6518 	return (de.fme_status);
6519 }
6520 
6521 /*
6522  * The IO fault service error handling callback function
6523  */
6524 /* ARGSUSED2 */
6525 static int
6526 e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
6527 {
6528 	/*
6529 	 * As the driver can always deal with an error in any DMA or
6530 	 * access handle, we can just return the fme_status value.
6531 	 */
6532 	pci_ereport_post(dip, err, NULL);
6533 	return (err->fme_status);
6534 }
6535 
6536 static void
6537 e1000g_fm_init(struct e1000g *Adapter)
6538 {
6539 	ddi_iblock_cookie_t iblk;
6540 	int fma_dma_flag;
6541 
6542 	/* Only register with IO Fault Services if we have some capability */
6543 	if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
6544 		e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
6545 	} else {
6546 		e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
6547 	}
6548 
6549 	if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
6550 		fma_dma_flag = 1;
6551 	} else {
6552 		fma_dma_flag = 0;
6553 	}
6554 
6555 	(void) e1000g_set_fma_flags(fma_dma_flag);
6556 
6557 	if (Adapter->fm_capabilities) {
6558 
6559 		/* Register capabilities with IO Fault Services */
6560 		ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk);
6561 
6562 		/*
6563 		 * Initialize pci ereport capabilities if ereport capable
6564 		 */
6565 		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
6566 		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6567 			pci_ereport_setup(Adapter->dip);
6568 
6569 		/*
6570 		 * Register error callback if error callback capable
6571 		 */
6572 		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6573 			ddi_fm_handler_register(Adapter->dip,
6574 			    e1000g_fm_error_cb, (void*) Adapter);
6575 	}
6576 }
6577 
6578 static void
6579 e1000g_fm_fini(struct e1000g *Adapter)
6580 {
6581 	/* Only unregister FMA capabilities if we registered some */
6582 	if (Adapter->fm_capabilities) {
6583 
6584 		/*
6585 		 * Release any resources allocated by pci_ereport_setup()
6586 		 */
6587 		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
6588 		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6589 			pci_ereport_teardown(Adapter->dip);
6590 
6591 		/*
6592 		 * Un-register error callback if error callback capable
6593 		 */
6594 		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6595 			ddi_fm_handler_unregister(Adapter->dip);
6596 
6597 		/* Unregister from IO Fault Services */
6598 		mutex_enter(&e1000g_rx_detach_lock);
6599 		ddi_fm_fini(Adapter->dip);
6600 		if (Adapter->priv_dip != NULL) {
6601 			DEVI(Adapter->priv_dip)->devi_fmhdl = NULL;
6602 		}
6603 		mutex_exit(&e1000g_rx_detach_lock);
6604 	}
6605 }
6606 
6607 void
6608 e1000g_fm_ereport(struct e1000g *Adapter, char *detail)
6609 {
6610 	uint64_t ena;
6611 	char buf[FM_MAX_CLASS];
6612 
6613 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
6614 	ena = fm_ena_generate(0, FM_ENA_FMT1);
6615 	if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) {
6616 		ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP,
6617 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
6618 	}
6619 }
6620 
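/*
 * Typical caller pattern, as seen in the ioctl path earlier in this file:
 * verify a handle and degrade service on failure, e.g.
 *
 *	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
 *		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
 *		return (IOC_INVAL);
 *	}
 *
 * e1000g_fm_ereport() itself is only a thin wrapper that posts a
 * "DDI_FM_DEVICE.<detail>" ereport when the instance is ereport-capable.
 */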
6621 /*
6622  * quiesce(9E) entry point.
6623  *
6624  * This function is called when the system is single-threaded at high
6625  * PIL with preemption disabled. Therefore, this function must not be
6626  * blocked.
6627  *
6628  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6629  * DDI_FAILURE indicates an error condition and should almost never happen.
6630  */
6631 static int
6632 e1000g_quiesce(dev_info_t *devinfo)
6633 {
6634 	struct e1000g *Adapter;
6635 
6636 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
6637 
6638 	if (Adapter == NULL)
6639 		return (DDI_FAILURE);
6640 
6641 	e1000g_clear_all_interrupts(Adapter);
6642 
6643 	(void) e1000_reset_hw(&Adapter->shared);
6644 
6645 	/* Setup our HW Tx Head & Tail descriptor pointers */
6646 	E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
6647 	E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
6648 
6649 	/* Setup our HW Rx Head & Tail descriptor pointers */
6650 	E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0);
6651 	E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0);
6652 
6653 	return (DDI_SUCCESS);
6654 }
6655 
6656 /*
6657  * synchronize the adv* and en* parameters.
6658  *
6659  * See comments in <sys/dld.h> for details of the *_en_*
6660  * parameters. The usage of ndd for setting adv parameters will
6661  * synchronize all the en parameters with the e1000g parameters,
6662  * implicitly disabling any settings made via dladm.
6663  */
6664 static void
6665 e1000g_param_sync(struct e1000g *Adapter)
6666 {
6667 	Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx;
6668 	Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx;
6669 	Adapter->param_en_100fdx = Adapter->param_adv_100fdx;
6670 	Adapter->param_en_100hdx = Adapter->param_adv_100hdx;
6671 	Adapter->param_en_10fdx = Adapter->param_adv_10fdx;
6672 	Adapter->param_en_10hdx = Adapter->param_adv_10hdx;
6673 }
6674 
6675 /*
6676  * e1000g_get_driver_control - tell manageability firmware that the driver
6677  * has control.
6678  */
6679 static void
6680 e1000g_get_driver_control(struct e1000_hw *hw)
6681 {
6682 	uint32_t ctrl_ext;
6683 	uint32_t swsm;
6684 
6685 	/* tell manageability firmware the driver has taken over */
6686 	switch (hw->mac.type) {
6687 	case e1000_82573:
6688 		swsm = E1000_READ_REG(hw, E1000_SWSM);
6689 		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);
6690 		break;
6691 	case e1000_82571:
6692 	case e1000_82572:
6693 	case e1000_82574:
6694 	case e1000_80003es2lan:
6695 	case e1000_ich8lan:
6696 	case e1000_ich9lan:
6697 	case e1000_ich10lan:
6698 	case e1000_pchlan:
6699 	case e1000_pch2lan:
6700 		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6701 		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6702 		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
6703 		break;
6704 	default:
6705 		/* no manageability firmware: do nothing */
6706 		break;
6707 	}
6708 }
6709 
6710 /*
6711  * e1000g_release_driver_control - tell manageability firmware that the driver
6712  * has released control.
6713  */
6714 static void
6715 e1000g_release_driver_control(struct e1000_hw *hw)
6716 {
6717 	uint32_t ctrl_ext;
6718 	uint32_t swsm;
6719 
6720 	/* tell manageability firmware the driver has released control */
6721 	switch (hw->mac.type) {
6722 	case e1000_82573:
6723 		swsm = E1000_READ_REG(hw, E1000_SWSM);
6724 		E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
6725 		break;
6726 	case e1000_82571:
6727 	case e1000_82572:
6728 	case e1000_82574:
6729 	case e1000_80003es2lan:
6730 	case e1000_ich8lan:
6731 	case e1000_ich9lan:
6732 	case e1000_ich10lan:
6733 	case e1000_pchlan:
6734 	case e1000_pch2lan:
6735 		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6736 		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6737 		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
6738 		break;
6739 	default:
6740 		/* no manageability firmware: do nothing */
6741 		break;
6742 	}
6743 }
6744 
6745 /*
6746  * Restore e1000g promiscuous mode.
6747  */
6748 static void
6749 e1000g_restore_promisc(struct e1000g *Adapter)
6750 {
6751 	if (Adapter->e1000g_promisc) {
6752 		uint32_t rctl;
6753 
6754 		rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
6755 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
6756 		E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
6757 	}
6758 }
6759