xref: /illumos-gate/usr/src/uts/common/io/e1000g/e1000g_main.c (revision 88ecc943b4eb72f7c4fbbd8435997b85ef171fc3)
1 /*
2  * This file is provided under a CDDLv1 license.  When using or
3  * redistributing this file, you may do so under this license.
4  * In redistributing this file this license must be included
5  * and no other modification of this header file is permitted.
6  *
7  * CDDL LICENSE SUMMARY
8  *
9  * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
10  *
11  * The contents of this file are subject to the terms of Version
12  * 1.0 of the Common Development and Distribution License (the "License").
13  *
14  * You should have received a copy of the License with this software.
15  * You can obtain a copy of the License at
16  *	http://www.opensolaris.org/os/licensing.
17  * See the License for the specific language governing permissions
18  * and limitations under the License.
19  */
20 
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * **********************************************************************
28  *									*
29  * Module Name:								*
30  *   e1000g_main.c							*
31  *									*
32  * Abstract:								*
33  *   This file contains the interface routines for the Solaris OS.	*
34  *   It has all DDI entry point routines and GLD entry point routines.	*
35  *									*
36  *   This file also contains routines that take care of		*
37  *   initialization, uninitialization and interrupt handling.	*
38  *									*
39  * **********************************************************************
40  */
41 
42 #include <sys/dlpi.h>
43 #include <sys/mac.h>
44 #include "e1000g_sw.h"
45 #include "e1000g_debug.h"
46 
47 static char ident[] = "Intel PRO/1000 Ethernet";
48 static char e1000g_string[] = "Intel(R) PRO/1000 Network Connection";
49 static char e1000g_version[] = "Driver Ver. 5.3.13";
50 
51 /*
52  * Prototypes for DDI entry points
53  */
54 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
55 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);
56 static int e1000g_quiesce(dev_info_t *);
57 
58 /*
59  * Prototypes for the init and intr routines
60  */
61 static int e1000g_resume(dev_info_t *);
62 static int e1000g_suspend(dev_info_t *);
63 static uint_t e1000g_intr_pciexpress(caddr_t);
64 static uint_t e1000g_intr(caddr_t);
65 static void e1000g_intr_work(struct e1000g *, uint32_t);
66 #pragma inline(e1000g_intr_work)
67 static int e1000g_init(struct e1000g *);
68 static int e1000g_start(struct e1000g *, boolean_t);
69 static void e1000g_stop(struct e1000g *, boolean_t);
70 static int e1000g_m_start(void *);
71 static void e1000g_m_stop(void *);
72 static int e1000g_m_promisc(void *, boolean_t);
73 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
74 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
75 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
76 static int e1000g_m_setprop(void *, const char *, mac_prop_id_t,
77     uint_t, const void *);
78 static int e1000g_m_getprop(void *, const char *, mac_prop_id_t,
79     uint_t, uint_t, void *, uint_t *);
80 static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t,
81     const void *);
82 static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t,
83     uint_t, void *, uint_t *);
84 static void e1000g_init_locks(struct e1000g *);
85 static void e1000g_destroy_locks(struct e1000g *);
86 static int e1000g_identify_hardware(struct e1000g *);
87 static int e1000g_regs_map(struct e1000g *);
88 static int e1000g_set_driver_params(struct e1000g *);
89 static void e1000g_set_bufsize(struct e1000g *);
90 static int e1000g_register_mac(struct e1000g *);
91 static boolean_t e1000g_rx_drain(struct e1000g *);
92 static boolean_t e1000g_tx_drain(struct e1000g *);
93 static void e1000g_init_unicst(struct e1000g *);
94 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int);
95 static int e1000g_alloc_rx_data(struct e1000g *);
96 static void e1000g_release_multicast(struct e1000g *);
97 
98 /*
99  * Local routines
100  */
101 static boolean_t e1000g_reset_adapter(struct e1000g *);
102 static void e1000g_tx_clean(struct e1000g *);
103 static void e1000g_rx_clean(struct e1000g *);
104 static void e1000g_link_timer(void *);
105 static void e1000g_local_timer(void *);
106 static boolean_t e1000g_link_check(struct e1000g *);
107 static boolean_t e1000g_stall_check(struct e1000g *);
108 static void e1000g_smartspeed(struct e1000g *);
109 static void e1000g_get_conf(struct e1000g *);
110 static int e1000g_get_prop(struct e1000g *, char *, int, int, int);
111 static void enable_watchdog_timer(struct e1000g *);
112 static void disable_watchdog_timer(struct e1000g *);
113 static void start_watchdog_timer(struct e1000g *);
114 static void restart_watchdog_timer(struct e1000g *);
115 static void stop_watchdog_timer(struct e1000g *);
116 static void stop_link_timer(struct e1000g *);
117 static void stop_82547_timer(e1000g_tx_ring_t *);
118 static void e1000g_force_speed_duplex(struct e1000g *);
119 static void e1000g_get_max_frame_size(struct e1000g *);
120 static boolean_t is_valid_mac_addr(uint8_t *);
121 static void e1000g_unattach(dev_info_t *, struct e1000g *);
122 #ifdef E1000G_DEBUG
123 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *);
124 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *);
125 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *);
126 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *);
127 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *,
128     struct iocblk *, mblk_t *);
129 #endif
130 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *,
131     struct iocblk *, mblk_t *);
132 static boolean_t e1000g_check_loopback_support(struct e1000_hw *);
133 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t);
134 static void e1000g_set_internal_loopback(struct e1000g *);
135 static void e1000g_set_external_loopback_1000(struct e1000g *);
136 static void e1000g_set_external_loopback_100(struct e1000g *);
137 static void e1000g_set_external_loopback_10(struct e1000g *);
138 static int e1000g_add_intrs(struct e1000g *);
139 static int e1000g_intr_add(struct e1000g *, int);
140 static int e1000g_rem_intrs(struct e1000g *);
141 static int e1000g_enable_intrs(struct e1000g *);
142 static int e1000g_disable_intrs(struct e1000g *);
143 static boolean_t e1000g_link_up(struct e1000g *);
144 #ifdef __sparc
145 static boolean_t e1000g_find_mac_address(struct e1000g *);
146 #endif
147 static void e1000g_get_phy_state(struct e1000g *);
148 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
149     const void *impl_data);
150 static void e1000g_fm_init(struct e1000g *Adapter);
151 static void e1000g_fm_fini(struct e1000g *Adapter);
152 static int e1000g_get_def_val(struct e1000g *, mac_prop_id_t, uint_t, void *);
153 static void e1000g_param_sync(struct e1000g *);
154 static void e1000g_get_driver_control(struct e1000_hw *);
155 static void e1000g_release_driver_control(struct e1000_hw *);
156 static void e1000g_restore_promisc(struct e1000g *Adapter);
157 
158 mac_priv_prop_t e1000g_priv_props[] = {
159 	{"_tx_bcopy_threshold", MAC_PROP_PERM_RW},
160 	{"_tx_interrupt_enable", MAC_PROP_PERM_RW},
161 	{"_tx_intr_delay", MAC_PROP_PERM_RW},
162 	{"_tx_intr_abs_delay", MAC_PROP_PERM_RW},
163 	{"_rx_bcopy_threshold", MAC_PROP_PERM_RW},
164 	{"_max_num_rcv_packets", MAC_PROP_PERM_RW},
165 	{"_rx_intr_delay", MAC_PROP_PERM_RW},
166 	{"_rx_intr_abs_delay", MAC_PROP_PERM_RW},
167 	{"_intr_throttling_rate", MAC_PROP_PERM_RW},
168 	{"_intr_adaptive", MAC_PROP_PERM_RW},
169 	{"_adv_pause_cap", MAC_PROP_PERM_READ},
170 	{"_adv_asym_pause_cap", MAC_PROP_PERM_READ},
171 };
172 #define	E1000G_MAX_PRIV_PROPS	\
173 	(sizeof (e1000g_priv_props)/sizeof (mac_priv_prop_t))
174 
175 
176 static struct cb_ops cb_ws_ops = {
177 	nulldev,		/* cb_open */
178 	nulldev,		/* cb_close */
179 	nodev,			/* cb_strategy */
180 	nodev,			/* cb_print */
181 	nodev,			/* cb_dump */
182 	nodev,			/* cb_read */
183 	nodev,			/* cb_write */
184 	nodev,			/* cb_ioctl */
185 	nodev,			/* cb_devmap */
186 	nodev,			/* cb_mmap */
187 	nodev,			/* cb_segmap */
188 	nochpoll,		/* cb_chpoll */
189 	ddi_prop_op,		/* cb_prop_op */
190 	NULL,			/* cb_stream */
191 	D_MP | D_HOTPLUG,	/* cb_flag */
192 	CB_REV,			/* cb_rev */
193 	nodev,			/* cb_aread */
194 	nodev			/* cb_awrite */
195 };
196 
197 static struct dev_ops ws_ops = {
198 	DEVO_REV,		/* devo_rev */
199 	0,			/* devo_refcnt */
200 	NULL,			/* devo_getinfo */
201 	nulldev,		/* devo_identify */
202 	nulldev,		/* devo_probe */
203 	e1000g_attach,		/* devo_attach */
204 	e1000g_detach,		/* devo_detach */
205 	nodev,			/* devo_reset */
206 	&cb_ws_ops,		/* devo_cb_ops */
207 	NULL,			/* devo_bus_ops */
208 	ddi_power,		/* devo_power */
209 	e1000g_quiesce		/* devo_quiesce */
210 };
211 
212 static struct modldrv modldrv = {
213 	&mod_driverops,		/* Type of module.  This one is a driver */
214 	ident,			/* Description string */
215 	&ws_ops,		/* driver ops */
216 };
217 
218 static struct modlinkage modlinkage = {
219 	MODREV_1, &modldrv, NULL
220 };
221 
222 /* Access attributes for register mapping */
223 static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
224 	DDI_DEVICE_ATTR_V0,
225 	DDI_STRUCTURE_LE_ACC,
226 	DDI_STRICTORDER_ACC,
227 	DDI_FLAGERR_ACC
228 };
229 
230 #define	E1000G_M_CALLBACK_FLAGS \
231 	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
232 
233 static mac_callbacks_t e1000g_m_callbacks = {
234 	E1000G_M_CALLBACK_FLAGS,
235 	e1000g_m_stat,
236 	e1000g_m_start,
237 	e1000g_m_stop,
238 	e1000g_m_promisc,
239 	e1000g_m_multicst,
240 	NULL,
241 	e1000g_m_tx,
242 	e1000g_m_ioctl,
243 	e1000g_m_getcapab,
244 	NULL,
245 	NULL,
246 	e1000g_m_setprop,
247 	e1000g_m_getprop
248 };
249 
250 /*
251  * Global variables
252  */
253 uint32_t e1000g_mblks_pending = 0;
254 /*
255  * Workaround for Dynamic Reconfiguration support, for x86 platform only.
256  * Here we maintain a private dev_info list if e1000g_force_detach is
257  * enabled. If we force the driver to detach while there are still some
258  * rx buffers retained in the upper layer, we have to keep a copy of the
259  * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
260  * structure will be freed after the driver is detached. However when we
261  * finally free those rx buffers released by the upper layer, we need to
262  * refer to the dev_info to free the dma buffers. So we save a copy of
263  * the dev_info for this purpose. On the x86 platform, we assume this copy
264  * of dev_info is always valid, but on the SPARC platform, it could become
265  * invalid after a system-board-level DR operation. For this reason, the
266  * global variable e1000g_force_detach must be B_FALSE on SPARC.
267  */
268 #ifdef __sparc
269 boolean_t e1000g_force_detach = B_FALSE;
270 #else
271 boolean_t e1000g_force_detach = B_TRUE;
272 #endif
273 private_devi_list_t *e1000g_private_devi_list = NULL;
274 
275 /*
276  * The mutex e1000g_rx_detach_lock is defined to protect the processing of
277  * the private dev_info list, and to serialize the processing of rx buffer
278  * freeing and rx buffer recycling.
279  */
280 kmutex_t e1000g_rx_detach_lock;
281 /*
282  * The rwlock e1000g_dma_type_lock is defined to protect the global flag
283  * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
284  * If there are many e1000g instances, the system may run out of DVMA
285  * resources during the initialization of the instances, then the flag will
286  * be changed to "USE_DMA". Because different e1000g instances are initialized
287  * in parallel, we need to use this lock to protect the flag.
288  */
289 krwlock_t e1000g_dma_type_lock;
290 
291 /*
292  * The 82546 chipset is a dual-port device; both ports share one eeprom.
293  * Based on information from Intel, the 82546 chipset has a hardware
294  * problem. When one port is being reset while the other port is trying to
295  * access the eeprom, it could cause a system hang or panic. To work around
296  * this hardware problem, we use a global mutex to prevent such operations from
297  * happening simultaneously on different instances. This workaround is applied
298  * to all the devices supported by this driver.
299  */
300 kmutex_t e1000g_nvm_lock;
301 
302 /*
303  * Loadable module configuration entry points for the driver
304  */
305 
306 /*
307  * _init - module initialization
308  */
309 int
310 _init(void)
311 {
312 	int status;
313 
314 	mac_init_ops(&ws_ops, WSNAME);
315 	status = mod_install(&modlinkage);
316 	if (status != DDI_SUCCESS)
317 		mac_fini_ops(&ws_ops);
318 	else {
319 		mutex_init(&e1000g_rx_detach_lock, NULL, MUTEX_DRIVER, NULL);
320 		rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL);
321 		mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL);
322 	}
323 
324 	return (status);
325 }
326 
327 /*
328  * _fini - module finalization
329  */
330 int
331 _fini(void)
332 {
333 	int status;
334 
335 	if (e1000g_mblks_pending != 0)
336 		return (EBUSY);
337 
338 	status = mod_remove(&modlinkage);
339 	if (status == DDI_SUCCESS) {
340 		mac_fini_ops(&ws_ops);
341 
342 		if (e1000g_force_detach) {
343 			private_devi_list_t *devi_node;
344 
345 			mutex_enter(&e1000g_rx_detach_lock);
346 			while (e1000g_private_devi_list != NULL) {
347 				devi_node = e1000g_private_devi_list;
348 				e1000g_private_devi_list =
349 				    e1000g_private_devi_list->next;
350 
351 				kmem_free(devi_node->priv_dip,
352 				    sizeof (struct dev_info));
353 				kmem_free(devi_node,
354 				    sizeof (private_devi_list_t));
355 			}
356 			mutex_exit(&e1000g_rx_detach_lock);
357 		}
358 
359 		mutex_destroy(&e1000g_rx_detach_lock);
360 		rw_destroy(&e1000g_dma_type_lock);
361 		mutex_destroy(&e1000g_nvm_lock);
362 	}
363 
364 	return (status);
365 }
366 
367 /*
368  * _info - module information
369  */
370 int
371 _info(struct modinfo *modinfop)
372 {
373 	return (mod_info(&modlinkage, modinfop));
374 }
375 
376 /*
377  * e1000g_attach - driver attach
378  *
379  * This function is the device-specific initialization entry
380  * point. This entry point is required and must be written.
381  * The DDI_ATTACH command must be provided in the attach entry
382  * point. When attach() is called with cmd set to DDI_ATTACH,
383  * all normal kernel services (such as kmem_alloc(9F)) are
384  * available for use by the driver.
385  *
386  * The attach() function will be called once for each instance
387  * of  the  device  on  the  system with cmd set to DDI_ATTACH.
388  * Until attach() succeeds, the only driver entry points which
389  * may be called are open(9E) and getinfo(9E).
390  */
391 static int
392 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
393 {
394 	struct e1000g *Adapter;
395 	struct e1000_hw *hw;
396 	struct e1000g_osdep *osdep;
397 	int instance;
398 
399 	switch (cmd) {
400 	default:
401 		e1000g_log(NULL, CE_WARN,
402 		    "Unsupported command sent to e1000g_attach... ");
403 		return (DDI_FAILURE);
404 
405 	case DDI_RESUME:
406 		return (e1000g_resume(devinfo));
407 
408 	case DDI_ATTACH:
409 		break;
410 	}
411 
412 	/*
413 	 * get device instance number
414 	 */
415 	instance = ddi_get_instance(devinfo);
416 
417 	/*
418 	 * Allocate soft data structure
419 	 */
420 	Adapter =
421 	    (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP);
422 
423 	Adapter->dip = devinfo;
424 	Adapter->instance = instance;
425 	Adapter->tx_ring->adapter = Adapter;
426 	Adapter->rx_ring->adapter = Adapter;
427 
428 	hw = &Adapter->shared;
429 	osdep = &Adapter->osdep;
430 	hw->back = osdep;
431 	osdep->adapter = Adapter;
432 
433 	ddi_set_driver_private(devinfo, (caddr_t)Adapter);
434 
435 	/*
436 	 * Initialize for fma support
437 	 */
438 	Adapter->fm_capabilities = e1000g_get_prop(Adapter, "fm-capable",
439 	    0, 0x0f,
440 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
441 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
442 	e1000g_fm_init(Adapter);
443 	Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT;
444 
445 	/*
446 	 * PCI Configure
447 	 */
448 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
449 		e1000g_log(Adapter, CE_WARN, "PCI configuration failed");
450 		goto attach_fail;
451 	}
452 	Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
453 
454 	/*
455 	 * Setup hardware
456 	 */
457 	if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) {
458 		e1000g_log(Adapter, CE_WARN, "Identify hardware failed");
459 		goto attach_fail;
460 	}
461 
462 	/*
463 	 * Map in the device registers.
464 	 */
465 	if (e1000g_regs_map(Adapter) != DDI_SUCCESS) {
466 		e1000g_log(Adapter, CE_WARN, "Mapping registers failed");
467 		goto attach_fail;
468 	}
469 	Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
470 
471 	/*
472 	 * Initialize driver parameters
473 	 */
474 	if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) {
475 		goto attach_fail;
476 	}
477 	Adapter->attach_progress |= ATTACH_PROGRESS_SETUP;
478 
479 	if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
480 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
481 		goto attach_fail;
482 	}
483 
484 	/*
485 	 * Initialize interrupts
486 	 */
487 	if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) {
488 		e1000g_log(Adapter, CE_WARN, "Add interrupts failed");
489 		goto attach_fail;
490 	}
491 	Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
492 
493 	/*
494 	 * Initialize mutexes for this device.
495 	 * Do this before enabling the interrupt handler and
496 	 * registering the softint, to avoid the condition where the
497 	 * interrupt handler could try to use an uninitialized mutex.
498 	 */
499 	e1000g_init_locks(Adapter);
500 	Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS;
501 
502 	/*
503 	 * Initialize Driver Counters
504 	 */
505 	if (e1000g_init_stats(Adapter) != DDI_SUCCESS) {
506 		e1000g_log(Adapter, CE_WARN, "Init stats failed");
507 		goto attach_fail;
508 	}
509 	Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS;
510 
511 	/*
512 	 * Initialize chip hardware and software structures
513 	 */
514 	rw_enter(&Adapter->chip_lock, RW_WRITER);
515 	if (e1000g_init(Adapter) != DDI_SUCCESS) {
516 		rw_exit(&Adapter->chip_lock);
517 		e1000g_log(Adapter, CE_WARN, "Adapter initialization failed");
518 		goto attach_fail;
519 	}
520 	rw_exit(&Adapter->chip_lock);
521 	Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
522 
523 	/*
524 	 * Register the driver to the MAC
525 	 */
526 	if (e1000g_register_mac(Adapter) != DDI_SUCCESS) {
527 		e1000g_log(Adapter, CE_WARN, "Register MAC failed");
528 		goto attach_fail;
529 	}
530 	Adapter->attach_progress |= ATTACH_PROGRESS_MAC;
531 
532 	/*
533 	 * Now that mutex locks are initialized, and the chip is also
534 	 * initialized, enable interrupts.
535 	 */
536 	if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) {
537 		e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed");
538 		goto attach_fail;
539 	}
540 	Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
541 
542 	/*
543 	 * If e1000g_force_detach is enabled, we will create a new entry in
544 	 * the global private dip list, which maintains the priv_dip for DR
545 	 * support after the driver is detached.
546 	 */
547 	if (e1000g_force_detach) {
548 		private_devi_list_t *devi_node;
549 
550 		Adapter->priv_dip =
551 		    kmem_zalloc(sizeof (struct dev_info), KM_SLEEP);
552 		bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip),
553 		    sizeof (struct dev_info));
554 
555 		devi_node =
556 		    kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP);
557 
558 		mutex_enter(&e1000g_rx_detach_lock);
559 		devi_node->priv_dip = Adapter->priv_dip;
560 		devi_node->flag = E1000G_PRIV_DEVI_ATTACH;
561 		devi_node->pending_rx_count = 0;
562 
563 		Adapter->priv_devi_node = devi_node;
564 
565 		if (e1000g_private_devi_list == NULL) {
566 			devi_node->prev = NULL;
567 			devi_node->next = NULL;
568 			e1000g_private_devi_list = devi_node;
569 		} else {
570 			devi_node->prev = NULL;
571 			devi_node->next = e1000g_private_devi_list;
572 			e1000g_private_devi_list->prev = devi_node;
573 			e1000g_private_devi_list = devi_node;
574 		}
575 		mutex_exit(&e1000g_rx_detach_lock);
576 	}
577 
578 	cmn_err(CE_CONT, "!%s, %s\n", e1000g_string, e1000g_version);
579 	Adapter->e1000g_state = E1000G_INITIALIZED;
580 
581 	return (DDI_SUCCESS);
582 
583 attach_fail:
584 	e1000g_unattach(devinfo, Adapter);
585 	return (DDI_FAILURE);
586 }
587 
588 static int
589 e1000g_register_mac(struct e1000g *Adapter)
590 {
591 	struct e1000_hw *hw = &Adapter->shared;
592 	mac_register_t *mac;
593 	int err;
594 
595 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
596 		return (DDI_FAILURE);
597 
598 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
599 	mac->m_driver = Adapter;
600 	mac->m_dip = Adapter->dip;
601 	mac->m_src_addr = hw->mac.addr;
602 	mac->m_callbacks = &e1000g_m_callbacks;
603 	mac->m_min_sdu = 0;
604 	mac->m_max_sdu = Adapter->default_mtu;
605 	mac->m_margin = VLAN_TAGSZ;
606 	mac->m_priv_props = e1000g_priv_props;
607 	mac->m_priv_prop_count = E1000G_MAX_PRIV_PROPS;
608 	mac->m_v12n = MAC_VIRT_LEVEL1;
609 
610 	err = mac_register(mac, &Adapter->mh);
611 	mac_free(mac);
612 
613 	return (err == 0 ? DDI_SUCCESS : DDI_FAILURE);
614 }
615 
616 static int
617 e1000g_identify_hardware(struct e1000g *Adapter)
618 {
619 	struct e1000_hw *hw = &Adapter->shared;
620 	struct e1000g_osdep *osdep = &Adapter->osdep;
621 
622 	/* Get the device id */
623 	hw->vendor_id =
624 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
625 	hw->device_id =
626 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
627 	hw->revision_id =
628 	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
629 	hw->subsystem_device_id =
630 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
631 	hw->subsystem_vendor_id =
632 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
633 
634 	if (e1000_set_mac_type(hw) != E1000_SUCCESS) {
635 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
636 		    "MAC type could not be set properly.");
637 		return (DDI_FAILURE);
638 	}
639 
640 	return (DDI_SUCCESS);
641 }
642 
643 static int
644 e1000g_regs_map(struct e1000g *Adapter)
645 {
646 	dev_info_t *devinfo = Adapter->dip;
647 	struct e1000_hw *hw = &Adapter->shared;
648 	struct e1000g_osdep *osdep = &Adapter->osdep;
649 	off_t mem_size;
650 
651 	/* Get size of adapter register memory */
652 	if (ddi_dev_regsize(devinfo, ADAPTER_REG_SET, &mem_size) !=
653 	    DDI_SUCCESS) {
654 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
655 		    "ddi_dev_regsize for registers failed");
656 		return (DDI_FAILURE);
657 	}
658 
659 	/* Map adapter register memory */
660 	if ((ddi_regs_map_setup(devinfo, ADAPTER_REG_SET,
661 	    (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr,
662 	    &osdep->reg_handle)) != DDI_SUCCESS) {
663 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
664 		    "ddi_regs_map_setup for registers failed");
665 		goto regs_map_fail;
666 	}
667 
668 	/* ICH needs to map flash memory */
669 	if (hw->mac.type == e1000_ich8lan ||
670 	    hw->mac.type == e1000_ich9lan ||
671 	    hw->mac.type == e1000_ich10lan) {
672 		/* get flash size */
673 		if (ddi_dev_regsize(devinfo, ICH_FLASH_REG_SET,
674 		    &mem_size) != DDI_SUCCESS) {
675 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
676 			    "ddi_dev_regsize for ICH flash failed");
677 			goto regs_map_fail;
678 		}
679 
680 		/* map flash in */
681 		if (ddi_regs_map_setup(devinfo, ICH_FLASH_REG_SET,
682 		    (caddr_t *)&hw->flash_address, 0,
683 		    mem_size, &e1000g_regs_acc_attr,
684 		    &osdep->ich_flash_handle) != DDI_SUCCESS) {
685 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
686 			    "ddi_regs_map_setup for ICH flash failed");
687 			goto regs_map_fail;
688 		}
689 	}
690 
691 	return (DDI_SUCCESS);
692 
693 regs_map_fail:
694 	if (osdep->reg_handle != NULL)
695 		ddi_regs_map_free(&osdep->reg_handle);
696 
697 	return (DDI_FAILURE);
698 }
699 
700 static int
701 e1000g_set_driver_params(struct e1000g *Adapter)
702 {
703 	struct e1000_hw *hw;
704 	uint32_t mem_bar, io_bar, bar64;
705 
706 	hw = &Adapter->shared;
707 
708 	/* Set MAC type and initialize hardware functions */
709 	if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) {
710 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
711 		    "Could not setup hardware functions");
712 		return (DDI_FAILURE);
713 	}
714 
715 	/* Get bus information */
716 	if (e1000_get_bus_info(hw) != E1000_SUCCESS) {
717 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
718 		    "Could not get bus information");
719 		return (DDI_FAILURE);
720 	}
721 
722 	/* get mem_base addr */
723 	mem_bar = pci_config_get32(Adapter->osdep.cfg_handle, PCI_CONF_BASE0);
724 	bar64 = mem_bar & PCI_BASE_TYPE_ALL;
725 
726 	/* get io_base addr */
727 	if (hw->mac.type >= e1000_82544) {
728 		if (bar64) {
729 			/* IO BAR is different for 64 bit BAR mode */
730 			io_bar = pci_config_get32(Adapter->osdep.cfg_handle,
731 			    PCI_CONF_BASE4);
732 		} else {
733 			/* normal 32-bit BAR mode */
734 			io_bar = pci_config_get32(Adapter->osdep.cfg_handle,
735 			    PCI_CONF_BASE2);
736 		}
737 		hw->io_base = io_bar & PCI_BASE_IO_ADDR_M;
738 	} else {
739 		/* no I/O access for adapters prior to 82544 */
740 		hw->io_base = 0x0;
741 	}
742 
743 	e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word);
744 
745 	hw->mac.autoneg_failed = B_TRUE;
746 
747 	/* Set the autoneg_wait_to_complete flag to B_FALSE */
748 	hw->phy.autoneg_wait_to_complete = B_FALSE;
749 
750 	/* Adaptive IFS related changes */
751 	hw->mac.adaptive_ifs = B_TRUE;
752 
753 	/* Enable phy init script for IGP phy of 82541/82547 */
754 	if ((hw->mac.type == e1000_82547) ||
755 	    (hw->mac.type == e1000_82541) ||
756 	    (hw->mac.type == e1000_82547_rev_2) ||
757 	    (hw->mac.type == e1000_82541_rev_2))
758 		e1000_init_script_state_82541(hw, B_TRUE);
759 
760 	/* Enable the TTL workaround for 82541/82547 */
761 	e1000_set_ttl_workaround_state_82541(hw, B_TRUE);
762 
763 #ifdef __sparc
764 	Adapter->strip_crc = B_TRUE;
765 #else
766 	Adapter->strip_crc = B_FALSE;
767 #endif
768 
769 	/* Get conf file properties */
770 	e1000g_get_conf(Adapter);
771 
772 	/* Get speed/duplex settings in conf file */
773 	hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
774 	hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
775 	e1000g_force_speed_duplex(Adapter);
776 
777 	/* Get Jumbo Frames settings in conf file */
778 	e1000g_get_max_frame_size(Adapter);
779 
780 	/* Set Rx/Tx buffer size */
781 	e1000g_set_bufsize(Adapter);
782 
783 	/* Master Latency Timer */
784 	Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER;
785 
786 	/* copper options */
787 	if (hw->phy.media_type == e1000_media_type_copper) {
788 		hw->phy.mdix = 0;	/* AUTO_ALL_MODES */
789 		hw->phy.disable_polarity_correction = B_FALSE;
790 		hw->phy.ms_type = e1000_ms_hw_default;	/* E1000_MASTER_SLAVE */
791 	}
792 
793 	/* The initial link state should be "unknown" */
794 	Adapter->link_state = LINK_STATE_UNKNOWN;
795 
796 	/* Initialize rx parameters */
797 	Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY;
798 	Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY;
799 
800 	/* Initialize tx parameters */
801 	Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE;
802 	Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD;
803 	Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY;
804 	Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY;
805 
806 	/* Initialize rx parameters */
807 	Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD;
808 
809 	return (DDI_SUCCESS);
810 }
811 
812 static void
813 e1000g_set_bufsize(struct e1000g *Adapter)
814 {
815 	struct e1000_mac_info *mac = &Adapter->shared.mac;
816 	uint64_t rx_size;
817 	uint64_t tx_size;
818 
819 	dev_info_t *devinfo = Adapter->dip;
820 #ifdef __sparc
821 	ulong_t iommu_pagesize;
822 #endif
823 	/* Get the system page size */
824 	Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1);
825 
826 #ifdef __sparc
827 	iommu_pagesize = dvma_pagesize(devinfo);
828 	if (iommu_pagesize != 0) {
829 		if (Adapter->sys_page_sz == iommu_pagesize) {
830 			if (iommu_pagesize > 0x4000)
831 				Adapter->sys_page_sz = 0x4000;
832 		} else {
833 			if (Adapter->sys_page_sz > iommu_pagesize)
834 				Adapter->sys_page_sz = iommu_pagesize;
835 		}
836 	}
837 	if (Adapter->lso_enable) {
838 		Adapter->dvma_page_num = E1000_LSO_MAXLEN /
839 		    Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
840 	} else {
841 		Adapter->dvma_page_num = Adapter->max_frame_size /
842 		    Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
843 	}
844 	ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM);
845 #endif
846 
847 	Adapter->min_frame_size = ETHERMIN + ETHERFCSL;
848 
849 	if (Adapter->mem_workaround_82546 &&
850 	    ((mac->type == e1000_82545) ||
851 	    (mac->type == e1000_82546) ||
852 	    (mac->type == e1000_82546_rev_3))) {
853 		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
854 	} else {
855 		rx_size = Adapter->max_frame_size + E1000G_IPALIGNPRESERVEROOM;
856 		if ((rx_size > FRAME_SIZE_UPTO_2K) &&
857 		    (rx_size <= FRAME_SIZE_UPTO_4K))
858 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
859 		else if ((rx_size > FRAME_SIZE_UPTO_4K) &&
860 		    (rx_size <= FRAME_SIZE_UPTO_8K))
861 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
862 		else if ((rx_size > FRAME_SIZE_UPTO_8K) &&
863 		    (rx_size <= FRAME_SIZE_UPTO_16K))
864 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
865 		else
866 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
867 	}
868 
869 	tx_size = Adapter->max_frame_size;
870 	if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K))
871 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
872 	else if ((tx_size > FRAME_SIZE_UPTO_4K) &&
873 	    (tx_size <= FRAME_SIZE_UPTO_8K))
874 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
875 	else if ((tx_size > FRAME_SIZE_UPTO_8K) &&
876 	    (tx_size <= FRAME_SIZE_UPTO_16K))
877 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
878 	else
879 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;
880 
881 	/*
882 	 * For Wiseman adapters we have a requirement that receive buffers
883 	 * be aligned on a 256-byte boundary. Since Livengood does not
884 	 * require this, and forcing it for all hardware would have
885 	 * performance implications, we make it applicable only to Wiseman
886 	 * and only when jumbo frames are enabled; the rest of the time it
887 	 * is okay to have normal frames. However, there is a potential
888 	 * risk of losing data if a buffer is not aligned, so all Wiseman
889 	 * boards are given receive buffers aligned on a 256-byte
890 	 * boundary.
891 	 */
892 	if (mac->type < e1000_82543)
893 		Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
894 	else
895 		Adapter->rx_buf_align = 1;
896 }
897 
898 /*
899  * e1000g_detach - driver detach
900  *
901  * The detach() function is the complement of the attach routine.
902  * If cmd is set to DDI_DETACH, detach() is used to remove  the
903  * state  associated  with  a  given  instance of a device node
904  * prior to the removal of that instance from the system.
905  *
906  * The detach() function will be called once for each  instance
907  * of the device for which there has been a successful attach()
908  * once there are no longer  any  opens  on  the  device.
909  *
910  * Interrupt routines are disabled, and all memory allocated by this
911  * driver is freed.
912  */
913 static int
914 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
915 {
916 	struct e1000g *Adapter;
917 	boolean_t rx_drain;
918 
919 	switch (cmd) {
920 	default:
921 		return (DDI_FAILURE);
922 
923 	case DDI_SUSPEND:
924 		return (e1000g_suspend(devinfo));
925 
926 	case DDI_DETACH:
927 		break;
928 	}
929 
930 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
931 	if (Adapter == NULL)
932 		return (DDI_FAILURE);
933 
934 	rx_drain = e1000g_rx_drain(Adapter);
935 	if (!rx_drain && !e1000g_force_detach)
936 		return (DDI_FAILURE);
937 
938 	if (mac_unregister(Adapter->mh) != 0) {
939 		e1000g_log(Adapter, CE_WARN, "Unregister MAC failed");
940 		return (DDI_FAILURE);
941 	}
942 	Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC;
943 
944 	ASSERT(!(Adapter->e1000g_state & E1000G_STARTED));
945 
946 	if (!e1000g_force_detach && !rx_drain)
947 		return (DDI_FAILURE);
948 
949 	e1000g_unattach(devinfo, Adapter);
950 
951 	return (DDI_SUCCESS);
952 }
953 
954 /*
955  * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance
956  */
957 void
958 e1000g_free_priv_devi_node(private_devi_list_t *devi_node)
959 {
960 	ASSERT(e1000g_private_devi_list != NULL);
961 	ASSERT(devi_node != NULL);
962 
963 	if (devi_node->prev != NULL)
964 		devi_node->prev->next = devi_node->next;
965 	if (devi_node->next != NULL)
966 		devi_node->next->prev = devi_node->prev;
967 	if (devi_node == e1000g_private_devi_list)
968 		e1000g_private_devi_list = devi_node->next;
969 
970 	kmem_free(devi_node->priv_dip,
971 	    sizeof (struct dev_info));
972 	kmem_free(devi_node,
973 	    sizeof (private_devi_list_t));
974 }
975 
976 static void
977 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter)
978 {
979 	private_devi_list_t *devi_node;
980 	int result;
981 
982 	if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
983 		(void) e1000g_disable_intrs(Adapter);
984 	}
985 
986 	if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) {
987 		(void) mac_unregister(Adapter->mh);
988 	}
989 
990 	if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
991 		(void) e1000g_rem_intrs(Adapter);
992 	}
993 
994 	if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) {
995 		(void) ddi_prop_remove_all(devinfo);
996 	}
997 
998 	if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) {
999 		kstat_delete((kstat_t *)Adapter->e1000g_ksp);
1000 	}
1001 
1002 	if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) {
1003 		stop_link_timer(Adapter);
1004 
1005 		mutex_enter(&e1000g_nvm_lock);
1006 		result = e1000_reset_hw(&Adapter->shared);
1007 		mutex_exit(&e1000g_nvm_lock);
1008 
1009 		if (result != E1000_SUCCESS) {
1010 			e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1011 			ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1012 		}
1013 	}
1014 
1015 	e1000g_release_multicast(Adapter);
1016 
1017 	if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
1018 		if (Adapter->osdep.reg_handle != NULL)
1019 			ddi_regs_map_free(&Adapter->osdep.reg_handle);
1020 		if (Adapter->osdep.ich_flash_handle != NULL)
1021 			ddi_regs_map_free(&Adapter->osdep.ich_flash_handle);
1022 	}
1023 
1024 	if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
1025 		if (Adapter->osdep.cfg_handle != NULL)
1026 			pci_config_teardown(&Adapter->osdep.cfg_handle);
1027 	}
1028 
1029 	if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) {
1030 		e1000g_destroy_locks(Adapter);
1031 	}
1032 
1033 	if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) {
1034 		e1000g_fm_fini(Adapter);
1035 	}
1036 
1037 	mutex_enter(&e1000g_rx_detach_lock);
1038 	if (e1000g_force_detach && (Adapter->priv_devi_node != NULL)) {
1039 		devi_node = Adapter->priv_devi_node;
1040 		devi_node->flag |= E1000G_PRIV_DEVI_DETACH;
1041 
1042 		if (devi_node->pending_rx_count == 0) {
1043 			e1000g_free_priv_devi_node(devi_node);
1044 		}
1045 	}
1046 	mutex_exit(&e1000g_rx_detach_lock);
1047 
1048 	kmem_free((caddr_t)Adapter, sizeof (struct e1000g));
1049 
1050 	/*
1051 	 * Another hotplug spec requirement:
1052 	 * run ddi_set_driver_private(devinfo, NULL);
1053 	 */
1054 	ddi_set_driver_private(devinfo, NULL);
1055 }
1056 
1057 static void
1058 e1000g_init_locks(struct e1000g *Adapter)
1059 {
1060 	e1000g_tx_ring_t *tx_ring;
1061 	e1000g_rx_ring_t *rx_ring;
1062 
1063 	rw_init(&Adapter->chip_lock, NULL,
1064 	    RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1065 	mutex_init(&Adapter->link_lock, NULL,
1066 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1067 	mutex_init(&Adapter->watchdog_lock, NULL,
1068 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1069 
1070 	tx_ring = Adapter->tx_ring;
1071 
1072 	mutex_init(&tx_ring->tx_lock, NULL,
1073 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1074 	mutex_init(&tx_ring->usedlist_lock, NULL,
1075 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1076 	mutex_init(&tx_ring->freelist_lock, NULL,
1077 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1078 
1079 	rx_ring = Adapter->rx_ring;
1080 
1081 	mutex_init(&rx_ring->rx_lock, NULL,
1082 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1083 }
1084 
1085 static void
1086 e1000g_destroy_locks(struct e1000g *Adapter)
1087 {
1088 	e1000g_tx_ring_t *tx_ring;
1089 	e1000g_rx_ring_t *rx_ring;
1090 
1091 	tx_ring = Adapter->tx_ring;
1092 	mutex_destroy(&tx_ring->tx_lock);
1093 	mutex_destroy(&tx_ring->usedlist_lock);
1094 	mutex_destroy(&tx_ring->freelist_lock);
1095 
1096 	rx_ring = Adapter->rx_ring;
1097 	mutex_destroy(&rx_ring->rx_lock);
1098 
1099 	mutex_destroy(&Adapter->link_lock);
1100 	mutex_destroy(&Adapter->watchdog_lock);
1101 	rw_destroy(&Adapter->chip_lock);
1102 }
1103 
1104 static int
1105 e1000g_resume(dev_info_t *devinfo)
1106 {
1107 	struct e1000g *Adapter;
1108 
1109 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1110 	if (Adapter == NULL)
1111 		e1000g_log(Adapter, CE_PANIC,
1112 		    "Instance pointer is null\n");
1113 
1114 	if (Adapter->dip != devinfo)
1115 		e1000g_log(Adapter, CE_PANIC,
1116 		    "Devinfo is not the same as saved devinfo\n");
1117 
1118 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1119 
1120 	if (Adapter->e1000g_state & E1000G_STARTED) {
1121 		if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
1122 			rw_exit(&Adapter->chip_lock);
1123 			/*
1124 			 * We note the failure, but return success, as the
1125 			 * system is still usable without this controller.
1126 			 */
1127 			e1000g_log(Adapter, CE_WARN,
1128 			    "e1000g_resume: failed to restart controller\n");
1129 			return (DDI_SUCCESS);
1130 		}
1131 		/* Enable and start the watchdog timer */
1132 		enable_watchdog_timer(Adapter);
1133 	}
1134 
1135 	Adapter->e1000g_state &= ~E1000G_SUSPENDED;
1136 
1137 	rw_exit(&Adapter->chip_lock);
1138 
1139 	return (DDI_SUCCESS);
1140 }
1141 
1142 static int
1143 e1000g_suspend(dev_info_t *devinfo)
1144 {
1145 	struct e1000g *Adapter;
1146 
1147 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1148 	if (Adapter == NULL)
1149 		return (DDI_FAILURE);
1150 
1151 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1152 
1153 	Adapter->e1000g_state |= E1000G_SUSPENDED;
1154 
1155 	/* if the port isn't plumbed, we can simply return */
1156 	if (!(Adapter->e1000g_state & E1000G_STARTED)) {
1157 		rw_exit(&Adapter->chip_lock);
1158 		return (DDI_SUCCESS);
1159 	}
1160 
1161 	e1000g_stop(Adapter, B_FALSE);
1162 
1163 	rw_exit(&Adapter->chip_lock);
1164 
1165 	/* Disable and stop all the timers */
1166 	disable_watchdog_timer(Adapter);
1167 	stop_link_timer(Adapter);
1168 	stop_82547_timer(Adapter->tx_ring);
1169 
1170 	return (DDI_SUCCESS);
1171 }
1172 
1173 static int
1174 e1000g_init(struct e1000g *Adapter)
1175 {
1176 	uint32_t pba;
1177 	uint32_t high_water;
1178 	struct e1000_hw *hw;
1179 	clock_t link_timeout;
1180 	int result;
1181 
1182 	hw = &Adapter->shared;
1183 
1184 	/*
1185 	 * reset to put the hardware in a known state
1186 	 * before we try to do anything with the eeprom
1187 	 */
1188 	mutex_enter(&e1000g_nvm_lock);
1189 	result = e1000_reset_hw(hw);
1190 	mutex_exit(&e1000g_nvm_lock);
1191 
1192 	if (result != E1000_SUCCESS) {
1193 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1194 		goto init_fail;
1195 	}
1196 
1197 	mutex_enter(&e1000g_nvm_lock);
1198 	result = e1000_validate_nvm_checksum(hw);
1199 	if (result < E1000_SUCCESS) {
1200 		/*
1201 		 * Some PCI-E parts fail the first check due to
1202 		 * the link being in a sleep state.  Call it again;
1203 		 * if it fails a second time, it is a real issue.
1204 		 */
1205 		result = e1000_validate_nvm_checksum(hw);
1206 	}
1207 	mutex_exit(&e1000g_nvm_lock);
1208 
1209 	if (result < E1000_SUCCESS) {
1210 		e1000g_log(Adapter, CE_WARN,
1211 		    "Invalid NVM checksum. Please contact "
1212 		    "the vendor to update the NVM.");
1213 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1214 		goto init_fail;
1215 	}
1216 
1217 	result = 0;
1218 #ifdef __sparc
1219 	/*
1220 	 * First, we try to get the local ethernet address from OBP. If
1221 	 * that fails, then we get it from the EEPROM of the NIC card.
1222 	 */
1223 	result = e1000g_find_mac_address(Adapter);
1224 #endif
1225 	/* Get the local ethernet address. */
1226 	if (!result) {
1227 		mutex_enter(&e1000g_nvm_lock);
1228 		result = e1000_read_mac_addr(hw);
1229 		mutex_exit(&e1000g_nvm_lock);
1230 	}
1231 
1232 	if (result < E1000_SUCCESS) {
1233 		e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
1234 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1235 		goto init_fail;
1236 	}
1237 
1238 	/* check for valid mac address */
1239 	if (!is_valid_mac_addr(hw->mac.addr)) {
1240 		e1000g_log(Adapter, CE_WARN, "Invalid mac addr");
1241 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1242 		goto init_fail;
1243 	}
1244 
1245 	/* Set LAA state for 82571 chipset */
1246 	e1000_set_laa_state_82571(hw, B_TRUE);
1247 
1248 	/* Master Latency Timer implementation */
1249 	if (Adapter->master_latency_timer) {
1250 		pci_config_put8(Adapter->osdep.cfg_handle,
1251 		    PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer);
1252 	}
1253 
1254 	if (hw->mac.type < e1000_82547) {
1255 		/*
1256 		 * Total FIFO is 64K
1257 		 */
1258 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1259 			pba = E1000_PBA_40K;	/* 40K for Rx, 24K for Tx */
1260 		else
1261 			pba = E1000_PBA_48K;	/* 48K for Rx, 16K for Tx */
1262 	} else if ((hw->mac.type == e1000_82571) ||
1263 	    (hw->mac.type == e1000_82572) ||
1264 	    (hw->mac.type == e1000_80003es2lan)) {
1265 		/*
1266 		 * Total FIFO is 48K
1267 		 */
1268 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1269 			pba = E1000_PBA_30K;	/* 30K for Rx, 18K for Tx */
1270 		else
1271 			pba = E1000_PBA_38K;	/* 38K for Rx, 10K for Tx */
1272 	} else if (hw->mac.type == e1000_82573) {
1273 		pba = E1000_PBA_20K;		/* 20K for Rx, 12K for Tx */
1274 	} else if (hw->mac.type == e1000_82574) {
1275 		/* Keep adapter default: 20K for Rx, 20K for Tx */
1276 		pba = E1000_READ_REG(hw, E1000_PBA);
1277 	} else if (hw->mac.type == e1000_ich8lan) {
1278 		pba = E1000_PBA_8K;		/* 8K for Rx, 12K for Tx */
1279 	} else if (hw->mac.type == e1000_ich9lan) {
1280 		pba = E1000_PBA_10K;
1281 	} else if (hw->mac.type == e1000_ich10lan) {
1282 		pba = E1000_PBA_10K;
1283 	} else {
1284 		/*
1285 		 * Total FIFO is 40K
1286 		 */
1287 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1288 			pba = E1000_PBA_22K;	/* 22K for Rx, 18K for Tx */
1289 		else
1290 			pba = E1000_PBA_30K;	/* 30K for Rx, 10K for Tx */
1291 	}
1292 	E1000_WRITE_REG(hw, E1000_PBA, pba);
1293 
1294 	/*
1295 	 * These parameters set thresholds for the adapter's generation (Tx)
1296 	 * and response (Rx) to Ethernet PAUSE frames.  These are just threshold
1297 	 * settings.  Flow control is enabled or disabled in the configuration
1298 	 * file.
1299 	 * High-water mark is set down from the top of the rx fifo (not
1300 	 * sensitive to max_frame_size) and low-water is set just below
1301 	 * high-water mark.
1302 	 * The high water mark must be low enough to fit one full frame above
1303 	 * it in the rx FIFO.  Should be the lower of:
1304 	 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early
1305 	 * receive size (assuming ERT set to E1000_ERT_2048), or the full
1306 	 * Rx FIFO size minus one full frame.
1307 	 */
1308 	high_water = min(((pba << 10) * 9 / 10),
1309 	    ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574 ||
1310 	    hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_ich10lan) ?
1311 	    ((pba << 10) - (E1000_ERT_2048 << 3)) :
1312 	    ((pba << 10) - Adapter->max_frame_size)));
1313 
1314 	hw->fc.high_water = high_water & 0xFFF8;
1315 	hw->fc.low_water = hw->fc.high_water - 8;
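	/*
	 * Illustrative example (assumed values only): with a 48KB Rx FIFO
	 * (pba = 48) and a 1518-byte max frame, pba << 10 is 49152 bytes,
	 * 90% of that is 44236 and 49152 - 1518 is 47634, so high_water
	 * becomes min(44236, 47634) & 0xFFF8 = 44232 and low_water 44224.
	 */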
1316 
1317 	if (hw->mac.type == e1000_80003es2lan)
1318 		hw->fc.pause_time = 0xFFFF;
1319 	else
1320 		hw->fc.pause_time = E1000_FC_PAUSE_TIME;
1321 	hw->fc.send_xon = B_TRUE;
1322 
1323 	/*
1324 	 * Reset the adapter hardware the second time.
1325 	 */
1326 	mutex_enter(&e1000g_nvm_lock);
1327 	result = e1000_reset_hw(hw);
1328 	mutex_exit(&e1000g_nvm_lock);
1329 
1330 	if (result != E1000_SUCCESS) {
1331 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1332 		goto init_fail;
1333 	}
1334 
1335 	/* disable wakeup control by default */
1336 	if (hw->mac.type >= e1000_82544)
1337 		E1000_WRITE_REG(hw, E1000_WUC, 0);
1338 
1339 	/*
1340 	 * MWI should be disabled on 82546.
1341 	 */
1342 	if (hw->mac.type == e1000_82546)
1343 		e1000_pci_clear_mwi(hw);
1344 	else
1345 		e1000_pci_set_mwi(hw);
1346 
1347 	/*
1348 	 * Configure/Initialize hardware
1349 	 */
1350 	mutex_enter(&e1000g_nvm_lock);
1351 	result = e1000_init_hw(hw);
1352 	mutex_exit(&e1000g_nvm_lock);
1353 
1354 	if (result < E1000_SUCCESS) {
1355 		e1000g_log(Adapter, CE_WARN, "Initialize hw failed");
1356 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1357 		goto init_fail;
1358 	}
1359 
1360 	/*
1361 	 * Restore LED settings to the default from EEPROM
1362 	 * to meet the standard for Sun platforms.
1363 	 */
1364 	if ((hw->mac.type != e1000_82541) &&
1365 	    (hw->mac.type != e1000_82541_rev_2) &&
1366 	    (hw->mac.type != e1000_82547) &&
1367 	    (hw->mac.type != e1000_82547_rev_2))
1368 		(void) e1000_cleanup_led(hw);
1369 
1370 	/* Disable Smart Power Down */
1371 	phy_spd_state(hw, B_FALSE);
1372 
1373 	/* Make sure driver has control */
1374 	e1000g_get_driver_control(hw);
1375 
1376 	/*
1377 	 * Initialize unicast addresses.
1378 	 */
1379 	e1000g_init_unicst(Adapter);
1380 
1381 	/*
1382 	 * Setup and initialize the mctable structures.  After this routine
1383 	 * completes, the multicast table will be set.
1384 	 */
1385 	e1000g_setup_multicast(Adapter);
1386 	msec_delay(5);
1387 
1388 	/*
1389 	 * Implement Adaptive IFS
1390 	 */
1391 	e1000_reset_adaptive(hw);
1392 
1393 	/* Setup Interrupt Throttling Register */
1394 	if (hw->mac.type >= e1000_82540) {
1395 		E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate);
1396 	} else
1397 		Adapter->intr_adaptive = B_FALSE;
1398 
1399 	/* Start the timer for link setup */
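	/*
	 * Each limit unit below is 100 ms: drv_usectohz(100000) converts
	 * 100 ms into clock ticks for the timeout(9F) call.
	 */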
1400 	if (hw->mac.autoneg)
1401 		link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000);
1402 	else
1403 		link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000);
1404 
1405 	mutex_enter(&Adapter->link_lock);
1406 	if (hw->phy.autoneg_wait_to_complete) {
1407 		Adapter->link_complete = B_TRUE;
1408 	} else {
1409 		Adapter->link_complete = B_FALSE;
1410 		Adapter->link_tid = timeout(e1000g_link_timer,
1411 		    (void *)Adapter, link_timeout);
1412 	}
1413 	mutex_exit(&Adapter->link_lock);
1414 
1415 	/* Enable PCI-Ex master */
1416 	if (hw->bus.type == e1000_bus_type_pci_express) {
1417 		e1000_enable_pciex_master(hw);
1418 	}
1419 
1420 	/* Save the state of the phy */
1421 	e1000g_get_phy_state(Adapter);
1422 
1423 	e1000g_param_sync(Adapter);
1424 
1425 	Adapter->init_count++;
1426 
1427 	if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
1428 		goto init_fail;
1429 	}
1430 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1431 		goto init_fail;
1432 	}
1433 
1434 	Adapter->poll_mode = e1000g_poll_mode;
1435 
1436 	return (DDI_SUCCESS);
1437 
1438 init_fail:
1439 	ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1440 	return (DDI_FAILURE);
1441 }
1442 
1443 static int
1444 e1000g_alloc_rx_data(struct e1000g *Adapter)
1445 {
1446 	e1000g_rx_ring_t *rx_ring;
1447 	e1000g_rx_data_t *rx_data;
1448 
1449 	rx_ring = Adapter->rx_ring;
1450 
1451 	rx_data = kmem_zalloc(sizeof (e1000g_rx_data_t), KM_NOSLEEP);
1452 
1453 	if (rx_data == NULL)
1454 		return (DDI_FAILURE);
1455 
1456 	rx_data->priv_devi_node = Adapter->priv_devi_node;
1457 	rx_data->rx_ring = rx_ring;
1458 
1459 	mutex_init(&rx_data->freelist_lock, NULL,
1460 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1461 	mutex_init(&rx_data->recycle_lock, NULL,
1462 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1463 
1464 	rx_ring->rx_data = rx_data;
1465 
1466 	return (DDI_SUCCESS);
1467 }
1468 
1469 void
1470 e1000g_free_rx_pending_buffers(e1000g_rx_data_t *rx_data)
1471 {
1472 	rx_sw_packet_t *packet, *next_packet;
1473 
1474 	if (rx_data == NULL)
1475 		return;
1476 
1477 	packet = rx_data->packet_area;
1478 	while (packet != NULL) {
1479 		next_packet = packet->next;
1480 		e1000g_free_rx_sw_packet(packet, B_TRUE);
1481 		packet = next_packet;
1482 	}
1483 	rx_data->packet_area = NULL;
1484 }
1485 
1486 void
1487 e1000g_free_rx_data(e1000g_rx_data_t *rx_data)
1488 {
1489 	if (rx_data == NULL)
1490 		return;
1491 
1492 	mutex_destroy(&rx_data->freelist_lock);
1493 	mutex_destroy(&rx_data->recycle_lock);
1494 
1495 	kmem_free(rx_data, sizeof (e1000g_rx_data_t));
1496 }
1497 
1498 /*
1499  * Check if the link is up
1500  */
1501 static boolean_t
1502 e1000g_link_up(struct e1000g *Adapter)
1503 {
1504 	struct e1000_hw *hw;
1505 	boolean_t link_up;
1506 
1507 	hw = &Adapter->shared;
1508 
1509 	(void) e1000_check_for_link(hw);
1510 
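	/*
	 * The link is considered up if the LU bit is set in the STATUS
	 * register, if an 82543 has already resolved its link status
	 * (get_link_status is clear), or if an internal-serdes adapter
	 * reports serdes link.
	 */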
1511 	if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU) ||
1512 	    ((!hw->mac.get_link_status) && (hw->mac.type == e1000_82543)) ||
1513 	    ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
1514 	    (hw->mac.serdes_has_link))) {
1515 		link_up = B_TRUE;
1516 	} else {
1517 		link_up = B_FALSE;
1518 	}
1519 
1520 	return (link_up);
1521 }
1522 
1523 static void
1524 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
1525 {
1526 	struct iocblk *iocp;
1527 	struct e1000g *e1000gp;
1528 	enum ioc_reply status;
1529 
1530 	iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
1531 	iocp->ioc_error = 0;
1532 	e1000gp = (struct e1000g *)arg;
1533 
1534 	ASSERT(e1000gp);
1535 	if (e1000gp == NULL) {
1536 		miocnak(q, mp, 0, EINVAL);
1537 		return;
1538 	}
1539 
1540 	rw_enter(&e1000gp->chip_lock, RW_READER);
1541 	if (e1000gp->e1000g_state & E1000G_SUSPENDED) {
1542 		rw_exit(&e1000gp->chip_lock);
1543 		miocnak(q, mp, 0, EINVAL);
1544 		return;
1545 	}
1546 	rw_exit(&e1000gp->chip_lock);
1547 
1548 	switch (iocp->ioc_cmd) {
1549 
1550 	case LB_GET_INFO_SIZE:
1551 	case LB_GET_INFO:
1552 	case LB_GET_MODE:
1553 	case LB_SET_MODE:
1554 		status = e1000g_loopback_ioctl(e1000gp, iocp, mp);
1555 		break;
1556 
1557 
1558 #ifdef E1000G_DEBUG
1559 	case E1000G_IOC_REG_PEEK:
1560 	case E1000G_IOC_REG_POKE:
1561 		status = e1000g_pp_ioctl(e1000gp, iocp, mp);
1562 		break;
1563 	case E1000G_IOC_CHIP_RESET:
1564 		e1000gp->reset_count++;
1565 		if (e1000g_reset_adapter(e1000gp))
1566 			status = IOC_ACK;
1567 		else
1568 			status = IOC_INVAL;
1569 		break;
1570 #endif
1571 	default:
1572 		status = IOC_INVAL;
1573 		break;
1574 	}
1575 
1576 	/*
1577 	 * Decide how to reply
1578 	 */
1579 	switch (status) {
1580 	default:
1581 	case IOC_INVAL:
1582 		/*
1583 		 * Error, reply with a NAK and EINVAL or the specified error
1584 		 */
1585 		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
1586 		    EINVAL : iocp->ioc_error);
1587 		break;
1588 
1589 	case IOC_DONE:
1590 		/*
1591 		 * OK, reply already sent
1592 		 */
1593 		break;
1594 
1595 	case IOC_ACK:
1596 		/*
1597 		 * OK, reply with an ACK
1598 		 */
1599 		miocack(q, mp, 0, 0);
1600 		break;
1601 
1602 	case IOC_REPLY:
1603 		/*
1604 		 * OK, send prepared reply as ACK or NAK
1605 		 */
1606 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
1607 		    M_IOCACK : M_IOCNAK;
1608 		qreply(q, mp);
1609 		break;
1610 	}
1611 }
1612 
1613 /*
1614  * The default value of e1000g_poll_mode == 0 assumes that the NIC is
1615  * capable of supporting only one interrupt and we shouldn't disable
1616  * the physical interrupt. In this case we let the interrupt come and
1617  * we queue the packets in the rx ring itself in case we are in polling
1618  * mode (better latency but slightly lower performance and a very
1619  * high interrupt count in mpstat, which is harmless).
1620  *
1621  * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt
1622  * which can be disabled in poll mode. This gives better overall
1623  * throughput (compared to the mode above), shows very low interrupt
1624  * count but has slightly higher latency since we pick the packets when
1625  * the poll thread does polling.
1626  *
1627  * Currently, this flag should be enabled only while doing performance
1628  * measurement or when it can be guaranteed that the entire NIC going
1629  * into poll mode will not harm any traffic such as cluster heartbeats.
1630  */
1631 int e1000g_poll_mode = 0;
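/*
 * Note: as a global driver variable, e1000g_poll_mode can typically be
 * overridden at boot via /etc/system (illustrative assumption, not a
 * documented tunable):
 *	set e1000g:e1000g_poll_mode = 1
 */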
1632 
1633 /*
1634  * Called from the upper layers when the driver is in polling mode to
1635  * pick up any queued packets. Care should be taken to not block
1636  * this thread.
1637  */
1638 static mblk_t *e1000g_poll_ring(void *arg, int bytes_to_pickup)
1639 {
1640 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)arg;
1641 	mblk_t			*mp = NULL;
1642 	mblk_t			*tail;
1643 	struct e1000g 		*adapter;
1644 
1645 	adapter = rx_ring->adapter;
1646 
1647 	rw_enter(&adapter->chip_lock, RW_READER);
1648 
1649 	if (adapter->e1000g_state & E1000G_SUSPENDED) {
1650 		rw_exit(&adapter->chip_lock);
1651 		return (NULL);
1652 	}
1653 
1654 	mutex_enter(&rx_ring->rx_lock);
1655 	mp = e1000g_receive(rx_ring, &tail, bytes_to_pickup);
1656 	mutex_exit(&rx_ring->rx_lock);
1657 	rw_exit(&adapter->chip_lock);
1658 	return (mp);
1659 }
1660 
1661 static int
1662 e1000g_m_start(void *arg)
1663 {
1664 	struct e1000g *Adapter = (struct e1000g *)arg;
1665 
1666 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1667 
1668 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
1669 		rw_exit(&Adapter->chip_lock);
1670 		return (ECANCELED);
1671 	}
1672 
1673 	if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
1674 		rw_exit(&Adapter->chip_lock);
1675 		return (ENOTACTIVE);
1676 	}
1677 
1678 	Adapter->e1000g_state |= E1000G_STARTED;
1679 
1680 	rw_exit(&Adapter->chip_lock);
1681 
1682 	/* Enable and start the watchdog timer */
1683 	enable_watchdog_timer(Adapter);
1684 
1685 	return (0);
1686 }
1687 
1688 static int
1689 e1000g_start(struct e1000g *Adapter, boolean_t global)
1690 {
1691 	e1000g_rx_data_t *rx_data;
1692 
1693 	if (global) {
1694 		if (e1000g_alloc_rx_data(Adapter) != DDI_SUCCESS) {
1695 			e1000g_log(Adapter, CE_WARN, "Allocate rx data failed");
1696 			goto start_fail;
1697 		}
1698 
1699 		/* Allocate dma resources for descriptors and buffers */
1700 		if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) {
1701 			e1000g_log(Adapter, CE_WARN,
1702 			    "Alloc DMA resources failed");
1703 			goto start_fail;
1704 		}
1705 		Adapter->rx_buffer_setup = B_FALSE;
1706 	}
1707 
1708 	if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) {
1709 		if (e1000g_init(Adapter) != DDI_SUCCESS) {
1710 			e1000g_log(Adapter, CE_WARN,
1711 			    "Adapter initialization failed");
1712 			goto start_fail;
1713 		}
1714 	}
1715 
1716 	/* Setup and initialize the transmit structures */
1717 	e1000g_tx_setup(Adapter);
1718 	msec_delay(5);
1719 
1720 	/* Setup and initialize the receive structures */
1721 	e1000g_rx_setup(Adapter);
1722 	msec_delay(5);
1723 
1724 	/* Restore the e1000g promiscuous mode */
1725 	e1000g_restore_promisc(Adapter);
1726 
1727 	e1000g_mask_interrupt(Adapter);
1728 
1729 	Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
1730 
1731 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1732 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1733 		goto start_fail;
1734 	}
1735 
1736 	return (DDI_SUCCESS);
1737 
1738 start_fail:
1739 	rx_data = Adapter->rx_ring->rx_data;
1740 
1741 	if (global) {
1742 		e1000g_release_dma_resources(Adapter);
1743 		e1000g_free_rx_pending_buffers(rx_data);
1744 		e1000g_free_rx_data(rx_data);
1745 	}
1746 
1747 	mutex_enter(&e1000g_nvm_lock);
1748 	(void) e1000_reset_hw(&Adapter->shared);
1749 	mutex_exit(&e1000g_nvm_lock);
1750 
1751 	return (DDI_FAILURE);
1752 }
1753 
1754 static void
1755 e1000g_m_stop(void *arg)
1756 {
1757 	struct e1000g *Adapter = (struct e1000g *)arg;
1758 
1759 	/* Drain tx sessions */
1760 	(void) e1000g_tx_drain(Adapter);
1761 
1762 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1763 
1764 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
1765 		rw_exit(&Adapter->chip_lock);
1766 		return;
1767 	}
1768 	Adapter->e1000g_state &= ~E1000G_STARTED;
1769 	e1000g_stop(Adapter, B_TRUE);
1770 
1771 	rw_exit(&Adapter->chip_lock);
1772 
1773 	/* Disable and stop all the timers */
1774 	disable_watchdog_timer(Adapter);
1775 	stop_link_timer(Adapter);
1776 	stop_82547_timer(Adapter->tx_ring);
1777 }
1778 
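/*
 * e1000g_stop - stop the chip and release pending resources
 *
 * Resets the hardware, cleans up the tx and rx paths and, when
 * 'global' is set, releases the DMA resources.  Rx buffers still held
 * by the upper layers are tracked so they can be freed later.
 */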
1779 static void
1780 e1000g_stop(struct e1000g *Adapter, boolean_t global)
1781 {
1782 	private_devi_list_t *devi_node;
1783 	e1000g_rx_data_t *rx_data;
1784 	int result;
1785 
1786 	Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT;
1787 
1788 	/* Stop the chip and release pending resources */
1789 
1790 	/* Tell firmware driver is no longer in control */
1791 	e1000g_release_driver_control(&Adapter->shared);
1792 
1793 	e1000g_clear_all_interrupts(Adapter);
1794 
1795 	mutex_enter(&e1000g_nvm_lock);
1796 	result = e1000_reset_hw(&Adapter->shared);
1797 	mutex_exit(&e1000g_nvm_lock);
1798 
1799 	if (result != E1000_SUCCESS) {
1800 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1801 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1802 	}
1803 
1804 	/* Release resources still held by the TX descriptors */
1805 	e1000g_tx_clean(Adapter);
1806 
1807 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
1808 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1809 
1810 	/* Clean the pending rx jumbo packet fragment */
1811 	e1000g_rx_clean(Adapter);
1812 
1813 	if (global) {
1814 		e1000g_release_dma_resources(Adapter);
1815 
1816 		mutex_enter(&e1000g_rx_detach_lock);
1817 		rx_data = Adapter->rx_ring->rx_data;
1818 		rx_data->flag |= E1000G_RX_STOPPED;
1819 
1820 		if (rx_data->pending_count == 0) {
1821 			e1000g_free_rx_pending_buffers(rx_data);
1822 			e1000g_free_rx_data(rx_data);
1823 		} else {
1824 			devi_node = rx_data->priv_devi_node;
1825 			if (devi_node != NULL)
1826 				atomic_inc_32(&devi_node->pending_rx_count);
1827 			else
1828 				atomic_inc_32(&Adapter->pending_rx_count);
1829 		}
1830 		mutex_exit(&e1000g_rx_detach_lock);
1831 	}
1832 
1833 	if (Adapter->link_state == LINK_STATE_UP) {
1834 		Adapter->link_state = LINK_STATE_UNKNOWN;
1835 		mac_link_update(Adapter->mh, Adapter->link_state);
1836 	}
1837 }
1838 
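/*
 * e1000g_rx_clean - free any pending rx jumbo packet fragment
 */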
1839 static void
1840 e1000g_rx_clean(struct e1000g *Adapter)
1841 {
1842 	e1000g_rx_data_t *rx_data = Adapter->rx_ring->rx_data;
1843 
1844 	if (rx_data == NULL)
1845 		return;
1846 
1847 	if (rx_data->rx_mblk != NULL) {
1848 		freemsg(rx_data->rx_mblk);
1849 		rx_data->rx_mblk = NULL;
1850 		rx_data->rx_mblk_tail = NULL;
1851 		rx_data->rx_mblk_len = 0;
1852 	}
1853 }
1854 
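/*
 * e1000g_tx_clean - release resources still held by the tx descriptors
 *
 * Frees the mblks attached to the used tx software packets, returns the
 * packets to the free list and resets the tx descriptor pointers.
 */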
1855 static void
1856 e1000g_tx_clean(struct e1000g *Adapter)
1857 {
1858 	e1000g_tx_ring_t *tx_ring;
1859 	p_tx_sw_packet_t packet;
1860 	mblk_t *mp;
1861 	mblk_t *nmp;
1862 	uint32_t packet_count;
1863 
1864 	tx_ring = Adapter->tx_ring;
1865 
1866 	/*
1867 	 * We don't need to protect the lists with the usedlist_lock
1868 	 * and freelist_lock here, because they are already protected
1869 	 * by the chip_lock.
1870 	 */
1871 	mp = NULL;
1872 	nmp = NULL;
1873 	packet_count = 0;
1874 	packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list);
1875 	while (packet != NULL) {
1876 		if (packet->mp != NULL) {
1877 			/* Assemble the message chain */
1878 			if (mp == NULL) {
1879 				mp = packet->mp;
1880 				nmp = packet->mp;
1881 			} else {
1882 				nmp->b_next = packet->mp;
1883 				nmp = packet->mp;
1884 			}
1885 			/* Disconnect the message from the sw packet */
1886 			packet->mp = NULL;
1887 		}
1888 
1889 		e1000g_free_tx_swpkt(packet);
1890 		packet_count++;
1891 
1892 		packet = (p_tx_sw_packet_t)
1893 		    QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link);
1894 	}
1895 
1896 	if (mp != NULL)
1897 		freemsgchain(mp);
1898 
1899 	if (packet_count > 0) {
1900 		QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list);
1901 		QUEUE_INIT_LIST(&tx_ring->used_list);
1902 
1903 		/* Setup TX descriptor pointers */
1904 		tx_ring->tbd_next = tx_ring->tbd_first;
1905 		tx_ring->tbd_oldest = tx_ring->tbd_first;
1906 
1907 		/* Setup our HW Tx Head & Tail descriptor pointers */
1908 		E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
1909 		E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
1910 	}
1911 }
1912 
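/*
 * e1000g_tx_drain - wait up to TX_DRAIN_TIME ms for pending transmits
 * to complete; returns B_TRUE if the used list drained.
 */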
1913 static boolean_t
1914 e1000g_tx_drain(struct e1000g *Adapter)
1915 {
1916 	int i;
1917 	boolean_t done;
1918 	e1000g_tx_ring_t *tx_ring;
1919 
1920 	tx_ring = Adapter->tx_ring;
1921 
1922 	/* Allow up to TX_DRAIN_TIME ms for pending transmits to complete. */
1923 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1924 		mutex_enter(&tx_ring->usedlist_lock);
1925 		done = IS_QUEUE_EMPTY(&tx_ring->used_list);
1926 		mutex_exit(&tx_ring->usedlist_lock);
1927 
1928 		if (done)
1929 			break;
1930 
1931 		msec_delay(1);
1932 	}
1933 
1934 	return (done);
1935 }
1936 
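/*
 * e1000g_rx_drain - wait up to RX_DRAIN_TIME ms for rx buffers held by
 * the upper layers to be returned; returns B_TRUE on success.
 */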
1937 static boolean_t
1938 e1000g_rx_drain(struct e1000g *Adapter)
1939 {
1940 	int i;
1941 	boolean_t done;
1942 
1943 	/*
1944 	 * Allow up to RX_DRAIN_TIME ms for pending rx buffers to be released.
1945 	 */
1946 	for (i = 0; i < RX_DRAIN_TIME; i++) {
1947 		done = (Adapter->pending_rx_count == 0);
1948 
1949 		if (done)
1950 			break;
1951 
1952 		msec_delay(1);
1953 	}
1954 
1955 	return (done);
1956 }
1957 
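/*
 * e1000g_reset_adapter - local reset of the chip
 *
 * Stops and restarts the chip without releasing the rx data and DMA
 * resources.  Does nothing if the adapter is not started.
 */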
1958 static boolean_t
1959 e1000g_reset_adapter(struct e1000g *Adapter)
1960 {
1961 	/* Disable and stop all the timers */
1962 	disable_watchdog_timer(Adapter);
1963 	stop_link_timer(Adapter);
1964 	stop_82547_timer(Adapter->tx_ring);
1965 
1966 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1967 
1968 	if (!(Adapter->e1000g_state & E1000G_STARTED)) {
1969 		rw_exit(&Adapter->chip_lock);
1970 		return (B_TRUE);
1971 	}
1972 
1973 	e1000g_stop(Adapter, B_FALSE);
1974 
1975 	if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
1976 		rw_exit(&Adapter->chip_lock);
1977 		e1000g_log(Adapter, CE_WARN, "Reset failed");
1978 		return (B_FALSE);
1979 	}
1980 
1981 	rw_exit(&Adapter->chip_lock);
1982 
1983 	/* Enable and start the watchdog timer */
1984 	enable_watchdog_timer(Adapter);
1985 
1986 	return (B_TRUE);
1987 }
1988 
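/*
 * e1000g_global_reset - full reset of the chip
 *
 * Stops and restarts the chip, releasing and reallocating the rx data
 * and DMA resources.
 */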
1989 boolean_t
1990 e1000g_global_reset(struct e1000g *Adapter)
1991 {
1992 	/* Disable and stop all the timers */
1993 	disable_watchdog_timer(Adapter);
1994 	stop_link_timer(Adapter);
1995 	stop_82547_timer(Adapter->tx_ring);
1996 
1997 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1998 
1999 	e1000g_stop(Adapter, B_TRUE);
2000 
2001 	Adapter->init_count = 0;
2002 
2003 	if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
2004 		rw_exit(&Adapter->chip_lock);
2005 		e1000g_log(Adapter, CE_WARN, "Reset failed");
2006 		return (B_FALSE);
2007 	}
2008 
2009 	rw_exit(&Adapter->chip_lock);
2010 
2011 	/* Enable and start the watchdog timer */
2012 	enable_watchdog_timer(Adapter);
2013 
2014 	return (B_TRUE);
2015 }
2016 
2017 /*
2018  * e1000g_intr_pciexpress - ISR for PCI Express chipsets
2019  *
2020  * This interrupt service routine is for PCI-Express adapters.
2021  * The ICR contents are valid only when the E1000_ICR_INT_ASSERTED
2022  * bit is set.
2023  */
2024 static uint_t
2025 e1000g_intr_pciexpress(caddr_t arg)
2026 {
2027 	struct e1000g *Adapter;
2028 	uint32_t icr;
2029 
2030 	Adapter = (struct e1000g *)(uintptr_t)arg;
2031 	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2032 
2033 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2034 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2035 
2036 	if (icr & E1000_ICR_INT_ASSERTED) {
2037 		/*
2038 		 * E1000_ICR_INT_ASSERTED bit was set:
2039 		 * Read(Clear) the ICR, claim this interrupt,
2040 		 * look for work to do.
2041 		 */
2042 		e1000g_intr_work(Adapter, icr);
2043 		return (DDI_INTR_CLAIMED);
2044 	} else {
2045 		/*
2046 		 * E1000_ICR_INT_ASSERTED bit was not set:
2047 		 * Don't claim this interrupt, return immediately.
2048 		 */
2049 		return (DDI_INTR_UNCLAIMED);
2050 	}
2051 }
2052 
2053 /*
2054  * e1000g_intr - ISR for PCI/PCI-X chipsets
2055  *
2056  * This interrupt service routine is for PCI/PCI-X adapters.
2057  * We check the ICR contents regardless of whether the
2058  * E1000_ICR_INT_ASSERTED bit is set.
2059  */
2060 static uint_t
2061 e1000g_intr(caddr_t arg)
2062 {
2063 	struct e1000g *Adapter;
2064 	uint32_t icr;
2065 
2066 	Adapter = (struct e1000g *)(uintptr_t)arg;
2067 	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2068 
2069 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2070 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2071 
2072 	if (icr) {
2073 		/*
2074 		 * Any bit was set in ICR:
2075 		 * Read(Clear) the ICR, claim this interrupt,
2076 		 * look for work to do.
2077 		 */
2078 		e1000g_intr_work(Adapter, icr);
2079 		return (DDI_INTR_CLAIMED);
2080 	} else {
2081 		/*
2082 		 * No bit was set in ICR:
2083 		 * Don't claim this interrupt, return immediately.
2084 		 */
2085 		return (DDI_INTR_UNCLAIMED);
2086 	}
2087 }
2088 
2089 /*
2090  * e1000g_intr_work - actual processing of ISR
2091  *
2092  * Read(clear) the ICR contents and call appropriate interrupt
2093  * processing routines.
2094  */
2095 static void
2096 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr)
2097 {
2098 	struct e1000_hw *hw;
2099 	e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
2100 	hw = &Adapter->shared;
2101 
2102 	Adapter->rx_pkt_cnt = 0;
2103 	Adapter->tx_pkt_cnt = 0;
2104 
2105 	rw_enter(&Adapter->chip_lock, RW_READER);
2106 
2107 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2108 		rw_exit(&Adapter->chip_lock);
2109 		return;
2110 	}
2111 	/*
2112 	 * Here we need to check the "e1000g_state" flag within the chip_lock to
2113 	 * ensure the receive routine will not execute when the adapter is
2114 	 * being reset.
2115 	 */
2116 	if (!(Adapter->e1000g_state & E1000G_STARTED)) {
2117 		rw_exit(&Adapter->chip_lock);
2118 		return;
2119 	}
2120 
2121 	if (icr & E1000_ICR_RXT0) {
2122 		mblk_t			*mp = NULL;
2123 		mblk_t			*tail = NULL;
2124 		e1000g_rx_ring_t	*rx_ring;
2125 
2126 		rx_ring = Adapter->rx_ring;
2127 		mutex_enter(&rx_ring->rx_lock);
2128 		/*
2129 		 * With legacy interrupts, it is possible that a single
2130 		 * interrupt covers both Rx and Tx. In that case, if the
2131 		 * poll flag is set, we should not do any Rx processing
2132 		 * here.
2133 		 */
2134 		if (!rx_ring->poll_flag)
2135 			mp = e1000g_receive(rx_ring, &tail,
2136 			    E1000G_CHAIN_NO_LIMIT);
2137 		mutex_exit(&rx_ring->rx_lock);
2138 		rw_exit(&Adapter->chip_lock);
2139 		if (mp != NULL)
2140 			mac_rx_ring(Adapter->mh, rx_ring->mrh,
2141 			    mp, rx_ring->ring_gen_num);
2142 	} else
2143 		rw_exit(&Adapter->chip_lock);
2144 
2145 	if (icr & E1000_ICR_TXDW) {
2146 		if (!Adapter->tx_intr_enable)
2147 			e1000g_clear_tx_interrupt(Adapter);
2148 
2149 		/* Recycle the tx descriptors */
2150 		rw_enter(&Adapter->chip_lock, RW_READER);
2151 		(void) e1000g_recycle(tx_ring);
2152 		E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr);
2153 		rw_exit(&Adapter->chip_lock);
2154 
2155 		if (tx_ring->resched_needed &&
2156 		    (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) {
2157 			tx_ring->resched_needed = B_FALSE;
2158 			mac_tx_update(Adapter->mh);
2159 			E1000G_STAT(tx_ring->stat_reschedule);
2160 		}
2161 	}
2162 
2163 	/*
2164 	 * The receive sequence error (RXSEQ) and link status change (LSC)
2165 	 * interrupts are checked to detect that the cable has been pulled
2166 	 * out. For the Wiseman 2.0 silicon, the receive sequence error
2167 	 * interrupt is an indication that the cable is not connected.
2168 	 */
2169 	if ((icr & E1000_ICR_RXSEQ) ||
2170 	    (icr & E1000_ICR_LSC) ||
2171 	    (icr & E1000_ICR_GPI_EN1)) {
2172 		boolean_t link_changed;
2173 		timeout_id_t tid = 0;
2174 
2175 		stop_watchdog_timer(Adapter);
2176 
2177 		rw_enter(&Adapter->chip_lock, RW_WRITER);
2178 
2179 		/*
2180 		 * Because we got a link-status-change interrupt, force
2181 		 * e1000_check_for_link() to look at phy
2182 		 */
2183 		Adapter->shared.mac.get_link_status = B_TRUE;
2184 
2185 		/* e1000g_link_check takes care of link status change */
2186 		link_changed = e1000g_link_check(Adapter);
2187 
2188 		/* Get new phy state */
2189 		e1000g_get_phy_state(Adapter);
2190 
2191 		/*
2192 		 * If the link timer has not timed out, we'll not notify
2193 		 * the upper layer with any link state until the link is up.
2194 		 */
2195 		if (link_changed && !Adapter->link_complete) {
2196 			if (Adapter->link_state == LINK_STATE_UP) {
2197 				mutex_enter(&Adapter->link_lock);
2198 				Adapter->link_complete = B_TRUE;
2199 				tid = Adapter->link_tid;
2200 				Adapter->link_tid = 0;
2201 				mutex_exit(&Adapter->link_lock);
2202 			} else {
2203 				link_changed = B_FALSE;
2204 			}
2205 		}
2206 		rw_exit(&Adapter->chip_lock);
2207 
2208 		if (link_changed) {
2209 			if (tid != 0)
2210 				(void) untimeout(tid);
2211 
2212 			/*
2213 			 * Workaround for esb2. Data stuck in fifo on a link
2214 			 * down event. Stop receiver here and reset in watchdog.
2215 			 */
2216 			if ((Adapter->link_state == LINK_STATE_DOWN) &&
2217 			    (Adapter->shared.mac.type == e1000_80003es2lan)) {
2218 				uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
2219 				E1000_WRITE_REG(hw, E1000_RCTL,
2220 				    rctl & ~E1000_RCTL_EN);
2221 				e1000g_log(Adapter, CE_WARN,
2222 				    "ESB2 receiver disabled");
2223 				Adapter->esb2_workaround = B_TRUE;
2224 			}
2225 			if (!Adapter->reset_flag)
2226 				mac_link_update(Adapter->mh,
2227 				    Adapter->link_state);
2228 			if (Adapter->link_state == LINK_STATE_UP)
2229 				Adapter->reset_flag = B_FALSE;
2230 		}
2231 
2232 		start_watchdog_timer(Adapter);
2233 	}
2234 }
2235 
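/*
 * e1000g_init_unicst - initialize or restore the unicast address slots
 *
 * On the first initialization the slot table is cleared; on subsequent
 * calls the receive address registers are re-programmed from the saved
 * slots.
 */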
2236 static void
2237 e1000g_init_unicst(struct e1000g *Adapter)
2238 {
2239 	struct e1000_hw *hw;
2240 	int slot;
2241 
2242 	hw = &Adapter->shared;
2243 
2244 	if (Adapter->init_count == 0) {
2245 		/* Initialize the multiple unicast addresses */
2246 		Adapter->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
2247 
2248 		/* Workaround for an erratum of 82571 chipset */
2249 		if ((hw->mac.type == e1000_82571) &&
2250 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2251 			Adapter->unicst_total--;
2252 
2253 		Adapter->unicst_avail = Adapter->unicst_total;
2254 
2255 		for (slot = 0; slot < Adapter->unicst_total; slot++) {
2256 			/* Clear both the flag and MAC address */
2257 			Adapter->unicst_addr[slot].reg.high = 0;
2258 			Adapter->unicst_addr[slot].reg.low = 0;
2259 		}
2260 	} else {
2261 		/* Workaround for an erratum of 82571 chipset */
2262 		if ((hw->mac.type == e1000_82571) &&
2263 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2264 			e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY);
2265 
2266 		/* Re-configure the RAR registers */
2267 		for (slot = 0; slot < Adapter->unicst_total; slot++)
2268 			if (Adapter->unicst_addr[slot].mac.set == 1)
2269 				e1000_rar_set(hw,
2270 				    Adapter->unicst_addr[slot].mac.addr, slot);
2271 	}
2272 
2273 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2274 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2275 }
2276 
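/*
 * e1000g_unicst_set - program or clear one receive address register slot
 *
 * A NULL mac_addr clears the slot.  Includes workarounds for the
 * Wiseman 2.0 and 82571 (LAA) errata.
 */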
2277 static int
2278 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr,
2279     int slot)
2280 {
2281 	struct e1000_hw *hw;
2282 
2283 	hw = &Adapter->shared;
2284 
2285 	/*
2286 	 * The first revision of Wiseman silicon (rev 2.0) has an erratum
2287 	 * that requires the receiver to be in reset when any of the
2288 	 * receive address registers (RAR regs) are accessed.  The first
2289 	 * rev of Wiseman silicon also requires MWI to be disabled when
2290 	 * a global reset or a receive reset is issued.  So before we
2291 	 * initialize the RARs, we check the rev of the Wiseman controller
2292 	 * and work around any necessary HW errata.
2293 	 */
2294 	if ((hw->mac.type == e1000_82542) &&
2295 	    (hw->revision_id == E1000_REVISION_2)) {
2296 		e1000_pci_clear_mwi(hw);
2297 		E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2298 		msec_delay(5);
2299 	}
2300 	if (mac_addr == NULL) {
2301 		E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0);
2302 		E1000_WRITE_FLUSH(hw);
2303 		E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0);
2304 		E1000_WRITE_FLUSH(hw);
2305 		/* Clear both the flag and MAC address */
2306 		Adapter->unicst_addr[slot].reg.high = 0;
2307 		Adapter->unicst_addr[slot].reg.low = 0;
2308 	} else {
2309 		bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr,
2310 		    ETHERADDRL);
2311 		e1000_rar_set(hw, (uint8_t *)mac_addr, slot);
2312 		Adapter->unicst_addr[slot].mac.set = 1;
2313 	}
2314 
2315 	/* Workaround for an erratum of 82571 chipset */
2316 	if (slot == 0) {
2317 		if ((hw->mac.type == e1000_82571) &&
2318 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2319 			if (mac_addr == NULL) {
2320 				E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2321 				    slot << 1, 0);
2322 				E1000_WRITE_FLUSH(hw);
2323 				E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2324 				    (slot << 1) + 1, 0);
2325 				E1000_WRITE_FLUSH(hw);
2326 			} else {
2327 				e1000_rar_set(hw, (uint8_t *)mac_addr,
2328 				    LAST_RAR_ENTRY);
2329 			}
2330 	}
2331 
2332 	/*
2333 	 * If we are using Wiseman rev 2.0 silicon, we will have previously
2334 	 * put the receiver in reset, and disabled MWI, to work around some
2335 	 * HW errata.  Now we should take the receiver out of reset, and
2336 	 * re-enable MWI if it was previously enabled by the PCI BIOS.
2337 	 */
2338 	if ((hw->mac.type == e1000_82542) &&
2339 	    (hw->revision_id == E1000_REVISION_2)) {
2340 		E1000_WRITE_REG(hw, E1000_RCTL, 0);
2341 		msec_delay(1);
2342 		if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2343 			e1000_pci_set_mwi(hw);
2344 		e1000g_rx_setup(Adapter);
2345 	}
2346 
2347 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2348 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2349 		return (EIO);
2350 	}
2351 
2352 	return (0);
2353 }
2354 
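/*
 * multicst_add - add a multicast address to the software table and
 * update the hardware multicast filter.  The table is grown in
 * MCAST_ALLOC_SIZE chunks as needed.
 */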
2355 static int
2356 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr)
2357 {
2358 	struct e1000_hw *hw = &Adapter->shared;
2359 	struct ether_addr *newtable;
2360 	size_t new_len;
2361 	size_t old_len;
2362 	int res = 0;
2363 
2364 	if ((multiaddr[0] & 01) == 0) {
2365 		res = EINVAL;
2366 		e1000g_log(Adapter, CE_WARN, "Illegal multicast address");
2367 		goto done;
2368 	}
2369 
2370 	if (Adapter->mcast_count >= Adapter->mcast_max_num) {
2371 		res = ENOENT;
2372 		e1000g_log(Adapter, CE_WARN,
2373 		    "Adapter requested more than %d mcast addresses",
2374 		    Adapter->mcast_max_num);
2375 		goto done;
2376 	}
2377 
2378 
2379 	if (Adapter->mcast_count == Adapter->mcast_alloc_count) {
2380 		old_len = Adapter->mcast_alloc_count *
2381 		    sizeof (struct ether_addr);
2382 		new_len = (Adapter->mcast_alloc_count + MCAST_ALLOC_SIZE) *
2383 		    sizeof (struct ether_addr);
2384 
2385 		newtable = kmem_alloc(new_len, KM_NOSLEEP);
2386 		if (newtable == NULL) {
2387 			res = ENOMEM;
2388 			e1000g_log(Adapter, CE_WARN,
2389 			    "Not enough memory to alloc mcast table");
2390 			goto done;
2391 		}
2392 
2393 		if (Adapter->mcast_table != NULL) {
2394 			bcopy(Adapter->mcast_table, newtable, old_len);
2395 			kmem_free(Adapter->mcast_table, old_len);
2396 		}
2397 		Adapter->mcast_alloc_count += MCAST_ALLOC_SIZE;
2398 		Adapter->mcast_table = newtable;
2399 	}
2400 
2401 	bcopy(multiaddr,
2402 	    &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL);
2403 	Adapter->mcast_count++;
2404 
2405 	/*
2406 	 * Update the MC table in the hardware
2407 	 */
2408 	e1000g_clear_interrupt(Adapter);
2409 
2410 	e1000g_setup_multicast(Adapter);
2411 
2412 	if ((hw->mac.type == e1000_82542) &&
2413 	    (hw->revision_id == E1000_REVISION_2))
2414 		e1000g_rx_setup(Adapter);
2415 
2416 	e1000g_mask_interrupt(Adapter);
2417 
2418 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2419 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2420 		res = EIO;
2421 	}
2422 
2423 done:
2424 	return (res);
2425 }
2426 
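/*
 * multicst_remove - remove a multicast address from the software table,
 * shrinking the table when enough entries are free, and update the
 * hardware multicast filter.
 */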
2427 static int
2428 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr)
2429 {
2430 	struct e1000_hw *hw = &Adapter->shared;
2431 	struct ether_addr *newtable;
2432 	size_t new_len;
2433 	size_t old_len;
2434 	unsigned i;
2435 
2436 	for (i = 0; i < Adapter->mcast_count; i++) {
2437 		if (bcmp(multiaddr, &Adapter->mcast_table[i],
2438 		    ETHERADDRL) == 0) {
2439 			for (i++; i < Adapter->mcast_count; i++) {
2440 				Adapter->mcast_table[i - 1] =
2441 				    Adapter->mcast_table[i];
2442 			}
2443 			Adapter->mcast_count--;
2444 			break;
2445 		}
2446 	}
2447 
2448 	if ((Adapter->mcast_alloc_count - Adapter->mcast_count) >
2449 	    MCAST_ALLOC_SIZE) {
2450 		old_len = Adapter->mcast_alloc_count *
2451 		    sizeof (struct ether_addr);
2452 		new_len = (Adapter->mcast_alloc_count - MCAST_ALLOC_SIZE) *
2453 		    sizeof (struct ether_addr);
2454 
2455 		newtable = kmem_alloc(new_len, KM_NOSLEEP);
2456 		if (newtable != NULL) {
2457 			bcopy(Adapter->mcast_table, newtable, new_len);
2458 			kmem_free(Adapter->mcast_table, old_len);
2459 
2460 			Adapter->mcast_alloc_count -= MCAST_ALLOC_SIZE;
2461 			Adapter->mcast_table = newtable;
2462 		}
2463 	}
2464 
2465 	/*
2466 	 * Update the MC table in the hardware
2467 	 */
2468 	e1000g_clear_interrupt(Adapter);
2469 
2470 	e1000g_setup_multicast(Adapter);
2471 
2472 	if ((hw->mac.type == e1000_82542) &&
2473 	    (hw->revision_id == E1000_REVISION_2))
2474 		e1000g_rx_setup(Adapter);
2475 
2476 	e1000g_mask_interrupt(Adapter);
2477 
2478 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2479 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2480 		return (EIO);
2481 	}
2482 
2483 	return (0);
2484 }
2485 
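/*
 * e1000g_release_multicast - free the software multicast table
 */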
2486 static void
2487 e1000g_release_multicast(struct e1000g *Adapter)
2488 {
2489 	if (Adapter->mcast_table != NULL) {
2490 		kmem_free(Adapter->mcast_table,
2491 		    Adapter->mcast_alloc_count * sizeof (struct ether_addr));
2492 		Adapter->mcast_table = NULL;
2493 	}
2494 }
2495 
2496 /*
2497  * e1000g_setup_multicast - setup multicast data structures
2498  *
2499  * This routine initializes all of the multicast related structures.
2500  */
2501 void
2502 e1000g_setup_multicast(struct e1000g *Adapter)
2503 {
2504 	uint8_t *mc_addr_list;
2505 	uint32_t mc_addr_count;
2506 	uint32_t rctl;
2507 	struct e1000_hw *hw;
2508 
2509 	hw = &Adapter->shared;
2510 
2511 	/*
2512 	 * The e1000g has the ability to do perfect filtering of 16
2513 	 * addresses. The driver uses one of the e1000g's 16 receive
2514 	 * address registers for its node/network/mac/individual address.
2515 	 * So, we have room for up to 15 multicast addresses in the CAM;
2516 	 * additional MC addresses are handled by the MTA (Multicast Table
2517 	 * Array).
2518 	 */
2519 
2520 	rctl = E1000_READ_REG(hw, E1000_RCTL);
2521 
2522 	mc_addr_list = (uint8_t *)Adapter->mcast_table;
2523 
2524 	ASSERT(Adapter->mcast_count <= Adapter->mcast_max_num);
2525 
2526 	mc_addr_count = Adapter->mcast_count;
2527 	/*
2528 	 * The Wiseman 2.0 silicon has an erratum by which the receiver will
2529 	 * hang while writing to the receive address registers if the receiver
2530 	 * is not in reset before writing to the registers. Updating the RARs
2531 	 * is done while setting up the multicast table, hence the receiver
2532 	 * has to be put in reset before updating the multicast table and
2533 	 * then taken out of reset at the end.
2534 	 */
2535 	/*
2536 	 * If MWI was enabled, disable it before issuing the global
2537 	 * reset to the hardware.
2538 	 */
2539 	/*
2540 	 * Only required for WISEMAN_2_0.
2541 	 */
2542 	if ((hw->mac.type == e1000_82542) &&
2543 	    (hw->revision_id == E1000_REVISION_2)) {
2544 		e1000_pci_clear_mwi(hw);
2545 		/*
2546 		 * The e1000g must be in reset before changing any RA
2547 		 * registers. Reset receive unit.  The chip will remain in
2548 		 * the reset state until software explicitly restarts it.
2549 		 */
2550 		E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2551 		/* Allow receiver time to go in to reset */
2552 		msec_delay(5);
2553 	}
2554 
2555 	e1000_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2556 	    Adapter->unicst_total, hw->mac.rar_entry_count);
2557 
2558 	/*
2559 	 * Only for Wiseman_2_0:
2560 	 * If MWI was enabled, re-enable it after issuing (as we
2561 	 * disabled it above) the receive reset command.
2562 	 * Wainwright does not have a receive reset command; the only thing
2563 	 * close to it is a global reset, which would also require tx setup.
2564 	 */
2565 	if ((hw->mac.type == e1000_82542) &&
2566 	    (hw->revision_id == E1000_REVISION_2)) {
2567 		/*
2568 		 * If MWI was enabled, re-enable it after issuing the
2569 		 * global or receive reset to the hardware.
2570 		 */
2571 
2572 		/*
2573 		 * Take the receiver out of reset by clearing the
2574 		 * E1000_RCTL_RST bit (and all other bits).
2575 		 */
2576 		E1000_WRITE_REG(hw, E1000_RCTL, 0);
2577 		msec_delay(5);
2578 		if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2579 			e1000_pci_set_mwi(hw);
2580 	}
2581 
2582 	/*
2583 	 * Restore original value
2584 	 */
2585 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2586 }
2587 
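/*
 * e1000g_m_multicst - multicast entry point for the MAC layer
 */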
2588 int
2589 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
2590 {
2591 	struct e1000g *Adapter = (struct e1000g *)arg;
2592 	int result;
2593 
2594 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2595 
2596 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2597 		result = ECANCELED;
2598 		goto done;
2599 	}
2600 
2601 	result = (add) ? multicst_add(Adapter, addr)
2602 	    : multicst_remove(Adapter, addr);
2603 
2604 done:
2605 	rw_exit(&Adapter->chip_lock);
2606 	return (result);
2607 
2608 }
2609 
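/*
 * e1000g_m_promisc - set or clear promiscuous mode via the RCTL register
 */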
2610 int
2611 e1000g_m_promisc(void *arg, boolean_t on)
2612 {
2613 	struct e1000g *Adapter = (struct e1000g *)arg;
2614 	uint32_t rctl;
2615 
2616 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2617 
2618 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2619 		rw_exit(&Adapter->chip_lock);
2620 		return (ECANCELED);
2621 	}
2622 
2623 	rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
2624 
2625 	if (on)
2626 		rctl |=
2627 		    (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
2628 	else
2629 		rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
2630 
2631 	E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
2632 
2633 	Adapter->e1000g_promisc = on;
2634 
2635 	rw_exit(&Adapter->chip_lock);
2636 
2637 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2638 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2639 		return (EIO);
2640 	}
2641 
2642 	return (0);
2643 }
2644 
2645 /*
2646  * Entry points to enable and disable interrupts at the granularity of
2647  * a group.
2648  * They turn poll_mode for the whole adapter on and off to enable or
2649  * override the ring-level polling control over the hardware interrupts.
2650  */
2651 static int
2652 e1000g_rx_group_intr_enable(mac_intr_handle_t arg)
2653 {
2654 	struct e1000g		*adapter = (struct e1000g *)arg;
2655 	e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2656 
2657 	/*
2658 	 * Later interrupts at the granularity of this ring will
2659 	 * invoke mac_rx() with NULL, indicating the need for another
2660 	 * software classification.
2661 	 * We have a single ring usable per adapter now, so we only need to
2662 	 * reset the rx handle for that one.
2663 	 * When more RX rings can be used, we should update each one of them.
2664 	 */
2665 	mutex_enter(&rx_ring->rx_lock);
2666 	rx_ring->mrh = NULL;
2667 	adapter->poll_mode = B_FALSE;
2668 	mutex_exit(&rx_ring->rx_lock);
2669 	return (0);
2670 }
2671 
2672 static int
2673 e1000g_rx_group_intr_disable(mac_intr_handle_t arg)
2674 {
2675 	struct e1000g *adapter = (struct e1000g *)arg;
2676 	e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2677 
2678 	mutex_enter(&rx_ring->rx_lock);
2679 
2680 	/*
2681 	 * Later interrupts at the granularity of this ring will
2682 	 * invoke mac_rx() with the handle for this ring.
2683 	 */
2684 	adapter->poll_mode = B_TRUE;
2685 	rx_ring->mrh = rx_ring->mrh_init;
2686 	mutex_exit(&rx_ring->rx_lock);
2687 	return (0);
2688 }
2689 
2690 /*
2691  * Entry points to enable and disable interrupts at the granularity of
2692  * a ring.
2693  * The adapter's poll_mode controls whether we actually proceed with
2694  * hardware interrupt toggling.
2695  */
2696 static int
2697 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh)
2698 {
2699 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)intrh;
2700 	struct e1000g 		*adapter = rx_ring->adapter;
2701 	struct e1000_hw 	*hw = &adapter->shared;
2702 	uint32_t		intr_mask;
2703 
2704 	rw_enter(&adapter->chip_lock, RW_READER);
2705 
2706 	if (adapter->e1000g_state & E1000G_SUSPENDED) {
2707 		rw_exit(&adapter->chip_lock);
2708 		return (0);
2709 	}
2710 
2711 	mutex_enter(&rx_ring->rx_lock);
2712 	rx_ring->poll_flag = 0;
2713 	mutex_exit(&rx_ring->rx_lock);
2714 
2715 	/* Rx interrupt enabling for MSI and legacy */
2716 	intr_mask = E1000_READ_REG(hw, E1000_IMS);
2717 	intr_mask |= E1000_IMS_RXT0;
2718 	E1000_WRITE_REG(hw, E1000_IMS, intr_mask);
2719 	E1000_WRITE_FLUSH(hw);
2720 
2721 	/* Trigger a Rx interrupt to check Rx ring */
2722 	E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
2723 	E1000_WRITE_FLUSH(hw);
2724 
2725 	rw_exit(&adapter->chip_lock);
2726 	return (0);
2727 }
2728 
2729 static int
2730 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh)
2731 {
2732 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)intrh;
2733 	struct e1000g 		*adapter = rx_ring->adapter;
2734 	struct e1000_hw 	*hw = &adapter->shared;
2735 
2736 	rw_enter(&adapter->chip_lock, RW_READER);
2737 
2738 	if (adapter->e1000g_state & E1000G_SUSPENDED) {
2739 		rw_exit(&adapter->chip_lock);
2740 		return (0);
2741 	}
2742 	mutex_enter(&rx_ring->rx_lock);
2743 	rx_ring->poll_flag = 1;
2744 	mutex_exit(&rx_ring->rx_lock);
2745 
2746 	/* Rx interrupt disabling for MSI and legacy */
2747 	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
2748 	E1000_WRITE_FLUSH(hw);
2749 
2750 	rw_exit(&adapter->chip_lock);
2751 	return (0);
2752 }
2753 
2754 /*
2755  * e1000g_unicst_find - Find the slot for the specified unicast address
2756  */
2757 static int
2758 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr)
2759 {
2760 	int slot;
2761 
2762 	for (slot = 0; slot < Adapter->unicst_total; slot++) {
2763 		if ((Adapter->unicst_addr[slot].mac.set == 1) &&
2764 		    (bcmp(Adapter->unicst_addr[slot].mac.addr,
2765 		    mac_addr, ETHERADDRL) == 0))
2766 			return (slot);
2767 	}
2768 
2769 	return (-1);
2770 }
2771 
2772 /*
2773  * Entry points to add a MAC address to, and remove one from, a ring group.
2774  * The caller adds and removes MAC addresses in the hardware filter via
2775  * these two routines.
2776  */
2777 
2778 static int
2779 e1000g_addmac(void *arg, const uint8_t *mac_addr)
2780 {
2781 	struct e1000g *Adapter = (struct e1000g *)arg;
2782 	int slot, err;
2783 
2784 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2785 
2786 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2787 		rw_exit(&Adapter->chip_lock);
2788 		return (ECANCELED);
2789 	}
2790 
2791 	if (e1000g_unicst_find(Adapter, mac_addr) != -1) {
2792 		/* The same address is already in slot */
2793 		rw_exit(&Adapter->chip_lock);
2794 		return (0);
2795 	}
2796 
2797 	if (Adapter->unicst_avail == 0) {
2798 		/* no slots available */
2799 		rw_exit(&Adapter->chip_lock);
2800 		return (ENOSPC);
2801 	}
2802 
2803 	/* Search for a free slot */
2804 	for (slot = 0; slot < Adapter->unicst_total; slot++) {
2805 		if (Adapter->unicst_addr[slot].mac.set == 0)
2806 			break;
2807 	}
2808 	ASSERT(slot < Adapter->unicst_total);
2809 
2810 	err = e1000g_unicst_set(Adapter, mac_addr, slot);
2811 	if (err == 0)
2812 		Adapter->unicst_avail--;
2813 
2814 	rw_exit(&Adapter->chip_lock);
2815 
2816 	return (err);
2817 }
2818 
2819 static int
2820 e1000g_remmac(void *arg, const uint8_t *mac_addr)
2821 {
2822 	struct e1000g *Adapter = (struct e1000g *)arg;
2823 	int slot, err;
2824 
2825 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2826 
2827 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2828 		rw_exit(&Adapter->chip_lock);
2829 		return (ECANCELED);
2830 	}
2831 
2832 	slot = e1000g_unicst_find(Adapter, mac_addr);
2833 	if (slot == -1) {
2834 		rw_exit(&Adapter->chip_lock);
2835 		return (EINVAL);
2836 	}
2837 
2838 	ASSERT(Adapter->unicst_addr[slot].mac.set);
2839 
2840 	/* Clear this slot */
2841 	err = e1000g_unicst_set(Adapter, NULL, slot);
2842 	if (err == 0)
2843 		Adapter->unicst_avail++;
2844 
2845 	rw_exit(&Adapter->chip_lock);
2846 
2847 	return (err);
2848 }
2849 
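/*
 * e1000g_ring_start - record the generation number the MAC layer
 * associates with this rx ring; it is passed back in mac_rx_ring().
 */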
2850 static int
2851 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
2852 {
2853 	e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh;
2854 
2855 	mutex_enter(&rx_ring->rx_lock);
2856 	rx_ring->ring_gen_num = mr_gen_num;
2857 	mutex_exit(&rx_ring->rx_lock);
2858 	return (0);
2859 }
2860 
2861 /*
2862  * Callback function for the MAC layer to register all rings.
2863  *
2864  * The hardware supports a single group with currently only one ring
2865  * available.
2866  * Though not offering virtualization ability per se, exposing the
2867  * group/ring still enables the polling and interrupt toggling.
2868  */
2869 void
2870 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index,
2871     const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
2872 {
2873 	struct e1000g *Adapter = (struct e1000g *)arg;
2874 	e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
2875 	mac_intr_t *mintr;
2876 
2877 	/*
2878 	 * We advertised only RX group/rings, so the MAC framework shouldn't
2879 	 * ask for anything else.
2880 	 */
2881 	ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0);
2882 
2883 	rx_ring->mrh = rx_ring->mrh_init = rh;
2884 	infop->mri_driver = (mac_ring_driver_t)rx_ring;
2885 	infop->mri_start = e1000g_ring_start;
2886 	infop->mri_stop = NULL;
2887 	infop->mri_poll = e1000g_poll_ring;
2888 
2889 	/* Ring level interrupts */
2890 	mintr = &infop->mri_intr;
2891 	mintr->mi_handle = (mac_intr_handle_t)rx_ring;
2892 	mintr->mi_enable = e1000g_rx_ring_intr_enable;
2893 	mintr->mi_disable = e1000g_rx_ring_intr_disable;
2894 }
2895 
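/*
 * e1000g_fill_group - callback for the MAC layer to register the single
 * rx group, its MAC address handlers and the group-level interrupt
 * entry points.
 */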
2896 static void
2897 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index,
2898     mac_group_info_t *infop, mac_group_handle_t gh)
2899 {
2900 	struct e1000g *Adapter = (struct e1000g *)arg;
2901 	mac_intr_t *mintr;
2902 
2903 	/*
2904 	 * We advertised a single RX ring. Getting a request for anything else
2905 	 * signifies a bug in the MAC framework.
2906 	 */
2907 	ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0);
2908 
2909 	Adapter->rx_group = gh;
2910 
2911 	infop->mgi_driver = (mac_group_driver_t)Adapter;
2912 	infop->mgi_start = NULL;
2913 	infop->mgi_stop = NULL;
2914 	infop->mgi_addmac = e1000g_addmac;
2915 	infop->mgi_remmac = e1000g_remmac;
2916 	infop->mgi_count = 1;
2917 
2918 	/* Group level interrupts */
2919 	mintr = &infop->mgi_intr;
2920 	mintr->mi_handle = (mac_intr_handle_t)Adapter;
2921 	mintr->mi_enable = e1000g_rx_group_intr_enable;
2922 	mintr->mi_disable = e1000g_rx_group_intr_disable;
2923 }
2924 
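/*
 * e1000g_m_getcapab - report hardware checksum, LSO and rings
 * capabilities to the MAC layer.
 */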
2925 static boolean_t
2926 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2927 {
2928 	struct e1000g *Adapter = (struct e1000g *)arg;
2929 
2930 	switch (cap) {
2931 	case MAC_CAPAB_HCKSUM: {
2932 		uint32_t *txflags = cap_data;
2933 
2934 		if (Adapter->tx_hcksum_enable)
2935 			*txflags = HCKSUM_IPHDRCKSUM |
2936 			    HCKSUM_INET_PARTIAL;
2937 		else
2938 			return (B_FALSE);
2939 		break;
2940 	}
2941 
2942 	case MAC_CAPAB_LSO: {
2943 		mac_capab_lso_t *cap_lso = cap_data;
2944 
2945 		if (Adapter->lso_enable) {
2946 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
2947 			cap_lso->lso_basic_tcp_ipv4.lso_max =
2948 			    E1000_LSO_MAXLEN;
2949 		} else
2950 			return (B_FALSE);
2951 		break;
2952 	}
2953 	case MAC_CAPAB_RINGS: {
2954 		mac_capab_rings_t *cap_rings = cap_data;
2955 
2956 		/* No TX rings exposed yet */
2957 		if (cap_rings->mr_type != MAC_RING_TYPE_RX)
2958 			return (B_FALSE);
2959 
2960 		cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2961 		cap_rings->mr_rnum = 1;
2962 		cap_rings->mr_gnum = 1;
2963 		cap_rings->mr_rget = e1000g_fill_ring;
2964 		cap_rings->mr_gget = e1000g_fill_group;
2965 		break;
2966 	}
2967 	default:
2968 		return (B_FALSE);
2969 	}
2970 	return (B_TRUE);
2971 }
2972 
2973 static boolean_t
2974 e1000g_param_locked(mac_prop_id_t pr_num)
2975 {
2976 	/*
2977 	 * All en_* parameters are locked (read-only) while
2978 	 * the device is in any sort of loopback mode ...
2979 	 */
2980 	switch (pr_num) {
2981 		case MAC_PROP_EN_1000FDX_CAP:
2982 		case MAC_PROP_EN_1000HDX_CAP:
2983 		case MAC_PROP_EN_100FDX_CAP:
2984 		case MAC_PROP_EN_100HDX_CAP:
2985 		case MAC_PROP_EN_10FDX_CAP:
2986 		case MAC_PROP_EN_10HDX_CAP:
2987 		case MAC_PROP_AUTONEG:
2988 		case MAC_PROP_FLOWCTRL:
2989 			return (B_TRUE);
2990 	}
2991 	return (B_FALSE);
2992 }
2993 
2994 /*
2995  * callback function for set/get of properties
2996  */
2997 static int
2998 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
2999     uint_t pr_valsize, const void *pr_val)
3000 {
3001 	struct e1000g *Adapter = arg;
3002 	struct e1000_mac_info *mac = &Adapter->shared.mac;
3003 	struct e1000_phy_info *phy = &Adapter->shared.phy;
3004 	struct e1000_fc_info *fc = &Adapter->shared.fc;
3005 	int err = 0;
3006 	link_flowctrl_t flowctrl;
3007 	uint32_t cur_mtu, new_mtu;
3008 	uint64_t tmp = 0;
3009 
3010 	rw_enter(&Adapter->chip_lock, RW_WRITER);
3011 
3012 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3013 		rw_exit(&Adapter->chip_lock);
3014 		return (ECANCELED);
3015 	}
3016 
3017 	if (Adapter->loopback_mode != E1000G_LB_NONE &&
3018 	    e1000g_param_locked(pr_num)) {
3019 		/*
3020 		 * All en_* parameters are locked (read-only)
3021 		 * while the device is in any sort of loopback mode.
3022 		 */
3023 		rw_exit(&Adapter->chip_lock);
3024 		return (EBUSY);
3025 	}
3026 
3027 	switch (pr_num) {
3028 		case MAC_PROP_EN_1000FDX_CAP:
3029 			Adapter->param_en_1000fdx = *(uint8_t *)pr_val;
3030 			Adapter->param_adv_1000fdx = *(uint8_t *)pr_val;
3031 			goto reset;
3032 		case MAC_PROP_EN_100FDX_CAP:
3033 			Adapter->param_en_100fdx = *(uint8_t *)pr_val;
3034 			Adapter->param_adv_100fdx = *(uint8_t *)pr_val;
3035 			goto reset;
3036 		case MAC_PROP_EN_100HDX_CAP:
3037 			Adapter->param_en_100hdx = *(uint8_t *)pr_val;
3038 			Adapter->param_adv_100hdx = *(uint8_t *)pr_val;
3039 			goto reset;
3040 		case MAC_PROP_EN_10FDX_CAP:
3041 			Adapter->param_en_10fdx = *(uint8_t *)pr_val;
3042 			Adapter->param_adv_10fdx = *(uint8_t *)pr_val;
3043 			goto reset;
3044 		case MAC_PROP_EN_10HDX_CAP:
3045 			Adapter->param_en_10hdx = *(uint8_t *)pr_val;
3046 			Adapter->param_adv_10hdx = *(uint8_t *)pr_val;
3047 			goto reset;
3048 		case MAC_PROP_AUTONEG:
3049 			Adapter->param_adv_autoneg = *(uint8_t *)pr_val;
3050 			goto reset;
3051 		case MAC_PROP_FLOWCTRL:
3052 			fc->send_xon = B_TRUE;
3053 			bcopy(pr_val, &flowctrl, sizeof (flowctrl));
3054 
3055 			switch (flowctrl) {
3056 			default:
3057 				err = EINVAL;
3058 				break;
3059 			case LINK_FLOWCTRL_NONE:
3060 				fc->requested_mode = e1000_fc_none;
3061 				break;
3062 			case LINK_FLOWCTRL_RX:
3063 				fc->requested_mode = e1000_fc_rx_pause;
3064 				break;
3065 			case LINK_FLOWCTRL_TX:
3066 				fc->requested_mode = e1000_fc_tx_pause;
3067 				break;
3068 			case LINK_FLOWCTRL_BI:
3069 				fc->requested_mode = e1000_fc_full;
3070 				break;
3071 			}
3072 reset:
3073 			if (err == 0) {
3074 				if (e1000g_reset_link(Adapter) != DDI_SUCCESS)
3075 					err = EINVAL;
3076 			}
3077 			break;
3078 		case MAC_PROP_ADV_1000FDX_CAP:
3079 		case MAC_PROP_ADV_1000HDX_CAP:
3080 		case MAC_PROP_ADV_100FDX_CAP:
3081 		case MAC_PROP_ADV_100HDX_CAP:
3082 		case MAC_PROP_ADV_10FDX_CAP:
3083 		case MAC_PROP_ADV_10HDX_CAP:
3084 		case MAC_PROP_EN_1000HDX_CAP:
3085 		case MAC_PROP_STATUS:
3086 		case MAC_PROP_SPEED:
3087 		case MAC_PROP_DUPLEX:
3088 			err = ENOTSUP; /* read-only prop. Can't set this. */
3089 			break;
3090 		case MAC_PROP_MTU:
3091 			cur_mtu = Adapter->default_mtu;
3092 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3093 			if (new_mtu == cur_mtu) {
3094 				err = 0;
3095 				break;
3096 			}
3097 
3098 			tmp = new_mtu + sizeof (struct ether_vlan_header) +
3099 			    ETHERFCSL;
3100 			if ((tmp < DEFAULT_FRAME_SIZE) ||
3101 			    (tmp > MAXIMUM_FRAME_SIZE)) {
3102 				err = EINVAL;
3103 				break;
3104 			}
3105 
3106 			/* ich8 does not support jumbo frames */
3107 			if ((mac->type == e1000_ich8lan) &&
3108 			    (tmp > DEFAULT_FRAME_SIZE)) {
3109 				err = EINVAL;
3110 				break;
3111 			}
3112 			/* ich9 does not do jumbo frames on one phy type */
3113 			if ((mac->type == e1000_ich9lan) &&
3114 			    (phy->type == e1000_phy_ife) &&
3115 			    (tmp > DEFAULT_FRAME_SIZE)) {
3116 				err = EINVAL;
3117 				break;
3118 			}
3119 			if (Adapter->e1000g_state & E1000G_STARTED) {
3120 				err = EBUSY;
3121 				break;
3122 			}
3123 
3124 			err = mac_maxsdu_update(Adapter->mh, new_mtu);
3125 			if (err == 0) {
3126 				Adapter->max_frame_size = (uint32_t)tmp;
3127 				Adapter->default_mtu = new_mtu;
3128 				e1000g_set_bufsize(Adapter);
3129 			}
3130 			break;
3131 		case MAC_PROP_PRIVATE:
3132 			err = e1000g_set_priv_prop(Adapter, pr_name,
3133 			    pr_valsize, pr_val);
3134 			break;
3135 		default:
3136 			err = ENOTSUP;
3137 			break;
3138 	}
3139 	rw_exit(&Adapter->chip_lock);
3140 	return (err);
3141 }
3142 
3143 static int
3144 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3145     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
3146 {
3147 	struct e1000g *Adapter = arg;
3148 	struct e1000_fc_info *fc = &Adapter->shared.fc;
3149 	int err = 0;
3150 	link_flowctrl_t flowctrl;
3151 	uint64_t tmp = 0;
3152 
3153 	if (pr_valsize == 0)
3154 		return (EINVAL);
3155 
3156 	*perm = MAC_PROP_PERM_RW;
3157 
3158 	bzero(pr_val, pr_valsize);
3159 	if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) {
3160 		return (e1000g_get_def_val(Adapter, pr_num,
3161 		    pr_valsize, pr_val));
3162 	}
3163 
3164 	switch (pr_num) {
3165 		case MAC_PROP_DUPLEX:
3166 			*perm = MAC_PROP_PERM_READ;
3167 			if (pr_valsize >= sizeof (link_duplex_t)) {
3168 				bcopy(&Adapter->link_duplex, pr_val,
3169 				    sizeof (link_duplex_t));
3170 			} else
3171 				err = EINVAL;
3172 			break;
3173 		case MAC_PROP_SPEED:
3174 			*perm = MAC_PROP_PERM_READ;
3175 			if (pr_valsize >= sizeof (uint64_t)) {
3176 				tmp = Adapter->link_speed * 1000000ull;
3177 				bcopy(&tmp, pr_val, sizeof (tmp));
3178 			} else
3179 				err = EINVAL;
3180 			break;
3181 		case MAC_PROP_AUTONEG:
3182 			*(uint8_t *)pr_val = Adapter->param_adv_autoneg;
3183 			break;
3184 		case MAC_PROP_FLOWCTRL:
3185 			if (pr_valsize >= sizeof (link_flowctrl_t)) {
3186 				switch (fc->current_mode) {
3187 					case e1000_fc_none:
3188 						flowctrl = LINK_FLOWCTRL_NONE;
3189 						break;
3190 					case e1000_fc_rx_pause:
3191 						flowctrl = LINK_FLOWCTRL_RX;
3192 						break;
3193 					case e1000_fc_tx_pause:
3194 						flowctrl = LINK_FLOWCTRL_TX;
3195 						break;
3196 					case e1000_fc_full:
3197 						flowctrl = LINK_FLOWCTRL_BI;
3198 						break;
3199 				}
3200 				bcopy(&flowctrl, pr_val, sizeof (flowctrl));
3201 			} else
3202 				err = EINVAL;
3203 			break;
3204 		case MAC_PROP_ADV_1000FDX_CAP:
3205 			*perm = MAC_PROP_PERM_READ;
3206 			*(uint8_t *)pr_val = Adapter->param_adv_1000fdx;
3207 			break;
3208 		case MAC_PROP_EN_1000FDX_CAP:
3209 			*(uint8_t *)pr_val = Adapter->param_en_1000fdx;
3210 			break;
3211 		case MAC_PROP_ADV_1000HDX_CAP:
3212 			*perm = MAC_PROP_PERM_READ;
3213 			*(uint8_t *)pr_val = Adapter->param_adv_1000hdx;
3214 			break;
3215 		case MAC_PROP_EN_1000HDX_CAP:
3216 			*perm = MAC_PROP_PERM_READ;
3217 			*(uint8_t *)pr_val = Adapter->param_en_1000hdx;
3218 			break;
3219 		case MAC_PROP_ADV_100FDX_CAP:
3220 			*perm = MAC_PROP_PERM_READ;
3221 			*(uint8_t *)pr_val = Adapter->param_adv_100fdx;
3222 			break;
3223 		case MAC_PROP_EN_100FDX_CAP:
3224 			*(uint8_t *)pr_val = Adapter->param_en_100fdx;
3225 			break;
3226 		case MAC_PROP_ADV_100HDX_CAP:
3227 			*perm = MAC_PROP_PERM_READ;
3228 			*(uint8_t *)pr_val = Adapter->param_adv_100hdx;
3229 			break;
3230 		case MAC_PROP_EN_100HDX_CAP:
3231 			*(uint8_t *)pr_val = Adapter->param_en_100hdx;
3232 			break;
3233 		case MAC_PROP_ADV_10FDX_CAP:
3234 			*perm = MAC_PROP_PERM_READ;
3235 			*(uint8_t *)pr_val = Adapter->param_adv_10fdx;
3236 			break;
3237 		case MAC_PROP_EN_10FDX_CAP:
3238 			*(uint8_t *)pr_val = Adapter->param_en_10fdx;
3239 			break;
3240 		case MAC_PROP_ADV_10HDX_CAP:
3241 			*perm = MAC_PROP_PERM_READ;
3242 			*(uint8_t *)pr_val = Adapter->param_adv_10hdx;
3243 			break;
3244 		case MAC_PROP_EN_10HDX_CAP:
3245 			*(uint8_t *)pr_val = Adapter->param_en_10hdx;
3246 			break;
3247 		case MAC_PROP_ADV_100T4_CAP:
3248 		case MAC_PROP_EN_100T4_CAP:
3249 			*perm = MAC_PROP_PERM_READ;
3250 			*(uint8_t *)pr_val = Adapter->param_adv_100t4;
3251 			break;
3252 		case MAC_PROP_PRIVATE:
3253 			err = e1000g_get_priv_prop(Adapter, pr_name,
3254 			    pr_flags, pr_valsize, pr_val, perm);
3255 			break;
3256 		case MAC_PROP_MTU: {
3257 			struct e1000_mac_info *mac = &Adapter->shared.mac;
3258 			struct e1000_phy_info *phy = &Adapter->shared.phy;
3259 			mac_propval_range_t range;
3260 
3261 			if (!(pr_flags & MAC_PROP_POSSIBLE))
3262 				return (ENOTSUP);
3263 			if (pr_valsize < sizeof (mac_propval_range_t))
3264 				return (EINVAL);
3265 			range.mpr_count = 1;
3266 			range.mpr_type = MAC_PROPVAL_UINT32;
3267 			range.range_uint32[0].mpur_min = DEFAULT_MTU;
3268 			range.range_uint32[0].mpur_max = MAXIMUM_MTU;
3269 			/* the following MAC types do not support jumbo frames */
3270 			if ((mac->type == e1000_ich8lan) ||
3271 			    ((mac->type == e1000_ich9lan) && (phy->type ==
3272 			    e1000_phy_ife))) {
3273 				range.range_uint32[0].mpur_max = DEFAULT_MTU;
3274 			}
3275 			bcopy(&range, pr_val, sizeof (range));
3276 			break;
3277 		}
3278 		default:
3279 			err = ENOTSUP;
3280 			break;
3281 	}
3282 	return (err);
3283 }
3284 
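/*
 * e1000g_set_priv_prop - set a driver-private property
 *
 * Values are range-checked and, where applicable, written to the
 * corresponding hardware register.
 */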
3285 /* ARGSUSED2 */
3286 static int
3287 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name,
3288     uint_t pr_valsize, const void *pr_val)
3289 {
3290 	int err = 0;
3291 	long result;
3292 	struct e1000_hw *hw = &Adapter->shared;
3293 
3294 	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3295 		if (pr_val == NULL) {
3296 			err = EINVAL;
3297 			return (err);
3298 		}
3299 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3300 		if (result < MIN_TX_BCOPY_THRESHOLD ||
3301 		    result > MAX_TX_BCOPY_THRESHOLD)
3302 			err = EINVAL;
3303 		else {
3304 			Adapter->tx_bcopy_thresh = (uint32_t)result;
3305 		}
3306 		return (err);
3307 	}
3308 	if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3309 		if (pr_val == NULL) {
3310 			err = EINVAL;
3311 			return (err);
3312 		}
3313 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3314 		if (result < 0 || result > 1)
3315 			err = EINVAL;
3316 		else {
3317 			Adapter->tx_intr_enable = (result == 1) ?
3318 			    B_TRUE: B_FALSE;
3319 			if (Adapter->tx_intr_enable)
3320 				e1000g_mask_tx_interrupt(Adapter);
3321 			else
3322 				e1000g_clear_tx_interrupt(Adapter);
3323 			if (e1000g_check_acc_handle(
3324 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3325 				ddi_fm_service_impact(Adapter->dip,
3326 				    DDI_SERVICE_DEGRADED);
3327 		}
3328 		return (err);
3329 	}
3330 	if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3331 		if (pr_val == NULL) {
3332 			err = EINVAL;
3333 			return (err);
3334 		}
3335 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3336 		if (result < MIN_TX_INTR_DELAY ||
3337 		    result > MAX_TX_INTR_DELAY)
3338 			err = EINVAL;
3339 		else {
3340 			Adapter->tx_intr_delay = (uint32_t)result;
3341 			E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay);
3342 			if (e1000g_check_acc_handle(
3343 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3344 				ddi_fm_service_impact(Adapter->dip,
3345 				    DDI_SERVICE_DEGRADED);
3346 		}
3347 		return (err);
3348 	}
3349 	if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3350 		if (pr_val == NULL) {
3351 			err = EINVAL;
3352 			return (err);
3353 		}
3354 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3355 		if (result < MIN_TX_INTR_ABS_DELAY ||
3356 		    result > MAX_TX_INTR_ABS_DELAY)
3357 			err = EINVAL;
3358 		else {
3359 			Adapter->tx_intr_abs_delay = (uint32_t)result;
3360 			E1000_WRITE_REG(hw, E1000_TADV,
3361 			    Adapter->tx_intr_abs_delay);
3362 			if (e1000g_check_acc_handle(
3363 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3364 				ddi_fm_service_impact(Adapter->dip,
3365 				    DDI_SERVICE_DEGRADED);
3366 		}
3367 		return (err);
3368 	}
3369 	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3370 		if (pr_val == NULL) {
3371 			err = EINVAL;
3372 			return (err);
3373 		}
3374 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3375 		if (result < MIN_RX_BCOPY_THRESHOLD ||
3376 		    result > MAX_RX_BCOPY_THRESHOLD)
3377 			err = EINVAL;
3378 		else
3379 			Adapter->rx_bcopy_thresh = (uint32_t)result;
3380 		return (err);
3381 	}
3382 	if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3383 		if (pr_val == NULL) {
3384 			err = EINVAL;
3385 			return (err);
3386 		}
3387 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3388 		if (result < MIN_RX_LIMIT_ON_INTR ||
3389 		    result > MAX_RX_LIMIT_ON_INTR)
3390 			err = EINVAL;
3391 		else
3392 			Adapter->rx_limit_onintr = (uint32_t)result;
3393 		return (err);
3394 	}
3395 	if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3396 		if (pr_val == NULL) {
3397 			err = EINVAL;
3398 			return (err);
3399 		}
3400 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3401 		if (result < MIN_RX_INTR_DELAY ||
3402 		    result > MAX_RX_INTR_DELAY)
3403 			err = EINVAL;
3404 		else {
3405 			Adapter->rx_intr_delay = (uint32_t)result;
3406 			E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay);
3407 			if (e1000g_check_acc_handle(
3408 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3409 				ddi_fm_service_impact(Adapter->dip,
3410 				    DDI_SERVICE_DEGRADED);
3411 		}
3412 		return (err);
3413 	}
3414 	if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3415 		if (pr_val == NULL) {
3416 			err = EINVAL;
3417 			return (err);
3418 		}
3419 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3420 		if (result < MIN_RX_INTR_ABS_DELAY ||
3421 		    result > MAX_RX_INTR_ABS_DELAY)
3422 			err = EINVAL;
3423 		else {
3424 			Adapter->rx_intr_abs_delay = (uint32_t)result;
3425 			E1000_WRITE_REG(hw, E1000_RADV,
3426 			    Adapter->rx_intr_abs_delay);
3427 			if (e1000g_check_acc_handle(
3428 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3429 				ddi_fm_service_impact(Adapter->dip,
3430 				    DDI_SERVICE_DEGRADED);
3431 		}
3432 		return (err);
3433 	}
3434 	if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3435 		if (pr_val == NULL) {
3436 			err = EINVAL;
3437 			return (err);
3438 		}
3439 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3440 		if (result < MIN_INTR_THROTTLING ||
3441 		    result > MAX_INTR_THROTTLING)
3442 			err = EINVAL;
3443 		else {
3444 			if (hw->mac.type >= e1000_82540) {
3445 				Adapter->intr_throttling_rate =
3446 				    (uint32_t)result;
3447 				E1000_WRITE_REG(hw, E1000_ITR,
3448 				    Adapter->intr_throttling_rate);
3449 				if (e1000g_check_acc_handle(
3450 				    Adapter->osdep.reg_handle) != DDI_FM_OK)
3451 					ddi_fm_service_impact(Adapter->dip,
3452 					    DDI_SERVICE_DEGRADED);
3453 			} else
3454 				err = EINVAL;
3455 		}
3456 		return (err);
3457 	}
3458 	if (strcmp(pr_name, "_intr_adaptive") == 0) {
3459 		if (pr_val == NULL) {
3460 			err = EINVAL;
3461 			return (err);
3462 		}
3463 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3464 		if (result < 0 || result > 1)
3465 			err = EINVAL;
3466 		else {
3467 			if (hw->mac.type >= e1000_82540) {
3468 				Adapter->intr_adaptive = (result == 1) ?
3469 				    B_TRUE : B_FALSE;
3470 			} else {
3471 				err = EINVAL;
3472 			}
3473 		}
3474 		return (err);
3475 	}
3476 	return (ENOTSUP);
3477 }
3478 
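/*
 * e1000g_get_priv_prop - return the current (or default) value of a
 * driver-private property as a decimal string.
 */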
3479 static int
3480 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name,
3481     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
3482 {
3483 	int err = ENOTSUP;
3484 	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
3485 	int value;
3486 
3487 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
3488 		*perm = MAC_PROP_PERM_READ;
3489 		if (is_default)
3490 			goto done;
3491 		value = Adapter->param_adv_pause;
3492 		err = 0;
3493 		goto done;
3494 	}
3495 	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
3496 		*perm = MAC_PROP_PERM_READ;
3497 		if (is_default)
3498 			goto done;
3499 		value = Adapter->param_adv_asym_pause;
3500 		err = 0;
3501 		goto done;
3502 	}
3503 	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3504 		value = (is_default ? DEFAULT_TX_BCOPY_THRESHOLD :
3505 		    Adapter->tx_bcopy_thresh);
3506 		err = 0;
3507 		goto done;
3508 	}
3509 	if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3510 		value = (is_default ? DEFAULT_TX_INTR_ENABLE :
3511 		    Adapter->tx_intr_enable);
3512 		err = 0;
3513 		goto done;
3514 	}
3515 	if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3516 		value = (is_default ? DEFAULT_TX_INTR_DELAY :
3517 		    Adapter->tx_intr_delay);
3518 		err = 0;
3519 		goto done;
3520 	}
3521 	if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3522 		value = (is_default ? DEFAULT_TX_INTR_ABS_DELAY :
3523 		    Adapter->tx_intr_abs_delay);
3524 		err = 0;
3525 		goto done;
3526 	}
3527 	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3528 		value = (is_default ? DEFAULT_RX_BCOPY_THRESHOLD :
3529 		    Adapter->rx_bcopy_thresh);
3530 		err = 0;
3531 		goto done;
3532 	}
3533 	if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3534 		value = (is_default ? DEFAULT_RX_LIMIT_ON_INTR :
3535 		    Adapter->rx_limit_onintr);
3536 		err = 0;
3537 		goto done;
3538 	}
3539 	if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3540 		value = (is_default ? DEFAULT_RX_INTR_DELAY :
3541 		    Adapter->rx_intr_delay);
3542 		err = 0;
3543 		goto done;
3544 	}
3545 	if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3546 		value = (is_default ? DEFAULT_RX_INTR_ABS_DELAY :
3547 		    Adapter->rx_intr_abs_delay);
3548 		err = 0;
3549 		goto done;
3550 	}
3551 	if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3552 		value = (is_default ? DEFAULT_INTR_THROTTLING :
3553 		    Adapter->intr_throttling_rate);
3554 		err = 0;
3555 		goto done;
3556 	}
3557 	if (strcmp(pr_name, "_intr_adaptive") == 0) {
3558 		value = (is_default ? 1 : Adapter->intr_adaptive);
3559 		err = 0;
3560 		goto done;
3561 	}
3562 done:
3563 	if (err == 0) {
3564 		(void) snprintf(pr_val, pr_valsize, "%d", value);
3565 	}
3566 	return (err);
3567 }
3568 
3569 /*
3570  * e1000g_get_conf - get configurations set in e1000g.conf
3571  * This routine gets user-configured values out of the configuration
3572  * file e1000g.conf.
3573  *
3574  * For each configurable value, there is a minimum, a maximum, and a
3575  * default.
3576  * If the user does not configure a value, use the default.
3577  * If the user configures a value below the minimum, use the minimum.
3578  * If the user configures a value above the maximum, use the maximum.
3579  */
3580 static void
3581 e1000g_get_conf(struct e1000g *Adapter)
3582 {
3583 	struct e1000_hw *hw = &Adapter->shared;
3584 	boolean_t tbi_compatibility = B_FALSE;
3585 
3586 	/*
3587 	 * get each configurable property from e1000g.conf
3588 	 */
3589 
3590 	/*
3591 	 * NumTxDescriptors
3592 	 */
3593 	Adapter->tx_desc_num =
3594 	    e1000g_get_prop(Adapter, "NumTxDescriptors",
3595 	    MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR,
3596 	    DEFAULT_NUM_TX_DESCRIPTOR);
3597 
3598 	/*
3599 	 * NumRxDescriptors
3600 	 */
3601 	Adapter->rx_desc_num =
3602 	    e1000g_get_prop(Adapter, "NumRxDescriptors",
3603 	    MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR,
3604 	    DEFAULT_NUM_RX_DESCRIPTOR);
3605 
3606 	/*
3607 	 * NumRxFreeList
3608 	 */
3609 	Adapter->rx_freelist_num =
3610 	    e1000g_get_prop(Adapter, "NumRxFreeList",
3611 	    MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST,
3612 	    DEFAULT_NUM_RX_FREELIST);
3613 
3614 	/*
3615 	 * NumTxPacketList
3616 	 */
3617 	Adapter->tx_freelist_num =
3618 	    e1000g_get_prop(Adapter, "NumTxPacketList",
3619 	    MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST,
3620 	    DEFAULT_NUM_TX_FREELIST);
3621 
3622 	/*
3623 	 * FlowControl
3624 	 */
3625 	hw->fc.send_xon = B_TRUE;
3626 	hw->fc.requested_mode =
3627 	    e1000g_get_prop(Adapter, "FlowControl",
3628 	    e1000_fc_none, 4, DEFAULT_FLOW_CONTROL);
3629 	/* 4 is the setting that says "let the eeprom decide" */
3630 	if (hw->fc.requested_mode == 4)
3631 		hw->fc.requested_mode = e1000_fc_default;
3632 
3633 	/*
3634 	 * Max Num Receive Packets on Interrupt
3635 	 */
3636 	Adapter->rx_limit_onintr =
3637 	    e1000g_get_prop(Adapter, "MaxNumReceivePackets",
3638 	    MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR,
3639 	    DEFAULT_RX_LIMIT_ON_INTR);
3640 
3641 	/*
3642 	 * PHY master slave setting
3643 	 */
3644 	hw->phy.ms_type =
3645 	    e1000g_get_prop(Adapter, "SetMasterSlave",
3646 	    e1000_ms_hw_default, e1000_ms_auto,
3647 	    e1000_ms_hw_default);
3648 
3649 	/*
3650 	 * Parameter that controls the TBI mode workaround, which is only
3651 	 * needed with certain switches such as the Cisco 6500/Foundry
3652 	 */
3653 	tbi_compatibility =
3654 	    e1000g_get_prop(Adapter, "TbiCompatibilityEnable",
3655 	    0, 1, DEFAULT_TBI_COMPAT_ENABLE);
3656 	e1000_set_tbi_compatibility_82543(hw, tbi_compatibility);
3657 
3658 	/*
3659 	 * MSI Enable
3660 	 */
3661 	Adapter->msi_enable =
3662 	    e1000g_get_prop(Adapter, "MSIEnable",
3663 	    0, 1, DEFAULT_MSI_ENABLE);
3664 
3665 	/*
3666 	 * Interrupt Throttling Rate
3667 	 */
3668 	Adapter->intr_throttling_rate =
3669 	    e1000g_get_prop(Adapter, "intr_throttling_rate",
3670 	    MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
3671 	    DEFAULT_INTR_THROTTLING);
3672 
3673 	/*
3674 	 * Adaptive Interrupt Blanking Enable/Disable
3675 	 * It is enabled by default
3676 	 */
3677 	Adapter->intr_adaptive =
3678 	    (e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1) == 1) ?
3679 	    B_TRUE : B_FALSE;
3680 
3681 	/*
3682 	 * Hardware checksum enable/disable parameter
3683 	 */
3684 	Adapter->tx_hcksum_enable =
3685 	    e1000g_get_prop(Adapter, "tx_hcksum_enable",
3686 	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3687 	/*
3688 	 * Checksum on/off selection via global parameters.
3689 	 *
3690 	 * If the chip is flagged as not capable of (correctly)
3691 	 * handling checksumming, we don't enable it on either
3692 	 * Rx or Tx side.  Otherwise, we take this chip's settings
3693 	 * from the patchable global defaults.
3694 	 *
3695 	 * We advertise our capabilities only if TX offload is
3696 	 * enabled.  On receive, the stack will accept checksummed
3697 	 * packets anyway, even if we haven't said we can deliver
3698 	 * them.
3699 	 */
3700 	switch (hw->mac.type) {
3701 		case e1000_82540:
3702 		case e1000_82544:
3703 		case e1000_82545:
3704 		case e1000_82545_rev_3:
3705 		case e1000_82546:
3706 		case e1000_82546_rev_3:
3707 		case e1000_82571:
3708 		case e1000_82572:
3709 		case e1000_82573:
3710 		case e1000_80003es2lan:
3711 			break;
3712 		/*
3713 		 * For the following Intel PRO/1000 chipsets, we have not
3714 		 * tested the hardware checksum offload capability, so we
3715 		 * disable the capability for them.
3716 		 *	e1000_82542,
3717 		 *	e1000_82543,
3718 		 *	e1000_82541,
3719 		 *	e1000_82541_rev_2,
3720 		 *	e1000_82547,
3721 		 *	e1000_82547_rev_2,
3722 		 */
3723 		default:
3724 			Adapter->tx_hcksum_enable = B_FALSE;
3725 	}
3726 
3727 	/*
3728 	 * Large Send Offload (LSO) Enable/Disable
3729 	 * If the tx hardware checksum is not enabled, LSO should be
3730 	 * disabled.
3731 	 */
3732 	Adapter->lso_enable =
3733 	    e1000g_get_prop(Adapter, "lso_enable",
3734 	    0, 1, DEFAULT_LSO_ENABLE);
3735 
3736 	switch (hw->mac.type) {
3737 		case e1000_82546:
3738 		case e1000_82546_rev_3:
3739 			if (Adapter->lso_enable)
3740 				Adapter->lso_premature_issue = B_TRUE;
3741 			/* FALLTHRU */
3742 		case e1000_82571:
3743 		case e1000_82572:
3744 		case e1000_82573:
3745 		case e1000_80003es2lan:
3746 			break;
3747 		default:
3748 			Adapter->lso_enable = B_FALSE;
3749 	}
3750 
3751 	if (!Adapter->tx_hcksum_enable) {
3752 		Adapter->lso_premature_issue = B_FALSE;
3753 		Adapter->lso_enable = B_FALSE;
3754 	}
3755 
3756 	/*
3757 	 * If mem_workaround_82546 is enabled, the rx buffers allocated for
3758 	 * e1000_82545, e1000_82546 and e1000_82546_rev_3 chips
3759 	 * will not cross a 64k boundary.
3760 	 */
3761 	Adapter->mem_workaround_82546 =
3762 	    e1000g_get_prop(Adapter, "mem_workaround_82546",
3763 	    0, 1, DEFAULT_MEM_WORKAROUND_82546);
3764 
3765 	/*
3766 	 * Max number of multicast addresses
3767 	 */
3768 	Adapter->mcast_max_num =
3769 	    e1000g_get_prop(Adapter, "mcast_max_num",
3770 	    MIN_MCAST_NUM, MAX_MCAST_NUM, hw->mac.mta_reg_count * 32);
3771 }
3772 
3773 /*
3774  * e1000g_get_prop - routine to read properties
3775  *
3776  * Get a user-configured property value out of the configuration
3777  * file e1000g.conf.
3778  *
3779  * The caller provides the name of the property, a default value, a
3780  * minimum value, and a maximum value.
3781  *
3782  * Returns the configured value of the property, with the default,
3783  * minimum and maximum properly applied.
3784  */
3785 static int
3786 e1000g_get_prop(struct e1000g *Adapter,	/* pointer to per-adapter structure */
3787     char *propname,		/* name of the property */
3788     int minval,			/* minimum acceptable value */
3789     int maxval,			/* maximum acceptable value */
3790     int defval)			/* default value */
3791 {
3792 	int propval;		/* value returned for requested property */
3793 	int *props;		/* pointer to array of properties returned */
3794 	uint_t nprops;		/* number of property values returned */
3795 
3796 	/*
3797 	 * get the array of properties from the config file
3798 	 */
3799 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip,
3800 	    DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) {
3801 		/* got some properties, test if we got enough */
3802 		if (Adapter->instance < nprops) {
3803 			propval = props[Adapter->instance];
3804 		} else {
3805 			/* not enough properties configured */
3806 			propval = defval;
3807 			E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
3808 			    "Not Enough %s values found in e1000g.conf"
3809 			    " - set to %d\n",
3810 			    propname, propval);
3811 		}
3812 
3813 		/* free memory allocated for properties */
3814 		ddi_prop_free(props);
3815 
3816 	} else {
3817 		propval = defval;
3818 	}
3819 
3820 	/*
3821 	 * enforce limits
3822 	 */
3823 	if (propval > maxval) {
3824 		propval = maxval;
3825 		E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
3826 		    "Too High %s value in e1000g.conf - set to %d\n",
3827 		    propname, propval);
3828 	}
3829 
3830 	if (propval < minval) {
3831 		propval = minval;
3832 		E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
3833 		    "Too Low %s value in e1000g.conf - set to %d\n",
3834 		    propname, propval);
3835 	}
3836 
3837 	return (propval);
3838 }
3839 
3840 static boolean_t
3841 e1000g_link_check(struct e1000g *Adapter)
3842 {
3843 	uint16_t speed, duplex, phydata;
3844 	boolean_t link_changed = B_FALSE;
3845 	struct e1000_hw *hw;
3846 	uint32_t reg_tarc;
3847 
3848 	hw = &Adapter->shared;
3849 
3850 	if (e1000g_link_up(Adapter)) {
3851 		/*
3852 		 * The link is up; check whether it was marked as down earlier
3853 		 */
3854 		if (Adapter->link_state != LINK_STATE_UP) {
3855 			(void) e1000_get_speed_and_duplex(hw, &speed, &duplex);
3856 			Adapter->link_speed = speed;
3857 			Adapter->link_duplex = duplex;
3858 			Adapter->link_state = LINK_STATE_UP;
3859 			link_changed = B_TRUE;
3860 
3861 			if (Adapter->link_speed == SPEED_1000)
3862 				Adapter->stall_threshold = TX_STALL_TIME_2S;
3863 			else
3864 				Adapter->stall_threshold = TX_STALL_TIME_8S;
3865 
3866 			Adapter->tx_link_down_timeout = 0;
3867 
3868 			if ((hw->mac.type == e1000_82571) ||
3869 			    (hw->mac.type == e1000_82572)) {
3870 				reg_tarc = E1000_READ_REG(hw, E1000_TARC(0));
3871 				if (speed == SPEED_1000)
3872 					reg_tarc |= (1 << 21);
3873 				else
3874 					reg_tarc &= ~(1 << 21);
3875 				E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc);
3876 			}
3877 		}
3878 		Adapter->smartspeed = 0;
3879 	} else {
3880 		if (Adapter->link_state != LINK_STATE_DOWN) {
3881 			Adapter->link_speed = 0;
3882 			Adapter->link_duplex = 0;
3883 			Adapter->link_state = LINK_STATE_DOWN;
3884 			link_changed = B_TRUE;
3885 
3886 			/*
3887 			 * SmartSpeed workaround for Tabor/TanaX: when the
3888 			 * driver loses link, disable automatic master/slave
3889 			 * resolution.
3890 			 */
3891 			if (hw->phy.type == e1000_phy_igp) {
3892 				(void) e1000_read_phy_reg(hw,
3893 				    PHY_1000T_CTRL, &phydata);
3894 				phydata |= CR_1000T_MS_ENABLE;
3895 				(void) e1000_write_phy_reg(hw,
3896 				    PHY_1000T_CTRL, phydata);
3897 			}
3898 		} else {
3899 			e1000g_smartspeed(Adapter);
3900 		}
3901 
3902 		if (Adapter->e1000g_state & E1000G_STARTED) {
3903 			if (Adapter->tx_link_down_timeout <
3904 			    MAX_TX_LINK_DOWN_TIMEOUT) {
3905 				Adapter->tx_link_down_timeout++;
3906 			} else if (Adapter->tx_link_down_timeout ==
3907 			    MAX_TX_LINK_DOWN_TIMEOUT) {
3908 				e1000g_tx_clean(Adapter);
3909 				Adapter->tx_link_down_timeout++;
3910 			}
3911 		}
3912 	}
3913 
3914 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
3915 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
3916 
3917 	return (link_changed);
3918 }
3919 
3920 /*
3921  * e1000g_reset_link - Using the link properties to setup the link
3922  */
3923 int
3924 e1000g_reset_link(struct e1000g *Adapter)
3925 {
3926 	struct e1000_mac_info *mac;
3927 	struct e1000_phy_info *phy;
3928 	boolean_t invalid;
3929 
3930 	mac = &Adapter->shared.mac;
3931 	phy = &Adapter->shared.phy;
3932 	invalid = B_FALSE;
3933 
3934 	if (Adapter->param_adv_autoneg == 1) {
3935 		mac->autoneg = B_TRUE;
3936 		phy->autoneg_advertised = 0;
3937 
3938 		/*
3939 		 * 1000hdx is not supported for autonegotiation
3940 		 */
3941 		if (Adapter->param_adv_1000fdx == 1)
3942 			phy->autoneg_advertised |= ADVERTISE_1000_FULL;
3943 
3944 		if (Adapter->param_adv_100fdx == 1)
3945 			phy->autoneg_advertised |= ADVERTISE_100_FULL;
3946 
3947 		if (Adapter->param_adv_100hdx == 1)
3948 			phy->autoneg_advertised |= ADVERTISE_100_HALF;
3949 
3950 		if (Adapter->param_adv_10fdx == 1)
3951 			phy->autoneg_advertised |= ADVERTISE_10_FULL;
3952 
3953 		if (Adapter->param_adv_10hdx == 1)
3954 			phy->autoneg_advertised |= ADVERTISE_10_HALF;
3955 
3956 		if (phy->autoneg_advertised == 0)
3957 			invalid = B_TRUE;
3958 	} else {
3959 		mac->autoneg = B_FALSE;
3960 
3961 		/*
3962 		 * 1000fdx and 1000hdx are not supported for forced link
3963 		 */
3964 		if (Adapter->param_adv_100fdx == 1)
3965 			mac->forced_speed_duplex = ADVERTISE_100_FULL;
3966 		else if (Adapter->param_adv_100hdx == 1)
3967 			mac->forced_speed_duplex = ADVERTISE_100_HALF;
3968 		else if (Adapter->param_adv_10fdx == 1)
3969 			mac->forced_speed_duplex = ADVERTISE_10_FULL;
3970 		else if (Adapter->param_adv_10hdx == 1)
3971 			mac->forced_speed_duplex = ADVERTISE_10_HALF;
3972 		else
3973 			invalid = B_TRUE;
3974 
3975 	}
3976 
3977 	if (invalid) {
3978 		e1000g_log(Adapter, CE_WARN,
3979 		    "Invalid link settings. Setting up link to "
3980 		    "support autonegotiation with all link capabilities.");
3981 		mac->autoneg = B_TRUE;
3982 		phy->autoneg_advertised = ADVERTISE_1000_FULL |
3983 		    ADVERTISE_100_FULL | ADVERTISE_100_HALF |
3984 		    ADVERTISE_10_FULL | ADVERTISE_10_HALF;
3985 	}
3986 
3987 	return (e1000_setup_link(&Adapter->shared));
3988 }
3989 
3990 static void
3991 e1000g_timer_tx_resched(struct e1000g *Adapter)
3992 {
3993 	e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
3994 
3995 	rw_enter(&Adapter->chip_lock, RW_READER);
3996 
3997 	if (tx_ring->resched_needed &&
3998 	    ((ddi_get_lbolt() - tx_ring->resched_timestamp) >
3999 	    drv_usectohz(1000000)) &&
4000 	    (Adapter->e1000g_state & E1000G_STARTED) &&
4001 	    (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) {
4002 		tx_ring->resched_needed = B_FALSE;
4003 		mac_tx_update(Adapter->mh);
4004 		E1000G_STAT(tx_ring->stat_reschedule);
4005 		E1000G_STAT(tx_ring->stat_timer_reschedule);
4006 	}
4007 
4008 	rw_exit(&Adapter->chip_lock);
4009 }
4010 
4011 static void
4012 e1000g_local_timer(void *ws)
4013 {
4014 	struct e1000g *Adapter = (struct e1000g *)ws;
4015 	struct e1000_hw *hw;
4016 	e1000g_ether_addr_t ether_addr;
4017 	boolean_t link_changed;
4018 
4019 	hw = &Adapter->shared;
4020 
4021 	if (Adapter->e1000g_state & E1000G_ERROR) {
4022 		rw_enter(&Adapter->chip_lock, RW_WRITER);
4023 		Adapter->e1000g_state &= ~E1000G_ERROR;
4024 		rw_exit(&Adapter->chip_lock);
4025 
4026 		Adapter->reset_count++;
4027 		if (e1000g_global_reset(Adapter)) {
4028 			ddi_fm_service_impact(Adapter->dip,
4029 			    DDI_SERVICE_RESTORED);
4030 			e1000g_timer_tx_resched(Adapter);
4031 		} else
4032 			ddi_fm_service_impact(Adapter->dip,
4033 			    DDI_SERVICE_LOST);
4034 		return;
4035 	}
4036 
4037 	if (e1000g_stall_check(Adapter)) {
4038 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
4039 		    "Tx stall detected. Activating automatic recovery.\n");
4040 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL);
4041 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
4042 		Adapter->reset_count++;
4043 		if (e1000g_reset_adapter(Adapter)) {
4044 			ddi_fm_service_impact(Adapter->dip,
4045 			    DDI_SERVICE_RESTORED);
4046 			e1000g_timer_tx_resched(Adapter);
4047 		}
4048 		return;
4049 	}
4050 
4051 	link_changed = B_FALSE;
4052 	rw_enter(&Adapter->chip_lock, RW_READER);
4053 	if (Adapter->link_complete)
4054 		link_changed = e1000g_link_check(Adapter);
4055 	rw_exit(&Adapter->chip_lock);
4056 
4057 	if (link_changed) {
4058 		if (!Adapter->reset_flag)
4059 			mac_link_update(Adapter->mh, Adapter->link_state);
4060 		if (Adapter->link_state == LINK_STATE_UP)
4061 			Adapter->reset_flag = B_FALSE;
4062 	}
4063 	/*
4064 	 * Workaround for esb2. Data stuck in fifo on a link
4065 	 * down event. Reset the adapter to recover it.
4066 	 */
4067 	if (Adapter->esb2_workaround) {
4068 		Adapter->esb2_workaround = B_FALSE;
4069 		(void) e1000g_reset_adapter(Adapter);
4070 		return;
4071 	}
4072 
4073 	/*
4074 	 * With 82571 controllers, any locally administered address will
4075 	 * be overwritten when there is a reset on the other port.
4076 	 * Detect this circumstance and correct it.
4077 	 */
4078 	if ((hw->mac.type == e1000_82571) &&
4079 	    (e1000_get_laa_state_82571(hw) == B_TRUE)) {
4080 		ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0);
4081 		ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1);
4082 
4083 		ether_addr.reg.low = ntohl(ether_addr.reg.low);
4084 		ether_addr.reg.high = ntohl(ether_addr.reg.high);
4085 
4086 		if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) ||
4087 		    (ether_addr.mac.addr[4] != hw->mac.addr[1]) ||
4088 		    (ether_addr.mac.addr[3] != hw->mac.addr[2]) ||
4089 		    (ether_addr.mac.addr[2] != hw->mac.addr[3]) ||
4090 		    (ether_addr.mac.addr[1] != hw->mac.addr[4]) ||
4091 		    (ether_addr.mac.addr[0] != hw->mac.addr[5])) {
4092 			e1000_rar_set(hw, hw->mac.addr, 0);
4093 		}
4094 	}
4095 
4096 	/*
4097 	 * Long TTL workaround for 82541/82547
4098 	 */
4099 	(void) e1000_igp_ttl_workaround_82547(hw);
4100 
4101 	/*
4102 	 * Check the Adaptive IFS settings. If there are lots of collisions,
4103 	 * change the value in steps...
4104 	 * These properties should only be set for 10/100 links.
4105 	 */
4106 	if ((hw->phy.media_type == e1000_media_type_copper) &&
4107 	    ((Adapter->link_speed == SPEED_100) ||
4108 	    (Adapter->link_speed == SPEED_10))) {
4109 		e1000_update_adaptive(hw);
4110 	}
4111 	/*
4112 	 * Set Timer Interrupts
4113 	 */
4114 	E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
4115 
4116 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
4117 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4118 	else
4119 		e1000g_timer_tx_resched(Adapter);
4120 
4121 	restart_watchdog_timer(Adapter);
4122 }
4123 
4124 /*
4125  * The function e1000g_link_timer() is called when the timer for link setup
4126  * has expired, which indicates that the link setup has completed. The link
4127  * state will not be updated until the link setup is completed, and the
4128  * link state will not be sent to the upper layer through mac_link_update()
4129  * in this function. It will be updated in the local timer routine or the
4130  * interrupt service routine after the interface is started (plumbed).
4131  */
4132 static void
4133 e1000g_link_timer(void *arg)
4134 {
4135 	struct e1000g *Adapter = (struct e1000g *)arg;
4136 
4137 	mutex_enter(&Adapter->link_lock);
4138 	Adapter->link_complete = B_TRUE;
4139 	Adapter->link_tid = 0;
4140 	mutex_exit(&Adapter->link_lock);
4141 }
4142 
4143 /*
4144  * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf
4145  *
4146  * This function reads the forced speed and duplex for 10/100 Mbps speeds
4147  * and also for 1000 Mbps speeds from the e1000g.conf file.
4148  */
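/*
 * Illustrative only: ForceSpeedDuplex is read with the GDIAG_* constants as
 * its limits, so a hypothetical e1000g.conf entry forcing 100 Mbps full
 * duplex on a single instance might look like
 *
 *	ForceSpeedDuplex=4;	(assuming GDIAG_100_FULL is defined as 4)
 *
 * When the property is absent, the default GDIAG_ANY is used, which falls
 * through to the default case below and obeys AutoNegAdvertised.
 */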
4149 static void
4150 e1000g_force_speed_duplex(struct e1000g *Adapter)
4151 {
4152 	int forced;
4153 	struct e1000_mac_info *mac = &Adapter->shared.mac;
4154 	struct e1000_phy_info *phy = &Adapter->shared.phy;
4155 
4156 	/*
4157 	 * get value out of config file
4158 	 */
4159 	forced = e1000g_get_prop(Adapter, "ForceSpeedDuplex",
4160 	    GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY);
4161 
4162 	switch (forced) {
4163 	case GDIAG_10_HALF:
4164 		/*
4165 		 * Disable Auto Negotiation
4166 		 */
4167 		mac->autoneg = B_FALSE;
4168 		mac->forced_speed_duplex = ADVERTISE_10_HALF;
4169 		break;
4170 	case GDIAG_10_FULL:
4171 		/*
4172 		 * Disable Auto Negotiation
4173 		 */
4174 		mac->autoneg = B_FALSE;
4175 		mac->forced_speed_duplex = ADVERTISE_10_FULL;
4176 		break;
4177 	case GDIAG_100_HALF:
4178 		/*
4179 		 * Disable Auto Negotiation
4180 		 */
4181 		mac->autoneg = B_FALSE;
4182 		mac->forced_speed_duplex = ADVERTISE_100_HALF;
4183 		break;
4184 	case GDIAG_100_FULL:
4185 		/*
4186 		 * Disable Auto Negotiation
4187 		 */
4188 		mac->autoneg = B_FALSE;
4189 		mac->forced_speed_duplex = ADVERTISE_100_FULL;
4190 		break;
4191 	case GDIAG_1000_FULL:
4192 		/*
4193 		 * The gigabit spec requires autonegotiation.  Therefore,
4194 		 * when the user wants to force the speed to 1000Mbps, we
4195 		 * enable AutoNeg, but only allow the hardware to advertise
4196 		 * 1000Mbps.  This is different from 10/100 operation, where
4197 		 * we are allowed to link without any negotiation.
4198 		 */
4199 		mac->autoneg = B_TRUE;
4200 		phy->autoneg_advertised = ADVERTISE_1000_FULL;
4201 		break;
4202 	default:	/* obey the setting of AutoNegAdvertised */
4203 		mac->autoneg = B_TRUE;
4204 		phy->autoneg_advertised =
4205 		    (uint16_t)e1000g_get_prop(Adapter, "AutoNegAdvertised",
4206 		    0, AUTONEG_ADVERTISE_SPEED_DEFAULT,
4207 		    AUTONEG_ADVERTISE_SPEED_DEFAULT);
4208 		break;
4209 	}	/* switch */
4210 }
4211 
4212 /*
4213  * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf
4214  *
4215  * This function reads MaxFrameSize from e1000g.conf
4216  */
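/*
 * The MaxFrameSize values map to default MTUs as follows (taken from the
 * switch below):
 *	0 - standard MTU (ETHERMTU)
 *	1 - jumbo frames up to FRAME_SIZE_UPTO_4K (minus header/FCS room)
 *	2 - jumbo frames up to FRAME_SIZE_UPTO_8K (minus header/FCS room)
 *	3 - jumbo frames up to FRAME_SIZE_UPTO_16K, or MAXIMUM_MTU on
 *	    82571 and later
 *
 * For illustration, a hypothetical e1000g.conf entry selecting the largest
 * jumbo frame setting would be
 *
 *	MaxFrameSize=3;
 */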
4217 static void
4218 e1000g_get_max_frame_size(struct e1000g *Adapter)
4219 {
4220 	int max_frame;
4221 	struct e1000_mac_info *mac = &Adapter->shared.mac;
4222 	struct e1000_phy_info *phy = &Adapter->shared.phy;
4223 
4224 	/*
4225 	 * get value out of config file
4226 	 */
4227 	max_frame = e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0);
4228 
4229 	switch (max_frame) {
4230 	case 0:
4231 		Adapter->default_mtu = ETHERMTU;
4232 		break;
4233 	/*
4234 	 * To avoid excessive memory allocation for rx buffers,
4235 	 * E1000G_IPALIGNPRESERVEROOM bytes are reserved.
4236 	 */
4237 	case 1:
4238 		Adapter->default_mtu = FRAME_SIZE_UPTO_4K -
4239 		    sizeof (struct ether_vlan_header) - ETHERFCSL -
4240 		    E1000G_IPALIGNPRESERVEROOM;
4241 		break;
4242 	case 2:
4243 		Adapter->default_mtu = FRAME_SIZE_UPTO_8K -
4244 		    sizeof (struct ether_vlan_header) - ETHERFCSL -
4245 		    E1000G_IPALIGNPRESERVEROOM;
4246 		break;
4247 	case 3:
4248 		if (mac->type >= e1000_82571)
4249 			Adapter->default_mtu = MAXIMUM_MTU;
4250 		else
4251 			Adapter->default_mtu = FRAME_SIZE_UPTO_16K -
4252 			    sizeof (struct ether_vlan_header) - ETHERFCSL -
4253 			    E1000G_IPALIGNPRESERVEROOM;
4254 		break;
4255 	default:
4256 		Adapter->default_mtu = ETHERMTU;
4257 		break;
4258 	}	/* switch */
4259 
4260 	Adapter->max_frame_size = Adapter->default_mtu +
4261 	    sizeof (struct ether_vlan_header) + ETHERFCSL;
4262 
4263 	/* ich8 does not do jumbo frames */
4264 	if (mac->type == e1000_ich8lan) {
4265 		Adapter->default_mtu = ETHERMTU;
4266 		Adapter->max_frame_size = ETHERMTU +
4267 		    sizeof (struct ether_vlan_header) + ETHERFCSL;
4268 	}
4269 
4270 	/* ich9 does not do jumbo frames on one phy type */
4271 	if ((mac->type == e1000_ich9lan) &&
4272 	    (phy->type == e1000_phy_ife)) {
4273 		Adapter->default_mtu = ETHERMTU;
4274 		Adapter->max_frame_size = ETHERMTU +
4275 		    sizeof (struct ether_vlan_header) + ETHERFCSL;
4276 	}
4277 }
4278 
4279 static void
4280 arm_watchdog_timer(struct e1000g *Adapter)
4281 {
4282 	Adapter->watchdog_tid =
4283 	    timeout(e1000g_local_timer,
4284 	    (void *)Adapter, 1 * drv_usectohz(1000000));
4285 }
4286 #pragma inline(arm_watchdog_timer)
4287 
4288 static void
4289 enable_watchdog_timer(struct e1000g *Adapter)
4290 {
4291 	mutex_enter(&Adapter->watchdog_lock);
4292 
4293 	if (!Adapter->watchdog_timer_enabled) {
4294 		Adapter->watchdog_timer_enabled = B_TRUE;
4295 		Adapter->watchdog_timer_started = B_TRUE;
4296 		arm_watchdog_timer(Adapter);
4297 	}
4298 
4299 	mutex_exit(&Adapter->watchdog_lock);
4300 }
4301 
4302 static void
4303 disable_watchdog_timer(struct e1000g *Adapter)
4304 {
4305 	timeout_id_t tid;
4306 
4307 	mutex_enter(&Adapter->watchdog_lock);
4308 
4309 	Adapter->watchdog_timer_enabled = B_FALSE;
4310 	Adapter->watchdog_timer_started = B_FALSE;
4311 	tid = Adapter->watchdog_tid;
4312 	Adapter->watchdog_tid = 0;
4313 
4314 	mutex_exit(&Adapter->watchdog_lock);
4315 
4316 	if (tid != 0)
4317 		(void) untimeout(tid);
4318 }
4319 
4320 static void
4321 start_watchdog_timer(struct e1000g *Adapter)
4322 {
4323 	mutex_enter(&Adapter->watchdog_lock);
4324 
4325 	if (Adapter->watchdog_timer_enabled) {
4326 		if (!Adapter->watchdog_timer_started) {
4327 			Adapter->watchdog_timer_started = B_TRUE;
4328 			arm_watchdog_timer(Adapter);
4329 		}
4330 	}
4331 
4332 	mutex_exit(&Adapter->watchdog_lock);
4333 }
4334 
4335 static void
4336 restart_watchdog_timer(struct e1000g *Adapter)
4337 {
4338 	mutex_enter(&Adapter->watchdog_lock);
4339 
4340 	if (Adapter->watchdog_timer_started)
4341 		arm_watchdog_timer(Adapter);
4342 
4343 	mutex_exit(&Adapter->watchdog_lock);
4344 }
4345 
4346 static void
4347 stop_watchdog_timer(struct e1000g *Adapter)
4348 {
4349 	timeout_id_t tid;
4350 
4351 	mutex_enter(&Adapter->watchdog_lock);
4352 
4353 	Adapter->watchdog_timer_started = B_FALSE;
4354 	tid = Adapter->watchdog_tid;
4355 	Adapter->watchdog_tid = 0;
4356 
4357 	mutex_exit(&Adapter->watchdog_lock);
4358 
4359 	if (tid != 0)
4360 		(void) untimeout(tid);
4361 }
4362 
4363 static void
4364 stop_link_timer(struct e1000g *Adapter)
4365 {
4366 	timeout_id_t tid;
4367 
4368 	/* Disable the link timer */
4369 	mutex_enter(&Adapter->link_lock);
4370 
4371 	tid = Adapter->link_tid;
4372 	Adapter->link_tid = 0;
4373 
4374 	mutex_exit(&Adapter->link_lock);
4375 
4376 	if (tid != 0)
4377 		(void) untimeout(tid);
4378 }
4379 
4380 static void
4381 stop_82547_timer(e1000g_tx_ring_t *tx_ring)
4382 {
4383 	timeout_id_t tid;
4384 
4385 	/* Disable the tx timer for 82547 chipset */
4386 	mutex_enter(&tx_ring->tx_lock);
4387 
4388 	tx_ring->timer_enable_82547 = B_FALSE;
4389 	tid = tx_ring->timer_id_82547;
4390 	tx_ring->timer_id_82547 = 0;
4391 
4392 	mutex_exit(&tx_ring->tx_lock);
4393 
4394 	if (tid != 0)
4395 		(void) untimeout(tid);
4396 }
4397 
4398 void
4399 e1000g_clear_interrupt(struct e1000g *Adapter)
4400 {
4401 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC,
4402 	    0xffffffff & ~E1000_IMS_RXSEQ);
4403 }
4404 
4405 void
4406 e1000g_mask_interrupt(struct e1000g *Adapter)
4407 {
4408 	E1000_WRITE_REG(&Adapter->shared, E1000_IMS,
4409 	    IMS_ENABLE_MASK & ~E1000_IMS_TXDW);
4410 
4411 	if (Adapter->tx_intr_enable)
4412 		e1000g_mask_tx_interrupt(Adapter);
4413 }
4414 
4415 /*
4416  * This routine is called by e1000g_quiesce(), therefore must not block.
4417  */
4418 void
4419 e1000g_clear_all_interrupts(struct e1000g *Adapter)
4420 {
4421 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff);
4422 }
4423 
4424 void
4425 e1000g_mask_tx_interrupt(struct e1000g *Adapter)
4426 {
4427 	E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW);
4428 }
4429 
4430 void
4431 e1000g_clear_tx_interrupt(struct e1000g *Adapter)
4432 {
4433 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW);
4434 }
4435 
4436 static void
4437 e1000g_smartspeed(struct e1000g *Adapter)
4438 {
4439 	struct e1000_hw *hw = &Adapter->shared;
4440 	uint16_t phy_status;
4441 	uint16_t phy_ctrl;
4442 
4443 	/*
4444 	 * If we're not T-or-T, or we're not autoneg'ing, or we're not
4445 	 * advertising 1000Full, we don't even use the workaround
4446 	 */
4447 	if ((hw->phy.type != e1000_phy_igp) ||
4448 	    !hw->mac.autoneg ||
4449 	    !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL))
4450 		return;
4451 
4452 	/*
4453 	 * True if this is the first call of this function or after every
4454 	 * 30 seconds of not having link
4455 	 */
4456 	if (Adapter->smartspeed == 0) {
4457 		/*
4458 		 * If Master/Slave config fault is asserted twice, we
4459 		 * assume back-to-back
4460 		 */
4461 		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4462 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4463 			return;
4464 
4465 		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4466 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4467 			return;
4468 		/*
4469 		 * We're assuming back-to-back because our status register
4470 		 * insists there's a fault in the master/slave
4471 		 * relationship that was "negotiated".
4472 		 */
4473 		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4474 		/*
4475 		 * Is the phy configured for manual configuration of
4476 		 * master/slave?
4477 		 */
4478 		if (phy_ctrl & CR_1000T_MS_ENABLE) {
4479 			/*
4480 			 * Yes.  Then disable manual configuration (enable
4481 			 * auto configuration) of master/slave
4482 			 */
4483 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
4484 			(void) e1000_write_phy_reg(hw,
4485 			    PHY_1000T_CTRL, phy_ctrl);
4486 			/*
4487 			 * Effectively starting the clock
4488 			 */
4489 			Adapter->smartspeed++;
4490 			/*
4491 			 * Restart autonegotiation
4492 			 */
4493 			if (!e1000_phy_setup_autoneg(hw) &&
4494 			    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
4495 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4496 				    MII_CR_RESTART_AUTO_NEG);
4497 				(void) e1000_write_phy_reg(hw,
4498 				    PHY_CONTROL, phy_ctrl);
4499 			}
4500 		}
4501 		return;
4502 		/*
4503 		 * Has 6 seconds transpired still without link? Remember,
4504 		 * you should reset the smartspeed counter once you obtain
4505 		 * link
4506 		 */
4507 	} else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4508 		/*
4509 		 * Yes.  Remember, we did at the start determine that
4510 		 * there's a master/slave configuration fault, so we're
4511 		 * still assuming there's someone on the other end, but we
4512 		 * just haven't yet been able to talk to it. We then
4513 		 * re-enable auto configuration of master/slave to see if
4514 		 * we're running 2/3 pair cables.
4515 		 */
4516 		/*
4517 		 * If still no link, perhaps using 2/3 pair cable
4518 		 */
4519 		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4520 		phy_ctrl |= CR_1000T_MS_ENABLE;
4521 		(void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4522 		/*
4523 		 * Restart autoneg with phy enabled for manual
4524 		 * configuration of master/slave
4525 		 */
4526 		if (!e1000_phy_setup_autoneg(hw) &&
4527 		    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
4528 			phy_ctrl |=
4529 			    (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
4530 			(void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
4531 		}
4532 		/*
4533 		 * Hopefully, there are no more faults and we've obtained
4534 		 * link as a result.
4535 		 */
4536 	}
4537 	/*
4538 	 * Restart process after E1000_SMARTSPEED_MAX iterations (30
4539 	 * seconds)
4540 	 */
4541 	if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4542 		Adapter->smartspeed = 0;
4543 }
4544 
4545 static boolean_t
4546 is_valid_mac_addr(uint8_t *mac_addr)
4547 {
4548 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
4549 	const uint8_t addr_test2[6] =
4550 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4551 
4552 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
4553 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
4554 		return (B_FALSE);
4555 
4556 	return (B_TRUE);
4557 }
4558 
4559 /*
4560  * e1000g_stall_check - check for tx stall
4561  *
4562  * This function checks if the adapter is stalled (in transmit).
4563  *
4564  * It is called each time the watchdog timeout is invoked.
4565  * If the transmit descriptor reclaim continuously fails,
4566  * the watchdog value will increment by 1. If the watchdog
4567  * value exceeds the threshold, the adapter is assumed to
4568  * have stalled and needs to be reset.
4569  */
4570 static boolean_t
4571 e1000g_stall_check(struct e1000g *Adapter)
4572 {
4573 	e1000g_tx_ring_t *tx_ring;
4574 
4575 	tx_ring = Adapter->tx_ring;
4576 
4577 	if (Adapter->link_state != LINK_STATE_UP)
4578 		return (B_FALSE);
4579 
4580 	(void) e1000g_recycle(tx_ring);
4581 
4582 	if (Adapter->stall_flag) {
4583 		Adapter->stall_flag = B_FALSE;
4584 		Adapter->reset_flag = B_TRUE;
4585 		return (B_TRUE);
4586 	}
4587 
4588 	return (B_FALSE);
4589 }
4590 
4591 #ifdef E1000G_DEBUG
4592 static enum ioc_reply
4593 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp)
4594 {
4595 	void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd);
4596 	e1000g_peekpoke_t *ppd;
4597 	uint64_t mem_va;
4598 	uint64_t maxoff;
4599 	boolean_t peek;
4600 
4601 	switch (iocp->ioc_cmd) {
4602 
4603 	case E1000G_IOC_REG_PEEK:
4604 		peek = B_TRUE;
4605 		break;
4606 
4607 	case E1000G_IOC_REG_POKE:
4608 		peek = B_FALSE;
4609 		break;
4610 
4611 	default:
4612 		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
4613 		    "e1000g_diag_ioctl: invalid ioctl command 0x%X\n",
4614 		    iocp->ioc_cmd);
4615 		return (IOC_INVAL);
4616 	}
4617 
4618 	/*
4619 	 * Validate format of ioctl
4620 	 */
4621 	if (iocp->ioc_count != sizeof (e1000g_peekpoke_t))
4622 		return (IOC_INVAL);
4623 	if (mp->b_cont == NULL)
4624 		return (IOC_INVAL);
4625 
4626 	ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr;
4627 
4628 	/*
4629 	 * Validate request parameters
4630 	 */
4631 	switch (ppd->pp_acc_space) {
4632 
4633 	default:
4634 		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
4635 		    "e1000g_diag_ioctl: invalid access space 0x%X\n",
4636 		    ppd->pp_acc_space);
4637 		return (IOC_INVAL);
4638 
4639 	case E1000G_PP_SPACE_REG:
4640 		/*
4641 		 * Memory-mapped I/O space
4642 		 */
4643 		ASSERT(ppd->pp_acc_size == 4);
4644 		if (ppd->pp_acc_size != 4)
4645 			return (IOC_INVAL);
4646 
4647 		if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
4648 			return (IOC_INVAL);
4649 
4650 		mem_va = 0;
4651 		maxoff = 0x10000;
4652 		ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg;
4653 		break;
4654 
4655 	case E1000G_PP_SPACE_E1000G:
4656 		/*
4657 		 * E1000g data structure!
4658 		 */
4659 		mem_va = (uintptr_t)e1000gp;
4660 		maxoff = sizeof (struct e1000g);
4661 		ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem;
4662 		break;
4663 
4664 	}
4665 
4666 	if (ppd->pp_acc_offset >= maxoff)
4667 		return (IOC_INVAL);
4668 
4669 	if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff)
4670 		return (IOC_INVAL);
4671 
4672 	/*
4673 	 * All OK - go!
4674 	 */
4675 	ppd->pp_acc_offset += mem_va;
4676 	(*ppfn)(e1000gp, ppd);
4677 	return (peek ? IOC_REPLY : IOC_ACK);
4678 }
4679 
4680 static void
4681 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4682 {
4683 	ddi_acc_handle_t handle;
4684 	uint32_t *regaddr;
4685 
4686 	handle = e1000gp->osdep.reg_handle;
4687 	regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
4688 	    (uintptr_t)ppd->pp_acc_offset);
4689 
4690 	ppd->pp_acc_data = ddi_get32(handle, regaddr);
4691 }
4692 
4693 static void
4694 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4695 {
4696 	ddi_acc_handle_t handle;
4697 	uint32_t *regaddr;
4698 	uint32_t value;
4699 
4700 	handle = e1000gp->osdep.reg_handle;
4701 	regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
4702 	    (uintptr_t)ppd->pp_acc_offset);
4703 	value = (uint32_t)ppd->pp_acc_data;
4704 
4705 	ddi_put32(handle, regaddr, value);
4706 }
4707 
4708 static void
4709 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4710 {
4711 	uint64_t value;
4712 	void *vaddr;
4713 
4714 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
4715 
4716 	switch (ppd->pp_acc_size) {
4717 	case 1:
4718 		value = *(uint8_t *)vaddr;
4719 		break;
4720 
4721 	case 2:
4722 		value = *(uint16_t *)vaddr;
4723 		break;
4724 
4725 	case 4:
4726 		value = *(uint32_t *)vaddr;
4727 		break;
4728 
4729 	case 8:
4730 		value = *(uint64_t *)vaddr;
4731 		break;
4732 	}
4733 
4734 	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
4735 	    "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n",
4736 	    (void *)e1000gp, (void *)ppd, value, vaddr);
4737 
4738 	ppd->pp_acc_data = value;
4739 }
4740 
4741 static void
4742 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4743 {
4744 	uint64_t value;
4745 	void *vaddr;
4746 
4747 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
4748 	value = ppd->pp_acc_data;
4749 
4750 	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
4751 	    "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n",
4752 	    (void *)e1000gp, (void *)ppd, value, vaddr);
4753 
4754 	switch (ppd->pp_acc_size) {
4755 	case 1:
4756 		*(uint8_t *)vaddr = (uint8_t)value;
4757 		break;
4758 
4759 	case 2:
4760 		*(uint16_t *)vaddr = (uint16_t)value;
4761 		break;
4762 
4763 	case 4:
4764 		*(uint32_t *)vaddr = (uint32_t)value;
4765 		break;
4766 
4767 	case 8:
4768 		*(uint64_t *)vaddr = (uint64_t)value;
4769 		break;
4770 	}
4771 }
4772 #endif
4773 
4774 /*
4775  * Loopback Support
4776  */
4777 static lb_property_t lb_normal =
4778 	{ normal,	"normal",	E1000G_LB_NONE		};
4779 static lb_property_t lb_external1000 =
4780 	{ external,	"1000Mbps",	E1000G_LB_EXTERNAL_1000	};
4781 static lb_property_t lb_external100 =
4782 	{ external,	"100Mbps",	E1000G_LB_EXTERNAL_100	};
4783 static lb_property_t lb_external10 =
4784 	{ external,	"10Mbps",	E1000G_LB_EXTERNAL_10	};
4785 static lb_property_t lb_phy =
4786 	{ internal,	"PHY",		E1000G_LB_INTERNAL_PHY	};
4787 
4788 static enum ioc_reply
4789 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp)
4790 {
4791 	lb_info_sz_t *lbsp;
4792 	lb_property_t *lbpp;
4793 	struct e1000_hw *hw;
4794 	uint32_t *lbmp;
4795 	uint32_t size;
4796 	uint32_t value;
4797 
4798 	hw = &Adapter->shared;
4799 
4800 	if (mp->b_cont == NULL)
4801 		return (IOC_INVAL);
4802 
4803 	if (!e1000g_check_loopback_support(hw)) {
4804 		e1000g_log(NULL, CE_WARN,
4805 		    "Loopback is not supported on e1000g%d", Adapter->instance);
4806 		return (IOC_INVAL);
4807 	}
4808 
4809 	switch (iocp->ioc_cmd) {
4810 	default:
4811 		return (IOC_INVAL);
4812 
4813 	case LB_GET_INFO_SIZE:
4814 		size = sizeof (lb_info_sz_t);
4815 		if (iocp->ioc_count != size)
4816 			return (IOC_INVAL);
4817 
4818 		rw_enter(&Adapter->chip_lock, RW_WRITER);
4819 		e1000g_get_phy_state(Adapter);
4820 
4821 		/*
4822 		 * Workaround for hardware faults. In order to get a stable
4823 		 * PHY state, we wait for a specific interval and
4824 		 * try again. The time delay is an empirical value based
4825 		 * on our testing.
4826 		 */
4827 		msec_delay(100);
4828 		e1000g_get_phy_state(Adapter);
4829 		rw_exit(&Adapter->chip_lock);
4830 
4831 		value = sizeof (lb_normal);
4832 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
4833 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
4834 		    (hw->phy.media_type == e1000_media_type_fiber) ||
4835 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
4836 			value += sizeof (lb_phy);
4837 			switch (hw->mac.type) {
4838 			case e1000_82571:
4839 			case e1000_82572:
4840 			case e1000_80003es2lan:
4841 				value += sizeof (lb_external1000);
4842 				break;
4843 			}
4844 		}
4845 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
4846 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
4847 			value += sizeof (lb_external100);
4848 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
4849 			value += sizeof (lb_external10);
4850 
4851 		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
4852 		*lbsp = value;
4853 		break;
4854 
4855 	case LB_GET_INFO:
4856 		value = sizeof (lb_normal);
4857 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
4858 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
4859 		    (hw->phy.media_type == e1000_media_type_fiber) ||
4860 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
4861 			value += sizeof (lb_phy);
4862 			switch (hw->mac.type) {
4863 			case e1000_82571:
4864 			case e1000_82572:
4865 			case e1000_80003es2lan:
4866 				value += sizeof (lb_external1000);
4867 				break;
4868 			}
4869 		}
4870 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
4871 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
4872 			value += sizeof (lb_external100);
4873 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
4874 			value += sizeof (lb_external10);
4875 
4876 		size = value;
4877 		if (iocp->ioc_count != size)
4878 			return (IOC_INVAL);
4879 
4880 		value = 0;
4881 		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
4882 		lbpp[value++] = lb_normal;
4883 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
4884 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
4885 		    (hw->phy.media_type == e1000_media_type_fiber) ||
4886 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
4887 			lbpp[value++] = lb_phy;
4888 			switch (hw->mac.type) {
4889 			case e1000_82571:
4890 			case e1000_82572:
4891 			case e1000_80003es2lan:
4892 				lbpp[value++] = lb_external1000;
4893 				break;
4894 			}
4895 		}
4896 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
4897 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
4898 			lbpp[value++] = lb_external100;
4899 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
4900 			lbpp[value++] = lb_external10;
4901 		break;
4902 
4903 	case LB_GET_MODE:
4904 		size = sizeof (uint32_t);
4905 		if (iocp->ioc_count != size)
4906 			return (IOC_INVAL);
4907 
4908 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
4909 		*lbmp = Adapter->loopback_mode;
4910 		break;
4911 
4912 	case LB_SET_MODE:
4913 		size = 0;
4914 		if (iocp->ioc_count != sizeof (uint32_t))
4915 			return (IOC_INVAL);
4916 
4917 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
4918 		if (!e1000g_set_loopback_mode(Adapter, *lbmp))
4919 			return (IOC_INVAL);
4920 		break;
4921 	}
4922 
4923 	iocp->ioc_count = size;
4924 	iocp->ioc_error = 0;
4925 
4926 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
4927 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4928 		return (IOC_INVAL);
4929 	}
4930 
4931 	return (IOC_REPLY);
4932 }
4933 
4934 static boolean_t
4935 e1000g_check_loopback_support(struct e1000_hw *hw)
4936 {
4937 	switch (hw->mac.type) {
4938 	case e1000_82540:
4939 	case e1000_82545:
4940 	case e1000_82545_rev_3:
4941 	case e1000_82546:
4942 	case e1000_82546_rev_3:
4943 	case e1000_82541:
4944 	case e1000_82541_rev_2:
4945 	case e1000_82547:
4946 	case e1000_82547_rev_2:
4947 	case e1000_82571:
4948 	case e1000_82572:
4949 	case e1000_82573:
4950 	case e1000_82574:
4951 	case e1000_80003es2lan:
4952 	case e1000_ich9lan:
4953 	case e1000_ich10lan:
4954 		return (B_TRUE);
4955 	}
4956 	return (B_FALSE);
4957 }
4958 
4959 static boolean_t
4960 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode)
4961 {
4962 	struct e1000_hw *hw;
4963 	int i, times;
4964 	boolean_t link_up;
4965 
4966 	if (mode == Adapter->loopback_mode)
4967 		return (B_TRUE);
4968 
4969 	hw = &Adapter->shared;
4970 	times = 0;
4971 
4972 	Adapter->loopback_mode = mode;
4973 
4974 	if (mode == E1000G_LB_NONE) {
4975 		/* Reset the chip */
4976 		hw->phy.autoneg_wait_to_complete = B_TRUE;
4977 		(void) e1000g_reset_adapter(Adapter);
4978 		hw->phy.autoneg_wait_to_complete = B_FALSE;
4979 		return (B_TRUE);
4980 	}
4981 
4982 again:
4983 
4984 	rw_enter(&Adapter->chip_lock, RW_WRITER);
4985 
4986 	switch (mode) {
4987 	default:
4988 		rw_exit(&Adapter->chip_lock);
4989 		return (B_FALSE);
4990 
4991 	case E1000G_LB_EXTERNAL_1000:
4992 		e1000g_set_external_loopback_1000(Adapter);
4993 		break;
4994 
4995 	case E1000G_LB_EXTERNAL_100:
4996 		e1000g_set_external_loopback_100(Adapter);
4997 		break;
4998 
4999 	case E1000G_LB_EXTERNAL_10:
5000 		e1000g_set_external_loopback_10(Adapter);
5001 		break;
5002 
5003 	case E1000G_LB_INTERNAL_PHY:
5004 		e1000g_set_internal_loopback(Adapter);
5005 		break;
5006 	}
5007 
5008 	times++;
5009 
5010 	rw_exit(&Adapter->chip_lock);
5011 
5012 	/* Wait for link up */
5013 	for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--)
5014 		msec_delay(100);
5015 
5016 	rw_enter(&Adapter->chip_lock, RW_WRITER);
5017 
5018 	link_up = e1000g_link_up(Adapter);
5019 
5020 	rw_exit(&Adapter->chip_lock);
5021 
5022 	if (!link_up) {
5023 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5024 		    "Failed to get the link up");
5025 		if (times < 2) {
5026 			/* Reset the link */
5027 			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5028 			    "Reset the link ...");
5029 			(void) e1000g_reset_adapter(Adapter);
5030 			goto again;
5031 		}
5032 	}
5033 
5034 	return (B_TRUE);
5035 }
5036 
5037 /*
5038  * The following loopback settings are from Intel's technical
5039  * document - "How To Loopback". All the register settings and
5040  * time delay values are directly inherited from the document
5041  * without more explanations available.
5042  */
5043 static void
5044 e1000g_set_internal_loopback(struct e1000g *Adapter)
5045 {
5046 	struct e1000_hw *hw;
5047 	uint32_t ctrl;
5048 	uint32_t status;
5049 	uint16_t phy_ctrl;
5050 	uint16_t phy_reg;
5051 	uint32_t txcw;
5052 
5053 	hw = &Adapter->shared;
5054 
5055 	/* Disable Smart Power Down */
5056 	phy_spd_state(hw, B_FALSE);
5057 
5058 	(void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
5059 	phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10);
5060 	phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000;
5061 
5062 	switch (hw->mac.type) {
5063 	case e1000_82540:
5064 	case e1000_82545:
5065 	case e1000_82545_rev_3:
5066 	case e1000_82546:
5067 	case e1000_82546_rev_3:
5068 	case e1000_82573:
5069 		/* Auto-MDI/MDIX off */
5070 		(void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
5071 		/* Reset PHY to update Auto-MDI/MDIX */
5072 		(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5073 		    phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN);
5074 		/* Reset PHY to auto-neg off and force 1000 */
5075 		(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5076 		    phy_ctrl | MII_CR_RESET);
5077 		/*
5078 		 * Disable PHY receiver for 82540/545/546 and 82573 Family.
5079 		 * See comments above e1000g_set_internal_loopback() for the
5080 		 * background.
5081 		 */
5082 		(void) e1000_write_phy_reg(hw, 29, 0x001F);
5083 		(void) e1000_write_phy_reg(hw, 30, 0x8FFC);
5084 		(void) e1000_write_phy_reg(hw, 29, 0x001A);
5085 		(void) e1000_write_phy_reg(hw, 30, 0x8FF0);
5086 		break;
5087 	case e1000_80003es2lan:
5088 		/* Force Link Up */
5089 		(void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
5090 		    0x1CC);
5091 		/* Sets PCS loopback at 1Gbs */
5092 		(void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
5093 		    0x1046);
5094 		break;
5095 	}
5096 
5097 	/*
5098 	 * The following registers should be set for e1000_phy_bm phy type.
5099 	 * e1000_82574, e1000_ich10lan and some e1000_ich9lan use this phy.
5100 	 * For others, we do not need to set these registers.
5101 	 */
5102 	if (hw->phy.type == e1000_phy_bm) {
5103 		/* Set Default MAC Interface speed to 1GB */
5104 		(void) e1000_read_phy_reg(hw, PHY_REG(2, 21), &phy_reg);
5105 		phy_reg &= ~0x0007;
5106 		phy_reg |= 0x006;
5107 		(void) e1000_write_phy_reg(hw, PHY_REG(2, 21), phy_reg);
5108 		/* Assert SW reset for above settings to take effect */
5109 		(void) e1000_phy_commit(hw);
5110 		msec_delay(1);
5111 		/* Force Full Duplex */
5112 		(void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5113 		(void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5114 		    phy_reg | 0x000C);
5115 		/* Set Link Up (in force link) */
5116 		(void) e1000_read_phy_reg(hw, PHY_REG(776, 16), &phy_reg);
5117 		(void) e1000_write_phy_reg(hw, PHY_REG(776, 16),
5118 		    phy_reg | 0x0040);
5119 		/* Force Link */
5120 		(void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5121 		(void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5122 		    phy_reg | 0x0040);
5123 		/* Set Early Link Enable */
5124 		(void) e1000_read_phy_reg(hw, PHY_REG(769, 20), &phy_reg);
5125 		(void) e1000_write_phy_reg(hw, PHY_REG(769, 20),
5126 		    phy_reg | 0x0400);
5127 	}
5128 
5129 	/* Set loopback */
5130 	(void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK);
5131 
5132 	msec_delay(250);
5133 
5134 	/* Now set up the MAC to the same speed/duplex as the PHY. */
5135 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5136 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
5137 	ctrl |= (E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
5138 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
5139 	    E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
5140 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
5141 
5142 	switch (hw->mac.type) {
5143 	case e1000_82540:
5144 	case e1000_82545:
5145 	case e1000_82545_rev_3:
5146 	case e1000_82546:
5147 	case e1000_82546_rev_3:
5148 		/*
5149 		 * For some serdes we'll need to commit the writes now
5150 		 * so that the status is updated on link
5151 		 */
5152 		if (hw->phy.media_type == e1000_media_type_internal_serdes) {
5153 			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5154 			msec_delay(100);
5155 			ctrl = E1000_READ_REG(hw, E1000_CTRL);
5156 		}
5157 
5158 		if (hw->phy.media_type == e1000_media_type_copper) {
5159 			/* Invert Loss of Signal */
5160 			ctrl |= E1000_CTRL_ILOS;
5161 		} else {
5162 			/* Set ILOS on fiber nic if half duplex is detected */
5163 			status = E1000_READ_REG(hw, E1000_STATUS);
5164 			if ((status & E1000_STATUS_FD) == 0)
5165 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5166 		}
5167 		break;
5168 
5169 	case e1000_82571:
5170 	case e1000_82572:
5171 		/*
5172 		 * The fiber/SerDes versions of this adapter do not contain an
5173 		 * accessible PHY. Therefore, loopback beyond MAC must be done
5174 		 * using SerDes analog loopback.
5175 		 */
5176 		if (hw->phy.media_type != e1000_media_type_copper) {
5177 			/* Disable autoneg by setting bit 31 of TXCW to zero */
5178 			txcw = E1000_READ_REG(hw, E1000_TXCW);
5179 			txcw &= ~((uint32_t)1 << 31);
5180 			E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5181 
5182 			/*
5183 			 * Write 0x410 to Serdes Control register
5184 			 * to enable Serdes analog loopback
5185 			 */
5186 			E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5187 			msec_delay(10);
5188 		}
5189 
5190 		status = E1000_READ_REG(hw, E1000_STATUS);
5191 		/* Set ILOS on fiber nic if half duplex is detected */
5192 		if ((hw->phy.media_type == e1000_media_type_fiber) &&
5193 		    ((status & E1000_STATUS_FD) == 0 ||
5194 		    (status & E1000_STATUS_LU) == 0))
5195 			ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5196 		else if (hw->phy.media_type == e1000_media_type_internal_serdes)
5197 			ctrl |= E1000_CTRL_SLU;
5198 		break;
5199 
5200 	case e1000_82573:
5201 		ctrl |= E1000_CTRL_ILOS;
5202 		break;
5203 	case e1000_ich9lan:
5204 	case e1000_ich10lan:
5205 		ctrl |= E1000_CTRL_SLU;
5206 		break;
5207 	}
5208 	if (hw->phy.type == e1000_phy_bm)
5209 		ctrl |= E1000_CTRL_SLU | E1000_CTRL_ILOS;
5210 
5211 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5212 }
5213 
5214 static void
5215 e1000g_set_external_loopback_1000(struct e1000g *Adapter)
5216 {
5217 	struct e1000_hw *hw;
5218 	uint32_t rctl;
5219 	uint32_t ctrl_ext;
5220 	uint32_t ctrl;
5221 	uint32_t status;
5222 	uint32_t txcw;
5223 	uint16_t phydata;
5224 
5225 	hw = &Adapter->shared;
5226 
5227 	/* Disable Smart Power Down */
5228 	phy_spd_state(hw, B_FALSE);
5229 
5230 	switch (hw->mac.type) {
5231 	case e1000_82571:
5232 	case e1000_82572:
5233 		switch (hw->phy.media_type) {
5234 		case e1000_media_type_copper:
5235 			/* Force link up (Must be done before the PHY writes) */
5236 			ctrl = E1000_READ_REG(hw, E1000_CTRL);
5237 			ctrl |= E1000_CTRL_SLU;	/* Force Link Up */
5238 			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5239 
5240 			rctl = E1000_READ_REG(hw, E1000_RCTL);
5241 			rctl |= (E1000_RCTL_EN |
5242 			    E1000_RCTL_SBP |
5243 			    E1000_RCTL_UPE |
5244 			    E1000_RCTL_MPE |
5245 			    E1000_RCTL_LPE |
5246 			    E1000_RCTL_BAM);		/* 0x803E */
5247 			E1000_WRITE_REG(hw, E1000_RCTL, rctl);
5248 
5249 			ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5250 			ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA |
5251 			    E1000_CTRL_EXT_SDP6_DATA |
5252 			    E1000_CTRL_EXT_SDP7_DATA |
5253 			    E1000_CTRL_EXT_SDP4_DIR |
5254 			    E1000_CTRL_EXT_SDP6_DIR |
5255 			    E1000_CTRL_EXT_SDP7_DIR);	/* 0x0DD0 */
5256 			E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5257 
5258 			/*
5259 			 * This sequence tunes the PHY's SDP settings; these are
5260 			 * not customer-settable values. For background, see comments above
5261 			 * e1000g_set_internal_loopback().
5262 			 */
5263 			(void) e1000_write_phy_reg(hw, 0x0, 0x140);
5264 			msec_delay(10);
5265 			(void) e1000_write_phy_reg(hw, 0x9, 0x1A00);
5266 			(void) e1000_write_phy_reg(hw, 0x12, 0xC10);
5267 			(void) e1000_write_phy_reg(hw, 0x12, 0x1C10);
5268 			(void) e1000_write_phy_reg(hw, 0x1F37, 0x76);
5269 			(void) e1000_write_phy_reg(hw, 0x1F33, 0x1);
5270 			(void) e1000_write_phy_reg(hw, 0x1F33, 0x0);
5271 
5272 			(void) e1000_write_phy_reg(hw, 0x1F35, 0x65);
5273 			(void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C);
5274 			(void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC);
5275 			(void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C);
5276 			(void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC);
5277 
5278 			msec_delay(50);
5279 			break;
5280 		case e1000_media_type_fiber:
5281 		case e1000_media_type_internal_serdes:
5282 			status = E1000_READ_REG(hw, E1000_STATUS);
5283 			if (((status & E1000_STATUS_LU) == 0) ||
5284 			    (hw->phy.media_type ==
5285 			    e1000_media_type_internal_serdes)) {
5286 				ctrl = E1000_READ_REG(hw, E1000_CTRL);
5287 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5288 				E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5289 			}
5290 
5291 			/* Disable autoneg by setting bit 31 of TXCW to zero */
5292 			txcw = E1000_READ_REG(hw, E1000_TXCW);
5293 			txcw &= ~((uint32_t)1 << 31);
5294 			E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5295 
5296 			/*
5297 			 * Write 0x410 to Serdes Control register
5298 			 * to enable Serdes analog loopback
5299 			 */
5300 			E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5301 			msec_delay(10);
5302 			break;
5303 		default:
5304 			break;
5305 		}
5306 		break;
5307 	case e1000_82574:
5308 	case e1000_80003es2lan:
5309 	case e1000_ich9lan:
5310 	case e1000_ich10lan:
5311 		(void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata);
5312 		(void) e1000_write_phy_reg(hw, GG82563_REG(6, 16),
5313 		    phydata | (1 << 5));
5314 		Adapter->param_adv_autoneg = 1;
5315 		Adapter->param_adv_1000fdx = 1;
5316 		(void) e1000g_reset_link(Adapter);
5317 		break;
5318 	}
5319 }
5320 
5321 static void
5322 e1000g_set_external_loopback_100(struct e1000g *Adapter)
5323 {
5324 	struct e1000_hw *hw;
5325 	uint32_t ctrl;
5326 	uint16_t phy_ctrl;
5327 
5328 	hw = &Adapter->shared;
5329 
5330 	/* Disable Smart Power Down */
5331 	phy_spd_state(hw, B_FALSE);
5332 
5333 	phy_ctrl = (MII_CR_FULL_DUPLEX |
5334 	    MII_CR_SPEED_100);
5335 
5336 	/* Force 100/FD, reset PHY */
5337 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5338 	    phy_ctrl | MII_CR_RESET);	/* 0xA100 */
5339 	msec_delay(10);
5340 
5341 	/* Force 100/FD */
5342 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5343 	    phy_ctrl);			/* 0x2100 */
5344 	msec_delay(10);
5345 
5346 	/* Now set up the MAC to the same speed/duplex as the PHY. */
5347 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5348 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
5349 	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
5350 	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
5351 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
5352 	    E1000_CTRL_SPD_100 |	/* Force Speed to 100 */
5353 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
5354 
5355 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5356 }
5357 
5358 static void
5359 e1000g_set_external_loopback_10(struct e1000g *Adapter)
5360 {
5361 	struct e1000_hw *hw;
5362 	uint32_t ctrl;
5363 	uint16_t phy_ctrl;
5364 
5365 	hw = &Adapter->shared;
5366 
5367 	/* Disable Smart Power Down */
5368 	phy_spd_state(hw, B_FALSE);
5369 
5370 	phy_ctrl = (MII_CR_FULL_DUPLEX |
5371 	    MII_CR_SPEED_10);
5372 
5373 	/* Force 10/FD, reset PHY */
5374 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5375 	    phy_ctrl | MII_CR_RESET);	/* 0x8100 */
5376 	msec_delay(10);
5377 
5378 	/* Force 10/FD */
5379 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5380 	    phy_ctrl);			/* 0x0100 */
5381 	msec_delay(10);
5382 
5383 	/* Now set up the MAC to the same speed/duplex as the PHY. */
5384 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5385 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
5386 	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
5387 	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
5388 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
5389 	    E1000_CTRL_SPD_10 |		/* Force Speed to 10 */
5390 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
5391 
5392 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5393 }
5394 
5395 #ifdef __sparc
5396 static boolean_t
5397 e1000g_find_mac_address(struct e1000g *Adapter)
5398 {
5399 	struct e1000_hw *hw = &Adapter->shared;
5400 	uchar_t *bytes;
5401 	struct ether_addr sysaddr;
5402 	uint_t nelts;
5403 	int err;
5404 	boolean_t found = B_FALSE;
5405 
5406 	/*
5407 	 * The "vendor's factory-set address" may already have
5408 	 * been extracted from the chip, but if the property
5409 	 * "local-mac-address" is set we use that instead.
5410 	 *
5411 	 * We check whether it looks like an array of 6
5412 	 * bytes (which it should, if OBP set it).  If we can't
5413 	 * make sense of it this way, we'll ignore it.
5414 	 */
5415 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
5416 	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
5417 	if (err == DDI_PROP_SUCCESS) {
5418 		if (nelts == ETHERADDRL) {
5419 			while (nelts--)
5420 				hw->mac.addr[nelts] = bytes[nelts];
5421 			found = B_TRUE;
5422 		}
5423 		ddi_prop_free(bytes);
5424 	}
5425 
5426 	/*
5427 	 * Look up the OBP property "local-mac-address?". If the user has set
5428 	 * 'local-mac-address? = false', use "the system address" instead.
5429 	 */
5430 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0,
5431 	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
5432 		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
5433 			if (localetheraddr(NULL, &sysaddr) != 0) {
5434 				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
5435 				found = B_TRUE;
5436 			}
5437 		}
5438 		ddi_prop_free(bytes);
5439 	}
5440 
5441 	/*
5442 	 * Finally(!), if there's a valid "mac-address" property (created
5443 	 * if we netbooted from this interface), we must use this instead
5444 	 * of any of the above to ensure that the NFS/install server doesn't
5445 	 * get confused by the address changing as Solaris takes over!
5446 	 */
5447 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
5448 	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
5449 	if (err == DDI_PROP_SUCCESS) {
5450 		if (nelts == ETHERADDRL) {
5451 			while (nelts--)
5452 				hw->mac.addr[nelts] = bytes[nelts];
5453 			found = B_TRUE;
5454 		}
5455 		ddi_prop_free(bytes);
5456 	}
5457 
5458 	if (found) {
5459 		bcopy(hw->mac.addr, hw->mac.perm_addr,
5460 		    ETHERADDRL);
5461 	}
5462 
5463 	return (found);
5464 }
5465 #endif
5466 
5467 static int
5468 e1000g_add_intrs(struct e1000g *Adapter)
5469 {
5470 	dev_info_t *devinfo;
5471 	int intr_types;
5472 	int rc;
5473 
5474 	devinfo = Adapter->dip;
5475 
5476 	/* Get supported interrupt types */
5477 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
5478 
5479 	if (rc != DDI_SUCCESS) {
5480 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5481 		    "Get supported interrupt types failed: %d\n", rc);
5482 		return (DDI_FAILURE);
5483 	}
5484 
5485 	/*
5486 	 * Per Intel Technical Advisory TA-160, some older Intel PCI-X NICs
5487 	 * may "advertise" to the OS that they support MSI, but in fact
5488 	 * have problems with it.
5489 	 * So we only enable MSI for PCI-E NICs and disable MSI for older
5490 	 * PCI/PCI-X NICs.
5491 	 */
5492 	if (Adapter->shared.mac.type < e1000_82571)
5493 		Adapter->msi_enable = B_FALSE;
5494 
5495 	if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) {
5496 		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI);
5497 
5498 		if (rc != DDI_SUCCESS) {
5499 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5500 			    "Add MSI failed, trying Legacy interrupts\n");
5501 		} else {
5502 			Adapter->intr_type = DDI_INTR_TYPE_MSI;
5503 		}
5504 	}
5505 
5506 	if ((Adapter->intr_type == 0) &&
5507 	    (intr_types & DDI_INTR_TYPE_FIXED)) {
5508 		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED);
5509 
5510 		if (rc != DDI_SUCCESS) {
5511 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5512 			    "Add Legacy interrupts failed\n");
5513 			return (DDI_FAILURE);
5514 		}
5515 
5516 		Adapter->intr_type = DDI_INTR_TYPE_FIXED;
5517 	}
5518 
5519 	if (Adapter->intr_type == 0) {
5520 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5521 		    "No interrupts registered\n");
5522 		return (DDI_FAILURE);
5523 	}
5524 
5525 	return (DDI_SUCCESS);
5526 }
5527 
5528 /*
5529  * e1000g_intr_add() allocates and adds handlers for MSI/Legacy interrupts
5530  */
5531 static int
5532 e1000g_intr_add(struct e1000g *Adapter, int intr_type)
5533 {
5534 	dev_info_t *devinfo;
5535 	int count, avail, actual;
5536 	int x, y, rc, inum = 0;
5537 	int flag;
5538 	ddi_intr_handler_t *intr_handler;
5539 
5540 	devinfo = Adapter->dip;
5541 
5542 	/* get number of interrupts */
5543 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
5544 	if ((rc != DDI_SUCCESS) || (count == 0)) {
5545 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5546 		    "Get interrupt number failed. Return: %d, count: %d\n",
5547 		    rc, count);
5548 		return (DDI_FAILURE);
5549 	}
5550 
5551 	/* get number of available interrupts */
5552 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
5553 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
5554 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5555 		    "Get interrupt available number failed. "
5556 		    "Return: %d, available: %d\n", rc, avail);
5557 		return (DDI_FAILURE);
5558 	}
5559 
5560 	if (avail < count) {
5561 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5562 		    "Interrupts count: %d, available: %d\n",
5563 		    count, avail);
5564 	}
5565 
5566 	/* Allocate an array of interrupt handles */
5567 	Adapter->intr_size = count * sizeof (ddi_intr_handle_t);
5568 	Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP);
5569 
5570 	/* Set NORMAL behavior for both MSI and FIXED interrupts */
5571 	flag = DDI_INTR_ALLOC_NORMAL;
5572 
5573 	/* call ddi_intr_alloc() */
5574 	rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum,
5575 	    count, &actual, flag);
5576 
5577 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
5578 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5579 		    "Allocate interrupts failed: %d\n", rc);
5580 
5581 		kmem_free(Adapter->htable, Adapter->intr_size);
5582 		return (DDI_FAILURE);
5583 	}
5584 
5585 	if (actual < count) {
5586 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5587 		    "Interrupts requested: %d, received: %d\n",
5588 		    count, actual);
5589 	}
5590 
5591 	Adapter->intr_cnt = actual;
5592 
5593 	/* Get the priority of the first MSI; assume the rest are the same */
5594 	rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri);
5595 
5596 	if (rc != DDI_SUCCESS) {
5597 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5598 		    "Get interrupt priority failed: %d\n", rc);
5599 
5600 		/* Free already allocated intr */
5601 		for (y = 0; y < actual; y++)
5602 			(void) ddi_intr_free(Adapter->htable[y]);
5603 
5604 		kmem_free(Adapter->htable, Adapter->intr_size);
5605 		return (DDI_FAILURE);
5606 	}
5607 
5608 	/*
5609 	 * For PCI-Express adapters in legacy interrupt mode, use the
5610 	 * interrupt service routine e1000g_intr_pciexpress() to avoid
5611 	 * interrupt stealing when sharing an interrupt with other
5612 	 * devices.
5613 	 */
5614 	if (Adapter->shared.mac.type < e1000_82571)
5615 		intr_handler = (ddi_intr_handler_t *)e1000g_intr;
5616 	else
5617 		intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress;
5618 
5619 	/* Call ddi_intr_add_handler() */
5620 	for (x = 0; x < actual; x++) {
5621 		rc = ddi_intr_add_handler(Adapter->htable[x],
5622 		    intr_handler, (caddr_t)Adapter, NULL);
5623 
5624 		if (rc != DDI_SUCCESS) {
5625 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5626 			    "Add interrupt handler failed: %d\n", rc);
5627 
5628 			/* Remove already added handler */
5629 			for (y = 0; y < x; y++)
5630 				(void) ddi_intr_remove_handler(
5631 				    Adapter->htable[y]);
5632 
5633 			/* Free already allocated intr */
5634 			for (y = 0; y < actual; y++)
5635 				(void) ddi_intr_free(Adapter->htable[y]);
5636 
5637 			kmem_free(Adapter->htable, Adapter->intr_size);
5638 			return (DDI_FAILURE);
5639 		}
5640 	}
5641 
5642 	rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap);
5643 
5644 	if (rc != DDI_SUCCESS) {
5645 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5646 		    "Get interrupt cap failed: %d\n", rc);
5647 
5648 		/* Free already allocated intr */
5649 		for (y = 0; y < actual; y++) {
5650 			(void) ddi_intr_remove_handler(Adapter->htable[y]);
5651 			(void) ddi_intr_free(Adapter->htable[y]);
5652 		}
5653 
5654 		kmem_free(Adapter->htable, Adapter->intr_size);
5655 		return (DDI_FAILURE);
5656 	}
5657 
5658 	return (DDI_SUCCESS);
5659 }
5660 
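/*
 * e1000g_rem_intrs - remove the registered interrupt handlers and free
 * the interrupt handles allocated by e1000g_intr_add().
 */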
5661 static int
5662 e1000g_rem_intrs(struct e1000g *Adapter)
5663 {
5664 	int x;
5665 	int rc;
5666 
5667 	for (x = 0; x < Adapter->intr_cnt; x++) {
5668 		rc = ddi_intr_remove_handler(Adapter->htable[x]);
5669 		if (rc != DDI_SUCCESS) {
5670 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5671 			    "Remove intr handler failed: %d\n", rc);
5672 			return (DDI_FAILURE);
5673 		}
5674 
5675 		rc = ddi_intr_free(Adapter->htable[x]);
5676 		if (rc != DDI_SUCCESS) {
5677 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5678 			    "Free intr failed: %d\n", rc);
5679 			return (DDI_FAILURE);
5680 		}
5681 	}
5682 
5683 	kmem_free(Adapter->htable, Adapter->intr_size);
5684 
5685 	return (DDI_SUCCESS);
5686 }
5687 
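/*
 * e1000g_enable_intrs - enable all allocated interrupts, using
 * ddi_intr_block_enable() when the DDI_INTR_FLAG_BLOCK capability is set.
 */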
5688 static int
5689 e1000g_enable_intrs(struct e1000g *Adapter)
5690 {
5691 	int x;
5692 	int rc;
5693 
5694 	/* Enable interrupts */
5695 	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
5696 		/* Call ddi_intr_block_enable() for MSI */
5697 		rc = ddi_intr_block_enable(Adapter->htable,
5698 		    Adapter->intr_cnt);
5699 		if (rc != DDI_SUCCESS) {
5700 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5701 			    "Enable block intr failed: %d\n", rc);
5702 			return (DDI_FAILURE);
5703 		}
5704 	} else {
5705 		/* Call ddi_intr_enable() for Legacy/MSI non block enable */
5706 		for (x = 0; x < Adapter->intr_cnt; x++) {
5707 			rc = ddi_intr_enable(Adapter->htable[x]);
5708 			if (rc != DDI_SUCCESS) {
5709 				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5710 				    "Enable intr failed: %d\n", rc);
5711 				return (DDI_FAILURE);
5712 			}
5713 		}
5714 	}
5715 
5716 	return (DDI_SUCCESS);
5717 }
5718 
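/*
 * e1000g_disable_intrs - disable all allocated interrupts, using
 * ddi_intr_block_disable() when the DDI_INTR_FLAG_BLOCK capability is set.
 */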
5719 static int
5720 e1000g_disable_intrs(struct e1000g *Adapter)
5721 {
5722 	int x;
5723 	int rc;
5724 
5725 	/* Disable all interrupts */
5726 	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
5727 		rc = ddi_intr_block_disable(Adapter->htable,
5728 		    Adapter->intr_cnt);
5729 		if (rc != DDI_SUCCESS) {
5730 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5731 			    "Disable block intr failed: %d\n", rc);
5732 			return (DDI_FAILURE);
5733 		}
5734 	} else {
5735 		for (x = 0; x < Adapter->intr_cnt; x++) {
5736 			rc = ddi_intr_disable(Adapter->htable[x]);
5737 			if (rc != DDI_SUCCESS) {
5738 				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5739 				    "Disable intr failed: %d\n", rc);
5740 				return (DDI_FAILURE);
5741 			}
5742 		}
5743 	}
5744 
5745 	return (DDI_SUCCESS);
5746 }
5747 
5748 /*
5749  * e1000g_get_phy_state - read the PHY registers and save them in the adapter
5750  */
5751 static void
5752 e1000g_get_phy_state(struct e1000g *Adapter)
5753 {
5754 	struct e1000_hw *hw = &Adapter->shared;
5755 
5756 	(void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl);
5757 	(void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status);
5758 	(void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &Adapter->phy_an_adv);
5759 	(void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &Adapter->phy_an_exp);
5760 	(void) e1000_read_phy_reg(hw, PHY_EXT_STATUS, &Adapter->phy_ext_status);
5761 	(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &Adapter->phy_1000t_ctrl);
5762 	(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS,
5763 	    &Adapter->phy_1000t_status);
5764 	(void) e1000_read_phy_reg(hw, PHY_LP_ABILITY, &Adapter->phy_lp_able);
5765 
5766 	Adapter->param_autoneg_cap =
5767 	    (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
5768 	Adapter->param_pause_cap =
5769 	    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
5770 	Adapter->param_asym_pause_cap =
5771 	    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
5772 	Adapter->param_1000fdx_cap =
5773 	    ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5774 	    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
5775 	Adapter->param_1000hdx_cap =
5776 	    ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
5777 	    (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
5778 	Adapter->param_100t4_cap =
5779 	    (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
5780 	Adapter->param_100fdx_cap =
5781 	    ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5782 	    (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
5783 	Adapter->param_100hdx_cap =
5784 	    ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
5785 	    (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
5786 	Adapter->param_10fdx_cap =
5787 	    (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
5788 	Adapter->param_10hdx_cap =
5789 	    (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
5790 
5791 	Adapter->param_adv_autoneg = hw->mac.autoneg;
5792 	Adapter->param_adv_pause =
5793 	    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
5794 	Adapter->param_adv_asym_pause =
5795 	    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
5796 	Adapter->param_adv_1000hdx =
5797 	    (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
5798 	Adapter->param_adv_100t4 =
5799 	    (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
5800 	if (Adapter->param_adv_autoneg == 1) {
5801 		Adapter->param_adv_1000fdx =
5802 		    (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS) ? 1 : 0;
5803 		Adapter->param_adv_100fdx =
5804 		    (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS) ? 1 : 0;
5805 		Adapter->param_adv_100hdx =
5806 		    (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS) ? 1 : 0;
5807 		Adapter->param_adv_10fdx =
5808 		    (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
5809 		Adapter->param_adv_10hdx =
5810 		    (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
5811 	}
5812 
5813 	Adapter->param_lp_autoneg =
5814 	    (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
5815 	Adapter->param_lp_pause =
5816 	    (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
5817 	Adapter->param_lp_asym_pause =
5818 	    (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
5819 	Adapter->param_lp_1000fdx =
5820 	    (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
5821 	Adapter->param_lp_1000hdx =
5822 	    (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
5823 	Adapter->param_lp_100t4 =
5824 	    (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
5825 	Adapter->param_lp_100fdx =
5826 	    (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
5827 	Adapter->param_lp_100hdx =
5828 	    (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
5829 	Adapter->param_lp_10fdx =
5830 	    (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
5831 	Adapter->param_lp_10hdx =
5832 	    (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
5833 }
5834 
5835 /*
5836  * FMA support
5837  */
5838 
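/*
 * e1000g_check_acc_handle - return (and clear) the FMA error status of a
 * register access handle.
 */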
5839 int
5840 e1000g_check_acc_handle(ddi_acc_handle_t handle)
5841 {
5842 	ddi_fm_error_t de;
5843 
5844 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5845 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
5846 	return (de.fme_status);
5847 }
5848 
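/*
 * e1000g_check_dma_handle - return the FMA error status of a DMA handle.
 */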
5849 int
5850 e1000g_check_dma_handle(ddi_dma_handle_t handle)
5851 {
5852 	ddi_fm_error_t de;
5853 
5854 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
5855 	return (de.fme_status);
5856 }
5857 
5858 /*
5859  * The IO fault service error handling callback function
5860  */
5861 /* ARGSUSED2 */
5862 static int
5863 e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
5864 {
5865 	/*
5866 	 * As the driver can always deal with an error in any DMA or
5867 	 * access handle, we can just return the fme_status value.
5868 	 */
5869 	pci_ereport_post(dip, err, NULL);
5870 	return (err->fme_status);
5871 }
5872 
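/*
 * e1000g_fm_init - register the configured FMA capabilities (access/DMA
 * checking, ereports, error callback) with the I/O Fault Services.
 */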
5873 static void
5874 e1000g_fm_init(struct e1000g *Adapter)
5875 {
5876 	ddi_iblock_cookie_t iblk;
5877 	int fma_acc_flag, fma_dma_flag;
5878 
5879 	/* Only register with IO Fault Services if we have some capability */
5880 	if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
5881 		e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
5882 		fma_acc_flag = 1;
5883 	} else {
5884 		e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
5885 		fma_acc_flag = 0;
5886 	}
5887 
5888 	if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
5889 		fma_dma_flag = 1;
5890 	} else {
5891 		fma_dma_flag = 0;
5892 	}
5893 
5894 	(void) e1000g_set_fma_flags(Adapter, fma_acc_flag, fma_dma_flag);
5895 
5896 	if (Adapter->fm_capabilities) {
5897 
5898 		/* Register capabilities with IO Fault Services */
5899 		ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk);
5900 
5901 		/*
5902 		 * Initialize pci ereport capabilities if ereport capable
5903 		 */
5904 		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
5905 		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5906 			pci_ereport_setup(Adapter->dip);
5907 
5908 		/*
5909 		 * Register error callback if error callback capable
5910 		 */
5911 		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5912 			ddi_fm_handler_register(Adapter->dip,
5913 			    e1000g_fm_error_cb, (void*) Adapter);
5914 	}
5915 }
5916 
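/*
 * e1000g_fm_fini - unregister whatever FMA capabilities were registered
 * by e1000g_fm_init().
 */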
5917 static void
5918 e1000g_fm_fini(struct e1000g *Adapter)
5919 {
5920 	/* Only unregister FMA capabilities if we registered some */
5921 	if (Adapter->fm_capabilities) {
5922 
5923 		/*
5924 		 * Release any resources allocated by pci_ereport_setup()
5925 		 */
5926 		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
5927 		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5928 			pci_ereport_teardown(Adapter->dip);
5929 
5930 		/*
5931 		 * Un-register error callback if error callback capable
5932 		 */
5933 		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5934 			ddi_fm_handler_unregister(Adapter->dip);
5935 
5936 		/* Unregister from IO Fault Services */
5937 		mutex_enter(&e1000g_rx_detach_lock);
5938 		ddi_fm_fini(Adapter->dip);
5939 		if (Adapter->priv_dip != NULL) {
5940 			DEVI(Adapter->priv_dip)->devi_fmhdl = NULL;
5941 		}
5942 		mutex_exit(&e1000g_rx_detach_lock);
5943 	}
5944 }
5945 
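/*
 * e1000g_fm_ereport - post an ereport in the DDI_FM_DEVICE class with the
 * given detail (e.g. DDI_FM_DEVICE_INVAL_STATE), if the driver is ereport
 * capable.
 */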
5946 void
5947 e1000g_fm_ereport(struct e1000g *Adapter, char *detail)
5948 {
5949 	uint64_t ena;
5950 	char buf[FM_MAX_CLASS];
5951 
5952 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
5953 	ena = fm_ena_generate(0, FM_ENA_FMT1);
5954 	if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) {
5955 		ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP,
5956 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
5957 	}
5958 }
5959 
5960 /*
5961  * quiesce(9E) entry point.
5962  *
5963  * This function is called when the system is single-threaded at high
5964  * PIL with preemption disabled. Therefore, this function must not
5965  * block.
5966  *
5967  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5968  * DDI_FAILURE indicates an error condition and should almost never happen.
5969  */
5970 static int
5971 e1000g_quiesce(dev_info_t *devinfo)
5972 {
5973 	struct e1000g *Adapter;
5974 
5975 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
5976 
5977 	if (Adapter == NULL)
5978 		return (DDI_FAILURE);
5979 
5980 	e1000g_clear_all_interrupts(Adapter);
5981 
5982 	(void) e1000_reset_hw(&Adapter->shared);
5983 
5984 	/* Setup our HW Tx Head & Tail descriptor pointers */
5985 	E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
5986 	E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
5987 
5988 	/* Setup our HW Rx Head & Tail descriptor pointers */
5989 	E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0);
5990 	E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0);
5991 
5992 	return (DDI_SUCCESS);
5993 }
5994 
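/*
 * e1000g_get_def_val - return the default value of a MAC property,
 * derived from the cached PHY status/extended-status capability bits.
 */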
5995 static int
5996 e1000g_get_def_val(struct e1000g *Adapter, mac_prop_id_t pr_num,
5997     uint_t pr_valsize, void *pr_val)
5998 {
5999 	link_flowctrl_t fl;
6000 	int err = 0;
6001 
6002 	ASSERT(pr_valsize > 0);
6003 	switch (pr_num) {
6004 	case MAC_PROP_AUTONEG:
6005 		*(uint8_t *)pr_val =
6006 		    ((Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0);
6007 		break;
6008 	case MAC_PROP_FLOWCTRL:
6009 		if (pr_valsize < sizeof (link_flowctrl_t))
6010 			return (EINVAL);
6011 		fl = LINK_FLOWCTRL_BI;
6012 		bcopy(&fl, pr_val, sizeof (fl));
6013 		break;
6014 	case MAC_PROP_ADV_1000FDX_CAP:
6015 	case MAC_PROP_EN_1000FDX_CAP:
6016 		*(uint8_t *)pr_val =
6017 		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
6018 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
6019 		break;
6020 	case MAC_PROP_ADV_1000HDX_CAP:
6021 	case MAC_PROP_EN_1000HDX_CAP:
6022 		*(uint8_t *)pr_val =
6023 		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
6024 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
6025 		break;
6026 	case MAC_PROP_ADV_100FDX_CAP:
6027 	case MAC_PROP_EN_100FDX_CAP:
6028 		*(uint8_t *)pr_val =
6029 		    ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
6030 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
6031 		break;
6032 	case MAC_PROP_ADV_100HDX_CAP:
6033 	case MAC_PROP_EN_100HDX_CAP:
6034 		*(uint8_t *)pr_val =
6035 		    ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
6036 		    (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
6037 		break;
6038 	case MAC_PROP_ADV_10FDX_CAP:
6039 	case MAC_PROP_EN_10FDX_CAP:
6040 		*(uint8_t *)pr_val =
6041 		    (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
6042 		break;
6043 	case MAC_PROP_ADV_10HDX_CAP:
6044 	case MAC_PROP_EN_10HDX_CAP:
6045 		*(uint8_t *)pr_val =
6046 		    (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
6047 		break;
6048 	default:
6049 		err = ENOTSUP;
6050 		break;
6051 	}
6052 	return (err);
6053 }
6054 
6055 /*
6056  * Synchronize the adv* and en* parameters.
6057  *
6058  * See comments in <sys/dld.h> for details of the *_en_*
6059  * parameters. Using ndd to set the adv parameters synchronizes
6060  * all of the en parameters with the e1000g parameters,
6061  * implicitly disabling any settings made via dladm.
6062  */
6063 static void
6064 e1000g_param_sync(struct e1000g *Adapter)
6065 {
6066 	Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx;
6067 	Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx;
6068 	Adapter->param_en_100fdx = Adapter->param_adv_100fdx;
6069 	Adapter->param_en_100hdx = Adapter->param_adv_100hdx;
6070 	Adapter->param_en_10fdx = Adapter->param_adv_10fdx;
6071 	Adapter->param_en_10hdx = Adapter->param_adv_10hdx;
6072 }
6073 
6074 /*
6075  * e1000g_get_driver_control - tell manageability firmware that the driver
6076  * has control.
6077  */
6078 static void
6079 e1000g_get_driver_control(struct e1000_hw *hw)
6080 {
6081 	uint32_t ctrl_ext;
6082 	uint32_t swsm;
6083 
6084 	/* tell manageability firmware the driver has taken over */
6085 	switch (hw->mac.type) {
6086 	case e1000_82573:
6087 		swsm = E1000_READ_REG(hw, E1000_SWSM);
6088 		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);
6089 		break;
6090 	case e1000_82571:
6091 	case e1000_82572:
6092 	case e1000_82574:
6093 	case e1000_80003es2lan:
6094 	case e1000_ich8lan:
6095 	case e1000_ich9lan:
6096 	case e1000_ich10lan:
6097 		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6098 		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6099 		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
6100 		break;
6101 	default:
6102 		/* no manageability firmware: do nothing */
6103 		break;
6104 	}
6105 }
6106 
6107 /*
6108  * e1000g_release_driver_control - tell manageability firmware that the driver
6109  * has released control.
6110  */
6111 static void
6112 e1000g_release_driver_control(struct e1000_hw *hw)
6113 {
6114 	uint32_t ctrl_ext;
6115 	uint32_t swsm;
6116 
6117 	/* tell manageability firmware the driver has released control */
6118 	switch (hw->mac.type) {
6119 	case e1000_82573:
6120 		swsm = E1000_READ_REG(hw, E1000_SWSM);
6121 		E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
6122 		break;
6123 	case e1000_82571:
6124 	case e1000_82572:
6125 	case e1000_82574:
6126 	case e1000_80003es2lan:
6127 	case e1000_ich8lan:
6128 	case e1000_ich9lan:
6129 	case e1000_ich10lan:
6130 		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6131 		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6132 		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
6133 		break;
6134 	default:
6135 		/* no manageability firmware: do nothing */
6136 		break;
6137 	}
6138 }
6139 
6140 /*
6141  * Restore e1000g promiscuous mode.
6142  */
6143 static void
6144 e1000g_restore_promisc(struct e1000g *Adapter)
6145 {
6146 	if (Adapter->e1000g_promisc) {
6147 		uint32_t rctl;
6148 
6149 		rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
6150 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
6151 		E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
6152 	}
6153 }
6154