xref: /illumos-gate/usr/src/uts/common/io/e1000g/e1000g_main.c (revision a73c0fe4e90b82a478f821ef3adb5cf34f6a9346)
1 /*
2  * This file is provided under a CDDLv1 license.  When using or
3  * redistributing this file, you may do so under this license.
4  * In redistributing this file this license must be included
5  * and no other modification of this header file is permitted.
6  *
7  * CDDL LICENSE SUMMARY
8  *
9  * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
10  *
11  * The contents of this file are subject to the terms of Version
12  * 1.0 of the Common Development and Distribution License (the "License").
13  *
14  * You should have received a copy of the License with this software.
15  * You can obtain a copy of the License at
16  *	http://www.opensolaris.org/os/licensing.
17  * See the License for the specific language governing permissions
18  * and limitations under the License.
19  */
20 
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * **********************************************************************
28  *									*
29  * Module Name:								*
30  *   e1000g_main.c							*
31  *									*
32  * Abstract:								*
33  *   This file contains the interface routines for the solaris OS.	*
34  *   It has all DDI entry point routines and GLD entry point routines.	*
35  *									*
36  *   This file also contains routines that take care of initialization	*
37  *   uninit routine and interrupt routine.				*
38  *									*
39  * **********************************************************************
40  */
41 
42 #include <sys/dlpi.h>
43 #include <sys/mac.h>
44 #include "e1000g_sw.h"
45 #include "e1000g_debug.h"
46 
47 static char ident[] = "Intel PRO/1000 Ethernet";
48 static char e1000g_string[] = "Intel(R) PRO/1000 Network Connection";
49 static char e1000g_version[] = "Driver Ver. 5.3.3";
50 
51 /*
52  * Proto types for DDI entry points
53  */
54 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
55 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);
56 static int e1000g_quiesce(dev_info_t *);
57 
58 /*
59  * init and intr routines prototype
60  */
61 static int e1000g_resume(dev_info_t *);
62 static int e1000g_suspend(dev_info_t *);
63 static uint_t e1000g_intr_pciexpress(caddr_t);
64 static uint_t e1000g_intr(caddr_t);
65 static void e1000g_intr_work(struct e1000g *, uint32_t);
66 #pragma inline(e1000g_intr_work)
67 static int e1000g_init(struct e1000g *);
68 static int e1000g_start(struct e1000g *, boolean_t);
69 static void e1000g_stop(struct e1000g *, boolean_t);
70 static int e1000g_m_start(void *);
71 static void e1000g_m_stop(void *);
72 static int e1000g_m_promisc(void *, boolean_t);
73 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
74 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
75 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
76 static int e1000g_m_setprop(void *, const char *, mac_prop_id_t,
77     uint_t, const void *);
78 static int e1000g_m_getprop(void *, const char *, mac_prop_id_t,
79     uint_t, uint_t, void *, uint_t *);
80 static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t,
81     const void *);
82 static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t,
83     uint_t, void *, uint_t *);
84 static void e1000g_init_locks(struct e1000g *);
85 static void e1000g_destroy_locks(struct e1000g *);
86 static int e1000g_identify_hardware(struct e1000g *);
87 static int e1000g_regs_map(struct e1000g *);
88 static int e1000g_set_driver_params(struct e1000g *);
89 static void e1000g_set_bufsize(struct e1000g *);
90 static int e1000g_register_mac(struct e1000g *);
91 static boolean_t e1000g_rx_drain(struct e1000g *);
92 static boolean_t e1000g_tx_drain(struct e1000g *);
93 static void e1000g_init_unicst(struct e1000g *);
94 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int);
95 
96 /*
97  * Local routines
98  */
99 static boolean_t e1000g_reset_adapter(struct e1000g *);
100 static void e1000g_tx_clean(struct e1000g *);
101 static void e1000g_rx_clean(struct e1000g *);
102 static void e1000g_link_timer(void *);
103 static void e1000g_local_timer(void *);
104 static boolean_t e1000g_link_check(struct e1000g *);
105 static boolean_t e1000g_stall_check(struct e1000g *);
106 static void e1000g_smartspeed(struct e1000g *);
107 static void e1000g_get_conf(struct e1000g *);
108 static int e1000g_get_prop(struct e1000g *, char *, int, int, int);
109 static void enable_watchdog_timer(struct e1000g *);
110 static void disable_watchdog_timer(struct e1000g *);
111 static void start_watchdog_timer(struct e1000g *);
112 static void restart_watchdog_timer(struct e1000g *);
113 static void stop_watchdog_timer(struct e1000g *);
114 static void stop_link_timer(struct e1000g *);
115 static void stop_82547_timer(e1000g_tx_ring_t *);
116 static void e1000g_force_speed_duplex(struct e1000g *);
117 static void e1000g_get_max_frame_size(struct e1000g *);
118 static boolean_t is_valid_mac_addr(uint8_t *);
119 static void e1000g_unattach(dev_info_t *, struct e1000g *);
120 #ifdef E1000G_DEBUG
121 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *);
122 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *);
123 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *);
124 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *);
125 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *,
126     struct iocblk *, mblk_t *);
127 #endif
128 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *,
129     struct iocblk *, mblk_t *);
130 static boolean_t e1000g_check_loopback_support(struct e1000_hw *);
131 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t);
132 static void e1000g_set_internal_loopback(struct e1000g *);
133 static void e1000g_set_external_loopback_1000(struct e1000g *);
134 static void e1000g_set_external_loopback_100(struct e1000g *);
135 static void e1000g_set_external_loopback_10(struct e1000g *);
136 static int e1000g_add_intrs(struct e1000g *);
137 static int e1000g_intr_add(struct e1000g *, int);
138 static int e1000g_rem_intrs(struct e1000g *);
139 static int e1000g_enable_intrs(struct e1000g *);
140 static int e1000g_disable_intrs(struct e1000g *);
141 static boolean_t e1000g_link_up(struct e1000g *);
142 #ifdef __sparc
143 static boolean_t e1000g_find_mac_address(struct e1000g *);
144 #endif
145 static void e1000g_get_phy_state(struct e1000g *);
146 static void e1000g_free_priv_devi_node(struct e1000g *, boolean_t);
147 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
148     const void *impl_data);
149 static void e1000g_fm_init(struct e1000g *Adapter);
150 static void e1000g_fm_fini(struct e1000g *Adapter);
151 static int e1000g_get_def_val(struct e1000g *, mac_prop_id_t, uint_t, void *);
152 static void e1000g_param_sync(struct e1000g *);
153 static void e1000g_get_driver_control(struct e1000_hw *);
154 static void e1000g_release_driver_control(struct e1000_hw *);
155 static void e1000g_restore_promisc(struct e1000g *Adapter);
156 
157 mac_priv_prop_t e1000g_priv_props[] = {
158 	{"_tx_bcopy_threshold", MAC_PROP_PERM_RW},
159 	{"_tx_interrupt_enable", MAC_PROP_PERM_RW},
160 	{"_tx_intr_delay", MAC_PROP_PERM_RW},
161 	{"_tx_intr_abs_delay", MAC_PROP_PERM_RW},
162 	{"_rx_bcopy_threshold", MAC_PROP_PERM_RW},
163 	{"_max_num_rcv_packets", MAC_PROP_PERM_RW},
164 	{"_rx_intr_delay", MAC_PROP_PERM_RW},
165 	{"_rx_intr_abs_delay", MAC_PROP_PERM_RW},
166 	{"_intr_throttling_rate", MAC_PROP_PERM_RW},
167 	{"_intr_adaptive", MAC_PROP_PERM_RW},
168 	{"_adv_pause_cap", MAC_PROP_PERM_READ},
169 	{"_adv_asym_pause_cap", MAC_PROP_PERM_READ},
170 };
171 #define	E1000G_MAX_PRIV_PROPS	\
172 	(sizeof (e1000g_priv_props)/sizeof (mac_priv_prop_t))
173 
174 
175 static struct cb_ops cb_ws_ops = {
176 	nulldev,		/* cb_open */
177 	nulldev,		/* cb_close */
178 	nodev,			/* cb_strategy */
179 	nodev,			/* cb_print */
180 	nodev,			/* cb_dump */
181 	nodev,			/* cb_read */
182 	nodev,			/* cb_write */
183 	nodev,			/* cb_ioctl */
184 	nodev,			/* cb_devmap */
185 	nodev,			/* cb_mmap */
186 	nodev,			/* cb_segmap */
187 	nochpoll,		/* cb_chpoll */
188 	ddi_prop_op,		/* cb_prop_op */
189 	NULL,			/* cb_stream */
190 	D_MP | D_HOTPLUG,	/* cb_flag */
191 	CB_REV,			/* cb_rev */
192 	nodev,			/* cb_aread */
193 	nodev			/* cb_awrite */
194 };
195 
196 static struct dev_ops ws_ops = {
197 	DEVO_REV,		/* devo_rev */
198 	0,			/* devo_refcnt */
199 	NULL,			/* devo_getinfo */
200 	nulldev,		/* devo_identify */
201 	nulldev,		/* devo_probe */
202 	e1000g_attach,		/* devo_attach */
203 	e1000g_detach,		/* devo_detach */
204 	nodev,			/* devo_reset */
205 	&cb_ws_ops,		/* devo_cb_ops */
206 	NULL,			/* devo_bus_ops */
207 	ddi_power,		/* devo_power */
208 	e1000g_quiesce		/* devo_quiesce */
209 };
210 
211 static struct modldrv modldrv = {
212 	&mod_driverops,		/* Type of module.  This one is a driver */
213 	ident,			/* Discription string */
214 	&ws_ops,		/* driver ops */
215 };
216 
217 static struct modlinkage modlinkage = {
218 	MODREV_1, &modldrv, NULL
219 };
220 
221 /* Access attributes for register mapping */
222 static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
223 	DDI_DEVICE_ATTR_V0,
224 	DDI_STRUCTURE_LE_ACC,
225 	DDI_STRICTORDER_ACC,
226 	DDI_FLAGERR_ACC
227 };
228 
229 #define	E1000G_M_CALLBACK_FLAGS \
230 	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
231 
232 static mac_callbacks_t e1000g_m_callbacks = {
233 	E1000G_M_CALLBACK_FLAGS,
234 	e1000g_m_stat,
235 	e1000g_m_start,
236 	e1000g_m_stop,
237 	e1000g_m_promisc,
238 	e1000g_m_multicst,
239 	NULL,
240 	e1000g_m_tx,
241 	e1000g_m_ioctl,
242 	e1000g_m_getcapab,
243 	NULL,
244 	NULL,
245 	e1000g_m_setprop,
246 	e1000g_m_getprop
247 };
248 
249 /*
250  * Global variables
251  */
252 uint32_t e1000g_mblks_pending = 0;
253 /*
254  * Workaround for Dynamic Reconfiguration support, for x86 platform only.
255  * Here we maintain a private dev_info list if e1000g_force_detach is
256  * enabled. If we force the driver to detach while there are still some
257  * rx buffers retained in the upper layer, we have to keep a copy of the
258  * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
259  * structure will be freed after the driver is detached. However when we
260  * finally free those rx buffers released by the upper layer, we need to
261  * refer to the dev_info to free the dma buffers. So we save a copy of
262  * the dev_info for this purpose. On x86 platform, we assume this copy
263  * of dev_info is always valid, but on SPARC platform, it could be invalid
264  * after the system board level DR operation. For this reason, the global
265  * variable e1000g_force_detach must be B_FALSE on SPARC platform.
266  */
267 #ifdef __sparc
268 boolean_t e1000g_force_detach = B_FALSE;
269 #else
270 boolean_t e1000g_force_detach = B_TRUE;
271 #endif
272 private_devi_list_t *e1000g_private_devi_list = NULL;
273 
274 /*
275  * The rwlock is defined to protect the whole processing of rx recycling
276  * and the rx packets release in detach processing to make them mutually
277  * exclusive.
278  * The rx recycling processes different rx packets in different threads,
279  * so it will be protected with RW_READER and it won't block any other rx
280  * recycling threads.
281  * While the detach processing will be protected with RW_WRITER to make
282  * it mutually exclusive with the rx recycling.
283  */
284 krwlock_t e1000g_rx_detach_lock;
285 /*
286  * The rwlock e1000g_dma_type_lock is defined to protect the global flag
287  * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
288  * If there are many e1000g instances, the system may run out of DVMA
289  * resources during the initialization of the instances, then the flag will
290  * be changed to "USE_DMA". Because different e1000g instances are initialized
291  * in parallel, we need to use this lock to protect the flag.
292  */
293 krwlock_t e1000g_dma_type_lock;
294 
295 /*
296  * The 82546 chipset is a dual-port device, both the ports share one eeprom.
297  * Based on the information from Intel, the 82546 chipset has some hardware
298  * problem. When one port is being reset and the other port is trying to
299  * access the eeprom, it could cause system hang or panic. To workaround this
300  * hardware problem, we use a global mutex to prevent such operations from
301  * happening simultaneously on different instances. This workaround is applied
302  * to all the devices supported by this driver.
303  */
304 kmutex_t e1000g_nvm_lock;
305 
306 /*
307  * Loadable module configuration entry points for the driver
308  */
309 
310 /*
311  * _init - module initialization
312  */
313 int
314 _init(void)
315 {
316 	int status;
317 
318 	mac_init_ops(&ws_ops, WSNAME);
319 	status = mod_install(&modlinkage);
320 	if (status != DDI_SUCCESS)
321 		mac_fini_ops(&ws_ops);
322 	else {
323 		rw_init(&e1000g_rx_detach_lock, NULL, RW_DRIVER, NULL);
324 		rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL);
325 		mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL);
326 	}
327 
328 	return (status);
329 }
330 
331 /*
332  * _fini - module finalization
333  */
334 int
335 _fini(void)
336 {
337 	int status;
338 
339 	rw_enter(&e1000g_rx_detach_lock, RW_READER);
340 	if (e1000g_mblks_pending != 0) {
341 		rw_exit(&e1000g_rx_detach_lock);
342 		return (EBUSY);
343 	}
344 	rw_exit(&e1000g_rx_detach_lock);
345 
346 	status = mod_remove(&modlinkage);
347 	if (status == DDI_SUCCESS) {
348 		mac_fini_ops(&ws_ops);
349 
350 		if (e1000g_force_detach) {
351 			private_devi_list_t *devi_node;
352 
353 			rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
354 			while (e1000g_private_devi_list != NULL) {
355 				devi_node = e1000g_private_devi_list;
356 				e1000g_private_devi_list =
357 				    e1000g_private_devi_list->next;
358 
359 				kmem_free(devi_node->priv_dip,
360 				    sizeof (struct dev_info));
361 				kmem_free(devi_node,
362 				    sizeof (private_devi_list_t));
363 			}
364 			rw_exit(&e1000g_rx_detach_lock);
365 		}
366 
367 		rw_destroy(&e1000g_rx_detach_lock);
368 		rw_destroy(&e1000g_dma_type_lock);
369 		mutex_destroy(&e1000g_nvm_lock);
370 	}
371 
372 	return (status);
373 }
374 
375 /*
376  * _info - module information
377  */
378 int
379 _info(struct modinfo *modinfop)
380 {
381 	return (mod_info(&modlinkage, modinfop));
382 }
383 
384 /*
385  * e1000g_attach - driver attach
386  *
387  * This function is the device-specific initialization entry
388  * point. This entry point is required and must be written.
389  * The DDI_ATTACH command must be provided in the attach entry
390  * point. When attach() is called with cmd set to DDI_ATTACH,
391  * all normal kernel services (such as kmem_alloc(9F)) are
392  * available for use by the driver.
393  *
394  * The attach() function will be called once for each instance
395  * of  the  device  on  the  system with cmd set to DDI_ATTACH.
396  * Until attach() succeeds, the only driver entry points which
397  * may be called are open(9E) and getinfo(9E).
398  */
399 static int
400 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
401 {
402 	struct e1000g *Adapter;
403 	struct e1000_hw *hw;
404 	struct e1000g_osdep *osdep;
405 	int instance;
406 
407 	switch (cmd) {
408 	default:
409 		e1000g_log(NULL, CE_WARN,
410 		    "Unsupported command send to e1000g_attach... ");
411 		return (DDI_FAILURE);
412 
413 	case DDI_RESUME:
414 		return (e1000g_resume(devinfo));
415 
416 	case DDI_ATTACH:
417 		break;
418 	}
419 
420 	/*
421 	 * get device instance number
422 	 */
423 	instance = ddi_get_instance(devinfo);
424 
425 	/*
426 	 * Allocate soft data structure
427 	 */
428 	Adapter =
429 	    (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP);
430 
431 	Adapter->dip = devinfo;
432 	Adapter->instance = instance;
433 	Adapter->tx_ring->adapter = Adapter;
434 	Adapter->rx_ring->adapter = Adapter;
435 
436 	hw = &Adapter->shared;
437 	osdep = &Adapter->osdep;
438 	hw->back = osdep;
439 	osdep->adapter = Adapter;
440 
441 	ddi_set_driver_private(devinfo, (caddr_t)Adapter);
442 
443 	/*
444 	 * Initialize for fma support
445 	 */
446 	Adapter->fm_capabilities = e1000g_get_prop(Adapter, "fm-capable",
447 	    0, 0x0f,
448 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
449 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
450 	e1000g_fm_init(Adapter);
451 	Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT;
452 
453 	/*
454 	 * PCI Configure
455 	 */
456 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
457 		e1000g_log(Adapter, CE_WARN, "PCI configuration failed");
458 		goto attach_fail;
459 	}
460 	Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
461 
462 	/*
463 	 * Setup hardware
464 	 */
465 	if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) {
466 		e1000g_log(Adapter, CE_WARN, "Identify hardware failed");
467 		goto attach_fail;
468 	}
469 
470 	/*
471 	 * Map in the device registers.
472 	 */
473 	if (e1000g_regs_map(Adapter) != DDI_SUCCESS) {
474 		e1000g_log(Adapter, CE_WARN, "Mapping registers failed");
475 		goto attach_fail;
476 	}
477 	Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
478 
479 	/*
480 	 * Initialize driver parameters
481 	 */
482 	if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) {
483 		goto attach_fail;
484 	}
485 	Adapter->attach_progress |= ATTACH_PROGRESS_SETUP;
486 
487 	if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
488 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
489 		goto attach_fail;
490 	}
491 
492 	/*
493 	 * Initialize interrupts
494 	 */
495 	if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) {
496 		e1000g_log(Adapter, CE_WARN, "Add interrupts failed");
497 		goto attach_fail;
498 	}
499 	Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
500 
501 	/*
502 	 * Initialize mutex's for this device.
503 	 * Do this before enabling the interrupt handler and
504 	 * register the softint to avoid the condition where
505 	 * interrupt handler can try using uninitialized mutex
506 	 */
507 	e1000g_init_locks(Adapter);
508 	Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS;
509 
510 	/*
511 	 * Initialize Driver Counters
512 	 */
513 	if (e1000g_init_stats(Adapter) != DDI_SUCCESS) {
514 		e1000g_log(Adapter, CE_WARN, "Init stats failed");
515 		goto attach_fail;
516 	}
517 	Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS;
518 
519 	/*
520 	 * Initialize chip hardware and software structures
521 	 */
522 	rw_enter(&Adapter->chip_lock, RW_WRITER);
523 	if (e1000g_init(Adapter) != DDI_SUCCESS) {
524 		rw_exit(&Adapter->chip_lock);
525 		e1000g_log(Adapter, CE_WARN, "Adapter initialization failed");
526 		goto attach_fail;
527 	}
528 	rw_exit(&Adapter->chip_lock);
529 	Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
530 
531 	/*
532 	 * Register the driver to the MAC
533 	 */
534 	if (e1000g_register_mac(Adapter) != DDI_SUCCESS) {
535 		e1000g_log(Adapter, CE_WARN, "Register MAC failed");
536 		goto attach_fail;
537 	}
538 	Adapter->attach_progress |= ATTACH_PROGRESS_MAC;
539 
540 	/*
541 	 * Now that mutex locks are initialized, and the chip is also
542 	 * initialized, enable interrupts.
543 	 */
544 	if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) {
545 		e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed");
546 		goto attach_fail;
547 	}
548 	Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
549 
550 	/*
551 	 * If e1000g_force_detach is enabled, in global private dip list,
552 	 * we will create a new entry, which maintains the priv_dip for DR
553 	 * supports after driver detached.
554 	 */
555 	if (e1000g_force_detach) {
556 		private_devi_list_t *devi_node;
557 
558 		Adapter->priv_dip =
559 		    kmem_zalloc(sizeof (struct dev_info), KM_SLEEP);
560 		bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip),
561 		    sizeof (struct dev_info));
562 
563 		devi_node =
564 		    kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP);
565 
566 		rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
567 		devi_node->priv_dip = Adapter->priv_dip;
568 		devi_node->flag = E1000G_PRIV_DEVI_ATTACH;
569 		devi_node->next = e1000g_private_devi_list;
570 		e1000g_private_devi_list = devi_node;
571 		rw_exit(&e1000g_rx_detach_lock);
572 	}
573 
574 	cmn_err(CE_CONT, "!%s, %s\n", e1000g_string, e1000g_version);
575 	Adapter->e1000g_state = E1000G_INITIALIZED;
576 
577 	return (DDI_SUCCESS);
578 
579 attach_fail:
580 	e1000g_unattach(devinfo, Adapter);
581 	return (DDI_FAILURE);
582 }
583 
584 static int
585 e1000g_register_mac(struct e1000g *Adapter)
586 {
587 	struct e1000_hw *hw = &Adapter->shared;
588 	mac_register_t *mac;
589 	int err;
590 
591 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
592 		return (DDI_FAILURE);
593 
594 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
595 	mac->m_driver = Adapter;
596 	mac->m_dip = Adapter->dip;
597 	mac->m_src_addr = hw->mac.addr;
598 	mac->m_callbacks = &e1000g_m_callbacks;
599 	mac->m_min_sdu = 0;
600 	mac->m_max_sdu = Adapter->default_mtu;
601 	mac->m_margin = VLAN_TAGSZ;
602 	mac->m_priv_props = e1000g_priv_props;
603 	mac->m_priv_prop_count = E1000G_MAX_PRIV_PROPS;
604 	mac->m_v12n = MAC_VIRT_LEVEL1;
605 
606 	err = mac_register(mac, &Adapter->mh);
607 	mac_free(mac);
608 
609 	return (err == 0 ? DDI_SUCCESS : DDI_FAILURE);
610 }
611 
612 static int
613 e1000g_identify_hardware(struct e1000g *Adapter)
614 {
615 	struct e1000_hw *hw = &Adapter->shared;
616 	struct e1000g_osdep *osdep = &Adapter->osdep;
617 
618 	/* Get the device id */
619 	hw->vendor_id =
620 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
621 	hw->device_id =
622 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
623 	hw->revision_id =
624 	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
625 	hw->subsystem_device_id =
626 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
627 	hw->subsystem_vendor_id =
628 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
629 
630 	if (e1000_set_mac_type(hw) != E1000_SUCCESS) {
631 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
632 		    "MAC type could not be set properly.");
633 		return (DDI_FAILURE);
634 	}
635 
636 	return (DDI_SUCCESS);
637 }
638 
639 static int
640 e1000g_regs_map(struct e1000g *Adapter)
641 {
642 	dev_info_t *devinfo = Adapter->dip;
643 	struct e1000_hw *hw = &Adapter->shared;
644 	struct e1000g_osdep *osdep = &Adapter->osdep;
645 	off_t mem_size;
646 
647 	/* Get size of adapter register memory */
648 	if (ddi_dev_regsize(devinfo, ADAPTER_REG_SET, &mem_size) !=
649 	    DDI_SUCCESS) {
650 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
651 		    "ddi_dev_regsize for registers failed");
652 		return (DDI_FAILURE);
653 	}
654 
655 	/* Map adapter register memory */
656 	if ((ddi_regs_map_setup(devinfo, ADAPTER_REG_SET,
657 	    (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr,
658 	    &osdep->reg_handle)) != DDI_SUCCESS) {
659 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
660 		    "ddi_regs_map_setup for registers failed");
661 		goto regs_map_fail;
662 	}
663 
664 	/* ICH needs to map flash memory */
665 	if (hw->mac.type == e1000_ich8lan ||
666 	    hw->mac.type == e1000_ich9lan ||
667 	    hw->mac.type == e1000_ich10lan) {
668 		/* get flash size */
669 		if (ddi_dev_regsize(devinfo, ICH_FLASH_REG_SET,
670 		    &mem_size) != DDI_SUCCESS) {
671 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
672 			    "ddi_dev_regsize for ICH flash failed");
673 			goto regs_map_fail;
674 		}
675 
676 		/* map flash in */
677 		if (ddi_regs_map_setup(devinfo, ICH_FLASH_REG_SET,
678 		    (caddr_t *)&hw->flash_address, 0,
679 		    mem_size, &e1000g_regs_acc_attr,
680 		    &osdep->ich_flash_handle) != DDI_SUCCESS) {
681 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
682 			    "ddi_regs_map_setup for ICH flash failed");
683 			goto regs_map_fail;
684 		}
685 	}
686 
687 	return (DDI_SUCCESS);
688 
689 regs_map_fail:
690 	if (osdep->reg_handle != NULL)
691 		ddi_regs_map_free(&osdep->reg_handle);
692 
693 	return (DDI_FAILURE);
694 }
695 
696 static int
697 e1000g_set_driver_params(struct e1000g *Adapter)
698 {
699 	struct e1000_hw *hw;
700 	uint32_t mem_bar, io_bar, bar64;
701 
702 	hw = &Adapter->shared;
703 
704 	/* Set MAC type and initialize hardware functions */
705 	if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) {
706 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
707 		    "Could not setup hardware functions");
708 		return (DDI_FAILURE);
709 	}
710 
711 	/* Get bus information */
712 	if (e1000_get_bus_info(hw) != E1000_SUCCESS) {
713 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
714 		    "Could not get bus information");
715 		return (DDI_FAILURE);
716 	}
717 
718 	/* get mem_base addr */
719 	mem_bar = pci_config_get32(Adapter->osdep.cfg_handle, PCI_CONF_BASE0);
720 	bar64 = mem_bar & PCI_BASE_TYPE_ALL;
721 
722 	/* get io_base addr */
723 	if (hw->mac.type >= e1000_82544) {
724 		if (bar64) {
725 			/* IO BAR is different for 64 bit BAR mode */
726 			io_bar = pci_config_get32(Adapter->osdep.cfg_handle,
727 			    PCI_CONF_BASE4);
728 		} else {
729 			/* normal 32-bit BAR mode */
730 			io_bar = pci_config_get32(Adapter->osdep.cfg_handle,
731 			    PCI_CONF_BASE2);
732 		}
733 		hw->io_base = io_bar & PCI_BASE_IO_ADDR_M;
734 	} else {
735 		/* no I/O access for adapters prior to 82544 */
736 		hw->io_base = 0x0;
737 	}
738 
739 	e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word);
740 
741 	hw->mac.autoneg_failed = B_TRUE;
742 
743 	/* Set the autoneg_wait_to_complete flag to B_FALSE */
744 	hw->phy.autoneg_wait_to_complete = B_FALSE;
745 
746 	/* Adaptive IFS related changes */
747 	hw->mac.adaptive_ifs = B_TRUE;
748 
749 	/* Enable phy init script for IGP phy of 82541/82547 */
750 	if ((hw->mac.type == e1000_82547) ||
751 	    (hw->mac.type == e1000_82541) ||
752 	    (hw->mac.type == e1000_82547_rev_2) ||
753 	    (hw->mac.type == e1000_82541_rev_2))
754 		e1000_init_script_state_82541(hw, B_TRUE);
755 
756 	/* Enable the TTL workaround for 82541/82547 */
757 	e1000_set_ttl_workaround_state_82541(hw, B_TRUE);
758 
759 #ifdef __sparc
760 	Adapter->strip_crc = B_TRUE;
761 #else
762 	Adapter->strip_crc = B_FALSE;
763 #endif
764 
765 	/* Get conf file properties */
766 	e1000g_get_conf(Adapter);
767 
768 	/* Get speed/duplex settings in conf file */
769 	hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
770 	hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
771 	e1000g_force_speed_duplex(Adapter);
772 
773 	/* Get Jumbo Frames settings in conf file */
774 	e1000g_get_max_frame_size(Adapter);
775 
776 	/* Set Rx/Tx buffer size */
777 	e1000g_set_bufsize(Adapter);
778 
779 	/* Master Latency Timer */
780 	Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER;
781 
782 	/* copper options */
783 	if (hw->phy.media_type == e1000_media_type_copper) {
784 		hw->phy.mdix = 0;	/* AUTO_ALL_MODES */
785 		hw->phy.disable_polarity_correction = B_FALSE;
786 		hw->phy.ms_type = e1000_ms_hw_default;	/* E1000_MASTER_SLAVE */
787 	}
788 
789 	/* The initial link state should be "unknown" */
790 	Adapter->link_state = LINK_STATE_UNKNOWN;
791 
792 	/* Initialize rx parameters */
793 	Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY;
794 	Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY;
795 
796 	/* Initialize tx parameters */
797 	Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE;
798 	Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD;
799 	Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY;
800 	Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY;
801 
802 	/* Initialize rx parameters */
803 	Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD;
804 
805 	return (DDI_SUCCESS);
806 }
807 
808 static void
809 e1000g_set_bufsize(struct e1000g *Adapter)
810 {
811 	struct e1000_mac_info *mac = &Adapter->shared.mac;
812 	uint64_t rx_size;
813 	uint64_t tx_size;
814 
815 	dev_info_t *devinfo = Adapter->dip;
816 #ifdef __sparc
817 	ulong_t iommu_pagesize;
818 #endif
819 	/* Get the system page size */
820 	Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1);
821 
822 #ifdef __sparc
823 	iommu_pagesize = dvma_pagesize(devinfo);
824 	if (iommu_pagesize != 0) {
825 		if (Adapter->sys_page_sz == iommu_pagesize) {
826 			if (iommu_pagesize > 0x4000)
827 				Adapter->sys_page_sz = 0x4000;
828 		} else {
829 			if (Adapter->sys_page_sz > iommu_pagesize)
830 				Adapter->sys_page_sz = iommu_pagesize;
831 		}
832 	}
833 	if (Adapter->lso_enable) {
834 		Adapter->dvma_page_num = E1000_LSO_MAXLEN /
835 		    Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
836 	} else {
837 		Adapter->dvma_page_num = Adapter->max_frame_size /
838 		    Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
839 	}
840 	ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM);
841 #endif
842 
843 	Adapter->min_frame_size = ETHERMIN + ETHERFCSL;
844 
845 	if (Adapter->mem_workaround_82546 &&
846 	    ((mac->type == e1000_82545) ||
847 	    (mac->type == e1000_82546) ||
848 	    (mac->type == e1000_82546_rev_3))) {
849 		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
850 	} else {
851 		rx_size = Adapter->max_frame_size + E1000G_IPALIGNPRESERVEROOM;
852 		if ((rx_size > FRAME_SIZE_UPTO_2K) &&
853 		    (rx_size <= FRAME_SIZE_UPTO_4K))
854 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
855 		else if ((rx_size > FRAME_SIZE_UPTO_4K) &&
856 		    (rx_size <= FRAME_SIZE_UPTO_8K))
857 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
858 		else if ((rx_size > FRAME_SIZE_UPTO_8K) &&
859 		    (rx_size <= FRAME_SIZE_UPTO_16K))
860 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
861 		else
862 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
863 	}
864 
865 	tx_size = Adapter->max_frame_size;
866 	if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K))
867 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
868 	else if ((tx_size > FRAME_SIZE_UPTO_4K) &&
869 	    (tx_size <= FRAME_SIZE_UPTO_8K))
870 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
871 	else if ((tx_size > FRAME_SIZE_UPTO_8K) &&
872 	    (tx_size <= FRAME_SIZE_UPTO_16K))
873 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
874 	else
875 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;
876 
877 	/*
878 	 * For Wiseman adapters we have an requirement of having receive
879 	 * buffers aligned at 256 byte boundary. Since Livengood does not
880 	 * require this and forcing it for all hardwares will have
881 	 * performance implications, I am making it applicable only for
882 	 * Wiseman and for Jumbo frames enabled mode as rest of the time,
883 	 * it is okay to have normal frames...but it does involve a
884 	 * potential risk where we may loose data if buffer is not
885 	 * aligned...so all wiseman boards to have 256 byte aligned
886 	 * buffers
887 	 */
888 	if (mac->type < e1000_82543)
889 		Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
890 	else
891 		Adapter->rx_buf_align = 1;
892 }
893 
894 /*
895  * e1000g_detach - driver detach
896  *
897  * The detach() function is the complement of the attach routine.
898  * If cmd is set to DDI_DETACH, detach() is used to remove  the
899  * state  associated  with  a  given  instance of a device node
900  * prior to the removal of that instance from the system.
901  *
902  * The detach() function will be called once for each  instance
903  * of the device for which there has been a successful attach()
904  * once there are no longer  any  opens  on  the  device.
905  *
906  * Interrupts routine are disabled, All memory allocated by this
907  * driver are freed.
908  */
909 static int
910 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
911 {
912 	struct e1000g *Adapter;
913 	boolean_t rx_drain;
914 
915 	switch (cmd) {
916 	default:
917 		return (DDI_FAILURE);
918 
919 	case DDI_SUSPEND:
920 		return (e1000g_suspend(devinfo));
921 
922 	case DDI_DETACH:
923 		break;
924 	}
925 
926 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
927 	if (Adapter == NULL)
928 		return (DDI_FAILURE);
929 
930 	rx_drain = e1000g_rx_drain(Adapter);
931 	if (!rx_drain && !e1000g_force_detach)
932 		return (DDI_FAILURE);
933 
934 	if (mac_unregister(Adapter->mh) != 0) {
935 		e1000g_log(Adapter, CE_WARN, "Unregister MAC failed");
936 		return (DDI_FAILURE);
937 	}
938 	Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC;
939 
940 	ASSERT(!(Adapter->e1000g_state & E1000G_STARTED));
941 
942 	/*
943 	 * If e1000g_force_detach is enabled, driver detach is safe.
944 	 * We will let e1000g_free_priv_devi_node routine determine
945 	 * whether we need to free the priv_dip entry for current
946 	 * driver instance.
947 	 */
948 	if (e1000g_force_detach) {
949 		e1000g_free_priv_devi_node(Adapter, rx_drain);
950 	}
951 
952 	e1000g_unattach(devinfo, Adapter);
953 
954 	return (DDI_SUCCESS);
955 }
956 
957 /*
958  * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance
959  *
960  * If free_flag is true, that indicates the upper layer is not holding
961  * the rx buffers, we could free the priv_dip entry safely.
962  *
963  * Otherwise, we have to keep this entry even after driver detached,
964  * and we also need to mark this entry with E1000G_PRIV_DEVI_DETACH flag,
965  * so that driver could free it while all of rx buffers are returned
966  * by upper layer later.
967  */
968 static void
969 e1000g_free_priv_devi_node(struct e1000g *Adapter, boolean_t free_flag)
970 {
971 	private_devi_list_t *devi_node, *devi_del;
972 
973 	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
974 	ASSERT(e1000g_private_devi_list != NULL);
975 	ASSERT(Adapter->priv_dip != NULL);
976 
977 	devi_node = e1000g_private_devi_list;
978 	if (devi_node->priv_dip == Adapter->priv_dip) {
979 		if (free_flag) {
980 			e1000g_private_devi_list =
981 			    devi_node->next;
982 			kmem_free(devi_node->priv_dip,
983 			    sizeof (struct dev_info));
984 			kmem_free(devi_node,
985 			    sizeof (private_devi_list_t));
986 		} else {
987 			ASSERT(e1000g_mblks_pending != 0);
988 			devi_node->flag =
989 			    E1000G_PRIV_DEVI_DETACH;
990 		}
991 		rw_exit(&e1000g_rx_detach_lock);
992 		return;
993 	}
994 
995 	devi_node = e1000g_private_devi_list;
996 	while (devi_node->next != NULL) {
997 		if (devi_node->next->priv_dip == Adapter->priv_dip) {
998 			if (free_flag) {
999 				devi_del = devi_node->next;
1000 				devi_node->next = devi_del->next;
1001 				kmem_free(devi_del->priv_dip,
1002 				    sizeof (struct dev_info));
1003 				kmem_free(devi_del,
1004 				    sizeof (private_devi_list_t));
1005 			} else {
1006 				ASSERT(e1000g_mblks_pending != 0);
1007 				devi_node->next->flag =
1008 				    E1000G_PRIV_DEVI_DETACH;
1009 			}
1010 			break;
1011 		}
1012 		devi_node = devi_node->next;
1013 	}
1014 	rw_exit(&e1000g_rx_detach_lock);
1015 }
1016 
1017 static void
1018 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter)
1019 {
1020 	int result;
1021 
1022 	if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1023 		(void) e1000g_disable_intrs(Adapter);
1024 	}
1025 
1026 	if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) {
1027 		(void) mac_unregister(Adapter->mh);
1028 	}
1029 
1030 	if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1031 		(void) e1000g_rem_intrs(Adapter);
1032 	}
1033 
1034 	if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) {
1035 		(void) ddi_prop_remove_all(devinfo);
1036 	}
1037 
1038 	if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) {
1039 		kstat_delete((kstat_t *)Adapter->e1000g_ksp);
1040 	}
1041 
1042 	if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) {
1043 		stop_link_timer(Adapter);
1044 
1045 		mutex_enter(&e1000g_nvm_lock);
1046 		result = e1000_reset_hw(&Adapter->shared);
1047 		mutex_exit(&e1000g_nvm_lock);
1048 
1049 		if (result != E1000_SUCCESS) {
1050 			e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1051 			ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1052 		}
1053 	}
1054 
1055 	if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
1056 		if (Adapter->osdep.reg_handle != NULL)
1057 			ddi_regs_map_free(&Adapter->osdep.reg_handle);
1058 		if (Adapter->osdep.ich_flash_handle != NULL)
1059 			ddi_regs_map_free(&Adapter->osdep.ich_flash_handle);
1060 	}
1061 
1062 	if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
1063 		if (Adapter->osdep.cfg_handle != NULL)
1064 			pci_config_teardown(&Adapter->osdep.cfg_handle);
1065 	}
1066 
1067 	if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) {
1068 		e1000g_destroy_locks(Adapter);
1069 	}
1070 
1071 	if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) {
1072 		e1000g_fm_fini(Adapter);
1073 	}
1074 
1075 	kmem_free((caddr_t)Adapter, sizeof (struct e1000g));
1076 
1077 	/*
1078 	 * Another hotplug spec requirement,
1079 	 * run ddi_set_driver_private(devinfo, null);
1080 	 */
1081 	ddi_set_driver_private(devinfo, NULL);
1082 }
1083 
1084 static void
1085 e1000g_init_locks(struct e1000g *Adapter)
1086 {
1087 	e1000g_tx_ring_t *tx_ring;
1088 	e1000g_rx_ring_t *rx_ring;
1089 
1090 	rw_init(&Adapter->chip_lock, NULL,
1091 	    RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1092 	mutex_init(&Adapter->link_lock, NULL,
1093 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1094 	mutex_init(&Adapter->watchdog_lock, NULL,
1095 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1096 
1097 	tx_ring = Adapter->tx_ring;
1098 
1099 	mutex_init(&tx_ring->tx_lock, NULL,
1100 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1101 	mutex_init(&tx_ring->usedlist_lock, NULL,
1102 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1103 	mutex_init(&tx_ring->freelist_lock, NULL,
1104 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1105 
1106 	rx_ring = Adapter->rx_ring;
1107 
1108 	mutex_init(&rx_ring->rx_lock, NULL,
1109 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1110 	mutex_init(&rx_ring->freelist_lock, NULL,
1111 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1112 	mutex_init(&rx_ring->recycle_lock, NULL,
1113 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1114 }
1115 
1116 static void
1117 e1000g_destroy_locks(struct e1000g *Adapter)
1118 {
1119 	e1000g_tx_ring_t *tx_ring;
1120 	e1000g_rx_ring_t *rx_ring;
1121 
1122 	tx_ring = Adapter->tx_ring;
1123 	mutex_destroy(&tx_ring->tx_lock);
1124 	mutex_destroy(&tx_ring->usedlist_lock);
1125 	mutex_destroy(&tx_ring->freelist_lock);
1126 
1127 	rx_ring = Adapter->rx_ring;
1128 	mutex_destroy(&rx_ring->rx_lock);
1129 	mutex_destroy(&rx_ring->freelist_lock);
1130 	mutex_destroy(&rx_ring->recycle_lock);
1131 
1132 	mutex_destroy(&Adapter->link_lock);
1133 	mutex_destroy(&Adapter->watchdog_lock);
1134 	rw_destroy(&Adapter->chip_lock);
1135 }
1136 
1137 static int
1138 e1000g_resume(dev_info_t *devinfo)
1139 {
1140 	struct e1000g *Adapter;
1141 
1142 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1143 	if (Adapter == NULL)
1144 		e1000g_log(Adapter, CE_PANIC,
1145 		    "Instance pointer is null\n");
1146 
1147 	if (Adapter->dip != devinfo)
1148 		e1000g_log(Adapter, CE_PANIC,
1149 		    "Devinfo is not the same as saved devinfo\n");
1150 
1151 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1152 
1153 	if (Adapter->e1000g_state & E1000G_STARTED) {
1154 		if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
1155 			rw_exit(&Adapter->chip_lock);
1156 			/*
1157 			 * We note the failure, but return success, as the
1158 			 * system is still usable without this controller.
1159 			 */
1160 			e1000g_log(Adapter, CE_WARN,
1161 			    "e1000g_resume: failed to restart controller\n");
1162 			return (DDI_SUCCESS);
1163 		}
1164 		/* Enable and start the watchdog timer */
1165 		enable_watchdog_timer(Adapter);
1166 	}
1167 
1168 	Adapter->e1000g_state &= ~E1000G_SUSPENDED;
1169 
1170 	rw_exit(&Adapter->chip_lock);
1171 
1172 	return (DDI_SUCCESS);
1173 }
1174 
1175 static int
1176 e1000g_suspend(dev_info_t *devinfo)
1177 {
1178 	struct e1000g *Adapter;
1179 
1180 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1181 	if (Adapter == NULL)
1182 		return (DDI_FAILURE);
1183 
1184 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1185 
1186 	Adapter->e1000g_state |= E1000G_SUSPENDED;
1187 
1188 	/* if the port isn't plumbed, we can simply return */
1189 	if (!(Adapter->e1000g_state & E1000G_STARTED)) {
1190 		rw_exit(&Adapter->chip_lock);
1191 		return (DDI_SUCCESS);
1192 	}
1193 
1194 	e1000g_stop(Adapter, B_FALSE);
1195 
1196 	rw_exit(&Adapter->chip_lock);
1197 
1198 	/* Disable and stop all the timers */
1199 	disable_watchdog_timer(Adapter);
1200 	stop_link_timer(Adapter);
1201 	stop_82547_timer(Adapter->tx_ring);
1202 
1203 	return (DDI_SUCCESS);
1204 }
1205 
1206 static int
1207 e1000g_init(struct e1000g *Adapter)
1208 {
1209 	uint32_t pba;
1210 	uint32_t high_water;
1211 	struct e1000_hw *hw;
1212 	clock_t link_timeout;
1213 	int result;
1214 
1215 	hw = &Adapter->shared;
1216 
1217 	/*
1218 	 * reset to put the hardware in a known state
1219 	 * before we try to do anything with the eeprom
1220 	 */
1221 	mutex_enter(&e1000g_nvm_lock);
1222 	result = e1000_reset_hw(hw);
1223 	mutex_exit(&e1000g_nvm_lock);
1224 
1225 	if (result != E1000_SUCCESS) {
1226 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1227 		goto init_fail;
1228 	}
1229 
1230 	mutex_enter(&e1000g_nvm_lock);
1231 	result = e1000_validate_nvm_checksum(hw);
1232 	if (result < E1000_SUCCESS) {
1233 		/*
1234 		 * Some PCI-E parts fail the first check due to
1235 		 * the link being in sleep state.  Call it again,
1236 		 * if it fails a second time its a real issue.
1237 		 */
1238 		result = e1000_validate_nvm_checksum(hw);
1239 	}
1240 	mutex_exit(&e1000g_nvm_lock);
1241 
1242 	if (result < E1000_SUCCESS) {
1243 		e1000g_log(Adapter, CE_WARN,
1244 		    "Invalid NVM checksum. Please contact "
1245 		    "the vendor to update the NVM.");
1246 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1247 		goto init_fail;
1248 	}
1249 
1250 	result = 0;
1251 #ifdef __sparc
1252 	/*
1253 	 * First, we try to get the local ethernet address from OBP. If
1254 	 * failed, then we get it from the EEPROM of NIC card.
1255 	 */
1256 	result = e1000g_find_mac_address(Adapter);
1257 #endif
1258 	/* Get the local ethernet address. */
1259 	if (!result) {
1260 		mutex_enter(&e1000g_nvm_lock);
1261 		result = e1000_read_mac_addr(hw);
1262 		mutex_exit(&e1000g_nvm_lock);
1263 	}
1264 
1265 	if (result < E1000_SUCCESS) {
1266 		e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
1267 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1268 		goto init_fail;
1269 	}
1270 
1271 	/* check for valid mac address */
1272 	if (!is_valid_mac_addr(hw->mac.addr)) {
1273 		e1000g_log(Adapter, CE_WARN, "Invalid mac addr");
1274 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1275 		goto init_fail;
1276 	}
1277 
1278 	/* Set LAA state for 82571 chipset */
1279 	e1000_set_laa_state_82571(hw, B_TRUE);
1280 
1281 	/* Master Latency Timer implementation */
1282 	if (Adapter->master_latency_timer) {
1283 		pci_config_put8(Adapter->osdep.cfg_handle,
1284 		    PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer);
1285 	}
1286 
1287 	if (hw->mac.type < e1000_82547) {
1288 		/*
1289 		 * Total FIFO is 64K
1290 		 */
1291 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1292 			pba = E1000_PBA_40K;	/* 40K for Rx, 24K for Tx */
1293 		else
1294 			pba = E1000_PBA_48K;	/* 48K for Rx, 16K for Tx */
1295 	} else if ((hw->mac.type == e1000_82571) ||
1296 	    (hw->mac.type == e1000_82572) ||
1297 	    (hw->mac.type == e1000_80003es2lan)) {
1298 		/*
1299 		 * Total FIFO is 48K
1300 		 */
1301 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1302 			pba = E1000_PBA_30K;	/* 30K for Rx, 18K for Tx */
1303 		else
1304 			pba = E1000_PBA_38K;	/* 38K for Rx, 10K for Tx */
1305 	} else if (hw->mac.type == e1000_82573) {
1306 		pba = E1000_PBA_20K;		/* 20K for Rx, 12K for Tx */
1307 	} else if (hw->mac.type == e1000_82574) {
1308 		/* Keep adapter default: 20K for Rx, 20K for Tx */
1309 		pba = E1000_READ_REG(hw, E1000_PBA);
1310 	} else if (hw->mac.type == e1000_ich8lan) {
1311 		pba = E1000_PBA_8K;		/* 8K for Rx, 12K for Tx */
1312 	} else if (hw->mac.type == e1000_ich9lan) {
1313 		pba = E1000_PBA_10K;
1314 	} else if (hw->mac.type == e1000_ich10lan) {
1315 		pba = E1000_PBA_10K;
1316 	} else {
1317 		/*
1318 		 * Total FIFO is 40K
1319 		 */
1320 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1321 			pba = E1000_PBA_22K;	/* 22K for Rx, 18K for Tx */
1322 		else
1323 			pba = E1000_PBA_30K;	/* 30K for Rx, 10K for Tx */
1324 	}
1325 	E1000_WRITE_REG(hw, E1000_PBA, pba);
1326 
1327 	/*
1328 	 * These parameters set thresholds for the adapter's generation(Tx)
1329 	 * and response(Rx) to Ethernet PAUSE frames.  These are just threshold
1330 	 * settings.  Flow control is enabled or disabled in the configuration
1331 	 * file.
1332 	 * High-water mark is set down from the top of the rx fifo (not
1333 	 * sensitive to max_frame_size) and low-water is set just below
1334 	 * high-water mark.
1335 	 * The high water mark must be low enough to fit one full frame above
1336 	 * it in the rx FIFO.  Should be the lower of:
1337 	 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early
1338 	 * receive size (assuming ERT set to E1000_ERT_2048), or the full
1339 	 * Rx FIFO size minus one full frame.
1340 	 */
1341 	high_water = min(((pba << 10) * 9 / 10),
1342 	    ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574 ||
1343 	    hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_ich10lan) ?
1344 	    ((pba << 10) - (E1000_ERT_2048 << 3)) :
1345 	    ((pba << 10) - Adapter->max_frame_size)));
1346 
1347 	hw->fc.high_water = high_water & 0xFFF8;
1348 	hw->fc.low_water = hw->fc.high_water - 8;
1349 
1350 	if (hw->mac.type == e1000_80003es2lan)
1351 		hw->fc.pause_time = 0xFFFF;
1352 	else
1353 		hw->fc.pause_time = E1000_FC_PAUSE_TIME;
1354 	hw->fc.send_xon = B_TRUE;
1355 
1356 	/*
1357 	 * Reset the adapter hardware the second time.
1358 	 */
1359 	mutex_enter(&e1000g_nvm_lock);
1360 	result = e1000_reset_hw(hw);
1361 	mutex_exit(&e1000g_nvm_lock);
1362 
1363 	if (result != E1000_SUCCESS) {
1364 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1365 		goto init_fail;
1366 	}
1367 
1368 	/* disable wakeup control by default */
1369 	if (hw->mac.type >= e1000_82544)
1370 		E1000_WRITE_REG(hw, E1000_WUC, 0);
1371 
1372 	/*
1373 	 * MWI should be disabled on 82546.
1374 	 */
1375 	if (hw->mac.type == e1000_82546)
1376 		e1000_pci_clear_mwi(hw);
1377 	else
1378 		e1000_pci_set_mwi(hw);
1379 
1380 	/*
1381 	 * Configure/Initialize hardware
1382 	 */
1383 	mutex_enter(&e1000g_nvm_lock);
1384 	result = e1000_init_hw(hw);
1385 	mutex_exit(&e1000g_nvm_lock);
1386 
1387 	if (result < E1000_SUCCESS) {
1388 		e1000g_log(Adapter, CE_WARN, "Initialize hw failed");
1389 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1390 		goto init_fail;
1391 	}
1392 
1393 	/*
1394 	 * Restore LED settings to the default from EEPROM
1395 	 * to meet the standard for Sun platforms.
1396 	 */
1397 	if ((hw->mac.type != e1000_82541) &&
1398 	    (hw->mac.type != e1000_82541_rev_2) &&
1399 	    (hw->mac.type != e1000_82547) &&
1400 	    (hw->mac.type != e1000_82547_rev_2))
1401 		(void) e1000_cleanup_led(hw);
1402 
1403 	/* Disable Smart Power Down */
1404 	phy_spd_state(hw, B_FALSE);
1405 
1406 	/* Make sure driver has control */
1407 	e1000g_get_driver_control(hw);
1408 
1409 	/*
1410 	 * Initialize unicast addresses.
1411 	 */
1412 	e1000g_init_unicst(Adapter);
1413 
1414 	/*
1415 	 * Setup and initialize the mctable structures.  After this routine
1416 	 * completes  Multicast table will be set
1417 	 */
1418 	e1000g_setup_multicast(Adapter);
1419 	msec_delay(5);
1420 
1421 	/*
1422 	 * Implement Adaptive IFS
1423 	 */
1424 	e1000_reset_adaptive(hw);
1425 
1426 	/* Setup Interrupt Throttling Register */
1427 	if (hw->mac.type >= e1000_82540) {
1428 		E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate);
1429 	} else
1430 		Adapter->intr_adaptive = B_FALSE;
1431 
1432 	/* Start the timer for link setup */
1433 	if (hw->mac.autoneg)
1434 		link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000);
1435 	else
1436 		link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000);
1437 
1438 	mutex_enter(&Adapter->link_lock);
1439 	if (hw->phy.autoneg_wait_to_complete) {
1440 		Adapter->link_complete = B_TRUE;
1441 	} else {
1442 		Adapter->link_complete = B_FALSE;
1443 		Adapter->link_tid = timeout(e1000g_link_timer,
1444 		    (void *)Adapter, link_timeout);
1445 	}
1446 	mutex_exit(&Adapter->link_lock);
1447 
1448 	/* Enable PCI-Ex master */
1449 	if (hw->bus.type == e1000_bus_type_pci_express) {
1450 		e1000_enable_pciex_master(hw);
1451 	}
1452 
1453 	/* Save the state of the phy */
1454 	e1000g_get_phy_state(Adapter);
1455 
1456 	e1000g_param_sync(Adapter);
1457 
1458 	Adapter->init_count++;
1459 
1460 	if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
1461 		goto init_fail;
1462 	}
1463 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1464 		goto init_fail;
1465 	}
1466 
1467 	Adapter->poll_mode = e1000g_poll_mode;
1468 
1469 	return (DDI_SUCCESS);
1470 
1471 init_fail:
1472 	ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1473 	return (DDI_FAILURE);
1474 }
1475 
1476 /*
1477  * Check if the link is up
1478  */
1479 static boolean_t
1480 e1000g_link_up(struct e1000g *Adapter)
1481 {
1482 	struct e1000_hw *hw;
1483 	boolean_t link_up;
1484 
1485 	hw = &Adapter->shared;
1486 
1487 	(void) e1000_check_for_link(hw);
1488 
1489 	if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU) ||
1490 	    ((!hw->mac.get_link_status) && (hw->mac.type == e1000_82543)) ||
1491 	    ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
1492 	    (hw->mac.serdes_has_link))) {
1493 		link_up = B_TRUE;
1494 	} else {
1495 		link_up = B_FALSE;
1496 	}
1497 
1498 	return (link_up);
1499 }
1500 
1501 static void
1502 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
1503 {
1504 	struct iocblk *iocp;
1505 	struct e1000g *e1000gp;
1506 	enum ioc_reply status;
1507 
1508 	iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
1509 	iocp->ioc_error = 0;
1510 	e1000gp = (struct e1000g *)arg;
1511 
1512 	ASSERT(e1000gp);
1513 	if (e1000gp == NULL) {
1514 		miocnak(q, mp, 0, EINVAL);
1515 		return;
1516 	}
1517 
1518 	rw_enter(&e1000gp->chip_lock, RW_READER);
1519 	if (e1000gp->e1000g_state & E1000G_SUSPENDED) {
1520 		rw_exit(&e1000gp->chip_lock);
1521 		miocnak(q, mp, 0, EINVAL);
1522 		return;
1523 	}
1524 	rw_exit(&e1000gp->chip_lock);
1525 
1526 	switch (iocp->ioc_cmd) {
1527 
1528 	case LB_GET_INFO_SIZE:
1529 	case LB_GET_INFO:
1530 	case LB_GET_MODE:
1531 	case LB_SET_MODE:
1532 		status = e1000g_loopback_ioctl(e1000gp, iocp, mp);
1533 		break;
1534 
1535 
1536 #ifdef E1000G_DEBUG
1537 	case E1000G_IOC_REG_PEEK:
1538 	case E1000G_IOC_REG_POKE:
1539 		status = e1000g_pp_ioctl(e1000gp, iocp, mp);
1540 		break;
1541 	case E1000G_IOC_CHIP_RESET:
1542 		e1000gp->reset_count++;
1543 		if (e1000g_reset_adapter(e1000gp))
1544 			status = IOC_ACK;
1545 		else
1546 			status = IOC_INVAL;
1547 		break;
1548 #endif
1549 	default:
1550 		status = IOC_INVAL;
1551 		break;
1552 	}
1553 
1554 	/*
1555 	 * Decide how to reply
1556 	 */
1557 	switch (status) {
1558 	default:
1559 	case IOC_INVAL:
1560 		/*
1561 		 * Error, reply with a NAK and EINVAL or the specified error
1562 		 */
1563 		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
1564 		    EINVAL : iocp->ioc_error);
1565 		break;
1566 
1567 	case IOC_DONE:
1568 		/*
1569 		 * OK, reply already sent
1570 		 */
1571 		break;
1572 
1573 	case IOC_ACK:
1574 		/*
1575 		 * OK, reply with an ACK
1576 		 */
1577 		miocack(q, mp, 0, 0);
1578 		break;
1579 
1580 	case IOC_REPLY:
1581 		/*
1582 		 * OK, send prepared reply as ACK or NAK
1583 		 */
1584 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
1585 		    M_IOCACK : M_IOCNAK;
1586 		qreply(q, mp);
1587 		break;
1588 	}
1589 }
1590 
1591 /*
1592  * The default value of e1000g_poll_mode == 0 assumes that the NIC is
1593  * capable of supporting only one interrupt and we shouldn't disable
1594  * the physical interrupt. In this case we let the interrupt come and
1595  * we queue the packets in the rx ring itself in case we are in polling
1596  * mode (better latency but slightly lower performance and a very
1597  * high intrrupt count in mpstat which is harmless).
1598  *
1599  * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt
1600  * which can be disabled in poll mode. This gives better overall
1601  * throughput (compared to the mode above), shows very low interrupt
1602  * count but has slightly higher latency since we pick the packets when
1603  * the poll thread does polling.
1604  *
1605  * Currently, this flag should be enabled only while doing performance
1606  * measurement or when it can be guaranteed that entire NIC going
1607  * in poll mode will not harm any traffic like cluster heartbeat etc.
1608  */
1609 int e1000g_poll_mode = 0;
1610 
1611 /*
1612  * Called from the upper layers when driver is in polling mode to
1613  * pick up any queued packets. Care should be taken to not block
1614  * this thread.
1615  */
1616 static mblk_t *e1000g_poll_ring(void *arg, int bytes_to_pickup)
1617 {
1618 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)arg;
1619 	mblk_t			*mp = NULL;
1620 	mblk_t			*tail;
1621 	uint_t			sz = 0;
1622 	struct e1000g 		*adapter;
1623 
1624 	adapter = rx_ring->adapter;
1625 
1626 	rw_enter(&adapter->chip_lock, RW_READER);
1627 
1628 	if (adapter->e1000g_state & E1000G_SUSPENDED) {
1629 		rw_exit(&adapter->chip_lock);
1630 		return (NULL);
1631 	}
1632 
1633 	mutex_enter(&rx_ring->rx_lock);
1634 	ASSERT(rx_ring->poll_flag);
1635 
1636 	/*
1637 	 * Get any packets that have arrived. Works only if we
1638 	 * actually disable the physical adapter/rx_ring interrupt.
1639 	 * (e1000g_poll_mode == 1). In case e1000g_poll_mode == 0,
1640 	 * packets will have already been added to the poll list
1641 	 * by the interrupt (see e1000g_intr_work()).
1642 	 */
1643 	if (adapter->poll_mode) {
1644 		mp = e1000g_receive(rx_ring, &tail, &sz);
1645 		if (mp != NULL) {
1646 			if (rx_ring->poll_list_head == NULL)
1647 				rx_ring->poll_list_head = mp;
1648 			else
1649 				rx_ring->poll_list_tail->b_next = mp;
1650 			rx_ring->poll_list_tail = tail;
1651 			rx_ring->poll_list_sz += sz;
1652 		}
1653 	}
1654 
1655 	mp = rx_ring->poll_list_head;
1656 	if (mp == NULL) {
1657 		mutex_exit(&rx_ring->rx_lock);
1658 		rw_exit(&adapter->chip_lock);
1659 		return (NULL);
1660 	}
1661 
1662 	/* Check if we can sendup the entire chain */
1663 	if (bytes_to_pickup >= rx_ring->poll_list_sz) {
1664 		mp = rx_ring->poll_list_head;
1665 		rx_ring->poll_list_head = NULL;
1666 		rx_ring->poll_list_tail = NULL;
1667 		rx_ring->poll_list_sz = 0;
1668 		mutex_exit(&rx_ring->rx_lock);
1669 		rw_exit(&adapter->chip_lock);
1670 		return (mp);
1671 	}
1672 
1673 	/*
1674 	 * We need to find out how much chain we can send up. We
1675 	 * are guaranteed that atleast one packet will go up since
1676 	 * we already checked that.
1677 	 */
1678 	tail = mp;
1679 	sz = 0;
1680 	while (mp != NULL) {
1681 		sz += MBLKL(mp);
1682 		if (sz > bytes_to_pickup) {
1683 			sz -= MBLKL(mp);
1684 			break;
1685 		}
1686 		tail = mp;
1687 		mp = mp->b_next;
1688 	}
1689 
1690 	mp = rx_ring->poll_list_head;
1691 	rx_ring->poll_list_head = tail->b_next;
1692 	if (rx_ring->poll_list_head == NULL)
1693 		rx_ring->poll_list_tail = NULL;
1694 	rx_ring->poll_list_sz -= sz;
1695 	tail->b_next = NULL;
1696 	mutex_exit(&rx_ring->rx_lock);
1697 	rw_exit(&adapter->chip_lock);
1698 	return (mp);
1699 }
1700 
1701 static int
1702 e1000g_m_start(void *arg)
1703 {
1704 	struct e1000g *Adapter = (struct e1000g *)arg;
1705 
1706 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1707 
1708 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
1709 		rw_exit(&Adapter->chip_lock);
1710 		return (ECANCELED);
1711 	}
1712 
1713 	if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
1714 		rw_exit(&Adapter->chip_lock);
1715 		return (ENOTACTIVE);
1716 	}
1717 
1718 	Adapter->e1000g_state |= E1000G_STARTED;
1719 
1720 	rw_exit(&Adapter->chip_lock);
1721 
1722 	/* Enable and start the watchdog timer */
1723 	enable_watchdog_timer(Adapter);
1724 
1725 	return (0);
1726 }
1727 
1728 static int
1729 e1000g_start(struct e1000g *Adapter, boolean_t global)
1730 {
1731 	if (global) {
1732 		/* Allocate dma resources for descriptors and buffers */
1733 		if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) {
1734 			e1000g_log(Adapter, CE_WARN,
1735 			    "Alloc DMA resources failed");
1736 			return (DDI_FAILURE);
1737 		}
1738 		Adapter->rx_buffer_setup = B_FALSE;
1739 	}
1740 
1741 	if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) {
1742 		if (e1000g_init(Adapter) != DDI_SUCCESS) {
1743 			e1000g_log(Adapter, CE_WARN,
1744 			    "Adapter initialization failed");
1745 			if (global)
1746 				e1000g_release_dma_resources(Adapter);
1747 			return (DDI_FAILURE);
1748 		}
1749 	}
1750 
1751 	/* Setup and initialize the transmit structures */
1752 	e1000g_tx_setup(Adapter);
1753 	msec_delay(5);
1754 
1755 	/* Setup and initialize the receive structures */
1756 	e1000g_rx_setup(Adapter);
1757 	msec_delay(5);
1758 
1759 	/* Restore the e1000g promiscuous mode */
1760 	e1000g_restore_promisc(Adapter);
1761 
1762 	e1000g_mask_interrupt(Adapter);
1763 
1764 	Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
1765 
1766 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1767 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1768 		return (DDI_FAILURE);
1769 	}
1770 
1771 	return (DDI_SUCCESS);
1772 }
1773 
1774 static void
1775 e1000g_m_stop(void *arg)
1776 {
1777 	struct e1000g *Adapter = (struct e1000g *)arg;
1778 
1779 	/* Drain tx sessions */
1780 	(void) e1000g_tx_drain(Adapter);
1781 
1782 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1783 
1784 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
1785 		rw_exit(&Adapter->chip_lock);
1786 		return;
1787 	}
1788 	Adapter->e1000g_state &= ~E1000G_STARTED;
1789 	e1000g_stop(Adapter, B_TRUE);
1790 
1791 	rw_exit(&Adapter->chip_lock);
1792 
1793 	/* Disable and stop all the timers */
1794 	disable_watchdog_timer(Adapter);
1795 	stop_link_timer(Adapter);
1796 	stop_82547_timer(Adapter->tx_ring);
1797 }
1798 
1799 static void
1800 e1000g_stop(struct e1000g *Adapter, boolean_t global)
1801 {
1802 	int result;
1803 
1804 	Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT;
1805 
1806 	/* Stop the chip and release pending resources */
1807 
1808 	/* Tell firmware driver is no longer in control */
1809 	e1000g_release_driver_control(&Adapter->shared);
1810 
1811 	e1000g_clear_all_interrupts(Adapter);
1812 
1813 	mutex_enter(&e1000g_nvm_lock);
1814 	result = e1000_reset_hw(&Adapter->shared);
1815 	mutex_exit(&e1000g_nvm_lock);
1816 
1817 	if (result != E1000_SUCCESS) {
1818 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1819 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1820 	}
1821 
1822 	/* Release resources still held by the TX descriptors */
1823 	e1000g_tx_clean(Adapter);
1824 
1825 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
1826 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1827 
1828 	/* Clean the pending rx jumbo packet fragment */
1829 	e1000g_rx_clean(Adapter);
1830 
1831 	if (global)
1832 		e1000g_release_dma_resources(Adapter);
1833 }
1834 
1835 static void
1836 e1000g_rx_clean(struct e1000g *Adapter)
1837 {
1838 	e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
1839 
1840 	if (rx_ring->rx_mblk != NULL) {
1841 		freemsg(rx_ring->rx_mblk);
1842 		rx_ring->rx_mblk = NULL;
1843 		rx_ring->rx_mblk_tail = NULL;
1844 		rx_ring->rx_mblk_len = 0;
1845 	}
1846 }
1847 
1848 static void
1849 e1000g_tx_clean(struct e1000g *Adapter)
1850 {
1851 	e1000g_tx_ring_t *tx_ring;
1852 	p_tx_sw_packet_t packet;
1853 	mblk_t *mp;
1854 	mblk_t *nmp;
1855 	uint32_t packet_count;
1856 
1857 	tx_ring = Adapter->tx_ring;
1858 
1859 	/*
1860 	 * Here we don't need to protect the lists using
1861 	 * the usedlist_lock and freelist_lock, for they
1862 	 * have been protected by the chip_lock.
1863 	 */
1864 	mp = NULL;
1865 	nmp = NULL;
1866 	packet_count = 0;
1867 	packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list);
1868 	while (packet != NULL) {
1869 		if (packet->mp != NULL) {
1870 			/* Assemble the message chain */
1871 			if (mp == NULL) {
1872 				mp = packet->mp;
1873 				nmp = packet->mp;
1874 			} else {
1875 				nmp->b_next = packet->mp;
1876 				nmp = packet->mp;
1877 			}
1878 			/* Disconnect the message from the sw packet */
1879 			packet->mp = NULL;
1880 		}
1881 
1882 		e1000g_free_tx_swpkt(packet);
1883 		packet_count++;
1884 
1885 		packet = (p_tx_sw_packet_t)
1886 		    QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link);
1887 	}
1888 
1889 	if (mp != NULL)
1890 		freemsgchain(mp);
1891 
1892 	if (packet_count > 0) {
1893 		QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list);
1894 		QUEUE_INIT_LIST(&tx_ring->used_list);
1895 
1896 		/* Setup TX descriptor pointers */
1897 		tx_ring->tbd_next = tx_ring->tbd_first;
1898 		tx_ring->tbd_oldest = tx_ring->tbd_first;
1899 
1900 		/* Setup our HW Tx Head & Tail descriptor pointers */
1901 		E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
1902 		E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
1903 	}
1904 }
1905 
1906 static boolean_t
1907 e1000g_tx_drain(struct e1000g *Adapter)
1908 {
1909 	int i;
1910 	boolean_t done;
1911 	e1000g_tx_ring_t *tx_ring;
1912 
1913 	tx_ring = Adapter->tx_ring;
1914 
1915 	/* Allow up to 'wsdraintime' for pending xmit's to complete. */
1916 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1917 		mutex_enter(&tx_ring->usedlist_lock);
1918 		done = IS_QUEUE_EMPTY(&tx_ring->used_list);
1919 		mutex_exit(&tx_ring->usedlist_lock);
1920 
1921 		if (done)
1922 			break;
1923 
1924 		msec_delay(1);
1925 	}
1926 
1927 	return (done);
1928 }
1929 
1930 static boolean_t
1931 e1000g_rx_drain(struct e1000g *Adapter)
1932 {
1933 	e1000g_rx_ring_t *rx_ring;
1934 	p_rx_sw_packet_t packet;
1935 	boolean_t done;
1936 
1937 	rx_ring = Adapter->rx_ring;
1938 	done = B_TRUE;
1939 
1940 	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
1941 
1942 	while (rx_ring->pending_list != NULL) {
1943 		packet = rx_ring->pending_list;
1944 		rx_ring->pending_list =
1945 		    rx_ring->pending_list->next;
1946 
1947 		if (packet->flag == E1000G_RX_SW_STOP) {
1948 			packet->flag = E1000G_RX_SW_DETACH;
1949 			done = B_FALSE;
1950 		} else {
1951 			ASSERT(packet->flag == E1000G_RX_SW_FREE);
1952 			ASSERT(packet->mp == NULL);
1953 			e1000g_free_rx_sw_packet(packet);
1954 		}
1955 	}
1956 
1957 	rw_exit(&e1000g_rx_detach_lock);
1958 
1959 	return (done);
1960 }
1961 
1962 static boolean_t
1963 e1000g_reset_adapter(struct e1000g *Adapter)
1964 {
1965 	/* Disable and stop all the timers */
1966 	disable_watchdog_timer(Adapter);
1967 	stop_link_timer(Adapter);
1968 	stop_82547_timer(Adapter->tx_ring);
1969 
1970 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1971 
1972 	e1000g_stop(Adapter, B_FALSE);
1973 
1974 	if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
1975 		rw_exit(&Adapter->chip_lock);
1976 		e1000g_log(Adapter, CE_WARN, "Reset failed");
1977 			return (B_FALSE);
1978 	}
1979 
1980 	rw_exit(&Adapter->chip_lock);
1981 
1982 	/* Enable and start the watchdog timer */
1983 	enable_watchdog_timer(Adapter);
1984 
1985 	return (B_TRUE);
1986 }
1987 
1988 boolean_t
1989 e1000g_global_reset(struct e1000g *Adapter)
1990 {
1991 	/* Disable and stop all the timers */
1992 	disable_watchdog_timer(Adapter);
1993 	stop_link_timer(Adapter);
1994 	stop_82547_timer(Adapter->tx_ring);
1995 
1996 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1997 
1998 	e1000g_stop(Adapter, B_TRUE);
1999 
2000 	Adapter->init_count = 0;
2001 
2002 	if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
2003 		rw_exit(&Adapter->chip_lock);
2004 		e1000g_log(Adapter, CE_WARN, "Reset failed");
2005 		return (B_FALSE);
2006 	}
2007 
2008 	rw_exit(&Adapter->chip_lock);
2009 
2010 	/* Enable and start the watchdog timer */
2011 	enable_watchdog_timer(Adapter);
2012 
2013 	return (B_TRUE);
2014 }
2015 
2016 /*
2017  * e1000g_intr_pciexpress - ISR for PCI Express chipsets
2018  *
2019  * This interrupt service routine is for PCI-Express adapters.
2020  * The ICR contents is valid only when the E1000_ICR_INT_ASSERTED
2021  * bit is set.
2022  */
2023 static uint_t
2024 e1000g_intr_pciexpress(caddr_t arg)
2025 {
2026 	struct e1000g *Adapter;
2027 	uint32_t icr;
2028 
2029 	Adapter = (struct e1000g *)(uintptr_t)arg;
2030 	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2031 
2032 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2033 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2034 
2035 	if (icr & E1000_ICR_INT_ASSERTED) {
2036 		/*
2037 		 * E1000_ICR_INT_ASSERTED bit was set:
2038 		 * Read(Clear) the ICR, claim this interrupt,
2039 		 * look for work to do.
2040 		 */
2041 		e1000g_intr_work(Adapter, icr);
2042 		return (DDI_INTR_CLAIMED);
2043 	} else {
2044 		/*
2045 		 * E1000_ICR_INT_ASSERTED bit was not set:
2046 		 * Don't claim this interrupt, return immediately.
2047 		 */
2048 		return (DDI_INTR_UNCLAIMED);
2049 	}
2050 }
2051 
2052 /*
2053  * e1000g_intr - ISR for PCI/PCI-X chipsets
2054  *
2055  * This interrupt service routine is for PCI/PCI-X adapters.
2056  * We check the ICR contents no matter the E1000_ICR_INT_ASSERTED
2057  * bit is set or not.
2058  */
2059 static uint_t
2060 e1000g_intr(caddr_t arg)
2061 {
2062 	struct e1000g *Adapter;
2063 	uint32_t icr;
2064 
2065 	Adapter = (struct e1000g *)(uintptr_t)arg;
2066 	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2067 
2068 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2069 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2070 
2071 	if (icr) {
2072 		/*
2073 		 * Any bit was set in ICR:
2074 		 * Read(Clear) the ICR, claim this interrupt,
2075 		 * look for work to do.
2076 		 */
2077 		e1000g_intr_work(Adapter, icr);
2078 		return (DDI_INTR_CLAIMED);
2079 	} else {
2080 		/*
2081 		 * No bit was set in ICR:
2082 		 * Don't claim this interrupt, return immediately.
2083 		 */
2084 		return (DDI_INTR_UNCLAIMED);
2085 	}
2086 }
2087 
2088 /*
2089  * e1000g_intr_work - actual processing of ISR
2090  *
2091  * Read(clear) the ICR contents and call appropriate interrupt
2092  * processing routines.
2093  */
2094 static void
2095 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr)
2096 {
2097 	struct e1000_hw *hw;
2098 	hw = &Adapter->shared;
2099 	e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
2100 
2101 	Adapter->rx_pkt_cnt = 0;
2102 	Adapter->tx_pkt_cnt = 0;
2103 
2104 	rw_enter(&Adapter->chip_lock, RW_READER);
2105 
2106 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2107 		rw_exit(&Adapter->chip_lock);
2108 		return;
2109 	}
2110 	/*
2111 	 * Here we need to check the "e1000g_state" flag within the chip_lock to
2112 	 * ensure the receive routine will not execute when the adapter is
2113 	 * being reset.
2114 	 */
2115 	if (!(Adapter->e1000g_state & E1000G_STARTED)) {
2116 		rw_exit(&Adapter->chip_lock);
2117 		return;
2118 	}
2119 
2120 	if (icr & E1000_ICR_RXT0) {
2121 		mblk_t			*mp;
2122 		uint_t			sz = 0;
2123 		mblk_t			*tmp, *tail = NULL;
2124 		e1000g_rx_ring_t	*rx_ring;
2125 
2126 		rx_ring = Adapter->rx_ring;
2127 		mutex_enter(&rx_ring->rx_lock);
2128 
2129 		/*
2130 		 * If the real interrupt for the Rx ring was
2131 		 * not disabled (e1000g_poll_mode == 0), then
2132 		 * we still pick up the packets and queue them
2133 		 * on Rx ring if we were in polling mode. this
2134 		 * enables the polling thread to pick up packets
2135 		 * really fast in polling mode and helps improve
2136 		 * latency.
2137 		 */
2138 		mp = e1000g_receive(rx_ring, &tail, &sz);
2139 		rw_exit(&Adapter->chip_lock);
2140 
2141 		if (mp != NULL) {
2142 			ASSERT(tail != NULL);
2143 			if (!rx_ring->poll_flag) {
2144 				/*
2145 				 * If not polling, see if something was
2146 				 * already queued. Take care not to
2147 				 * reorder packets.
2148 				 */
2149 				if (rx_ring->poll_list_head == NULL) {
2150 					mutex_exit(&rx_ring->rx_lock);
2151 					mac_rx_ring(Adapter->mh, rx_ring->mrh,
2152 					    mp, rx_ring->ring_gen_num);
2153 				} else {
2154 					tmp = rx_ring->poll_list_head;
2155 					rx_ring->poll_list_head = NULL;
2156 					rx_ring->poll_list_tail->b_next = mp;
2157 					rx_ring->poll_list_tail = NULL;
2158 					rx_ring->poll_list_sz = 0;
2159 					mutex_exit(&rx_ring->rx_lock);
2160 					mac_rx_ring(Adapter->mh, rx_ring->mrh,
2161 					    tmp, rx_ring->ring_gen_num);
2162 				}
2163 			} else {
2164 				/*
2165 				 * We are in a polling mode. Put the
2166 				 * processed packets on the poll list.
2167 				 */
2168 				if (rx_ring->poll_list_head == NULL)
2169 					rx_ring->poll_list_head = mp;
2170 				else
2171 					rx_ring->poll_list_tail->b_next = mp;
2172 				rx_ring->poll_list_tail = tail;
2173 				rx_ring->poll_list_sz += sz;
2174 				mutex_exit(&rx_ring->rx_lock);
2175 			}
2176 		} else if (!rx_ring->poll_flag &&
2177 		    rx_ring->poll_list_head != NULL) {
2178 			/*
2179 			 * Nothing new has arrived (then why
2180 			 * was the interrupt raised??). Check
2181 			 * if something queued from the last
2182 			 * time.
2183 			 */
2184 			tmp = rx_ring->poll_list_head;
2185 			rx_ring->poll_list_head = NULL;
2186 			rx_ring->poll_list_tail = NULL;
2187 			rx_ring->poll_list_sz = 0;
2188 			mutex_exit(&rx_ring->rx_lock);
2189 			mac_rx_ring(Adapter->mh, rx_ring->mrh,
2190 			    tmp, rx_ring->ring_gen_num);
2191 		} else {
2192 			mutex_exit(&rx_ring->rx_lock);
2193 		}
2194 	} else
2195 		rw_exit(&Adapter->chip_lock);
2196 
2197 	if (icr & E1000_ICR_TXDW) {
2198 		if (!Adapter->tx_intr_enable)
2199 			e1000g_clear_tx_interrupt(Adapter);
2200 
2201 		/* Recycle the tx descriptors */
2202 		rw_enter(&Adapter->chip_lock, RW_READER);
2203 		(void) e1000g_recycle(tx_ring);
2204 		E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr);
2205 		rw_exit(&Adapter->chip_lock);
2206 
2207 		if (tx_ring->resched_needed &&
2208 		    (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) {
2209 			tx_ring->resched_needed = B_FALSE;
2210 			mac_tx_update(Adapter->mh);
2211 			E1000G_STAT(tx_ring->stat_reschedule);
2212 		}
2213 	}
2214 
2215 	/*
2216 	 * The Receive Sequence errors RXSEQ and the link status change LSC
2217 	 * are checked to detect that the cable has been pulled out. For
2218 	 * the Wiseman 2.0 silicon, the receive sequence errors interrupt
2219 	 * are an indication that cable is not connected.
2220 	 */
2221 	if ((icr & E1000_ICR_RXSEQ) ||
2222 	    (icr & E1000_ICR_LSC) ||
2223 	    (icr & E1000_ICR_GPI_EN1)) {
2224 		boolean_t link_changed;
2225 		timeout_id_t tid = 0;
2226 
2227 		stop_watchdog_timer(Adapter);
2228 
2229 		rw_enter(&Adapter->chip_lock, RW_WRITER);
2230 
2231 		/*
2232 		 * Because we got a link-status-change interrupt, force
2233 		 * e1000_check_for_link() to look at phy
2234 		 */
2235 		Adapter->shared.mac.get_link_status = B_TRUE;
2236 
2237 		/* e1000g_link_check takes care of link status change */
2238 		link_changed = e1000g_link_check(Adapter);
2239 
2240 		/* Get new phy state */
2241 		e1000g_get_phy_state(Adapter);
2242 
2243 		/*
2244 		 * If the link timer has not timed out, we'll not notify
2245 		 * the upper layer with any link state until the link is up.
2246 		 */
2247 		if (link_changed && !Adapter->link_complete) {
2248 			if (Adapter->link_state == LINK_STATE_UP) {
2249 				mutex_enter(&Adapter->link_lock);
2250 				Adapter->link_complete = B_TRUE;
2251 				tid = Adapter->link_tid;
2252 				Adapter->link_tid = 0;
2253 				mutex_exit(&Adapter->link_lock);
2254 			} else {
2255 				link_changed = B_FALSE;
2256 			}
2257 		}
2258 		rw_exit(&Adapter->chip_lock);
2259 
2260 		if (link_changed) {
2261 			if (tid != 0)
2262 				(void) untimeout(tid);
2263 
2264 			/*
2265 			 * Workaround for esb2. Data stuck in fifo on a link
2266 			 * down event. Stop receiver here and reset in watchdog.
2267 			 */
2268 			if ((Adapter->link_state == LINK_STATE_DOWN) &&
2269 			    (Adapter->shared.mac.type == e1000_80003es2lan)) {
2270 				uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
2271 				E1000_WRITE_REG(hw, E1000_RCTL,
2272 				    rctl & ~E1000_RCTL_EN);
2273 				e1000g_log(Adapter, CE_WARN,
2274 				    "ESB2 receiver disabled");
2275 				Adapter->esb2_workaround = B_TRUE;
2276 			}
2277 
2278 			mac_link_update(Adapter->mh, Adapter->link_state);
2279 		}
2280 
2281 		start_watchdog_timer(Adapter);
2282 	}
2283 }
2284 
2285 static void
2286 e1000g_init_unicst(struct e1000g *Adapter)
2287 {
2288 	struct e1000_hw *hw;
2289 	int slot;
2290 
2291 	hw = &Adapter->shared;
2292 
2293 	if (Adapter->init_count == 0) {
2294 		/* Initialize the multiple unicast addresses */
2295 		Adapter->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
2296 
2297 		/* Workaround for an erratum of 82571 chipst */
2298 		if ((hw->mac.type == e1000_82571) &&
2299 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2300 			Adapter->unicst_total--;
2301 
2302 		Adapter->unicst_avail = Adapter->unicst_total;
2303 
2304 		for (slot = 0; slot < Adapter->unicst_total; slot++) {
2305 			/* Clear both the flag and MAC address */
2306 			Adapter->unicst_addr[slot].reg.high = 0;
2307 			Adapter->unicst_addr[slot].reg.low = 0;
2308 		}
2309 	} else {
2310 		/* Workaround for an erratum of 82571 chipst */
2311 		if ((hw->mac.type == e1000_82571) &&
2312 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2313 			e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY);
2314 
2315 		/* Re-configure the RAR registers */
2316 		for (slot = 0; slot < Adapter->unicst_total; slot++)
2317 			if (Adapter->unicst_addr[slot].mac.set == 1)
2318 				e1000_rar_set(hw,
2319 				    Adapter->unicst_addr[slot].mac.addr, slot);
2320 	}
2321 
2322 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2323 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2324 }
2325 
2326 static int
2327 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr,
2328     int slot)
2329 {
2330 	struct e1000_hw *hw;
2331 
2332 	hw = &Adapter->shared;
2333 
2334 	/*
2335 	 * The first revision of Wiseman silicon (rev 2.0) has an errata
2336 	 * that requires the receiver to be in reset when any of the
2337 	 * receive address registers (RAR regs) are accessed.  The first
2338 	 * rev of Wiseman silicon also requires MWI to be disabled when
2339 	 * a global reset or a receive reset is issued.  So before we
2340 	 * initialize the RARs, we check the rev of the Wiseman controller
2341 	 * and work around any necessary HW errata.
2342 	 */
2343 	if ((hw->mac.type == e1000_82542) &&
2344 	    (hw->revision_id == E1000_REVISION_2)) {
2345 		e1000_pci_clear_mwi(hw);
2346 		E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2347 		msec_delay(5);
2348 	}
2349 	if (mac_addr == NULL) {
2350 		E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0);
2351 		E1000_WRITE_FLUSH(hw);
2352 		E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0);
2353 		E1000_WRITE_FLUSH(hw);
2354 		/* Clear both the flag and MAC address */
2355 		Adapter->unicst_addr[slot].reg.high = 0;
2356 		Adapter->unicst_addr[slot].reg.low = 0;
2357 	} else {
2358 		bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr,
2359 		    ETHERADDRL);
2360 		e1000_rar_set(hw, (uint8_t *)mac_addr, slot);
2361 		Adapter->unicst_addr[slot].mac.set = 1;
2362 	}
2363 
2364 	/* Workaround for an erratum of 82571 chipst */
2365 	if (slot == 0) {
2366 		if ((hw->mac.type == e1000_82571) &&
2367 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2368 			if (mac_addr == NULL) {
2369 				E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2370 				    slot << 1, 0);
2371 				E1000_WRITE_FLUSH(hw);
2372 				E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2373 				    (slot << 1) + 1, 0);
2374 				E1000_WRITE_FLUSH(hw);
2375 			} else {
2376 				e1000_rar_set(hw, (uint8_t *)mac_addr,
2377 				    LAST_RAR_ENTRY);
2378 			}
2379 	}
2380 
2381 	/*
2382 	 * If we are using Wiseman rev 2.0 silicon, we will have previously
2383 	 * put the receive in reset, and disabled MWI, to work around some
2384 	 * HW errata.  Now we should take the receiver out of reset, and
2385 	 * re-enabled if MWI if it was previously enabled by the PCI BIOS.
2386 	 */
2387 	if ((hw->mac.type == e1000_82542) &&
2388 	    (hw->revision_id == E1000_REVISION_2)) {
2389 		E1000_WRITE_REG(hw, E1000_RCTL, 0);
2390 		msec_delay(1);
2391 		if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2392 			e1000_pci_set_mwi(hw);
2393 		e1000g_rx_setup(Adapter);
2394 	}
2395 
2396 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2397 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2398 		return (EIO);
2399 	}
2400 
2401 	return (0);
2402 }
2403 
2404 static int
2405 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr)
2406 {
2407 	struct e1000_hw *hw = &Adapter->shared;
2408 	int res = 0;
2409 
2410 	if ((multiaddr[0] & 01) == 0) {
2411 		res = EINVAL;
2412 		goto done;
2413 	}
2414 
2415 	if (Adapter->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2416 		res = ENOENT;
2417 		goto done;
2418 	}
2419 
2420 	bcopy(multiaddr,
2421 	    &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL);
2422 	Adapter->mcast_count++;
2423 
2424 	/*
2425 	 * Update the MC table in the hardware
2426 	 */
2427 	e1000g_clear_interrupt(Adapter);
2428 
2429 	e1000g_setup_multicast(Adapter);
2430 
2431 	if ((hw->mac.type == e1000_82542) &&
2432 	    (hw->revision_id == E1000_REVISION_2))
2433 		e1000g_rx_setup(Adapter);
2434 
2435 	e1000g_mask_interrupt(Adapter);
2436 
2437 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2438 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2439 		res = EIO;
2440 	}
2441 
2442 done:
2443 	return (res);
2444 }
2445 
2446 static int
2447 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr)
2448 {
2449 	struct e1000_hw *hw = &Adapter->shared;
2450 	unsigned i;
2451 
2452 	for (i = 0; i < Adapter->mcast_count; i++) {
2453 		if (bcmp(multiaddr, &Adapter->mcast_table[i],
2454 		    ETHERADDRL) == 0) {
2455 			for (i++; i < Adapter->mcast_count; i++) {
2456 				Adapter->mcast_table[i - 1] =
2457 				    Adapter->mcast_table[i];
2458 			}
2459 			Adapter->mcast_count--;
2460 			break;
2461 		}
2462 	}
2463 
2464 	/*
2465 	 * Update the MC table in the hardware
2466 	 */
2467 	e1000g_clear_interrupt(Adapter);
2468 
2469 	e1000g_setup_multicast(Adapter);
2470 
2471 	if ((hw->mac.type == e1000_82542) &&
2472 	    (hw->revision_id == E1000_REVISION_2))
2473 		e1000g_rx_setup(Adapter);
2474 
2475 	e1000g_mask_interrupt(Adapter);
2476 
2477 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2478 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2479 		return (EIO);
2480 	}
2481 
2482 	return (0);
2483 }
2484 
2485 /*
2486  * e1000g_setup_multicast - setup multicast data structures
2487  *
2488  * This routine initializes all of the multicast related structures.
2489  */
2490 void
2491 e1000g_setup_multicast(struct e1000g *Adapter)
2492 {
2493 	uint8_t *mc_addr_list;
2494 	uint32_t mc_addr_count;
2495 	uint32_t rctl;
2496 	struct e1000_hw *hw;
2497 
2498 	hw = &Adapter->shared;
2499 
2500 	/*
2501 	 * The e1000g has the ability to do perfect filtering of 16
2502 	 * addresses. The driver uses one of the e1000g's 16 receive
2503 	 * address registers for its node/network/mac/individual address.
2504 	 * So, we have room for up to 15 multicast addresses in the CAM,
2505 	 * additional MC addresses are handled by the MTA (Multicast Table
2506 	 * Array)
2507 	 */
2508 
2509 	rctl = E1000_READ_REG(hw, E1000_RCTL);
2510 
2511 	mc_addr_list = (uint8_t *)Adapter->mcast_table;
2512 
2513 	if (Adapter->mcast_count > MAX_NUM_MULTICAST_ADDRESSES) {
2514 		E1000G_DEBUGLOG_1(Adapter, CE_WARN,
2515 		    "Adapter requested more than %d MC Addresses.\n",
2516 		    MAX_NUM_MULTICAST_ADDRESSES);
2517 		mc_addr_count = MAX_NUM_MULTICAST_ADDRESSES;
2518 	} else {
2519 		/*
2520 		 * Set the number of MC addresses that we are being
2521 		 * requested to use
2522 		 */
2523 		mc_addr_count = Adapter->mcast_count;
2524 	}
2525 	/*
2526 	 * The Wiseman 2.0 silicon has an errata by which the receiver will
2527 	 * hang  while writing to the receive address registers if the receiver
2528 	 * is not in reset before writing to the registers. Updating the RAR
2529 	 * is done during the setting up of the multicast table, hence the
2530 	 * receiver has to be put in reset before updating the multicast table
2531 	 * and then taken out of reset at the end
2532 	 */
2533 	/*
2534 	 * if WMI was enabled then dis able it before issueing the global
2535 	 * reset to the hardware.
2536 	 */
2537 	/*
2538 	 * Only required for WISEMAN_2_0
2539 	 */
2540 	if ((hw->mac.type == e1000_82542) &&
2541 	    (hw->revision_id == E1000_REVISION_2)) {
2542 		e1000_pci_clear_mwi(hw);
2543 		/*
2544 		 * The e1000g must be in reset before changing any RA
2545 		 * registers. Reset receive unit.  The chip will remain in
2546 		 * the reset state until software explicitly restarts it.
2547 		 */
2548 		E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2549 		/* Allow receiver time to go in to reset */
2550 		msec_delay(5);
2551 	}
2552 
2553 	e1000_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2554 	    Adapter->unicst_total, hw->mac.rar_entry_count);
2555 
2556 	/*
2557 	 * Only for Wiseman_2_0
2558 	 * If MWI was enabled then re-enable it after issueing (as we
2559 	 * disabled it up there) the receive reset command.
2560 	 * Wainwright does not have a receive reset command and only thing
2561 	 * close to it is global reset which will require tx setup also
2562 	 */
2563 	if ((hw->mac.type == e1000_82542) &&
2564 	    (hw->revision_id == E1000_REVISION_2)) {
2565 		/*
2566 		 * if WMI was enabled then reenable it after issueing the
2567 		 * global or receive reset to the hardware.
2568 		 */
2569 
2570 		/*
2571 		 * Take receiver out of reset
2572 		 * clear E1000_RCTL_RST bit (and all others)
2573 		 */
2574 		E1000_WRITE_REG(hw, E1000_RCTL, 0);
2575 		msec_delay(5);
2576 		if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2577 			e1000_pci_set_mwi(hw);
2578 	}
2579 
2580 	/*
2581 	 * Restore original value
2582 	 */
2583 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2584 }
2585 
2586 int
2587 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
2588 {
2589 	struct e1000g *Adapter = (struct e1000g *)arg;
2590 	int result;
2591 
2592 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2593 
2594 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2595 		result = ECANCELED;
2596 		goto done;
2597 	}
2598 
2599 	result = (add) ? multicst_add(Adapter, addr)
2600 	    : multicst_remove(Adapter, addr);
2601 
2602 done:
2603 	rw_exit(&Adapter->chip_lock);
2604 	return (result);
2605 
2606 }
2607 
2608 int
2609 e1000g_m_promisc(void *arg, boolean_t on)
2610 {
2611 	struct e1000g *Adapter = (struct e1000g *)arg;
2612 	uint32_t rctl;
2613 
2614 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2615 
2616 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2617 		rw_exit(&Adapter->chip_lock);
2618 		return (ECANCELED);
2619 	}
2620 
2621 	rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
2622 
2623 	if (on)
2624 		rctl |=
2625 		    (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
2626 	else
2627 		rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
2628 
2629 	E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
2630 
2631 	Adapter->e1000g_promisc = on;
2632 
2633 	rw_exit(&Adapter->chip_lock);
2634 
2635 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2636 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2637 		return (EIO);
2638 	}
2639 
2640 	return (0);
2641 }
2642 
2643 /*
2644  * Entry points to enable and disable interrupts at the granularity of
2645  * a group.
2646  * Turns the poll_mode for the whole adapter on and off to enable or
2647  * override the ring level polling control over the hardware interrupts.
2648  */
2649 static int
2650 e1000g_rx_group_intr_enable(mac_intr_handle_t arg)
2651 {
2652 	struct e1000g		*adapter = (struct e1000g *)arg;
2653 	e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2654 
2655 	/*
2656 	 * Later interrupts at the granularity of the this ring will
2657 	 * invoke mac_rx() with NULL, indicating the need for another
2658 	 * software classification.
2659 	 * We have a single ring usable per adapter now, so we only need to
2660 	 * reset the rx handle for that one.
2661 	 * When more RX rings can be used, we should update each one of them.
2662 	 */
2663 	mutex_enter(&rx_ring->rx_lock);
2664 	rx_ring->mrh = NULL;
2665 	adapter->poll_mode = B_FALSE;
2666 	mutex_exit(&rx_ring->rx_lock);
2667 	return (0);
2668 }
2669 
2670 static int
2671 e1000g_rx_group_intr_disable(mac_intr_handle_t arg)
2672 {
2673 	struct e1000g *adapter = (struct e1000g *)arg;
2674 	e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2675 
2676 	mutex_enter(&rx_ring->rx_lock);
2677 
2678 	/*
2679 	 * Later interrupts at the granularity of the this ring will
2680 	 * invoke mac_rx() with the handle for this ring;
2681 	 */
2682 	adapter->poll_mode = B_TRUE;
2683 	rx_ring->mrh = rx_ring->mrh_init;
2684 	mutex_exit(&rx_ring->rx_lock);
2685 	return (0);
2686 }
2687 
2688 /*
2689  * Entry points to enable and disable interrupts at the granularity of
2690  * a ring.
2691  * adapter poll_mode controls whether we actually proceed with hardware
2692  * interrupt toggling.
2693  */
2694 static int
2695 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh)
2696 {
2697 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)intrh;
2698 	struct e1000g 		*adapter = rx_ring->adapter;
2699 	struct e1000_hw 	*hw = &adapter->shared;
2700 	uint32_t		intr_mask;
2701 	boolean_t		poll_mode;
2702 
2703 	rw_enter(&adapter->chip_lock, RW_READER);
2704 
2705 	if (adapter->e1000g_state & E1000G_SUSPENDED) {
2706 		rw_exit(&adapter->chip_lock);
2707 		return (0);
2708 	}
2709 
2710 	mutex_enter(&rx_ring->rx_lock);
2711 	rx_ring->poll_flag = 0;
2712 	poll_mode = adapter->poll_mode;
2713 	mutex_exit(&rx_ring->rx_lock);
2714 
2715 	if (poll_mode) {
2716 		/* Rx interrupt enabling for MSI and legacy */
2717 		intr_mask = E1000_READ_REG(hw, E1000_IMS);
2718 		intr_mask |= E1000_IMS_RXT0;
2719 		E1000_WRITE_REG(hw, E1000_IMS, intr_mask);
2720 		E1000_WRITE_FLUSH(hw);
2721 
2722 		/* Trigger a Rx interrupt to check Rx ring */
2723 		E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
2724 		E1000_WRITE_FLUSH(hw);
2725 	}
2726 
2727 	rw_exit(&adapter->chip_lock);
2728 	return (0);
2729 }
2730 
2731 static int
2732 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh)
2733 {
2734 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)intrh;
2735 	struct e1000g 		*adapter = rx_ring->adapter;
2736 	struct e1000_hw 	*hw = &adapter->shared;
2737 	boolean_t		poll_mode;
2738 
2739 	rw_enter(&adapter->chip_lock, RW_READER);
2740 
2741 	if (adapter->e1000g_state & E1000G_SUSPENDED) {
2742 		rw_exit(&adapter->chip_lock);
2743 		return (0);
2744 	}
2745 
2746 	/*
2747 	 * Once the adapter can support per Rx ring interrupt,
2748 	 * we should disable the real interrupt instead of just setting
2749 	 * the flag.
2750 	 */
2751 	mutex_enter(&rx_ring->rx_lock);
2752 	rx_ring->poll_flag = 1;
2753 	poll_mode = adapter->poll_mode;
2754 	mutex_exit(&rx_ring->rx_lock);
2755 
2756 	if (poll_mode) {
2757 		/* Rx interrupt disabling for MSI and legacy */
2758 		E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
2759 		E1000_WRITE_FLUSH(hw);
2760 	}
2761 
2762 	rw_exit(&adapter->chip_lock);
2763 	return (0);
2764 }
2765 
2766 /*
2767  * e1000g_unicst_find - Find the slot for the specified unicast address
2768  */
2769 static int
2770 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr)
2771 {
2772 	int slot;
2773 
2774 	for (slot = 0; slot < Adapter->unicst_total; slot++) {
2775 		if ((Adapter->unicst_addr[slot].mac.set == 1) &&
2776 		    (bcmp(Adapter->unicst_addr[slot].mac.addr,
2777 		    mac_addr, ETHERADDRL) == 0))
2778 				return (slot);
2779 	}
2780 
2781 	return (-1);
2782 }
2783 
2784 /*
2785  * Entry points to add and remove a MAC address to a ring group.
2786  * The caller takes care of adding and removing the MAC addresses
2787  * to the filter via these two routines.
2788  */
2789 
2790 static int
2791 e1000g_addmac(void *arg, const uint8_t *mac_addr)
2792 {
2793 	struct e1000g *Adapter = (struct e1000g *)arg;
2794 	int slot, err;
2795 
2796 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2797 
2798 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2799 		rw_exit(&Adapter->chip_lock);
2800 		return (ECANCELED);
2801 	}
2802 
2803 	if (e1000g_unicst_find(Adapter, mac_addr) != -1) {
2804 		/* The same address is already in slot */
2805 		rw_exit(&Adapter->chip_lock);
2806 		return (0);
2807 	}
2808 
2809 	if (Adapter->unicst_avail == 0) {
2810 		/* no slots available */
2811 		rw_exit(&Adapter->chip_lock);
2812 		return (ENOSPC);
2813 	}
2814 
2815 	/* Search for a free slot */
2816 	for (slot = 0; slot < Adapter->unicst_total; slot++) {
2817 		if (Adapter->unicst_addr[slot].mac.set == 0)
2818 			break;
2819 	}
2820 	ASSERT(slot < Adapter->unicst_total);
2821 
2822 	err = e1000g_unicst_set(Adapter, mac_addr, slot);
2823 	if (err == 0)
2824 		Adapter->unicst_avail--;
2825 
2826 	rw_exit(&Adapter->chip_lock);
2827 
2828 	return (err);
2829 }
2830 
2831 static int
2832 e1000g_remmac(void *arg, const uint8_t *mac_addr)
2833 {
2834 	struct e1000g *Adapter = (struct e1000g *)arg;
2835 	int slot, err;
2836 
2837 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2838 
2839 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2840 		rw_exit(&Adapter->chip_lock);
2841 		return (ECANCELED);
2842 	}
2843 
2844 	slot = e1000g_unicst_find(Adapter, mac_addr);
2845 	if (slot == -1) {
2846 		rw_exit(&Adapter->chip_lock);
2847 		return (EINVAL);
2848 	}
2849 
2850 	ASSERT(Adapter->unicst_addr[slot].mac.set);
2851 
2852 	/* Clear this slot */
2853 	err = e1000g_unicst_set(Adapter, NULL, slot);
2854 	if (err == 0)
2855 		Adapter->unicst_avail++;
2856 
2857 	rw_exit(&Adapter->chip_lock);
2858 
2859 	return (err);
2860 }
2861 
2862 static int
2863 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
2864 {
2865 	e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh;
2866 
2867 	mutex_enter(&rx_ring->rx_lock);
2868 	rx_ring->ring_gen_num = mr_gen_num;
2869 	mutex_exit(&rx_ring->rx_lock);
2870 	return (0);
2871 }
2872 
2873 /*
2874  * Callback funtion for MAC layer to register all rings.
2875  *
2876  * The hardware supports a single group with currently only one ring
2877  * available.
2878  * Though not offering virtualization ability per se, exposing the
2879  * group/ring still enables the polling and interrupt toggling.
2880  */
2881 void
2882 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index,
2883     const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
2884 {
2885 	struct e1000g *Adapter = (struct e1000g *)arg;
2886 	e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
2887 	mac_intr_t *mintr;
2888 
2889 	/*
2890 	 * We advertised only RX group/rings, so the MAC framework shouldn't
2891 	 * ask for any thing else.
2892 	 */
2893 	ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0);
2894 
2895 	rx_ring->mrh = rx_ring->mrh_init = rh;
2896 	infop->mri_driver = (mac_ring_driver_t)rx_ring;
2897 	infop->mri_start = e1000g_ring_start;
2898 	infop->mri_stop = NULL;
2899 	infop->mri_poll = e1000g_poll_ring;
2900 
2901 	/* Ring level interrupts */
2902 	mintr = &infop->mri_intr;
2903 	mintr->mi_handle = (mac_intr_handle_t)rx_ring;
2904 	mintr->mi_enable = e1000g_rx_ring_intr_enable;
2905 	mintr->mi_disable = e1000g_rx_ring_intr_disable;
2906 }
2907 
2908 static void
2909 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index,
2910     mac_group_info_t *infop, mac_group_handle_t gh)
2911 {
2912 	struct e1000g *Adapter = (struct e1000g *)arg;
2913 	mac_intr_t *mintr;
2914 
2915 	/*
2916 	 * We advertised a single RX ring. Getting a request for anything else
2917 	 * signifies a bug in the MAC framework.
2918 	 */
2919 	ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0);
2920 
2921 	Adapter->rx_group = gh;
2922 
2923 	infop->mgi_driver = (mac_group_driver_t)Adapter;
2924 	infop->mgi_start = NULL;
2925 	infop->mgi_stop = NULL;
2926 	infop->mgi_addmac = e1000g_addmac;
2927 	infop->mgi_remmac = e1000g_remmac;
2928 	infop->mgi_count = 1;
2929 
2930 	/* Group level interrupts */
2931 	mintr = &infop->mgi_intr;
2932 	mintr->mi_handle = (mac_intr_handle_t)Adapter;
2933 	mintr->mi_enable = e1000g_rx_group_intr_enable;
2934 	mintr->mi_disable = e1000g_rx_group_intr_disable;
2935 }
2936 
2937 static boolean_t
2938 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2939 {
2940 	struct e1000g *Adapter = (struct e1000g *)arg;
2941 
2942 	switch (cap) {
2943 	case MAC_CAPAB_HCKSUM: {
2944 		uint32_t *txflags = cap_data;
2945 
2946 		if (Adapter->tx_hcksum_enable)
2947 			*txflags = HCKSUM_IPHDRCKSUM |
2948 			    HCKSUM_INET_PARTIAL;
2949 		else
2950 			return (B_FALSE);
2951 		break;
2952 	}
2953 
2954 	case MAC_CAPAB_LSO: {
2955 		mac_capab_lso_t *cap_lso = cap_data;
2956 
2957 		if (Adapter->lso_enable) {
2958 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
2959 			cap_lso->lso_basic_tcp_ipv4.lso_max =
2960 			    E1000_LSO_MAXLEN;
2961 		} else
2962 			return (B_FALSE);
2963 		break;
2964 	}
2965 	case MAC_CAPAB_RINGS: {
2966 		mac_capab_rings_t *cap_rings = cap_data;
2967 
2968 		/* No TX rings exposed yet */
2969 		if (cap_rings->mr_type != MAC_RING_TYPE_RX)
2970 			return (B_FALSE);
2971 
2972 		cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2973 		cap_rings->mr_rnum = 1;
2974 		cap_rings->mr_gnum = 1;
2975 		cap_rings->mr_rget = e1000g_fill_ring;
2976 		cap_rings->mr_gget = e1000g_fill_group;
2977 		break;
2978 	}
2979 	default:
2980 		return (B_FALSE);
2981 	}
2982 	return (B_TRUE);
2983 }
2984 
2985 static boolean_t
2986 e1000g_param_locked(mac_prop_id_t pr_num)
2987 {
2988 	/*
2989 	 * All en_* parameters are locked (read-only) while
2990 	 * the device is in any sort of loopback mode ...
2991 	 */
2992 	switch (pr_num) {
2993 		case MAC_PROP_EN_1000FDX_CAP:
2994 		case MAC_PROP_EN_1000HDX_CAP:
2995 		case MAC_PROP_EN_100FDX_CAP:
2996 		case MAC_PROP_EN_100HDX_CAP:
2997 		case MAC_PROP_EN_10FDX_CAP:
2998 		case MAC_PROP_EN_10HDX_CAP:
2999 		case MAC_PROP_AUTONEG:
3000 		case MAC_PROP_FLOWCTRL:
3001 			return (B_TRUE);
3002 	}
3003 	return (B_FALSE);
3004 }
3005 
3006 /*
3007  * callback function for set/get of properties
3008  */
3009 static int
3010 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3011     uint_t pr_valsize, const void *pr_val)
3012 {
3013 	struct e1000g *Adapter = arg;
3014 	struct e1000_mac_info *mac = &Adapter->shared.mac;
3015 	struct e1000_phy_info *phy = &Adapter->shared.phy;
3016 	struct e1000_fc_info *fc = &Adapter->shared.fc;
3017 	int err = 0;
3018 	link_flowctrl_t flowctrl;
3019 	uint32_t cur_mtu, new_mtu;
3020 	uint64_t tmp = 0;
3021 
3022 	rw_enter(&Adapter->chip_lock, RW_WRITER);
3023 
3024 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3025 		rw_exit(&Adapter->chip_lock);
3026 		return (ECANCELED);
3027 	}
3028 
3029 	if (Adapter->loopback_mode != E1000G_LB_NONE &&
3030 	    e1000g_param_locked(pr_num)) {
3031 		/*
3032 		 * All en_* parameters are locked (read-only)
3033 		 * while the device is in any sort of loopback mode.
3034 		 */
3035 		rw_exit(&Adapter->chip_lock);
3036 		return (EBUSY);
3037 	}
3038 
3039 	switch (pr_num) {
3040 		case MAC_PROP_EN_1000FDX_CAP:
3041 			Adapter->param_en_1000fdx = *(uint8_t *)pr_val;
3042 			Adapter->param_adv_1000fdx = *(uint8_t *)pr_val;
3043 			goto reset;
3044 		case MAC_PROP_EN_100FDX_CAP:
3045 			Adapter->param_en_100fdx = *(uint8_t *)pr_val;
3046 			Adapter->param_adv_100fdx = *(uint8_t *)pr_val;
3047 			goto reset;
3048 		case MAC_PROP_EN_100HDX_CAP:
3049 			Adapter->param_en_100hdx = *(uint8_t *)pr_val;
3050 			Adapter->param_adv_100hdx = *(uint8_t *)pr_val;
3051 			goto reset;
3052 		case MAC_PROP_EN_10FDX_CAP:
3053 			Adapter->param_en_10fdx = *(uint8_t *)pr_val;
3054 			Adapter->param_adv_10fdx = *(uint8_t *)pr_val;
3055 			goto reset;
3056 		case MAC_PROP_EN_10HDX_CAP:
3057 			Adapter->param_en_10hdx = *(uint8_t *)pr_val;
3058 			Adapter->param_adv_10hdx = *(uint8_t *)pr_val;
3059 			goto reset;
3060 		case MAC_PROP_AUTONEG:
3061 			Adapter->param_adv_autoneg = *(uint8_t *)pr_val;
3062 			goto reset;
3063 		case MAC_PROP_FLOWCTRL:
3064 			fc->send_xon = B_TRUE;
3065 			bcopy(pr_val, &flowctrl, sizeof (flowctrl));
3066 
3067 			switch (flowctrl) {
3068 			default:
3069 				err = EINVAL;
3070 				break;
3071 			case LINK_FLOWCTRL_NONE:
3072 				fc->requested_mode = e1000_fc_none;
3073 				break;
3074 			case LINK_FLOWCTRL_RX:
3075 				fc->requested_mode = e1000_fc_rx_pause;
3076 				break;
3077 			case LINK_FLOWCTRL_TX:
3078 				fc->requested_mode = e1000_fc_tx_pause;
3079 				break;
3080 			case LINK_FLOWCTRL_BI:
3081 				fc->requested_mode = e1000_fc_full;
3082 				break;
3083 			}
3084 reset:
3085 			if (err == 0) {
3086 				if (e1000g_reset_link(Adapter) != DDI_SUCCESS)
3087 					err = EINVAL;
3088 			}
3089 			break;
3090 		case MAC_PROP_ADV_1000FDX_CAP:
3091 		case MAC_PROP_ADV_1000HDX_CAP:
3092 		case MAC_PROP_ADV_100FDX_CAP:
3093 		case MAC_PROP_ADV_100HDX_CAP:
3094 		case MAC_PROP_ADV_10FDX_CAP:
3095 		case MAC_PROP_ADV_10HDX_CAP:
3096 		case MAC_PROP_EN_1000HDX_CAP:
3097 		case MAC_PROP_STATUS:
3098 		case MAC_PROP_SPEED:
3099 		case MAC_PROP_DUPLEX:
3100 			err = ENOTSUP; /* read-only prop. Can't set this. */
3101 			break;
3102 		case MAC_PROP_MTU:
3103 			cur_mtu = Adapter->default_mtu;
3104 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3105 			if (new_mtu == cur_mtu) {
3106 				err = 0;
3107 				break;
3108 			}
3109 
3110 			tmp = new_mtu + sizeof (struct ether_vlan_header) +
3111 			    ETHERFCSL;
3112 			if ((tmp < DEFAULT_FRAME_SIZE) ||
3113 			    (tmp > MAXIMUM_FRAME_SIZE)) {
3114 				err = EINVAL;
3115 				break;
3116 			}
3117 
3118 			/* ich8 does not support jumbo frames */
3119 			if ((mac->type == e1000_ich8lan) &&
3120 			    (tmp > DEFAULT_FRAME_SIZE)) {
3121 				err = EINVAL;
3122 				break;
3123 			}
3124 			/* ich9 does not do jumbo frames on one phy type */
3125 			if ((mac->type == e1000_ich9lan) &&
3126 			    (phy->type == e1000_phy_ife) &&
3127 			    (tmp > DEFAULT_FRAME_SIZE)) {
3128 				err = EINVAL;
3129 				break;
3130 			}
3131 			if (Adapter->e1000g_state & E1000G_STARTED) {
3132 				err = EBUSY;
3133 				break;
3134 			}
3135 
3136 			err = mac_maxsdu_update(Adapter->mh, new_mtu);
3137 			if (err == 0) {
3138 				Adapter->max_frame_size = (uint32_t)tmp;
3139 				Adapter->default_mtu = new_mtu;
3140 				e1000g_set_bufsize(Adapter);
3141 			}
3142 			break;
3143 		case MAC_PROP_PRIVATE:
3144 			err = e1000g_set_priv_prop(Adapter, pr_name,
3145 			    pr_valsize, pr_val);
3146 			break;
3147 		default:
3148 			err = ENOTSUP;
3149 			break;
3150 	}
3151 	rw_exit(&Adapter->chip_lock);
3152 	return (err);
3153 }
3154 
3155 static int
3156 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3157     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
3158 {
3159 	struct e1000g *Adapter = arg;
3160 	struct e1000_fc_info *fc = &Adapter->shared.fc;
3161 	int err = 0;
3162 	link_flowctrl_t flowctrl;
3163 	uint64_t tmp = 0;
3164 
3165 	if (pr_valsize == 0)
3166 		return (EINVAL);
3167 
3168 	*perm = MAC_PROP_PERM_RW;
3169 
3170 	bzero(pr_val, pr_valsize);
3171 	if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) {
3172 		return (e1000g_get_def_val(Adapter, pr_num,
3173 		    pr_valsize, pr_val));
3174 	}
3175 
3176 	switch (pr_num) {
3177 		case MAC_PROP_DUPLEX:
3178 			*perm = MAC_PROP_PERM_READ;
3179 			if (pr_valsize >= sizeof (link_duplex_t)) {
3180 				bcopy(&Adapter->link_duplex, pr_val,
3181 				    sizeof (link_duplex_t));
3182 			} else
3183 				err = EINVAL;
3184 			break;
3185 		case MAC_PROP_SPEED:
3186 			*perm = MAC_PROP_PERM_READ;
3187 			if (pr_valsize >= sizeof (uint64_t)) {
3188 				tmp = Adapter->link_speed * 1000000ull;
3189 				bcopy(&tmp, pr_val, sizeof (tmp));
3190 			} else
3191 				err = EINVAL;
3192 			break;
3193 		case MAC_PROP_AUTONEG:
3194 			*(uint8_t *)pr_val = Adapter->param_adv_autoneg;
3195 			break;
3196 		case MAC_PROP_FLOWCTRL:
3197 			if (pr_valsize >= sizeof (link_flowctrl_t)) {
3198 				switch (fc->current_mode) {
3199 					case e1000_fc_none:
3200 						flowctrl = LINK_FLOWCTRL_NONE;
3201 						break;
3202 					case e1000_fc_rx_pause:
3203 						flowctrl = LINK_FLOWCTRL_RX;
3204 						break;
3205 					case e1000_fc_tx_pause:
3206 						flowctrl = LINK_FLOWCTRL_TX;
3207 						break;
3208 					case e1000_fc_full:
3209 						flowctrl = LINK_FLOWCTRL_BI;
3210 						break;
3211 				}
3212 				bcopy(&flowctrl, pr_val, sizeof (flowctrl));
3213 			} else
3214 				err = EINVAL;
3215 			break;
3216 		case MAC_PROP_ADV_1000FDX_CAP:
3217 			*perm = MAC_PROP_PERM_READ;
3218 			*(uint8_t *)pr_val = Adapter->param_adv_1000fdx;
3219 			break;
3220 		case MAC_PROP_EN_1000FDX_CAP:
3221 			*(uint8_t *)pr_val = Adapter->param_en_1000fdx;
3222 			break;
3223 		case MAC_PROP_ADV_1000HDX_CAP:
3224 			*perm = MAC_PROP_PERM_READ;
3225 			*(uint8_t *)pr_val = Adapter->param_adv_1000hdx;
3226 			break;
3227 		case MAC_PROP_EN_1000HDX_CAP:
3228 			*perm = MAC_PROP_PERM_READ;
3229 			*(uint8_t *)pr_val = Adapter->param_en_1000hdx;
3230 			break;
3231 		case MAC_PROP_ADV_100FDX_CAP:
3232 			*perm = MAC_PROP_PERM_READ;
3233 			*(uint8_t *)pr_val = Adapter->param_adv_100fdx;
3234 			break;
3235 		case MAC_PROP_EN_100FDX_CAP:
3236 			*(uint8_t *)pr_val = Adapter->param_en_100fdx;
3237 			break;
3238 		case MAC_PROP_ADV_100HDX_CAP:
3239 			*perm = MAC_PROP_PERM_READ;
3240 			*(uint8_t *)pr_val = Adapter->param_adv_100hdx;
3241 			break;
3242 		case MAC_PROP_EN_100HDX_CAP:
3243 			*(uint8_t *)pr_val = Adapter->param_en_100hdx;
3244 			break;
3245 		case MAC_PROP_ADV_10FDX_CAP:
3246 			*perm = MAC_PROP_PERM_READ;
3247 			*(uint8_t *)pr_val = Adapter->param_adv_10fdx;
3248 			break;
3249 		case MAC_PROP_EN_10FDX_CAP:
3250 			*(uint8_t *)pr_val = Adapter->param_en_10fdx;
3251 			break;
3252 		case MAC_PROP_ADV_10HDX_CAP:
3253 			*perm = MAC_PROP_PERM_READ;
3254 			*(uint8_t *)pr_val = Adapter->param_adv_10hdx;
3255 			break;
3256 		case MAC_PROP_EN_10HDX_CAP:
3257 			*(uint8_t *)pr_val = Adapter->param_en_10hdx;
3258 			break;
3259 		case MAC_PROP_ADV_100T4_CAP:
3260 		case MAC_PROP_EN_100T4_CAP:
3261 			*perm = MAC_PROP_PERM_READ;
3262 			*(uint8_t *)pr_val = Adapter->param_adv_100t4;
3263 			break;
3264 		case MAC_PROP_PRIVATE:
3265 			err = e1000g_get_priv_prop(Adapter, pr_name,
3266 			    pr_flags, pr_valsize, pr_val, perm);
3267 			break;
3268 		default:
3269 			err = ENOTSUP;
3270 			break;
3271 	}
3272 	return (err);
3273 }
3274 
3275 /* ARGSUSED2 */
3276 static int
3277 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name,
3278     uint_t pr_valsize, const void *pr_val)
3279 {
3280 	int err = 0;
3281 	long result;
3282 	struct e1000_hw *hw = &Adapter->shared;
3283 
3284 	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3285 		if (pr_val == NULL) {
3286 			err = EINVAL;
3287 			return (err);
3288 		}
3289 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3290 		if (result < MIN_TX_BCOPY_THRESHOLD ||
3291 		    result > MAX_TX_BCOPY_THRESHOLD)
3292 			err = EINVAL;
3293 		else {
3294 			Adapter->tx_bcopy_thresh = (uint32_t)result;
3295 		}
3296 		return (err);
3297 	}
3298 	if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3299 		if (pr_val == NULL) {
3300 			err = EINVAL;
3301 			return (err);
3302 		}
3303 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3304 		if (result < 0 || result > 1)
3305 			err = EINVAL;
3306 		else {
3307 			Adapter->tx_intr_enable = (result == 1) ?
3308 			    B_TRUE: B_FALSE;
3309 			if (Adapter->tx_intr_enable)
3310 				e1000g_mask_tx_interrupt(Adapter);
3311 			else
3312 				e1000g_clear_tx_interrupt(Adapter);
3313 			if (e1000g_check_acc_handle(
3314 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3315 				ddi_fm_service_impact(Adapter->dip,
3316 				    DDI_SERVICE_DEGRADED);
3317 		}
3318 		return (err);
3319 	}
3320 	if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3321 		if (pr_val == NULL) {
3322 			err = EINVAL;
3323 			return (err);
3324 		}
3325 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3326 		if (result < MIN_TX_INTR_DELAY ||
3327 		    result > MAX_TX_INTR_DELAY)
3328 			err = EINVAL;
3329 		else {
3330 			Adapter->tx_intr_delay = (uint32_t)result;
3331 			E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay);
3332 			if (e1000g_check_acc_handle(
3333 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3334 				ddi_fm_service_impact(Adapter->dip,
3335 				    DDI_SERVICE_DEGRADED);
3336 		}
3337 		return (err);
3338 	}
3339 	if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3340 		if (pr_val == NULL) {
3341 			err = EINVAL;
3342 			return (err);
3343 		}
3344 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3345 		if (result < MIN_TX_INTR_ABS_DELAY ||
3346 		    result > MAX_TX_INTR_ABS_DELAY)
3347 			err = EINVAL;
3348 		else {
3349 			Adapter->tx_intr_abs_delay = (uint32_t)result;
3350 			E1000_WRITE_REG(hw, E1000_TADV,
3351 			    Adapter->tx_intr_abs_delay);
3352 			if (e1000g_check_acc_handle(
3353 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3354 				ddi_fm_service_impact(Adapter->dip,
3355 				    DDI_SERVICE_DEGRADED);
3356 		}
3357 		return (err);
3358 	}
3359 	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3360 		if (pr_val == NULL) {
3361 			err = EINVAL;
3362 			return (err);
3363 		}
3364 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3365 		if (result < MIN_RX_BCOPY_THRESHOLD ||
3366 		    result > MAX_RX_BCOPY_THRESHOLD)
3367 			err = EINVAL;
3368 		else
3369 			Adapter->rx_bcopy_thresh = (uint32_t)result;
3370 		return (err);
3371 	}
3372 	if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3373 		if (pr_val == NULL) {
3374 			err = EINVAL;
3375 			return (err);
3376 		}
3377 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3378 		if (result < MIN_RX_LIMIT_ON_INTR ||
3379 		    result > MAX_RX_LIMIT_ON_INTR)
3380 			err = EINVAL;
3381 		else
3382 			Adapter->rx_limit_onintr = (uint32_t)result;
3383 		return (err);
3384 	}
3385 	if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3386 		if (pr_val == NULL) {
3387 			err = EINVAL;
3388 			return (err);
3389 		}
3390 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3391 		if (result < MIN_RX_INTR_DELAY ||
3392 		    result > MAX_RX_INTR_DELAY)
3393 			err = EINVAL;
3394 		else {
3395 			Adapter->rx_intr_delay = (uint32_t)result;
3396 			E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay);
3397 			if (e1000g_check_acc_handle(
3398 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3399 				ddi_fm_service_impact(Adapter->dip,
3400 				    DDI_SERVICE_DEGRADED);
3401 		}
3402 		return (err);
3403 	}
3404 	if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3405 		if (pr_val == NULL) {
3406 			err = EINVAL;
3407 			return (err);
3408 		}
3409 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3410 		if (result < MIN_RX_INTR_ABS_DELAY ||
3411 		    result > MAX_RX_INTR_ABS_DELAY)
3412 			err = EINVAL;
3413 		else {
3414 			Adapter->rx_intr_abs_delay = (uint32_t)result;
3415 			E1000_WRITE_REG(hw, E1000_RADV,
3416 			    Adapter->rx_intr_abs_delay);
3417 			if (e1000g_check_acc_handle(
3418 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3419 				ddi_fm_service_impact(Adapter->dip,
3420 				    DDI_SERVICE_DEGRADED);
3421 		}
3422 		return (err);
3423 	}
3424 	if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3425 		if (pr_val == NULL) {
3426 			err = EINVAL;
3427 			return (err);
3428 		}
3429 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3430 		if (result < MIN_INTR_THROTTLING ||
3431 		    result > MAX_INTR_THROTTLING)
3432 			err = EINVAL;
3433 		else {
3434 			if (hw->mac.type >= e1000_82540) {
3435 				Adapter->intr_throttling_rate =
3436 				    (uint32_t)result;
3437 				E1000_WRITE_REG(hw, E1000_ITR,
3438 				    Adapter->intr_throttling_rate);
3439 				if (e1000g_check_acc_handle(
3440 				    Adapter->osdep.reg_handle) != DDI_FM_OK)
3441 					ddi_fm_service_impact(Adapter->dip,
3442 					    DDI_SERVICE_DEGRADED);
3443 			} else
3444 				err = EINVAL;
3445 		}
3446 		return (err);
3447 	}
3448 	if (strcmp(pr_name, "_intr_adaptive") == 0) {
3449 		if (pr_val == NULL) {
3450 			err = EINVAL;
3451 			return (err);
3452 		}
3453 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3454 		if (result < 0 || result > 1)
3455 			err = EINVAL;
3456 		else {
3457 			if (hw->mac.type >= e1000_82540) {
3458 				Adapter->intr_adaptive = (result == 1) ?
3459 				    B_TRUE : B_FALSE;
3460 			} else {
3461 				err = EINVAL;
3462 			}
3463 		}
3464 		return (err);
3465 	}
3466 	return (ENOTSUP);
3467 }
3468 
3469 static int
3470 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name,
3471     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
3472 {
3473 	int err = ENOTSUP;
3474 	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
3475 	int value;
3476 
3477 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
3478 		*perm = MAC_PROP_PERM_READ;
3479 		if (is_default)
3480 			goto done;
3481 		value = Adapter->param_adv_pause;
3482 		err = 0;
3483 		goto done;
3484 	}
3485 	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
3486 		*perm = MAC_PROP_PERM_READ;
3487 		if (is_default)
3488 			goto done;
3489 		value = Adapter->param_adv_asym_pause;
3490 		err = 0;
3491 		goto done;
3492 	}
3493 	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3494 		value = (is_default ? DEFAULT_TX_BCOPY_THRESHOLD :
3495 		    Adapter->tx_bcopy_thresh);
3496 		err = 0;
3497 		goto done;
3498 	}
3499 	if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3500 		value = (is_default ? DEFAULT_TX_INTR_ENABLE :
3501 		    Adapter->tx_intr_enable);
3502 		err = 0;
3503 		goto done;
3504 	}
3505 	if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3506 		value = (is_default ? DEFAULT_TX_INTR_DELAY :
3507 		    Adapter->tx_intr_delay);
3508 		err = 0;
3509 		goto done;
3510 	}
3511 	if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3512 		value = (is_default ? DEFAULT_TX_INTR_ABS_DELAY :
3513 		    Adapter->tx_intr_abs_delay);
3514 		err = 0;
3515 		goto done;
3516 	}
3517 	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3518 		value = (is_default ? DEFAULT_RX_BCOPY_THRESHOLD :
3519 		    Adapter->rx_bcopy_thresh);
3520 		err = 0;
3521 		goto done;
3522 	}
3523 	if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3524 		value = (is_default ? DEFAULT_RX_LIMIT_ON_INTR :
3525 		    Adapter->rx_limit_onintr);
3526 		err = 0;
3527 		goto done;
3528 	}
3529 	if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3530 		value = (is_default ? DEFAULT_RX_INTR_DELAY :
3531 		    Adapter->rx_intr_delay);
3532 		err = 0;
3533 		goto done;
3534 	}
3535 	if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3536 		value = (is_default ? DEFAULT_RX_INTR_ABS_DELAY :
3537 		    Adapter->rx_intr_abs_delay);
3538 		err = 0;
3539 		goto done;
3540 	}
3541 	if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3542 		value = (is_default ? DEFAULT_INTR_THROTTLING :
3543 		    Adapter->intr_throttling_rate);
3544 		err = 0;
3545 		goto done;
3546 	}
3547 	if (strcmp(pr_name, "_intr_adaptive") == 0) {
3548 		value = (is_default ? 1 : Adapter->intr_adaptive);
3549 		err = 0;
3550 		goto done;
3551 	}
3552 done:
3553 	if (err == 0) {
3554 		(void) snprintf(pr_val, pr_valsize, "%d", value);
3555 	}
3556 	return (err);
3557 }
3558 
3559 /*
3560  * e1000g_get_conf - get configurations set in e1000g.conf
3561  * This routine gets user-configured values out of the configuration
3562  * file e1000g.conf.
3563  *
3564  * For each configurable value, there is a minimum, a maximum, and a
3565  * default.
3566  * If user does not configure a value, use the default.
3567  * If user configures below the minimum, use the minumum.
3568  * If user configures above the maximum, use the maxumum.
3569  */
3570 static void
3571 e1000g_get_conf(struct e1000g *Adapter)
3572 {
3573 	struct e1000_hw *hw = &Adapter->shared;
3574 	boolean_t tbi_compatibility = B_FALSE;
3575 
3576 	/*
3577 	 * get each configurable property from e1000g.conf
3578 	 */
3579 
3580 	/*
3581 	 * NumTxDescriptors
3582 	 */
3583 	Adapter->tx_desc_num =
3584 	    e1000g_get_prop(Adapter, "NumTxDescriptors",
3585 	    MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR,
3586 	    DEFAULT_NUM_TX_DESCRIPTOR);
3587 
3588 	/*
3589 	 * NumRxDescriptors
3590 	 */
3591 	Adapter->rx_desc_num =
3592 	    e1000g_get_prop(Adapter, "NumRxDescriptors",
3593 	    MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR,
3594 	    DEFAULT_NUM_RX_DESCRIPTOR);
3595 
3596 	/*
3597 	 * NumRxFreeList
3598 	 */
3599 	Adapter->rx_freelist_num =
3600 	    e1000g_get_prop(Adapter, "NumRxFreeList",
3601 	    MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST,
3602 	    DEFAULT_NUM_RX_FREELIST);
3603 
3604 	/*
3605 	 * NumTxPacketList
3606 	 */
3607 	Adapter->tx_freelist_num =
3608 	    e1000g_get_prop(Adapter, "NumTxPacketList",
3609 	    MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST,
3610 	    DEFAULT_NUM_TX_FREELIST);
3611 
3612 	/*
3613 	 * FlowControl
3614 	 */
3615 	hw->fc.send_xon = B_TRUE;
3616 	hw->fc.requested_mode =
3617 	    e1000g_get_prop(Adapter, "FlowControl",
3618 	    e1000_fc_none, 4, DEFAULT_FLOW_CONTROL);
3619 	/* 4 is the setting that says "let the eeprom decide" */
3620 	if (hw->fc.requested_mode == 4)
3621 		hw->fc.requested_mode = e1000_fc_default;
3622 
3623 	/*
3624 	 * Max Num Receive Packets on Interrupt
3625 	 */
3626 	Adapter->rx_limit_onintr =
3627 	    e1000g_get_prop(Adapter, "MaxNumReceivePackets",
3628 	    MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR,
3629 	    DEFAULT_RX_LIMIT_ON_INTR);
3630 
3631 	/*
3632 	 * PHY master slave setting
3633 	 */
3634 	hw->phy.ms_type =
3635 	    e1000g_get_prop(Adapter, "SetMasterSlave",
3636 	    e1000_ms_hw_default, e1000_ms_auto,
3637 	    e1000_ms_hw_default);
3638 
3639 	/*
3640 	 * Parameter which controls TBI mode workaround, which is only
3641 	 * needed on certain switches such as Cisco 6500/Foundry
3642 	 */
3643 	tbi_compatibility =
3644 	    e1000g_get_prop(Adapter, "TbiCompatibilityEnable",
3645 	    0, 1, DEFAULT_TBI_COMPAT_ENABLE);
3646 	e1000_set_tbi_compatibility_82543(hw, tbi_compatibility);
3647 
3648 	/*
3649 	 * MSI Enable
3650 	 */
3651 	Adapter->msi_enable =
3652 	    e1000g_get_prop(Adapter, "MSIEnable",
3653 	    0, 1, DEFAULT_MSI_ENABLE);
3654 
3655 	/*
3656 	 * Interrupt Throttling Rate
3657 	 */
3658 	Adapter->intr_throttling_rate =
3659 	    e1000g_get_prop(Adapter, "intr_throttling_rate",
3660 	    MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
3661 	    DEFAULT_INTR_THROTTLING);
3662 
3663 	/*
3664 	 * Adaptive Interrupt Blanking Enable/Disable
3665 	 * It is enabled by default
3666 	 */
3667 	Adapter->intr_adaptive =
3668 	    (e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1) == 1) ?
3669 	    B_TRUE : B_FALSE;
3670 
3671 	/*
3672 	 * Hardware checksum enable/disable parameter
3673 	 */
3674 	Adapter->tx_hcksum_enable =
3675 	    e1000g_get_prop(Adapter, "tx_hcksum_enable",
3676 	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3677 	/*
3678 	 * Checksum on/off selection via global parameters.
3679 	 *
3680 	 * If the chip is flagged as not capable of (correctly)
3681 	 * handling checksumming, we don't enable it on either
3682 	 * Rx or Tx side.  Otherwise, we take this chip's settings
3683 	 * from the patchable global defaults.
3684 	 *
3685 	 * We advertise our capabilities only if TX offload is
3686 	 * enabled.  On receive, the stack will accept checksummed
3687 	 * packets anyway, even if we haven't said we can deliver
3688 	 * them.
3689 	 */
3690 	switch (hw->mac.type) {
3691 		case e1000_82540:
3692 		case e1000_82544:
3693 		case e1000_82545:
3694 		case e1000_82545_rev_3:
3695 		case e1000_82546:
3696 		case e1000_82546_rev_3:
3697 		case e1000_82571:
3698 		case e1000_82572:
3699 		case e1000_82573:
3700 		case e1000_80003es2lan:
3701 			break;
3702 		/*
3703 		 * For the following Intel PRO/1000 chipsets, we have not
3704 		 * tested the hardware checksum offload capability, so we
3705 		 * disable the capability for them.
3706 		 *	e1000_82542,
3707 		 *	e1000_82543,
3708 		 *	e1000_82541,
3709 		 *	e1000_82541_rev_2,
3710 		 *	e1000_82547,
3711 		 *	e1000_82547_rev_2,
3712 		 */
3713 		default:
3714 			Adapter->tx_hcksum_enable = B_FALSE;
3715 	}
3716 
3717 	/*
3718 	 * Large Send Offloading(LSO) Enable/Disable
3719 	 * If the tx hardware checksum is not enabled, LSO should be
3720 	 * disabled.
3721 	 */
3722 	Adapter->lso_enable =
3723 	    e1000g_get_prop(Adapter, "lso_enable",
3724 	    0, 1, DEFAULT_LSO_ENABLE);
3725 
3726 	switch (hw->mac.type) {
3727 		case e1000_82546:
3728 		case e1000_82546_rev_3:
3729 			if (Adapter->lso_enable)
3730 				Adapter->lso_premature_issue = B_TRUE;
3731 			/* FALLTHRU */
3732 		case e1000_82571:
3733 		case e1000_82572:
3734 		case e1000_82573:
3735 		case e1000_80003es2lan:
3736 			break;
3737 		default:
3738 			Adapter->lso_enable = B_FALSE;
3739 	}
3740 
3741 	if (!Adapter->tx_hcksum_enable) {
3742 		Adapter->lso_premature_issue = B_FALSE;
3743 		Adapter->lso_enable = B_FALSE;
3744 	}
3745 
3746 	/*
3747 	 * If mem_workaround_82546 is enabled, the rx buffer allocated by
3748 	 * e1000_82545, e1000_82546 and e1000_82546_rev_3
3749 	 * will not cross 64k boundary.
3750 	 */
3751 	Adapter->mem_workaround_82546 =
3752 	    e1000g_get_prop(Adapter, "mem_workaround_82546",
3753 	    0, 1, DEFAULT_MEM_WORKAROUND_82546);
3754 }
3755 
3756 /*
3757  * e1000g_get_prop - routine to read properties
3758  *
3759  * Get a user-configure property value out of the configuration
3760  * file e1000g.conf.
3761  *
3762  * Caller provides name of the property, a default value, a minimum
3763  * value, and a maximum value.
3764  *
3765  * Return configured value of the property, with default, minimum and
3766  * maximum properly applied.
3767  */
3768 static int
3769 e1000g_get_prop(struct e1000g *Adapter,	/* point to per-adapter structure */
3770     char *propname,		/* name of the property */
3771     int minval,			/* minimum acceptable value */
3772     int maxval,			/* maximim acceptable value */
3773     int defval)			/* default value */
3774 {
3775 	int propval;		/* value returned for requested property */
3776 	int *props;		/* point to array of properties returned */
3777 	uint_t nprops;		/* number of property value returned */
3778 
3779 	/*
3780 	 * get the array of properties from the config file
3781 	 */
3782 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip,
3783 	    DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) {
3784 		/* got some properties, test if we got enough */
3785 		if (Adapter->instance < nprops) {
3786 			propval = props[Adapter->instance];
3787 		} else {
3788 			/* not enough properties configured */
3789 			propval = defval;
3790 			E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
3791 			    "Not Enough %s values found in e1000g.conf"
3792 			    " - set to %d\n",
3793 			    propname, propval);
3794 		}
3795 
3796 		/* free memory allocated for properties */
3797 		ddi_prop_free(props);
3798 
3799 	} else {
3800 		propval = defval;
3801 	}
3802 
3803 	/*
3804 	 * enforce limits
3805 	 */
3806 	if (propval > maxval) {
3807 		propval = maxval;
3808 		E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
3809 		    "Too High %s value in e1000g.conf - set to %d\n",
3810 		    propname, propval);
3811 	}
3812 
3813 	if (propval < minval) {
3814 		propval = minval;
3815 		E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
3816 		    "Too Low %s value in e1000g.conf - set to %d\n",
3817 		    propname, propval);
3818 	}
3819 
3820 	return (propval);
3821 }
3822 
3823 static boolean_t
3824 e1000g_link_check(struct e1000g *Adapter)
3825 {
3826 	uint16_t speed, duplex, phydata;
3827 	boolean_t link_changed = B_FALSE;
3828 	struct e1000_hw *hw;
3829 	uint32_t reg_tarc;
3830 
3831 	hw = &Adapter->shared;
3832 
3833 	if (e1000g_link_up(Adapter)) {
3834 		/*
3835 		 * The Link is up, check whether it was marked as down earlier
3836 		 */
3837 		if (Adapter->link_state != LINK_STATE_UP) {
3838 			(void) e1000_get_speed_and_duplex(hw, &speed, &duplex);
3839 			Adapter->link_speed = speed;
3840 			Adapter->link_duplex = duplex;
3841 			Adapter->link_state = LINK_STATE_UP;
3842 			link_changed = B_TRUE;
3843 
3844 			Adapter->tx_link_down_timeout = 0;
3845 
3846 			if ((hw->mac.type == e1000_82571) ||
3847 			    (hw->mac.type == e1000_82572)) {
3848 				reg_tarc = E1000_READ_REG(hw, E1000_TARC(0));
3849 				if (speed == SPEED_1000)
3850 					reg_tarc |= (1 << 21);
3851 				else
3852 					reg_tarc &= ~(1 << 21);
3853 				E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc);
3854 			}
3855 		}
3856 		Adapter->smartspeed = 0;
3857 	} else {
3858 		if (Adapter->link_state != LINK_STATE_DOWN) {
3859 			Adapter->link_speed = 0;
3860 			Adapter->link_duplex = 0;
3861 			Adapter->link_state = LINK_STATE_DOWN;
3862 			link_changed = B_TRUE;
3863 
3864 			/*
3865 			 * SmartSpeed workaround for Tabor/TanaX, When the
3866 			 * driver loses link disable auto master/slave
3867 			 * resolution.
3868 			 */
3869 			if (hw->phy.type == e1000_phy_igp) {
3870 				(void) e1000_read_phy_reg(hw,
3871 				    PHY_1000T_CTRL, &phydata);
3872 				phydata |= CR_1000T_MS_ENABLE;
3873 				(void) e1000_write_phy_reg(hw,
3874 				    PHY_1000T_CTRL, phydata);
3875 			}
3876 		} else {
3877 			e1000g_smartspeed(Adapter);
3878 		}
3879 
3880 		if (Adapter->e1000g_state & E1000G_STARTED) {
3881 			if (Adapter->tx_link_down_timeout <
3882 			    MAX_TX_LINK_DOWN_TIMEOUT) {
3883 				Adapter->tx_link_down_timeout++;
3884 			} else if (Adapter->tx_link_down_timeout ==
3885 			    MAX_TX_LINK_DOWN_TIMEOUT) {
3886 				e1000g_tx_clean(Adapter);
3887 				Adapter->tx_link_down_timeout++;
3888 			}
3889 		}
3890 	}
3891 
3892 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
3893 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
3894 
3895 	return (link_changed);
3896 }
3897 
3898 /*
3899  * e1000g_reset_link - Using the link properties to setup the link
3900  */
3901 int
3902 e1000g_reset_link(struct e1000g *Adapter)
3903 {
3904 	struct e1000_mac_info *mac;
3905 	struct e1000_phy_info *phy;
3906 	boolean_t invalid;
3907 
3908 	mac = &Adapter->shared.mac;
3909 	phy = &Adapter->shared.phy;
3910 	invalid = B_FALSE;
3911 
3912 	if (Adapter->param_adv_autoneg == 1) {
3913 		mac->autoneg = B_TRUE;
3914 		phy->autoneg_advertised = 0;
3915 
3916 		/*
3917 		 * 1000hdx is not supported for autonegotiation
3918 		 */
3919 		if (Adapter->param_adv_1000fdx == 1)
3920 			phy->autoneg_advertised |= ADVERTISE_1000_FULL;
3921 
3922 		if (Adapter->param_adv_100fdx == 1)
3923 			phy->autoneg_advertised |= ADVERTISE_100_FULL;
3924 
3925 		if (Adapter->param_adv_100hdx == 1)
3926 			phy->autoneg_advertised |= ADVERTISE_100_HALF;
3927 
3928 		if (Adapter->param_adv_10fdx == 1)
3929 			phy->autoneg_advertised |= ADVERTISE_10_FULL;
3930 
3931 		if (Adapter->param_adv_10hdx == 1)
3932 			phy->autoneg_advertised |= ADVERTISE_10_HALF;
3933 
3934 		if (phy->autoneg_advertised == 0)
3935 			invalid = B_TRUE;
3936 	} else {
3937 		mac->autoneg = B_FALSE;
3938 
3939 		/*
3940 		 * 1000fdx and 1000hdx are not supported for forced link
3941 		 */
3942 		if (Adapter->param_adv_100fdx == 1)
3943 			mac->forced_speed_duplex = ADVERTISE_100_FULL;
3944 		else if (Adapter->param_adv_100hdx == 1)
3945 			mac->forced_speed_duplex = ADVERTISE_100_HALF;
3946 		else if (Adapter->param_adv_10fdx == 1)
3947 			mac->forced_speed_duplex = ADVERTISE_10_FULL;
3948 		else if (Adapter->param_adv_10hdx == 1)
3949 			mac->forced_speed_duplex = ADVERTISE_10_HALF;
3950 		else
3951 			invalid = B_TRUE;
3952 
3953 	}
3954 
3955 	if (invalid) {
3956 		e1000g_log(Adapter, CE_WARN,
3957 		    "Invalid link sets. Setup link to"
3958 		    "support autonegotiation with all link capabilities.");
3959 		mac->autoneg = B_TRUE;
3960 		phy->autoneg_advertised = ADVERTISE_1000_FULL |
3961 		    ADVERTISE_100_FULL | ADVERTISE_100_HALF |
3962 		    ADVERTISE_10_FULL | ADVERTISE_10_HALF;
3963 	}
3964 
3965 	return (e1000_setup_link(&Adapter->shared));
3966 }
3967 
3968 static void
3969 e1000g_timer_tx_resched(struct e1000g *Adapter)
3970 {
3971 	e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
3972 
3973 	rw_enter(&Adapter->chip_lock, RW_READER);
3974 
3975 	if (tx_ring->resched_needed &&
3976 	    ((ddi_get_lbolt() - tx_ring->resched_timestamp) >
3977 	    drv_usectohz(1000000)) &&
3978 	    (Adapter->e1000g_state & E1000G_STARTED) &&
3979 	    (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) {
3980 		tx_ring->resched_needed = B_FALSE;
3981 		mac_tx_update(Adapter->mh);
3982 		E1000G_STAT(tx_ring->stat_reschedule);
3983 		E1000G_STAT(tx_ring->stat_timer_reschedule);
3984 	}
3985 
3986 	rw_exit(&Adapter->chip_lock);
3987 }
3988 
3989 static void
3990 e1000g_local_timer(void *ws)
3991 {
3992 	struct e1000g *Adapter = (struct e1000g *)ws;
3993 	struct e1000_hw *hw;
3994 	e1000g_ether_addr_t ether_addr;
3995 	boolean_t link_changed;
3996 
3997 	hw = &Adapter->shared;
3998 
3999 	if (Adapter->e1000g_state & E1000G_ERROR) {
4000 		rw_enter(&Adapter->chip_lock, RW_WRITER);
4001 		Adapter->e1000g_state &= ~E1000G_ERROR;
4002 		rw_exit(&Adapter->chip_lock);
4003 
4004 		Adapter->reset_count++;
4005 		if (e1000g_global_reset(Adapter)) {
4006 			ddi_fm_service_impact(Adapter->dip,
4007 			    DDI_SERVICE_RESTORED);
4008 			e1000g_timer_tx_resched(Adapter);
4009 		} else
4010 			ddi_fm_service_impact(Adapter->dip,
4011 			    DDI_SERVICE_LOST);
4012 		return;
4013 	}
4014 
4015 	if (e1000g_stall_check(Adapter)) {
4016 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
4017 		    "Tx stall detected. Activate automatic recovery.\n");
4018 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL);
4019 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
4020 		Adapter->reset_count++;
4021 		if (e1000g_reset_adapter(Adapter)) {
4022 			ddi_fm_service_impact(Adapter->dip,
4023 			    DDI_SERVICE_RESTORED);
4024 			e1000g_timer_tx_resched(Adapter);
4025 		}
4026 		return;
4027 	}
4028 
4029 	link_changed = B_FALSE;
4030 	rw_enter(&Adapter->chip_lock, RW_READER);
4031 	if (Adapter->link_complete)
4032 		link_changed = e1000g_link_check(Adapter);
4033 	rw_exit(&Adapter->chip_lock);
4034 
4035 	if (link_changed)
4036 		mac_link_update(Adapter->mh, Adapter->link_state);
4037 
4038 	/*
4039 	 * Workaround for esb2. Data stuck in fifo on a link
4040 	 * down event. Reset the adapter to recover it.
4041 	 */
4042 	if (Adapter->esb2_workaround) {
4043 		Adapter->esb2_workaround = B_FALSE;
4044 		(void) e1000g_reset_adapter(Adapter);
4045 		return;
4046 	}
4047 
4048 	/*
4049 	 * With 82571 controllers, any locally administered address will
4050 	 * be overwritten when there is a reset on the other port.
4051 	 * Detect this circumstance and correct it.
4052 	 */
4053 	if ((hw->mac.type == e1000_82571) &&
4054 	    (e1000_get_laa_state_82571(hw) == B_TRUE)) {
4055 		ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0);
4056 		ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1);
4057 
4058 		ether_addr.reg.low = ntohl(ether_addr.reg.low);
4059 		ether_addr.reg.high = ntohl(ether_addr.reg.high);
4060 
4061 		if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) ||
4062 		    (ether_addr.mac.addr[4] != hw->mac.addr[1]) ||
4063 		    (ether_addr.mac.addr[3] != hw->mac.addr[2]) ||
4064 		    (ether_addr.mac.addr[2] != hw->mac.addr[3]) ||
4065 		    (ether_addr.mac.addr[1] != hw->mac.addr[4]) ||
4066 		    (ether_addr.mac.addr[0] != hw->mac.addr[5])) {
4067 			e1000_rar_set(hw, hw->mac.addr, 0);
4068 		}
4069 	}
4070 
4071 	/*
4072 	 * Long TTL workaround for 82541/82547
4073 	 */
4074 	(void) e1000_igp_ttl_workaround_82547(hw);
4075 
4076 	/*
4077 	 * Check for Adaptive IFS settings If there are lots of collisions
4078 	 * change the value in steps...
4079 	 * These properties should only be set for 10/100
4080 	 */
4081 	if ((hw->phy.media_type == e1000_media_type_copper) &&
4082 	    ((Adapter->link_speed == SPEED_100) ||
4083 	    (Adapter->link_speed == SPEED_10))) {
4084 		e1000_update_adaptive(hw);
4085 	}
4086 	/*
4087 	 * Set Timer Interrupts
4088 	 */
4089 	E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
4090 
4091 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
4092 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4093 	else
4094 		e1000g_timer_tx_resched(Adapter);
4095 
4096 	restart_watchdog_timer(Adapter);
4097 }
4098 
4099 /*
4100  * The function e1000g_link_timer() is called when the timer for link setup
4101  * is expired, which indicates the completion of the link setup. The link
4102  * state will not be updated until the link setup is completed. And the
4103  * link state will not be sent to the upper layer through mac_link_update()
4104  * in this function. It will be updated in the local timer routine or the
4105  * interrupt service routine after the interface is started (plumbed).
4106  */
4107 static void
4108 e1000g_link_timer(void *arg)
4109 {
4110 	struct e1000g *Adapter = (struct e1000g *)arg;
4111 
4112 	mutex_enter(&Adapter->link_lock);
4113 	Adapter->link_complete = B_TRUE;
4114 	Adapter->link_tid = 0;
4115 	mutex_exit(&Adapter->link_lock);
4116 }
4117 
4118 /*
4119  * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf
4120  *
4121  * This function read the forced speed and duplex for 10/100 Mbps speeds
4122  * and also for 1000 Mbps speeds from the e1000g.conf file
4123  */
4124 static void
4125 e1000g_force_speed_duplex(struct e1000g *Adapter)
4126 {
4127 	int forced;
4128 	struct e1000_mac_info *mac = &Adapter->shared.mac;
4129 	struct e1000_phy_info *phy = &Adapter->shared.phy;
4130 
4131 	/*
4132 	 * get value out of config file
4133 	 */
4134 	forced = e1000g_get_prop(Adapter, "ForceSpeedDuplex",
4135 	    GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY);
4136 
4137 	switch (forced) {
4138 	case GDIAG_10_HALF:
4139 		/*
4140 		 * Disable Auto Negotiation
4141 		 */
4142 		mac->autoneg = B_FALSE;
4143 		mac->forced_speed_duplex = ADVERTISE_10_HALF;
4144 		break;
4145 	case GDIAG_10_FULL:
4146 		/*
4147 		 * Disable Auto Negotiation
4148 		 */
4149 		mac->autoneg = B_FALSE;
4150 		mac->forced_speed_duplex = ADVERTISE_10_FULL;
4151 		break;
4152 	case GDIAG_100_HALF:
4153 		/*
4154 		 * Disable Auto Negotiation
4155 		 */
4156 		mac->autoneg = B_FALSE;
4157 		mac->forced_speed_duplex = ADVERTISE_100_HALF;
4158 		break;
4159 	case GDIAG_100_FULL:
4160 		/*
4161 		 * Disable Auto Negotiation
4162 		 */
4163 		mac->autoneg = B_FALSE;
4164 		mac->forced_speed_duplex = ADVERTISE_100_FULL;
4165 		break;
4166 	case GDIAG_1000_FULL:
4167 		/*
4168 		 * The gigabit spec requires autonegotiation.  Therefore,
4169 		 * when the user wants to force the speed to 1000Mbps, we
4170 		 * enable AutoNeg, but only allow the harware to advertise
4171 		 * 1000Mbps.  This is different from 10/100 operation, where
4172 		 * we are allowed to link without any negotiation.
4173 		 */
4174 		mac->autoneg = B_TRUE;
4175 		phy->autoneg_advertised = ADVERTISE_1000_FULL;
4176 		break;
4177 	default:	/* obey the setting of AutoNegAdvertised */
4178 		mac->autoneg = B_TRUE;
4179 		phy->autoneg_advertised =
4180 		    (uint16_t)e1000g_get_prop(Adapter, "AutoNegAdvertised",
4181 		    0, AUTONEG_ADVERTISE_SPEED_DEFAULT,
4182 		    AUTONEG_ADVERTISE_SPEED_DEFAULT);
4183 		break;
4184 	}	/* switch */
4185 }
4186 
4187 /*
4188  * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf
4189  *
4190  * This function reads MaxFrameSize from e1000g.conf
4191  */
4192 static void
4193 e1000g_get_max_frame_size(struct e1000g *Adapter)
4194 {
4195 	int max_frame;
4196 	struct e1000_mac_info *mac = &Adapter->shared.mac;
4197 	struct e1000_phy_info *phy = &Adapter->shared.phy;
4198 
4199 	/*
4200 	 * get value out of config file
4201 	 */
4202 	max_frame = e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0);
4203 
4204 	switch (max_frame) {
4205 	case 0:
4206 		Adapter->default_mtu = ETHERMTU;
4207 		break;
4208 	/*
4209 	 * To avoid excessive memory allocation for rx buffers,
4210 	 * the bytes of E1000G_IPALIGNPRESERVEROOM are reserved.
4211 	 */
4212 	case 1:
4213 		Adapter->default_mtu = FRAME_SIZE_UPTO_4K -
4214 		    sizeof (struct ether_vlan_header) - ETHERFCSL -
4215 		    E1000G_IPALIGNPRESERVEROOM;
4216 		break;
4217 	case 2:
4218 		Adapter->default_mtu = FRAME_SIZE_UPTO_8K -
4219 		    sizeof (struct ether_vlan_header) - ETHERFCSL -
4220 		    E1000G_IPALIGNPRESERVEROOM;
4221 		break;
4222 	case 3:
4223 		if (mac->type >= e1000_82571)
4224 			Adapter->default_mtu = MAXIMUM_MTU;
4225 		else
4226 			Adapter->default_mtu = FRAME_SIZE_UPTO_16K -
4227 			    sizeof (struct ether_vlan_header) - ETHERFCSL -
4228 			    E1000G_IPALIGNPRESERVEROOM;
4229 		break;
4230 	default:
4231 		Adapter->default_mtu = ETHERMTU;
4232 		break;
4233 	}	/* switch */
4234 
4235 	Adapter->max_frame_size = Adapter->default_mtu +
4236 	    sizeof (struct ether_vlan_header) + ETHERFCSL;
4237 
4238 	/* ich8 does not do jumbo frames */
4239 	if (mac->type == e1000_ich8lan) {
4240 		Adapter->default_mtu = ETHERMTU;
4241 		Adapter->max_frame_size = ETHERMTU +
4242 		    sizeof (struct ether_vlan_header) + ETHERFCSL;
4243 	}
4244 
4245 	/* ich9 does not do jumbo frames on one phy type */
4246 	if ((mac->type == e1000_ich9lan) &&
4247 	    (phy->type == e1000_phy_ife)) {
4248 		Adapter->default_mtu = ETHERMTU;
4249 		Adapter->max_frame_size = ETHERMTU +
4250 		    sizeof (struct ether_vlan_header) + ETHERFCSL;
4251 	}
4252 }
4253 
4254 static void
4255 arm_watchdog_timer(struct e1000g *Adapter)
4256 {
4257 	Adapter->watchdog_tid =
4258 	    timeout(e1000g_local_timer,
4259 	    (void *)Adapter, 1 * drv_usectohz(1000000));
4260 }
4261 #pragma inline(arm_watchdog_timer)
4262 
4263 static void
4264 enable_watchdog_timer(struct e1000g *Adapter)
4265 {
4266 	mutex_enter(&Adapter->watchdog_lock);
4267 
4268 	if (!Adapter->watchdog_timer_enabled) {
4269 		Adapter->watchdog_timer_enabled = B_TRUE;
4270 		Adapter->watchdog_timer_started = B_TRUE;
4271 		arm_watchdog_timer(Adapter);
4272 	}
4273 
4274 	mutex_exit(&Adapter->watchdog_lock);
4275 }
4276 
4277 static void
4278 disable_watchdog_timer(struct e1000g *Adapter)
4279 {
4280 	timeout_id_t tid;
4281 
4282 	mutex_enter(&Adapter->watchdog_lock);
4283 
4284 	Adapter->watchdog_timer_enabled = B_FALSE;
4285 	Adapter->watchdog_timer_started = B_FALSE;
4286 	tid = Adapter->watchdog_tid;
4287 	Adapter->watchdog_tid = 0;
4288 
4289 	mutex_exit(&Adapter->watchdog_lock);
4290 
4291 	if (tid != 0)
4292 		(void) untimeout(tid);
4293 }
4294 
4295 static void
4296 start_watchdog_timer(struct e1000g *Adapter)
4297 {
4298 	mutex_enter(&Adapter->watchdog_lock);
4299 
4300 	if (Adapter->watchdog_timer_enabled) {
4301 		if (!Adapter->watchdog_timer_started) {
4302 			Adapter->watchdog_timer_started = B_TRUE;
4303 			arm_watchdog_timer(Adapter);
4304 		}
4305 	}
4306 
4307 	mutex_exit(&Adapter->watchdog_lock);
4308 }
4309 
4310 static void
4311 restart_watchdog_timer(struct e1000g *Adapter)
4312 {
4313 	mutex_enter(&Adapter->watchdog_lock);
4314 
4315 	if (Adapter->watchdog_timer_started)
4316 		arm_watchdog_timer(Adapter);
4317 
4318 	mutex_exit(&Adapter->watchdog_lock);
4319 }
4320 
4321 static void
4322 stop_watchdog_timer(struct e1000g *Adapter)
4323 {
4324 	timeout_id_t tid;
4325 
4326 	mutex_enter(&Adapter->watchdog_lock);
4327 
4328 	Adapter->watchdog_timer_started = B_FALSE;
4329 	tid = Adapter->watchdog_tid;
4330 	Adapter->watchdog_tid = 0;
4331 
4332 	mutex_exit(&Adapter->watchdog_lock);
4333 
4334 	if (tid != 0)
4335 		(void) untimeout(tid);
4336 }
4337 
4338 static void
4339 stop_link_timer(struct e1000g *Adapter)
4340 {
4341 	timeout_id_t tid;
4342 
4343 	/* Disable the link timer */
4344 	mutex_enter(&Adapter->link_lock);
4345 
4346 	tid = Adapter->link_tid;
4347 	Adapter->link_tid = 0;
4348 
4349 	mutex_exit(&Adapter->link_lock);
4350 
4351 	if (tid != 0)
4352 		(void) untimeout(tid);
4353 }
4354 
4355 static void
4356 stop_82547_timer(e1000g_tx_ring_t *tx_ring)
4357 {
4358 	timeout_id_t tid;
4359 
4360 	/* Disable the tx timer for 82547 chipset */
4361 	mutex_enter(&tx_ring->tx_lock);
4362 
4363 	tx_ring->timer_enable_82547 = B_FALSE;
4364 	tid = tx_ring->timer_id_82547;
4365 	tx_ring->timer_id_82547 = 0;
4366 
4367 	mutex_exit(&tx_ring->tx_lock);
4368 
4369 	if (tid != 0)
4370 		(void) untimeout(tid);
4371 }
4372 
4373 void
4374 e1000g_clear_interrupt(struct e1000g *Adapter)
4375 {
4376 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC,
4377 	    0xffffffff & ~E1000_IMS_RXSEQ);
4378 }
4379 
4380 void
4381 e1000g_mask_interrupt(struct e1000g *Adapter)
4382 {
4383 	E1000_WRITE_REG(&Adapter->shared, E1000_IMS,
4384 	    IMS_ENABLE_MASK & ~E1000_IMS_TXDW);
4385 
4386 	if (Adapter->tx_intr_enable)
4387 		e1000g_mask_tx_interrupt(Adapter);
4388 }
4389 
4390 /*
4391  * This routine is called by e1000g_quiesce(), therefore must not block.
4392  */
4393 void
4394 e1000g_clear_all_interrupts(struct e1000g *Adapter)
4395 {
4396 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff);
4397 }
4398 
4399 void
4400 e1000g_mask_tx_interrupt(struct e1000g *Adapter)
4401 {
4402 	E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW);
4403 }
4404 
4405 void
4406 e1000g_clear_tx_interrupt(struct e1000g *Adapter)
4407 {
4408 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW);
4409 }
4410 
4411 static void
4412 e1000g_smartspeed(struct e1000g *Adapter)
4413 {
4414 	struct e1000_hw *hw = &Adapter->shared;
4415 	uint16_t phy_status;
4416 	uint16_t phy_ctrl;
4417 
4418 	/*
4419 	 * If we're not T-or-T, or we're not autoneg'ing, or we're not
4420 	 * advertising 1000Full, we don't even use the workaround
4421 	 */
4422 	if ((hw->phy.type != e1000_phy_igp) ||
4423 	    !hw->mac.autoneg ||
4424 	    !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL))
4425 		return;
4426 
4427 	/*
4428 	 * True if this is the first call of this function or after every
4429 	 * 30 seconds of not having link
4430 	 */
4431 	if (Adapter->smartspeed == 0) {
4432 		/*
4433 		 * If Master/Slave config fault is asserted twice, we
4434 		 * assume back-to-back
4435 		 */
4436 		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4437 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4438 			return;
4439 
4440 		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4441 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4442 			return;
4443 		/*
4444 		 * We're assuming back-2-back because our status register
4445 		 * insists! there's a fault in the master/slave
4446 		 * relationship that was "negotiated"
4447 		 */
4448 		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4449 		/*
4450 		 * Is the phy configured for manual configuration of
4451 		 * master/slave?
4452 		 */
4453 		if (phy_ctrl & CR_1000T_MS_ENABLE) {
4454 			/*
4455 			 * Yes.  Then disable manual configuration (enable
4456 			 * auto configuration) of master/slave
4457 			 */
4458 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
4459 			(void) e1000_write_phy_reg(hw,
4460 			    PHY_1000T_CTRL, phy_ctrl);
4461 			/*
4462 			 * Effectively starting the clock
4463 			 */
4464 			Adapter->smartspeed++;
4465 			/*
4466 			 * Restart autonegotiation
4467 			 */
4468 			if (!e1000_phy_setup_autoneg(hw) &&
4469 			    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
4470 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4471 				    MII_CR_RESTART_AUTO_NEG);
4472 				(void) e1000_write_phy_reg(hw,
4473 				    PHY_CONTROL, phy_ctrl);
4474 			}
4475 		}
4476 		return;
4477 		/*
4478 		 * Has 6 seconds transpired still without link? Remember,
4479 		 * you should reset the smartspeed counter once you obtain
4480 		 * link
4481 		 */
4482 	} else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4483 		/*
4484 		 * Yes.  Remember, we did at the start determine that
4485 		 * there's a master/slave configuration fault, so we're
4486 		 * still assuming there's someone on the other end, but we
4487 		 * just haven't yet been able to talk to it. We then
4488 		 * re-enable auto configuration of master/slave to see if
4489 		 * we're running 2/3 pair cables.
4490 		 */
4491 		/*
4492 		 * If still no link, perhaps using 2/3 pair cable
4493 		 */
4494 		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4495 		phy_ctrl |= CR_1000T_MS_ENABLE;
4496 		(void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4497 		/*
4498 		 * Restart autoneg with phy enabled for manual
4499 		 * configuration of master/slave
4500 		 */
4501 		if (!e1000_phy_setup_autoneg(hw) &&
4502 		    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
4503 			phy_ctrl |=
4504 			    (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
4505 			(void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
4506 		}
4507 		/*
4508 		 * Hopefully, there are no more faults and we've obtained
4509 		 * link as a result.
4510 		 */
4511 	}
4512 	/*
4513 	 * Restart process after E1000_SMARTSPEED_MAX iterations (30
4514 	 * seconds)
4515 	 */
4516 	if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4517 		Adapter->smartspeed = 0;
4518 }
4519 
4520 static boolean_t
4521 is_valid_mac_addr(uint8_t *mac_addr)
4522 {
4523 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
4524 	const uint8_t addr_test2[6] =
4525 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4526 
4527 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
4528 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
4529 		return (B_FALSE);
4530 
4531 	return (B_TRUE);
4532 }
4533 
4534 /*
4535  * e1000g_stall_check - check for tx stall
4536  *
4537  * This function checks if the adapter is stalled (in transmit).
4538  *
4539  * It is called each time the watchdog timeout is invoked.
4540  * If the transmit descriptor reclaim continuously fails,
4541  * the watchdog value will increment by 1. If the watchdog
4542  * value exceeds the threshold, the adapter is assumed to
4543  * have stalled and need to be reset.
4544  */
4545 static boolean_t
4546 e1000g_stall_check(struct e1000g *Adapter)
4547 {
4548 	e1000g_tx_ring_t *tx_ring;
4549 
4550 	tx_ring = Adapter->tx_ring;
4551 
4552 	if (Adapter->link_state != LINK_STATE_UP)
4553 		return (B_FALSE);
4554 
4555 	if (tx_ring->recycle_fail > 0)
4556 		tx_ring->stall_watchdog++;
4557 	else
4558 		tx_ring->stall_watchdog = 0;
4559 
4560 	if (tx_ring->stall_watchdog < E1000G_STALL_WATCHDOG_COUNT)
4561 		return (B_FALSE);
4562 
4563 	tx_ring->stall_watchdog = 0;
4564 	tx_ring->recycle_fail = 0;
4565 
4566 	return (B_TRUE);
4567 }
4568 
4569 #ifdef E1000G_DEBUG
4570 static enum ioc_reply
4571 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp)
4572 {
4573 	void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd);
4574 	e1000g_peekpoke_t *ppd;
4575 	uint64_t mem_va;
4576 	uint64_t maxoff;
4577 	boolean_t peek;
4578 
4579 	switch (iocp->ioc_cmd) {
4580 
4581 	case E1000G_IOC_REG_PEEK:
4582 		peek = B_TRUE;
4583 		break;
4584 
4585 	case E1000G_IOC_REG_POKE:
4586 		peek = B_FALSE;
4587 		break;
4588 
4589 	deault:
4590 		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
4591 		    "e1000g_diag_ioctl: invalid ioctl command 0x%X\n",
4592 		    iocp->ioc_cmd);
4593 		return (IOC_INVAL);
4594 	}
4595 
4596 	/*
4597 	 * Validate format of ioctl
4598 	 */
4599 	if (iocp->ioc_count != sizeof (e1000g_peekpoke_t))
4600 		return (IOC_INVAL);
4601 	if (mp->b_cont == NULL)
4602 		return (IOC_INVAL);
4603 
4604 	ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr;
4605 
4606 	/*
4607 	 * Validate request parameters
4608 	 */
4609 	switch (ppd->pp_acc_space) {
4610 
4611 	default:
4612 		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
4613 		    "e1000g_diag_ioctl: invalid access space 0x%X\n",
4614 		    ppd->pp_acc_space);
4615 		return (IOC_INVAL);
4616 
4617 	case E1000G_PP_SPACE_REG:
4618 		/*
4619 		 * Memory-mapped I/O space
4620 		 */
4621 		ASSERT(ppd->pp_acc_size == 4);
4622 		if (ppd->pp_acc_size != 4)
4623 			return (IOC_INVAL);
4624 
4625 		if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
4626 			return (IOC_INVAL);
4627 
4628 		mem_va = 0;
4629 		maxoff = 0x10000;
4630 		ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg;
4631 		break;
4632 
4633 	case E1000G_PP_SPACE_E1000G:
4634 		/*
4635 		 * E1000g data structure!
4636 		 */
4637 		mem_va = (uintptr_t)e1000gp;
4638 		maxoff = sizeof (struct e1000g);
4639 		ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem;
4640 		break;
4641 
4642 	}
4643 
4644 	if (ppd->pp_acc_offset >= maxoff)
4645 		return (IOC_INVAL);
4646 
4647 	if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff)
4648 		return (IOC_INVAL);
4649 
4650 	/*
4651 	 * All OK - go!
4652 	 */
4653 	ppd->pp_acc_offset += mem_va;
4654 	(*ppfn)(e1000gp, ppd);
4655 	return (peek ? IOC_REPLY : IOC_ACK);
4656 }
4657 
4658 static void
4659 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4660 {
4661 	ddi_acc_handle_t handle;
4662 	uint32_t *regaddr;
4663 
4664 	handle = e1000gp->osdep.reg_handle;
4665 	regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
4666 	    (uintptr_t)ppd->pp_acc_offset);
4667 
4668 	ppd->pp_acc_data = ddi_get32(handle, regaddr);
4669 }
4670 
4671 static void
4672 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4673 {
4674 	ddi_acc_handle_t handle;
4675 	uint32_t *regaddr;
4676 	uint32_t value;
4677 
4678 	handle = e1000gp->osdep.reg_handle;
4679 	regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
4680 	    (uintptr_t)ppd->pp_acc_offset);
4681 	value = (uint32_t)ppd->pp_acc_data;
4682 
4683 	ddi_put32(handle, regaddr, value);
4684 }
4685 
4686 static void
4687 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4688 {
4689 	uint64_t value;
4690 	void *vaddr;
4691 
4692 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
4693 
4694 	switch (ppd->pp_acc_size) {
4695 	case 1:
4696 		value = *(uint8_t *)vaddr;
4697 		break;
4698 
4699 	case 2:
4700 		value = *(uint16_t *)vaddr;
4701 		break;
4702 
4703 	case 4:
4704 		value = *(uint32_t *)vaddr;
4705 		break;
4706 
4707 	case 8:
4708 		value = *(uint64_t *)vaddr;
4709 		break;
4710 	}
4711 
4712 	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
4713 	    "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n",
4714 	    (void *)e1000gp, (void *)ppd, value, vaddr);
4715 
4716 	ppd->pp_acc_data = value;
4717 }
4718 
4719 static void
4720 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4721 {
4722 	uint64_t value;
4723 	void *vaddr;
4724 
4725 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
4726 	value = ppd->pp_acc_data;
4727 
4728 	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
4729 	    "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n",
4730 	    (void *)e1000gp, (void *)ppd, value, vaddr);
4731 
4732 	switch (ppd->pp_acc_size) {
4733 	case 1:
4734 		*(uint8_t *)vaddr = (uint8_t)value;
4735 		break;
4736 
4737 	case 2:
4738 		*(uint16_t *)vaddr = (uint16_t)value;
4739 		break;
4740 
4741 	case 4:
4742 		*(uint32_t *)vaddr = (uint32_t)value;
4743 		break;
4744 
4745 	case 8:
4746 		*(uint64_t *)vaddr = (uint64_t)value;
4747 		break;
4748 	}
4749 }
4750 #endif
4751 
4752 /*
4753  * Loopback Support
4754  */
4755 static lb_property_t lb_normal =
4756 	{ normal,	"normal",	E1000G_LB_NONE		};
4757 static lb_property_t lb_external1000 =
4758 	{ external,	"1000Mbps",	E1000G_LB_EXTERNAL_1000	};
4759 static lb_property_t lb_external100 =
4760 	{ external,	"100Mbps",	E1000G_LB_EXTERNAL_100	};
4761 static lb_property_t lb_external10 =
4762 	{ external,	"10Mbps",	E1000G_LB_EXTERNAL_10	};
4763 static lb_property_t lb_phy =
4764 	{ internal,	"PHY",		E1000G_LB_INTERNAL_PHY	};
4765 
4766 static enum ioc_reply
4767 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp)
4768 {
4769 	lb_info_sz_t *lbsp;
4770 	lb_property_t *lbpp;
4771 	struct e1000_hw *hw;
4772 	uint32_t *lbmp;
4773 	uint32_t size;
4774 	uint32_t value;
4775 
4776 	hw = &Adapter->shared;
4777 
4778 	if (mp->b_cont == NULL)
4779 		return (IOC_INVAL);
4780 
4781 	if (!e1000g_check_loopback_support(hw)) {
4782 		e1000g_log(NULL, CE_WARN,
4783 		    "Loopback is not supported on e1000g%d", Adapter->instance);
4784 		return (IOC_INVAL);
4785 	}
4786 
4787 	switch (iocp->ioc_cmd) {
4788 	default:
4789 		return (IOC_INVAL);
4790 
4791 	case LB_GET_INFO_SIZE:
4792 		size = sizeof (lb_info_sz_t);
4793 		if (iocp->ioc_count != size)
4794 			return (IOC_INVAL);
4795 
4796 		rw_enter(&Adapter->chip_lock, RW_WRITER);
4797 		e1000g_get_phy_state(Adapter);
4798 
4799 		/*
4800 		 * Workaround for hardware faults. In order to get a stable
4801 		 * state of phy, we will wait for a specific interval and
4802 		 * try again. The time delay is an experiential value based
4803 		 * on our testing.
4804 		 */
4805 		msec_delay(100);
4806 		e1000g_get_phy_state(Adapter);
4807 		rw_exit(&Adapter->chip_lock);
4808 
4809 		value = sizeof (lb_normal);
4810 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
4811 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
4812 		    (hw->phy.media_type == e1000_media_type_fiber) ||
4813 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
4814 			value += sizeof (lb_phy);
4815 			switch (hw->mac.type) {
4816 			case e1000_82571:
4817 			case e1000_82572:
4818 			case e1000_80003es2lan:
4819 				value += sizeof (lb_external1000);
4820 				break;
4821 			}
4822 		}
4823 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
4824 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
4825 			value += sizeof (lb_external100);
4826 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
4827 			value += sizeof (lb_external10);
4828 
4829 		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
4830 		*lbsp = value;
4831 		break;
4832 
4833 	case LB_GET_INFO:
4834 		value = sizeof (lb_normal);
4835 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
4836 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
4837 		    (hw->phy.media_type == e1000_media_type_fiber) ||
4838 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
4839 			value += sizeof (lb_phy);
4840 			switch (hw->mac.type) {
4841 			case e1000_82571:
4842 			case e1000_82572:
4843 			case e1000_80003es2lan:
4844 				value += sizeof (lb_external1000);
4845 				break;
4846 			}
4847 		}
4848 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
4849 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
4850 			value += sizeof (lb_external100);
4851 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
4852 			value += sizeof (lb_external10);
4853 
4854 		size = value;
4855 		if (iocp->ioc_count != size)
4856 			return (IOC_INVAL);
4857 
4858 		value = 0;
4859 		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
4860 		lbpp[value++] = lb_normal;
4861 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
4862 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
4863 		    (hw->phy.media_type == e1000_media_type_fiber) ||
4864 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
4865 			lbpp[value++] = lb_phy;
4866 			switch (hw->mac.type) {
4867 			case e1000_82571:
4868 			case e1000_82572:
4869 			case e1000_80003es2lan:
4870 				lbpp[value++] = lb_external1000;
4871 				break;
4872 			}
4873 		}
4874 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
4875 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
4876 			lbpp[value++] = lb_external100;
4877 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
4878 			lbpp[value++] = lb_external10;
4879 		break;
4880 
4881 	case LB_GET_MODE:
4882 		size = sizeof (uint32_t);
4883 		if (iocp->ioc_count != size)
4884 			return (IOC_INVAL);
4885 
4886 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
4887 		*lbmp = Adapter->loopback_mode;
4888 		break;
4889 
4890 	case LB_SET_MODE:
4891 		size = 0;
4892 		if (iocp->ioc_count != sizeof (uint32_t))
4893 			return (IOC_INVAL);
4894 
4895 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
4896 		if (!e1000g_set_loopback_mode(Adapter, *lbmp))
4897 			return (IOC_INVAL);
4898 		break;
4899 	}
4900 
4901 	iocp->ioc_count = size;
4902 	iocp->ioc_error = 0;
4903 
4904 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
4905 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4906 		return (IOC_INVAL);
4907 	}
4908 
4909 	return (IOC_REPLY);
4910 }
4911 
4912 static boolean_t
4913 e1000g_check_loopback_support(struct e1000_hw *hw)
4914 {
4915 	switch (hw->mac.type) {
4916 	case e1000_82540:
4917 	case e1000_82545:
4918 	case e1000_82545_rev_3:
4919 	case e1000_82546:
4920 	case e1000_82546_rev_3:
4921 	case e1000_82541:
4922 	case e1000_82541_rev_2:
4923 	case e1000_82547:
4924 	case e1000_82547_rev_2:
4925 	case e1000_82571:
4926 	case e1000_82572:
4927 	case e1000_82573:
4928 	case e1000_82574:
4929 	case e1000_80003es2lan:
4930 	case e1000_ich9lan:
4931 	case e1000_ich10lan:
4932 		return (B_TRUE);
4933 	}
4934 	return (B_FALSE);
4935 }
4936 
4937 static boolean_t
4938 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode)
4939 {
4940 	struct e1000_hw *hw;
4941 	int i, times;
4942 	boolean_t link_up;
4943 
4944 	if (mode == Adapter->loopback_mode)
4945 		return (B_TRUE);
4946 
4947 	hw = &Adapter->shared;
4948 	times = 0;
4949 
4950 	Adapter->loopback_mode = mode;
4951 
4952 	if (mode == E1000G_LB_NONE) {
4953 		/* Reset the chip */
4954 		hw->phy.autoneg_wait_to_complete = B_TRUE;
4955 		(void) e1000g_reset_adapter(Adapter);
4956 		hw->phy.autoneg_wait_to_complete = B_FALSE;
4957 		return (B_TRUE);
4958 	}
4959 
4960 again:
4961 
4962 	rw_enter(&Adapter->chip_lock, RW_WRITER);
4963 
4964 	switch (mode) {
4965 	default:
4966 		rw_exit(&Adapter->chip_lock);
4967 		return (B_FALSE);
4968 
4969 	case E1000G_LB_EXTERNAL_1000:
4970 		e1000g_set_external_loopback_1000(Adapter);
4971 		break;
4972 
4973 	case E1000G_LB_EXTERNAL_100:
4974 		e1000g_set_external_loopback_100(Adapter);
4975 		break;
4976 
4977 	case E1000G_LB_EXTERNAL_10:
4978 		e1000g_set_external_loopback_10(Adapter);
4979 		break;
4980 
4981 	case E1000G_LB_INTERNAL_PHY:
4982 		e1000g_set_internal_loopback(Adapter);
4983 		break;
4984 	}
4985 
4986 	times++;
4987 
4988 	rw_exit(&Adapter->chip_lock);
4989 
4990 	/* Wait for link up */
4991 	for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--)
4992 		msec_delay(100);
4993 
4994 	rw_enter(&Adapter->chip_lock, RW_WRITER);
4995 
4996 	link_up = e1000g_link_up(Adapter);
4997 
4998 	rw_exit(&Adapter->chip_lock);
4999 
5000 	if (!link_up) {
5001 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5002 		    "Failed to get the link up");
5003 		if (times < 2) {
5004 			/* Reset the link */
5005 			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5006 			    "Reset the link ...");
5007 			(void) e1000g_reset_adapter(Adapter);
5008 			goto again;
5009 		}
5010 	}
5011 
5012 	return (B_TRUE);
5013 }
5014 
5015 /*
5016  * The following loopback settings are from Intel's technical
5017  * document - "How To Loopback". All the register settings and
5018  * time delay values are directly inherited from the document
5019  * without more explanations available.
5020  */
5021 static void
5022 e1000g_set_internal_loopback(struct e1000g *Adapter)
5023 {
5024 	struct e1000_hw *hw;
5025 	uint32_t ctrl;
5026 	uint32_t status;
5027 	uint16_t phy_ctrl;
5028 	uint16_t phy_reg;
5029 	uint32_t txcw;
5030 
5031 	hw = &Adapter->shared;
5032 
5033 	/* Disable Smart Power Down */
5034 	phy_spd_state(hw, B_FALSE);
5035 
5036 	(void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
5037 	phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10);
5038 	phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000;
5039 
5040 	switch (hw->mac.type) {
5041 	case e1000_82540:
5042 	case e1000_82545:
5043 	case e1000_82545_rev_3:
5044 	case e1000_82546:
5045 	case e1000_82546_rev_3:
5046 	case e1000_82573:
5047 		/* Auto-MDI/MDIX off */
5048 		(void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
5049 		/* Reset PHY to update Auto-MDI/MDIX */
5050 		(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5051 		    phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN);
5052 		/* Reset PHY to auto-neg off and force 1000 */
5053 		(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5054 		    phy_ctrl | MII_CR_RESET);
5055 		/*
5056 		 * Disable PHY receiver for 82540/545/546 and 82573 Family.
5057 		 * See comments above e1000g_set_internal_loopback() for the
5058 		 * background.
5059 		 */
5060 		(void) e1000_write_phy_reg(hw, 29, 0x001F);
5061 		(void) e1000_write_phy_reg(hw, 30, 0x8FFC);
5062 		(void) e1000_write_phy_reg(hw, 29, 0x001A);
5063 		(void) e1000_write_phy_reg(hw, 30, 0x8FF0);
5064 		break;
5065 	case e1000_80003es2lan:
5066 		/* Force Link Up */
5067 		(void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
5068 		    0x1CC);
5069 		/* Sets PCS loopback at 1Gbs */
5070 		(void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
5071 		    0x1046);
5072 		break;
5073 	}
5074 
5075 	/*
5076 	 * The following registers should be set for e1000_phy_bm phy type.
5077 	 * e1000_82574, e1000_ich10lan and some e1000_ich9lan use this phy.
5078 	 * For others, we do not need to set these registers.
5079 	 */
5080 	if (hw->phy.type == e1000_phy_bm) {
5081 		/* Set Default MAC Interface speed to 1GB */
5082 		e1000_read_phy_reg(hw, PHY_REG(2, 21), &phy_reg);
5083 		phy_reg &= ~0x0007;
5084 		phy_reg |= 0x006;
5085 		e1000_write_phy_reg(hw, PHY_REG(2, 21), phy_reg);
5086 		/* Assert SW reset for above settings to take effect */
5087 		e1000_phy_commit(hw);
5088 		msec_delay(1);
5089 		/* Force Full Duplex */
5090 		e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5091 		e1000_write_phy_reg(hw, PHY_REG(769, 16), phy_reg | 0x000C);
5092 		/* Set Link Up (in force link) */
5093 		e1000_read_phy_reg(hw, PHY_REG(776, 16), &phy_reg);
5094 		e1000_write_phy_reg(hw, PHY_REG(776, 16), phy_reg | 0x0040);
5095 		/* Force Link */
5096 		e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5097 		e1000_write_phy_reg(hw, PHY_REG(769, 16), phy_reg | 0x0040);
5098 		/* Set Early Link Enable */
5099 		e1000_read_phy_reg(hw, PHY_REG(769, 20), &phy_reg);
5100 		e1000_write_phy_reg(hw, PHY_REG(769, 20), phy_reg | 0x0400);
5101 	}
5102 
5103 	/* Set loopback */
5104 	(void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK);
5105 
5106 	msec_delay(250);
5107 
5108 	/* Now set up the MAC to the same speed/duplex as the PHY. */
5109 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5110 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
5111 	ctrl |= (E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
5112 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
5113 	    E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
5114 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
5115 
5116 	switch (hw->mac.type) {
5117 	case e1000_82540:
5118 	case e1000_82545:
5119 	case e1000_82545_rev_3:
5120 	case e1000_82546:
5121 	case e1000_82546_rev_3:
5122 		/*
5123 		 * For some serdes we'll need to commit the writes now
5124 		 * so that the status is updated on link
5125 		 */
5126 		if (hw->phy.media_type == e1000_media_type_internal_serdes) {
5127 			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5128 			msec_delay(100);
5129 			ctrl = E1000_READ_REG(hw, E1000_CTRL);
5130 		}
5131 
5132 		if (hw->phy.media_type == e1000_media_type_copper) {
5133 			/* Invert Loss of Signal */
5134 			ctrl |= E1000_CTRL_ILOS;
5135 		} else {
5136 			/* Set ILOS on fiber nic if half duplex is detected */
5137 			status = E1000_READ_REG(hw, E1000_STATUS);
5138 			if ((status & E1000_STATUS_FD) == 0)
5139 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5140 		}
5141 		break;
5142 
5143 	case e1000_82571:
5144 	case e1000_82572:
5145 		/*
5146 		 * The fiber/SerDes versions of this adapter do not contain an
5147 		 * accessible PHY. Therefore, loopback beyond MAC must be done
5148 		 * using SerDes analog loopback.
5149 		 */
5150 		if (hw->phy.media_type != e1000_media_type_copper) {
5151 			/* Disable autoneg by setting bit 31 of TXCW to zero */
5152 			txcw = E1000_READ_REG(hw, E1000_TXCW);
5153 			txcw &= ~((uint32_t)1 << 31);
5154 			E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5155 
5156 			/*
5157 			 * Write 0x410 to Serdes Control register
5158 			 * to enable Serdes analog loopback
5159 			 */
5160 			E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5161 			msec_delay(10);
5162 		}
5163 
5164 		status = E1000_READ_REG(hw, E1000_STATUS);
5165 		/* Set ILOS on fiber nic if half duplex is detected */
5166 		if ((hw->phy.media_type == e1000_media_type_fiber) &&
5167 		    ((status & E1000_STATUS_FD) == 0 ||
5168 		    (status & E1000_STATUS_LU) == 0))
5169 			ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5170 		else if (hw->phy.media_type == e1000_media_type_internal_serdes)
5171 			ctrl |= E1000_CTRL_SLU;
5172 		break;
5173 
5174 	case e1000_82573:
5175 		ctrl |= E1000_CTRL_ILOS;
5176 		break;
5177 	case e1000_ich9lan:
5178 	case e1000_ich10lan:
5179 		ctrl |= E1000_CTRL_SLU;
5180 		break;
5181 	}
5182 	if (hw->phy.type == e1000_phy_bm)
5183 		ctrl |= E1000_CTRL_SLU | E1000_CTRL_ILOS;
5184 
5185 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5186 }
5187 
5188 static void
5189 e1000g_set_external_loopback_1000(struct e1000g *Adapter)
5190 {
5191 	struct e1000_hw *hw;
5192 	uint32_t rctl;
5193 	uint32_t ctrl_ext;
5194 	uint32_t ctrl;
5195 	uint32_t status;
5196 	uint32_t txcw;
5197 	uint16_t phydata;
5198 
5199 	hw = &Adapter->shared;
5200 
5201 	/* Disable Smart Power Down */
5202 	phy_spd_state(hw, B_FALSE);
5203 
5204 	switch (hw->mac.type) {
5205 	case e1000_82571:
5206 	case e1000_82572:
5207 		switch (hw->phy.media_type) {
5208 		case e1000_media_type_copper:
5209 			/* Force link up (Must be done before the PHY writes) */
5210 			ctrl = E1000_READ_REG(hw, E1000_CTRL);
5211 			ctrl |= E1000_CTRL_SLU;	/* Force Link Up */
5212 			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5213 
5214 			rctl = E1000_READ_REG(hw, E1000_RCTL);
5215 			rctl |= (E1000_RCTL_EN |
5216 			    E1000_RCTL_SBP |
5217 			    E1000_RCTL_UPE |
5218 			    E1000_RCTL_MPE |
5219 			    E1000_RCTL_LPE |
5220 			    E1000_RCTL_BAM);		/* 0x803E */
5221 			E1000_WRITE_REG(hw, E1000_RCTL, rctl);
5222 
5223 			ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5224 			ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA |
5225 			    E1000_CTRL_EXT_SDP6_DATA |
5226 			    E1000_CTRL_EXT_SDP7_DATA |
5227 			    E1000_CTRL_EXT_SDP4_DIR |
5228 			    E1000_CTRL_EXT_SDP6_DIR |
5229 			    E1000_CTRL_EXT_SDP7_DIR);	/* 0x0DD0 */
5230 			E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5231 
5232 			/*
5233 			 * This sequence tunes the PHY's SDP and no customer
5234 			 * settable values. For background, see comments above
5235 			 * e1000g_set_internal_loopback().
5236 			 */
5237 			(void) e1000_write_phy_reg(hw, 0x0, 0x140);
5238 			msec_delay(10);
5239 			(void) e1000_write_phy_reg(hw, 0x9, 0x1A00);
5240 			(void) e1000_write_phy_reg(hw, 0x12, 0xC10);
5241 			(void) e1000_write_phy_reg(hw, 0x12, 0x1C10);
5242 			(void) e1000_write_phy_reg(hw, 0x1F37, 0x76);
5243 			(void) e1000_write_phy_reg(hw, 0x1F33, 0x1);
5244 			(void) e1000_write_phy_reg(hw, 0x1F33, 0x0);
5245 
5246 			(void) e1000_write_phy_reg(hw, 0x1F35, 0x65);
5247 			(void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C);
5248 			(void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC);
5249 			(void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C);
5250 			(void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC);
5251 
5252 			msec_delay(50);
5253 			break;
5254 		case e1000_media_type_fiber:
5255 		case e1000_media_type_internal_serdes:
5256 			status = E1000_READ_REG(hw, E1000_STATUS);
5257 			if (((status & E1000_STATUS_LU) == 0) ||
5258 			    (hw->phy.media_type ==
5259 			    e1000_media_type_internal_serdes)) {
5260 				ctrl = E1000_READ_REG(hw, E1000_CTRL);
5261 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5262 				E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5263 			}
5264 
5265 			/* Disable autoneg by setting bit 31 of TXCW to zero */
5266 			txcw = E1000_READ_REG(hw, E1000_TXCW);
5267 			txcw &= ~((uint32_t)1 << 31);
5268 			E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5269 
5270 			/*
5271 			 * Write 0x410 to Serdes Control register
5272 			 * to enable Serdes analog loopback
5273 			 */
5274 			E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5275 			msec_delay(10);
5276 			break;
5277 		default:
5278 			break;
5279 		}
5280 		break;
5281 	case e1000_82574:
5282 	case e1000_80003es2lan:
5283 	case e1000_ich9lan:
5284 	case e1000_ich10lan:
5285 		(void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata);
5286 		(void) e1000_write_phy_reg(hw, GG82563_REG(6, 16),
5287 		    phydata | (1 << 5));
5288 		Adapter->param_adv_autoneg = 1;
5289 		Adapter->param_adv_1000fdx = 1;
5290 		(void) e1000g_reset_link(Adapter);
5291 		break;
5292 	}
5293 }
5294 
5295 static void
5296 e1000g_set_external_loopback_100(struct e1000g *Adapter)
5297 {
5298 	struct e1000_hw *hw;
5299 	uint32_t ctrl;
5300 	uint16_t phy_ctrl;
5301 
5302 	hw = &Adapter->shared;
5303 
5304 	/* Disable Smart Power Down */
5305 	phy_spd_state(hw, B_FALSE);
5306 
5307 	phy_ctrl = (MII_CR_FULL_DUPLEX |
5308 	    MII_CR_SPEED_100);
5309 
5310 	/* Force 100/FD, reset PHY */
5311 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5312 	    phy_ctrl | MII_CR_RESET);	/* 0xA100 */
5313 	msec_delay(10);
5314 
5315 	/* Force 100/FD */
5316 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5317 	    phy_ctrl);			/* 0x2100 */
5318 	msec_delay(10);
5319 
5320 	/* Now setup the MAC to the same speed/duplex as the PHY. */
5321 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5322 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
5323 	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
5324 	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
5325 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
5326 	    E1000_CTRL_SPD_100 |	/* Force Speed to 100 */
5327 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
5328 
5329 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5330 }
5331 
5332 static void
5333 e1000g_set_external_loopback_10(struct e1000g *Adapter)
5334 {
5335 	struct e1000_hw *hw;
5336 	uint32_t ctrl;
5337 	uint16_t phy_ctrl;
5338 
5339 	hw = &Adapter->shared;
5340 
5341 	/* Disable Smart Power Down */
5342 	phy_spd_state(hw, B_FALSE);
5343 
5344 	phy_ctrl = (MII_CR_FULL_DUPLEX |
5345 	    MII_CR_SPEED_10);
5346 
5347 	/* Force 10/FD, reset PHY */
5348 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5349 	    phy_ctrl | MII_CR_RESET);	/* 0x8100 */
5350 	msec_delay(10);
5351 
5352 	/* Force 10/FD */
5353 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5354 	    phy_ctrl);			/* 0x0100 */
5355 	msec_delay(10);
5356 
5357 	/* Now setup the MAC to the same speed/duplex as the PHY. */
5358 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5359 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
5360 	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
5361 	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
5362 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
5363 	    E1000_CTRL_SPD_10 |		/* Force Speed to 10 */
5364 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
5365 
5366 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5367 }
5368 
5369 #ifdef __sparc
5370 static boolean_t
5371 e1000g_find_mac_address(struct e1000g *Adapter)
5372 {
5373 	struct e1000_hw *hw = &Adapter->shared;
5374 	uchar_t *bytes;
5375 	struct ether_addr sysaddr;
5376 	uint_t nelts;
5377 	int err;
5378 	boolean_t found = B_FALSE;
5379 
5380 	/*
5381 	 * The "vendor's factory-set address" may already have
5382 	 * been extracted from the chip, but if the property
5383 	 * "local-mac-address" is set we use that instead.
5384 	 *
5385 	 * We check whether it looks like an array of 6
5386 	 * bytes (which it should, if OBP set it).  If we can't
5387 	 * make sense of it this way, we'll ignore it.
5388 	 */
5389 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
5390 	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
5391 	if (err == DDI_PROP_SUCCESS) {
5392 		if (nelts == ETHERADDRL) {
5393 			while (nelts--)
5394 				hw->mac.addr[nelts] = bytes[nelts];
5395 			found = B_TRUE;
5396 		}
5397 		ddi_prop_free(bytes);
5398 	}
5399 
5400 	/*
5401 	 * Look up the OBP property "local-mac-address?". If the user has set
5402 	 * 'local-mac-address? = false', use "the system address" instead.
5403 	 */
5404 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0,
5405 	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
5406 		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
5407 			if (localetheraddr(NULL, &sysaddr) != 0) {
5408 				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
5409 				found = B_TRUE;
5410 			}
5411 		}
5412 		ddi_prop_free(bytes);
5413 	}
5414 
5415 	/*
5416 	 * Finally(!), if there's a valid "mac-address" property (created
5417 	 * if we netbooted from this interface), we must use this instead
5418 	 * of any of the above to ensure that the NFS/install server doesn't
5419 	 * get confused by the address changing as Solaris takes over!
5420 	 */
5421 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
5422 	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
5423 	if (err == DDI_PROP_SUCCESS) {
5424 		if (nelts == ETHERADDRL) {
5425 			while (nelts--)
5426 				hw->mac.addr[nelts] = bytes[nelts];
5427 			found = B_TRUE;
5428 		}
5429 		ddi_prop_free(bytes);
5430 	}
5431 
5432 	if (found) {
5433 		bcopy(hw->mac.addr, hw->mac.perm_addr,
5434 		    ETHERADDRL);
5435 	}
5436 
5437 	return (found);
5438 }
5439 #endif
5440 
5441 static int
5442 e1000g_add_intrs(struct e1000g *Adapter)
5443 {
5444 	dev_info_t *devinfo;
5445 	int intr_types;
5446 	int rc;
5447 
5448 	devinfo = Adapter->dip;
5449 
5450 	/* Get supported interrupt types */
5451 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
5452 
5453 	if (rc != DDI_SUCCESS) {
5454 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5455 		    "Get supported interrupt types failed: %d\n", rc);
5456 		return (DDI_FAILURE);
5457 	}
5458 
5459 	/*
5460 	 * Based on Intel Technical Advisory document (TA-160), there are some
5461 	 * cases where some older Intel PCI-X NICs may "advertise" to the OS
5462 	 * that it supports MSI, but in fact has problems.
5463 	 * So we should only enable MSI for PCI-E NICs and disable MSI for old
5464 	 * PCI/PCI-X NICs.
5465 	 */
5466 	if (Adapter->shared.mac.type < e1000_82571)
5467 		Adapter->msi_enable = B_FALSE;
5468 
5469 	if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) {
5470 		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI);
5471 
5472 		if (rc != DDI_SUCCESS) {
5473 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5474 			    "Add MSI failed, trying Legacy interrupts\n");
5475 		} else {
5476 			Adapter->intr_type = DDI_INTR_TYPE_MSI;
5477 		}
5478 	}
5479 
5480 	if ((Adapter->intr_type == 0) &&
5481 	    (intr_types & DDI_INTR_TYPE_FIXED)) {
5482 		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED);
5483 
5484 		if (rc != DDI_SUCCESS) {
5485 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5486 			    "Add Legacy interrupts failed\n");
5487 			return (DDI_FAILURE);
5488 		}
5489 
5490 		Adapter->intr_type = DDI_INTR_TYPE_FIXED;
5491 	}
5492 
5493 	if (Adapter->intr_type == 0) {
5494 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5495 		    "No interrupts registered\n");
5496 		return (DDI_FAILURE);
5497 	}
5498 
5499 	return (DDI_SUCCESS);
5500 }
5501 
5502 /*
5503  * e1000g_intr_add() handles MSI/Legacy interrupts
5504  */
5505 static int
5506 e1000g_intr_add(struct e1000g *Adapter, int intr_type)
5507 {
5508 	dev_info_t *devinfo;
5509 	int count, avail, actual;
5510 	int x, y, rc, inum = 0;
5511 	int flag;
5512 	ddi_intr_handler_t *intr_handler;
5513 
5514 	devinfo = Adapter->dip;
5515 
5516 	/* get number of interrupts */
5517 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
5518 	if ((rc != DDI_SUCCESS) || (count == 0)) {
5519 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5520 		    "Get interrupt number failed. Return: %d, count: %d\n",
5521 		    rc, count);
5522 		return (DDI_FAILURE);
5523 	}
5524 
5525 	/* get number of available interrupts */
5526 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
5527 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
5528 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5529 		    "Get interrupt available number failed. "
5530 		    "Return: %d, available: %d\n", rc, avail);
5531 		return (DDI_FAILURE);
5532 	}
5533 
5534 	if (avail < count) {
5535 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5536 		    "Interrupts count: %d, available: %d\n",
5537 		    count, avail);
5538 	}
5539 
5540 	/* Allocate an array of interrupt handles */
5541 	Adapter->intr_size = count * sizeof (ddi_intr_handle_t);
5542 	Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP);
5543 
5544 	/* Set NORMAL behavior for both MSI and FIXED interrupt */
5545 	flag = DDI_INTR_ALLOC_NORMAL;
5546 
5547 	/* call ddi_intr_alloc() */
5548 	rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum,
5549 	    count, &actual, flag);
5550 
5551 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
5552 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5553 		    "Allocate interrupts failed: %d\n", rc);
5554 
5555 		kmem_free(Adapter->htable, Adapter->intr_size);
5556 		return (DDI_FAILURE);
5557 	}
5558 
5559 	if (actual < count) {
5560 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5561 		    "Interrupts requested: %d, received: %d\n",
5562 		    count, actual);
5563 	}
5564 
5565 	Adapter->intr_cnt = actual;
5566 
5567 	/* Get priority for first msi, assume remaining are all the same */
5568 	rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri);
5569 
5570 	if (rc != DDI_SUCCESS) {
5571 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5572 		    "Get interrupt priority failed: %d\n", rc);
5573 
5574 		/* Free already allocated intr */
5575 		for (y = 0; y < actual; y++)
5576 			(void) ddi_intr_free(Adapter->htable[y]);
5577 
5578 		kmem_free(Adapter->htable, Adapter->intr_size);
5579 		return (DDI_FAILURE);
5580 	}
5581 
5582 	/*
5583 	 * In Legacy Interrupt mode, for PCI-Express adapters, we should
5584 	 * use the interrupt service routine e1000g_intr_pciexpress()
5585 	 * to avoid interrupt stealing when sharing interrupt with other
5586 	 * devices.
5587 	 */
5588 	if (Adapter->shared.mac.type < e1000_82571)
5589 		intr_handler = (ddi_intr_handler_t *)e1000g_intr;
5590 	else
5591 		intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress;
5592 
5593 	/* Call ddi_intr_add_handler() */
5594 	for (x = 0; x < actual; x++) {
5595 		rc = ddi_intr_add_handler(Adapter->htable[x],
5596 		    intr_handler, (caddr_t)Adapter, NULL);
5597 
5598 		if (rc != DDI_SUCCESS) {
5599 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5600 			    "Add interrupt handler failed: %d\n", rc);
5601 
5602 			/* Remove already added handler */
5603 			for (y = 0; y < x; y++)
5604 				(void) ddi_intr_remove_handler(
5605 				    Adapter->htable[y]);
5606 
5607 			/* Free already allocated intr */
5608 			for (y = 0; y < actual; y++)
5609 				(void) ddi_intr_free(Adapter->htable[y]);
5610 
5611 			kmem_free(Adapter->htable, Adapter->intr_size);
5612 			return (DDI_FAILURE);
5613 		}
5614 	}
5615 
5616 	rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap);
5617 
5618 	if (rc != DDI_SUCCESS) {
5619 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5620 		    "Get interrupt cap failed: %d\n", rc);
5621 
5622 		/* Free already allocated intr */
5623 		for (y = 0; y < actual; y++) {
5624 			(void) ddi_intr_remove_handler(Adapter->htable[y]);
5625 			(void) ddi_intr_free(Adapter->htable[y]);
5626 		}
5627 
5628 		kmem_free(Adapter->htable, Adapter->intr_size);
5629 		return (DDI_FAILURE);
5630 	}
5631 
5632 	return (DDI_SUCCESS);
5633 }
5634 
5635 static int
5636 e1000g_rem_intrs(struct e1000g *Adapter)
5637 {
5638 	int x;
5639 	int rc;
5640 
5641 	for (x = 0; x < Adapter->intr_cnt; x++) {
5642 		rc = ddi_intr_remove_handler(Adapter->htable[x]);
5643 		if (rc != DDI_SUCCESS) {
5644 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5645 			    "Remove intr handler failed: %d\n", rc);
5646 			return (DDI_FAILURE);
5647 		}
5648 
5649 		rc = ddi_intr_free(Adapter->htable[x]);
5650 		if (rc != DDI_SUCCESS) {
5651 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5652 			    "Free intr failed: %d\n", rc);
5653 			return (DDI_FAILURE);
5654 		}
5655 	}
5656 
5657 	kmem_free(Adapter->htable, Adapter->intr_size);
5658 
5659 	return (DDI_SUCCESS);
5660 }
5661 
5662 static int
5663 e1000g_enable_intrs(struct e1000g *Adapter)
5664 {
5665 	int x;
5666 	int rc;
5667 
5668 	/* Enable interrupts */
5669 	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
5670 		/* Call ddi_intr_block_enable() for MSI */
5671 		rc = ddi_intr_block_enable(Adapter->htable,
5672 		    Adapter->intr_cnt);
5673 		if (rc != DDI_SUCCESS) {
5674 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5675 			    "Enable block intr failed: %d\n", rc);
5676 			return (DDI_FAILURE);
5677 		}
5678 	} else {
5679 		/* Call ddi_intr_enable() for Legacy/MSI non block enable */
5680 		for (x = 0; x < Adapter->intr_cnt; x++) {
5681 			rc = ddi_intr_enable(Adapter->htable[x]);
5682 			if (rc != DDI_SUCCESS) {
5683 				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5684 				    "Enable intr failed: %d\n", rc);
5685 				return (DDI_FAILURE);
5686 			}
5687 		}
5688 	}
5689 
5690 	return (DDI_SUCCESS);
5691 }
5692 
5693 static int
5694 e1000g_disable_intrs(struct e1000g *Adapter)
5695 {
5696 	int x;
5697 	int rc;
5698 
5699 	/* Disable all interrupts */
5700 	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
5701 		rc = ddi_intr_block_disable(Adapter->htable,
5702 		    Adapter->intr_cnt);
5703 		if (rc != DDI_SUCCESS) {
5704 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5705 			    "Disable block intr failed: %d\n", rc);
5706 			return (DDI_FAILURE);
5707 		}
5708 	} else {
5709 		for (x = 0; x < Adapter->intr_cnt; x++) {
5710 			rc = ddi_intr_disable(Adapter->htable[x]);
5711 			if (rc != DDI_SUCCESS) {
5712 				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5713 				    "Disable intr failed: %d\n", rc);
5714 				return (DDI_FAILURE);
5715 			}
5716 		}
5717 	}
5718 
5719 	return (DDI_SUCCESS);
5720 }
5721 
5722 /*
5723  * e1000g_get_phy_state - get the state of PHY registers, save in the adapter
5724  */
5725 static void
5726 e1000g_get_phy_state(struct e1000g *Adapter)
5727 {
5728 	struct e1000_hw *hw = &Adapter->shared;
5729 
5730 	(void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl);
5731 	(void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status);
5732 	(void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &Adapter->phy_an_adv);
5733 	(void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &Adapter->phy_an_exp);
5734 	(void) e1000_read_phy_reg(hw, PHY_EXT_STATUS, &Adapter->phy_ext_status);
5735 	(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &Adapter->phy_1000t_ctrl);
5736 	(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS,
5737 	    &Adapter->phy_1000t_status);
5738 	(void) e1000_read_phy_reg(hw, PHY_LP_ABILITY, &Adapter->phy_lp_able);
5739 
5740 	Adapter->param_autoneg_cap =
5741 	    (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
5742 	Adapter->param_pause_cap =
5743 	    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
5744 	Adapter->param_asym_pause_cap =
5745 	    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
5746 	Adapter->param_1000fdx_cap =
5747 	    ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5748 	    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
5749 	Adapter->param_1000hdx_cap =
5750 	    ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
5751 	    (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
5752 	Adapter->param_100t4_cap =
5753 	    (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
5754 	Adapter->param_100fdx_cap =
5755 	    ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5756 	    (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
5757 	Adapter->param_100hdx_cap =
5758 	    ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
5759 	    (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
5760 	Adapter->param_10fdx_cap =
5761 	    (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
5762 	Adapter->param_10hdx_cap =
5763 	    (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
5764 
5765 	Adapter->param_adv_autoneg = hw->mac.autoneg;
5766 	Adapter->param_adv_pause =
5767 	    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
5768 	Adapter->param_adv_asym_pause =
5769 	    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
5770 	Adapter->param_adv_1000hdx =
5771 	    (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
5772 	Adapter->param_adv_100t4 =
5773 	    (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
5774 	if (Adapter->param_adv_autoneg == 1) {
5775 		Adapter->param_adv_1000fdx =
5776 		    (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS) ? 1 : 0;
5777 		Adapter->param_adv_100fdx =
5778 		    (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS) ? 1 : 0;
5779 		Adapter->param_adv_100hdx =
5780 		    (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS) ? 1 : 0;
5781 		Adapter->param_adv_10fdx =
5782 		    (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
5783 		Adapter->param_adv_10hdx =
5784 		    (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
5785 	}
5786 
5787 	Adapter->param_lp_autoneg =
5788 	    (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
5789 	Adapter->param_lp_pause =
5790 	    (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
5791 	Adapter->param_lp_asym_pause =
5792 	    (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
5793 	Adapter->param_lp_1000fdx =
5794 	    (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
5795 	Adapter->param_lp_1000hdx =
5796 	    (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
5797 	Adapter->param_lp_100t4 =
5798 	    (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
5799 	Adapter->param_lp_100fdx =
5800 	    (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
5801 	Adapter->param_lp_100hdx =
5802 	    (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
5803 	Adapter->param_lp_10fdx =
5804 	    (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
5805 	Adapter->param_lp_10hdx =
5806 	    (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
5807 }
5808 
5809 /*
5810  * FMA support
5811  */
5812 
5813 int
5814 e1000g_check_acc_handle(ddi_acc_handle_t handle)
5815 {
5816 	ddi_fm_error_t de;
5817 
5818 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5819 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
5820 	return (de.fme_status);
5821 }
5822 
5823 int
5824 e1000g_check_dma_handle(ddi_dma_handle_t handle)
5825 {
5826 	ddi_fm_error_t de;
5827 
5828 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
5829 	return (de.fme_status);
5830 }
5831 
5832 /*
5833  * The IO fault service error handling callback function
5834  */
5835 /* ARGSUSED2 */
5836 static int
5837 e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
5838 {
5839 	/*
5840 	 * as the driver can always deal with an error in any dma or
5841 	 * access handle, we can just return the fme_status value.
5842 	 */
5843 	pci_ereport_post(dip, err, NULL);
5844 	return (err->fme_status);
5845 }
5846 
5847 static void
5848 e1000g_fm_init(struct e1000g *Adapter)
5849 {
5850 	ddi_iblock_cookie_t iblk;
5851 	int fma_acc_flag, fma_dma_flag;
5852 
5853 	/* Only register with IO Fault Services if we have some capability */
5854 	if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
5855 		e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
5856 		fma_acc_flag = 1;
5857 	} else {
5858 		e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
5859 		fma_acc_flag = 0;
5860 	}
5861 
5862 	if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
5863 		fma_dma_flag = 1;
5864 	} else {
5865 		fma_dma_flag = 0;
5866 	}
5867 
5868 	(void) e1000g_set_fma_flags(Adapter, fma_acc_flag, fma_dma_flag);
5869 
5870 	if (Adapter->fm_capabilities) {
5871 
5872 		/* Register capabilities with IO Fault Services */
5873 		ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk);
5874 
5875 		/*
5876 		 * Initialize pci ereport capabilities if ereport capable
5877 		 */
5878 		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
5879 		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5880 			pci_ereport_setup(Adapter->dip);
5881 
5882 		/*
5883 		 * Register error callback if error callback capable
5884 		 */
5885 		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5886 			ddi_fm_handler_register(Adapter->dip,
5887 			    e1000g_fm_error_cb, (void*) Adapter);
5888 	}
5889 }
5890 
5891 static void
5892 e1000g_fm_fini(struct e1000g *Adapter)
5893 {
5894 	/* Only unregister FMA capabilities if we registered some */
5895 	if (Adapter->fm_capabilities) {
5896 
5897 		/*
5898 		 * Release any resources allocated by pci_ereport_setup()
5899 		 */
5900 		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
5901 		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5902 			pci_ereport_teardown(Adapter->dip);
5903 
5904 		/*
5905 		 * Un-register error callback if error callback capable
5906 		 */
5907 		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5908 			ddi_fm_handler_unregister(Adapter->dip);
5909 
5910 		/* Unregister from IO Fault Services */
5911 		ddi_fm_fini(Adapter->dip);
5912 	}
5913 }
5914 
5915 void
5916 e1000g_fm_ereport(struct e1000g *Adapter, char *detail)
5917 {
5918 	uint64_t ena;
5919 	char buf[FM_MAX_CLASS];
5920 
5921 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
5922 	ena = fm_ena_generate(0, FM_ENA_FMT1);
5923 	if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) {
5924 		ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP,
5925 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
5926 	}
5927 }
5928 
5929 /*
5930  * quiesce(9E) entry point.
5931  *
5932  * This function is called when the system is single-threaded at high
5933  * PIL with preemption disabled. Therefore, this function must not be
5934  * blocked.
5935  *
5936  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5937  * DDI_FAILURE indicates an error condition and should almost never happen.
5938  */
5939 static int
5940 e1000g_quiesce(dev_info_t *devinfo)
5941 {
5942 	struct e1000g *Adapter;
5943 
5944 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
5945 
5946 	if (Adapter == NULL)
5947 		return (DDI_FAILURE);
5948 
5949 	e1000g_clear_all_interrupts(Adapter);
5950 
5951 	(void) e1000_reset_hw(&Adapter->shared);
5952 
5953 	/* Setup our HW Tx Head & Tail descriptor pointers */
5954 	E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
5955 	E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
5956 
5957 	/* Setup our HW Rx Head & Tail descriptor pointers */
5958 	E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0);
5959 	E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0);
5960 
5961 	return (DDI_SUCCESS);
5962 }
5963 
5964 static int
5965 e1000g_get_def_val(struct e1000g *Adapter, mac_prop_id_t pr_num,
5966     uint_t pr_valsize, void *pr_val)
5967 {
5968 	link_flowctrl_t fl;
5969 	int err = 0;
5970 
5971 	ASSERT(pr_valsize > 0);
5972 	switch (pr_num) {
5973 	case MAC_PROP_AUTONEG:
5974 		*(uint8_t *)pr_val =
5975 		    ((Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0);
5976 		break;
5977 	case MAC_PROP_FLOWCTRL:
5978 		if (pr_valsize < sizeof (link_flowctrl_t))
5979 			return (EINVAL);
5980 		fl = LINK_FLOWCTRL_BI;
5981 		bcopy(&fl, pr_val, sizeof (fl));
5982 		break;
5983 	case MAC_PROP_ADV_1000FDX_CAP:
5984 	case MAC_PROP_EN_1000FDX_CAP:
5985 		*(uint8_t *)pr_val =
5986 		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5987 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
5988 		break;
5989 	case MAC_PROP_ADV_1000HDX_CAP:
5990 	case MAC_PROP_EN_1000HDX_CAP:
5991 		*(uint8_t *)pr_val =
5992 		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
5993 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
5994 		break;
5995 	case MAC_PROP_ADV_100FDX_CAP:
5996 	case MAC_PROP_EN_100FDX_CAP:
5997 		*(uint8_t *)pr_val =
5998 		    ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5999 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
6000 		break;
6001 	case MAC_PROP_ADV_100HDX_CAP:
6002 	case MAC_PROP_EN_100HDX_CAP:
6003 		*(uint8_t *)pr_val =
6004 		    ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
6005 		    (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
6006 		break;
6007 	case MAC_PROP_ADV_10FDX_CAP:
6008 	case MAC_PROP_EN_10FDX_CAP:
6009 		*(uint8_t *)pr_val =
6010 		    (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
6011 		break;
6012 	case MAC_PROP_ADV_10HDX_CAP:
6013 	case MAC_PROP_EN_10HDX_CAP:
6014 		*(uint8_t *)pr_val =
6015 		    (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
6016 		break;
6017 	default:
6018 		err = ENOTSUP;
6019 		break;
6020 	}
6021 	return (err);
6022 }
6023 
6024 /*
6025  * synchronize the adv* and en* parameters.
6026  *
6027  * See comments in <sys/dld.h> for details of the *_en_*
6028  * parameters. The usage of ndd for setting adv parameters will
6029  * synchronize all the en parameters with the e1000g parameters,
6030  * implicitly disabling any settings made via dladm.
6031  */
6032 static void
6033 e1000g_param_sync(struct e1000g *Adapter)
6034 {
6035 	Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx;
6036 	Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx;
6037 	Adapter->param_en_100fdx = Adapter->param_adv_100fdx;
6038 	Adapter->param_en_100hdx = Adapter->param_adv_100hdx;
6039 	Adapter->param_en_10fdx = Adapter->param_adv_10fdx;
6040 	Adapter->param_en_10hdx = Adapter->param_adv_10hdx;
6041 }
6042 
6043 /*
6044  * e1000g_get_driver_control - tell manageability firmware that the driver
6045  * has control.
6046  */
6047 static void
6048 e1000g_get_driver_control(struct e1000_hw *hw)
6049 {
6050 	uint32_t ctrl_ext;
6051 	uint32_t swsm;
6052 
6053 	/* tell manageability firmware the driver has taken over */
6054 	switch (hw->mac.type) {
6055 	case e1000_82573:
6056 		swsm = E1000_READ_REG(hw, E1000_SWSM);
6057 		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);
6058 		break;
6059 	case e1000_82571:
6060 	case e1000_82572:
6061 	case e1000_82574:
6062 	case e1000_80003es2lan:
6063 	case e1000_ich8lan:
6064 	case e1000_ich9lan:
6065 	case e1000_ich10lan:
6066 		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6067 		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6068 		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
6069 		break;
6070 	default:
6071 		/* no manageability firmware: do nothing */
6072 		break;
6073 	}
6074 }
6075 
6076 /*
6077  * e1000g_release_driver_control - tell manageability firmware that the driver
6078  * has released control.
6079  */
6080 static void
6081 e1000g_release_driver_control(struct e1000_hw *hw)
6082 {
6083 	uint32_t ctrl_ext;
6084 	uint32_t swsm;
6085 
6086 	/* tell manageability firmware the driver has released control */
6087 	switch (hw->mac.type) {
6088 	case e1000_82573:
6089 		swsm = E1000_READ_REG(hw, E1000_SWSM);
6090 		E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
6091 		break;
6092 	case e1000_82571:
6093 	case e1000_82572:
6094 	case e1000_82574:
6095 	case e1000_80003es2lan:
6096 	case e1000_ich8lan:
6097 	case e1000_ich9lan:
6098 	case e1000_ich10lan:
6099 		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6100 		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6101 		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
6102 		break;
6103 	default:
6104 		/* no manageability firmware: do nothing */
6105 		break;
6106 	}
6107 }
6108 
6109 /*
6110  * Restore e1000g promiscuous mode.
6111  */
6112 static void
6113 e1000g_restore_promisc(struct e1000g *Adapter)
6114 {
6115 	if (Adapter->e1000g_promisc) {
6116 		uint32_t rctl;
6117 
6118 		rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
6119 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
6120 		E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
6121 	}
6122 }
6123