xref: /illumos-gate/usr/src/uts/common/io/e1000g/e1000g_main.c (revision 9b4e3ac25d882519cad3fc11f0c53b07f4e60536)
1 /*
2  * This file is provided under a CDDLv1 license.  When using or
3  * redistributing this file, you may do so under this license.
4  * In redistributing this file this license must be included
5  * and no other modification of this header file is permitted.
6  *
7  * CDDL LICENSE SUMMARY
8  *
9  * Copyright(c) 1999 - 2008 Intel Corporation. All rights reserved.
10  *
11  * The contents of this file are subject to the terms of Version
12  * 1.0 of the Common Development and Distribution License (the "License").
13  *
14  * You should have received a copy of the License with this software.
15  * You can obtain a copy of the License at
16  *	http://www.opensolaris.org/os/licensing.
17  * See the License for the specific language governing permissions
18  * and limitations under the License.
19  */
20 
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * **********************************************************************
28  *									*
29  * Module Name:								*
30  *   e1000g_main.c							*
31  *									*
32  * Abstract:								*
33  *   This file contains the interface routines for the Solaris OS.	*
34  *   It has all DDI entry point routines and GLD entry point routines.	*
35  *									*
36  *   This file also contains routines that handle initialization,	*
37  *   uninitialization and interrupt processing.			*
38  *									*
39  * **********************************************************************
40  */
41 
42 #include <sys/dlpi.h>
43 #include <sys/mac.h>
44 #include "e1000g_sw.h"
45 #include "e1000g_debug.h"
46 
47 static char ident[] = "Intel PRO/1000 Ethernet";
48 static char e1000g_string[] = "Intel(R) PRO/1000 Network Connection";
49 static char e1000g_version[] = "Driver Ver. 5.2.14";
50 
51 /*
52  * Prototypes for DDI entry points
53  */
54 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
55 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);
56 static int e1000g_quiesce(dev_info_t *);
57 
58 /*
59  * Prototypes for init and interrupt routines
60  */
61 static int e1000g_resume(dev_info_t *);
62 static int e1000g_suspend(dev_info_t *);
63 static uint_t e1000g_intr_pciexpress(caddr_t);
64 static uint_t e1000g_intr(caddr_t);
65 static void e1000g_intr_work(struct e1000g *, uint32_t);
66 #pragma inline(e1000g_intr_work)
67 static int e1000g_init(struct e1000g *);
68 static int e1000g_start(struct e1000g *, boolean_t);
69 static void e1000g_stop(struct e1000g *, boolean_t);
70 static int e1000g_m_start(void *);
71 static void e1000g_m_stop(void *);
72 static int e1000g_m_promisc(void *, boolean_t);
73 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
74 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
75 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
76 static int e1000g_m_setprop(void *, const char *, mac_prop_id_t,
77     uint_t, const void *);
78 static int e1000g_m_getprop(void *, const char *, mac_prop_id_t,
79     uint_t, uint_t, void *, uint_t *);
80 static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t,
81     const void *);
82 static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t,
83     uint_t, void *, uint_t *);
84 static void e1000g_init_locks(struct e1000g *);
85 static void e1000g_destroy_locks(struct e1000g *);
86 static int e1000g_identify_hardware(struct e1000g *);
87 static int e1000g_regs_map(struct e1000g *);
88 static int e1000g_set_driver_params(struct e1000g *);
89 static void e1000g_set_bufsize(struct e1000g *);
90 static int e1000g_register_mac(struct e1000g *);
91 static boolean_t e1000g_rx_drain(struct e1000g *);
92 static boolean_t e1000g_tx_drain(struct e1000g *);
93 static void e1000g_init_unicst(struct e1000g *);
94 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int);
95 
96 /*
97  * Local routines
98  */
99 static boolean_t e1000g_reset_adapter(struct e1000g *);
100 static void e1000g_tx_clean(struct e1000g *);
101 static void e1000g_rx_clean(struct e1000g *);
102 static void e1000g_link_timer(void *);
103 static void e1000g_local_timer(void *);
104 static boolean_t e1000g_link_check(struct e1000g *);
105 static boolean_t e1000g_stall_check(struct e1000g *);
106 static void e1000g_smartspeed(struct e1000g *);
107 static void e1000g_get_conf(struct e1000g *);
108 static int e1000g_get_prop(struct e1000g *, char *, int, int, int);
109 static void enable_watchdog_timer(struct e1000g *);
110 static void disable_watchdog_timer(struct e1000g *);
111 static void start_watchdog_timer(struct e1000g *);
112 static void restart_watchdog_timer(struct e1000g *);
113 static void stop_watchdog_timer(struct e1000g *);
114 static void stop_link_timer(struct e1000g *);
115 static void stop_82547_timer(e1000g_tx_ring_t *);
116 static void e1000g_force_speed_duplex(struct e1000g *);
117 static void e1000g_get_max_frame_size(struct e1000g *);
118 static boolean_t is_valid_mac_addr(uint8_t *);
119 static void e1000g_unattach(dev_info_t *, struct e1000g *);
120 #ifdef E1000G_DEBUG
121 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *);
122 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *);
123 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *);
124 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *);
125 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *,
126     struct iocblk *, mblk_t *);
127 #endif
128 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *,
129     struct iocblk *, mblk_t *);
130 static boolean_t e1000g_check_loopback_support(struct e1000_hw *);
131 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t);
132 static void e1000g_set_internal_loopback(struct e1000g *);
133 static void e1000g_set_external_loopback_1000(struct e1000g *);
134 static void e1000g_set_external_loopback_100(struct e1000g *);
135 static void e1000g_set_external_loopback_10(struct e1000g *);
136 static int e1000g_add_intrs(struct e1000g *);
137 static int e1000g_intr_add(struct e1000g *, int);
138 static int e1000g_rem_intrs(struct e1000g *);
139 static int e1000g_enable_intrs(struct e1000g *);
140 static int e1000g_disable_intrs(struct e1000g *);
141 static boolean_t e1000g_link_up(struct e1000g *);
142 #ifdef __sparc
143 static boolean_t e1000g_find_mac_address(struct e1000g *);
144 #endif
145 static void e1000g_get_phy_state(struct e1000g *);
146 static void e1000g_free_priv_devi_node(struct e1000g *, boolean_t);
147 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
148     const void *impl_data);
149 static void e1000g_fm_init(struct e1000g *Adapter);
150 static void e1000g_fm_fini(struct e1000g *Adapter);
151 static int e1000g_get_def_val(struct e1000g *, mac_prop_id_t, uint_t, void *);
152 static void e1000g_param_sync(struct e1000g *);
153 static void e1000g_get_driver_control(struct e1000_hw *);
154 static void e1000g_release_driver_control(struct e1000_hw *);
155 static void e1000g_restore_promisc(struct e1000g *Adapter);
156 
157 mac_priv_prop_t e1000g_priv_props[] = {
158 	{"_tx_bcopy_threshold", MAC_PROP_PERM_RW},
159 	{"_tx_interrupt_enable", MAC_PROP_PERM_RW},
160 	{"_tx_intr_delay", MAC_PROP_PERM_RW},
161 	{"_tx_intr_abs_delay", MAC_PROP_PERM_RW},
162 	{"_rx_bcopy_threshold", MAC_PROP_PERM_RW},
163 	{"_max_num_rcv_packets", MAC_PROP_PERM_RW},
164 	{"_rx_intr_delay", MAC_PROP_PERM_RW},
165 	{"_rx_intr_abs_delay", MAC_PROP_PERM_RW},
166 	{"_intr_throttling_rate", MAC_PROP_PERM_RW},
167 	{"_intr_adaptive", MAC_PROP_PERM_RW},
168 	{"_adv_pause_cap", MAC_PROP_PERM_READ},
169 	{"_adv_asym_pause_cap", MAC_PROP_PERM_READ},
170 };
171 #define	E1000G_MAX_PRIV_PROPS	\
172 	(sizeof (e1000g_priv_props)/sizeof (mac_priv_prop_t))
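/*
 * These private properties are exported through the MAC framework, so
 * they can typically be inspected and tuned with dladm(1M), for example
 * (illustrative only; the instance name and value are placeholders):
 *	dladm show-linkprop -p _rx_intr_delay e1000g0
 *	dladm set-linkprop -p _intr_throttling_rate=<value> e1000g0
 */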
173 
174 
175 static struct cb_ops cb_ws_ops = {
176 	nulldev,		/* cb_open */
177 	nulldev,		/* cb_close */
178 	nodev,			/* cb_strategy */
179 	nodev,			/* cb_print */
180 	nodev,			/* cb_dump */
181 	nodev,			/* cb_read */
182 	nodev,			/* cb_write */
183 	nodev,			/* cb_ioctl */
184 	nodev,			/* cb_devmap */
185 	nodev,			/* cb_mmap */
186 	nodev,			/* cb_segmap */
187 	nochpoll,		/* cb_chpoll */
188 	ddi_prop_op,		/* cb_prop_op */
189 	NULL,			/* cb_stream */
190 	D_MP | D_HOTPLUG,	/* cb_flag */
191 	CB_REV,			/* cb_rev */
192 	nodev,			/* cb_aread */
193 	nodev			/* cb_awrite */
194 };
195 
196 static struct dev_ops ws_ops = {
197 	DEVO_REV,		/* devo_rev */
198 	0,			/* devo_refcnt */
199 	NULL,			/* devo_getinfo */
200 	nulldev,		/* devo_identify */
201 	nulldev,		/* devo_probe */
202 	e1000g_attach,		/* devo_attach */
203 	e1000g_detach,		/* devo_detach */
204 	nodev,			/* devo_reset */
205 	&cb_ws_ops,		/* devo_cb_ops */
206 	NULL,			/* devo_bus_ops */
207 	ddi_power,		/* devo_power */
208 	e1000g_quiesce		/* devo_quiesce */
209 };
210 
211 static struct modldrv modldrv = {
212 	&mod_driverops,		/* Type of module.  This one is a driver */
213 	ident,			/* Description string */
214 	&ws_ops,		/* driver ops */
215 };
216 
217 static struct modlinkage modlinkage = {
218 	MODREV_1, &modldrv, NULL
219 };
220 
221 /* Access attributes for register mapping */
222 static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
223 	DDI_DEVICE_ATTR_V0,
224 	DDI_STRUCTURE_LE_ACC,
225 	DDI_STRICTORDER_ACC,
226 	DDI_FLAGERR_ACC
227 };
228 
229 #define	E1000G_M_CALLBACK_FLAGS \
230 	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
231 
232 static mac_callbacks_t e1000g_m_callbacks = {
233 	E1000G_M_CALLBACK_FLAGS,
234 	e1000g_m_stat,
235 	e1000g_m_start,
236 	e1000g_m_stop,
237 	e1000g_m_promisc,
238 	e1000g_m_multicst,
239 	NULL,
240 	e1000g_m_tx,
241 	e1000g_m_ioctl,
242 	e1000g_m_getcapab,
243 	NULL,
244 	NULL,
245 	e1000g_m_setprop,
246 	e1000g_m_getprop
247 };
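/*
 * The NULL entries above are optional mac_callbacks_t slots that e1000g
 * does not provide.  E1000G_M_CALLBACK_FLAGS tells the MAC layer which
 * of the optional callbacks (ioctl, getcapab, setprop, getprop) are
 * valid in this table.
 */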
248 
249 /*
250  * Global variables
251  */
252 uint32_t e1000g_mblks_pending = 0;
253 /*
254  * Workaround for Dynamic Reconfiguration support, for x86 platform only.
255  * Here we maintain a private dev_info list if e1000g_force_detach is
256  * enabled. If we force the driver to detach while there are still some
257  * rx buffers retained in the upper layer, we have to keep a copy of the
258  * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
259  * structure will be freed after the driver is detached. However, when we
260  * finally free those rx buffers released by the upper layer, we need to
261  * refer to the dev_info to free the dma buffers. So we save a copy of
262  * the dev_info for this purpose. On the x86 platform, we assume this copy
263  * of dev_info is always valid, but on the SPARC platform, it could become
264  * invalid after a system-board-level DR operation. For this reason, the
265  * global variable e1000g_force_detach must be B_FALSE on SPARC.
266  */
267 #ifdef __sparc
268 boolean_t e1000g_force_detach = B_FALSE;
269 #else
270 boolean_t e1000g_force_detach = B_TRUE;
271 #endif
272 private_devi_list_t *e1000g_private_devi_list = NULL;
273 
274 /*
275  * This rwlock protects rx recycling and the release of rx packets
276  * during detach processing, making the two operations mutually
277  * exclusive.
278  * Rx recycling handles different rx packets in different threads, so
279  * it acquires the lock as RW_READER and does not block any other rx
280  * recycling threads.
281  * Detach processing acquires the lock as RW_WRITER, making it mutually
282  * exclusive with rx recycling.
283  */
284 krwlock_t e1000g_rx_detach_lock;
285 /*
286  * The rwlock e1000g_dma_type_lock is defined to protect the global flag
287  * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
288  * If there are many e1000g instances, the system may run out of DVMA
289  * resources during the initialization of the instances, then the flag will
290  * be changed to "USE_DMA". Because different e1000g instances are initialized
291  * in parallel, we need to use this lock to protect the flag.
292  */
293 krwlock_t e1000g_dma_type_lock;
294 
295 /*
296  * The 82546 chipset is a dual-port device; both ports share one EEPROM.
297  * Based on information from Intel, the 82546 chipset has a hardware
298  * problem: when one port is being reset while the other port is trying to
299  * access the EEPROM, the system can hang or panic. To work around this
300  * hardware problem, we use a global mutex to prevent such operations from
301  * happening simultaneously on different instances. This workaround is
302  * applied to all the devices supported by this driver.
303  */
304 kmutex_t e1000g_nvm_lock;
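/*
 * The usage pattern throughout this file is to wrap every NVM-touching
 * shared-code call in this mutex, e.g.:
 *
 *	mutex_enter(&e1000g_nvm_lock);
 *	result = e1000_reset_hw(hw);
 *	mutex_exit(&e1000g_nvm_lock);
 */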
305 
306 /*
307  * Loadable module configuration entry points for the driver
308  */
309 
310 /*
311  * _init - module initialization
312  */
313 int
314 _init(void)
315 {
316 	int status;
317 
318 	mac_init_ops(&ws_ops, WSNAME);
319 	status = mod_install(&modlinkage);
320 	if (status != DDI_SUCCESS)
321 		mac_fini_ops(&ws_ops);
322 	else {
323 		rw_init(&e1000g_rx_detach_lock, NULL, RW_DRIVER, NULL);
324 		rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL);
325 		mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL);
326 	}
327 
328 	return (status);
329 }
330 
331 /*
332  * _fini - module finalization
333  */
334 int
335 _fini(void)
336 {
337 	int status;
338 
339 	rw_enter(&e1000g_rx_detach_lock, RW_READER);
340 	if (e1000g_mblks_pending != 0) {
341 		rw_exit(&e1000g_rx_detach_lock);
342 		return (EBUSY);
343 	}
344 	rw_exit(&e1000g_rx_detach_lock);
345 
346 	status = mod_remove(&modlinkage);
347 	if (status == DDI_SUCCESS) {
348 		mac_fini_ops(&ws_ops);
349 
350 		if (e1000g_force_detach) {
351 			private_devi_list_t *devi_node;
352 
353 			rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
354 			while (e1000g_private_devi_list != NULL) {
355 				devi_node = e1000g_private_devi_list;
356 				e1000g_private_devi_list =
357 				    e1000g_private_devi_list->next;
358 
359 				kmem_free(devi_node->priv_dip,
360 				    sizeof (struct dev_info));
361 				kmem_free(devi_node,
362 				    sizeof (private_devi_list_t));
363 			}
364 			rw_exit(&e1000g_rx_detach_lock);
365 		}
366 
367 		rw_destroy(&e1000g_rx_detach_lock);
368 		rw_destroy(&e1000g_dma_type_lock);
369 		mutex_destroy(&e1000g_nvm_lock);
370 	}
371 
372 	return (status);
373 }
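/*
 * Note that _fini() refuses to unload (EBUSY) while e1000g_mblks_pending
 * is non-zero: rx mblks loaned up to the stack still reference DMA
 * buffers owned by this module, so the module and the saved private
 * dev_info copies must stay around until those buffers come back.
 */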
374 
375 /*
376  * _info - module information
377  */
378 int
379 _info(struct modinfo *modinfop)
380 {
381 	return (mod_info(&modlinkage, modinfop));
382 }
383 
384 /*
385  * e1000g_attach - driver attach
386  *
387  * This function is the device-specific initialization entry
388  * point. This entry point is required and must be written.
389  * The DDI_ATTACH command must be provided in the attach entry
390  * point. When attach() is called with cmd set to DDI_ATTACH,
391  * all normal kernel services (such as kmem_alloc(9F)) are
392  * available for use by the driver.
393  *
394  * The attach() function will be called once for each instance
395  * of  the  device  on  the  system with cmd set to DDI_ATTACH.
396  * Until attach() succeeds, the only driver entry points which
397  * may be called are open(9E) and getinfo(9E).
398  */
399 static int
400 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
401 {
402 	struct e1000g *Adapter;
403 	struct e1000_hw *hw;
404 	struct e1000g_osdep *osdep;
405 	int instance;
406 
407 	switch (cmd) {
408 	default:
409 		e1000g_log(NULL, CE_WARN,
410 		    "Unsupported command sent to e1000g_attach... ");
411 		return (DDI_FAILURE);
412 
413 	case DDI_RESUME:
414 		return (e1000g_resume(devinfo));
415 
416 	case DDI_ATTACH:
417 		break;
418 	}
419 
420 	/*
421 	 * get device instance number
422 	 */
423 	instance = ddi_get_instance(devinfo);
424 
425 	/*
426 	 * Allocate soft data structure
427 	 */
428 	Adapter =
429 	    (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP);
430 
431 	Adapter->dip = devinfo;
432 	Adapter->instance = instance;
433 	Adapter->tx_ring->adapter = Adapter;
434 	Adapter->rx_ring->adapter = Adapter;
435 
436 	hw = &Adapter->shared;
437 	osdep = &Adapter->osdep;
438 	hw->back = osdep;
439 	osdep->adapter = Adapter;
440 
441 	ddi_set_driver_private(devinfo, (caddr_t)Adapter);
442 
443 	/*
444 	 * Initialize for fma support
445 	 */
446 	Adapter->fm_capabilities = e1000g_get_prop(Adapter, "fm-capable",
447 	    0, 0x0f,
448 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
449 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
450 	e1000g_fm_init(Adapter);
451 	Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT;
452 
453 	/*
454 	 * PCI Configure
455 	 */
456 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
457 		e1000g_log(Adapter, CE_WARN, "PCI configuration failed");
458 		goto attach_fail;
459 	}
460 	Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
461 
462 	/*
463 	 * Setup hardware
464 	 */
465 	if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) {
466 		e1000g_log(Adapter, CE_WARN, "Identify hardware failed");
467 		goto attach_fail;
468 	}
469 
470 	/*
471 	 * Map in the device registers.
472 	 */
473 	if (e1000g_regs_map(Adapter) != DDI_SUCCESS) {
474 		e1000g_log(Adapter, CE_WARN, "Mapping registers failed");
475 		goto attach_fail;
476 	}
477 	Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
478 
479 	/*
480 	 * Initialize driver parameters
481 	 */
482 	if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) {
483 		goto attach_fail;
484 	}
485 	Adapter->attach_progress |= ATTACH_PROGRESS_SETUP;
486 
487 	if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
488 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
489 		goto attach_fail;
490 	}
491 
492 	/*
493 	 * Initialize interrupts
494 	 */
495 	if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) {
496 		e1000g_log(Adapter, CE_WARN, "Add interrupts failed");
497 		goto attach_fail;
498 	}
499 	Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
500 
501 	/*
502 	 * Initialize mutexes for this device.
503 	 * Do this before enabling the interrupt handler and
504 	 * registering the softint to avoid the condition where the
505 	 * interrupt handler could use an uninitialized mutex.
506 	 */
507 	e1000g_init_locks(Adapter);
508 	Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS;
509 
510 	/*
511 	 * Initialize Driver Counters
512 	 */
513 	if (e1000g_init_stats(Adapter) != DDI_SUCCESS) {
514 		e1000g_log(Adapter, CE_WARN, "Init stats failed");
515 		goto attach_fail;
516 	}
517 	Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS;
518 
519 	/*
520 	 * Initialize chip hardware and software structures
521 	 */
522 	if (e1000g_init(Adapter) != DDI_SUCCESS) {
523 		e1000g_log(Adapter, CE_WARN, "Adapter initialization failed");
524 		goto attach_fail;
525 	}
526 	Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
527 
528 	/*
529 	 * Register the driver to the MAC
530 	 */
531 	if (e1000g_register_mac(Adapter) != DDI_SUCCESS) {
532 		e1000g_log(Adapter, CE_WARN, "Register MAC failed");
533 		goto attach_fail;
534 	}
535 	Adapter->attach_progress |= ATTACH_PROGRESS_MAC;
536 
537 	/*
538 	 * Now that mutex locks are initialized, and the chip is also
539 	 * initialized, enable interrupts.
540 	 */
541 	if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) {
542 		e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed");
543 		goto attach_fail;
544 	}
545 	Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
546 
547 	/*
548 	 * If e1000g_force_detach is enabled, create a new entry in the
549 	 * global private dip list; it maintains the priv_dip for DR
550 	 * support after the driver is detached.
551 	 */
552 	if (e1000g_force_detach) {
553 		private_devi_list_t *devi_node;
554 
555 		Adapter->priv_dip =
556 		    kmem_zalloc(sizeof (struct dev_info), KM_SLEEP);
557 		bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip),
558 		    sizeof (struct dev_info));
559 
560 		devi_node =
561 		    kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP);
562 
563 		rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
564 		devi_node->priv_dip = Adapter->priv_dip;
565 		devi_node->flag = E1000G_PRIV_DEVI_ATTACH;
566 		devi_node->next = e1000g_private_devi_list;
567 		e1000g_private_devi_list = devi_node;
568 		rw_exit(&e1000g_rx_detach_lock);
569 	}
570 
571 	cmn_err(CE_CONT, "!%s, %s\n", e1000g_string, e1000g_version);
572 
573 	return (DDI_SUCCESS);
574 
575 attach_fail:
576 	e1000g_unattach(devinfo, Adapter);
577 	return (DDI_FAILURE);
578 }
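/*
 * Each ATTACH_PROGRESS_* bit set in e1000g_attach() records a completed
 * setup step; on any failure, e1000g_unattach() walks these bits and
 * tears down only the steps that actually succeeded.
 */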
579 
580 static int
581 e1000g_register_mac(struct e1000g *Adapter)
582 {
583 	struct e1000_hw *hw = &Adapter->shared;
584 	mac_register_t *mac;
585 	int err;
586 
587 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
588 		return (DDI_FAILURE);
589 
590 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
591 	mac->m_driver = Adapter;
592 	mac->m_dip = Adapter->dip;
593 	mac->m_src_addr = hw->mac.addr;
594 	mac->m_callbacks = &e1000g_m_callbacks;
595 	mac->m_min_sdu = 0;
596 	mac->m_max_sdu = Adapter->default_mtu;
597 	mac->m_margin = VLAN_TAGSZ;
598 	mac->m_priv_props = e1000g_priv_props;
599 	mac->m_priv_prop_count = E1000G_MAX_PRIV_PROPS;
600 	mac->m_v12n = MAC_VIRT_LEVEL1;
601 
602 	err = mac_register(mac, &Adapter->mh);
603 	mac_free(mac);
604 
605 	return (err == 0 ? DDI_SUCCESS : DDI_FAILURE);
606 }
607 
608 static int
609 e1000g_identify_hardware(struct e1000g *Adapter)
610 {
611 	struct e1000_hw *hw = &Adapter->shared;
612 	struct e1000g_osdep *osdep = &Adapter->osdep;
613 
614 	/* Get the device id */
615 	hw->vendor_id =
616 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
617 	hw->device_id =
618 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
619 	hw->revision_id =
620 	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
621 	hw->subsystem_device_id =
622 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
623 	hw->subsystem_vendor_id =
624 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
625 
626 	if (e1000_set_mac_type(hw) != E1000_SUCCESS) {
627 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
628 		    "MAC type could not be set properly.");
629 		return (DDI_FAILURE);
630 	}
631 
632 	return (DDI_SUCCESS);
633 }
634 
635 static int
636 e1000g_regs_map(struct e1000g *Adapter)
637 {
638 	dev_info_t *devinfo = Adapter->dip;
639 	struct e1000_hw *hw = &Adapter->shared;
640 	struct e1000g_osdep *osdep = &Adapter->osdep;
641 	off_t mem_size;
642 
643 	/* Get size of adapter register memory */
644 	if (ddi_dev_regsize(devinfo, ADAPTER_REG_SET, &mem_size) !=
645 	    DDI_SUCCESS) {
646 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
647 		    "ddi_dev_regsize for registers failed");
648 		return (DDI_FAILURE);
649 	}
650 
651 	/* Map adapter register memory */
652 	if ((ddi_regs_map_setup(devinfo, ADAPTER_REG_SET,
653 	    (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr,
654 	    &osdep->reg_handle)) != DDI_SUCCESS) {
655 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
656 		    "ddi_regs_map_setup for registers failed");
657 		goto regs_map_fail;
658 	}
659 
660 	/* ICH needs to map flash memory */
661 	if (hw->mac.type == e1000_ich8lan ||
662 	    hw->mac.type == e1000_ich9lan ||
663 	    hw->mac.type == e1000_ich10lan) {
664 		/* get flash size */
665 		if (ddi_dev_regsize(devinfo, ICH_FLASH_REG_SET,
666 		    &mem_size) != DDI_SUCCESS) {
667 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
668 			    "ddi_dev_regsize for ICH flash failed");
669 			goto regs_map_fail;
670 		}
671 
672 		/* map flash in */
673 		if (ddi_regs_map_setup(devinfo, ICH_FLASH_REG_SET,
674 		    (caddr_t *)&hw->flash_address, 0,
675 		    mem_size, &e1000g_regs_acc_attr,
676 		    &osdep->ich_flash_handle) != DDI_SUCCESS) {
677 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
678 			    "ddi_regs_map_setup for ICH flash failed");
679 			goto regs_map_fail;
680 		}
681 	}
682 
683 	return (DDI_SUCCESS);
684 
685 regs_map_fail:
686 	if (osdep->reg_handle != NULL)
687 		ddi_regs_map_free(&osdep->reg_handle);
688 
689 	return (DDI_FAILURE);
690 }
691 
692 static int
693 e1000g_set_driver_params(struct e1000g *Adapter)
694 {
695 	struct e1000_hw *hw;
696 	uint32_t mem_bar, io_bar, bar64;
697 
698 	hw = &Adapter->shared;
699 
700 	/* Set MAC type and initialize hardware functions */
701 	if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) {
702 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
703 		    "Could not setup hardware functions");
704 		return (DDI_FAILURE);
705 	}
706 
707 	/* Get bus information */
708 	if (e1000_get_bus_info(hw) != E1000_SUCCESS) {
709 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
710 		    "Could not get bus information");
711 		return (DDI_FAILURE);
712 	}
713 
714 	/* get mem_base addr */
715 	mem_bar = pci_config_get32(Adapter->osdep.cfg_handle, PCI_CONF_BASE0);
716 	bar64 = mem_bar & PCI_BASE_TYPE_ALL;
717 
718 	/* get io_base addr */
719 	if (hw->mac.type >= e1000_82544) {
720 		if (bar64) {
721 			/* IO BAR is different for 64 bit BAR mode */
722 			io_bar = pci_config_get32(Adapter->osdep.cfg_handle,
723 			    PCI_CONF_BASE4);
724 		} else {
725 			/* normal 32-bit BAR mode */
726 			io_bar = pci_config_get32(Adapter->osdep.cfg_handle,
727 			    PCI_CONF_BASE2);
728 		}
729 		hw->io_base = io_bar & PCI_BASE_IO_ADDR_M;
730 	} else {
731 		/* no I/O access for adapters prior to 82544 */
732 		hw->io_base = 0x0;
733 	}
734 
735 	e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word);
736 
737 	hw->mac.autoneg_failed = B_TRUE;
738 
739 	/* Set the autoneg_wait_to_complete flag to B_FALSE */
740 	hw->phy.autoneg_wait_to_complete = B_FALSE;
741 
742 	/* Adaptive IFS related changes */
743 	hw->mac.adaptive_ifs = B_TRUE;
744 
745 	/* Enable phy init script for IGP phy of 82541/82547 */
746 	if ((hw->mac.type == e1000_82547) ||
747 	    (hw->mac.type == e1000_82541) ||
748 	    (hw->mac.type == e1000_82547_rev_2) ||
749 	    (hw->mac.type == e1000_82541_rev_2))
750 		e1000_init_script_state_82541(hw, B_TRUE);
751 
752 	/* Enable the TTL workaround for 82541/82547 */
753 	e1000_set_ttl_workaround_state_82541(hw, B_TRUE);
754 
755 #ifdef __sparc
756 	Adapter->strip_crc = B_TRUE;
757 #else
758 	Adapter->strip_crc = B_FALSE;
759 #endif
760 
761 	/* Get conf file properties */
762 	e1000g_get_conf(Adapter);
763 
764 	/* Get speed/duplex settings in conf file */
765 	hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
766 	hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
767 	e1000g_force_speed_duplex(Adapter);
768 
769 	/* Get Jumbo Frames settings in conf file */
770 	e1000g_get_max_frame_size(Adapter);
771 
772 	/* Set Rx/Tx buffer size */
773 	e1000g_set_bufsize(Adapter);
774 
775 	/* Master Latency Timer */
776 	Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER;
777 
778 	/* copper options */
779 	if (hw->phy.media_type == e1000_media_type_copper) {
780 		hw->phy.mdix = 0;	/* AUTO_ALL_MODES */
781 		hw->phy.disable_polarity_correction = B_FALSE;
782 		hw->phy.ms_type = e1000_ms_hw_default;	/* E1000_MASTER_SLAVE */
783 	}
784 
785 	/* The initial link state should be "unknown" */
786 	Adapter->link_state = LINK_STATE_UNKNOWN;
787 
788 	/* Initialize rx parameters */
789 	Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY;
790 	Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY;
791 
792 	/* Initialize tx parameters */
793 	Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE;
794 	Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD;
795 	Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY;
796 	Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY;
797 
798 	/* Initialize rx parameters */
799 	Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD;
800 
801 	return (DDI_SUCCESS);
802 }
803 
804 static void
805 e1000g_set_bufsize(struct e1000g *Adapter)
806 {
807 	struct e1000_mac_info *mac = &Adapter->shared.mac;
808 	uint64_t rx_size;
809 	uint64_t tx_size;
810 
811 	dev_info_t *devinfo = Adapter->dip;
812 #ifdef __sparc
813 	ulong_t iommu_pagesize;
814 #endif
815 	/* Get the system page size */
816 	Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1);
817 
818 #ifdef __sparc
819 	iommu_pagesize = dvma_pagesize(devinfo);
820 	if (iommu_pagesize != 0) {
821 		if (Adapter->sys_page_sz == iommu_pagesize) {
822 			if (iommu_pagesize > 0x4000)
823 				Adapter->sys_page_sz = 0x4000;
824 		} else {
825 			if (Adapter->sys_page_sz > iommu_pagesize)
826 				Adapter->sys_page_sz = iommu_pagesize;
827 		}
828 	}
829 	if (Adapter->lso_enable) {
830 		Adapter->dvma_page_num = E1000_LSO_MAXLEN /
831 		    Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
832 	} else {
833 		Adapter->dvma_page_num = Adapter->max_frame_size /
834 		    Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
835 	}
836 	ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM);
837 #endif
838 
839 	Adapter->min_frame_size = ETHERMIN + ETHERFCSL;
840 
841 	if ((mac->type == e1000_82545) ||
842 	    (mac->type == e1000_82546) ||
843 	    (mac->type == e1000_82546_rev_3)) {
844 		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
845 	} else {
846 		rx_size = Adapter->max_frame_size + E1000G_IPALIGNPRESERVEROOM;
847 		if ((rx_size > FRAME_SIZE_UPTO_2K) &&
848 		    (rx_size <= FRAME_SIZE_UPTO_4K))
849 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
850 		else if ((rx_size > FRAME_SIZE_UPTO_4K) &&
851 		    (rx_size <= FRAME_SIZE_UPTO_8K))
852 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
853 		else if ((rx_size > FRAME_SIZE_UPTO_8K) &&
854 		    (rx_size <= FRAME_SIZE_UPTO_16K))
855 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
856 		else
857 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
858 	}
859 
860 	tx_size = Adapter->max_frame_size;
861 	if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K))
862 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
863 	else if ((tx_size > FRAME_SIZE_UPTO_4K) &&
864 	    (tx_size <= FRAME_SIZE_UPTO_8K))
865 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
866 	else if ((tx_size > FRAME_SIZE_UPTO_8K) &&
867 	    (tx_size <= FRAME_SIZE_UPTO_16K))
868 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
869 	else
870 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;
871 
872 	/*
873 	 * For Wiseman adapters we have a requirement of having
874 	 * receive buffers aligned on a 256-byte boundary. Since
875 	 * Livengood does not require this, and forcing it for all
876 	 * hardware would have performance implications, the alignment
877 	 * is applied only to Wiseman and to the jumbo-frames-enabled
878 	 * mode; the rest of the time normal frames are fine. However,
879 	 * there is a potential risk of losing data if a buffer is not
880 	 * aligned, so all Wiseman boards are required to have
881 	 * 256-byte aligned buffers.
882 	 */
883 	if (mac->type < e1000_82543)
884 		Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
885 	else if ((mac->type == e1000_82545) ||
886 	    (mac->type == e1000_82546) ||
887 	    (mac->type == e1000_82546_rev_3))
888 		Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE_82546;
889 	else
890 		Adapter->rx_buf_align = 1;
891 }
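/*
 * A rough sizing sketch (assuming the FRAME_SIZE_UPTO_* constants are
 * 2K/4K/8K/16K): with a standard 1500-byte MTU the frame plus alignment
 * room typically stays at or under 2K, so 2K rx/tx buffers are used,
 * while a 9000-byte jumbo MTU falls into the 16K bucket on adapters
 * other than the 82545/82546 family, which always use 2K rx buffers and
 * reassemble larger frames from multiple buffers.
 */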
892 
893 /*
894  * e1000g_detach - driver detach
895  *
896  * The detach() function is the complement of the attach routine.
897  * If cmd is set to DDI_DETACH, detach() is used to remove  the
898  * state  associated  with  a  given  instance of a device node
899  * prior to the removal of that instance from the system.
900  *
901  * The detach() function will be called once for each  instance
902  * of the device for which there has been a successful attach()
903  * once there are no longer  any  opens  on  the  device.
904  *
905  * Interrupt routines are disabled, and all memory allocated by this
906  * driver is freed.
907  */
908 static int
909 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
910 {
911 	struct e1000g *Adapter;
912 	boolean_t rx_drain;
913 
914 	switch (cmd) {
915 	default:
916 		return (DDI_FAILURE);
917 
918 	case DDI_SUSPEND:
919 		return (e1000g_suspend(devinfo));
920 
921 	case DDI_DETACH:
922 		break;
923 	}
924 
925 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
926 	if (Adapter == NULL)
927 		return (DDI_FAILURE);
928 
929 	rx_drain = e1000g_rx_drain(Adapter);
930 	if (!rx_drain && !e1000g_force_detach)
931 		return (DDI_FAILURE);
932 
933 	if (mac_unregister(Adapter->mh) != 0) {
934 		e1000g_log(Adapter, CE_WARN, "Unregister MAC failed");
935 		return (DDI_FAILURE);
936 	}
937 	Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC;
938 
939 	ASSERT(Adapter->chip_state == E1000G_STOP);
940 
941 	/*
942 	 * If e1000g_force_detach is enabled, driver detach is safe.
943 	 * We will let e1000g_free_priv_devi_node routine determine
944 	 * whether we need to free the priv_dip entry for current
945 	 * driver instance.
946 	 */
947 	if (e1000g_force_detach) {
948 		e1000g_free_priv_devi_node(Adapter, rx_drain);
949 	}
950 
951 	e1000g_unattach(devinfo, Adapter);
952 
953 	return (DDI_SUCCESS);
954 }
955 
956 /*
957  * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance
958  *
959  * If free_flag is true, the upper layer is not holding the rx buffers,
960  * so we can free the priv_dip entry safely.
961  *
962  * Otherwise, we have to keep this entry even after the driver is
963  * detached, and mark it with the E1000G_PRIV_DEVI_DETACH flag so that
964  * the driver can free it later, once all rx buffers have been returned
965  * by the upper layer.
966  */
967 static void
968 e1000g_free_priv_devi_node(struct e1000g *Adapter, boolean_t free_flag)
969 {
970 	private_devi_list_t *devi_node, *devi_del;
971 
972 	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
973 	ASSERT(e1000g_private_devi_list != NULL);
974 	ASSERT(Adapter->priv_dip != NULL);
975 
976 	devi_node = e1000g_private_devi_list;
977 	if (devi_node->priv_dip == Adapter->priv_dip) {
978 		if (free_flag) {
979 			e1000g_private_devi_list =
980 			    devi_node->next;
981 			kmem_free(devi_node->priv_dip,
982 			    sizeof (struct dev_info));
983 			kmem_free(devi_node,
984 			    sizeof (private_devi_list_t));
985 		} else {
986 			ASSERT(e1000g_mblks_pending != 0);
987 			devi_node->flag =
988 			    E1000G_PRIV_DEVI_DETACH;
989 		}
990 		rw_exit(&e1000g_rx_detach_lock);
991 		return;
992 	}
993 
994 	devi_node = e1000g_private_devi_list;
995 	while (devi_node->next != NULL) {
996 		if (devi_node->next->priv_dip == Adapter->priv_dip) {
997 			if (free_flag) {
998 				devi_del = devi_node->next;
999 				devi_node->next = devi_del->next;
1000 				kmem_free(devi_del->priv_dip,
1001 				    sizeof (struct dev_info));
1002 				kmem_free(devi_del,
1003 				    sizeof (private_devi_list_t));
1004 			} else {
1005 				ASSERT(e1000g_mblks_pending != 0);
1006 				devi_node->next->flag =
1007 				    E1000G_PRIV_DEVI_DETACH;
1008 			}
1009 			break;
1010 		}
1011 		devi_node = devi_node->next;
1012 	}
1013 	rw_exit(&e1000g_rx_detach_lock);
1014 }
1015 
1016 static void
1017 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter)
1018 {
1019 	int result;
1020 
1021 	if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1022 		(void) e1000g_disable_intrs(Adapter);
1023 	}
1024 
1025 	if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) {
1026 		(void) mac_unregister(Adapter->mh);
1027 	}
1028 
1029 	if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1030 		(void) e1000g_rem_intrs(Adapter);
1031 	}
1032 
1033 	if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) {
1034 		(void) ddi_prop_remove_all(devinfo);
1035 	}
1036 
1037 	if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) {
1038 		kstat_delete((kstat_t *)Adapter->e1000g_ksp);
1039 	}
1040 
1041 	if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) {
1042 		stop_link_timer(Adapter);
1043 
1044 		mutex_enter(&e1000g_nvm_lock);
1045 		result = e1000_reset_hw(&Adapter->shared);
1046 		mutex_exit(&e1000g_nvm_lock);
1047 
1048 		if (result != E1000_SUCCESS) {
1049 			e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1050 			ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1051 		}
1052 	}
1053 
1054 	if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
1055 		if (Adapter->osdep.reg_handle != NULL)
1056 			ddi_regs_map_free(&Adapter->osdep.reg_handle);
1057 		if (Adapter->osdep.ich_flash_handle != NULL)
1058 			ddi_regs_map_free(&Adapter->osdep.ich_flash_handle);
1059 	}
1060 
1061 	if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
1062 		if (Adapter->osdep.cfg_handle != NULL)
1063 			pci_config_teardown(&Adapter->osdep.cfg_handle);
1064 	}
1065 
1066 	if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) {
1067 		e1000g_destroy_locks(Adapter);
1068 	}
1069 
1070 	if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) {
1071 		e1000g_fm_fini(Adapter);
1072 	}
1073 
1074 	e1000_remove_device(&Adapter->shared);
1075 
1076 	kmem_free((caddr_t)Adapter, sizeof (struct e1000g));
1077 
1078 	/*
1079 	 * Another hotplug spec requirement:
1080 	 * run ddi_set_driver_private(devinfo, NULL);
1081 	 */
1082 	ddi_set_driver_private(devinfo, NULL);
1083 }
1084 
1085 static void
1086 e1000g_init_locks(struct e1000g *Adapter)
1087 {
1088 	e1000g_tx_ring_t *tx_ring;
1089 	e1000g_rx_ring_t *rx_ring;
1090 
1091 	rw_init(&Adapter->chip_lock, NULL,
1092 	    RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1093 	mutex_init(&Adapter->link_lock, NULL,
1094 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1095 	mutex_init(&Adapter->watchdog_lock, NULL,
1096 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1097 
1098 	tx_ring = Adapter->tx_ring;
1099 
1100 	mutex_init(&tx_ring->tx_lock, NULL,
1101 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1102 	mutex_init(&tx_ring->usedlist_lock, NULL,
1103 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1104 	mutex_init(&tx_ring->freelist_lock, NULL,
1105 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1106 
1107 	rx_ring = Adapter->rx_ring;
1108 
1109 	mutex_init(&rx_ring->rx_lock, NULL,
1110 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1111 	mutex_init(&rx_ring->freelist_lock, NULL,
1112 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1113 	mutex_init(&rx_ring->recycle_lock, NULL,
1114 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1115 }
1116 
1117 static void
1118 e1000g_destroy_locks(struct e1000g *Adapter)
1119 {
1120 	e1000g_tx_ring_t *tx_ring;
1121 	e1000g_rx_ring_t *rx_ring;
1122 
1123 	tx_ring = Adapter->tx_ring;
1124 	mutex_destroy(&tx_ring->tx_lock);
1125 	mutex_destroy(&tx_ring->usedlist_lock);
1126 	mutex_destroy(&tx_ring->freelist_lock);
1127 
1128 	rx_ring = Adapter->rx_ring;
1129 	mutex_destroy(&rx_ring->rx_lock);
1130 	mutex_destroy(&rx_ring->freelist_lock);
1131 	mutex_destroy(&rx_ring->recycle_lock);
1132 
1133 	mutex_destroy(&Adapter->link_lock);
1134 	mutex_destroy(&Adapter->watchdog_lock);
1135 	rw_destroy(&Adapter->chip_lock);
1136 }
1137 
1138 static int
1139 e1000g_resume(dev_info_t *devinfo)
1140 {
1141 	struct e1000g *Adapter;
1142 
1143 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1144 	if (Adapter == NULL)
1145 		return (DDI_FAILURE);
1146 
1147 	if (e1000g_start(Adapter, B_TRUE))
1148 		return (DDI_FAILURE);
1149 
1150 	return (DDI_SUCCESS);
1151 }
1152 
1153 static int
1154 e1000g_suspend(dev_info_t *devinfo)
1155 {
1156 	struct e1000g *Adapter;
1157 
1158 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1159 	if (Adapter == NULL)
1160 		return (DDI_FAILURE);
1161 
1162 	e1000g_stop(Adapter, B_TRUE);
1163 
1164 	return (DDI_SUCCESS);
1165 }
1166 
1167 static int
1168 e1000g_init(struct e1000g *Adapter)
1169 {
1170 	uint32_t pba;
1171 	uint32_t high_water;
1172 	struct e1000_hw *hw;
1173 	clock_t link_timeout;
1174 	int result;
1175 
1176 	hw = &Adapter->shared;
1177 
1178 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1179 
1180 	/*
1181 	 * reset to put the hardware in a known state
1182 	 * before we try to do anything with the eeprom
1183 	 */
1184 	mutex_enter(&e1000g_nvm_lock);
1185 	result = e1000_reset_hw(hw);
1186 	mutex_exit(&e1000g_nvm_lock);
1187 
1188 	if (result != E1000_SUCCESS) {
1189 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1190 		goto init_fail;
1191 	}
1192 
1193 	mutex_enter(&e1000g_nvm_lock);
1194 	result = e1000_validate_nvm_checksum(hw);
1195 	if (result < E1000_SUCCESS) {
1196 		/*
1197 		 * Some PCI-E parts fail the first check due to
1198 		 * the link being in sleep state.  Call it again;
1199 		 * if it fails a second time, it's a real issue.
1200 		 */
1201 		result = e1000_validate_nvm_checksum(hw);
1202 	}
1203 	mutex_exit(&e1000g_nvm_lock);
1204 
1205 	if (result < E1000_SUCCESS) {
1206 		e1000g_log(Adapter, CE_WARN,
1207 		    "Invalid NVM checksum. Please contact "
1208 		    "the vendor to update the NVM.");
1209 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1210 		goto init_fail;
1211 	}
1212 
1213 	result = 0;
1214 #ifdef __sparc
1215 	/*
1216 	 * First, we try to get the local Ethernet address from OBP. If
1217 	 * that fails, we get it from the EEPROM of the NIC.
1218 	 */
1219 	result = e1000g_find_mac_address(Adapter);
1220 #endif
1221 	/* Get the local ethernet address. */
1222 	if (!result) {
1223 		mutex_enter(&e1000g_nvm_lock);
1224 		result = e1000_read_mac_addr(hw);
1225 		mutex_exit(&e1000g_nvm_lock);
1226 	}
1227 
1228 	if (result < E1000_SUCCESS) {
1229 		e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
1230 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1231 		goto init_fail;
1232 	}
1233 
1234 	/* check for valid mac address */
1235 	if (!is_valid_mac_addr(hw->mac.addr)) {
1236 		e1000g_log(Adapter, CE_WARN, "Invalid mac addr");
1237 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1238 		goto init_fail;
1239 	}
1240 
1241 	/* Set LAA state for 82571 chipset */
1242 	e1000_set_laa_state_82571(hw, B_TRUE);
1243 
1244 	/* Master Latency Timer implementation */
1245 	if (Adapter->master_latency_timer) {
1246 		pci_config_put8(Adapter->osdep.cfg_handle,
1247 		    PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer);
1248 	}
1249 
1250 	if (hw->mac.type < e1000_82547) {
1251 		/*
1252 		 * Total FIFO is 64K
1253 		 */
1254 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1255 			pba = E1000_PBA_40K;	/* 40K for Rx, 24K for Tx */
1256 		else
1257 			pba = E1000_PBA_48K;	/* 48K for Rx, 16K for Tx */
1258 	} else if ((hw->mac.type == e1000_82571) ||
1259 	    (hw->mac.type == e1000_82572) ||
1260 	    (hw->mac.type == e1000_80003es2lan)) {
1261 		/*
1262 		 * Total FIFO is 48K
1263 		 */
1264 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1265 			pba = E1000_PBA_30K;	/* 30K for Rx, 18K for Tx */
1266 		else
1267 			pba = E1000_PBA_38K;	/* 38K for Rx, 10K for Tx */
1268 	} else if (hw->mac.type == e1000_82573) {
1269 		pba = E1000_PBA_20K;		/* 20K for Rx, 12K for Tx */
1270 	} else if (hw->mac.type == e1000_82574) {
1271 		/* Keep adapter default: 20K for Rx, 20K for Tx */
1272 		pba = E1000_READ_REG(hw, E1000_PBA);
1273 	} else if (hw->mac.type == e1000_ich8lan) {
1274 		pba = E1000_PBA_8K;		/* 8K for Rx, 12K for Tx */
1275 	} else if (hw->mac.type == e1000_ich9lan) {
1276 		pba = E1000_PBA_10K;
1277 	} else if (hw->mac.type == e1000_ich10lan) {
1278 		pba = E1000_PBA_10K;
1279 	} else {
1280 		/*
1281 		 * Total FIFO is 40K
1282 		 */
1283 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1284 			pba = E1000_PBA_22K;	/* 22K for Rx, 18K for Tx */
1285 		else
1286 			pba = E1000_PBA_30K;	/* 30K for Rx, 10K for Tx */
1287 	}
1288 	E1000_WRITE_REG(hw, E1000_PBA, pba);
1289 
1290 	/*
1291 	 * These parameters set thresholds for the adapter's generation (Tx)
1292 	 * and response (Rx) to Ethernet PAUSE frames.  These are just threshold
1293 	 * settings.  Flow control is enabled or disabled in the configuration
1294 	 * file.
1295 	 * High-water mark is set down from the top of the rx fifo (not
1296 	 * sensitive to max_frame_size) and low-water is set just below
1297 	 * high-water mark.
1298 	 * The high water mark must be low enough to fit one full frame above
1299 	 * it in the rx FIFO.  Should be the lower of:
1300 	 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early
1301 	 * receive size (assuming ERT set to E1000_ERT_2048), or the full
1302 	 * Rx FIFO size minus one full frame.
1303 	 */
1304 	high_water = min(((pba << 10) * 9 / 10),
1305 	    ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_ich9lan ||
1306 	    hw->mac.type == e1000_ich10lan) ?
1307 	    ((pba << 10) - (E1000_ERT_2048 << 3)) :
1308 	    ((pba << 10) - Adapter->max_frame_size)));
1309 
1310 	hw->fc.high_water = high_water & 0xFFF8;
1311 	hw->fc.low_water = hw->fc.high_water - 8;
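	/*
	 * Worked example (illustrative, assuming E1000_PBA_48K == 48 so
	 * pba << 10 == 49152 bytes) for a MAC that uses the
	 * max_frame_size variant with a 1522-byte max frame:
	 *   min(49152 * 9 / 10, 49152 - 1522) = min(44236, 47630) = 44236
	 * so high_water = 44236 & 0xFFF8 = 44232 and low_water = 44224.
	 */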
1312 
1313 	if (hw->mac.type == e1000_80003es2lan)
1314 		hw->fc.pause_time = 0xFFFF;
1315 	else
1316 		hw->fc.pause_time = E1000_FC_PAUSE_TIME;
1317 	hw->fc.send_xon = B_TRUE;
1318 
1319 	/*
1320 	 * Reset the adapter hardware the second time.
1321 	 */
1322 	mutex_enter(&e1000g_nvm_lock);
1323 	result = e1000_reset_hw(hw);
1324 	mutex_exit(&e1000g_nvm_lock);
1325 
1326 	if (result != E1000_SUCCESS) {
1327 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1328 		goto init_fail;
1329 	}
1330 
1331 	/* disable wakeup control by default */
1332 	if (hw->mac.type >= e1000_82544)
1333 		E1000_WRITE_REG(hw, E1000_WUC, 0);
1334 
1335 	/*
1336 	 * MWI should be disabled on 82546.
1337 	 */
1338 	if (hw->mac.type == e1000_82546)
1339 		e1000_pci_clear_mwi(hw);
1340 	else
1341 		e1000_pci_set_mwi(hw);
1342 
1343 	/*
1344 	 * Configure/Initialize hardware
1345 	 */
1346 	mutex_enter(&e1000g_nvm_lock);
1347 	result = e1000_init_hw(hw);
1348 	mutex_exit(&e1000g_nvm_lock);
1349 
1350 	if (result < E1000_SUCCESS) {
1351 		e1000g_log(Adapter, CE_WARN, "Initialize hw failed");
1352 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1353 		goto init_fail;
1354 	}
1355 
1356 	/*
1357 	 * Restore LED settings to the default from EEPROM
1358 	 * to meet the standard for Sun platforms.
1359 	 */
1360 	(void) e1000_cleanup_led(hw);
1361 
1362 	/* Disable Smart Power Down */
1363 	phy_spd_state(hw, B_FALSE);
1364 
1365 	/* Make sure driver has control */
1366 	e1000g_get_driver_control(hw);
1367 
1368 	/*
1369 	 * Initialize unicast addresses.
1370 	 */
1371 	e1000g_init_unicst(Adapter);
1372 
1373 	/*
1374 	 * Set up and initialize the mctable structures.  After this routine
1375 	 * completes, the multicast table will be set up.
1376 	 */
1377 	e1000g_setup_multicast(Adapter);
1378 	msec_delay(5);
1379 
1380 	/*
1381 	 * Implement Adaptive IFS
1382 	 */
1383 	e1000_reset_adaptive(hw);
1384 
1385 	/* Setup Interrupt Throttling Register */
1386 	if (hw->mac.type >= e1000_82540) {
1387 		E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate);
1388 	} else
1389 		Adapter->intr_adaptive = B_FALSE;
1390 
1391 	/* Start the timer for link setup */
1392 	if (hw->mac.autoneg)
1393 		link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000);
1394 	else
1395 		link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000);
1396 
1397 	mutex_enter(&Adapter->link_lock);
1398 	if (hw->phy.autoneg_wait_to_complete) {
1399 		Adapter->link_complete = B_TRUE;
1400 	} else {
1401 		Adapter->link_complete = B_FALSE;
1402 		Adapter->link_tid = timeout(e1000g_link_timer,
1403 		    (void *)Adapter, link_timeout);
1404 	}
1405 	mutex_exit(&Adapter->link_lock);
1406 
1407 	/* Enable PCI-Ex master */
1408 	if (hw->bus.type == e1000_bus_type_pci_express) {
1409 		e1000_enable_pciex_master(hw);
1410 	}
1411 
1412 	/* Save the state of the phy */
1413 	e1000g_get_phy_state(Adapter);
1414 
1415 	e1000g_param_sync(Adapter);
1416 
1417 	Adapter->init_count++;
1418 
1419 	if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
1420 		goto init_fail;
1421 	}
1422 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1423 		goto init_fail;
1424 	}
1425 
1426 	Adapter->poll_mode = e1000g_poll_mode;
1427 
1428 	rw_exit(&Adapter->chip_lock);
1429 
1430 	return (DDI_SUCCESS);
1431 
1432 init_fail:
1433 	rw_exit(&Adapter->chip_lock);
1434 	ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1435 	return (DDI_FAILURE);
1436 }
1437 
1438 /*
1439  * Check if the link is up
1440  */
1441 static boolean_t
1442 e1000g_link_up(struct e1000g *Adapter)
1443 {
1444 	struct e1000_hw *hw;
1445 	boolean_t link_up;
1446 
1447 	hw = &Adapter->shared;
1448 
1449 	(void) e1000_check_for_link(hw);
1450 
1451 	if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU) ||
1452 	    ((!hw->mac.get_link_status) && (hw->mac.type == e1000_82543)) ||
1453 	    ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
1454 	    (hw->mac.serdes_has_link))) {
1455 		link_up = B_TRUE;
1456 	} else {
1457 		link_up = B_FALSE;
1458 	}
1459 
1460 	return (link_up);
1461 }
1462 
1463 static void
1464 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
1465 {
1466 	struct iocblk *iocp;
1467 	struct e1000g *e1000gp;
1468 	enum ioc_reply status;
1469 
1470 	iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
1471 	iocp->ioc_error = 0;
1472 	e1000gp = (struct e1000g *)arg;
1473 
1474 	ASSERT(e1000gp);
1475 	if (e1000gp == NULL) {
1476 		miocnak(q, mp, 0, EINVAL);
1477 		return;
1478 	}
1479 
1480 	switch (iocp->ioc_cmd) {
1481 
1482 	case LB_GET_INFO_SIZE:
1483 	case LB_GET_INFO:
1484 	case LB_GET_MODE:
1485 	case LB_SET_MODE:
1486 		status = e1000g_loopback_ioctl(e1000gp, iocp, mp);
1487 		break;
1488 
1489 
1490 #ifdef E1000G_DEBUG
1491 	case E1000G_IOC_REG_PEEK:
1492 	case E1000G_IOC_REG_POKE:
1493 		status = e1000g_pp_ioctl(e1000gp, iocp, mp);
1494 		break;
1495 	case E1000G_IOC_CHIP_RESET:
1496 		e1000gp->reset_count++;
1497 		if (e1000g_reset_adapter(e1000gp))
1498 			status = IOC_ACK;
1499 		else
1500 			status = IOC_INVAL;
1501 		break;
1502 #endif
1503 	default:
1504 		status = IOC_INVAL;
1505 		break;
1506 	}
1507 
1508 	/*
1509 	 * Decide how to reply
1510 	 */
1511 	switch (status) {
1512 	default:
1513 	case IOC_INVAL:
1514 		/*
1515 		 * Error, reply with a NAK and EINVAL or the specified error
1516 		 */
1517 		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
1518 		    EINVAL : iocp->ioc_error);
1519 		break;
1520 
1521 	case IOC_DONE:
1522 		/*
1523 		 * OK, reply already sent
1524 		 */
1525 		break;
1526 
1527 	case IOC_ACK:
1528 		/*
1529 		 * OK, reply with an ACK
1530 		 */
1531 		miocack(q, mp, 0, 0);
1532 		break;
1533 
1534 	case IOC_REPLY:
1535 		/*
1536 		 * OK, send prepared reply as ACK or NAK
1537 		 */
1538 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
1539 		    M_IOCACK : M_IOCNAK;
1540 		qreply(q, mp);
1541 		break;
1542 	}
1543 }
1544 
1545 /*
1546  * The default value of e1000g_poll_mode == 0 assumes that the NIC is
1547  * capable of supporting only one interrupt and we shouldn't disable
1548  * the physical interrupt. In this case we let the interrupt come and
1549  * we queue the packets in the rx ring itself in case we are in polling
1550  * mode (better latency but slightly lower performance and a very
1551  * high interrupt count in mpstat, which is harmless).
1552  *
1553  * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt
1554  * which can be disabled in poll mode. This gives better overall
1555  * throughput (compared to the mode above), shows very low interrupt
1556  * count but has slightly higher latency since we pick the packets when
1557  * the poll thread does polling.
1558  *
1559  * Currently, this flag should be enabled only while doing performance
1560  * measurement or when it can be guaranteed that the entire NIC going
1561  * into poll mode will not harm any traffic, such as cluster heartbeats.
1562  */
1563 int e1000g_poll_mode = 0;
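/*
 * e1000g_poll_mode is a plain module global, so for experiments it can
 * be set at boot time via /etc/system, for example:
 *	set e1000g:e1000g_poll_mode = 1
 * (Illustrative only; see the warning above about when this is safe.)
 */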
1564 
1565 /*
1566  * Called from the upper layers when driver is in polling mode to
1567  * pick up any queued packets. Care should be taken to not block
1568  * this thread.
1569  */
1570 static mblk_t *e1000g_poll_ring(void *arg, int bytes_to_pickup)
1571 {
1572 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)arg;
1573 	mblk_t			*mp = NULL;
1574 	mblk_t			*tail;
1575 	uint_t			sz = 0;
1576 	struct e1000g 		*adapter;
1577 
1578 	adapter = rx_ring->adapter;
1579 
1580 	mutex_enter(&rx_ring->rx_lock);
1581 	ASSERT(rx_ring->poll_flag);
1582 
1583 	/*
1584 	 * Get any packets that have arrived. Works only if we
1585 	 * actually disable the physical adapter/rx_ring interrupt.
1586 	 * (e1000g_poll_mode == 1). In case e1000g_poll_mode == 0,
1587 	 * packets will have already been added to the poll list
1588 	 * by the interrupt (see e1000g_intr_work()).
1589 	 */
1590 	if (adapter->poll_mode) {
1591 		mp = e1000g_receive(rx_ring, &tail, &sz);
1592 		if (mp != NULL) {
1593 			if (rx_ring->poll_list_head == NULL)
1594 				rx_ring->poll_list_head = mp;
1595 			else
1596 				rx_ring->poll_list_tail->b_next = mp;
1597 			rx_ring->poll_list_tail = tail;
1598 			rx_ring->poll_list_sz += sz;
1599 		}
1600 	}
1601 
1602 	mp = rx_ring->poll_list_head;
1603 	if (mp == NULL) {
1604 		mutex_exit(&rx_ring->rx_lock);
1605 		return (NULL);
1606 	}
1607 
1608 	/* Check if we can sendup the entire chain */
1609 	if (bytes_to_pickup >= rx_ring->poll_list_sz) {
1610 		mp = rx_ring->poll_list_head;
1611 		rx_ring->poll_list_head = NULL;
1612 		rx_ring->poll_list_tail = NULL;
1613 		rx_ring->poll_list_sz = 0;
1614 		mutex_exit(&rx_ring->rx_lock);
1615 		return (mp);
1616 	}
1617 
1618 	/*
1619 	 * We need to find out how much of the chain we can send up. We
1620 	 * are guaranteed that at least one packet will go up since
1621 	 * we already checked that.
1622 	 */
1623 	tail = mp;
1624 	sz = 0;
1625 	while (mp != NULL) {
1626 		sz += MBLKL(mp);
1627 		if (sz > bytes_to_pickup) {
1628 			sz -= MBLKL(mp);
1629 			break;
1630 		}
1631 		tail = mp;
1632 		mp = mp->b_next;
1633 	}
1634 
1635 	mp = rx_ring->poll_list_head;
1636 	rx_ring->poll_list_head = tail->b_next;
1637 	if (rx_ring->poll_list_head == NULL)
1638 		rx_ring->poll_list_tail = NULL;
1639 	rx_ring->poll_list_sz -= sz;
1640 	tail->b_next = NULL;
1641 	mutex_exit(&rx_ring->rx_lock);
1642 	return (mp);
1643 }
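/*
 * For example (illustrative numbers): if the poll list holds three
 * 1000-byte packets and bytes_to_pickup is 2500, the loop above stops
 * after the second packet (sz would grow to 3000, exceeding the budget,
 * so the third packet's length is subtracted back out).  The first two
 * packets are returned, the third stays on the poll list, and
 * poll_list_sz drops by 2000.
 */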
1644 
1645 static int
1646 e1000g_m_start(void *arg)
1647 {
1648 	struct e1000g *Adapter = (struct e1000g *)arg;
1649 
1650 	return (e1000g_start(Adapter, B_TRUE));
1651 }
1652 
1653 static int
1654 e1000g_start(struct e1000g *Adapter, boolean_t global)
1655 {
1656 	if (global) {
1657 		/* Allocate dma resources for descriptors and buffers */
1658 		if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) {
1659 			e1000g_log(Adapter, CE_WARN,
1660 			    "Alloc DMA resources failed");
1661 			return (ENOTACTIVE);
1662 		}
1663 		Adapter->rx_buffer_setup = B_FALSE;
1664 	}
1665 
1666 	if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) {
1667 		if (e1000g_init(Adapter) != DDI_SUCCESS) {
1668 			e1000g_log(Adapter, CE_WARN,
1669 			    "Adapter initialization failed");
1670 			if (global)
1671 				e1000g_release_dma_resources(Adapter);
1672 			return (ENOTACTIVE);
1673 		}
1674 	}
1675 
1676 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1677 
1678 	/* Setup and initialize the transmit structures */
1679 	e1000g_tx_setup(Adapter);
1680 	msec_delay(5);
1681 
1682 	/* Setup and initialize the receive structures */
1683 	e1000g_rx_setup(Adapter);
1684 	msec_delay(5);
1685 
1686 	/* Restore the e1000g promiscuous mode */
1687 	e1000g_restore_promisc(Adapter);
1688 
1689 	e1000g_mask_interrupt(Adapter);
1690 
1691 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1692 		rw_exit(&Adapter->chip_lock);
1693 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1694 		return (ENOTACTIVE);
1695 	}
1696 
1697 	Adapter->chip_state = E1000G_START;
1698 	Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
1699 
1700 	rw_exit(&Adapter->chip_lock);
1701 
1702 	/* Enable and start the watchdog timer */
1703 	enable_watchdog_timer(Adapter);
1704 
1705 	return (0);
1706 }
1707 
1708 static void
1709 e1000g_m_stop(void *arg)
1710 {
1711 	struct e1000g *Adapter = (struct e1000g *)arg;
1712 
1713 	e1000g_stop(Adapter, B_TRUE);
1714 }
1715 
1716 static void
1717 e1000g_stop(struct e1000g *Adapter, boolean_t global)
1718 {
1719 	int result;
1720 
1721 	/* Set stop flags */
1722 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1723 
1724 	Adapter->chip_state = E1000G_STOP;
1725 	Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT;
1726 
1727 	rw_exit(&Adapter->chip_lock);
1728 
1729 	/* Drain tx sessions */
1730 	(void) e1000g_tx_drain(Adapter);
1731 
1732 	/* Disable and stop all the timers */
1733 	disable_watchdog_timer(Adapter);
1734 	stop_link_timer(Adapter);
1735 	stop_82547_timer(Adapter->tx_ring);
1736 
1737 	/* Stop the chip and release pending resources */
1738 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1739 
1740 	/* Tell firmware driver is no longer in control */
1741 	e1000g_release_driver_control(&Adapter->shared);
1742 
1743 	e1000g_clear_all_interrupts(Adapter);
1744 
1745 	mutex_enter(&e1000g_nvm_lock);
1746 	result = e1000_reset_hw(&Adapter->shared);
1747 	mutex_exit(&e1000g_nvm_lock);
1748 
1749 	if (result != E1000_SUCCESS) {
1750 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1751 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1752 	}
1753 
1754 	/* Release resources still held by the TX descriptors */
1755 	e1000g_tx_clean(Adapter);
1756 
1757 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
1758 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1759 
1760 	/* Clean the pending rx jumbo packet fragment */
1761 	e1000g_rx_clean(Adapter);
1762 
1763 	rw_exit(&Adapter->chip_lock);
1764 
1765 	if (global)
1766 		e1000g_release_dma_resources(Adapter);
1767 }
1768 
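/*
 * e1000g_rx_clean - free the fragment of a pending rx jumbo packet
 */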
1769 static void
1770 e1000g_rx_clean(struct e1000g *Adapter)
1771 {
1772 	e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
1773 
1774 	if (rx_ring->rx_mblk != NULL) {
1775 		freemsg(rx_ring->rx_mblk);
1776 		rx_ring->rx_mblk = NULL;
1777 		rx_ring->rx_mblk_tail = NULL;
1778 		rx_ring->rx_mblk_len = 0;
1779 	}
1780 }
1781 
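/*
 * e1000g_tx_clean - release resources held by the Tx descriptors
 *
 * Walks the used list, detaches and frees the pending message chains,
 * returns the software packets to the free list, and resets the
 * software and hardware descriptor pointers.
 */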
1782 static void
1783 e1000g_tx_clean(struct e1000g *Adapter)
1784 {
1785 	e1000g_tx_ring_t *tx_ring;
1786 	p_tx_sw_packet_t packet;
1787 	mblk_t *mp;
1788 	mblk_t *nmp;
1789 	uint32_t packet_count;
1790 
1791 	tx_ring = Adapter->tx_ring;
1792 
1793 	/*
1794 	 * Here we don't need to protect the lists using
1795 	 * the usedlist_lock and freelist_lock, because they
1796 	 * are already protected by the chip_lock.
1797 	 */
1798 	mp = NULL;
1799 	nmp = NULL;
1800 	packet_count = 0;
1801 	packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list);
1802 	while (packet != NULL) {
1803 		if (packet->mp != NULL) {
1804 			/* Assemble the message chain */
1805 			if (mp == NULL) {
1806 				mp = packet->mp;
1807 				nmp = packet->mp;
1808 			} else {
1809 				nmp->b_next = packet->mp;
1810 				nmp = packet->mp;
1811 			}
1812 			/* Disconnect the message from the sw packet */
1813 			packet->mp = NULL;
1814 		}
1815 
1816 		e1000g_free_tx_swpkt(packet);
1817 		packet_count++;
1818 
1819 		packet = (p_tx_sw_packet_t)
1820 		    QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link);
1821 	}
1822 
1823 	if (mp != NULL)
1824 		freemsgchain(mp);
1825 
1826 	if (packet_count > 0) {
1827 		QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list);
1828 		QUEUE_INIT_LIST(&tx_ring->used_list);
1829 
1830 		/* Setup TX descriptor pointers */
1831 		tx_ring->tbd_next = tx_ring->tbd_first;
1832 		tx_ring->tbd_oldest = tx_ring->tbd_first;
1833 
1834 		/* Setup our HW Tx Head & Tail descriptor pointers */
1835 		E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
1836 		E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
1837 	}
1838 }
1839 
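/*
 * e1000g_tx_drain - wait for pending transmits to complete
 *
 * Polls the used list for up to TX_DRAIN_TIME milliseconds and returns
 * B_TRUE if all pending transmits completed within that time.
 */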
1840 static boolean_t
1841 e1000g_tx_drain(struct e1000g *Adapter)
1842 {
1843 	int i;
1844 	boolean_t done;
1845 	e1000g_tx_ring_t *tx_ring;
1846 
1847 	tx_ring = Adapter->tx_ring;
1848 
1849 	/* Allow up to TX_DRAIN_TIME (in ms) for pending transmits to complete. */
1850 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1851 		mutex_enter(&tx_ring->usedlist_lock);
1852 		done = IS_QUEUE_EMPTY(&tx_ring->used_list);
1853 		mutex_exit(&tx_ring->usedlist_lock);
1854 
1855 		if (done)
1856 			break;
1857 
1858 		msec_delay(1);
1859 	}
1860 
1861 	return (done);
1862 }
1863 
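/*
 * e1000g_rx_drain - reclaim rx packets on the pending list
 *
 * Packets already returned by the upper layer are freed; packets still
 * held upstream are marked for deferred free and the routine returns
 * B_FALSE to indicate the drain is incomplete.
 */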
1864 static boolean_t
1865 e1000g_rx_drain(struct e1000g *Adapter)
1866 {
1867 	e1000g_rx_ring_t *rx_ring;
1868 	p_rx_sw_packet_t packet;
1869 	boolean_t done;
1870 
1871 	rx_ring = Adapter->rx_ring;
1872 	done = B_TRUE;
1873 
1874 	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
1875 
1876 	while (rx_ring->pending_list != NULL) {
1877 		packet = rx_ring->pending_list;
1878 		rx_ring->pending_list =
1879 		    rx_ring->pending_list->next;
1880 
1881 		if (packet->flag == E1000G_RX_SW_STOP) {
1882 			packet->flag = E1000G_RX_SW_DETACH;
1883 			done = B_FALSE;
1884 		} else {
1885 			ASSERT(packet->flag == E1000G_RX_SW_FREE);
1886 			ASSERT(packet->mp == NULL);
1887 			e1000g_free_rx_sw_packet(packet);
1888 		}
1889 	}
1890 
1891 	rw_exit(&e1000g_rx_detach_lock);
1892 
1893 	return (done);
1894 }
1895 
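/*
 * e1000g_reset_adapter - local reset: stop and restart the adapter
 * without releasing and reallocating its DMA resources
 */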
1896 static boolean_t
1897 e1000g_reset_adapter(struct e1000g *Adapter)
1898 {
1899 	e1000g_stop(Adapter, B_FALSE);
1900 
1901 	if (e1000g_start(Adapter, B_FALSE)) {
1902 		e1000g_log(Adapter, CE_WARN, "Reset failed");
1903 		return (B_FALSE);
1904 	}
1905 
1906 	return (B_TRUE);
1907 }
1908 
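/*
 * e1000g_global_reset - full reset: stop and restart the adapter,
 * releasing and reallocating the DMA resources as well
 */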
1909 boolean_t
1910 e1000g_global_reset(struct e1000g *Adapter)
1911 {
1912 	e1000g_stop(Adapter, B_TRUE);
1913 
1914 	Adapter->init_count = 0;
1915 
1916 	if (e1000g_start(Adapter, B_TRUE)) {
1917 		e1000g_log(Adapter, CE_WARN, "Reset failed");
1918 		return (B_FALSE);
1919 	}
1920 
1921 	return (B_TRUE);
1922 }
1923 
1924 /*
1925  * e1000g_intr_pciexpress - ISR for PCI Express chipsets
1926  *
1927  * This interrupt service routine is for PCI-Express adapters.
1928  * The ICR contents are valid only when the E1000_ICR_INT_ASSERTED
1929  * bit is set.
1930  */
1931 static uint_t
1932 e1000g_intr_pciexpress(caddr_t arg)
1933 {
1934 	struct e1000g *Adapter;
1935 	uint32_t icr;
1936 
1937 	Adapter = (struct e1000g *)(uintptr_t)arg;
1938 	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
1939 
1940 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
1941 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
1942 
1943 	if (icr & E1000_ICR_INT_ASSERTED) {
1944 		/*
1945 		 * E1000_ICR_INT_ASSERTED bit was set:
1946 		 * Read(Clear) the ICR, claim this interrupt,
1947 		 * look for work to do.
1948 		 */
1949 		e1000g_intr_work(Adapter, icr);
1950 		return (DDI_INTR_CLAIMED);
1951 	} else {
1952 		/*
1953 		 * E1000_ICR_INT_ASSERTED bit was not set:
1954 		 * Don't claim this interrupt, return immediately.
1955 		 */
1956 		return (DDI_INTR_UNCLAIMED);
1957 	}
1958 }
1959 
1960 /*
1961  * e1000g_intr - ISR for PCI/PCI-X chipsets
1962  *
1963  * This interrupt service routine is for PCI/PCI-X adapters.
1964  * We check the ICR contents regardless of whether the
1965  * E1000_ICR_INT_ASSERTED bit is set.
1966  */
1967 static uint_t
1968 e1000g_intr(caddr_t arg)
1969 {
1970 	struct e1000g *Adapter;
1971 	uint32_t icr;
1972 
1973 	Adapter = (struct e1000g *)(uintptr_t)arg;
1974 	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
1975 
1976 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
1977 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
1978 
1979 	if (icr) {
1980 		/*
1981 		 * Any bit was set in ICR:
1982 		 * Read(Clear) the ICR, claim this interrupt,
1983 		 * look for work to do.
1984 		 */
1985 		e1000g_intr_work(Adapter, icr);
1986 		return (DDI_INTR_CLAIMED);
1987 	} else {
1988 		/*
1989 		 * No bit was set in ICR:
1990 		 * Don't claim this interrupt, return immediately.
1991 		 */
1992 		return (DDI_INTR_UNCLAIMED);
1993 	}
1994 }
1995 
1996 /*
1997  * e1000g_intr_work - actual processing of ISR
1998  *
1999  * Read(clear) the ICR contents and call appropriate interrupt
2000  * processing routines.
2001  */
2002 static void
2003 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr)
2004 {
2005 	struct e1000_hw *hw;
2006 	hw = &Adapter->shared;
2007 	e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
2008 
2009 	Adapter->rx_pkt_cnt = 0;
2010 	Adapter->tx_pkt_cnt = 0;
2011 
2012 	rw_enter(&Adapter->chip_lock, RW_READER);
2013 	/*
2014 	 * Here we need to check the "chip_state" flag within the chip_lock to
2015 	 * ensure the receive routine will not execute when the adapter is
2016 	 * being reset.
2017 	 */
2018 	if (Adapter->chip_state != E1000G_START) {
2019 		rw_exit(&Adapter->chip_lock);
2020 		return;
2021 	}
2022 
2023 	if (icr & E1000_ICR_RXT0) {
2024 		mblk_t			*mp;
2025 		uint_t			sz = 0;
2026 		mblk_t			*tmp, *tail = NULL;
2027 		e1000g_rx_ring_t	*rx_ring;
2028 
2029 		rx_ring = Adapter->rx_ring;
2030 		mutex_enter(&rx_ring->rx_lock);
2031 
2032 		/*
2033 		 * If the real interrupt for the Rx ring was
2034 		 * not disabled (e1000g_poll_mode == 0), then
2035 		 * we still pick up the packets and queue them on
2036 		 * the Rx ring when we are in polling mode.  This
2037 		 * enables the polling thread to pick up packets
2038 		 * quickly in polling mode and helps improve
2039 		 * latency.
2040 		 */
2041 		mp = e1000g_receive(rx_ring, &tail, &sz);
2042 		rw_exit(&Adapter->chip_lock);
2043 
2044 		if (mp != NULL) {
2045 			ASSERT(tail != NULL);
2046 			if (!rx_ring->poll_flag) {
2047 				/*
2048 				 * If not polling, see if something was
2049 				 * already queued. Take care not to
2050 				 * reorder packets.
2051 				 */
2052 				if (rx_ring->poll_list_head == NULL) {
2053 					mutex_exit(&rx_ring->rx_lock);
2054 					mac_rx_ring(Adapter->mh, rx_ring->mrh,
2055 					    mp, rx_ring->ring_gen_num);
2056 				} else {
2057 					tmp = rx_ring->poll_list_head;
2058 					rx_ring->poll_list_head = NULL;
2059 					rx_ring->poll_list_tail->b_next = mp;
2060 					rx_ring->poll_list_tail = NULL;
2061 					rx_ring->poll_list_sz = 0;
2062 					mutex_exit(&rx_ring->rx_lock);
2063 					mac_rx_ring(Adapter->mh, rx_ring->mrh,
2064 					    tmp, rx_ring->ring_gen_num);
2065 				}
2066 			} else {
2067 				/*
2068 				 * We are in a polling mode. Put the
2069 				 * processed packets on the poll list.
2070 				 */
2071 				if (rx_ring->poll_list_head == NULL)
2072 					rx_ring->poll_list_head = mp;
2073 				else
2074 					rx_ring->poll_list_tail->b_next = mp;
2075 				rx_ring->poll_list_tail = tail;
2076 				rx_ring->poll_list_sz += sz;
2077 				mutex_exit(&rx_ring->rx_lock);
2078 			}
2079 		} else if (!rx_ring->poll_flag &&
2080 		    rx_ring->poll_list_head != NULL) {
2081 			/*
2082 			 * Nothing new has arrived (then why
2083 			 * was the interrupt raised?). Check
2084 			 * whether anything was queued from the
2085 			 * last time.
2086 			 */
2087 			tmp = rx_ring->poll_list_head;
2088 			rx_ring->poll_list_head = NULL;
2089 			rx_ring->poll_list_tail = NULL;
2090 			rx_ring->poll_list_sz = 0;
2091 			mutex_exit(&rx_ring->rx_lock);
2092 			mac_rx_ring(Adapter->mh, rx_ring->mrh,
2093 			    tmp, rx_ring->ring_gen_num);
2094 		} else {
2095 			mutex_exit(&rx_ring->rx_lock);
2096 		}
2097 	} else
2098 		rw_exit(&Adapter->chip_lock);
2099 
2100 	if (icr & E1000_ICR_TXDW) {
2101 		if (!Adapter->tx_intr_enable)
2102 			e1000g_clear_tx_interrupt(Adapter);
2103 
2104 		/* Recycle the tx descriptors */
2105 		rw_enter(&Adapter->chip_lock, RW_READER);
2106 		(void) e1000g_recycle(tx_ring);
2107 		E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr);
2108 		rw_exit(&Adapter->chip_lock);
2109 
2110 		if (tx_ring->resched_needed &&
2111 		    (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) {
2112 			tx_ring->resched_needed = B_FALSE;
2113 			mac_tx_update(Adapter->mh);
2114 			E1000G_STAT(tx_ring->stat_reschedule);
2115 		}
2116 	}
2117 
2118 	/*
2119 	 * The Receive Sequence errors RXSEQ and the link status change LSC
2120 	 * are checked to detect that the cable has been pulled out. For
2121 	 * the Wiseman 2.0 silicon, the receive sequence errors interrupt
2122 	 * is an indication that the cable is not connected.
2123 	 */
2124 	if ((icr & E1000_ICR_RXSEQ) ||
2125 	    (icr & E1000_ICR_LSC) ||
2126 	    (icr & E1000_ICR_GPI_EN1)) {
2127 		boolean_t link_changed;
2128 		timeout_id_t tid = 0;
2129 
2130 		stop_watchdog_timer(Adapter);
2131 
2132 		rw_enter(&Adapter->chip_lock, RW_WRITER);
2133 
2134 		/*
2135 		 * Because we got a link-status-change interrupt, force
2136 		 * e1000_check_for_link() to look at the PHY.
2137 		 */
2138 		Adapter->shared.mac.get_link_status = B_TRUE;
2139 
2140 		/* e1000g_link_check takes care of link status change */
2141 		link_changed = e1000g_link_check(Adapter);
2142 
2143 		/* Get new phy state */
2144 		e1000g_get_phy_state(Adapter);
2145 
2146 		/*
2147 		 * If the link timer has not timed out, we'll not notify
2148 		 * the upper layer of any link state until the link is up.
2149 		 */
2150 		if (link_changed && !Adapter->link_complete) {
2151 			if (Adapter->link_state == LINK_STATE_UP) {
2152 				mutex_enter(&Adapter->link_lock);
2153 				Adapter->link_complete = B_TRUE;
2154 				tid = Adapter->link_tid;
2155 				Adapter->link_tid = 0;
2156 				mutex_exit(&Adapter->link_lock);
2157 			} else {
2158 				link_changed = B_FALSE;
2159 			}
2160 		}
2161 		rw_exit(&Adapter->chip_lock);
2162 
2163 		if (link_changed) {
2164 			if (tid != 0)
2165 				(void) untimeout(tid);
2166 
2167 			/*
2168 			 * Workaround for esb2. Data stuck in fifo on a link
2169 			 * down event. Stop receiver here and reset in watchdog.
2170 			 */
2171 			if ((Adapter->link_state == LINK_STATE_DOWN) &&
2172 			    (Adapter->shared.mac.type == e1000_80003es2lan)) {
2173 				uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
2174 				E1000_WRITE_REG(hw, E1000_RCTL,
2175 				    rctl & ~E1000_RCTL_EN);
2176 				e1000g_log(Adapter, CE_WARN,
2177 				    "ESB2 receiver disabled");
2178 				Adapter->esb2_workaround = B_TRUE;
2179 			}
2180 
2181 			mac_link_update(Adapter->mh, Adapter->link_state);
2182 		}
2183 
2184 		start_watchdog_timer(Adapter);
2185 	}
2186 }
2187 
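/*
 * e1000g_init_unicst - initialize the unicast address slots
 *
 * On the first initialization all slots are cleared; on subsequent
 * calls the previously configured addresses are written back to the
 * receive address registers.
 */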
2188 static void
2189 e1000g_init_unicst(struct e1000g *Adapter)
2190 {
2191 	struct e1000_hw *hw;
2192 	int slot;
2193 
2194 	hw = &Adapter->shared;
2195 
2196 	if (Adapter->init_count == 0) {
2197 		/* Initialize the multiple unicast addresses */
2198 		Adapter->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
2199 
2200 		/* Workaround for an erratum of the 82571 chipset */
2201 		if ((hw->mac.type == e1000_82571) &&
2202 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2203 			Adapter->unicst_total--;
2204 
2205 		Adapter->unicst_avail = Adapter->unicst_total;
2206 
2207 		for (slot = 0; slot < Adapter->unicst_total; slot++) {
2208 			/* Clear both the flag and MAC address */
2209 			Adapter->unicst_addr[slot].reg.high = 0;
2210 			Adapter->unicst_addr[slot].reg.low = 0;
2211 		}
2212 	} else {
2213 		/* Workaround for an erratum of the 82571 chipset */
2214 		if ((hw->mac.type == e1000_82571) &&
2215 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2216 			e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY);
2217 
2218 		/* Re-configure the RAR registers */
2219 		for (slot = 0; slot < Adapter->unicst_total; slot++)
2220 			if (Adapter->unicst_addr[slot].mac.set == 1)
2221 				e1000_rar_set(hw,
2222 				    Adapter->unicst_addr[slot].mac.addr, slot);
2223 	}
2224 
2225 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2226 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2227 }
2228 
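/*
 * e1000g_unicst_set - program a unicast address into the given RAR slot,
 * or clear the slot when mac_addr is NULL
 */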
2229 static int
2230 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr,
2231     int slot)
2232 {
2233 	struct e1000_hw *hw;
2234 
2235 	hw = &Adapter->shared;
2236 
2237 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2238 
2239 	/*
2240 	 * The first revision of Wiseman silicon (rev 2.0) has an errata
2241 	 * that requires the receiver to be in reset when any of the
2242 	 * receive address registers (RAR regs) are accessed.  The first
2243 	 * rev of Wiseman silicon also requires MWI to be disabled when
2244 	 * a global reset or a receive reset is issued.  So before we
2245 	 * initialize the RARs, we check the rev of the Wiseman controller
2246 	 * and work around any necessary HW errata.
2247 	 */
2248 	if ((hw->mac.type == e1000_82542) &&
2249 	    (hw->revision_id == E1000_REVISION_2)) {
2250 		e1000_pci_clear_mwi(hw);
2251 		E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2252 		msec_delay(5);
2253 	}
2254 	if (mac_addr == NULL) {
2255 		E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0);
2256 		E1000_WRITE_FLUSH(hw);
2257 		E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0);
2258 		E1000_WRITE_FLUSH(hw);
2259 		/* Clear both the flag and MAC address */
2260 		Adapter->unicst_addr[slot].reg.high = 0;
2261 		Adapter->unicst_addr[slot].reg.low = 0;
2262 	} else {
2263 		bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr,
2264 		    ETHERADDRL);
2265 		e1000_rar_set(hw, (uint8_t *)mac_addr, slot);
2266 		Adapter->unicst_addr[slot].mac.set = 1;
2267 	}
2268 
2269 	/* Workaround for an erratum of the 82571 chipset */
2270 	if (slot == 0) {
2271 		if ((hw->mac.type == e1000_82571) &&
2272 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2273 			if (mac_addr == NULL) {
2274 				E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2275 				    slot << 1, 0);
2276 				E1000_WRITE_FLUSH(hw);
2277 				E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2278 				    (slot << 1) + 1, 0);
2279 				E1000_WRITE_FLUSH(hw);
2280 			} else {
2281 				e1000_rar_set(hw, (uint8_t *)mac_addr,
2282 				    LAST_RAR_ENTRY);
2283 			}
2284 	}
2285 
2286 	/*
2287 	 * If we are using Wiseman rev 2.0 silicon, we will have previously
2288 	 * put the receiver in reset, and disabled MWI, to work around some
2289 	 * HW errata.  Now we should take the receiver out of reset, and
2290 	 * re-enable MWI if it was previously enabled by the PCI BIOS.
2291 	 */
2292 	if ((hw->mac.type == e1000_82542) &&
2293 	    (hw->revision_id == E1000_REVISION_2)) {
2294 		E1000_WRITE_REG(hw, E1000_RCTL, 0);
2295 		msec_delay(1);
2296 		if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2297 			e1000_pci_set_mwi(hw);
2298 		e1000g_rx_setup(Adapter);
2299 	}
2300 
2301 	rw_exit(&Adapter->chip_lock);
2302 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2303 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2304 		return (EIO);
2305 	}
2306 
2307 	return (0);
2308 }
2309 
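/*
 * multicst_add - add a multicast address to the software table and
 * update the hardware multicast filter
 */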
2310 static int
2311 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr)
2312 {
2313 	struct e1000_hw *hw = &Adapter->shared;
2314 	int res = 0;
2315 
2316 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2317 
2318 	if ((multiaddr[0] & 01) == 0) {
2319 		res = EINVAL;
2320 		goto done;
2321 	}
2322 
2323 	if (Adapter->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2324 		res = ENOENT;
2325 		goto done;
2326 	}
2327 
2328 	bcopy(multiaddr,
2329 	    &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL);
2330 	Adapter->mcast_count++;
2331 
2332 	/*
2333 	 * Update the MC table in the hardware
2334 	 */
2335 	e1000g_clear_interrupt(Adapter);
2336 
2337 	e1000g_setup_multicast(Adapter);
2338 
2339 	if ((hw->mac.type == e1000_82542) &&
2340 	    (hw->revision_id == E1000_REVISION_2))
2341 		e1000g_rx_setup(Adapter);
2342 
2343 	e1000g_mask_interrupt(Adapter);
2344 
2345 done:
2346 	rw_exit(&Adapter->chip_lock);
2347 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2348 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2349 		res = EIO;
2350 	}
2351 
2352 	return (res);
2353 }
2354 
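/*
 * multicst_remove - remove a multicast address from the software table
 * and update the hardware multicast filter
 */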
2355 static int
2356 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr)
2357 {
2358 	struct e1000_hw *hw = &Adapter->shared;
2359 	unsigned i;
2360 
2361 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2362 
2363 	for (i = 0; i < Adapter->mcast_count; i++) {
2364 		if (bcmp(multiaddr, &Adapter->mcast_table[i],
2365 		    ETHERADDRL) == 0) {
2366 			for (i++; i < Adapter->mcast_count; i++) {
2367 				Adapter->mcast_table[i - 1] =
2368 				    Adapter->mcast_table[i];
2369 			}
2370 			Adapter->mcast_count--;
2371 			break;
2372 		}
2373 	}
2374 
2375 	/*
2376 	 * Update the MC table in the hardware
2377 	 */
2378 	e1000g_clear_interrupt(Adapter);
2379 
2380 	e1000g_setup_multicast(Adapter);
2381 
2382 	if ((hw->mac.type == e1000_82542) &&
2383 	    (hw->revision_id == E1000_REVISION_2))
2384 		e1000g_rx_setup(Adapter);
2385 
2386 	e1000g_mask_interrupt(Adapter);
2387 
2388 done:
2389 	rw_exit(&Adapter->chip_lock);
2390 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2391 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2392 		return (EIO);
2393 	}
2394 
2395 	return (0);
2396 }
2397 
2398 /*
2399  * e1000g_setup_multicast - setup multicast data structures
2400  *
2401  * This routine initializes all of the multicast related structures.
2402  */
2403 void
2404 e1000g_setup_multicast(struct e1000g *Adapter)
2405 {
2406 	uint8_t *mc_addr_list;
2407 	uint32_t mc_addr_count;
2408 	uint32_t rctl;
2409 	struct e1000_hw *hw;
2410 
2411 	hw = &Adapter->shared;
2412 
2413 	/*
2414 	 * The e1000g has the ability to do perfect filtering of 16
2415 	 * addresses. The driver uses one of the e1000g's 16 receive
2416 	 * address registers for its node/network/mac/individual address.
2417 	 * So, we have room for up to 15 multicast addresses in the CAM,
2418 	 * additional MC addresses are handled by the MTA (Multicast Table
2419 	 * Array)
2420 	 */
2421 
2422 	rctl = E1000_READ_REG(hw, E1000_RCTL);
2423 
2424 	mc_addr_list = (uint8_t *)Adapter->mcast_table;
2425 
2426 	if (Adapter->mcast_count > MAX_NUM_MULTICAST_ADDRESSES) {
2427 		E1000G_DEBUGLOG_1(Adapter, CE_WARN,
2428 		    "Adapter requested more than %d MC Addresses.\n",
2429 		    MAX_NUM_MULTICAST_ADDRESSES);
2430 		mc_addr_count = MAX_NUM_MULTICAST_ADDRESSES;
2431 	} else {
2432 		/*
2433 		 * Set the number of MC addresses that we are being
2434 		 * requested to use
2435 		 */
2436 		mc_addr_count = Adapter->mcast_count;
2437 	}
2438 	/*
2439 	 * The Wiseman 2.0 silicon has an erratum by which the receiver will
2440 	 * hang while writing to the receive address registers if the receiver
2441 	 * is not in reset before writing to the registers. Updating the RAR
2442 	 * is done during the setting up of the multicast table, hence the
2443 	 * receiver has to be put in reset before updating the multicast table
2444 	 * and then taken out of reset at the end.
2445 	 */
2446 	/*
2447 	 * If MWI was enabled, disable it before issuing the global
2448 	 * reset to the hardware.
2449 	 */
2450 	/*
2451 	 * Only required for WISEMAN_2_0
2452 	 */
2453 	if ((hw->mac.type == e1000_82542) &&
2454 	    (hw->revision_id == E1000_REVISION_2)) {
2455 		e1000_pci_clear_mwi(hw);
2456 		/*
2457 		 * The e1000g must be in reset before changing any RA
2458 		 * registers. Reset receive unit.  The chip will remain in
2459 		 * the reset state until software explicitly restarts it.
2460 		 */
2461 		E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2462 		/* Allow receiver time to go into reset */
2463 		msec_delay(5);
2464 	}
2465 
2466 	e1000_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2467 	    Adapter->unicst_total, hw->mac.rar_entry_count);
2468 
2469 	/*
2470 	 * Only for Wiseman_2_0
2471 	 * If MWI was enabled, re-enable it after issuing (as we
2472 	 * disabled it above) the receive reset command.
2473 	 * Wainwright does not have a receive reset command; the only thing
2474 	 * close to it is a global reset, which would require Tx setup as well.
2475 	 */
2476 	if ((hw->mac.type == e1000_82542) &&
2477 	    (hw->revision_id == E1000_REVISION_2)) {
2478 		/*
2479 		 * If MWI was enabled, re-enable it after issuing the
2480 		 * global or receive reset to the hardware.
2481 		 */
2482 
2483 		/*
2484 		 * Take receiver out of reset
2485 		 * clear E1000_RCTL_RST bit (and all others)
2486 		 */
2487 		E1000_WRITE_REG(hw, E1000_RCTL, 0);
2488 		msec_delay(5);
2489 		if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2490 			e1000_pci_set_mwi(hw);
2491 	}
2492 
2493 	/*
2494 	 * Restore original value
2495 	 */
2496 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2497 }
2498 
2499 int
2500 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
2501 {
2502 	struct e1000g *Adapter = (struct e1000g *)arg;
2503 
2504 	return ((add) ? multicst_add(Adapter, addr)
2505 	    : multicst_remove(Adapter, addr));
2506 }
2507 
2508 int
2509 e1000g_m_promisc(void *arg, boolean_t on)
2510 {
2511 	struct e1000g *Adapter = (struct e1000g *)arg;
2512 	uint32_t rctl;
2513 
2514 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2515 
2516 	rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
2517 
2518 	if (on)
2519 		rctl |=
2520 		    (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
2521 	else
2522 		rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
2523 
2524 	E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
2525 
2526 	Adapter->e1000g_promisc = on;
2527 
2528 	rw_exit(&Adapter->chip_lock);
2529 
2530 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2531 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2532 		return (EIO);
2533 	}
2534 
2535 	return (0);
2536 }
2537 
2538 /*
2539  * Entry points to enable and disable interrupts at the granularity of
2540  * a group.
2541  * Turns the poll_mode for the whole adapter on and off to enable or
2542  * override the ring level polling control over the hardware interrupts.
2543  */
2544 static int
2545 e1000g_rx_group_intr_enable(mac_intr_handle_t arg)
2546 {
2547 	struct e1000g		*adapter = (struct e1000g *)arg;
2548 	e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2549 
2550 	/*
2551 	 * Later interrupts at the granularity of this ring will
2552 	 * invoke mac_rx() with NULL, indicating the need for another
2553 	 * software classification.
2554 	 * We have a single ring usable per adapter now, so we only need to
2555 	 * reset the rx handle for that one.
2556 	 * When more RX rings can be used, we should update each one of them.
2557 	 */
2558 	mutex_enter(&rx_ring->rx_lock);
2559 	rx_ring->mrh = NULL;
2560 	adapter->poll_mode = B_FALSE;
2561 	mutex_exit(&rx_ring->rx_lock);
2562 	return (0);
2563 }
2564 
2565 static int
2566 e1000g_rx_group_intr_disable(mac_intr_handle_t arg)
2567 {
2568 	struct e1000g *adapter = (struct e1000g *)arg;
2569 	e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2570 
2571 	mutex_enter(&rx_ring->rx_lock);
2572 
2573 	/*
2574 	 * Later interrupts at the granularity of this ring will
2575 	 * invoke mac_rx() with the handle for this ring.
2576 	 */
2577 	adapter->poll_mode = B_TRUE;
2578 	rx_ring->mrh = rx_ring->mrh_init;
2579 	mutex_exit(&rx_ring->rx_lock);
2580 	return (0);
2581 }
2582 
2583 /*
2584  * Entry points to enable and disable interrupts at the granularity of
2585  * a ring.
2586  * The adapter's poll_mode controls whether we actually proceed with hardware
2587  * interrupt toggling.
2588  */
2589 static int
2590 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh)
2591 {
2592 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)intrh;
2593 	struct e1000g 		*adapter = rx_ring->adapter;
2594 	struct e1000_hw 	*hw = &adapter->shared;
2595 	uint32_t		intr_mask;
2596 	boolean_t		poll_mode;
2597 
2598 	mutex_enter(&rx_ring->rx_lock);
2599 	rx_ring->poll_flag = 0;
2600 	poll_mode = adapter->poll_mode;
2601 	mutex_exit(&rx_ring->rx_lock);
2602 
2603 	if (poll_mode) {
2604 		/* Rx interrupt enabling for MSI and legacy */
2605 		intr_mask = E1000_READ_REG(hw, E1000_IMS);
2606 		intr_mask |= E1000_IMS_RXT0;
2607 		E1000_WRITE_REG(hw, E1000_IMS, intr_mask);
2608 		E1000_WRITE_FLUSH(hw);
2609 
2610 		/* Trigger a Rx interrupt to check Rx ring */
2611 		E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
2612 		E1000_WRITE_FLUSH(hw);
2613 	}
2614 	return (0);
2615 }
2616 
2617 static int
2618 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh)
2619 {
2620 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)intrh;
2621 	struct e1000g 		*adapter = rx_ring->adapter;
2622 	struct e1000_hw 	*hw = &adapter->shared;
2623 	boolean_t		poll_mode;
2624 
2625 	/*
2626 	 * Once the adapter can support per-Rx-ring interrupts,
2627 	 * we should disable the real interrupt instead of just setting
2628 	 * the flag.
2629 	 */
2630 	mutex_enter(&rx_ring->rx_lock);
2631 	rx_ring->poll_flag = 1;
2632 	poll_mode = adapter->poll_mode;
2633 	mutex_exit(&rx_ring->rx_lock);
2634 
2635 	if (poll_mode) {
2636 		/* Rx interrupt disabling for MSI and legacy */
2637 		E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
2638 		E1000_WRITE_FLUSH(hw);
2639 	}
2640 	return (0);
2641 }
2642 
2643 /*
2644  * e1000g_unicst_find - Find the slot for the specified unicast address
2645  */
2646 static int
2647 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr)
2648 {
2649 	int slot;
2650 
2651 	ASSERT(mutex_owned(&Adapter->gen_lock));
2652 
2653 	for (slot = 0; slot < Adapter->unicst_total; slot++) {
2654 		if (Adapter->unicst_addr[slot].mac.set == 1) {
2655 			if (bcmp(Adapter->unicst_addr[slot].mac.addr,
2656 			    mac_addr, ETHERADDRL) == 0)
2657 				return (slot);
2658 		} else
2659 			continue;
2660 	}
2661 
2662 	return (-1);
2663 }
2664 
2665 /*
2666  * Entry points to add and remove a MAC address to a ring group.
2667  * The caller takes care of adding and removing the MAC addresses
2668  * to the filter via these two routines.
2669  */
2670 
2671 static int
2672 e1000g_addmac(void *arg, const uint8_t *mac_addr)
2673 {
2674 	struct e1000g *Adapter = (struct e1000g *)arg;
2675 	int slot;
2676 
2677 	mutex_enter(&Adapter->gen_lock);
2678 
2679 	if (e1000g_unicst_find(Adapter, mac_addr) != -1) {
2680 		/* The same address is already in slot */
2681 		mutex_exit(&Adapter->gen_lock);
2682 		return (0);
2683 	}
2684 
2685 	if (Adapter->unicst_avail == 0) {
2686 		/* no slots available */
2687 		mutex_exit(&Adapter->gen_lock);
2688 		return (ENOSPC);
2689 	}
2690 
2691 	/* Search for a free slot */
2692 	for (slot = 0; slot < Adapter->unicst_total; slot++) {
2693 		if (Adapter->unicst_addr[slot].mac.set == 0)
2694 			break;
2695 	}
2696 	ASSERT(slot < Adapter->unicst_total);
2697 
2698 	e1000g_unicst_set(Adapter, mac_addr, slot);
2699 	Adapter->unicst_avail--;
2700 
2701 	mutex_exit(&Adapter->gen_lock);
2702 
2703 	return (0);
2704 }
2705 
2706 static int
2707 e1000g_remmac(void *arg, const uint8_t *mac_addr)
2708 {
2709 	struct e1000g *Adapter = (struct e1000g *)arg;
2710 	int slot;
2711 
2712 	mutex_enter(&Adapter->gen_lock);
2713 
2714 	slot = e1000g_unicst_find(Adapter, mac_addr);
2715 	if (slot == -1) {
2716 		mutex_exit(&Adapter->gen_lock);
2717 		return (EINVAL);
2718 	}
2719 
2720 	ASSERT(Adapter->unicst_addr[slot].mac.set);
2721 
2722 	/* Clear this slot */
2723 	e1000g_unicst_set(Adapter, NULL, slot);
2724 	Adapter->unicst_avail++;
2725 
2726 	mutex_exit(&Adapter->gen_lock);
2727 
2728 	return (0);
2729 }
2730 
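/*
 * e1000g_ring_start - record the MAC layer's ring generation number
 */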
2731 static int
2732 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
2733 {
2734 	e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh;
2735 
2736 	mutex_enter(&rx_ring->rx_lock);
2737 	rx_ring->ring_gen_num = mr_gen_num;
2738 	mutex_exit(&rx_ring->rx_lock);
2739 	return (0);
2740 }
2741 
2742 /*
2743  * Callback function for the MAC layer to register all rings.
2744  *
2745  * The hardware supports a single group with currently only one ring
2746  * available.
2747  * Though not offering virtualization ability per se, exposing the
2748  * group/ring still enables the polling and interrupt toggling.
2749  */
2750 void
2751 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index,
2752     const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
2753 {
2754 	struct e1000g *Adapter = (struct e1000g *)arg;
2755 	e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
2756 	mac_intr_t *mintr;
2757 
2758 	/*
2759 	 * We advertised only RX group/rings, so the MAC framework shouldn't
2760 	 * ask for anything else.
2761 	 */
2762 	ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0);
2763 
2764 	rx_ring->mrh = rx_ring->mrh_init = rh;
2765 	infop->mri_driver = (mac_ring_driver_t)rx_ring;
2766 	infop->mri_start = e1000g_ring_start;
2767 	infop->mri_stop = NULL;
2768 	infop->mri_poll = e1000g_poll_ring;
2769 
2770 	/* Ring level interrupts */
2771 	mintr = &infop->mri_intr;
2772 	mintr->mi_handle = (mac_intr_handle_t)rx_ring;
2773 	mintr->mi_enable = e1000g_rx_ring_intr_enable;
2774 	mintr->mi_disable = e1000g_rx_ring_intr_disable;
2775 }
2776 
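/*
 * Callback function for the MAC layer to register the single RX group,
 * including its address filtering and interrupt toggling entry points.
 */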
2777 static void
2778 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index,
2779     mac_group_info_t *infop, mac_group_handle_t gh)
2780 {
2781 	struct e1000g *Adapter = (struct e1000g *)arg;
2782 	mac_intr_t *mintr;
2783 
2784 	/*
2785 	 * We advertised a single RX ring. Getting a request for anything else
2786 	 * signifies a bug in the MAC framework.
2787 	 */
2788 	ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0);
2789 
2790 	Adapter->rx_group = gh;
2791 
2792 	infop->mgi_driver = (mac_group_driver_t)Adapter;
2793 	infop->mgi_start = NULL;
2794 	infop->mgi_stop = NULL;
2795 	infop->mgi_addmac = e1000g_addmac;
2796 	infop->mgi_remmac = e1000g_remmac;
2797 	infop->mgi_count = 1;
2798 
2799 	/* Group level interrupts */
2800 	mintr = &infop->mgi_intr;
2801 	mintr->mi_handle = (mac_intr_handle_t)Adapter;
2802 	mintr->mi_enable = e1000g_rx_group_intr_enable;
2803 	mintr->mi_disable = e1000g_rx_group_intr_disable;
2804 }
2805 
2806 static boolean_t
2807 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2808 {
2809 	struct e1000g *Adapter = (struct e1000g *)arg;
2810 
2811 	switch (cap) {
2812 	case MAC_CAPAB_HCKSUM: {
2813 		uint32_t *txflags = cap_data;
2814 
2815 		if (Adapter->tx_hcksum_enable)
2816 			*txflags = HCKSUM_IPHDRCKSUM |
2817 			    HCKSUM_INET_PARTIAL;
2818 		else
2819 			return (B_FALSE);
2820 		break;
2821 	}
2822 
2823 	case MAC_CAPAB_LSO: {
2824 		mac_capab_lso_t *cap_lso = cap_data;
2825 
2826 		if (Adapter->lso_enable) {
2827 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
2828 			cap_lso->lso_basic_tcp_ipv4.lso_max =
2829 			    E1000_LSO_MAXLEN;
2830 		} else
2831 			return (B_FALSE);
2832 		break;
2833 	}
2834 	case MAC_CAPAB_RINGS: {
2835 		mac_capab_rings_t *cap_rings = cap_data;
2836 
2837 		/* No TX rings exposed yet */
2838 		if (cap_rings->mr_type != MAC_RING_TYPE_RX)
2839 			return (B_FALSE);
2840 
2841 		cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2842 		cap_rings->mr_rnum = 1;
2843 		cap_rings->mr_gnum = 1;
2844 		cap_rings->mr_rget = e1000g_fill_ring;
2845 		cap_rings->mr_gget = e1000g_fill_group;
2846 		break;
2847 	}
2848 	default:
2849 		return (B_FALSE);
2850 	}
2851 	return (B_TRUE);
2852 }
2853 
2854 static boolean_t
2855 e1000g_param_locked(mac_prop_id_t pr_num)
2856 {
2857 	/*
2858 	 * All en_* parameters are locked (read-only) while
2859 	 * the device is in any sort of loopback mode ...
2860 	 */
2861 	switch (pr_num) {
2862 		case MAC_PROP_EN_1000FDX_CAP:
2863 		case MAC_PROP_EN_1000HDX_CAP:
2864 		case MAC_PROP_EN_100FDX_CAP:
2865 		case MAC_PROP_EN_100HDX_CAP:
2866 		case MAC_PROP_EN_10FDX_CAP:
2867 		case MAC_PROP_EN_10HDX_CAP:
2868 		case MAC_PROP_AUTONEG:
2869 		case MAC_PROP_FLOWCTRL:
2870 			return (B_TRUE);
2871 	}
2872 	return (B_FALSE);
2873 }
2874 
2875 /*
2876  * callback function for set/get of properties
2877  */
2878 static int
2879 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
2880     uint_t pr_valsize, const void *pr_val)
2881 {
2882 	struct e1000g *Adapter = arg;
2883 	struct e1000_mac_info *mac = &Adapter->shared.mac;
2884 	struct e1000_phy_info *phy = &Adapter->shared.phy;
2885 	struct e1000_fc_info *fc = &Adapter->shared.fc;
2886 	int err = 0;
2887 	link_flowctrl_t flowctrl;
2888 	uint32_t cur_mtu, new_mtu;
2889 	uint64_t tmp = 0;
2890 
2891 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2892 	if (Adapter->loopback_mode != E1000G_LB_NONE &&
2893 	    e1000g_param_locked(pr_num)) {
2894 		/*
2895 		 * All en_* parameters are locked (read-only)
2896 		 * while the device is in any sort of loopback mode.
2897 		 */
2898 		rw_exit(&Adapter->chip_lock);
2899 		return (EBUSY);
2900 	}
2901 
2902 	switch (pr_num) {
2903 		case MAC_PROP_EN_1000FDX_CAP:
2904 			Adapter->param_en_1000fdx = *(uint8_t *)pr_val;
2905 			Adapter->param_adv_1000fdx = *(uint8_t *)pr_val;
2906 			goto reset;
2907 		case MAC_PROP_EN_100FDX_CAP:
2908 			Adapter->param_en_100fdx = *(uint8_t *)pr_val;
2909 			Adapter->param_adv_100fdx = *(uint8_t *)pr_val;
2910 			goto reset;
2911 		case MAC_PROP_EN_100HDX_CAP:
2912 			Adapter->param_en_100hdx = *(uint8_t *)pr_val;
2913 			Adapter->param_adv_100hdx = *(uint8_t *)pr_val;
2914 			goto reset;
2915 		case MAC_PROP_EN_10FDX_CAP:
2916 			Adapter->param_en_10fdx = *(uint8_t *)pr_val;
2917 			Adapter->param_adv_10fdx = *(uint8_t *)pr_val;
2918 			goto reset;
2919 		case MAC_PROP_EN_10HDX_CAP:
2920 			Adapter->param_en_10hdx = *(uint8_t *)pr_val;
2921 			Adapter->param_adv_10hdx = *(uint8_t *)pr_val;
2922 			goto reset;
2923 		case MAC_PROP_AUTONEG:
2924 			Adapter->param_adv_autoneg = *(uint8_t *)pr_val;
2925 			goto reset;
2926 		case MAC_PROP_FLOWCTRL:
2927 			fc->send_xon = B_TRUE;
2928 			bcopy(pr_val, &flowctrl, sizeof (flowctrl));
2929 
2930 			switch (flowctrl) {
2931 			default:
2932 				err = EINVAL;
2933 				break;
2934 			case LINK_FLOWCTRL_NONE:
2935 				fc->type = e1000_fc_none;
2936 				break;
2937 			case LINK_FLOWCTRL_RX:
2938 				fc->type = e1000_fc_rx_pause;
2939 				break;
2940 			case LINK_FLOWCTRL_TX:
2941 				fc->type = e1000_fc_tx_pause;
2942 				break;
2943 			case LINK_FLOWCTRL_BI:
2944 				fc->type = e1000_fc_full;
2945 				break;
2946 			}
2947 reset:
2948 			if (err == 0) {
2949 				if (e1000g_reset_link(Adapter) != DDI_SUCCESS)
2950 					err = EINVAL;
2951 			}
2952 			break;
2953 		case MAC_PROP_ADV_1000FDX_CAP:
2954 		case MAC_PROP_ADV_1000HDX_CAP:
2955 		case MAC_PROP_ADV_100FDX_CAP:
2956 		case MAC_PROP_ADV_100HDX_CAP:
2957 		case MAC_PROP_ADV_10FDX_CAP:
2958 		case MAC_PROP_ADV_10HDX_CAP:
2959 		case MAC_PROP_EN_1000HDX_CAP:
2960 		case MAC_PROP_STATUS:
2961 		case MAC_PROP_SPEED:
2962 		case MAC_PROP_DUPLEX:
2963 			err = ENOTSUP; /* read-only prop. Can't set this. */
2964 			break;
2965 		case MAC_PROP_MTU:
2966 			cur_mtu = Adapter->default_mtu;
2967 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
2968 			if (new_mtu == cur_mtu) {
2969 				err = 0;
2970 				break;
2971 			}
2972 
2973 			tmp = new_mtu + sizeof (struct ether_vlan_header) +
2974 			    ETHERFCSL;
2975 			if ((tmp < DEFAULT_FRAME_SIZE) ||
2976 			    (tmp > MAXIMUM_FRAME_SIZE)) {
2977 				err = EINVAL;
2978 				break;
2979 			}
2980 
2981 			/* ich8 does not support jumbo frames */
2982 			if ((mac->type == e1000_ich8lan) &&
2983 			    (tmp > DEFAULT_FRAME_SIZE)) {
2984 				err = EINVAL;
2985 				break;
2986 			}
2987 			/* ich9 does not do jumbo frames on one phy type */
2988 			if ((mac->type == e1000_ich9lan) &&
2989 			    (phy->type == e1000_phy_ife) &&
2990 			    (tmp > DEFAULT_FRAME_SIZE)) {
2991 				err = EINVAL;
2992 				break;
2993 			}
2994 			if (Adapter->chip_state != E1000G_STOP) {
2995 				err = EBUSY;
2996 				break;
2997 			}
2998 
2999 			err = mac_maxsdu_update(Adapter->mh, new_mtu);
3000 			if (err == 0) {
3001 				Adapter->max_frame_size = (uint32_t)tmp;
3002 				Adapter->default_mtu = new_mtu;
3003 				e1000g_set_bufsize(Adapter);
3004 			}
3005 			break;
3006 		case MAC_PROP_PRIVATE:
3007 			err = e1000g_set_priv_prop(Adapter, pr_name,
3008 			    pr_valsize, pr_val);
3009 			break;
3010 		default:
3011 			err = ENOTSUP;
3012 			break;
3013 	}
3014 	rw_exit(&Adapter->chip_lock);
3015 	return (err);
3016 }
3017 
3018 static int
3019 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3020     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
3021 {
3022 	struct e1000g *Adapter = arg;
3023 	struct e1000_fc_info *fc = &Adapter->shared.fc;
3024 	int err = 0;
3025 	link_flowctrl_t flowctrl;
3026 	uint64_t tmp = 0;
3027 
3028 	if (pr_valsize == 0)
3029 		return (EINVAL);
3030 
3031 	*perm = MAC_PROP_PERM_RW;
3032 
3033 	bzero(pr_val, pr_valsize);
3034 	if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) {
3035 		return (e1000g_get_def_val(Adapter, pr_num,
3036 		    pr_valsize, pr_val));
3037 	}
3038 
3039 	switch (pr_num) {
3040 		case MAC_PROP_DUPLEX:
3041 			*perm = MAC_PROP_PERM_READ;
3042 			if (pr_valsize >= sizeof (link_duplex_t)) {
3043 				bcopy(&Adapter->link_duplex, pr_val,
3044 				    sizeof (link_duplex_t));
3045 			} else
3046 				err = EINVAL;
3047 			break;
3048 		case MAC_PROP_SPEED:
3049 			*perm = MAC_PROP_PERM_READ;
3050 			if (pr_valsize >= sizeof (uint64_t)) {
3051 				tmp = Adapter->link_speed * 1000000ull;
3052 				bcopy(&tmp, pr_val, sizeof (tmp));
3053 			} else
3054 				err = EINVAL;
3055 			break;
3056 		case MAC_PROP_AUTONEG:
3057 			*(uint8_t *)pr_val = Adapter->param_adv_autoneg;
3058 			break;
3059 		case MAC_PROP_FLOWCTRL:
3060 			if (pr_valsize >= sizeof (link_flowctrl_t)) {
3061 				switch (fc->type) {
3062 					case e1000_fc_none:
3063 						flowctrl = LINK_FLOWCTRL_NONE;
3064 						break;
3065 					case e1000_fc_rx_pause:
3066 						flowctrl = LINK_FLOWCTRL_RX;
3067 						break;
3068 					case e1000_fc_tx_pause:
3069 						flowctrl = LINK_FLOWCTRL_TX;
3070 						break;
3071 					case e1000_fc_full:
3072 						flowctrl = LINK_FLOWCTRL_BI;
3073 						break;
3074 				}
3075 				bcopy(&flowctrl, pr_val, sizeof (flowctrl));
3076 			} else
3077 				err = EINVAL;
3078 			break;
3079 		case MAC_PROP_ADV_1000FDX_CAP:
3080 			*perm = MAC_PROP_PERM_READ;
3081 			*(uint8_t *)pr_val = Adapter->param_adv_1000fdx;
3082 			break;
3083 		case MAC_PROP_EN_1000FDX_CAP:
3084 			*(uint8_t *)pr_val = Adapter->param_en_1000fdx;
3085 			break;
3086 		case MAC_PROP_ADV_1000HDX_CAP:
3087 			*perm = MAC_PROP_PERM_READ;
3088 			*(uint8_t *)pr_val = Adapter->param_adv_1000hdx;
3089 			break;
3090 		case MAC_PROP_EN_1000HDX_CAP:
3091 			*perm = MAC_PROP_PERM_READ;
3092 			*(uint8_t *)pr_val = Adapter->param_en_1000hdx;
3093 			break;
3094 		case MAC_PROP_ADV_100FDX_CAP:
3095 			*perm = MAC_PROP_PERM_READ;
3096 			*(uint8_t *)pr_val = Adapter->param_adv_100fdx;
3097 			break;
3098 		case MAC_PROP_EN_100FDX_CAP:
3099 			*(uint8_t *)pr_val = Adapter->param_en_100fdx;
3100 			break;
3101 		case MAC_PROP_ADV_100HDX_CAP:
3102 			*perm = MAC_PROP_PERM_READ;
3103 			*(uint8_t *)pr_val = Adapter->param_adv_100hdx;
3104 			break;
3105 		case MAC_PROP_EN_100HDX_CAP:
3106 			*(uint8_t *)pr_val = Adapter->param_en_100hdx;
3107 			break;
3108 		case MAC_PROP_ADV_10FDX_CAP:
3109 			*perm = MAC_PROP_PERM_READ;
3110 			*(uint8_t *)pr_val = Adapter->param_adv_10fdx;
3111 			break;
3112 		case MAC_PROP_EN_10FDX_CAP:
3113 			*(uint8_t *)pr_val = Adapter->param_en_10fdx;
3114 			break;
3115 		case MAC_PROP_ADV_10HDX_CAP:
3116 			*perm = MAC_PROP_PERM_READ;
3117 			*(uint8_t *)pr_val = Adapter->param_adv_10hdx;
3118 			break;
3119 		case MAC_PROP_EN_10HDX_CAP:
3120 			*(uint8_t *)pr_val = Adapter->param_en_10hdx;
3121 			break;
3122 		case MAC_PROP_ADV_100T4_CAP:
3123 		case MAC_PROP_EN_100T4_CAP:
3124 			*perm = MAC_PROP_PERM_READ;
3125 			*(uint8_t *)pr_val = Adapter->param_adv_100t4;
3126 			break;
3127 		case MAC_PROP_PRIVATE:
3128 			err = e1000g_get_priv_prop(Adapter, pr_name,
3129 			    pr_flags, pr_valsize, pr_val, perm);
3130 			break;
3131 		default:
3132 			err = ENOTSUP;
3133 			break;
3134 	}
3135 	return (err);
3136 }
3137 
3138 /* ARGSUSED2 */
3139 static int
3140 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name,
3141     uint_t pr_valsize, const void *pr_val)
3142 {
3143 	int err = 0;
3144 	long result;
3145 	struct e1000_hw *hw = &Adapter->shared;
3146 
3147 	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3148 		if (pr_val == NULL) {
3149 			err = EINVAL;
3150 			return (err);
3151 		}
3152 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3153 		if (result < MIN_TX_BCOPY_THRESHOLD ||
3154 		    result > MAX_TX_BCOPY_THRESHOLD)
3155 			err = EINVAL;
3156 		else {
3157 			Adapter->tx_bcopy_thresh = (uint32_t)result;
3158 		}
3159 		return (err);
3160 	}
3161 	if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3162 		if (pr_val == NULL) {
3163 			err = EINVAL;
3164 			return (err);
3165 		}
3166 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3167 		if (result < 0 || result > 1)
3168 			err = EINVAL;
3169 		else {
3170 			Adapter->tx_intr_enable = (result == 1) ?
3171 			    B_TRUE: B_FALSE;
3172 			if (Adapter->tx_intr_enable)
3173 				e1000g_mask_tx_interrupt(Adapter);
3174 			else
3175 				e1000g_clear_tx_interrupt(Adapter);
3176 			if (e1000g_check_acc_handle(
3177 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3178 				ddi_fm_service_impact(Adapter->dip,
3179 				    DDI_SERVICE_DEGRADED);
3180 		}
3181 		return (err);
3182 	}
3183 	if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3184 		if (pr_val == NULL) {
3185 			err = EINVAL;
3186 			return (err);
3187 		}
3188 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3189 		if (result < MIN_TX_INTR_DELAY ||
3190 		    result > MAX_TX_INTR_DELAY)
3191 			err = EINVAL;
3192 		else {
3193 			Adapter->tx_intr_delay = (uint32_t)result;
3194 			E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay);
3195 			if (e1000g_check_acc_handle(
3196 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3197 				ddi_fm_service_impact(Adapter->dip,
3198 				    DDI_SERVICE_DEGRADED);
3199 		}
3200 		return (err);
3201 	}
3202 	if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3203 		if (pr_val == NULL) {
3204 			err = EINVAL;
3205 			return (err);
3206 		}
3207 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3208 		if (result < MIN_TX_INTR_ABS_DELAY ||
3209 		    result > MAX_TX_INTR_ABS_DELAY)
3210 			err = EINVAL;
3211 		else {
3212 			Adapter->tx_intr_abs_delay = (uint32_t)result;
3213 			E1000_WRITE_REG(hw, E1000_TADV,
3214 			    Adapter->tx_intr_abs_delay);
3215 			if (e1000g_check_acc_handle(
3216 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3217 				ddi_fm_service_impact(Adapter->dip,
3218 				    DDI_SERVICE_DEGRADED);
3219 		}
3220 		return (err);
3221 	}
3222 	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3223 		if (pr_val == NULL) {
3224 			err = EINVAL;
3225 			return (err);
3226 		}
3227 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3228 		if (result < MIN_RX_BCOPY_THRESHOLD ||
3229 		    result > MAX_RX_BCOPY_THRESHOLD)
3230 			err = EINVAL;
3231 		else
3232 			Adapter->rx_bcopy_thresh = (uint32_t)result;
3233 		return (err);
3234 	}
3235 	if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3236 		if (pr_val == NULL) {
3237 			err = EINVAL;
3238 			return (err);
3239 		}
3240 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3241 		if (result < MIN_RX_LIMIT_ON_INTR ||
3242 		    result > MAX_RX_LIMIT_ON_INTR)
3243 			err = EINVAL;
3244 		else
3245 			Adapter->rx_limit_onintr = (uint32_t)result;
3246 		return (err);
3247 	}
3248 	if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3249 		if (pr_val == NULL) {
3250 			err = EINVAL;
3251 			return (err);
3252 		}
3253 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3254 		if (result < MIN_RX_INTR_DELAY ||
3255 		    result > MAX_RX_INTR_DELAY)
3256 			err = EINVAL;
3257 		else {
3258 			Adapter->rx_intr_delay = (uint32_t)result;
3259 			E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay);
3260 			if (e1000g_check_acc_handle(
3261 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3262 				ddi_fm_service_impact(Adapter->dip,
3263 				    DDI_SERVICE_DEGRADED);
3264 		}
3265 		return (err);
3266 	}
3267 	if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3268 		if (pr_val == NULL) {
3269 			err = EINVAL;
3270 			return (err);
3271 		}
3272 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3273 		if (result < MIN_RX_INTR_ABS_DELAY ||
3274 		    result > MAX_RX_INTR_ABS_DELAY)
3275 			err = EINVAL;
3276 		else {
3277 			Adapter->rx_intr_abs_delay = (uint32_t)result;
3278 			E1000_WRITE_REG(hw, E1000_RADV,
3279 			    Adapter->rx_intr_abs_delay);
3280 			if (e1000g_check_acc_handle(
3281 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3282 				ddi_fm_service_impact(Adapter->dip,
3283 				    DDI_SERVICE_DEGRADED);
3284 		}
3285 		return (err);
3286 	}
3287 	if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3288 		if (pr_val == NULL) {
3289 			err = EINVAL;
3290 			return (err);
3291 		}
3292 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3293 		if (result < MIN_INTR_THROTTLING ||
3294 		    result > MAX_INTR_THROTTLING)
3295 			err = EINVAL;
3296 		else {
3297 			if (hw->mac.type >= e1000_82540) {
3298 				Adapter->intr_throttling_rate =
3299 				    (uint32_t)result;
3300 				E1000_WRITE_REG(hw, E1000_ITR,
3301 				    Adapter->intr_throttling_rate);
3302 				if (e1000g_check_acc_handle(
3303 				    Adapter->osdep.reg_handle) != DDI_FM_OK)
3304 					ddi_fm_service_impact(Adapter->dip,
3305 					    DDI_SERVICE_DEGRADED);
3306 			} else
3307 				err = EINVAL;
3308 		}
3309 		return (err);
3310 	}
3311 	if (strcmp(pr_name, "_intr_adaptive") == 0) {
3312 		if (pr_val == NULL) {
3313 			err = EINVAL;
3314 			return (err);
3315 		}
3316 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3317 		if (result < 0 || result > 1)
3318 			err = EINVAL;
3319 		else {
3320 			if (hw->mac.type >= e1000_82540) {
3321 				Adapter->intr_adaptive = (result == 1) ?
3322 				    B_TRUE : B_FALSE;
3323 			} else {
3324 				err = EINVAL;
3325 			}
3326 		}
3327 		return (err);
3328 	}
3329 	return (ENOTSUP);
3330 }
3331 
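/*
 * e1000g_get_priv_prop - retrieve the current (or default) value of a
 * private property as a decimal string
 */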
3332 static int
3333 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name,
3334     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
3335 {
3336 	int err = ENOTSUP;
3337 	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
3338 	int value;
3339 
3340 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
3341 		*perm = MAC_PROP_PERM_READ;
3342 		if (is_default)
3343 			goto done;
3344 		value = Adapter->param_adv_pause;
3345 		err = 0;
3346 		goto done;
3347 	}
3348 	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
3349 		*perm = MAC_PROP_PERM_READ;
3350 		if (is_default)
3351 			goto done;
3352 		value = Adapter->param_adv_asym_pause;
3353 		err = 0;
3354 		goto done;
3355 	}
3356 	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3357 		value = (is_default ? DEFAULT_TX_BCOPY_THRESHOLD :
3358 		    Adapter->tx_bcopy_thresh);
3359 		err = 0;
3360 		goto done;
3361 	}
3362 	if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3363 		value = (is_default ? DEFAULT_TX_INTR_ENABLE :
3364 		    Adapter->tx_intr_enable);
3365 		err = 0;
3366 		goto done;
3367 	}
3368 	if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3369 		value = (is_default ? DEFAULT_TX_INTR_DELAY :
3370 		    Adapter->tx_intr_delay);
3371 		err = 0;
3372 		goto done;
3373 	}
3374 	if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3375 		value = (is_default ? DEFAULT_TX_INTR_ABS_DELAY :
3376 		    Adapter->tx_intr_abs_delay);
3377 		err = 0;
3378 		goto done;
3379 	}
3380 	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3381 		value = (is_default ? DEFAULT_RX_BCOPY_THRESHOLD :
3382 		    Adapter->rx_bcopy_thresh);
3383 		err = 0;
3384 		goto done;
3385 	}
3386 	if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3387 		value = (is_default ? DEFAULT_RX_LIMIT_ON_INTR :
3388 		    Adapter->rx_limit_onintr);
3389 		err = 0;
3390 		goto done;
3391 	}
3392 	if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3393 		value = (is_default ? DEFAULT_RX_INTR_DELAY :
3394 		    Adapter->rx_intr_delay);
3395 		err = 0;
3396 		goto done;
3397 	}
3398 	if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3399 		value = (is_default ? DEFAULT_RX_INTR_ABS_DELAY :
3400 		    Adapter->rx_intr_abs_delay);
3401 		err = 0;
3402 		goto done;
3403 	}
3404 	if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3405 		value = (is_default ? DEFAULT_INTR_THROTTLING :
3406 		    Adapter->intr_throttling_rate);
3407 		err = 0;
3408 		goto done;
3409 	}
3410 	if (strcmp(pr_name, "_intr_adaptive") == 0) {
3411 		value = (is_default ? 1 : Adapter->intr_adaptive);
3412 		err = 0;
3413 		goto done;
3414 	}
3415 done:
3416 	if (err == 0) {
3417 		(void) snprintf(pr_val, pr_valsize, "%d", value);
3418 	}
3419 	return (err);
3420 }
3421 
3422 /*
3423  * e1000g_get_conf - get configurations set in e1000g.conf
3424  * This routine gets user-configured values out of the configuration
3425  * file e1000g.conf.
3426  *
3427  * For each configurable value, there is a minimum, a maximum, and a
3428  * default.
3429  * If user does not configure a value, use the default.
3430  * If user configures below the minimum, use the minimum.
3431  * If user configures above the maximum, use the maximum.
3432  */
3433 static void
3434 e1000g_get_conf(struct e1000g *Adapter)
3435 {
3436 	struct e1000_hw *hw = &Adapter->shared;
3437 	boolean_t tbi_compatibility = B_FALSE;
3438 
3439 	/*
3440 	 * get each configurable property from e1000g.conf
3441 	 */
3442 
3443 	/*
3444 	 * NumTxDescriptors
3445 	 */
3446 	Adapter->tx_desc_num =
3447 	    e1000g_get_prop(Adapter, "NumTxDescriptors",
3448 	    MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR,
3449 	    DEFAULT_NUM_TX_DESCRIPTOR);
3450 
3451 	/*
3452 	 * NumRxDescriptors
3453 	 */
3454 	Adapter->rx_desc_num =
3455 	    e1000g_get_prop(Adapter, "NumRxDescriptors",
3456 	    MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR,
3457 	    DEFAULT_NUM_RX_DESCRIPTOR);
3458 
3459 	/*
3460 	 * NumRxFreeList
3461 	 */
3462 	Adapter->rx_freelist_num =
3463 	    e1000g_get_prop(Adapter, "NumRxFreeList",
3464 	    MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST,
3465 	    DEFAULT_NUM_RX_FREELIST);
3466 
3467 	/*
3468 	 * NumTxPacketList
3469 	 */
3470 	Adapter->tx_freelist_num =
3471 	    e1000g_get_prop(Adapter, "NumTxPacketList",
3472 	    MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST,
3473 	    DEFAULT_NUM_TX_FREELIST);
3474 
3475 	/*
3476 	 * FlowControl
3477 	 */
3478 	hw->fc.send_xon = B_TRUE;
3479 	hw->fc.type =
3480 	    e1000g_get_prop(Adapter, "FlowControl",
3481 	    e1000_fc_none, 4, DEFAULT_FLOW_CONTROL);
3482 	/* 4 is the setting that says "let the eeprom decide" */
3483 	if (hw->fc.type == 4)
3484 		hw->fc.type = e1000_fc_default;
3485 
3486 	/*
3487 	 * Max Num Receive Packets on Interrupt
3488 	 */
3489 	Adapter->rx_limit_onintr =
3490 	    e1000g_get_prop(Adapter, "MaxNumReceivePackets",
3491 	    MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR,
3492 	    DEFAULT_RX_LIMIT_ON_INTR);
3493 
3494 	/*
3495 	 * PHY master slave setting
3496 	 */
3497 	hw->phy.ms_type =
3498 	    e1000g_get_prop(Adapter, "SetMasterSlave",
3499 	    e1000_ms_hw_default, e1000_ms_auto,
3500 	    e1000_ms_hw_default);
3501 
3502 	/*
3503 	 * Parameter which controls TBI mode workaround, which is only
3504 	 * needed on certain switches such as Cisco 6500/Foundry
3505 	 */
3506 	tbi_compatibility =
3507 	    e1000g_get_prop(Adapter, "TbiCompatibilityEnable",
3508 	    0, 1, DEFAULT_TBI_COMPAT_ENABLE);
3509 	e1000_set_tbi_compatibility_82543(hw, tbi_compatibility);
3510 
3511 	/*
3512 	 * MSI Enable
3513 	 */
3514 	Adapter->msi_enable =
3515 	    e1000g_get_prop(Adapter, "MSIEnable",
3516 	    0, 1, DEFAULT_MSI_ENABLE);
3517 
3518 	/*
3519 	 * Interrupt Throttling Rate
3520 	 */
3521 	Adapter->intr_throttling_rate =
3522 	    e1000g_get_prop(Adapter, "intr_throttling_rate",
3523 	    MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
3524 	    DEFAULT_INTR_THROTTLING);
3525 
3526 	/*
3527 	 * Adaptive Interrupt Blanking Enable/Disable
3528 	 * It is enabled by default
3529 	 */
3530 	Adapter->intr_adaptive =
3531 	    (e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1) == 1) ?
3532 	    B_TRUE : B_FALSE;
3533 
3534 	/*
3535 	 * Hardware checksum enable/disable parameter
3536 	 */
3537 	Adapter->tx_hcksum_enable =
3538 	    e1000g_get_prop(Adapter, "tx_hcksum_enable",
3539 	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3540 	/*
3541 	 * Checksum on/off selection via global parameters.
3542 	 *
3543 	 * If the chip is flagged as not capable of (correctly)
3544 	 * handling checksumming, we don't enable it on either
3545 	 * Rx or Tx side.  Otherwise, we take this chip's settings
3546 	 * from the patchable global defaults.
3547 	 *
3548 	 * We advertise our capabilities only if TX offload is
3549 	 * enabled.  On receive, the stack will accept checksummed
3550 	 * packets anyway, even if we haven't said we can deliver
3551 	 * them.
3552 	 */
3553 	switch (hw->mac.type) {
3554 		case e1000_82540:
3555 		case e1000_82544:
3556 		case e1000_82545:
3557 		case e1000_82545_rev_3:
3558 		case e1000_82546:
3559 		case e1000_82546_rev_3:
3560 		case e1000_82571:
3561 		case e1000_82572:
3562 		case e1000_82573:
3563 		case e1000_80003es2lan:
3564 			break;
3565 		/*
3566 		 * For the following Intel PRO/1000 chipsets, we have not
3567 		 * tested the hardware checksum offload capability, so we
3568 		 * disable the capability for them.
3569 		 *	e1000_82542,
3570 		 *	e1000_82543,
3571 		 *	e1000_82541,
3572 		 *	e1000_82541_rev_2,
3573 		 *	e1000_82547,
3574 		 *	e1000_82547_rev_2,
3575 		 */
3576 		default:
3577 			Adapter->tx_hcksum_enable = B_FALSE;
3578 	}
3579 
3580 	/*
3581 	 * Large Send Offloading(LSO) Enable/Disable
3582 	 * If the tx hardware checksum is not enabled, LSO should be
3583 	 * disabled.
3584 	 */
3585 	Adapter->lso_enable =
3586 	    e1000g_get_prop(Adapter, "lso_enable",
3587 	    0, 1, DEFAULT_LSO_ENABLE);
3588 
3589 	switch (hw->mac.type) {
3590 		case e1000_82546:
3591 		case e1000_82546_rev_3:
3592 			if (Adapter->lso_enable)
3593 				Adapter->lso_premature_issue = B_TRUE;
3594 			/* FALLTHRU */
3595 		case e1000_82571:
3596 		case e1000_82572:
3597 		case e1000_82573:
3598 		case e1000_80003es2lan:
3599 			break;
3600 		default:
3601 			Adapter->lso_enable = B_FALSE;
3602 	}
3603 
3604 	if (!Adapter->tx_hcksum_enable) {
3605 		Adapter->lso_premature_issue = B_FALSE;
3606 		Adapter->lso_enable = B_FALSE;
3607 	}
3608 }
3609 
3610 /*
3611  * e1000g_get_prop - routine to read properties
3612  *
3613  * Get a user-configured property value out of the configuration
3614  * file e1000g.conf.
3615  *
3616  * The caller provides the name of the property, a default value, a
3617  * minimum value, and a maximum value.
3618  *
3619  * Return the configured value of the property, with the default, minimum
3620  * and maximum properly applied.
3621  */
3622 static int
3623 e1000g_get_prop(struct e1000g *Adapter,	/* pointer to per-adapter structure */
3624     char *propname,		/* name of the property */
3625     int minval,			/* minimum acceptable value */
3626     int maxval,			/* maximum acceptable value */
3627     int defval)			/* default value */
3628 {
3629 	int propval;		/* value returned for requested property */
3630 	int *props;		/* pointer to array of properties returned */
3631 	uint_t nprops;		/* number of property values returned */
3632 
3633 	/*
3634 	 * get the array of properties from the config file
3635 	 */
3636 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip,
3637 	    DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) {
3638 		/* got some properties, test if we got enough */
3639 		if (Adapter->instance < nprops) {
3640 			propval = props[Adapter->instance];
3641 		} else {
3642 			/* not enough properties configured */
3643 			propval = defval;
3644 			E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
3645 			    "Not Enough %s values found in e1000g.conf"
3646 			    " - set to %d\n",
3647 			    propname, propval);
3648 		}
3649 
3650 		/* free memory allocated for properties */
3651 		ddi_prop_free(props);
3652 
3653 	} else {
3654 		propval = defval;
3655 	}
3656 
3657 	/*
3658 	 * enforce limits
3659 	 */
3660 	if (propval > maxval) {
3661 		propval = maxval;
3662 		E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
3663 		    "Too High %s value in e1000g.conf - set to %d\n",
3664 		    propname, propval);
3665 	}
3666 
3667 	if (propval < minval) {
3668 		propval = minval;
3669 		E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
3670 		    "Too Low %s value in e1000g.conf - set to %d\n",
3671 		    propname, propval);
3672 	}
3673 
3674 	return (propval);
3675 }
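/*
 * Illustrative note: since e1000g_get_prop() indexes the property array by
 * instance number, a hypothetical driver.conf entry such as
 *
 *	MSIEnable=1,0;
 *
 * configures instance 0 and instance 1 separately; instances beyond the
 * end of the array fall back to the default value.
 */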
3676 
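/*
 * e1000g_link_check - check the current link state
 *
 * Compares the hardware link state with the driver's cached state, updates
 * the cached speed/duplex/state on a transition, applies the per-chip link
 * workarounds, and returns B_TRUE if the link state has changed.
 */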
3677 static boolean_t
3678 e1000g_link_check(struct e1000g *Adapter)
3679 {
3680 	uint16_t speed, duplex, phydata;
3681 	boolean_t link_changed = B_FALSE;
3682 	struct e1000_hw *hw;
3683 	uint32_t reg_tarc;
3684 
3685 	hw = &Adapter->shared;
3686 
3687 	if (e1000g_link_up(Adapter)) {
3688 		/*
3689 		 * The link is up; check whether it was marked as down earlier
3690 		 */
3691 		if (Adapter->link_state != LINK_STATE_UP) {
3692 			(void) e1000_get_speed_and_duplex(hw, &speed, &duplex);
3693 			Adapter->link_speed = speed;
3694 			Adapter->link_duplex = duplex;
3695 			Adapter->link_state = LINK_STATE_UP;
3696 			link_changed = B_TRUE;
3697 
3698 			Adapter->tx_link_down_timeout = 0;
3699 
3700 			if ((hw->mac.type == e1000_82571) ||
3701 			    (hw->mac.type == e1000_82572)) {
3702 				reg_tarc = E1000_READ_REG(hw, E1000_TARC(0));
3703 				if (speed == SPEED_1000)
3704 					reg_tarc |= (1 << 21);
3705 				else
3706 					reg_tarc &= ~(1 << 21);
3707 				E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc);
3708 			}
3709 		}
3710 		Adapter->smartspeed = 0;
3711 	} else {
3712 		if (Adapter->link_state != LINK_STATE_DOWN) {
3713 			Adapter->link_speed = 0;
3714 			Adapter->link_duplex = 0;
3715 			Adapter->link_state = LINK_STATE_DOWN;
3716 			link_changed = B_TRUE;
3717 
3718 			/*
3719 			 * SmartSpeed workaround for Tabor/TanaX: when the
3720 			 * driver loses link, disable auto master/slave
3721 			 * resolution.
3722 			 */
3723 			if (hw->phy.type == e1000_phy_igp) {
3724 				(void) e1000_read_phy_reg(hw,
3725 				    PHY_1000T_CTRL, &phydata);
3726 				phydata |= CR_1000T_MS_ENABLE;
3727 				(void) e1000_write_phy_reg(hw,
3728 				    PHY_1000T_CTRL, phydata);
3729 			}
3730 		} else {
3731 			e1000g_smartspeed(Adapter);
3732 		}
3733 
3734 		if (Adapter->chip_state == E1000G_START) {
3735 			if (Adapter->tx_link_down_timeout <
3736 			    MAX_TX_LINK_DOWN_TIMEOUT) {
3737 				Adapter->tx_link_down_timeout++;
3738 			} else if (Adapter->tx_link_down_timeout ==
3739 			    MAX_TX_LINK_DOWN_TIMEOUT) {
3740 				e1000g_tx_clean(Adapter);
3741 				Adapter->tx_link_down_timeout++;
3742 			}
3743 		}
3744 	}
3745 
3746 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
3747 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
3748 
3749 	return (link_changed);
3750 }
3751 
3752 /*
3753  * e1000g_reset_link - use the link properties to set up the link
3754  */
3755 int
3756 e1000g_reset_link(struct e1000g *Adapter)
3757 {
3758 	struct e1000_mac_info *mac;
3759 	struct e1000_phy_info *phy;
3760 	boolean_t invalid;
3761 
3762 	mac = &Adapter->shared.mac;
3763 	phy = &Adapter->shared.phy;
3764 	invalid = B_FALSE;
3765 
3766 	if (Adapter->param_adv_autoneg == 1) {
3767 		mac->autoneg = B_TRUE;
3768 		phy->autoneg_advertised = 0;
3769 
3770 		/*
3771 		 * 1000hdx is not supported for autonegotiation
3772 		 */
3773 		if (Adapter->param_adv_1000fdx == 1)
3774 			phy->autoneg_advertised |= ADVERTISE_1000_FULL;
3775 
3776 		if (Adapter->param_adv_100fdx == 1)
3777 			phy->autoneg_advertised |= ADVERTISE_100_FULL;
3778 
3779 		if (Adapter->param_adv_100hdx == 1)
3780 			phy->autoneg_advertised |= ADVERTISE_100_HALF;
3781 
3782 		if (Adapter->param_adv_10fdx == 1)
3783 			phy->autoneg_advertised |= ADVERTISE_10_FULL;
3784 
3785 		if (Adapter->param_adv_10hdx == 1)
3786 			phy->autoneg_advertised |= ADVERTISE_10_HALF;
3787 
3788 		if (phy->autoneg_advertised == 0)
3789 			invalid = B_TRUE;
3790 	} else {
3791 		mac->autoneg = B_FALSE;
3792 
3793 		/*
3794 		 * 1000fdx and 1000hdx are not supported for forced link
3795 		 */
3796 		if (Adapter->param_adv_100fdx == 1)
3797 			mac->forced_speed_duplex = ADVERTISE_100_FULL;
3798 		else if (Adapter->param_adv_100hdx == 1)
3799 			mac->forced_speed_duplex = ADVERTISE_100_HALF;
3800 		else if (Adapter->param_adv_10fdx == 1)
3801 			mac->forced_speed_duplex = ADVERTISE_10_FULL;
3802 		else if (Adapter->param_adv_10hdx == 1)
3803 			mac->forced_speed_duplex = ADVERTISE_10_HALF;
3804 		else
3805 			invalid = B_TRUE;
3806 
3807 	}
3808 
3809 	if (invalid) {
3810 		e1000g_log(Adapter, CE_WARN,
3811 		    "Invalid link settings. Setting up link to "
3812 		    "support autonegotiation with all link capabilities.");
3813 		mac->autoneg = B_TRUE;
3814 		phy->autoneg_advertised = ADVERTISE_1000_FULL |
3815 		    ADVERTISE_100_FULL | ADVERTISE_100_HALF |
3816 		    ADVERTISE_10_FULL | ADVERTISE_10_HALF;
3817 	}
3818 
3819 	return (e1000_setup_link(&Adapter->shared));
3820 }
3821 
3822 static void
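/*
 * e1000g_timer_tx_resched - kick a stalled transmit path
 *
 * If a transmit reschedule was requested, enough descriptors have since been
 * freed, and more than a second has passed since the request, tell the MAC
 * layer to resume transmission on this interface.
 */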
3823 e1000g_timer_tx_resched(struct e1000g *Adapter)
3824 {
3825 	e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
3826 
3827 	if (tx_ring->resched_needed &&
3828 	    ((ddi_get_lbolt() - tx_ring->resched_timestamp) >
3829 	    drv_usectohz(1000000)) &&
3830 	    (Adapter->chip_state == E1000G_START) &&
3831 	    (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) {
3832 		tx_ring->resched_needed = B_FALSE;
3833 		mac_tx_update(Adapter->mh);
3834 		E1000G_STAT(tx_ring->stat_reschedule);
3835 		E1000G_STAT(tx_ring->stat_timer_reschedule);
3836 	}
3837 }
3838 
3839 static void
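/*
 * e1000g_local_timer - watchdog timer routine
 *
 * Runs once a second while the watchdog is armed: recovers from chip errors
 * and transmit stalls by resetting the adapter, polls the link state, applies
 * the esb2, 82571 LAA and 82547 TTL workarounds, updates adaptive IFS, and
 * then re-arms itself through restart_watchdog_timer().
 */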
3840 e1000g_local_timer(void *ws)
3841 {
3842 	struct e1000g *Adapter = (struct e1000g *)ws;
3843 	struct e1000_hw *hw;
3844 	e1000g_ether_addr_t ether_addr;
3845 	boolean_t link_changed;
3846 
3847 	hw = &Adapter->shared;
3848 
3849 	if (Adapter->chip_state == E1000G_ERROR) {
3850 		Adapter->reset_count++;
3851 		if (e1000g_global_reset(Adapter)) {
3852 			ddi_fm_service_impact(Adapter->dip,
3853 			    DDI_SERVICE_RESTORED);
3854 			e1000g_timer_tx_resched(Adapter);
3855 		} else
3856 			ddi_fm_service_impact(Adapter->dip,
3857 			    DDI_SERVICE_LOST);
3858 		return;
3859 	}
3860 
3861 	if (e1000g_stall_check(Adapter)) {
3862 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
3863 		    "Tx stall detected. Activate automatic recovery.\n");
3864 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL);
3865 		Adapter->reset_count++;
3866 		if (e1000g_reset_adapter(Adapter)) {
3867 			ddi_fm_service_impact(Adapter->dip,
3868 			    DDI_SERVICE_RESTORED);
3869 			e1000g_timer_tx_resched(Adapter);
3870 		} else
3871 			ddi_fm_service_impact(Adapter->dip,
3872 			    DDI_SERVICE_LOST);
3873 		return;
3874 	}
3875 
3876 	link_changed = B_FALSE;
3877 	rw_enter(&Adapter->chip_lock, RW_READER);
3878 	if (Adapter->link_complete)
3879 		link_changed = e1000g_link_check(Adapter);
3880 	rw_exit(&Adapter->chip_lock);
3881 
3882 	if (link_changed)
3883 		mac_link_update(Adapter->mh, Adapter->link_state);
3884 
3885 	/*
3886 	 * Workaround for esb2. Data stuck in fifo on a link
3887 	 * down event. Reset the adapter to recover it.
3888 	 */
3889 	if (Adapter->esb2_workaround) {
3890 		Adapter->esb2_workaround = B_FALSE;
3891 		(void) e1000g_reset_adapter(Adapter);
3892 	}
3893 
3894 	/*
3895 	 * With 82571 controllers, any locally administered address will
3896 	 * be overwritten when there is a reset on the other port.
3897 	 * Detect this circumstance and correct it.
3898 	 */
3899 	if ((hw->mac.type == e1000_82571) &&
3900 	    (e1000_get_laa_state_82571(hw) == B_TRUE)) {
3901 		ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0);
3902 		ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1);
3903 
3904 		ether_addr.reg.low = ntohl(ether_addr.reg.low);
3905 		ether_addr.reg.high = ntohl(ether_addr.reg.high);
3906 
3907 		if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) ||
3908 		    (ether_addr.mac.addr[4] != hw->mac.addr[1]) ||
3909 		    (ether_addr.mac.addr[3] != hw->mac.addr[2]) ||
3910 		    (ether_addr.mac.addr[2] != hw->mac.addr[3]) ||
3911 		    (ether_addr.mac.addr[1] != hw->mac.addr[4]) ||
3912 		    (ether_addr.mac.addr[0] != hw->mac.addr[5])) {
3913 			e1000_rar_set(hw, hw->mac.addr, 0);
3914 		}
3915 	}
3916 
3917 	/*
3918 	 * Long TTL workaround for 82541/82547
3919 	 */
3920 	(void) e1000_igp_ttl_workaround_82547(hw);
3921 
3922 	/*
3923 	 * Check the Adaptive IFS settings. If there are lots of collisions,
3924 	 * change the value in steps...
3925 	 * These properties should only be set for 10/100.
3926 	 */
3927 	if ((hw->phy.media_type == e1000_media_type_copper) &&
3928 	    ((Adapter->link_speed == SPEED_100) ||
3929 	    (Adapter->link_speed == SPEED_10))) {
3930 		e1000_update_adaptive(hw);
3931 	}
3932 	/*
3933 	 * Set Timer Interrupts
3934 	 */
3935 	E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
3936 
3937 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
3938 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
3939 	else
3940 		e1000g_timer_tx_resched(Adapter);
3941 
3942 	restart_watchdog_timer(Adapter);
3943 }
3944 
3945 /*
3946  * The function e1000g_link_timer() is called when the timer for link setup
3947  * expires, which indicates that link setup is complete. The link state is
3948  * not updated until link setup completes, and this function does not report
3949  * the link state to the upper layer through mac_link_update(). That is done
3950  * by the local timer routine or the interrupt service routine after the
3951  * interface is started (plumbed).
3952  */
3953 static void
3954 e1000g_link_timer(void *arg)
3955 {
3956 	struct e1000g *Adapter = (struct e1000g *)arg;
3957 
3958 	mutex_enter(&Adapter->link_lock);
3959 	Adapter->link_complete = B_TRUE;
3960 	Adapter->link_tid = 0;
3961 	mutex_exit(&Adapter->link_lock);
3962 }
3963 
3964 /*
3965  * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf
3966  *
3967  * This function reads the forced speed and duplex for 10/100 Mbps speeds
3968  * and also for 1000 Mbps speeds from the e1000g.conf file.
3969  */
3970 static void
3971 e1000g_force_speed_duplex(struct e1000g *Adapter)
3972 {
3973 	int forced;
3974 	struct e1000_mac_info *mac = &Adapter->shared.mac;
3975 	struct e1000_phy_info *phy = &Adapter->shared.phy;
3976 
3977 	/*
3978 	 * get value out of config file
3979 	 */
3980 	forced = e1000g_get_prop(Adapter, "ForceSpeedDuplex",
3981 	    GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY);
3982 
3983 	switch (forced) {
3984 	case GDIAG_10_HALF:
3985 		/*
3986 		 * Disable Auto Negotiation
3987 		 */
3988 		mac->autoneg = B_FALSE;
3989 		mac->forced_speed_duplex = ADVERTISE_10_HALF;
3990 		break;
3991 	case GDIAG_10_FULL:
3992 		/*
3993 		 * Disable Auto Negotiation
3994 		 */
3995 		mac->autoneg = B_FALSE;
3996 		mac->forced_speed_duplex = ADVERTISE_10_FULL;
3997 		break;
3998 	case GDIAG_100_HALF:
3999 		/*
4000 		 * Disable Auto Negotiation
4001 		 */
4002 		mac->autoneg = B_FALSE;
4003 		mac->forced_speed_duplex = ADVERTISE_100_HALF;
4004 		break;
4005 	case GDIAG_100_FULL:
4006 		/*
4007 		 * Disable Auto Negotiation
4008 		 */
4009 		mac->autoneg = B_FALSE;
4010 		mac->forced_speed_duplex = ADVERTISE_100_FULL;
4011 		break;
4012 	case GDIAG_1000_FULL:
4013 		/*
4014 		 * The gigabit spec requires autonegotiation.  Therefore,
4015 		 * when the user wants to force the speed to 1000Mbps, we
4016 		 * enable AutoNeg, but only allow the hardware to advertise
4017 		 * 1000Mbps.  This is different from 10/100 operation, where
4018 		 * we are allowed to link without any negotiation.
4019 		 */
4020 		mac->autoneg = B_TRUE;
4021 		phy->autoneg_advertised = ADVERTISE_1000_FULL;
4022 		break;
4023 	default:	/* obey the setting of AutoNegAdvertised */
4024 		mac->autoneg = B_TRUE;
4025 		phy->autoneg_advertised =
4026 		    (uint16_t)e1000g_get_prop(Adapter, "AutoNegAdvertised",
4027 		    0, AUTONEG_ADVERTISE_SPEED_DEFAULT,
4028 		    AUTONEG_ADVERTISE_SPEED_DEFAULT);
4029 		break;
4030 	}	/* switch */
4031 }
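/*
 * Illustrative e1000g.conf usage (hypothetical values, assuming the GDIAG_*
 * codes and the ADVERTISE_* bit mask defined in the driver headers):
 *
 *	ForceSpeedDuplex=4;
 *	AutoNegAdvertised=0x2F;
 *
 * The first entry selects one of the forced speed/duplex codes handled
 * above; the second restricts the speeds advertised during autonegotiation.
 */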
4032 
4033 /*
4034  * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf
4035  *
4036  * This function reads MaxFrameSize from e1000g.conf
4037  */
4038 static void
4039 e1000g_get_max_frame_size(struct e1000g *Adapter)
4040 {
4041 	int max_frame;
4042 	struct e1000_mac_info *mac = &Adapter->shared.mac;
4043 	struct e1000_phy_info *phy = &Adapter->shared.phy;
4044 
4045 	/*
4046 	 * get value out of config file
4047 	 */
4048 	max_frame = e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0);
4049 
4050 	switch (max_frame) {
4051 	case 0:
4052 		Adapter->default_mtu = ETHERMTU;
4053 		break;
4054 	/*
4055 	 * To avoid excessive memory allocation for rx buffers,
4056 	 * E1000G_IPALIGNPRESERVEROOM bytes are reserved.
4057 	 */
4058 	case 1:
4059 		Adapter->default_mtu = FRAME_SIZE_UPTO_4K -
4060 		    sizeof (struct ether_vlan_header) - ETHERFCSL -
4061 		    E1000G_IPALIGNPRESERVEROOM;
4062 		break;
4063 	case 2:
4064 		Adapter->default_mtu = FRAME_SIZE_UPTO_8K -
4065 		    sizeof (struct ether_vlan_header) - ETHERFCSL -
4066 		    E1000G_IPALIGNPRESERVEROOM;
4067 		break;
4068 	case 3:
4069 		if (mac->type >= e1000_82571)
4070 			Adapter->default_mtu = MAXIMUM_MTU;
4071 		else
4072 			Adapter->default_mtu = FRAME_SIZE_UPTO_16K -
4073 			    sizeof (struct ether_vlan_header) - ETHERFCSL -
4074 			    E1000G_IPALIGNPRESERVEROOM;
4075 		break;
4076 	default:
4077 		Adapter->default_mtu = ETHERMTU;
4078 		break;
4079 	}	/* switch */
4080 
4081 	Adapter->max_frame_size = Adapter->default_mtu +
4082 	    sizeof (struct ether_vlan_header) + ETHERFCSL;
4083 
4084 	/* ich8 does not do jumbo frames */
4085 	if (mac->type == e1000_ich8lan) {
4086 		Adapter->default_mtu = ETHERMTU;
4087 		Adapter->max_frame_size = ETHERMTU +
4088 		    sizeof (struct ether_vlan_header) + ETHERFCSL;
4089 	}
4090 
4091 	/* ich9 does not do jumbo frames on one phy type */
4092 	if ((mac->type == e1000_ich9lan) &&
4093 	    (phy->type == e1000_phy_ife)) {
4094 		Adapter->default_mtu = ETHERMTU;
4095 		Adapter->max_frame_size = ETHERMTU +
4096 		    sizeof (struct ether_vlan_header) + ETHERFCSL;
4097 	}
4098 }
4099 
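/*
 * Illustrative e1000g.conf entry (hypothetical) selecting the largest jumbo
 * frame size handled above:
 *
 *	MaxFrameSize=3;
 */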
4100 static void
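/*
 * Watchdog timer helpers: enable_watchdog_timer() and
 * disable_watchdog_timer() control whether the watchdog may run at all,
 * while start/stop/restart_watchdog_timer() arm, cancel or re-arm the
 * one-second timeout within that constraint. arm_watchdog_timer() simply
 * schedules the next e1000g_local_timer() call.
 */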
4101 arm_watchdog_timer(struct e1000g *Adapter)
4102 {
4103 	Adapter->watchdog_tid =
4104 	    timeout(e1000g_local_timer,
4105 	    (void *)Adapter, 1 * drv_usectohz(1000000));
4106 }
4107 #pragma inline(arm_watchdog_timer)
4108 
4109 static void
4110 enable_watchdog_timer(struct e1000g *Adapter)
4111 {
4112 	mutex_enter(&Adapter->watchdog_lock);
4113 
4114 	if (!Adapter->watchdog_timer_enabled) {
4115 		Adapter->watchdog_timer_enabled = B_TRUE;
4116 		Adapter->watchdog_timer_started = B_TRUE;
4117 		arm_watchdog_timer(Adapter);
4118 	}
4119 
4120 	mutex_exit(&Adapter->watchdog_lock);
4121 }
4122 
4123 static void
4124 disable_watchdog_timer(struct e1000g *Adapter)
4125 {
4126 	timeout_id_t tid;
4127 
4128 	mutex_enter(&Adapter->watchdog_lock);
4129 
4130 	Adapter->watchdog_timer_enabled = B_FALSE;
4131 	Adapter->watchdog_timer_started = B_FALSE;
4132 	tid = Adapter->watchdog_tid;
4133 	Adapter->watchdog_tid = 0;
4134 
4135 	mutex_exit(&Adapter->watchdog_lock);
4136 
4137 	if (tid != 0)
4138 		(void) untimeout(tid);
4139 }
4140 
4141 static void
4142 start_watchdog_timer(struct e1000g *Adapter)
4143 {
4144 	mutex_enter(&Adapter->watchdog_lock);
4145 
4146 	if (Adapter->watchdog_timer_enabled) {
4147 		if (!Adapter->watchdog_timer_started) {
4148 			Adapter->watchdog_timer_started = B_TRUE;
4149 			arm_watchdog_timer(Adapter);
4150 		}
4151 	}
4152 
4153 	mutex_exit(&Adapter->watchdog_lock);
4154 }
4155 
4156 static void
4157 restart_watchdog_timer(struct e1000g *Adapter)
4158 {
4159 	mutex_enter(&Adapter->watchdog_lock);
4160 
4161 	if (Adapter->watchdog_timer_started)
4162 		arm_watchdog_timer(Adapter);
4163 
4164 	mutex_exit(&Adapter->watchdog_lock);
4165 }
4166 
4167 static void
4168 stop_watchdog_timer(struct e1000g *Adapter)
4169 {
4170 	timeout_id_t tid;
4171 
4172 	mutex_enter(&Adapter->watchdog_lock);
4173 
4174 	Adapter->watchdog_timer_started = B_FALSE;
4175 	tid = Adapter->watchdog_tid;
4176 	Adapter->watchdog_tid = 0;
4177 
4178 	mutex_exit(&Adapter->watchdog_lock);
4179 
4180 	if (tid != 0)
4181 		(void) untimeout(tid);
4182 }
4183 
4184 static void
4185 stop_link_timer(struct e1000g *Adapter)
4186 {
4187 	timeout_id_t tid;
4188 
4189 	/* Disable the link timer */
4190 	mutex_enter(&Adapter->link_lock);
4191 
4192 	tid = Adapter->link_tid;
4193 	Adapter->link_tid = 0;
4194 
4195 	mutex_exit(&Adapter->link_lock);
4196 
4197 	if (tid != 0)
4198 		(void) untimeout(tid);
4199 }
4200 
4201 static void
4202 stop_82547_timer(e1000g_tx_ring_t *tx_ring)
4203 {
4204 	timeout_id_t tid;
4205 
4206 	/* Disable the tx timer for 82547 chipset */
4207 	mutex_enter(&tx_ring->tx_lock);
4208 
4209 	tx_ring->timer_enable_82547 = B_FALSE;
4210 	tid = tx_ring->timer_id_82547;
4211 	tx_ring->timer_id_82547 = 0;
4212 
4213 	mutex_exit(&tx_ring->tx_lock);
4214 
4215 	if (tid != 0)
4216 		(void) untimeout(tid);
4217 }
4218 
4219 void
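/*
 * Interrupt mask helpers: writing bits to IMS (Interrupt Mask Set) enables
 * the corresponding interrupt causes, while writing bits to IMC (Interrupt
 * Mask Clear) disables them. The routines below wrap the combinations used
 * by the driver.
 */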
4220 e1000g_clear_interrupt(struct e1000g *Adapter)
4221 {
4222 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC,
4223 	    0xffffffff & ~E1000_IMS_RXSEQ);
4224 }
4225 
4226 void
4227 e1000g_mask_interrupt(struct e1000g *Adapter)
4228 {
4229 	E1000_WRITE_REG(&Adapter->shared, E1000_IMS,
4230 	    IMS_ENABLE_MASK & ~E1000_IMS_TXDW);
4231 
4232 	if (Adapter->tx_intr_enable)
4233 		e1000g_mask_tx_interrupt(Adapter);
4234 }
4235 
4236 /*
4237  * This routine is called by e1000g_quiesce(), therefore must not block.
4238  */
4239 void
4240 e1000g_clear_all_interrupts(struct e1000g *Adapter)
4241 {
4242 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff);
4243 }
4244 
4245 void
4246 e1000g_mask_tx_interrupt(struct e1000g *Adapter)
4247 {
4248 	E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW);
4249 }
4250 
4251 void
4252 e1000g_clear_tx_interrupt(struct e1000g *Adapter)
4253 {
4254 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW);
4255 }
4256 
4257 static void
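/*
 * e1000g_smartspeed - SmartSpeed link-downshift workaround
 *
 * Driven by the Adapter->smartspeed counter from the watchdog: on the first
 * call it looks for a persistent master/slave configuration fault and, if
 * one is found, switches to automatic master/slave resolution; after
 * E1000_SMARTSPEED_DOWNSHIFT ticks without link it re-enables manual
 * resolution, and after E1000_SMARTSPEED_MAX ticks the cycle restarts.
 */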
4258 e1000g_smartspeed(struct e1000g *Adapter)
4259 {
4260 	struct e1000_hw *hw = &Adapter->shared;
4261 	uint16_t phy_status;
4262 	uint16_t phy_ctrl;
4263 
4264 	/*
4265 	 * If we're not T-or-T, or we're not autoneg'ing, or we're not
4266 	 * advertising 1000Full, we don't even use the workaround
4267 	 */
4268 	if ((hw->phy.type != e1000_phy_igp) ||
4269 	    !hw->mac.autoneg ||
4270 	    !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL))
4271 		return;
4272 
4273 	/*
4274 	 * True if this is the first call of this function or after every
4275 	 * 30 seconds of not having link
4276 	 */
4277 	if (Adapter->smartspeed == 0) {
4278 		/*
4279 		 * If Master/Slave config fault is asserted twice, we
4280 		 * assume back-to-back
4281 		 */
4282 		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4283 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4284 			return;
4285 
4286 		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4287 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4288 			return;
4289 		/*
4290 		 * We're assuming back-to-back because our status register
4291 		 * insists there's a fault in the master/slave
4292 		 * relationship that was "negotiated".
4293 		 */
4294 		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4295 		/*
4296 		 * Is the phy configured for manual configuration of
4297 		 * master/slave?
4298 		 */
4299 		if (phy_ctrl & CR_1000T_MS_ENABLE) {
4300 			/*
4301 			 * Yes.  Then disable manual configuration (enable
4302 			 * auto configuration) of master/slave
4303 			 */
4304 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
4305 			(void) e1000_write_phy_reg(hw,
4306 			    PHY_1000T_CTRL, phy_ctrl);
4307 			/*
4308 			 * Effectively starting the clock
4309 			 */
4310 			Adapter->smartspeed++;
4311 			/*
4312 			 * Restart autonegotiation
4313 			 */
4314 			if (!e1000_phy_setup_autoneg(hw) &&
4315 			    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
4316 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4317 				    MII_CR_RESTART_AUTO_NEG);
4318 				(void) e1000_write_phy_reg(hw,
4319 				    PHY_CONTROL, phy_ctrl);
4320 			}
4321 		}
4322 		return;
4323 		/*
4324 		 * Has 6 seconds transpired still without link? Remember,
4325 		 * you should reset the smartspeed counter once you obtain
4326 		 * link
4327 		 */
4328 	} else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4329 		/*
4330 		 * Yes.  Remember, we did at the start determine that
4331 		 * there's a master/slave configuration fault, so we're
4332 		 * still assuming there's someone on the other end, but we
4333 		 * just haven't yet been able to talk to it. We then
4334 		 * re-enable auto configuration of master/slave to see if
4335 		 * we're running 2/3 pair cables.
4336 		 */
4337 		/*
4338 		 * If still no link, perhaps using 2/3 pair cable
4339 		 */
4340 		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4341 		phy_ctrl |= CR_1000T_MS_ENABLE;
4342 		(void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4343 		/*
4344 		 * Restart autoneg with phy enabled for manual
4345 		 * configuration of master/slave
4346 		 */
4347 		if (!e1000_phy_setup_autoneg(hw) &&
4348 		    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
4349 			phy_ctrl |=
4350 			    (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
4351 			(void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
4352 		}
4353 		/*
4354 		 * Hopefully, there are no more faults and we've obtained
4355 		 * link as a result.
4356 		 */
4357 	}
4358 	/*
4359 	 * Restart process after E1000_SMARTSPEED_MAX iterations (30
4360 	 * seconds)
4361 	 */
4362 	if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4363 		Adapter->smartspeed = 0;
4364 }
4365 
4366 static boolean_t
4367 is_valid_mac_addr(uint8_t *mac_addr)
4368 {
4369 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
4370 	const uint8_t addr_test2[6] =
4371 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4372 
4373 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
4374 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
4375 		return (B_FALSE);
4376 
4377 	return (B_TRUE);
4378 }
4379 
4380 /*
4381  * e1000g_stall_check - check for tx stall
4382  *
4383  * This function checks if the adapter is stalled (in transmit).
4384  *
4385  * It is called each time the watchdog timeout is invoked.
4386  * If the transmit descriptor reclaim continuously fails,
4387  * the watchdog value will increment by 1. If the watchdog
4388  * value exceeds the threshold, the adapter is assumed to
4389  * have stalled and needs to be reset.
4390  */
4391 static boolean_t
4392 e1000g_stall_check(struct e1000g *Adapter)
4393 {
4394 	e1000g_tx_ring_t *tx_ring;
4395 
4396 	tx_ring = Adapter->tx_ring;
4397 
4398 	if (Adapter->link_state != LINK_STATE_UP)
4399 		return (B_FALSE);
4400 
4401 	if (tx_ring->recycle_fail > 0)
4402 		tx_ring->stall_watchdog++;
4403 	else
4404 		tx_ring->stall_watchdog = 0;
4405 
4406 	if (tx_ring->stall_watchdog < E1000G_STALL_WATCHDOG_COUNT)
4407 		return (B_FALSE);
4408 
4409 	tx_ring->stall_watchdog = 0;
4410 	tx_ring->recycle_fail = 0;
4411 
4412 	return (B_TRUE);
4413 }
4414 
4415 #ifdef E1000G_DEBUG
4416 static enum ioc_reply
4417 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp)
4418 {
4419 	void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd);
4420 	e1000g_peekpoke_t *ppd;
4421 	uint64_t mem_va;
4422 	uint64_t maxoff;
4423 	boolean_t peek;
4424 
4425 	switch (iocp->ioc_cmd) {
4426 
4427 	case E1000G_IOC_REG_PEEK:
4428 		peek = B_TRUE;
4429 		break;
4430 
4431 	case E1000G_IOC_REG_POKE:
4432 		peek = B_FALSE;
4433 		break;
4434 
4435 	default:
4436 		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
4437 		    "e1000g_diag_ioctl: invalid ioctl command 0x%X\n",
4438 		    iocp->ioc_cmd);
4439 		return (IOC_INVAL);
4440 	}
4441 
4442 	/*
4443 	 * Validate format of ioctl
4444 	 */
4445 	if (iocp->ioc_count != sizeof (e1000g_peekpoke_t))
4446 		return (IOC_INVAL);
4447 	if (mp->b_cont == NULL)
4448 		return (IOC_INVAL);
4449 
4450 	ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr;
4451 
4452 	/*
4453 	 * Validate request parameters
4454 	 */
4455 	switch (ppd->pp_acc_space) {
4456 
4457 	default:
4458 		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
4459 		    "e1000g_diag_ioctl: invalid access space 0x%X\n",
4460 		    ppd->pp_acc_space);
4461 		return (IOC_INVAL);
4462 
4463 	case E1000G_PP_SPACE_REG:
4464 		/*
4465 		 * Memory-mapped I/O space
4466 		 */
4467 		ASSERT(ppd->pp_acc_size == 4);
4468 		if (ppd->pp_acc_size != 4)
4469 			return (IOC_INVAL);
4470 
4471 		if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
4472 			return (IOC_INVAL);
4473 
4474 		mem_va = 0;
4475 		maxoff = 0x10000;
4476 		ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg;
4477 		break;
4478 
4479 	case E1000G_PP_SPACE_E1000G:
4480 		/*
4481 		 * E1000g data structure!
4482 		 */
4483 		mem_va = (uintptr_t)e1000gp;
4484 		maxoff = sizeof (struct e1000g);
4485 		ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem;
4486 		break;
4487 
4488 	}
4489 
4490 	if (ppd->pp_acc_offset >= maxoff)
4491 		return (IOC_INVAL);
4492 
4493 	if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff)
4494 		return (IOC_INVAL);
4495 
4496 	/*
4497 	 * All OK - go!
4498 	 */
4499 	ppd->pp_acc_offset += mem_va;
4500 	(*ppfn)(e1000gp, ppd);
4501 	return (peek ? IOC_REPLY : IOC_ACK);
4502 }
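/*
 * Minimal sketch (hypothetical register offset) of how a debug utility could
 * fill in e1000g_peekpoke_t before issuing E1000G_IOC_REG_PEEK:
 *
 *	e1000g_peekpoke_t ppd;
 *
 *	ppd.pp_acc_space = E1000G_PP_SPACE_REG;
 *	ppd.pp_acc_size = 4;
 *	ppd.pp_acc_offset = 0x8;
 *
 * On success the peeked register value is returned in ppd.pp_acc_data.
 */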
4503 
4504 static void
4505 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4506 {
4507 	ddi_acc_handle_t handle;
4508 	uint32_t *regaddr;
4509 
4510 	handle = e1000gp->osdep.reg_handle;
4511 	regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
4512 	    (uintptr_t)ppd->pp_acc_offset);
4513 
4514 	ppd->pp_acc_data = ddi_get32(handle, regaddr);
4515 }
4516 
4517 static void
4518 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4519 {
4520 	ddi_acc_handle_t handle;
4521 	uint32_t *regaddr;
4522 	uint32_t value;
4523 
4524 	handle = e1000gp->osdep.reg_handle;
4525 	regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
4526 	    (uintptr_t)ppd->pp_acc_offset);
4527 	value = (uint32_t)ppd->pp_acc_data;
4528 
4529 	ddi_put32(handle, regaddr, value);
4530 }
4531 
4532 static void
4533 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4534 {
4535 	uint64_t value;
4536 	void *vaddr;
4537 
4538 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
4539 
4540 	switch (ppd->pp_acc_size) {
4541 	case 1:
4542 		value = *(uint8_t *)vaddr;
4543 		break;
4544 
4545 	case 2:
4546 		value = *(uint16_t *)vaddr;
4547 		break;
4548 
4549 	case 4:
4550 		value = *(uint32_t *)vaddr;
4551 		break;
4552 
4553 	case 8:
4554 		value = *(uint64_t *)vaddr;
4555 		break;
4556 	}
4557 
4558 	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
4559 	    "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n",
4560 	    (void *)e1000gp, (void *)ppd, value, vaddr);
4561 
4562 	ppd->pp_acc_data = value;
4563 }
4564 
4565 static void
4566 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4567 {
4568 	uint64_t value;
4569 	void *vaddr;
4570 
4571 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
4572 	value = ppd->pp_acc_data;
4573 
4574 	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
4575 	    "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n",
4576 	    (void *)e1000gp, (void *)ppd, value, vaddr);
4577 
4578 	switch (ppd->pp_acc_size) {
4579 	case 1:
4580 		*(uint8_t *)vaddr = (uint8_t)value;
4581 		break;
4582 
4583 	case 2:
4584 		*(uint16_t *)vaddr = (uint16_t)value;
4585 		break;
4586 
4587 	case 4:
4588 		*(uint32_t *)vaddr = (uint32_t)value;
4589 		break;
4590 
4591 	case 8:
4592 		*(uint64_t *)vaddr = (uint64_t)value;
4593 		break;
4594 	}
4595 }
4596 #endif
4597 
4598 /*
4599  * Loopback Support
4600  */
4601 static lb_property_t lb_normal =
4602 	{ normal,	"normal",	E1000G_LB_NONE		};
4603 static lb_property_t lb_external1000 =
4604 	{ external,	"1000Mbps",	E1000G_LB_EXTERNAL_1000	};
4605 static lb_property_t lb_external100 =
4606 	{ external,	"100Mbps",	E1000G_LB_EXTERNAL_100	};
4607 static lb_property_t lb_external10 =
4608 	{ external,	"10Mbps",	E1000G_LB_EXTERNAL_10	};
4609 static lb_property_t lb_phy =
4610 	{ internal,	"PHY",		E1000G_LB_INTERNAL_PHY	};
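/*
 * e1000g_loopback_ioctl - handle the LB_* loopback ioctls
 *
 * LB_GET_INFO_SIZE and LB_GET_INFO report which of the loopback modes above
 * the current PHY/media supports, LB_GET_MODE returns the mode currently in
 * force, and LB_SET_MODE switches modes through e1000g_set_loopback_mode().
 */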
4611 
4612 static enum ioc_reply
4613 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp)
4614 {
4615 	lb_info_sz_t *lbsp;
4616 	lb_property_t *lbpp;
4617 	struct e1000_hw *hw;
4618 	uint32_t *lbmp;
4619 	uint32_t size;
4620 	uint32_t value;
4621 
4622 	hw = &Adapter->shared;
4623 
4624 	if (mp->b_cont == NULL)
4625 		return (IOC_INVAL);
4626 
4627 	if (!e1000g_check_loopback_support(hw)) {
4628 		e1000g_log(NULL, CE_WARN,
4629 		    "Loopback is not supported on e1000g%d", Adapter->instance);
4630 		return (IOC_INVAL);
4631 	}
4632 
4633 	switch (iocp->ioc_cmd) {
4634 	default:
4635 		return (IOC_INVAL);
4636 
4637 	case LB_GET_INFO_SIZE:
4638 		size = sizeof (lb_info_sz_t);
4639 		if (iocp->ioc_count != size)
4640 			return (IOC_INVAL);
4641 
4642 		rw_enter(&Adapter->chip_lock, RW_WRITER);
4643 		e1000g_get_phy_state(Adapter);
4644 
4645 		/*
4646 		 * Workaround for hardware faults. In order to get a stable
4647 		 * PHY state, we wait for a specific interval and then try
4648 		 * again. The time delay is an empirical value based on our
4649 		 * testing.
4650 		 */
4651 		msec_delay(100);
4652 		e1000g_get_phy_state(Adapter);
4653 		rw_exit(&Adapter->chip_lock);
4654 
4655 		value = sizeof (lb_normal);
4656 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
4657 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
4658 		    (hw->phy.media_type == e1000_media_type_fiber) ||
4659 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
4660 			value += sizeof (lb_phy);
4661 			switch (hw->mac.type) {
4662 			case e1000_82571:
4663 			case e1000_82572:
4664 			case e1000_80003es2lan:
4665 				value += sizeof (lb_external1000);
4666 				break;
4667 			}
4668 		}
4669 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
4670 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
4671 			value += sizeof (lb_external100);
4672 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
4673 			value += sizeof (lb_external10);
4674 
4675 		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
4676 		*lbsp = value;
4677 		break;
4678 
4679 	case LB_GET_INFO:
4680 		value = sizeof (lb_normal);
4681 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
4682 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
4683 		    (hw->phy.media_type == e1000_media_type_fiber) ||
4684 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
4685 			value += sizeof (lb_phy);
4686 			switch (hw->mac.type) {
4687 			case e1000_82571:
4688 			case e1000_82572:
4689 			case e1000_80003es2lan:
4690 				value += sizeof (lb_external1000);
4691 				break;
4692 			}
4693 		}
4694 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
4695 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
4696 			value += sizeof (lb_external100);
4697 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
4698 			value += sizeof (lb_external10);
4699 
4700 		size = value;
4701 		if (iocp->ioc_count != size)
4702 			return (IOC_INVAL);
4703 
4704 		value = 0;
4705 		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
4706 		lbpp[value++] = lb_normal;
4707 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
4708 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
4709 		    (hw->phy.media_type == e1000_media_type_fiber) ||
4710 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
4711 			lbpp[value++] = lb_phy;
4712 			switch (hw->mac.type) {
4713 			case e1000_82571:
4714 			case e1000_82572:
4715 			case e1000_80003es2lan:
4716 				lbpp[value++] = lb_external1000;
4717 				break;
4718 			}
4719 		}
4720 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
4721 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
4722 			lbpp[value++] = lb_external100;
4723 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
4724 			lbpp[value++] = lb_external10;
4725 		break;
4726 
4727 	case LB_GET_MODE:
4728 		size = sizeof (uint32_t);
4729 		if (iocp->ioc_count != size)
4730 			return (IOC_INVAL);
4731 
4732 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
4733 		*lbmp = Adapter->loopback_mode;
4734 		break;
4735 
4736 	case LB_SET_MODE:
4737 		size = 0;
4738 		if (iocp->ioc_count != sizeof (uint32_t))
4739 			return (IOC_INVAL);
4740 
4741 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
4742 		if (!e1000g_set_loopback_mode(Adapter, *lbmp))
4743 			return (IOC_INVAL);
4744 		break;
4745 	}
4746 
4747 	iocp->ioc_count = size;
4748 	iocp->ioc_error = 0;
4749 
4750 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
4751 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4752 		return (IOC_INVAL);
4753 	}
4754 
4755 	return (IOC_REPLY);
4756 }
4757 
4758 static boolean_t
4759 e1000g_check_loopback_support(struct e1000_hw *hw)
4760 {
4761 	switch (hw->mac.type) {
4762 	case e1000_82540:
4763 	case e1000_82545:
4764 	case e1000_82545_rev_3:
4765 	case e1000_82546:
4766 	case e1000_82546_rev_3:
4767 	case e1000_82541:
4768 	case e1000_82541_rev_2:
4769 	case e1000_82547:
4770 	case e1000_82547_rev_2:
4771 	case e1000_82571:
4772 	case e1000_82572:
4773 	case e1000_82573:
4774 	case e1000_80003es2lan:
4775 		return (B_TRUE);
4776 	}
4777 	return (B_FALSE);
4778 }
4779 
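/*
 * e1000g_set_loopback_mode - switch the adapter into the requested loopback
 * mode
 *
 * E1000G_LB_NONE simply resets the chip back to normal operation; for the
 * other modes the corresponding setup routine is invoked and the routine
 * then waits for link, retrying once with a reset if link does not come up.
 */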
4780 static boolean_t
4781 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode)
4782 {
4783 	struct e1000_hw *hw;
4784 	int i, times;
4785 	boolean_t link_up;
4786 
4787 	if (mode == Adapter->loopback_mode)
4788 		return (B_TRUE);
4789 
4790 	hw = &Adapter->shared;
4791 	times = 0;
4792 
4793 	Adapter->loopback_mode = mode;
4794 
4795 	if (mode == E1000G_LB_NONE) {
4796 		/* Reset the chip */
4797 		hw->phy.autoneg_wait_to_complete = B_TRUE;
4798 		(void) e1000g_reset_adapter(Adapter);
4799 		hw->phy.autoneg_wait_to_complete = B_FALSE;
4800 		return (B_TRUE);
4801 	}
4802 
4803 again:
4804 
4805 	rw_enter(&Adapter->chip_lock, RW_WRITER);
4806 
4807 	switch (mode) {
4808 	default:
4809 		rw_exit(&Adapter->chip_lock);
4810 		return (B_FALSE);
4811 
4812 	case E1000G_LB_EXTERNAL_1000:
4813 		e1000g_set_external_loopback_1000(Adapter);
4814 		break;
4815 
4816 	case E1000G_LB_EXTERNAL_100:
4817 		e1000g_set_external_loopback_100(Adapter);
4818 		break;
4819 
4820 	case E1000G_LB_EXTERNAL_10:
4821 		e1000g_set_external_loopback_10(Adapter);
4822 		break;
4823 
4824 	case E1000G_LB_INTERNAL_PHY:
4825 		e1000g_set_internal_loopback(Adapter);
4826 		break;
4827 	}
4828 
4829 	times++;
4830 
4831 	rw_exit(&Adapter->chip_lock);
4832 
4833 	/* Wait for link up */
4834 	for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--)
4835 		msec_delay(100);
4836 
4837 	rw_enter(&Adapter->chip_lock, RW_WRITER);
4838 
4839 	link_up = e1000g_link_up(Adapter);
4840 
4841 	rw_exit(&Adapter->chip_lock);
4842 
4843 	if (!link_up) {
4844 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
4845 		    "Failed to get the link up");
4846 		if (times < 2) {
4847 			/* Reset the link */
4848 			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
4849 			    "Reset the link ...");
4850 			(void) e1000g_reset_adapter(Adapter);
4851 			goto again;
4852 		}
4853 	}
4854 
4855 	return (B_TRUE);
4856 }
4857 
4858 /*
4859  * The following loopback settings are from Intel's technical
4860  * document - "How To Loopback". All the register settings and
4861  * time delay values are taken directly from the document, which offers
4862  * no further explanation of them.
4863  */
4864 static void
4865 e1000g_set_internal_loopback(struct e1000g *Adapter)
4866 {
4867 	struct e1000_hw *hw;
4868 	uint32_t ctrl;
4869 	uint32_t status;
4870 	uint16_t phy_ctrl;
4871 	uint32_t txcw;
4872 
4873 	hw = &Adapter->shared;
4874 
4875 	/* Disable Smart Power Down */
4876 	phy_spd_state(hw, B_FALSE);
4877 
4878 	(void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
4879 	phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10);
4880 	phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000;
4881 
4882 	switch (hw->mac.type) {
4883 	case e1000_82540:
4884 	case e1000_82545:
4885 	case e1000_82545_rev_3:
4886 	case e1000_82546:
4887 	case e1000_82546_rev_3:
4888 	case e1000_82573:
4889 		/* Auto-MDI/MDIX off */
4890 		(void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
4891 		/* Reset PHY to update Auto-MDI/MDIX */
4892 		(void) e1000_write_phy_reg(hw, PHY_CONTROL,
4893 		    phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN);
4894 		/* Reset PHY to auto-neg off and force 1000 */
4895 		(void) e1000_write_phy_reg(hw, PHY_CONTROL,
4896 		    phy_ctrl | MII_CR_RESET);
4897 		/*
4898 		 * Disable PHY receiver for 82540/545/546 and 82573 Family.
4899 		 * See comments above e1000g_set_internal_loopback() for the
4900 		 * background.
4901 		 */
4902 		(void) e1000_write_phy_reg(hw, 29, 0x001F);
4903 		(void) e1000_write_phy_reg(hw, 30, 0x8FFC);
4904 		(void) e1000_write_phy_reg(hw, 29, 0x001A);
4905 		(void) e1000_write_phy_reg(hw, 30, 0x8FF0);
4906 		break;
4907 	case e1000_80003es2lan:
4908 		/* Force Link Up */
4909 		(void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
4910 		    0x1CC);
4911 		/* Sets PCS loopback at 1Gbs */
4912 		(void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
4913 		    0x1046);
4914 		break;
4915 	}
4916 
4917 	/* Set loopback */
4918 	(void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK);
4919 
4920 	msec_delay(250);
4921 
4922 	/* Now set up the MAC to the same speed/duplex as the PHY. */
4923 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4924 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
4925 	ctrl |= (E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
4926 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
4927 	    E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
4928 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
4929 
4930 	switch (hw->mac.type) {
4931 	case e1000_82540:
4932 	case e1000_82545:
4933 	case e1000_82545_rev_3:
4934 	case e1000_82546:
4935 	case e1000_82546_rev_3:
4936 		/*
4937 		 * For some serdes we'll need to commit the writes now
4938 		 * so that the status is updated on link
4939 		 */
4940 		if (hw->phy.media_type == e1000_media_type_internal_serdes) {
4941 			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4942 			msec_delay(100);
4943 			ctrl = E1000_READ_REG(hw, E1000_CTRL);
4944 		}
4945 
4946 		if (hw->phy.media_type == e1000_media_type_copper) {
4947 			/* Invert Loss of Signal */
4948 			ctrl |= E1000_CTRL_ILOS;
4949 		} else {
4950 			/* Set ILOS on fiber nic if half duplex is detected */
4951 			status = E1000_READ_REG(hw, E1000_STATUS);
4952 			if ((status & E1000_STATUS_FD) == 0)
4953 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
4954 		}
4955 		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4956 		break;
4957 
4958 	case e1000_82571:
4959 	case e1000_82572:
4960 		/*
4961 		 * The fiber/SerDes versions of this adapter do not contain an
4962 		 * accessible PHY. Therefore, loopback beyond MAC must be done
4963 		 * using SerDes analog loopback.
4964 		 */
4965 		if (hw->phy.media_type != e1000_media_type_copper) {
4966 			status = E1000_READ_REG(hw, E1000_STATUS);
4967 			/* Set ILOS on fiber nic if half duplex is detected */
4968 			if (((status & E1000_STATUS_LU) == 0) ||
4969 			    ((status & E1000_STATUS_FD) == 0) ||
4970 			    (hw->phy.media_type ==
4971 			    e1000_media_type_internal_serdes))
4972 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
4973 
4974 			/* Disable autoneg by setting bit 31 of TXCW to zero */
4975 			txcw = E1000_READ_REG(hw, E1000_TXCW);
4976 			txcw &= ~((uint32_t)1 << 31);
4977 			E1000_WRITE_REG(hw, E1000_TXCW, txcw);
4978 
4979 			/*
4980 			 * Write 0x410 to Serdes Control register
4981 			 * to enable Serdes analog loopback
4982 			 */
4983 			E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
4984 			msec_delay(10);
4985 		}
4986 		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4987 		break;
4988 
4989 	case e1000_82573:
4990 		ctrl |= E1000_CTRL_ILOS;
4991 		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4992 		break;
4993 	}
4994 }
4995 
4996 static void
4997 e1000g_set_external_loopback_1000(struct e1000g *Adapter)
4998 {
4999 	struct e1000_hw *hw;
5000 	uint32_t rctl;
5001 	uint32_t ctrl_ext;
5002 	uint32_t ctrl;
5003 	uint32_t status;
5004 	uint32_t txcw;
5005 	uint16_t phydata;
5006 
5007 	hw = &Adapter->shared;
5008 
5009 	/* Disable Smart Power Down */
5010 	phy_spd_state(hw, B_FALSE);
5011 
5012 	switch (hw->mac.type) {
5013 	case e1000_82571:
5014 	case e1000_82572:
5015 		switch (hw->phy.media_type) {
5016 		case e1000_media_type_copper:
5017 			/* Force link up (Must be done before the PHY writes) */
5018 			ctrl = E1000_READ_REG(hw, E1000_CTRL);
5019 			ctrl |= E1000_CTRL_SLU;	/* Force Link Up */
5020 			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5021 
5022 			rctl = E1000_READ_REG(hw, E1000_RCTL);
5023 			rctl |= (E1000_RCTL_EN |
5024 			    E1000_RCTL_SBP |
5025 			    E1000_RCTL_UPE |
5026 			    E1000_RCTL_MPE |
5027 			    E1000_RCTL_LPE |
5028 			    E1000_RCTL_BAM);		/* 0x803E */
5029 			E1000_WRITE_REG(hw, E1000_RCTL, rctl);
5030 
5031 			ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5032 			ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA |
5033 			    E1000_CTRL_EXT_SDP6_DATA |
5034 			    E1000_CTRL_EXT_SDP7_DATA |
5035 			    E1000_CTRL_EXT_SDP4_DIR |
5036 			    E1000_CTRL_EXT_SDP6_DIR |
5037 			    E1000_CTRL_EXT_SDP7_DIR);	/* 0x0DD0 */
5038 			E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5039 
5040 			/*
5041 			 * This sequence tunes the PHY's SDP and contains no
5042 			 * customer-settable values. For background, see the
5043 			 * comments above e1000g_set_internal_loopback().
5044 			 */
5045 			(void) e1000_write_phy_reg(hw, 0x0, 0x140);
5046 			msec_delay(10);
5047 			(void) e1000_write_phy_reg(hw, 0x9, 0x1A00);
5048 			(void) e1000_write_phy_reg(hw, 0x12, 0xC10);
5049 			(void) e1000_write_phy_reg(hw, 0x12, 0x1C10);
5050 			(void) e1000_write_phy_reg(hw, 0x1F37, 0x76);
5051 			(void) e1000_write_phy_reg(hw, 0x1F33, 0x1);
5052 			(void) e1000_write_phy_reg(hw, 0x1F33, 0x0);
5053 
5054 			(void) e1000_write_phy_reg(hw, 0x1F35, 0x65);
5055 			(void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C);
5056 			(void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC);
5057 			(void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C);
5058 			(void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC);
5059 
5060 			msec_delay(50);
5061 			break;
5062 		case e1000_media_type_fiber:
5063 		case e1000_media_type_internal_serdes:
5064 			status = E1000_READ_REG(hw, E1000_STATUS);
5065 			if (((status & E1000_STATUS_LU) == 0) ||
5066 			    (hw->phy.media_type ==
5067 			    e1000_media_type_internal_serdes)) {
5068 				ctrl = E1000_READ_REG(hw, E1000_CTRL);
5069 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5070 				E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5071 			}
5072 
5073 			/* Disable autoneg by setting bit 31 of TXCW to zero */
5074 			txcw = E1000_READ_REG(hw, E1000_TXCW);
5075 			txcw &= ~((uint32_t)1 << 31);
5076 			E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5077 
5078 			/*
5079 			 * Write 0x410 to Serdes Control register
5080 			 * to enable Serdes analog loopback
5081 			 */
5082 			E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5083 			msec_delay(10);
5084 			break;
5085 		default:
5086 			break;
5087 		}
5088 		break;
5089 	case e1000_80003es2lan:
5090 		(void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata);
5091 		(void) e1000_write_phy_reg(hw, GG82563_REG(6, 16),
5092 		    phydata | (1 << 5));
5093 		Adapter->param_adv_autoneg = 1;
5094 		Adapter->param_adv_1000fdx = 1;
5095 		(void) e1000g_reset_link(Adapter);
5096 		break;
5097 	}
5098 }
5099 
5100 static void
5101 e1000g_set_external_loopback_100(struct e1000g *Adapter)
5102 {
5103 	struct e1000_hw *hw;
5104 	uint32_t ctrl;
5105 	uint16_t phy_ctrl;
5106 
5107 	hw = &Adapter->shared;
5108 
5109 	/* Disable Smart Power Down */
5110 	phy_spd_state(hw, B_FALSE);
5111 
5112 	phy_ctrl = (MII_CR_FULL_DUPLEX |
5113 	    MII_CR_SPEED_100);
5114 
5115 	/* Force 100/FD, reset PHY */
5116 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5117 	    phy_ctrl | MII_CR_RESET);	/* 0xA100 */
5118 	msec_delay(10);
5119 
5120 	/* Force 100/FD */
5121 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5122 	    phy_ctrl);			/* 0x2100 */
5123 	msec_delay(10);
5124 
5125 	/* Now setup the MAC to the same speed/duplex as the PHY. */
5126 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5127 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
5128 	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
5129 	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
5130 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
5131 	    E1000_CTRL_SPD_100 |	/* Force Speed to 100 */
5132 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
5133 
5134 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5135 }
5136 
5137 static void
5138 e1000g_set_external_loopback_10(struct e1000g *Adapter)
5139 {
5140 	struct e1000_hw *hw;
5141 	uint32_t ctrl;
5142 	uint16_t phy_ctrl;
5143 
5144 	hw = &Adapter->shared;
5145 
5146 	/* Disable Smart Power Down */
5147 	phy_spd_state(hw, B_FALSE);
5148 
5149 	phy_ctrl = (MII_CR_FULL_DUPLEX |
5150 	    MII_CR_SPEED_10);
5151 
5152 	/* Force 10/FD, reset PHY */
5153 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5154 	    phy_ctrl | MII_CR_RESET);	/* 0x8100 */
5155 	msec_delay(10);
5156 
5157 	/* Force 10/FD */
5158 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5159 	    phy_ctrl);			/* 0x0100 */
5160 	msec_delay(10);
5161 
5162 	/* Now setup the MAC to the same speed/duplex as the PHY. */
5163 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5164 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
5165 	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
5166 	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
5167 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
5168 	    E1000_CTRL_SPD_10 |		/* Force Speed to 10 */
5169 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
5170 
5171 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5172 }
5173 
5174 #ifdef __sparc
5175 static boolean_t
5176 e1000g_find_mac_address(struct e1000g *Adapter)
5177 {
5178 	struct e1000_hw *hw = &Adapter->shared;
5179 	uchar_t *bytes;
5180 	struct ether_addr sysaddr;
5181 	uint_t nelts;
5182 	int err;
5183 	boolean_t found = B_FALSE;
5184 
5185 	/*
5186 	 * The "vendor's factory-set address" may already have
5187 	 * been extracted from the chip, but if the property
5188 	 * "local-mac-address" is set we use that instead.
5189 	 *
5190 	 * We check whether it looks like an array of 6
5191 	 * bytes (which it should, if OBP set it).  If we can't
5192 	 * make sense of it this way, we'll ignore it.
5193 	 */
5194 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
5195 	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
5196 	if (err == DDI_PROP_SUCCESS) {
5197 		if (nelts == ETHERADDRL) {
5198 			while (nelts--)
5199 				hw->mac.addr[nelts] = bytes[nelts];
5200 			found = B_TRUE;
5201 		}
5202 		ddi_prop_free(bytes);
5203 	}
5204 
5205 	/*
5206 	 * Look up the OBP property "local-mac-address?". If the user has set
5207 	 * 'local-mac-address? = false', use "the system address" instead.
5208 	 */
5209 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0,
5210 	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
5211 		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
5212 			if (localetheraddr(NULL, &sysaddr) != 0) {
5213 				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
5214 				found = B_TRUE;
5215 			}
5216 		}
5217 		ddi_prop_free(bytes);
5218 	}
5219 
5220 	/*
5221 	 * Finally(!), if there's a valid "mac-address" property (created
5222 	 * if we netbooted from this interface), we must use this instead
5223 	 * of any of the above to ensure that the NFS/install server doesn't
5224 	 * get confused by the address changing as Solaris takes over!
5225 	 */
5226 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
5227 	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
5228 	if (err == DDI_PROP_SUCCESS) {
5229 		if (nelts == ETHERADDRL) {
5230 			while (nelts--)
5231 				hw->mac.addr[nelts] = bytes[nelts];
5232 			found = B_TRUE;
5233 		}
5234 		ddi_prop_free(bytes);
5235 	}
5236 
5237 	if (found) {
5238 		bcopy(hw->mac.addr, hw->mac.perm_addr,
5239 		    ETHERADDRL);
5240 	}
5241 
5242 	return (found);
5243 }
5244 #endif
5245 
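/*
 * e1000g_add_intrs - allocate and register the driver's interrupts
 *
 * Queries the interrupt types the device supports, prefers MSI on PCI
 * Express adapters (MSI is disabled on older PCI/PCI-X parts, see below),
 * and falls back to a legacy fixed interrupt if MSI registration fails.
 */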
5246 static int
5247 e1000g_add_intrs(struct e1000g *Adapter)
5248 {
5249 	dev_info_t *devinfo;
5250 	int intr_types;
5251 	int rc;
5252 
5253 	devinfo = Adapter->dip;
5254 
5255 	/* Get supported interrupt types */
5256 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
5257 
5258 	if (rc != DDI_SUCCESS) {
5259 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5260 		    "Get supported interrupt types failed: %d\n", rc);
5261 		return (DDI_FAILURE);
5262 	}
5263 
5264 	/*
5265 	 * Based on Intel Technical Advisory document (TA-160), there are some
5266 	 * cases where older Intel PCI-X NICs may "advertise" to the OS that
5267 	 * they support MSI, but in fact have problems.
5268 	 * So we should only enable MSI for PCI-E NICs and disable MSI for old
5269 	 * PCI/PCI-X NICs.
5270 	 */
5271 	if (Adapter->shared.mac.type < e1000_82571)
5272 		Adapter->msi_enable = B_FALSE;
5273 
5274 	if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) {
5275 		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI);
5276 
5277 		if (rc != DDI_SUCCESS) {
5278 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5279 			    "Add MSI failed, trying Legacy interrupts\n");
5280 		} else {
5281 			Adapter->intr_type = DDI_INTR_TYPE_MSI;
5282 		}
5283 	}
5284 
5285 	if ((Adapter->intr_type == 0) &&
5286 	    (intr_types & DDI_INTR_TYPE_FIXED)) {
5287 		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED);
5288 
5289 		if (rc != DDI_SUCCESS) {
5290 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5291 			    "Add Legacy interrupts failed\n");
5292 			return (DDI_FAILURE);
5293 		}
5294 
5295 		Adapter->intr_type = DDI_INTR_TYPE_FIXED;
5296 	}
5297 
5298 	if (Adapter->intr_type == 0) {
5299 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5300 		    "No interrupts registered\n");
5301 		return (DDI_FAILURE);
5302 	}
5303 
5304 	return (DDI_SUCCESS);
5305 }
5306 
5307 /*
5308  * e1000g_intr_add() handles MSI/Legacy interrupts
5309  */
5310 static int
5311 e1000g_intr_add(struct e1000g *Adapter, int intr_type)
5312 {
5313 	dev_info_t *devinfo;
5314 	int count, avail, actual;
5315 	int x, y, rc, inum = 0;
5316 	int flag;
5317 	ddi_intr_handler_t *intr_handler;
5318 
5319 	devinfo = Adapter->dip;
5320 
5321 	/* get number of interrupts */
5322 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
5323 	if ((rc != DDI_SUCCESS) || (count == 0)) {
5324 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5325 		    "Get interrupt number failed. Return: %d, count: %d\n",
5326 		    rc, count);
5327 		return (DDI_FAILURE);
5328 	}
5329 
5330 	/* get number of available interrupts */
5331 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
5332 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
5333 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5334 		    "Get interrupt available number failed. "
5335 		    "Return: %d, available: %d\n", rc, avail);
5336 		return (DDI_FAILURE);
5337 	}
5338 
5339 	if (avail < count) {
5340 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5341 		    "Interrupts count: %d, available: %d\n",
5342 		    count, avail);
5343 	}
5344 
5345 	/* Allocate an array of interrupt handles */
5346 	Adapter->intr_size = count * sizeof (ddi_intr_handle_t);
5347 	Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP);
5348 
5349 	/* Set NORMAL behavior for both MSI and FIXED interrupt */
5350 	flag = DDI_INTR_ALLOC_NORMAL;
5351 
5352 	/* call ddi_intr_alloc() */
5353 	rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum,
5354 	    count, &actual, flag);
5355 
5356 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
5357 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5358 		    "Allocate interrupts failed: %d\n", rc);
5359 
5360 		kmem_free(Adapter->htable, Adapter->intr_size);
5361 		return (DDI_FAILURE);
5362 	}
5363 
5364 	if (actual < count) {
5365 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5366 		    "Interrupts requested: %d, received: %d\n",
5367 		    count, actual);
5368 	}
5369 
5370 	Adapter->intr_cnt = actual;
5371 
5372 	/* Get priority for first msi, assume remaining are all the same */
5373 	rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri);
5374 
5375 	if (rc != DDI_SUCCESS) {
5376 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5377 		    "Get interrupt priority failed: %d\n", rc);
5378 
5379 		/* Free already allocated intr */
5380 		for (y = 0; y < actual; y++)
5381 			(void) ddi_intr_free(Adapter->htable[y]);
5382 
5383 		kmem_free(Adapter->htable, Adapter->intr_size);
5384 		return (DDI_FAILURE);
5385 	}
5386 
5387 	/*
5388 	 * In Legacy Interrupt mode, for PCI-Express adapters, we should
5389 	 * use the interrupt service routine e1000g_intr_pciexpress()
5390 	 * to avoid interrupt stealing when sharing an interrupt with other
5391 	 * devices.
5392 	 */
5393 	if (Adapter->shared.mac.type < e1000_82571)
5394 		intr_handler = (ddi_intr_handler_t *)e1000g_intr;
5395 	else
5396 		intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress;
5397 
5398 	/* Call ddi_intr_add_handler() */
5399 	for (x = 0; x < actual; x++) {
5400 		rc = ddi_intr_add_handler(Adapter->htable[x],
5401 		    intr_handler, (caddr_t)Adapter, NULL);
5402 
5403 		if (rc != DDI_SUCCESS) {
5404 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5405 			    "Add interrupt handler failed: %d\n", rc);
5406 
5407 			/* Remove already added handler */
5408 			for (y = 0; y < x; y++)
5409 				(void) ddi_intr_remove_handler(
5410 				    Adapter->htable[y]);
5411 
5412 			/* Free already allocated intr */
5413 			for (y = 0; y < actual; y++)
5414 				(void) ddi_intr_free(Adapter->htable[y]);
5415 
5416 			kmem_free(Adapter->htable, Adapter->intr_size);
5417 			return (DDI_FAILURE);
5418 		}
5419 	}
5420 
5421 	rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap);
5422 
5423 	if (rc != DDI_SUCCESS) {
5424 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5425 		    "Get interrupt cap failed: %d\n", rc);
5426 
5427 		/* Free already allocated intr */
5428 		for (y = 0; y < actual; y++) {
5429 			(void) ddi_intr_remove_handler(Adapter->htable[y]);
5430 			(void) ddi_intr_free(Adapter->htable[y]);
5431 		}
5432 
5433 		kmem_free(Adapter->htable, Adapter->intr_size);
5434 		return (DDI_FAILURE);
5435 	}
5436 
5437 	return (DDI_SUCCESS);
5438 }
5439 
5440 static int
5441 e1000g_rem_intrs(struct e1000g *Adapter)
5442 {
5443 	int x;
5444 	int rc;
5445 
5446 	for (x = 0; x < Adapter->intr_cnt; x++) {
5447 		rc = ddi_intr_remove_handler(Adapter->htable[x]);
5448 		if (rc != DDI_SUCCESS) {
5449 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5450 			    "Remove intr handler failed: %d\n", rc);
5451 			return (DDI_FAILURE);
5452 		}
5453 
5454 		rc = ddi_intr_free(Adapter->htable[x]);
5455 		if (rc != DDI_SUCCESS) {
5456 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5457 			    "Free intr failed: %d\n", rc);
5458 			return (DDI_FAILURE);
5459 		}
5460 	}
5461 
5462 	kmem_free(Adapter->htable, Adapter->intr_size);
5463 
5464 	return (DDI_SUCCESS);
5465 }
5466 
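/*
 * e1000g_enable_intrs - enable the allocated interrupt vectors, using
 * ddi_intr_block_enable() when the DDI_INTR_FLAG_BLOCK capability is
 * present and ddi_intr_enable() on each vector otherwise.
 */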
5467 static int
5468 e1000g_enable_intrs(struct e1000g *Adapter)
5469 {
5470 	int x;
5471 	int rc;
5472 
5473 	/* Enable interrupts */
5474 	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
5475 		/* Call ddi_intr_block_enable() for MSI */
5476 		rc = ddi_intr_block_enable(Adapter->htable,
5477 		    Adapter->intr_cnt);
5478 		if (rc != DDI_SUCCESS) {
5479 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5480 			    "Enable block intr failed: %d\n", rc);
5481 			return (DDI_FAILURE);
5482 		}
5483 	} else {
5484 		/* Call ddi_intr_enable() for legacy/MSI without block-enable support */
5485 		for (x = 0; x < Adapter->intr_cnt; x++) {
5486 			rc = ddi_intr_enable(Adapter->htable[x]);
5487 			if (rc != DDI_SUCCESS) {
5488 				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5489 				    "Enable intr failed: %d\n", rc);
5490 				return (DDI_FAILURE);
5491 			}
5492 		}
5493 	}
5494 
5495 	return (DDI_SUCCESS);
5496 }
5497 
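/*
 * e1000g_disable_intrs - disable the interrupt vectors, mirroring
 * e1000g_enable_intrs(): block disable when supported, otherwise one
 * vector at a time.
 */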
5498 static int
5499 e1000g_disable_intrs(struct e1000g *Adapter)
5500 {
5501 	int x;
5502 	int rc;
5503 
5504 	/* Disable all interrupts */
5505 	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
5506 		rc = ddi_intr_block_disable(Adapter->htable,
5507 		    Adapter->intr_cnt);
5508 		if (rc != DDI_SUCCESS) {
5509 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5510 			    "Disable block intr failed: %d\n", rc);
5511 			return (DDI_FAILURE);
5512 		}
5513 	} else {
5514 		for (x = 0; x < Adapter->intr_cnt; x++) {
5515 			rc = ddi_intr_disable(Adapter->htable[x]);
5516 			if (rc != DDI_SUCCESS) {
5517 				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5518 				    "Disable intr failed: %d\n", rc);
5519 				return (DDI_FAILURE);
5520 			}
5521 		}
5522 	}
5523 
5524 	return (DDI_SUCCESS);
5525 }
5526 
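/*
 * Typical call flow (sketch only; see e1000g_attach()/e1000g_detach() and
 * e1000g_start()/e1000g_stop() for the exact call sites): interrupts are
 * allocated and handlers added at attach time, enabled when the interface
 * starts, disabled when it stops, and removed at detach time.
 */
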
5527 /*
5528  * e1000g_get_phy_state - read the PHY registers and save their state in the adapter
5529  */
5530 static void
5531 e1000g_get_phy_state(struct e1000g *Adapter)
5532 {
5533 	struct e1000_hw *hw = &Adapter->shared;
5534 
5535 	(void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl);
5536 	(void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status);
5537 	(void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &Adapter->phy_an_adv);
5538 	(void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &Adapter->phy_an_exp);
5539 	(void) e1000_read_phy_reg(hw, PHY_EXT_STATUS, &Adapter->phy_ext_status);
5540 	(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &Adapter->phy_1000t_ctrl);
5541 	(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS,
5542 	    &Adapter->phy_1000t_status);
5543 	(void) e1000_read_phy_reg(hw, PHY_LP_ABILITY, &Adapter->phy_lp_able);
5544 
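	/*
	 * Capability parameters, derived from the PHY status, extended
	 * status and autoneg advertisement registers read above.
	 */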
5545 	Adapter->param_autoneg_cap =
5546 	    (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
5547 	Adapter->param_pause_cap =
5548 	    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
5549 	Adapter->param_asym_pause_cap =
5550 	    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
5551 	Adapter->param_1000fdx_cap =
5552 	    ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5553 	    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
5554 	Adapter->param_1000hdx_cap =
5555 	    ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
5556 	    (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
5557 	Adapter->param_100t4_cap =
5558 	    (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
5559 	Adapter->param_100fdx_cap =
5560 	    ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5561 	    (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
5562 	Adapter->param_100hdx_cap =
5563 	    ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
5564 	    (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
5565 	Adapter->param_10fdx_cap =
5566 	    (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
5567 	Adapter->param_10hdx_cap =
5568 	    (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
5569 
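	/* Link settings currently advertised by this adapter */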
5570 	Adapter->param_adv_autoneg = hw->mac.autoneg;
5571 	Adapter->param_adv_pause =
5572 	    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
5573 	Adapter->param_adv_asym_pause =
5574 	    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
5575 	Adapter->param_adv_1000hdx =
5576 	    (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
5577 	Adapter->param_adv_100t4 =
5578 	    (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
5579 	if (Adapter->param_adv_autoneg == 1) {
5580 		Adapter->param_adv_1000fdx =
5581 		    (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS) ? 1 : 0;
5582 		Adapter->param_adv_100fdx =
5583 		    (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS) ? 1 : 0;
5584 		Adapter->param_adv_100hdx =
5585 		    (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS) ? 1 : 0;
5586 		Adapter->param_adv_10fdx =
5587 		    (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
5588 		Adapter->param_adv_10hdx =
5589 		    (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
5590 	}
5591 
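	/*
	 * Link partner abilities, from the autoneg expansion, link partner
	 * ability and 1000T status registers.
	 */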
5592 	Adapter->param_lp_autoneg =
5593 	    (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
5594 	Adapter->param_lp_pause =
5595 	    (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
5596 	Adapter->param_lp_asym_pause =
5597 	    (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
5598 	Adapter->param_lp_1000fdx =
5599 	    (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
5600 	Adapter->param_lp_1000hdx =
5601 	    (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
5602 	Adapter->param_lp_100t4 =
5603 	    (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
5604 	Adapter->param_lp_100fdx =
5605 	    (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
5606 	Adapter->param_lp_100hdx =
5607 	    (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
5608 	Adapter->param_lp_10fdx =
5609 	    (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
5610 	Adapter->param_lp_10hdx =
5611 	    (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
5612 }
5613 
5614 /*
5615  * FMA support
5616  */
5617 
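/*
 * e1000g_check_acc_handle - return the FMA error status of a register
 * access handle; the recorded error is cleared so later checks only see
 * new faults.
 */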
5618 int
5619 e1000g_check_acc_handle(ddi_acc_handle_t handle)
5620 {
5621 	ddi_fm_error_t de;
5622 
5623 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5624 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
5625 	return (de.fme_status);
5626 }
5627 
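/*
 * e1000g_check_dma_handle - return the FMA error status of a DMA handle.
 * Unlike the access-handle check above, the error state is not cleared
 * here.
 */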
5628 int
5629 e1000g_check_dma_handle(ddi_dma_handle_t handle)
5630 {
5631 	ddi_fm_error_t de;
5632 
5633 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
5634 	return (de.fme_status);
5635 }
5636 
5637 /*
5638  * The IO fault service error handling callback function
5639  */
5640 /* ARGSUSED2 */
5641 static int
5642 e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
5643 {
5644 	/*
5645 	 * As the driver can always deal with an error in any DMA or
5646 	 * access handle, we can just return the fme_status value.
5647 	 */
5648 	pci_ereport_post(dip, err, NULL);
5649 	return (err->fme_status);
5650 }
5651 
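/*
 * e1000g_fm_init - register the configured FMA capabilities with the I/O
 * fault services framework.
 */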
5652 static void
5653 e1000g_fm_init(struct e1000g *Adapter)
5654 {
5655 	ddi_iblock_cookie_t iblk;
5656 	int fma_acc_flag, fma_dma_flag;
5657 
5658 	/* Only register with IO Fault Services if we have some capability */
5659 	if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
5660 		e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
5661 		fma_acc_flag = 1;
5662 	} else {
5663 		e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
5664 		fma_acc_flag = 0;
5665 	}
5666 
5667 	if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
5668 		fma_dma_flag = 1;
5669 	} else {
5670 		fma_dma_flag = 0;
5671 	}
5672 
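	/*
	 * Hand the access/DMA fault-check decisions to the rest of the
	 * driver so later DMA and register-access setup can honor them.
	 */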
5673 	(void) e1000g_set_fma_flags(Adapter, fma_acc_flag, fma_dma_flag);
5674 
5675 	if (Adapter->fm_capabilities) {
5676 
5677 		/* Register capabilities with IO Fault Services */
5678 		ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk);
5679 
5680 		/*
5681 		 * Initialize pci ereport capabilities if ereport capable
5682 		 */
5683 		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
5684 		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5685 			pci_ereport_setup(Adapter->dip);
5686 
5687 		/*
5688 		 * Register error callback if error callback capable
5689 		 */
5690 		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5691 			ddi_fm_handler_register(Adapter->dip,
5692 			    e1000g_fm_error_cb, (void *)Adapter);
5693 	}
5694 }
5695 
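/*
 * e1000g_fm_fini - undo e1000g_fm_init(): unregister the error callback,
 * tear down ereport support and detach from the fault services framework.
 */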
5696 static void
5697 e1000g_fm_fini(struct e1000g *Adapter)
5698 {
5699 	/* Only unregister FMA capabilities if we registered some */
5700 	if (Adapter->fm_capabilities) {
5701 
5702 		/*
5703 		 * Release any resources allocated by pci_ereport_setup()
5704 		 */
5705 		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
5706 		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5707 			pci_ereport_teardown(Adapter->dip);
5708 
5709 		/*
5710 		 * Un-register error callback if error callback capable
5711 		 */
5712 		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5713 			ddi_fm_handler_unregister(Adapter->dip);
5714 
5715 		/* Unregister from IO Fault Services */
5716 		ddi_fm_fini(Adapter->dip);
5717 	}
5718 }
5719 
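/*
 * e1000g_fm_ereport - post an ereport of the given class, relative to
 * DDI_FM_DEVICE, if ereport capability was registered.
 */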
5720 void
5721 e1000g_fm_ereport(struct e1000g *Adapter, char *detail)
5722 {
5723 	uint64_t ena;
5724 	char buf[FM_MAX_CLASS];
5725 
5726 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
5727 	ena = fm_ena_generate(0, FM_ENA_FMT1);
5728 	if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) {
5729 		ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP,
5730 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
5731 	}
5732 }
5733 
5734 /*
5735  * quiesce(9E) entry point.
5736  *
5737  * This function is called when the system is single-threaded at high
5738  * PIL with preemption disabled. Therefore, this function must not
5739  * block.
5740  *
5741  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5742  * DDI_FAILURE indicates an error condition and should almost never happen.
5743  */
5744 static int
5745 e1000g_quiesce(dev_info_t *devinfo)
5746 {
5747 	struct e1000g *Adapter;
5748 
5749 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
5750 
5751 	if (Adapter == NULL)
5752 		return (DDI_FAILURE);
5753 
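	/*
	 * Mask and clear any pending interrupts, then reset the MAC so the
	 * device stops all DMA and interrupt activity.
	 */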
5754 	e1000g_clear_all_interrupts(Adapter);
5755 
5756 	(void) e1000_reset_hw(&Adapter->shared);
5757 
5758 	/* Set up our HW Tx Head & Tail descriptor pointers */
5759 	E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
5760 	E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
5761 
5762 	/* Set up our HW Rx Head & Tail descriptor pointers */
5763 	E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0);
5764 	E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0);
5765 
5766 	return (DDI_SUCCESS);
5767 }
5768 
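/*
 * e1000g_get_def_val - return the default value for a MAC property, based
 * on the capabilities reported by the PHY.
 */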
5769 static int
5770 e1000g_get_def_val(struct e1000g *Adapter, mac_prop_id_t pr_num,
5771     uint_t pr_valsize, void *pr_val)
5772 {
5773 	link_flowctrl_t fl;
5774 	int err = 0;
5775 
5776 	ASSERT(pr_valsize > 0);
5777 	switch (pr_num) {
5778 	case MAC_PROP_AUTONEG:
5779 		*(uint8_t *)pr_val =
5780 		    ((Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0);
5781 		break;
5782 	case MAC_PROP_FLOWCTRL:
5783 		if (pr_valsize < sizeof (link_flowctrl_t))
5784 			return (EINVAL);
5785 		fl = LINK_FLOWCTRL_BI;
5786 		bcopy(&fl, pr_val, sizeof (fl));
5787 		break;
5788 	case MAC_PROP_ADV_1000FDX_CAP:
5789 	case MAC_PROP_EN_1000FDX_CAP:
5790 		*(uint8_t *)pr_val =
5791 		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5792 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
5793 		break;
5794 	case MAC_PROP_ADV_1000HDX_CAP:
5795 	case MAC_PROP_EN_1000HDX_CAP:
5796 		*(uint8_t *)pr_val =
5797 		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
5798 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
5799 		break;
5800 	case MAC_PROP_ADV_100FDX_CAP:
5801 	case MAC_PROP_EN_100FDX_CAP:
5802 		*(uint8_t *)pr_val =
5803 		    ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5804 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
5805 		break;
5806 	case MAC_PROP_ADV_100HDX_CAP:
5807 	case MAC_PROP_EN_100HDX_CAP:
5808 		*(uint8_t *)pr_val =
5809 		    ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
5810 		    (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
5811 		break;
5812 	case MAC_PROP_ADV_10FDX_CAP:
5813 	case MAC_PROP_EN_10FDX_CAP:
5814 		*(uint8_t *)pr_val =
5815 		    (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
5816 		break;
5817 	case MAC_PROP_ADV_10HDX_CAP:
5818 	case MAC_PROP_EN_10HDX_CAP:
5819 		*(uint8_t *)pr_val =
5820 		    (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
5821 		break;
5822 	default:
5823 		err = ENOTSUP;
5824 		break;
5825 	}
5826 	return (err);
5827 }
5828 
5829 /*
5830  * synchronize the adv* and en* parameters.
5831  *
5832  * See comments in <sys/dld.h> for details of the *_en_*
5833  * parameters. Setting the adv parameters via ndd will synchronize
5834  * all the en parameters with the corresponding adv parameters,
5835  * implicitly disabling any settings made via dladm.
5836  */
5837 static void
5838 e1000g_param_sync(struct e1000g *Adapter)
5839 {
5840 	Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx;
5841 	Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx;
5842 	Adapter->param_en_100fdx = Adapter->param_adv_100fdx;
5843 	Adapter->param_en_100hdx = Adapter->param_adv_100hdx;
5844 	Adapter->param_en_10fdx = Adapter->param_adv_10fdx;
5845 	Adapter->param_en_10hdx = Adapter->param_adv_10hdx;
5846 }
5847 
5848 /*
5849  * e1000g_get_driver_control - tell manageability firmware that the driver
5850  * has control.
5851  */
5852 static void
5853 e1000g_get_driver_control(struct e1000_hw *hw)
5854 {
5855 	uint32_t ctrl_ext;
5856 	uint32_t swsm;
5857 
5858 	/* tell manageability firmware the driver has taken over */
5859 	switch (hw->mac.type) {
5860 	case e1000_82573:
5861 		swsm = E1000_READ_REG(hw, E1000_SWSM);
5862 		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);
5863 		break;
5864 	case e1000_82571:
5865 	case e1000_82572:
5866 	case e1000_82574:
5867 	case e1000_80003es2lan:
5868 	case e1000_ich8lan:
5869 	case e1000_ich9lan:
5870 	case e1000_ich10lan:
5871 		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5872 		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
5873 		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
5874 		break;
5875 	default:
5876 		/* no manageability firmware: do nothing */
5877 		break;
5878 	}
5879 }
5880 
5881 /*
5882  * e1000g_release_driver_control - tell manageability firmware that the driver
5883  * has released control.
5884  */
5885 static void
5886 e1000g_release_driver_control(struct e1000_hw *hw)
5887 {
5888 	uint32_t ctrl_ext;
5889 	uint32_t swsm;
5890 
5891 	/* tell manageability firmware the driver has released control */
5892 	switch (hw->mac.type) {
5893 	case e1000_82573:
5894 		swsm = E1000_READ_REG(hw, E1000_SWSM);
5895 		E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
5896 		break;
5897 	case e1000_82571:
5898 	case e1000_82572:
5899 	case e1000_82574:
5900 	case e1000_80003es2lan:
5901 	case e1000_ich8lan:
5902 	case e1000_ich9lan:
5903 	case e1000_ich10lan:
5904 		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5905 		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
5906 		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
5907 		break;
5908 	default:
5909 		/* no manageability firmware: do nothing */
5910 		break;
5911 	}
5912 }
5913 
5914 /*
5915  * Restore e1000g promiscuous mode.
5916  */
5917 static void
5918 e1000g_restore_promisc(struct e1000g *Adapter)
5919 {
5920 	if (Adapter->e1000g_promisc) {
5921 		uint32_t rctl;
5922 
5923 		rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
5924 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
5925 		E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
5926 	}
5927 }
5928