xref: /illumos-gate/usr/src/uts/common/io/e1000g/e1000g_main.c (revision 7ce76caa61769eef87a2368b9ef90e4661e3f193)
1 /*
2  * This file is provided under a CDDLv1 license.  When using or
3  * redistributing this file, you may do so under this license.
4  * In redistributing this file this license must be included
5  * and no other modification of this header file is permitted.
6  *
7  * CDDL LICENSE SUMMARY
8  *
9  * Copyright(c) 1999 - 2008 Intel Corporation. All rights reserved.
10  *
11  * The contents of this file are subject to the terms of Version
12  * 1.0 of the Common Development and Distribution License (the "License").
13  *
14  * You should have received a copy of the License with this software.
15  * You can obtain a copy of the License at
16  *	http://www.opensolaris.org/os/licensing.
17  * See the License for the specific language governing permissions
18  * and limitations under the License.
19  */
20 
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * **********************************************************************
28  *									*
29  * Module Name:								*
30  *   e1000g_main.c							*
31  *									*
32  * Abstract:								*
33  *   This file contains the interface routines for the Solaris OS.	*
34  *   It has all DDI entry point routines and GLD entry point routines.	*
35  *									*
36  *   This file also contains the routines that handle initialization,	*
37  *   uninitialization and interrupt processing.			*
38  *									*
39  * **********************************************************************
40  */
41 
42 #include <sys/dlpi.h>
43 #include <sys/mac.h>
44 #include "e1000g_sw.h"
45 #include "e1000g_debug.h"
46 
47 static char ident[] = "Intel PRO/1000 Ethernet";
48 static char e1000g_string[] = "Intel(R) PRO/1000 Network Connection";
49 static char e1000g_version[] = "Driver Ver. 5.3.1";
50 
51 /*
52  * Prototypes for DDI entry points
53  */
54 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
55 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);
56 static int e1000g_quiesce(dev_info_t *);
57 
58 /*
59  * init and intr routine prototypes
60  */
61 static int e1000g_resume(dev_info_t *);
62 static int e1000g_suspend(dev_info_t *);
63 static uint_t e1000g_intr_pciexpress(caddr_t);
64 static uint_t e1000g_intr(caddr_t);
65 static void e1000g_intr_work(struct e1000g *, uint32_t);
66 #pragma inline(e1000g_intr_work)
67 static int e1000g_init(struct e1000g *);
68 static int e1000g_start(struct e1000g *, boolean_t);
69 static void e1000g_stop(struct e1000g *, boolean_t);
70 static int e1000g_m_start(void *);
71 static void e1000g_m_stop(void *);
72 static int e1000g_m_promisc(void *, boolean_t);
73 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
74 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
75 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
76 static int e1000g_m_setprop(void *, const char *, mac_prop_id_t,
77     uint_t, const void *);
78 static int e1000g_m_getprop(void *, const char *, mac_prop_id_t,
79     uint_t, uint_t, void *, uint_t *);
80 static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t,
81     const void *);
82 static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t,
83     uint_t, void *, uint_t *);
84 static void e1000g_init_locks(struct e1000g *);
85 static void e1000g_destroy_locks(struct e1000g *);
86 static int e1000g_identify_hardware(struct e1000g *);
87 static int e1000g_regs_map(struct e1000g *);
88 static int e1000g_set_driver_params(struct e1000g *);
89 static void e1000g_set_bufsize(struct e1000g *);
90 static int e1000g_register_mac(struct e1000g *);
91 static boolean_t e1000g_rx_drain(struct e1000g *);
92 static boolean_t e1000g_tx_drain(struct e1000g *);
93 static void e1000g_init_unicst(struct e1000g *);
94 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int);
95 
96 /*
97  * Local routines
98  */
99 static boolean_t e1000g_reset_adapter(struct e1000g *);
100 static void e1000g_tx_clean(struct e1000g *);
101 static void e1000g_rx_clean(struct e1000g *);
102 static void e1000g_link_timer(void *);
103 static void e1000g_local_timer(void *);
104 static boolean_t e1000g_link_check(struct e1000g *);
105 static boolean_t e1000g_stall_check(struct e1000g *);
106 static void e1000g_smartspeed(struct e1000g *);
107 static void e1000g_get_conf(struct e1000g *);
108 static int e1000g_get_prop(struct e1000g *, char *, int, int, int);
109 static void enable_watchdog_timer(struct e1000g *);
110 static void disable_watchdog_timer(struct e1000g *);
111 static void start_watchdog_timer(struct e1000g *);
112 static void restart_watchdog_timer(struct e1000g *);
113 static void stop_watchdog_timer(struct e1000g *);
114 static void stop_link_timer(struct e1000g *);
115 static void stop_82547_timer(e1000g_tx_ring_t *);
116 static void e1000g_force_speed_duplex(struct e1000g *);
117 static void e1000g_get_max_frame_size(struct e1000g *);
118 static boolean_t is_valid_mac_addr(uint8_t *);
119 static void e1000g_unattach(dev_info_t *, struct e1000g *);
120 #ifdef E1000G_DEBUG
121 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *);
122 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *);
123 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *);
124 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *);
125 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *,
126     struct iocblk *, mblk_t *);
127 #endif
128 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *,
129     struct iocblk *, mblk_t *);
130 static boolean_t e1000g_check_loopback_support(struct e1000_hw *);
131 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t);
132 static void e1000g_set_internal_loopback(struct e1000g *);
133 static void e1000g_set_external_loopback_1000(struct e1000g *);
134 static void e1000g_set_external_loopback_100(struct e1000g *);
135 static void e1000g_set_external_loopback_10(struct e1000g *);
136 static int e1000g_add_intrs(struct e1000g *);
137 static int e1000g_intr_add(struct e1000g *, int);
138 static int e1000g_rem_intrs(struct e1000g *);
139 static int e1000g_enable_intrs(struct e1000g *);
140 static int e1000g_disable_intrs(struct e1000g *);
141 static boolean_t e1000g_link_up(struct e1000g *);
142 #ifdef __sparc
143 static boolean_t e1000g_find_mac_address(struct e1000g *);
144 #endif
145 static void e1000g_get_phy_state(struct e1000g *);
146 static void e1000g_free_priv_devi_node(struct e1000g *, boolean_t);
147 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
148     const void *impl_data);
149 static void e1000g_fm_init(struct e1000g *Adapter);
150 static void e1000g_fm_fini(struct e1000g *Adapter);
151 static int e1000g_get_def_val(struct e1000g *, mac_prop_id_t, uint_t, void *);
152 static void e1000g_param_sync(struct e1000g *);
153 static void e1000g_get_driver_control(struct e1000_hw *);
154 static void e1000g_release_driver_control(struct e1000_hw *);
155 static void e1000g_restore_promisc(struct e1000g *Adapter);
156 
157 mac_priv_prop_t e1000g_priv_props[] = {
158 	{"_tx_bcopy_threshold", MAC_PROP_PERM_RW},
159 	{"_tx_interrupt_enable", MAC_PROP_PERM_RW},
160 	{"_tx_intr_delay", MAC_PROP_PERM_RW},
161 	{"_tx_intr_abs_delay", MAC_PROP_PERM_RW},
162 	{"_rx_bcopy_threshold", MAC_PROP_PERM_RW},
163 	{"_max_num_rcv_packets", MAC_PROP_PERM_RW},
164 	{"_rx_intr_delay", MAC_PROP_PERM_RW},
165 	{"_rx_intr_abs_delay", MAC_PROP_PERM_RW},
166 	{"_intr_throttling_rate", MAC_PROP_PERM_RW},
167 	{"_intr_adaptive", MAC_PROP_PERM_RW},
168 	{"_adv_pause_cap", MAC_PROP_PERM_READ},
169 	{"_adv_asym_pause_cap", MAC_PROP_PERM_READ},
170 };
171 #define	E1000G_MAX_PRIV_PROPS	\
172 	(sizeof (e1000g_priv_props)/sizeof (mac_priv_prop_t))
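/*
 * Note: these private properties are exported through the MAC framework and
 * can usually be inspected or tuned at runtime with dladm, for example
 * (illustrative only; the behavior of each property is driver-specific):
 *	dladm show-linkprop -p _rx_intr_delay e1000g0
 *	dladm set-linkprop -p _rx_intr_delay=32 e1000g0
 */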
173 
174 
175 static struct cb_ops cb_ws_ops = {
176 	nulldev,		/* cb_open */
177 	nulldev,		/* cb_close */
178 	nodev,			/* cb_strategy */
179 	nodev,			/* cb_print */
180 	nodev,			/* cb_dump */
181 	nodev,			/* cb_read */
182 	nodev,			/* cb_write */
183 	nodev,			/* cb_ioctl */
184 	nodev,			/* cb_devmap */
185 	nodev,			/* cb_mmap */
186 	nodev,			/* cb_segmap */
187 	nochpoll,		/* cb_chpoll */
188 	ddi_prop_op,		/* cb_prop_op */
189 	NULL,			/* cb_stream */
190 	D_MP | D_HOTPLUG,	/* cb_flag */
191 	CB_REV,			/* cb_rev */
192 	nodev,			/* cb_aread */
193 	nodev			/* cb_awrite */
194 };
195 
196 static struct dev_ops ws_ops = {
197 	DEVO_REV,		/* devo_rev */
198 	0,			/* devo_refcnt */
199 	NULL,			/* devo_getinfo */
200 	nulldev,		/* devo_identify */
201 	nulldev,		/* devo_probe */
202 	e1000g_attach,		/* devo_attach */
203 	e1000g_detach,		/* devo_detach */
204 	nodev,			/* devo_reset */
205 	&cb_ws_ops,		/* devo_cb_ops */
206 	NULL,			/* devo_bus_ops */
207 	ddi_power,		/* devo_power */
208 	e1000g_quiesce		/* devo_quiesce */
209 };
210 
211 static struct modldrv modldrv = {
212 	&mod_driverops,		/* Type of module.  This one is a driver */
213 	ident,			/* Description string */
214 	&ws_ops,		/* driver ops */
215 };
216 
217 static struct modlinkage modlinkage = {
218 	MODREV_1, &modldrv, NULL
219 };
220 
221 /* Access attributes for register mapping */
222 static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
223 	DDI_DEVICE_ATTR_V0,
224 	DDI_STRUCTURE_LE_ACC,
225 	DDI_STRICTORDER_ACC,
226 	DDI_FLAGERR_ACC
227 };
228 
229 #define	E1000G_M_CALLBACK_FLAGS \
230 	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
231 
232 static mac_callbacks_t e1000g_m_callbacks = {
233 	E1000G_M_CALLBACK_FLAGS,
234 	e1000g_m_stat,
235 	e1000g_m_start,
236 	e1000g_m_stop,
237 	e1000g_m_promisc,
238 	e1000g_m_multicst,
239 	NULL,
240 	e1000g_m_tx,
241 	e1000g_m_ioctl,
242 	e1000g_m_getcapab,
243 	NULL,
244 	NULL,
245 	e1000g_m_setprop,
246 	e1000g_m_getprop
247 };
248 
249 /*
250  * Global variables
251  */
252 uint32_t e1000g_mblks_pending = 0;
253 /*
254  * Workaround for Dynamic Reconfiguration support, for the x86 platform only.
255  * Here we maintain a private dev_info list if e1000g_force_detach is
256  * enabled. If we force the driver to detach while there are still some
257  * rx buffers retained in the upper layer, we have to keep a copy of the
258  * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
259  * structure will be freed after the driver is detached. However, when we
260  * finally free those rx buffers released by the upper layer, we need to
261  * refer to the dev_info to free the dma buffers. So we save a copy of
262  * the dev_info for this purpose. On the x86 platform, we assume this copy
263  * of dev_info is always valid, but on SPARC it could become invalid
264  * after a system-board-level DR operation. For this reason, the global
265  * variable e1000g_force_detach must be B_FALSE on SPARC.
266  */
267 #ifdef __sparc
268 boolean_t e1000g_force_detach = B_FALSE;
269 #else
270 boolean_t e1000g_force_detach = B_TRUE;
271 #endif
272 private_devi_list_t *e1000g_private_devi_list = NULL;
273 
274 /*
275  * This rwlock protects the whole rx recycling process and the release
276  * of rx packets during detach processing, making the two mutually
277  * exclusive.
278  * Rx recycling handles different rx packets in different threads, so it
279  * acquires the lock as RW_READER and will not block any other rx
280  * recycling threads.
281  * Detach processing acquires the lock as RW_WRITER to make it mutually
282  * exclusive with rx recycling.
283  */
284 krwlock_t e1000g_rx_detach_lock;
285 /*
286  * The rwlock e1000g_dma_type_lock is defined to protect the global flag
287  * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
288  * If there are many e1000g instances, the system may run out of DVMA
289  * resources during the initialization of the instances, then the flag will
290  * be changed to "USE_DMA". Because different e1000g instances are initialized
291  * in parallel, we need to use this lock to protect the flag.
292  */
293 krwlock_t e1000g_dma_type_lock;
294 
295 /*
296  * The 82546 chipset is a dual-port device and both ports share one EEPROM.
297  * Based on information from Intel, the 82546 chipset has a hardware
298  * problem: when one port is being reset while the other port is trying to
299  * access the EEPROM, the system can hang or panic. To work around this
300  * hardware problem, we use a global mutex to prevent such operations from
301  * happening simultaneously on different instances. This workaround is applied
302  * to all the devices supported by this driver.
303  */
304 kmutex_t e1000g_nvm_lock;
305 
306 /*
307  * Loadable module configuration entry points for the driver
308  */
309 
310 /*
311  * _init - module initialization
312  */
313 int
314 _init(void)
315 {
316 	int status;
317 
318 	mac_init_ops(&ws_ops, WSNAME);
319 	status = mod_install(&modlinkage);
320 	if (status != DDI_SUCCESS)
321 		mac_fini_ops(&ws_ops);
322 	else {
323 		rw_init(&e1000g_rx_detach_lock, NULL, RW_DRIVER, NULL);
324 		rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL);
325 		mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL);
326 	}
327 
328 	return (status);
329 }
330 
331 /*
332  * _fini - module finalization
333  */
334 int
335 _fini(void)
336 {
337 	int status;
338 
339 	rw_enter(&e1000g_rx_detach_lock, RW_READER);
340 	if (e1000g_mblks_pending != 0) {
341 		rw_exit(&e1000g_rx_detach_lock);
342 		return (EBUSY);
343 	}
344 	rw_exit(&e1000g_rx_detach_lock);
345 
346 	status = mod_remove(&modlinkage);
347 	if (status == DDI_SUCCESS) {
348 		mac_fini_ops(&ws_ops);
349 
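		/*
		 * Free the dev_info copies saved for force-detached
		 * instances; this is safe because e1000g_mblks_pending
		 * was verified to be zero above.
		 */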
350 		if (e1000g_force_detach) {
351 			private_devi_list_t *devi_node;
352 
353 			rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
354 			while (e1000g_private_devi_list != NULL) {
355 				devi_node = e1000g_private_devi_list;
356 				e1000g_private_devi_list =
357 				    e1000g_private_devi_list->next;
358 
359 				kmem_free(devi_node->priv_dip,
360 				    sizeof (struct dev_info));
361 				kmem_free(devi_node,
362 				    sizeof (private_devi_list_t));
363 			}
364 			rw_exit(&e1000g_rx_detach_lock);
365 		}
366 
367 		rw_destroy(&e1000g_rx_detach_lock);
368 		rw_destroy(&e1000g_dma_type_lock);
369 		mutex_destroy(&e1000g_nvm_lock);
370 	}
371 
372 	return (status);
373 }
374 
375 /*
376  * _info - module information
377  */
378 int
379 _info(struct modinfo *modinfop)
380 {
381 	return (mod_info(&modlinkage, modinfop));
382 }
383 
384 /*
385  * e1000g_attach - driver attach
386  *
387  * This function is the device-specific initialization entry
388  * point. This entry point is required and must be written.
389  * The DDI_ATTACH command must be provided in the attach entry
390  * point. When attach() is called with cmd set to DDI_ATTACH,
391  * all normal kernel services (such as kmem_alloc(9F)) are
392  * available for use by the driver.
393  *
394  * The attach() function will be called once for each instance
395  * of  the  device  on  the  system with cmd set to DDI_ATTACH.
396  * Until attach() succeeds, the only driver entry points which
397  * may be called are open(9E) and getinfo(9E).
398  */
399 static int
400 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
401 {
402 	struct e1000g *Adapter;
403 	struct e1000_hw *hw;
404 	struct e1000g_osdep *osdep;
405 	int instance;
406 
407 	switch (cmd) {
408 	default:
409 		e1000g_log(NULL, CE_WARN,
410 		    "Unsupported command sent to e1000g_attach... ");
411 		return (DDI_FAILURE);
412 
413 	case DDI_RESUME:
414 		return (e1000g_resume(devinfo));
415 
416 	case DDI_ATTACH:
417 		break;
418 	}
419 
420 	/*
421 	 * get device instance number
422 	 */
423 	instance = ddi_get_instance(devinfo);
424 
425 	/*
426 	 * Allocate soft data structure
427 	 */
428 	Adapter =
429 	    (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP);
430 
431 	Adapter->dip = devinfo;
432 	Adapter->instance = instance;
433 	Adapter->tx_ring->adapter = Adapter;
434 	Adapter->rx_ring->adapter = Adapter;
435 
436 	hw = &Adapter->shared;
437 	osdep = &Adapter->osdep;
438 	hw->back = osdep;
439 	osdep->adapter = Adapter;
440 
441 	ddi_set_driver_private(devinfo, (caddr_t)Adapter);
442 
443 	/*
444 	 * Initialize for fma support
445 	 */
446 	Adapter->fm_capabilities = e1000g_get_prop(Adapter, "fm-capable",
447 	    0, 0x0f,
448 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
449 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
450 	e1000g_fm_init(Adapter);
451 	Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT;
452 
453 	/*
454 	 * PCI Configure
455 	 */
456 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
457 		e1000g_log(Adapter, CE_WARN, "PCI configuration failed");
458 		goto attach_fail;
459 	}
460 	Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
461 
462 	/*
463 	 * Setup hardware
464 	 */
465 	if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) {
466 		e1000g_log(Adapter, CE_WARN, "Identify hardware failed");
467 		goto attach_fail;
468 	}
469 
470 	/*
471 	 * Map in the device registers.
472 	 */
473 	if (e1000g_regs_map(Adapter) != DDI_SUCCESS) {
474 		e1000g_log(Adapter, CE_WARN, "Mapping registers failed");
475 		goto attach_fail;
476 	}
477 	Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
478 
479 	/*
480 	 * Initialize driver parameters
481 	 */
482 	if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) {
483 		goto attach_fail;
484 	}
485 	Adapter->attach_progress |= ATTACH_PROGRESS_SETUP;
486 
487 	if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
488 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
489 		goto attach_fail;
490 	}
491 
492 	/*
493 	 * Initialize interrupts
494 	 */
495 	if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) {
496 		e1000g_log(Adapter, CE_WARN, "Add interrupts failed");
497 		goto attach_fail;
498 	}
499 	Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
500 
501 	/*
502 	 * Initialize mutexes for this device.
503 	 * Do this before enabling the interrupt handler and
504 	 * registering the softint, to avoid the condition where the
505 	 * interrupt handler could use an uninitialized mutex.
506 	 */
507 	e1000g_init_locks(Adapter);
508 	Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS;
509 
510 	/*
511 	 * Initialize Driver Counters
512 	 */
513 	if (e1000g_init_stats(Adapter) != DDI_SUCCESS) {
514 		e1000g_log(Adapter, CE_WARN, "Init stats failed");
515 		goto attach_fail;
516 	}
517 	Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS;
518 
519 	/*
520 	 * Initialize chip hardware and software structures
521 	 */
522 	if (e1000g_init(Adapter) != DDI_SUCCESS) {
523 		e1000g_log(Adapter, CE_WARN, "Adapter initialization failed");
524 		goto attach_fail;
525 	}
526 	Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
527 
528 	/*
529 	 * Register the driver to the MAC
530 	 */
531 	if (e1000g_register_mac(Adapter) != DDI_SUCCESS) {
532 		e1000g_log(Adapter, CE_WARN, "Register MAC failed");
533 		goto attach_fail;
534 	}
535 	Adapter->attach_progress |= ATTACH_PROGRESS_MAC;
536 
537 	/*
538 	 * Now that mutex locks are initialized, and the chip is also
539 	 * initialized, enable interrupts.
540 	 */
541 	if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) {
542 		e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed");
543 		goto attach_fail;
544 	}
545 	Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
546 
547 	/*
548 	 * If e1000g_force_detach is enabled, create a new entry in the global
549 	 * private dip list. The entry maintains the priv_dip for DR
550 	 * support after the driver is detached.
551 	 */
552 	if (e1000g_force_detach) {
553 		private_devi_list_t *devi_node;
554 
555 		Adapter->priv_dip =
556 		    kmem_zalloc(sizeof (struct dev_info), KM_SLEEP);
557 		bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip),
558 		    sizeof (struct dev_info));
559 
560 		devi_node =
561 		    kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP);
562 
563 		rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
564 		devi_node->priv_dip = Adapter->priv_dip;
565 		devi_node->flag = E1000G_PRIV_DEVI_ATTACH;
566 		devi_node->next = e1000g_private_devi_list;
567 		e1000g_private_devi_list = devi_node;
568 		rw_exit(&e1000g_rx_detach_lock);
569 	}
570 
571 	cmn_err(CE_CONT, "!%s, %s\n", e1000g_string, e1000g_version);
572 
573 	return (DDI_SUCCESS);
574 
575 attach_fail:
576 	e1000g_unattach(devinfo, Adapter);
577 	return (DDI_FAILURE);
578 }
579 
580 static int
581 e1000g_register_mac(struct e1000g *Adapter)
582 {
583 	struct e1000_hw *hw = &Adapter->shared;
584 	mac_register_t *mac;
585 	int err;
586 
587 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
588 		return (DDI_FAILURE);
589 
590 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
591 	mac->m_driver = Adapter;
592 	mac->m_dip = Adapter->dip;
593 	mac->m_src_addr = hw->mac.addr;
594 	mac->m_callbacks = &e1000g_m_callbacks;
595 	mac->m_min_sdu = 0;
596 	mac->m_max_sdu = Adapter->default_mtu;
597 	mac->m_margin = VLAN_TAGSZ;
598 	mac->m_priv_props = e1000g_priv_props;
599 	mac->m_priv_prop_count = E1000G_MAX_PRIV_PROPS;
600 	mac->m_v12n = MAC_VIRT_LEVEL1;
601 
602 	err = mac_register(mac, &Adapter->mh);
603 	mac_free(mac);
604 
605 	return (err == 0 ? DDI_SUCCESS : DDI_FAILURE);
606 }
607 
608 static int
609 e1000g_identify_hardware(struct e1000g *Adapter)
610 {
611 	struct e1000_hw *hw = &Adapter->shared;
612 	struct e1000g_osdep *osdep = &Adapter->osdep;
613 
614 	/* Get the device id */
615 	hw->vendor_id =
616 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
617 	hw->device_id =
618 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
619 	hw->revision_id =
620 	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
621 	hw->subsystem_device_id =
622 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
623 	hw->subsystem_vendor_id =
624 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
625 
626 	if (e1000_set_mac_type(hw) != E1000_SUCCESS) {
627 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
628 		    "MAC type could not be set properly.");
629 		return (DDI_FAILURE);
630 	}
631 
632 	return (DDI_SUCCESS);
633 }
634 
635 static int
636 e1000g_regs_map(struct e1000g *Adapter)
637 {
638 	dev_info_t *devinfo = Adapter->dip;
639 	struct e1000_hw *hw = &Adapter->shared;
640 	struct e1000g_osdep *osdep = &Adapter->osdep;
641 	off_t mem_size;
642 
643 	/* Get size of adapter register memory */
644 	if (ddi_dev_regsize(devinfo, ADAPTER_REG_SET, &mem_size) !=
645 	    DDI_SUCCESS) {
646 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
647 		    "ddi_dev_regsize for registers failed");
648 		return (DDI_FAILURE);
649 	}
650 
651 	/* Map adapter register memory */
652 	if ((ddi_regs_map_setup(devinfo, ADAPTER_REG_SET,
653 	    (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr,
654 	    &osdep->reg_handle)) != DDI_SUCCESS) {
655 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
656 		    "ddi_regs_map_setup for registers failed");
657 		goto regs_map_fail;
658 	}
659 
660 	/* ICH needs to map flash memory */
661 	if (hw->mac.type == e1000_ich8lan ||
662 	    hw->mac.type == e1000_ich9lan ||
663 	    hw->mac.type == e1000_ich10lan) {
664 		/* get flash size */
665 		if (ddi_dev_regsize(devinfo, ICH_FLASH_REG_SET,
666 		    &mem_size) != DDI_SUCCESS) {
667 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
668 			    "ddi_dev_regsize for ICH flash failed");
669 			goto regs_map_fail;
670 		}
671 
672 		/* map flash in */
673 		if (ddi_regs_map_setup(devinfo, ICH_FLASH_REG_SET,
674 		    (caddr_t *)&hw->flash_address, 0,
675 		    mem_size, &e1000g_regs_acc_attr,
676 		    &osdep->ich_flash_handle) != DDI_SUCCESS) {
677 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
678 			    "ddi_regs_map_setup for ICH flash failed");
679 			goto regs_map_fail;
680 		}
681 	}
682 
683 	return (DDI_SUCCESS);
684 
685 regs_map_fail:
686 	if (osdep->reg_handle != NULL)
687 		ddi_regs_map_free(&osdep->reg_handle);
688 
689 	return (DDI_FAILURE);
690 }
691 
692 static int
693 e1000g_set_driver_params(struct e1000g *Adapter)
694 {
695 	struct e1000_hw *hw;
696 	uint32_t mem_bar, io_bar, bar64;
697 
698 	hw = &Adapter->shared;
699 
700 	/* Set MAC type and initialize hardware functions */
701 	if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) {
702 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
703 		    "Could not setup hardware functions");
704 		return (DDI_FAILURE);
705 	}
706 
707 	/* Get bus information */
708 	if (e1000_get_bus_info(hw) != E1000_SUCCESS) {
709 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
710 		    "Could not get bus information");
711 		return (DDI_FAILURE);
712 	}
713 
714 	/* get mem_base addr */
715 	mem_bar = pci_config_get32(Adapter->osdep.cfg_handle, PCI_CONF_BASE0);
716 	bar64 = mem_bar & PCI_BASE_TYPE_ALL;
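	/*
	 * A nonzero bar64 indicates that BAR0 is a 64-bit memory BAR, which
	 * occupies two consecutive config dwords and therefore shifts the
	 * location of the I/O BAR.
	 */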
717 
718 	/* get io_base addr */
719 	if (hw->mac.type >= e1000_82544) {
720 		if (bar64) {
721 			/* IO BAR is different for 64 bit BAR mode */
722 			io_bar = pci_config_get32(Adapter->osdep.cfg_handle,
723 			    PCI_CONF_BASE4);
724 		} else {
725 			/* normal 32-bit BAR mode */
726 			io_bar = pci_config_get32(Adapter->osdep.cfg_handle,
727 			    PCI_CONF_BASE2);
728 		}
729 		hw->io_base = io_bar & PCI_BASE_IO_ADDR_M;
730 	} else {
731 		/* no I/O access for adapters prior to 82544 */
732 		hw->io_base = 0x0;
733 	}
734 
735 	e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word);
736 
737 	hw->mac.autoneg_failed = B_TRUE;
738 
739 	/* Set the autoneg_wait_to_complete flag to B_FALSE */
740 	hw->phy.autoneg_wait_to_complete = B_FALSE;
741 
742 	/* Adaptive IFS related changes */
743 	hw->mac.adaptive_ifs = B_TRUE;
744 
745 	/* Enable phy init script for IGP phy of 82541/82547 */
746 	if ((hw->mac.type == e1000_82547) ||
747 	    (hw->mac.type == e1000_82541) ||
748 	    (hw->mac.type == e1000_82547_rev_2) ||
749 	    (hw->mac.type == e1000_82541_rev_2))
750 		e1000_init_script_state_82541(hw, B_TRUE);
751 
752 	/* Enable the TTL workaround for 82541/82547 */
753 	e1000_set_ttl_workaround_state_82541(hw, B_TRUE);
754 
755 #ifdef __sparc
756 	Adapter->strip_crc = B_TRUE;
757 #else
758 	Adapter->strip_crc = B_FALSE;
759 #endif
760 
761 	/* Get conf file properties */
762 	e1000g_get_conf(Adapter);
763 
764 	/* Get speed/duplex settings in conf file */
765 	hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
766 	hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
767 	e1000g_force_speed_duplex(Adapter);
768 
769 	/* Get Jumbo Frames settings in conf file */
770 	e1000g_get_max_frame_size(Adapter);
771 
772 	/* Set Rx/Tx buffer size */
773 	e1000g_set_bufsize(Adapter);
774 
775 	/* Master Latency Timer */
776 	Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER;
777 
778 	/* copper options */
779 	if (hw->phy.media_type == e1000_media_type_copper) {
780 		hw->phy.mdix = 0;	/* AUTO_ALL_MODES */
781 		hw->phy.disable_polarity_correction = B_FALSE;
782 		hw->phy.ms_type = e1000_ms_hw_default;	/* E1000_MASTER_SLAVE */
783 	}
784 
785 	/* The initial link state should be "unknown" */
786 	Adapter->link_state = LINK_STATE_UNKNOWN;
787 
788 	/* Initialize rx parameters */
789 	Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY;
790 	Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY;
791 
792 	/* Initialize tx parameters */
793 	Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE;
794 	Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD;
795 	Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY;
796 	Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY;
797 
798 	/* Initialize rx parameters */
799 	Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD;
800 
801 	return (DDI_SUCCESS);
802 }
803 
804 static void
805 e1000g_set_bufsize(struct e1000g *Adapter)
806 {
807 	struct e1000_mac_info *mac = &Adapter->shared.mac;
808 	uint64_t rx_size;
809 	uint64_t tx_size;
810 
811 	dev_info_t *devinfo = Adapter->dip;
812 #ifdef __sparc
813 	ulong_t iommu_pagesize;
814 #endif
815 	/* Get the system page size */
816 	Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1);
817 
818 #ifdef __sparc
819 	iommu_pagesize = dvma_pagesize(devinfo);
820 	if (iommu_pagesize != 0) {
821 		if (Adapter->sys_page_sz == iommu_pagesize) {
822 			if (iommu_pagesize > 0x4000)
823 				Adapter->sys_page_sz = 0x4000;
824 		} else {
825 			if (Adapter->sys_page_sz > iommu_pagesize)
826 				Adapter->sys_page_sz = iommu_pagesize;
827 		}
828 	}
829 	if (Adapter->lso_enable) {
830 		Adapter->dvma_page_num = E1000_LSO_MAXLEN /
831 		    Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
832 	} else {
833 		Adapter->dvma_page_num = Adapter->max_frame_size /
834 		    Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
835 	}
836 	ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM);
837 #endif
838 
839 	Adapter->min_frame_size = ETHERMIN + ETHERFCSL;
840 
841 	if (Adapter->mem_workaround_82546 &&
842 	    ((mac->type == e1000_82545) ||
843 	    (mac->type == e1000_82546) ||
844 	    (mac->type == e1000_82546_rev_3))) {
845 		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
846 	} else {
847 		rx_size = Adapter->max_frame_size + E1000G_IPALIGNPRESERVEROOM;
848 		if ((rx_size > FRAME_SIZE_UPTO_2K) &&
849 		    (rx_size <= FRAME_SIZE_UPTO_4K))
850 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
851 		else if ((rx_size > FRAME_SIZE_UPTO_4K) &&
852 		    (rx_size <= FRAME_SIZE_UPTO_8K))
853 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
854 		else if ((rx_size > FRAME_SIZE_UPTO_8K) &&
855 		    (rx_size <= FRAME_SIZE_UPTO_16K))
856 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
857 		else
858 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
859 	}
860 
861 	tx_size = Adapter->max_frame_size;
862 	if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K))
863 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
864 	else if ((tx_size > FRAME_SIZE_UPTO_4K) &&
865 	    (tx_size <= FRAME_SIZE_UPTO_8K))
866 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
867 	else if ((tx_size > FRAME_SIZE_UPTO_8K) &&
868 	    (tx_size <= FRAME_SIZE_UPTO_16K))
869 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
870 	else
871 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;
872 
873 	/*
874 	 * Wiseman adapters require receive buffers to be aligned on a
875 	 * 256-byte boundary. Since Livengood does not require this, and
876 	 * forcing the alignment for all hardware would have performance
877 	 * implications, it is applied only to Wiseman adapters and to the
878 	 * jumbo-frames-enabled mode. The rest of the time it is okay to
879 	 * have normal frames without the alignment, but that carries a
880 	 * potential risk of losing data if a buffer is not aligned, so
881 	 * all Wiseman boards are given receive buffers aligned on a
882 	 * 256-byte boundary.
883 	 */
884 	if (mac->type < e1000_82543)
885 		Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
886 	else
887 		Adapter->rx_buf_align = 1;
888 }
889 
890 /*
891  * e1000g_detach - driver detach
892  *
893  * The detach() function is the complement of the attach routine.
894  * If cmd is set to DDI_DETACH, detach() is used to remove  the
895  * state  associated  with  a  given  instance of a device node
896  * prior to the removal of that instance from the system.
897  *
898  * The detach() function will be called once for each  instance
899  * of the device for which there has been a successful attach()
900  * once there are no longer  any  opens  on  the  device.
901  *
902  * Interrupt routines are disabled, and all memory allocated by this
903  * driver is freed.
904  */
905 static int
906 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
907 {
908 	struct e1000g *Adapter;
909 	boolean_t rx_drain;
910 
911 	switch (cmd) {
912 	default:
913 		return (DDI_FAILURE);
914 
915 	case DDI_SUSPEND:
916 		return (e1000g_suspend(devinfo));
917 
918 	case DDI_DETACH:
919 		break;
920 	}
921 
922 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
923 	if (Adapter == NULL)
924 		return (DDI_FAILURE);
925 
926 	rx_drain = e1000g_rx_drain(Adapter);
927 	if (!rx_drain && !e1000g_force_detach)
928 		return (DDI_FAILURE);
929 
930 	if (mac_unregister(Adapter->mh) != 0) {
931 		e1000g_log(Adapter, CE_WARN, "Unregister MAC failed");
932 		return (DDI_FAILURE);
933 	}
934 	Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC;
935 
936 	ASSERT(Adapter->chip_state == E1000G_STOP);
937 
938 	/*
939 	 * If e1000g_force_detach is enabled, detaching the driver is safe.
940 	 * We let the e1000g_free_priv_devi_node() routine determine
941 	 * whether we need to free the priv_dip entry for the current
942 	 * driver instance.
943 	 */
944 	if (e1000g_force_detach) {
945 		e1000g_free_priv_devi_node(Adapter, rx_drain);
946 	}
947 
948 	e1000g_unattach(devinfo, Adapter);
949 
950 	return (DDI_SUCCESS);
951 }
952 
953 /*
954  * e1000g_free_priv_devi_node - free a priv_dip entry for a driver instance
955  *
956  * If free_flag is true, the upper layer is not holding the rx buffers
957  * and we can free the priv_dip entry safely.
958  *
959  * Otherwise, we have to keep this entry even after the driver is detached,
960  * and we also need to mark it with the E1000G_PRIV_DEVI_DETACH flag,
961  * so that the driver can free it once all of the rx buffers have been
962  * returned by the upper layer.
963  */
964 static void
965 e1000g_free_priv_devi_node(struct e1000g *Adapter, boolean_t free_flag)
966 {
967 	private_devi_list_t *devi_node, *devi_del;
968 
969 	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
970 	ASSERT(e1000g_private_devi_list != NULL);
971 	ASSERT(Adapter->priv_dip != NULL);
972 
973 	devi_node = e1000g_private_devi_list;
974 	if (devi_node->priv_dip == Adapter->priv_dip) {
975 		if (free_flag) {
976 			e1000g_private_devi_list =
977 			    devi_node->next;
978 			kmem_free(devi_node->priv_dip,
979 			    sizeof (struct dev_info));
980 			kmem_free(devi_node,
981 			    sizeof (private_devi_list_t));
982 		} else {
983 			ASSERT(e1000g_mblks_pending != 0);
984 			devi_node->flag =
985 			    E1000G_PRIV_DEVI_DETACH;
986 		}
987 		rw_exit(&e1000g_rx_detach_lock);
988 		return;
989 	}
990 
991 	devi_node = e1000g_private_devi_list;
992 	while (devi_node->next != NULL) {
993 		if (devi_node->next->priv_dip == Adapter->priv_dip) {
994 			if (free_flag) {
995 				devi_del = devi_node->next;
996 				devi_node->next = devi_del->next;
997 				kmem_free(devi_del->priv_dip,
998 				    sizeof (struct dev_info));
999 				kmem_free(devi_del,
1000 				    sizeof (private_devi_list_t));
1001 			} else {
1002 				ASSERT(e1000g_mblks_pending != 0);
1003 				devi_node->next->flag =
1004 				    E1000G_PRIV_DEVI_DETACH;
1005 			}
1006 			break;
1007 		}
1008 		devi_node = devi_node->next;
1009 	}
1010 	rw_exit(&e1000g_rx_detach_lock);
1011 }
1012 
1013 static void
1014 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter)
1015 {
1016 	int result;
1017 
1018 	if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1019 		(void) e1000g_disable_intrs(Adapter);
1020 	}
1021 
1022 	if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) {
1023 		(void) mac_unregister(Adapter->mh);
1024 	}
1025 
1026 	if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1027 		(void) e1000g_rem_intrs(Adapter);
1028 	}
1029 
1030 	if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) {
1031 		(void) ddi_prop_remove_all(devinfo);
1032 	}
1033 
1034 	if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) {
1035 		kstat_delete((kstat_t *)Adapter->e1000g_ksp);
1036 	}
1037 
1038 	if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) {
1039 		stop_link_timer(Adapter);
1040 
1041 		mutex_enter(&e1000g_nvm_lock);
1042 		result = e1000_reset_hw(&Adapter->shared);
1043 		mutex_exit(&e1000g_nvm_lock);
1044 
1045 		if (result != E1000_SUCCESS) {
1046 			e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1047 			ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1048 		}
1049 	}
1050 
1051 	if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
1052 		if (Adapter->osdep.reg_handle != NULL)
1053 			ddi_regs_map_free(&Adapter->osdep.reg_handle);
1054 		if (Adapter->osdep.ich_flash_handle != NULL)
1055 			ddi_regs_map_free(&Adapter->osdep.ich_flash_handle);
1056 	}
1057 
1058 	if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
1059 		if (Adapter->osdep.cfg_handle != NULL)
1060 			pci_config_teardown(&Adapter->osdep.cfg_handle);
1061 	}
1062 
1063 	if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) {
1064 		e1000g_destroy_locks(Adapter);
1065 	}
1066 
1067 	if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) {
1068 		e1000g_fm_fini(Adapter);
1069 	}
1070 
1071 	e1000_remove_device(&Adapter->shared);
1072 
1073 	kmem_free((caddr_t)Adapter, sizeof (struct e1000g));
1074 
1075 	/*
1076 	 * Another hotplug spec requirement:
1077 	 * run ddi_set_driver_private(devinfo, NULL);
1078 	 */
1079 	ddi_set_driver_private(devinfo, NULL);
1080 }
1081 
1082 static void
1083 e1000g_init_locks(struct e1000g *Adapter)
1084 {
1085 	e1000g_tx_ring_t *tx_ring;
1086 	e1000g_rx_ring_t *rx_ring;
1087 
1088 	rw_init(&Adapter->chip_lock, NULL,
1089 	    RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1090 	mutex_init(&Adapter->link_lock, NULL,
1091 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1092 	mutex_init(&Adapter->watchdog_lock, NULL,
1093 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1094 
1095 	tx_ring = Adapter->tx_ring;
1096 
1097 	mutex_init(&tx_ring->tx_lock, NULL,
1098 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1099 	mutex_init(&tx_ring->usedlist_lock, NULL,
1100 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1101 	mutex_init(&tx_ring->freelist_lock, NULL,
1102 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1103 
1104 	rx_ring = Adapter->rx_ring;
1105 
1106 	mutex_init(&rx_ring->rx_lock, NULL,
1107 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1108 	mutex_init(&rx_ring->freelist_lock, NULL,
1109 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1110 	mutex_init(&rx_ring->recycle_lock, NULL,
1111 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1112 }
1113 
1114 static void
1115 e1000g_destroy_locks(struct e1000g *Adapter)
1116 {
1117 	e1000g_tx_ring_t *tx_ring;
1118 	e1000g_rx_ring_t *rx_ring;
1119 
1120 	tx_ring = Adapter->tx_ring;
1121 	mutex_destroy(&tx_ring->tx_lock);
1122 	mutex_destroy(&tx_ring->usedlist_lock);
1123 	mutex_destroy(&tx_ring->freelist_lock);
1124 
1125 	rx_ring = Adapter->rx_ring;
1126 	mutex_destroy(&rx_ring->rx_lock);
1127 	mutex_destroy(&rx_ring->freelist_lock);
1128 	mutex_destroy(&rx_ring->recycle_lock);
1129 
1130 	mutex_destroy(&Adapter->link_lock);
1131 	mutex_destroy(&Adapter->watchdog_lock);
1132 	rw_destroy(&Adapter->chip_lock);
1133 }
1134 
1135 static int
1136 e1000g_resume(dev_info_t *devinfo)
1137 {
1138 	struct e1000g *Adapter;
1139 
1140 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1141 	if (Adapter == NULL)
1142 		return (DDI_FAILURE);
1143 
1144 	if (e1000g_start(Adapter, B_TRUE))
1145 		return (DDI_FAILURE);
1146 
1147 	return (DDI_SUCCESS);
1148 }
1149 
1150 static int
1151 e1000g_suspend(dev_info_t *devinfo)
1152 {
1153 	struct e1000g *Adapter;
1154 
1155 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1156 	if (Adapter == NULL)
1157 		return (DDI_FAILURE);
1158 
1159 	e1000g_stop(Adapter, B_TRUE);
1160 
1161 	return (DDI_SUCCESS);
1162 }
1163 
1164 static int
1165 e1000g_init(struct e1000g *Adapter)
1166 {
1167 	uint32_t pba;
1168 	uint32_t high_water;
1169 	struct e1000_hw *hw;
1170 	clock_t link_timeout;
1171 	int result;
1172 
1173 	hw = &Adapter->shared;
1174 
1175 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1176 
1177 	/*
1178 	 * reset to put the hardware in a known state
1179 	 * before we try to do anything with the eeprom
1180 	 */
1181 	mutex_enter(&e1000g_nvm_lock);
1182 	result = e1000_reset_hw(hw);
1183 	mutex_exit(&e1000g_nvm_lock);
1184 
1185 	if (result != E1000_SUCCESS) {
1186 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1187 		goto init_fail;
1188 	}
1189 
1190 	mutex_enter(&e1000g_nvm_lock);
1191 	result = e1000_validate_nvm_checksum(hw);
1192 	if (result < E1000_SUCCESS) {
1193 		/*
1194 		 * Some PCI-E parts fail the first check due to
1195 		 * the link being in a sleep state.  Call it again;
1196 		 * if it fails a second time it is a real issue.
1197 		 */
1198 		result = e1000_validate_nvm_checksum(hw);
1199 	}
1200 	mutex_exit(&e1000g_nvm_lock);
1201 
1202 	if (result < E1000_SUCCESS) {
1203 		e1000g_log(Adapter, CE_WARN,
1204 		    "Invalid NVM checksum. Please contact "
1205 		    "the vendor to update the NVM.");
1206 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1207 		goto init_fail;
1208 	}
1209 
1210 	result = 0;
1211 #ifdef __sparc
1212 	/*
1213 	 * First, we try to get the local ethernet address from OBP. If
1214 	 * that fails, we get it from the EEPROM of the NIC.
1215 	 */
1216 	result = e1000g_find_mac_address(Adapter);
1217 #endif
1218 	/* Get the local ethernet address. */
1219 	if (!result) {
1220 		mutex_enter(&e1000g_nvm_lock);
1221 		result = e1000_read_mac_addr(hw);
1222 		mutex_exit(&e1000g_nvm_lock);
1223 	}
1224 
1225 	if (result < E1000_SUCCESS) {
1226 		e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
1227 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1228 		goto init_fail;
1229 	}
1230 
1231 	/* check for valid mac address */
1232 	if (!is_valid_mac_addr(hw->mac.addr)) {
1233 		e1000g_log(Adapter, CE_WARN, "Invalid mac addr");
1234 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1235 		goto init_fail;
1236 	}
1237 
1238 	/* Set LAA state for 82571 chipset */
1239 	e1000_set_laa_state_82571(hw, B_TRUE);
1240 
1241 	/* Master Latency Timer implementation */
1242 	if (Adapter->master_latency_timer) {
1243 		pci_config_put8(Adapter->osdep.cfg_handle,
1244 		    PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer);
1245 	}
1246 
1247 	if (hw->mac.type < e1000_82547) {
1248 		/*
1249 		 * Total FIFO is 64K
1250 		 */
1251 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1252 			pba = E1000_PBA_40K;	/* 40K for Rx, 24K for Tx */
1253 		else
1254 			pba = E1000_PBA_48K;	/* 48K for Rx, 16K for Tx */
1255 	} else if ((hw->mac.type == e1000_82571) ||
1256 	    (hw->mac.type == e1000_82572) ||
1257 	    (hw->mac.type == e1000_80003es2lan)) {
1258 		/*
1259 		 * Total FIFO is 48K
1260 		 */
1261 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1262 			pba = E1000_PBA_30K;	/* 30K for Rx, 18K for Tx */
1263 		else
1264 			pba = E1000_PBA_38K;	/* 38K for Rx, 10K for Tx */
1265 	} else if (hw->mac.type == e1000_82573) {
1266 		pba = E1000_PBA_20K;		/* 20K for Rx, 12K for Tx */
1267 	} else if (hw->mac.type == e1000_82574) {
1268 		/* Keep adapter default: 20K for Rx, 20K for Tx */
1269 		pba = E1000_READ_REG(hw, E1000_PBA);
1270 	} else if (hw->mac.type == e1000_ich8lan) {
1271 		pba = E1000_PBA_8K;		/* 8K for Rx, 12K for Tx */
1272 	} else if (hw->mac.type == e1000_ich9lan) {
1273 		pba = E1000_PBA_10K;
1274 	} else if (hw->mac.type == e1000_ich10lan) {
1275 		pba = E1000_PBA_10K;
1276 	} else {
1277 		/*
1278 		 * Total FIFO is 40K
1279 		 */
1280 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1281 			pba = E1000_PBA_22K;	/* 22K for Rx, 18K for Tx */
1282 		else
1283 			pba = E1000_PBA_30K;	/* 30K for Rx, 10K for Tx */
1284 	}
1285 	E1000_WRITE_REG(hw, E1000_PBA, pba);
1286 
1287 	/*
1288 	 * These parameters set thresholds for the adapter's generation (Tx)
1289 	 * and response (Rx) to Ethernet PAUSE frames.  These are just threshold
1290 	 * settings.  Flow control is enabled or disabled in the configuration
1291 	 * file.
1292 	 * High-water mark is set down from the top of the rx fifo (not
1293 	 * sensitive to max_frame_size) and low-water is set just below
1294 	 * high-water mark.
1295 	 * The high water mark must be low enough to fit one full frame above
1296 	 * it in the rx FIFO.  Should be the lower of:
1297 	 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early
1298 	 * receive size (assuming ERT set to E1000_ERT_2048), or the full
1299 	 * Rx FIFO size minus one full frame.
1300 	 */
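	/*
	 * Worked example (for illustration only; assumes E1000_PBA_48K is 48,
	 * i.e. 48KB, and a 1518-byte max frame on a non-82573/ich9/ich10
	 * part): pba << 10 = 49152, 90% of that is 44236, and
	 * 49152 - 1518 = 47634, so high_water = 44236 & 0xFFF8 = 44232
	 * and low_water = 44224.
	 */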
1301 	high_water = min(((pba << 10) * 9 / 10),
1302 	    ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_ich9lan ||
1303 	    hw->mac.type == e1000_ich10lan) ?
1304 	    ((pba << 10) - (E1000_ERT_2048 << 3)) :
1305 	    ((pba << 10) - Adapter->max_frame_size)));
1306 
1307 	hw->fc.high_water = high_water & 0xFFF8;
1308 	hw->fc.low_water = hw->fc.high_water - 8;
1309 
1310 	if (hw->mac.type == e1000_80003es2lan)
1311 		hw->fc.pause_time = 0xFFFF;
1312 	else
1313 		hw->fc.pause_time = E1000_FC_PAUSE_TIME;
1314 	hw->fc.send_xon = B_TRUE;
1315 
1316 	/*
1317 	 * Reset the adapter hardware the second time.
1318 	 */
1319 	mutex_enter(&e1000g_nvm_lock);
1320 	result = e1000_reset_hw(hw);
1321 	mutex_exit(&e1000g_nvm_lock);
1322 
1323 	if (result != E1000_SUCCESS) {
1324 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1325 		goto init_fail;
1326 	}
1327 
1328 	/* disable wakeup control by default */
1329 	if (hw->mac.type >= e1000_82544)
1330 		E1000_WRITE_REG(hw, E1000_WUC, 0);
1331 
1332 	/*
1333 	 * MWI should be disabled on 82546.
1334 	 */
1335 	if (hw->mac.type == e1000_82546)
1336 		e1000_pci_clear_mwi(hw);
1337 	else
1338 		e1000_pci_set_mwi(hw);
1339 
1340 	/*
1341 	 * Configure/Initialize hardware
1342 	 */
1343 	mutex_enter(&e1000g_nvm_lock);
1344 	result = e1000_init_hw(hw);
1345 	mutex_exit(&e1000g_nvm_lock);
1346 
1347 	if (result < E1000_SUCCESS) {
1348 		e1000g_log(Adapter, CE_WARN, "Initialize hw failed");
1349 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1350 		goto init_fail;
1351 	}
1352 
1353 	/*
1354 	 * Restore LED settings to the default from EEPROM
1355 	 * to meet the standard for Sun platforms.
1356 	 */
1357 	(void) e1000_cleanup_led(hw);
1358 
1359 	/* Disable Smart Power Down */
1360 	phy_spd_state(hw, B_FALSE);
1361 
1362 	/* Make sure driver has control */
1363 	e1000g_get_driver_control(hw);
1364 
1365 	/*
1366 	 * Initialize unicast addresses.
1367 	 */
1368 	e1000g_init_unicst(Adapter);
1369 
1370 	/*
1371 	 * Setup and initialize the mctable structures.  After this routine
1372 	 * completes, the multicast table will be set.
1373 	 */
1374 	e1000g_setup_multicast(Adapter);
1375 	msec_delay(5);
1376 
1377 	/*
1378 	 * Implement Adaptive IFS
1379 	 */
1380 	e1000_reset_adaptive(hw);
1381 
1382 	/* Setup Interrupt Throttling Register */
1383 	if (hw->mac.type >= e1000_82540) {
1384 		E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate);
1385 	} else
1386 		Adapter->intr_adaptive = B_FALSE;
1387 
1388 	/* Start the timer for link setup */
1389 	if (hw->mac.autoneg)
1390 		link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000);
1391 	else
1392 		link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000);
1393 
1394 	mutex_enter(&Adapter->link_lock);
1395 	if (hw->phy.autoneg_wait_to_complete) {
1396 		Adapter->link_complete = B_TRUE;
1397 	} else {
1398 		Adapter->link_complete = B_FALSE;
1399 		Adapter->link_tid = timeout(e1000g_link_timer,
1400 		    (void *)Adapter, link_timeout);
1401 	}
1402 	mutex_exit(&Adapter->link_lock);
1403 
1404 	/* Enable PCI-Ex master */
1405 	if (hw->bus.type == e1000_bus_type_pci_express) {
1406 		e1000_enable_pciex_master(hw);
1407 	}
1408 
1409 	/* Save the state of the phy */
1410 	e1000g_get_phy_state(Adapter);
1411 
1412 	e1000g_param_sync(Adapter);
1413 
1414 	Adapter->init_count++;
1415 
1416 	if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
1417 		goto init_fail;
1418 	}
1419 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1420 		goto init_fail;
1421 	}
1422 
1423 	Adapter->poll_mode = e1000g_poll_mode;
1424 
1425 	rw_exit(&Adapter->chip_lock);
1426 
1427 	return (DDI_SUCCESS);
1428 
1429 init_fail:
1430 	rw_exit(&Adapter->chip_lock);
1431 	ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1432 	return (DDI_FAILURE);
1433 }
1434 
1435 /*
1436  * Check if the link is up
1437  */
1438 static boolean_t
1439 e1000g_link_up(struct e1000g *Adapter)
1440 {
1441 	struct e1000_hw *hw;
1442 	boolean_t link_up;
1443 
1444 	hw = &Adapter->shared;
1445 
1446 	(void) e1000_check_for_link(hw);
1447 
1448 	if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU) ||
1449 	    ((!hw->mac.get_link_status) && (hw->mac.type == e1000_82543)) ||
1450 	    ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
1451 	    (hw->mac.serdes_has_link))) {
1452 		link_up = B_TRUE;
1453 	} else {
1454 		link_up = B_FALSE;
1455 	}
1456 
1457 	return (link_up);
1458 }
1459 
1460 static void
1461 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
1462 {
1463 	struct iocblk *iocp;
1464 	struct e1000g *e1000gp;
1465 	enum ioc_reply status;
1466 
1467 	iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
1468 	iocp->ioc_error = 0;
1469 	e1000gp = (struct e1000g *)arg;
1470 
1471 	ASSERT(e1000gp);
1472 	if (e1000gp == NULL) {
1473 		miocnak(q, mp, 0, EINVAL);
1474 		return;
1475 	}
1476 
1477 	switch (iocp->ioc_cmd) {
1478 
1479 	case LB_GET_INFO_SIZE:
1480 	case LB_GET_INFO:
1481 	case LB_GET_MODE:
1482 	case LB_SET_MODE:
1483 		status = e1000g_loopback_ioctl(e1000gp, iocp, mp);
1484 		break;
1485 
1486 
1487 #ifdef E1000G_DEBUG
1488 	case E1000G_IOC_REG_PEEK:
1489 	case E1000G_IOC_REG_POKE:
1490 		status = e1000g_pp_ioctl(e1000gp, iocp, mp);
1491 		break;
1492 	case E1000G_IOC_CHIP_RESET:
1493 		e1000gp->reset_count++;
1494 		if (e1000g_reset_adapter(e1000gp))
1495 			status = IOC_ACK;
1496 		else
1497 			status = IOC_INVAL;
1498 		break;
1499 #endif
1500 	default:
1501 		status = IOC_INVAL;
1502 		break;
1503 	}
1504 
1505 	/*
1506 	 * Decide how to reply
1507 	 */
1508 	switch (status) {
1509 	default:
1510 	case IOC_INVAL:
1511 		/*
1512 		 * Error, reply with a NAK and EINVAL or the specified error
1513 		 */
1514 		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
1515 		    EINVAL : iocp->ioc_error);
1516 		break;
1517 
1518 	case IOC_DONE:
1519 		/*
1520 		 * OK, reply already sent
1521 		 */
1522 		break;
1523 
1524 	case IOC_ACK:
1525 		/*
1526 		 * OK, reply with an ACK
1527 		 */
1528 		miocack(q, mp, 0, 0);
1529 		break;
1530 
1531 	case IOC_REPLY:
1532 		/*
1533 		 * OK, send prepared reply as ACK or NAK
1534 		 */
1535 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
1536 		    M_IOCACK : M_IOCNAK;
1537 		qreply(q, mp);
1538 		break;
1539 	}
1540 }
1541 
1542 /*
1543  * The default value of e1000g_poll_mode == 0 assumes that the NIC is
1544  * capable of supporting only one interrupt and we shouldn't disable
1545  * the physical interrupt. In this case we let the interrupt come and
1546  * we queue the packets in the rx ring itself if we are in polling
1547  * mode (better latency but slightly lower performance and a very
1548  * high interrupt count in mpstat, which is harmless).
1549  *
1550  * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt
1551  * which can be disabled in poll mode. This gives better overall
1552  * throughput (compared to the mode above), shows a very low interrupt
1553  * count, but has slightly higher latency since we pick up the packets
1554  * when the poll thread does the polling.
1555  *
1556  * Currently, this flag should be enabled only while doing performance
1557  * measurement or when it can be guaranteed that the entire NIC going
1558  * into poll mode will not harm any traffic, such as cluster heartbeats.
1559  */
1560 int e1000g_poll_mode = 0;
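/*
 * For experiments, this flag can typically be set at boot time through an
 * /etc/system entry (illustrative, not a documented tunable):
 *	set e1000g:e1000g_poll_mode = 1
 */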
1561 
1562 /*
1563  * Called from the upper layers when driver is in polling mode to
1564  * pick up any queued packets. Care should be taken to not block
1565  * this thread.
1566  */
1567 static mblk_t *e1000g_poll_ring(void *arg, int bytes_to_pickup)
1568 {
1569 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)arg;
1570 	mblk_t			*mp = NULL;
1571 	mblk_t			*tail;
1572 	uint_t			sz = 0;
1573 	struct e1000g 		*adapter;
1574 
1575 	adapter = rx_ring->adapter;
1576 
1577 	mutex_enter(&rx_ring->rx_lock);
1578 	ASSERT(rx_ring->poll_flag);
1579 
1580 	/*
1581 	 * Get any packets that have arrived. This works only if we
1582 	 * actually disable the physical adapter/rx_ring interrupt
1583 	 * (e1000g_poll_mode == 1). If e1000g_poll_mode == 0,
1584 	 * packets will have already been added to the poll list
1585 	 * by the interrupt (see e1000g_intr_work()).
1586 	 */
1587 	if (adapter->poll_mode) {
1588 		mp = e1000g_receive(rx_ring, &tail, &sz);
1589 		if (mp != NULL) {
1590 			if (rx_ring->poll_list_head == NULL)
1591 				rx_ring->poll_list_head = mp;
1592 			else
1593 				rx_ring->poll_list_tail->b_next = mp;
1594 			rx_ring->poll_list_tail = tail;
1595 			rx_ring->poll_list_sz += sz;
1596 		}
1597 	}
1598 
1599 	mp = rx_ring->poll_list_head;
1600 	if (mp == NULL) {
1601 		mutex_exit(&rx_ring->rx_lock);
1602 		return (NULL);
1603 	}
1604 
1605 	/* Check if we can sendup the entire chain */
1606 	if (bytes_to_pickup >= rx_ring->poll_list_sz) {
1607 		mp = rx_ring->poll_list_head;
1608 		rx_ring->poll_list_head = NULL;
1609 		rx_ring->poll_list_tail = NULL;
1610 		rx_ring->poll_list_sz = 0;
1611 		mutex_exit(&rx_ring->rx_lock);
1612 		return (mp);
1613 	}
1614 
1615 	/*
1616 	 * We need to find out how much of the chain we can send up. We
1617 	 * are guaranteed that at least one packet will go up since
1618 	 * we already checked that.
1619 	 */
1620 	tail = mp;
1621 	sz = 0;
1622 	while (mp != NULL) {
1623 		sz += MBLKL(mp);
1624 		if (sz > bytes_to_pickup) {
1625 			sz -= MBLKL(mp);
1626 			break;
1627 		}
1628 		tail = mp;
1629 		mp = mp->b_next;
1630 	}
1631 
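	/*
	 * Detach the sub-chain [head .. tail] that fits within
	 * bytes_to_pickup and leave the remainder on the poll list.
	 */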
1632 	mp = rx_ring->poll_list_head;
1633 	rx_ring->poll_list_head = tail->b_next;
1634 	if (rx_ring->poll_list_head == NULL)
1635 		rx_ring->poll_list_tail = NULL;
1636 	rx_ring->poll_list_sz -= sz;
1637 	tail->b_next = NULL;
1638 	mutex_exit(&rx_ring->rx_lock);
1639 	return (mp);
1640 }
1641 
1642 static int
1643 e1000g_m_start(void *arg)
1644 {
1645 	struct e1000g *Adapter = (struct e1000g *)arg;
1646 
1647 	return (e1000g_start(Adapter, B_TRUE));
1648 }
1649 
1650 static int
1651 e1000g_start(struct e1000g *Adapter, boolean_t global)
1652 {
1653 	if (global) {
1654 		/* Allocate dma resources for descriptors and buffers */
1655 		if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) {
1656 			e1000g_log(Adapter, CE_WARN,
1657 			    "Alloc DMA resources failed");
1658 			return (ENOTACTIVE);
1659 		}
1660 		Adapter->rx_buffer_setup = B_FALSE;
1661 	}
1662 
1663 	if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) {
1664 		if (e1000g_init(Adapter) != DDI_SUCCESS) {
1665 			e1000g_log(Adapter, CE_WARN,
1666 			    "Adapter initialization failed");
1667 			if (global)
1668 				e1000g_release_dma_resources(Adapter);
1669 			return (ENOTACTIVE);
1670 		}
1671 	}
1672 
1673 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1674 
1675 	/* Setup and initialize the transmit structures */
1676 	e1000g_tx_setup(Adapter);
1677 	msec_delay(5);
1678 
1679 	/* Setup and initialize the receive structures */
1680 	e1000g_rx_setup(Adapter);
1681 	msec_delay(5);
1682 
1683 	/* Restore the e1000g promiscuous mode */
1684 	e1000g_restore_promisc(Adapter);
1685 
1686 	e1000g_mask_interrupt(Adapter);
1687 
1688 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1689 		rw_exit(&Adapter->chip_lock);
1690 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1691 		return (ENOTACTIVE);
1692 	}
1693 
1694 	Adapter->chip_state = E1000G_START;
1695 	Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
1696 
1697 	rw_exit(&Adapter->chip_lock);
1698 
1699 	/* Enable and start the watchdog timer */
1700 	enable_watchdog_timer(Adapter);
1701 
1702 	return (0);
1703 }
1704 
1705 static void
1706 e1000g_m_stop(void *arg)
1707 {
1708 	struct e1000g *Adapter = (struct e1000g *)arg;
1709 
1710 	e1000g_stop(Adapter, B_TRUE);
1711 }
1712 
1713 static void
1714 e1000g_stop(struct e1000g *Adapter, boolean_t global)
1715 {
1716 	int result;
1717 
1718 	/* Set stop flags */
1719 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1720 
1721 	Adapter->chip_state = E1000G_STOP;
1722 	Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT;
1723 
1724 	rw_exit(&Adapter->chip_lock);
1725 
1726 	/* Drain tx sessions */
1727 	(void) e1000g_tx_drain(Adapter);
1728 
1729 	/* Disable and stop all the timers */
1730 	disable_watchdog_timer(Adapter);
1731 	stop_link_timer(Adapter);
1732 	stop_82547_timer(Adapter->tx_ring);
1733 
1734 	/* Stop the chip and release pending resources */
1735 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1736 
1737 	/* Tell firmware driver is no longer in control */
1738 	e1000g_release_driver_control(&Adapter->shared);
1739 
1740 	e1000g_clear_all_interrupts(Adapter);
1741 
1742 	mutex_enter(&e1000g_nvm_lock);
1743 	result = e1000_reset_hw(&Adapter->shared);
1744 	mutex_exit(&e1000g_nvm_lock);
1745 
1746 	if (result != E1000_SUCCESS) {
1747 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1748 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1749 	}
1750 
1751 	/* Release resources still held by the TX descriptors */
1752 	e1000g_tx_clean(Adapter);
1753 
1754 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
1755 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1756 
1757 	/* Clean the pending rx jumbo packet fragment */
1758 	e1000g_rx_clean(Adapter);
1759 
1760 	rw_exit(&Adapter->chip_lock);
1761 
1762 	if (global)
1763 		e1000g_release_dma_resources(Adapter);
1764 }
1765 
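/*
 * e1000g_rx_clean - free any partially assembled receive packet
 * (e.g. a pending jumbo frame fragment) held on the rx ring.
 */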
1766 static void
1767 e1000g_rx_clean(struct e1000g *Adapter)
1768 {
1769 	e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
1770 
1771 	if (rx_ring->rx_mblk != NULL) {
1772 		freemsg(rx_ring->rx_mblk);
1773 		rx_ring->rx_mblk = NULL;
1774 		rx_ring->rx_mblk_tail = NULL;
1775 		rx_ring->rx_mblk_len = 0;
1776 	}
1777 }
1778 
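/*
 * e1000g_tx_clean - release resources still held by the tx descriptors
 *
 * Frees the message blocks attached to used tx software packets, moves
 * the packets back to the free list, and resets the hardware tx
 * head/tail descriptor pointers.  Called with the chip_lock held.
 */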
1779 static void
1780 e1000g_tx_clean(struct e1000g *Adapter)
1781 {
1782 	e1000g_tx_ring_t *tx_ring;
1783 	p_tx_sw_packet_t packet;
1784 	mblk_t *mp;
1785 	mblk_t *nmp;
1786 	uint32_t packet_count;
1787 
1788 	tx_ring = Adapter->tx_ring;
1789 
1790 	/*
1791 	 * Here we don't need to protect the lists using
1792 	 * There is no need to protect the lists with the
1793 	 * usedlist_lock and freelist_lock here, because they
1794 	 * are already protected by the chip_lock.
1795 	mp = NULL;
1796 	nmp = NULL;
1797 	packet_count = 0;
1798 	packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list);
1799 	while (packet != NULL) {
1800 		if (packet->mp != NULL) {
1801 			/* Assemble the message chain */
1802 			if (mp == NULL) {
1803 				mp = packet->mp;
1804 				nmp = packet->mp;
1805 			} else {
1806 				nmp->b_next = packet->mp;
1807 				nmp = packet->mp;
1808 			}
1809 			/* Disconnect the message from the sw packet */
1810 			packet->mp = NULL;
1811 		}
1812 
1813 		e1000g_free_tx_swpkt(packet);
1814 		packet_count++;
1815 
1816 		packet = (p_tx_sw_packet_t)
1817 		    QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link);
1818 	}
1819 
1820 	if (mp != NULL)
1821 		freemsgchain(mp);
1822 
1823 	if (packet_count > 0) {
1824 		QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list);
1825 		QUEUE_INIT_LIST(&tx_ring->used_list);
1826 
1827 		/* Setup TX descriptor pointers */
1828 		tx_ring->tbd_next = tx_ring->tbd_first;
1829 		tx_ring->tbd_oldest = tx_ring->tbd_first;
1830 
1831 		/* Setup our HW Tx Head & Tail descriptor pointers */
1832 		E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
1833 		E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
1834 	}
1835 }
1836 
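/*
 * e1000g_tx_drain - wait up to TX_DRAIN_TIME milliseconds for pending
 * transmits to complete; returns B_TRUE if the used list drained.
 */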
1837 static boolean_t
1838 e1000g_tx_drain(struct e1000g *Adapter)
1839 {
1840 	int i;
1841 	boolean_t done;
1842 	e1000g_tx_ring_t *tx_ring;
1843 
1844 	tx_ring = Adapter->tx_ring;
1845 
1846 	/* Allow up to TX_DRAIN_TIME (ms) for pending transmits to complete. */
1847 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1848 		mutex_enter(&tx_ring->usedlist_lock);
1849 		done = IS_QUEUE_EMPTY(&tx_ring->used_list);
1850 		mutex_exit(&tx_ring->usedlist_lock);
1851 
1852 		if (done)
1853 			break;
1854 
1855 		msec_delay(1);
1856 	}
1857 
1858 	return (done);
1859 }
1860 
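/*
 * e1000g_rx_drain - drain the list of pending receive packets
 *
 * Packets still held by the upper layers are marked for deferred
 * release; returns B_FALSE if any such packet remains outstanding.
 */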
1861 static boolean_t
1862 e1000g_rx_drain(struct e1000g *Adapter)
1863 {
1864 	e1000g_rx_ring_t *rx_ring;
1865 	p_rx_sw_packet_t packet;
1866 	boolean_t done;
1867 
1868 	rx_ring = Adapter->rx_ring;
1869 	done = B_TRUE;
1870 
1871 	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
1872 
1873 	while (rx_ring->pending_list != NULL) {
1874 		packet = rx_ring->pending_list;
1875 		rx_ring->pending_list =
1876 		    rx_ring->pending_list->next;
1877 
1878 		if (packet->flag == E1000G_RX_SW_STOP) {
1879 			packet->flag = E1000G_RX_SW_DETACH;
1880 			done = B_FALSE;
1881 		} else {
1882 			ASSERT(packet->flag == E1000G_RX_SW_FREE);
1883 			ASSERT(packet->mp == NULL);
1884 			e1000g_free_rx_sw_packet(packet);
1885 		}
1886 	}
1887 
1888 	rw_exit(&e1000g_rx_detach_lock);
1889 
1890 	return (done);
1891 }
1892 
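/*
 * e1000g_reset_adapter - local reset: stop and restart the adapter
 * without releasing its DMA resources.
 */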
1893 static boolean_t
1894 e1000g_reset_adapter(struct e1000g *Adapter)
1895 {
1896 	e1000g_stop(Adapter, B_FALSE);
1897 
1898 	if (e1000g_start(Adapter, B_FALSE)) {
1899 		e1000g_log(Adapter, CE_WARN, "Reset failed");
1900 		return (B_FALSE);
1901 	}
1902 
1903 	return (B_TRUE);
1904 }
1905 
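/*
 * e1000g_global_reset - global reset: stop and restart the adapter,
 * releasing and reallocating its DMA resources.
 */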
1906 boolean_t
1907 e1000g_global_reset(struct e1000g *Adapter)
1908 {
1909 	e1000g_stop(Adapter, B_TRUE);
1910 
1911 	Adapter->init_count = 0;
1912 
1913 	if (e1000g_start(Adapter, B_TRUE)) {
1914 		e1000g_log(Adapter, CE_WARN, "Reset failed");
1915 		return (B_FALSE);
1916 	}
1917 
1918 	return (B_TRUE);
1919 }
1920 
1921 /*
1922  * e1000g_intr_pciexpress - ISR for PCI Express chipsets
1923  *
1924  * This interrupt service routine is for PCI-Express adapters.
1925  * The ICR contents are valid only when the E1000_ICR_INT_ASSERTED
1926  * bit is set.
1927  */
1928 static uint_t
1929 e1000g_intr_pciexpress(caddr_t arg)
1930 {
1931 	struct e1000g *Adapter;
1932 	uint32_t icr;
1933 
1934 	Adapter = (struct e1000g *)(uintptr_t)arg;
1935 	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
1936 
1937 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
1938 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
1939 
1940 	if (icr & E1000_ICR_INT_ASSERTED) {
1941 		/*
1942 		 * E1000_ICR_INT_ASSERTED bit was set:
1943 		 * Read(Clear) the ICR, claim this interrupt,
1944 		 * look for work to do.
1945 		 */
1946 		e1000g_intr_work(Adapter, icr);
1947 		return (DDI_INTR_CLAIMED);
1948 	} else {
1949 		/*
1950 		 * E1000_ICR_INT_ASSERTED bit was not set:
1951 		 * Don't claim this interrupt, return immediately.
1952 		 */
1953 		return (DDI_INTR_UNCLAIMED);
1954 	}
1955 }
1956 
1957 /*
1958  * e1000g_intr - ISR for PCI/PCI-X chipsets
1959  *
1960  * This interrupt service routine is for PCI/PCI-X adapters.
1961  * We check the ICR contents regardless of whether the
1962  * E1000_ICR_INT_ASSERTED bit is set.
1963  */
1964 static uint_t
1965 e1000g_intr(caddr_t arg)
1966 {
1967 	struct e1000g *Adapter;
1968 	uint32_t icr;
1969 
1970 	Adapter = (struct e1000g *)(uintptr_t)arg;
1971 	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
1972 
1973 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
1974 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
1975 
1976 	if (icr) {
1977 		/*
1978 		 * Any bit was set in ICR:
1979 		 * Read(Clear) the ICR, claim this interrupt,
1980 		 * look for work to do.
1981 		 */
1982 		e1000g_intr_work(Adapter, icr);
1983 		return (DDI_INTR_CLAIMED);
1984 	} else {
1985 		/*
1986 		 * No bit was set in ICR:
1987 		 * Don't claim this interrupt, return immediately.
1988 		 */
1989 		return (DDI_INTR_UNCLAIMED);
1990 	}
1991 }
1992 
1993 /*
1994  * e1000g_intr_work - actual processing of ISR
1995  *
1996  * Read(clear) the ICR contents and call appropriate interrupt
1997  * processing routines.
1998  */
1999 static void
2000 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr)
2001 {
2002 	struct e1000_hw *hw;
2003 	e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
2004 	hw = &Adapter->shared;
2005 
2006 	Adapter->rx_pkt_cnt = 0;
2007 	Adapter->tx_pkt_cnt = 0;
2008 
2009 	rw_enter(&Adapter->chip_lock, RW_READER);
2010 	/*
2011 	 * Here we need to check the "chip_state" flag within the chip_lock to
2012 	 * ensure the receive routine will not execute when the adapter is
2013 	 * being reset.
2014 	 */
2015 	if (Adapter->chip_state != E1000G_START) {
2016 		rw_exit(&Adapter->chip_lock);
2017 		return;
2018 	}
2019 
2020 	if (icr & E1000_ICR_RXT0) {
2021 		mblk_t			*mp;
2022 		uint_t			sz = 0;
2023 		mblk_t			*tmp, *tail = NULL;
2024 		e1000g_rx_ring_t	*rx_ring;
2025 
2026 		rx_ring = Adapter->rx_ring;
2027 		mutex_enter(&rx_ring->rx_lock);
2028 
2029 		/*
2030 		 * Even if the real interrupt for the Rx ring was
2031 		 * not disabled (e1000g_poll_mode == 0), we still
2032 		 * pick up the packets and queue them on the poll
2033 		 * list when the ring is in polling mode.  This
2034 		 * lets the polling thread pick up packets quickly
2035 		 * while in polling mode and helps improve
2036 		 * latency.
2037 		 */
2038 		mp = e1000g_receive(rx_ring, &tail, &sz);
2039 		rw_exit(&Adapter->chip_lock);
2040 
2041 		if (mp != NULL) {
2042 			ASSERT(tail != NULL);
2043 			if (!rx_ring->poll_flag) {
2044 				/*
2045 				 * If not polling, see if something was
2046 				 * already queued. Take care not to
2047 				 * reorder packets.
2048 				 */
2049 				if (rx_ring->poll_list_head == NULL) {
2050 					mutex_exit(&rx_ring->rx_lock);
2051 					mac_rx_ring(Adapter->mh, rx_ring->mrh,
2052 					    mp, rx_ring->ring_gen_num);
2053 				} else {
2054 					tmp = rx_ring->poll_list_head;
2055 					rx_ring->poll_list_head = NULL;
2056 					rx_ring->poll_list_tail->b_next = mp;
2057 					rx_ring->poll_list_tail = NULL;
2058 					rx_ring->poll_list_sz = 0;
2059 					mutex_exit(&rx_ring->rx_lock);
2060 					mac_rx_ring(Adapter->mh, rx_ring->mrh,
2061 					    tmp, rx_ring->ring_gen_num);
2062 				}
2063 			} else {
2064 				/*
2065 				 * We are in a polling mode. Put the
2066 				 * processed packets on the poll list.
2067 				 */
2068 				if (rx_ring->poll_list_head == NULL)
2069 					rx_ring->poll_list_head = mp;
2070 				else
2071 					rx_ring->poll_list_tail->b_next = mp;
2072 				rx_ring->poll_list_tail = tail;
2073 				rx_ring->poll_list_sz += sz;
2074 				mutex_exit(&rx_ring->rx_lock);
2075 			}
2076 		} else if (!rx_ring->poll_flag &&
2077 		    rx_ring->poll_list_head != NULL) {
2078 			/*
2079 			 * Nothing new has arrived (then why was
2080 			 * the interrupt raised??).  Check whether
2081 			 * something was queued from the last
2082 			 * time.
2083 			 */
2084 			tmp = rx_ring->poll_list_head;
2085 			rx_ring->poll_list_head = NULL;
2086 			rx_ring->poll_list_tail = NULL;
2087 			rx_ring->poll_list_sz = 0;
2088 			mutex_exit(&rx_ring->rx_lock);
2089 			mac_rx_ring(Adapter->mh, rx_ring->mrh,
2090 			    tmp, rx_ring->ring_gen_num);
2091 		} else {
2092 			mutex_exit(&rx_ring->rx_lock);
2093 		}
2094 	} else
2095 		rw_exit(&Adapter->chip_lock);
2096 
2097 	if (icr & E1000_ICR_TXDW) {
2098 		if (!Adapter->tx_intr_enable)
2099 			e1000g_clear_tx_interrupt(Adapter);
2100 
2101 		/* Recycle the tx descriptors */
2102 		rw_enter(&Adapter->chip_lock, RW_READER);
2103 		(void) e1000g_recycle(tx_ring);
2104 		E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr);
2105 		rw_exit(&Adapter->chip_lock);
2106 
2107 		if (tx_ring->resched_needed &&
2108 		    (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) {
2109 			tx_ring->resched_needed = B_FALSE;
2110 			mac_tx_update(Adapter->mh);
2111 			E1000G_STAT(tx_ring->stat_reschedule);
2112 		}
2113 	}
2114 
2115 	/*
2116 	 * The Receive Sequence errors RXSEQ and the link status change LSC
2117 	 * are checked to detect that the cable has been pulled out. For
2118 	 * the Wiseman 2.0 silicon, the receive sequence errors interrupt
2119 	 * is an indication that the cable is not connected.
2120 	 */
2121 	if ((icr & E1000_ICR_RXSEQ) ||
2122 	    (icr & E1000_ICR_LSC) ||
2123 	    (icr & E1000_ICR_GPI_EN1)) {
2124 		boolean_t link_changed;
2125 		timeout_id_t tid = 0;
2126 
2127 		stop_watchdog_timer(Adapter);
2128 
2129 		rw_enter(&Adapter->chip_lock, RW_WRITER);
2130 
2131 		/*
2132 		 * Because we got a link-status-change interrupt, force
2133 		 * e1000_check_for_link() to look at phy
2134 		 */
2135 		Adapter->shared.mac.get_link_status = B_TRUE;
2136 
2137 		/* e1000g_link_check takes care of link status change */
2138 		link_changed = e1000g_link_check(Adapter);
2139 
2140 		/* Get new phy state */
2141 		e1000g_get_phy_state(Adapter);
2142 
2143 		/*
2144 	 * If the link timer has not timed out, we won't notify the
2145 	 * upper layer of any link state change until the link is up.
2146 		 */
2147 		if (link_changed && !Adapter->link_complete) {
2148 			if (Adapter->link_state == LINK_STATE_UP) {
2149 				mutex_enter(&Adapter->link_lock);
2150 				Adapter->link_complete = B_TRUE;
2151 				tid = Adapter->link_tid;
2152 				Adapter->link_tid = 0;
2153 				mutex_exit(&Adapter->link_lock);
2154 			} else {
2155 				link_changed = B_FALSE;
2156 			}
2157 		}
2158 		rw_exit(&Adapter->chip_lock);
2159 
2160 		if (link_changed) {
2161 			if (tid != 0)
2162 				(void) untimeout(tid);
2163 
2164 			/*
2165 			 * Workaround for esb2. Data stuck in fifo on a link
2166 			 * down event. Stop receiver here and reset in watchdog.
2167 			 */
2168 			if ((Adapter->link_state == LINK_STATE_DOWN) &&
2169 			    (Adapter->shared.mac.type == e1000_80003es2lan)) {
2170 				uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
2171 				E1000_WRITE_REG(hw, E1000_RCTL,
2172 				    rctl & ~E1000_RCTL_EN);
2173 				e1000g_log(Adapter, CE_WARN,
2174 				    "ESB2 receiver disabled");
2175 				Adapter->esb2_workaround = B_TRUE;
2176 			}
2177 
2178 			mac_link_update(Adapter->mh, Adapter->link_state);
2179 		}
2180 
2181 		start_watchdog_timer(Adapter);
2182 	}
2183 }
2184 
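/*
 * e1000g_init_unicst - initialize or restore the unicast address slots
 *
 * On the first initialization the software slot table is cleared; on
 * subsequent initializations the RAR registers are re-programmed from
 * the saved slots.
 */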
2185 static void
2186 e1000g_init_unicst(struct e1000g *Adapter)
2187 {
2188 	struct e1000_hw *hw;
2189 	int slot;
2190 
2191 	hw = &Adapter->shared;
2192 
2193 	if (Adapter->init_count == 0) {
2194 		/* Initialize the multiple unicast addresses */
2195 		Adapter->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
2196 
2197 		/* Workaround for an erratum of the 82571 chipset */
2198 		if ((hw->mac.type == e1000_82571) &&
2199 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2200 			Adapter->unicst_total--;
2201 
2202 		Adapter->unicst_avail = Adapter->unicst_total;
2203 
2204 		for (slot = 0; slot < Adapter->unicst_total; slot++) {
2205 			/* Clear both the flag and MAC address */
2206 			Adapter->unicst_addr[slot].reg.high = 0;
2207 			Adapter->unicst_addr[slot].reg.low = 0;
2208 		}
2209 	} else {
2210 		/* Workaround for an erratum of the 82571 chipset */
2211 		if ((hw->mac.type == e1000_82571) &&
2212 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2213 			e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY);
2214 
2215 		/* Re-configure the RAR registers */
2216 		for (slot = 0; slot < Adapter->unicst_total; slot++)
2217 			if (Adapter->unicst_addr[slot].mac.set == 1)
2218 				e1000_rar_set(hw,
2219 				    Adapter->unicst_addr[slot].mac.addr, slot);
2220 	}
2221 
2222 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2223 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2224 }
2225 
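/*
 * e1000g_unicst_set - program (or clear, when mac_addr is NULL) the
 * unicast address in the given receive address register slot.
 */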
2226 static int
2227 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr,
2228     int slot)
2229 {
2230 	struct e1000_hw *hw;
2231 
2232 	hw = &Adapter->shared;
2233 
2234 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2235 
2236 	/*
2237 	 * The first revision of Wiseman silicon (rev 2.0) has an errata
2238 	 * that requires the receiver to be in reset when any of the
2239 	 * receive address registers (RAR regs) are accessed.  The first
2240 	 * rev of Wiseman silicon also requires MWI to be disabled when
2241 	 * a global reset or a receive reset is issued.  So before we
2242 	 * initialize the RARs, we check the rev of the Wiseman controller
2243 	 * and work around any necessary HW errata.
2244 	 */
2245 	if ((hw->mac.type == e1000_82542) &&
2246 	    (hw->revision_id == E1000_REVISION_2)) {
2247 		e1000_pci_clear_mwi(hw);
2248 		E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2249 		msec_delay(5);
2250 	}
2251 	if (mac_addr == NULL) {
2252 		E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0);
2253 		E1000_WRITE_FLUSH(hw);
2254 		E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0);
2255 		E1000_WRITE_FLUSH(hw);
2256 		/* Clear both the flag and MAC address */
2257 		Adapter->unicst_addr[slot].reg.high = 0;
2258 		Adapter->unicst_addr[slot].reg.low = 0;
2259 	} else {
2260 		bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr,
2261 		    ETHERADDRL);
2262 		e1000_rar_set(hw, (uint8_t *)mac_addr, slot);
2263 		Adapter->unicst_addr[slot].mac.set = 1;
2264 	}
2265 
2266 	/* Workaround for an erratum of the 82571 chipset */
2267 	if (slot == 0) {
2268 		if ((hw->mac.type == e1000_82571) &&
2269 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2270 			if (mac_addr == NULL) {
2271 				E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2272 				    slot << 1, 0);
2273 				E1000_WRITE_FLUSH(hw);
2274 				E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2275 				    (slot << 1) + 1, 0);
2276 				E1000_WRITE_FLUSH(hw);
2277 			} else {
2278 				e1000_rar_set(hw, (uint8_t *)mac_addr,
2279 				    LAST_RAR_ENTRY);
2280 			}
2281 	}
2282 
2283 	/*
2284 	 * If we are using Wiseman rev 2.0 silicon, we will have previously
2285 	 * put the receiver in reset, and disabled MWI, to work around some
2286 	 * HW errata.  Now we should take the receiver out of reset, and
2287 	 * re-enable MWI if it was previously enabled by the PCI BIOS.
2288 	 */
2289 	if ((hw->mac.type == e1000_82542) &&
2290 	    (hw->revision_id == E1000_REVISION_2)) {
2291 		E1000_WRITE_REG(hw, E1000_RCTL, 0);
2292 		msec_delay(1);
2293 		if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2294 			e1000_pci_set_mwi(hw);
2295 		e1000g_rx_setup(Adapter);
2296 	}
2297 
2298 	rw_exit(&Adapter->chip_lock);
2299 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2300 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2301 		return (EIO);
2302 	}
2303 
2304 	return (0);
2305 }
2306 
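/*
 * multicst_add - add a multicast address to the software table and
 * update the hardware multicast filter.
 */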
2307 static int
2308 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr)
2309 {
2310 	struct e1000_hw *hw = &Adapter->shared;
2311 	int res = 0;
2312 
2313 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2314 
2315 	if ((multiaddr[0] & 01) == 0) {
2316 		res = EINVAL;
2317 		goto done;
2318 	}
2319 
2320 	if (Adapter->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2321 		res = ENOENT;
2322 		goto done;
2323 	}
2324 
2325 	bcopy(multiaddr,
2326 	    &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL);
2327 	Adapter->mcast_count++;
2328 
2329 	/*
2330 	 * Update the MC table in the hardware
2331 	 */
2332 	e1000g_clear_interrupt(Adapter);
2333 
2334 	e1000g_setup_multicast(Adapter);
2335 
2336 	if ((hw->mac.type == e1000_82542) &&
2337 	    (hw->revision_id == E1000_REVISION_2))
2338 		e1000g_rx_setup(Adapter);
2339 
2340 	e1000g_mask_interrupt(Adapter);
2341 
2342 done:
2343 	rw_exit(&Adapter->chip_lock);
2344 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2345 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2346 		res = EIO;
2347 	}
2348 
2349 	return (res);
2350 }
2351 
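/*
 * multicst_remove - remove a multicast address from the software table
 * and update the hardware multicast filter.
 */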
2352 static int
2353 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr)
2354 {
2355 	struct e1000_hw *hw = &Adapter->shared;
2356 	unsigned i;
2357 
2358 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2359 
2360 	for (i = 0; i < Adapter->mcast_count; i++) {
2361 		if (bcmp(multiaddr, &Adapter->mcast_table[i],
2362 		    ETHERADDRL) == 0) {
2363 			for (i++; i < Adapter->mcast_count; i++) {
2364 				Adapter->mcast_table[i - 1] =
2365 				    Adapter->mcast_table[i];
2366 			}
2367 			Adapter->mcast_count--;
2368 			break;
2369 		}
2370 	}
2371 
2372 	/*
2373 	 * Update the MC table in the hardware
2374 	 */
2375 	e1000g_clear_interrupt(Adapter);
2376 
2377 	e1000g_setup_multicast(Adapter);
2378 
2379 	if ((hw->mac.type == e1000_82542) &&
2380 	    (hw->revision_id == E1000_REVISION_2))
2381 		e1000g_rx_setup(Adapter);
2382 
2383 	e1000g_mask_interrupt(Adapter);
2384 
2385 done:
2386 	rw_exit(&Adapter->chip_lock);
2387 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2388 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2389 		return (EIO);
2390 	}
2391 
2392 	return (0);
2393 }
2394 
2395 /*
2396  * e1000g_setup_multicast - setup multicast data structures
2397  *
2398  * This routine initializes all of the multicast related structures.
2399  */
2400 void
2401 e1000g_setup_multicast(struct e1000g *Adapter)
2402 {
2403 	uint8_t *mc_addr_list;
2404 	uint32_t mc_addr_count;
2405 	uint32_t rctl;
2406 	struct e1000_hw *hw;
2407 
2408 	hw = &Adapter->shared;
2409 
2410 	/*
2411 	 * The e1000g has the ability to do perfect filtering of 16
2412 	 * addresses. The driver uses one of the e1000g's 16 receive
2413 	 * address registers for its node/network/mac/individual address.
2414 	 * So, we have room for up to 15 multicast addresses in the CAM,
2415 	 * additional MC addresses are handled by the MTA (Multicast Table
2416 	 * Array)
2417 	 */
2418 
2419 	rctl = E1000_READ_REG(hw, E1000_RCTL);
2420 
2421 	mc_addr_list = (uint8_t *)Adapter->mcast_table;
2422 
2423 	if (Adapter->mcast_count > MAX_NUM_MULTICAST_ADDRESSES) {
2424 		E1000G_DEBUGLOG_1(Adapter, CE_WARN,
2425 		    "Adapter requested more than %d MC Addresses.\n",
2426 		    MAX_NUM_MULTICAST_ADDRESSES);
2427 		mc_addr_count = MAX_NUM_MULTICAST_ADDRESSES;
2428 	} else {
2429 		/*
2430 		 * Set the number of MC addresses that we are being
2431 		 * requested to use
2432 		 */
2433 		mc_addr_count = Adapter->mcast_count;
2434 	}
2435 	/*
2436 	 * The Wiseman 2.0 silicon has an erratum by which the receiver will
2437 	 * hang while writing to the receive address registers if the receiver
2438 	 * is not in reset before writing to the registers. Updating the RAR
2439 	 * is done during the setting up of the multicast table, hence the
2440 	 * receiver has to be put in reset before updating the multicast table
2441 	 * and then taken out of reset at the end
2442 	 */
2443 	/*
2444 	 * If MWI was enabled, disable it before issuing the global
2445 	 * reset to the hardware.
2446 	 */
2447 	/*
2448 	 * Only required for WISEMAN_2_0
2449 	 */
2450 	if ((hw->mac.type == e1000_82542) &&
2451 	    (hw->revision_id == E1000_REVISION_2)) {
2452 		e1000_pci_clear_mwi(hw);
2453 		/*
2454 		 * The e1000g must be in reset before changing any RA
2455 		 * registers. Reset receive unit.  The chip will remain in
2456 		 * the reset state until software explicitly restarts it.
2457 		 */
2458 		E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2459 		/* Allow receiver time to go in to reset */
2460 		msec_delay(5);
2461 	}
2462 
2463 	e1000_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2464 	    Adapter->unicst_total, hw->mac.rar_entry_count);
2465 
2466 	/*
2467 	 * Only for Wiseman_2_0
2468 	 * If MWI was enabled, re-enable it after issuing (as we
2469 	 * disabled it above) the receive reset command.
2470 	 * Wainwright does not have a receive reset command; the only thing
2471 	 * close to it is a global reset, which would also require tx setup.
2472 	 */
2473 	if ((hw->mac.type == e1000_82542) &&
2474 	    (hw->revision_id == E1000_REVISION_2)) {
2475 		/*
2476 		 * If MWI was enabled, re-enable it after issuing the
2477 		 * global or receive reset to the hardware.
2478 		 */
2479 
2480 		/*
2481 		 * Take receiver out of reset
2482 		 * clear E1000_RCTL_RST bit (and all others)
2483 		 */
2484 		E1000_WRITE_REG(hw, E1000_RCTL, 0);
2485 		msec_delay(5);
2486 		if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2487 			e1000_pci_set_mwi(hw);
2488 	}
2489 
2490 	/*
2491 	 * Restore original value
2492 	 */
2493 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2494 }
2495 
2496 int
2497 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
2498 {
2499 	struct e1000g *Adapter = (struct e1000g *)arg;
2500 
2501 	return ((add) ? multicst_add(Adapter, addr)
2502 	    : multicst_remove(Adapter, addr));
2503 }
2504 
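/*
 * e1000g_m_promisc - GLDv3 mc_setpromisc entry point; turn promiscuous
 * reception on or off by updating the receive control register.
 */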
2505 int
2506 e1000g_m_promisc(void *arg, boolean_t on)
2507 {
2508 	struct e1000g *Adapter = (struct e1000g *)arg;
2509 	uint32_t rctl;
2510 
2511 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2512 
2513 	rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
2514 
2515 	if (on)
2516 		rctl |=
2517 		    (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
2518 	else
2519 		rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
2520 
2521 	E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
2522 
2523 	Adapter->e1000g_promisc = on;
2524 
2525 	rw_exit(&Adapter->chip_lock);
2526 
2527 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2528 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2529 		return (EIO);
2530 	}
2531 
2532 	return (0);
2533 }
2534 
2535 /*
2536  * Entry points to enable and disable interrupts at the granularity of
2537  * a group.
2538  * Turns the poll_mode for the whole adapter on and off to enable or
2539  * override the ring level polling control over the hardware interrupts.
2540  */
2541 static int
2542 e1000g_rx_group_intr_enable(mac_intr_handle_t arg)
2543 {
2544 	struct e1000g		*adapter = (struct e1000g *)arg;
2545 	e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2546 
2547 	/*
2548 	 * Later interrupts at the granularity of this ring will
2549 	 * invoke mac_rx() with NULL, indicating the need for another
2550 	 * software classification.
2551 	 * We have a single ring usable per adapter now, so we only need to
2552 	 * reset the rx handle for that one.
2553 	 * When more RX rings can be used, we should update each one of them.
2554 	 */
2555 	mutex_enter(&rx_ring->rx_lock);
2556 	rx_ring->mrh = NULL;
2557 	adapter->poll_mode = B_FALSE;
2558 	mutex_exit(&rx_ring->rx_lock);
2559 	return (0);
2560 }
2561 
2562 static int
2563 e1000g_rx_group_intr_disable(mac_intr_handle_t arg)
2564 {
2565 	struct e1000g *adapter = (struct e1000g *)arg;
2566 	e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2567 
2568 	mutex_enter(&rx_ring->rx_lock);
2569 
2570 	/*
2571 	 * Later interrupts at the granularity of this ring will
2572 	 * invoke mac_rx() with the handle for this ring.
2573 	 */
2574 	adapter->poll_mode = B_TRUE;
2575 	rx_ring->mrh = rx_ring->mrh_init;
2576 	mutex_exit(&rx_ring->rx_lock);
2577 	return (0);
2578 }
2579 
2580 /*
2581  * Entry points to enable and disable interrupts at the granularity of
2582  * a ring.
2583  * The adapter's poll_mode controls whether we actually proceed with
2584  * hardware interrupt toggling.
2585  */
2586 static int
2587 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh)
2588 {
2589 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)intrh;
2590 	struct e1000g 		*adapter = rx_ring->adapter;
2591 	struct e1000_hw 	*hw = &adapter->shared;
2592 	uint32_t		intr_mask;
2593 	boolean_t		poll_mode;
2594 
2595 	mutex_enter(&rx_ring->rx_lock);
2596 	rx_ring->poll_flag = 0;
2597 	poll_mode = adapter->poll_mode;
2598 	mutex_exit(&rx_ring->rx_lock);
2599 
2600 	if (poll_mode) {
2601 		/* Rx interrupt enabling for MSI and legacy */
2602 		intr_mask = E1000_READ_REG(hw, E1000_IMS);
2603 		intr_mask |= E1000_IMS_RXT0;
2604 		E1000_WRITE_REG(hw, E1000_IMS, intr_mask);
2605 		E1000_WRITE_FLUSH(hw);
2606 
2607 		/* Trigger a Rx interrupt to check Rx ring */
2608 		E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
2609 		E1000_WRITE_FLUSH(hw);
2610 	}
2611 	return (0);
2612 }
2613 
2614 static int
2615 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh)
2616 {
2617 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)intrh;
2618 	struct e1000g 		*adapter = rx_ring->adapter;
2619 	struct e1000_hw 	*hw = &adapter->shared;
2620 	boolean_t		poll_mode;
2621 
2622 	/*
2623 	 * Once the adapter can support per Rx ring interrupt,
2624 	 * we should disable the real interrupt instead of just setting
2625 	 * the flag.
2626 	 */
2627 	mutex_enter(&rx_ring->rx_lock);
2628 	rx_ring->poll_flag = 1;
2629 	poll_mode = adapter->poll_mode;
2630 	mutex_exit(&rx_ring->rx_lock);
2631 
2632 	if (poll_mode) {
2633 		/* Rx interrupt disabling for MSI and legacy */
2634 		E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
2635 		E1000_WRITE_FLUSH(hw);
2636 	}
2637 	return (0);
2638 }
2639 
2640 /*
2641  * e1000g_unicst_find - Find the slot for the specified unicast address
2642  */
2643 static int
2644 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr)
2645 {
2646 	int slot;
2647 
2648 	ASSERT(mutex_owned(&Adapter->gen_lock));
2649 
2650 	for (slot = 0; slot < Adapter->unicst_total; slot++) {
2651 		if (Adapter->unicst_addr[slot].mac.set == 1) {
2652 			if (bcmp(Adapter->unicst_addr[slot].mac.addr,
2653 			    mac_addr, ETHERADDRL) == 0)
2654 				return (slot);
2655 		} else
2656 			continue;
2657 	}
2658 
2659 	return (-1);
2660 }
2661 
2662 /*
2663  * Entry points to add and remove a MAC address to a ring group.
2664  * The caller takes care of adding and removing the MAC addresses
2665  * to the filter via these two routines.
2666  */
2667 
2668 static int
2669 e1000g_addmac(void *arg, const uint8_t *mac_addr)
2670 {
2671 	struct e1000g *Adapter = (struct e1000g *)arg;
2672 	int slot, err;
2673 
2674 	mutex_enter(&Adapter->gen_lock);
2675 
2676 	if (e1000g_unicst_find(Adapter, mac_addr) != -1) {
2677 		/* The same address is already in slot */
2678 		mutex_exit(&Adapter->gen_lock);
2679 		return (0);
2680 	}
2681 
2682 	if (Adapter->unicst_avail == 0) {
2683 		/* no slots available */
2684 		mutex_exit(&Adapter->gen_lock);
2685 		return (ENOSPC);
2686 	}
2687 
2688 	/* Search for a free slot */
2689 	for (slot = 0; slot < Adapter->unicst_total; slot++) {
2690 		if (Adapter->unicst_addr[slot].mac.set == 0)
2691 			break;
2692 	}
2693 	ASSERT(slot < Adapter->unicst_total);
2694 
2695 	err = e1000g_unicst_set(Adapter, mac_addr, slot);
2696 	if (err == 0)
2697 		Adapter->unicst_avail--;
2698 
2699 	mutex_exit(&Adapter->gen_lock);
2700 
2701 	return (err);
2702 }
2703 
2704 static int
2705 e1000g_remmac(void *arg, const uint8_t *mac_addr)
2706 {
2707 	struct e1000g *Adapter = (struct e1000g *)arg;
2708 	int slot, err;
2709 
2710 	mutex_enter(&Adapter->gen_lock);
2711 
2712 	slot = e1000g_unicst_find(Adapter, mac_addr);
2713 	if (slot == -1) {
2714 		mutex_exit(&Adapter->gen_lock);
2715 		return (EINVAL);
2716 	}
2717 
2718 	ASSERT(Adapter->unicst_addr[slot].mac.set);
2719 
2720 	/* Clear this slot */
2721 	err = e1000g_unicst_set(Adapter, NULL, slot);
2722 	if (err == 0)
2723 		Adapter->unicst_avail++;
2724 
2725 	mutex_exit(&Adapter->gen_lock);
2726 
2727 	return (err);
2728 }
2729 
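/*
 * e1000g_ring_start - record the MAC layer's generation number for the
 * rx ring; it is passed back when delivering packets via mac_rx_ring().
 */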
2730 static int
2731 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
2732 {
2733 	e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh;
2734 
2735 	mutex_enter(&rx_ring->rx_lock);
2736 	rx_ring->ring_gen_num = mr_gen_num;
2737 	mutex_exit(&rx_ring->rx_lock);
2738 	return (0);
2739 }
2740 
2741 /*
2742  * Callback function for the MAC layer to register all rings.
2743  *
2744  * The hardware supports a single group with currently only one ring
2745  * available.
2746  * Though not offering virtualization ability per se, exposing the
2747  * group/ring still enables the polling and interrupt toggling.
2748  */
2749 void
2750 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index,
2751     const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
2752 {
2753 	struct e1000g *Adapter = (struct e1000g *)arg;
2754 	e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
2755 	mac_intr_t *mintr;
2756 
2757 	/*
2758 	 * We advertised only RX group/rings, so the MAC framework shouldn't
2759 	 * ask for anything else.
2760 	 */
2761 	ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0);
2762 
2763 	rx_ring->mrh = rx_ring->mrh_init = rh;
2764 	infop->mri_driver = (mac_ring_driver_t)rx_ring;
2765 	infop->mri_start = e1000g_ring_start;
2766 	infop->mri_stop = NULL;
2767 	infop->mri_poll = e1000g_poll_ring;
2768 
2769 	/* Ring level interrupts */
2770 	mintr = &infop->mri_intr;
2771 	mintr->mi_handle = (mac_intr_handle_t)rx_ring;
2772 	mintr->mi_enable = e1000g_rx_ring_intr_enable;
2773 	mintr->mi_disable = e1000g_rx_ring_intr_disable;
2774 }
2775 
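/*
 * e1000g_fill_group - provide the MAC layer with the RX group's driver
 * handle, MAC address add/remove entry points, and group-level
 * interrupt toggling entry points.
 */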
2776 static void
2777 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index,
2778     mac_group_info_t *infop, mac_group_handle_t gh)
2779 {
2780 	struct e1000g *Adapter = (struct e1000g *)arg;
2781 	mac_intr_t *mintr;
2782 
2783 	/*
2784 	 * We advertised a single RX ring. Getting a request for anything else
2785 	 * signifies a bug in the MAC framework.
2786 	 */
2787 	ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0);
2788 
2789 	Adapter->rx_group = gh;
2790 
2791 	infop->mgi_driver = (mac_group_driver_t)Adapter;
2792 	infop->mgi_start = NULL;
2793 	infop->mgi_stop = NULL;
2794 	infop->mgi_addmac = e1000g_addmac;
2795 	infop->mgi_remmac = e1000g_remmac;
2796 	infop->mgi_count = 1;
2797 
2798 	/* Group level interrupts */
2799 	mintr = &infop->mgi_intr;
2800 	mintr->mi_handle = (mac_intr_handle_t)Adapter;
2801 	mintr->mi_enable = e1000g_rx_group_intr_enable;
2802 	mintr->mi_disable = e1000g_rx_group_intr_disable;
2803 }
2804 
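/*
 * e1000g_m_getcapab - GLDv3 mc_getcapab entry point; report the
 * hardware checksum, LSO and RX rings capabilities.
 */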
2805 static boolean_t
2806 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2807 {
2808 	struct e1000g *Adapter = (struct e1000g *)arg;
2809 
2810 	switch (cap) {
2811 	case MAC_CAPAB_HCKSUM: {
2812 		uint32_t *txflags = cap_data;
2813 
2814 		if (Adapter->tx_hcksum_enable)
2815 			*txflags = HCKSUM_IPHDRCKSUM |
2816 			    HCKSUM_INET_PARTIAL;
2817 		else
2818 			return (B_FALSE);
2819 		break;
2820 	}
2821 
2822 	case MAC_CAPAB_LSO: {
2823 		mac_capab_lso_t *cap_lso = cap_data;
2824 
2825 		if (Adapter->lso_enable) {
2826 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
2827 			cap_lso->lso_basic_tcp_ipv4.lso_max =
2828 			    E1000_LSO_MAXLEN;
2829 		} else
2830 			return (B_FALSE);
2831 		break;
2832 	}
2833 	case MAC_CAPAB_RINGS: {
2834 		mac_capab_rings_t *cap_rings = cap_data;
2835 
2836 		/* No TX rings exposed yet */
2837 		if (cap_rings->mr_type != MAC_RING_TYPE_RX)
2838 			return (B_FALSE);
2839 
2840 		cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2841 		cap_rings->mr_rnum = 1;
2842 		cap_rings->mr_gnum = 1;
2843 		cap_rings->mr_rget = e1000g_fill_ring;
2844 		cap_rings->mr_gget = e1000g_fill_group;
2845 		break;
2846 	}
2847 	default:
2848 		return (B_FALSE);
2849 	}
2850 	return (B_TRUE);
2851 }
2852 
2853 static boolean_t
2854 e1000g_param_locked(mac_prop_id_t pr_num)
2855 {
2856 	/*
2857 	 * All en_* parameters are locked (read-only) while
2858 	 * the device is in any sort of loopback mode ...
2859 	 */
2860 	switch (pr_num) {
2861 		case MAC_PROP_EN_1000FDX_CAP:
2862 		case MAC_PROP_EN_1000HDX_CAP:
2863 		case MAC_PROP_EN_100FDX_CAP:
2864 		case MAC_PROP_EN_100HDX_CAP:
2865 		case MAC_PROP_EN_10FDX_CAP:
2866 		case MAC_PROP_EN_10HDX_CAP:
2867 		case MAC_PROP_AUTONEG:
2868 		case MAC_PROP_FLOWCTRL:
2869 			return (B_TRUE);
2870 	}
2871 	return (B_FALSE);
2872 }
2873 
2874 /*
2875  * callback function for set/get of properties
2876  */
2877 static int
2878 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
2879     uint_t pr_valsize, const void *pr_val)
2880 {
2881 	struct e1000g *Adapter = arg;
2882 	struct e1000_mac_info *mac = &Adapter->shared.mac;
2883 	struct e1000_phy_info *phy = &Adapter->shared.phy;
2884 	struct e1000_fc_info *fc = &Adapter->shared.fc;
2885 	int err = 0;
2886 	link_flowctrl_t flowctrl;
2887 	uint32_t cur_mtu, new_mtu;
2888 	uint64_t tmp = 0;
2889 
2890 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2891 	if (Adapter->loopback_mode != E1000G_LB_NONE &&
2892 	    e1000g_param_locked(pr_num)) {
2893 		/*
2894 		 * All en_* parameters are locked (read-only)
2895 		 * while the device is in any sort of loopback mode.
2896 		 */
2897 		rw_exit(&Adapter->chip_lock);
2898 		return (EBUSY);
2899 	}
2900 
2901 	switch (pr_num) {
2902 		case MAC_PROP_EN_1000FDX_CAP:
2903 			Adapter->param_en_1000fdx = *(uint8_t *)pr_val;
2904 			Adapter->param_adv_1000fdx = *(uint8_t *)pr_val;
2905 			goto reset;
2906 		case MAC_PROP_EN_100FDX_CAP:
2907 			Adapter->param_en_100fdx = *(uint8_t *)pr_val;
2908 			Adapter->param_adv_100fdx = *(uint8_t *)pr_val;
2909 			goto reset;
2910 		case MAC_PROP_EN_100HDX_CAP:
2911 			Adapter->param_en_100hdx = *(uint8_t *)pr_val;
2912 			Adapter->param_adv_100hdx = *(uint8_t *)pr_val;
2913 			goto reset;
2914 		case MAC_PROP_EN_10FDX_CAP:
2915 			Adapter->param_en_10fdx = *(uint8_t *)pr_val;
2916 			Adapter->param_adv_10fdx = *(uint8_t *)pr_val;
2917 			goto reset;
2918 		case MAC_PROP_EN_10HDX_CAP:
2919 			Adapter->param_en_10hdx = *(uint8_t *)pr_val;
2920 			Adapter->param_adv_10hdx = *(uint8_t *)pr_val;
2921 			goto reset;
2922 		case MAC_PROP_AUTONEG:
2923 			Adapter->param_adv_autoneg = *(uint8_t *)pr_val;
2924 			goto reset;
2925 		case MAC_PROP_FLOWCTRL:
2926 			fc->send_xon = B_TRUE;
2927 			bcopy(pr_val, &flowctrl, sizeof (flowctrl));
2928 
2929 			switch (flowctrl) {
2930 			default:
2931 				err = EINVAL;
2932 				break;
2933 			case LINK_FLOWCTRL_NONE:
2934 				fc->type = e1000_fc_none;
2935 				break;
2936 			case LINK_FLOWCTRL_RX:
2937 				fc->type = e1000_fc_rx_pause;
2938 				break;
2939 			case LINK_FLOWCTRL_TX:
2940 				fc->type = e1000_fc_tx_pause;
2941 				break;
2942 			case LINK_FLOWCTRL_BI:
2943 				fc->type = e1000_fc_full;
2944 				break;
2945 			}
2946 reset:
2947 			if (err == 0) {
2948 				if (e1000g_reset_link(Adapter) != DDI_SUCCESS)
2949 					err = EINVAL;
2950 			}
2951 			break;
2952 		case MAC_PROP_ADV_1000FDX_CAP:
2953 		case MAC_PROP_ADV_1000HDX_CAP:
2954 		case MAC_PROP_ADV_100FDX_CAP:
2955 		case MAC_PROP_ADV_100HDX_CAP:
2956 		case MAC_PROP_ADV_10FDX_CAP:
2957 		case MAC_PROP_ADV_10HDX_CAP:
2958 		case MAC_PROP_EN_1000HDX_CAP:
2959 		case MAC_PROP_STATUS:
2960 		case MAC_PROP_SPEED:
2961 		case MAC_PROP_DUPLEX:
2962 			err = ENOTSUP; /* read-only prop. Can't set this. */
2963 			break;
2964 		case MAC_PROP_MTU:
2965 			cur_mtu = Adapter->default_mtu;
2966 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
2967 			if (new_mtu == cur_mtu) {
2968 				err = 0;
2969 				break;
2970 			}
2971 
2972 			tmp = new_mtu + sizeof (struct ether_vlan_header) +
2973 			    ETHERFCSL;
2974 			if ((tmp < DEFAULT_FRAME_SIZE) ||
2975 			    (tmp > MAXIMUM_FRAME_SIZE)) {
2976 				err = EINVAL;
2977 				break;
2978 			}
2979 
2980 			/* ich8 does not support jumbo frames */
2981 			if ((mac->type == e1000_ich8lan) &&
2982 			    (tmp > DEFAULT_FRAME_SIZE)) {
2983 				err = EINVAL;
2984 				break;
2985 			}
2986 			/* ich9 with the IFE phy does not do jumbo frames */
2987 			if ((mac->type == e1000_ich9lan) &&
2988 			    (phy->type == e1000_phy_ife) &&
2989 			    (tmp > DEFAULT_FRAME_SIZE)) {
2990 				err = EINVAL;
2991 				break;
2992 			}
2993 			if (Adapter->chip_state != E1000G_STOP) {
2994 				err = EBUSY;
2995 				break;
2996 			}
2997 
2998 			err = mac_maxsdu_update(Adapter->mh, new_mtu);
2999 			if (err == 0) {
3000 				Adapter->max_frame_size = (uint32_t)tmp;
3001 				Adapter->default_mtu = new_mtu;
3002 				e1000g_set_bufsize(Adapter);
3003 			}
3004 			break;
3005 		case MAC_PROP_PRIVATE:
3006 			err = e1000g_set_priv_prop(Adapter, pr_name,
3007 			    pr_valsize, pr_val);
3008 			break;
3009 		default:
3010 			err = ENOTSUP;
3011 			break;
3012 	}
3013 	rw_exit(&Adapter->chip_lock);
3014 	return (err);
3015 }
3016 
3017 static int
3018 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3019     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
3020 {
3021 	struct e1000g *Adapter = arg;
3022 	struct e1000_fc_info *fc = &Adapter->shared.fc;
3023 	int err = 0;
3024 	link_flowctrl_t flowctrl;
3025 	uint64_t tmp = 0;
3026 
3027 	if (pr_valsize == 0)
3028 		return (EINVAL);
3029 
3030 	*perm = MAC_PROP_PERM_RW;
3031 
3032 	bzero(pr_val, pr_valsize);
3033 	if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) {
3034 		return (e1000g_get_def_val(Adapter, pr_num,
3035 		    pr_valsize, pr_val));
3036 	}
3037 
3038 	switch (pr_num) {
3039 		case MAC_PROP_DUPLEX:
3040 			*perm = MAC_PROP_PERM_READ;
3041 			if (pr_valsize >= sizeof (link_duplex_t)) {
3042 				bcopy(&Adapter->link_duplex, pr_val,
3043 				    sizeof (link_duplex_t));
3044 			} else
3045 				err = EINVAL;
3046 			break;
3047 		case MAC_PROP_SPEED:
3048 			*perm = MAC_PROP_PERM_READ;
3049 			if (pr_valsize >= sizeof (uint64_t)) {
3050 				tmp = Adapter->link_speed * 1000000ull;
3051 				bcopy(&tmp, pr_val, sizeof (tmp));
3052 			} else
3053 				err = EINVAL;
3054 			break;
3055 		case MAC_PROP_AUTONEG:
3056 			*(uint8_t *)pr_val = Adapter->param_adv_autoneg;
3057 			break;
3058 		case MAC_PROP_FLOWCTRL:
3059 			if (pr_valsize >= sizeof (link_flowctrl_t)) {
3060 				switch (fc->type) {
3061 					case e1000_fc_none:
3062 						flowctrl = LINK_FLOWCTRL_NONE;
3063 						break;
3064 					case e1000_fc_rx_pause:
3065 						flowctrl = LINK_FLOWCTRL_RX;
3066 						break;
3067 					case e1000_fc_tx_pause:
3068 						flowctrl = LINK_FLOWCTRL_TX;
3069 						break;
3070 					case e1000_fc_full:
3071 						flowctrl = LINK_FLOWCTRL_BI;
3072 						break;
3073 				}
3074 				bcopy(&flowctrl, pr_val, sizeof (flowctrl));
3075 			} else
3076 				err = EINVAL;
3077 			break;
3078 		case MAC_PROP_ADV_1000FDX_CAP:
3079 			*perm = MAC_PROP_PERM_READ;
3080 			*(uint8_t *)pr_val = Adapter->param_adv_1000fdx;
3081 			break;
3082 		case MAC_PROP_EN_1000FDX_CAP:
3083 			*(uint8_t *)pr_val = Adapter->param_en_1000fdx;
3084 			break;
3085 		case MAC_PROP_ADV_1000HDX_CAP:
3086 			*perm = MAC_PROP_PERM_READ;
3087 			*(uint8_t *)pr_val = Adapter->param_adv_1000hdx;
3088 			break;
3089 		case MAC_PROP_EN_1000HDX_CAP:
3090 			*perm = MAC_PROP_PERM_READ;
3091 			*(uint8_t *)pr_val = Adapter->param_en_1000hdx;
3092 			break;
3093 		case MAC_PROP_ADV_100FDX_CAP:
3094 			*perm = MAC_PROP_PERM_READ;
3095 			*(uint8_t *)pr_val = Adapter->param_adv_100fdx;
3096 			break;
3097 		case MAC_PROP_EN_100FDX_CAP:
3098 			*(uint8_t *)pr_val = Adapter->param_en_100fdx;
3099 			break;
3100 		case MAC_PROP_ADV_100HDX_CAP:
3101 			*perm = MAC_PROP_PERM_READ;
3102 			*(uint8_t *)pr_val = Adapter->param_adv_100hdx;
3103 			break;
3104 		case MAC_PROP_EN_100HDX_CAP:
3105 			*(uint8_t *)pr_val = Adapter->param_en_100hdx;
3106 			break;
3107 		case MAC_PROP_ADV_10FDX_CAP:
3108 			*perm = MAC_PROP_PERM_READ;
3109 			*(uint8_t *)pr_val = Adapter->param_adv_10fdx;
3110 			break;
3111 		case MAC_PROP_EN_10FDX_CAP:
3112 			*(uint8_t *)pr_val = Adapter->param_en_10fdx;
3113 			break;
3114 		case MAC_PROP_ADV_10HDX_CAP:
3115 			*perm = MAC_PROP_PERM_READ;
3116 			*(uint8_t *)pr_val = Adapter->param_adv_10hdx;
3117 			break;
3118 		case MAC_PROP_EN_10HDX_CAP:
3119 			*(uint8_t *)pr_val = Adapter->param_en_10hdx;
3120 			break;
3121 		case MAC_PROP_ADV_100T4_CAP:
3122 		case MAC_PROP_EN_100T4_CAP:
3123 			*perm = MAC_PROP_PERM_READ;
3124 			*(uint8_t *)pr_val = Adapter->param_adv_100t4;
3125 			break;
3126 		case MAC_PROP_PRIVATE:
3127 			err = e1000g_get_priv_prop(Adapter, pr_name,
3128 			    pr_flags, pr_valsize, pr_val, perm);
3129 			break;
3130 		default:
3131 			err = ENOTSUP;
3132 			break;
3133 	}
3134 	return (err);
3135 }
3136 
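/*
 * e1000g_set_priv_prop - set a driver-private property from its string
 * value, range-checking the result against the MIN/MAX limits.
 */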
3137 /* ARGSUSED2 */
3138 static int
3139 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name,
3140     uint_t pr_valsize, const void *pr_val)
3141 {
3142 	int err = 0;
3143 	long result;
3144 	struct e1000_hw *hw = &Adapter->shared;
3145 
3146 	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3147 		if (pr_val == NULL) {
3148 			err = EINVAL;
3149 			return (err);
3150 		}
3151 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3152 		if (result < MIN_TX_BCOPY_THRESHOLD ||
3153 		    result > MAX_TX_BCOPY_THRESHOLD)
3154 			err = EINVAL;
3155 		else {
3156 			Adapter->tx_bcopy_thresh = (uint32_t)result;
3157 		}
3158 		return (err);
3159 	}
3160 	if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3161 		if (pr_val == NULL) {
3162 			err = EINVAL;
3163 			return (err);
3164 		}
3165 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3166 		if (result < 0 || result > 1)
3167 			err = EINVAL;
3168 		else {
3169 			Adapter->tx_intr_enable = (result == 1) ?
3170 			    B_TRUE: B_FALSE;
3171 			if (Adapter->tx_intr_enable)
3172 				e1000g_mask_tx_interrupt(Adapter);
3173 			else
3174 				e1000g_clear_tx_interrupt(Adapter);
3175 			if (e1000g_check_acc_handle(
3176 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3177 				ddi_fm_service_impact(Adapter->dip,
3178 				    DDI_SERVICE_DEGRADED);
3179 		}
3180 		return (err);
3181 	}
3182 	if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3183 		if (pr_val == NULL) {
3184 			err = EINVAL;
3185 			return (err);
3186 		}
3187 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3188 		if (result < MIN_TX_INTR_DELAY ||
3189 		    result > MAX_TX_INTR_DELAY)
3190 			err = EINVAL;
3191 		else {
3192 			Adapter->tx_intr_delay = (uint32_t)result;
3193 			E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay);
3194 			if (e1000g_check_acc_handle(
3195 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3196 				ddi_fm_service_impact(Adapter->dip,
3197 				    DDI_SERVICE_DEGRADED);
3198 		}
3199 		return (err);
3200 	}
3201 	if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3202 		if (pr_val == NULL) {
3203 			err = EINVAL;
3204 			return (err);
3205 		}
3206 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3207 		if (result < MIN_TX_INTR_ABS_DELAY ||
3208 		    result > MAX_TX_INTR_ABS_DELAY)
3209 			err = EINVAL;
3210 		else {
3211 			Adapter->tx_intr_abs_delay = (uint32_t)result;
3212 			E1000_WRITE_REG(hw, E1000_TADV,
3213 			    Adapter->tx_intr_abs_delay);
3214 			if (e1000g_check_acc_handle(
3215 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3216 				ddi_fm_service_impact(Adapter->dip,
3217 				    DDI_SERVICE_DEGRADED);
3218 		}
3219 		return (err);
3220 	}
3221 	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3222 		if (pr_val == NULL) {
3223 			err = EINVAL;
3224 			return (err);
3225 		}
3226 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3227 		if (result < MIN_RX_BCOPY_THRESHOLD ||
3228 		    result > MAX_RX_BCOPY_THRESHOLD)
3229 			err = EINVAL;
3230 		else
3231 			Adapter->rx_bcopy_thresh = (uint32_t)result;
3232 		return (err);
3233 	}
3234 	if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3235 		if (pr_val == NULL) {
3236 			err = EINVAL;
3237 			return (err);
3238 		}
3239 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3240 		if (result < MIN_RX_LIMIT_ON_INTR ||
3241 		    result > MAX_RX_LIMIT_ON_INTR)
3242 			err = EINVAL;
3243 		else
3244 			Adapter->rx_limit_onintr = (uint32_t)result;
3245 		return (err);
3246 	}
3247 	if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3248 		if (pr_val == NULL) {
3249 			err = EINVAL;
3250 			return (err);
3251 		}
3252 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3253 		if (result < MIN_RX_INTR_DELAY ||
3254 		    result > MAX_RX_INTR_DELAY)
3255 			err = EINVAL;
3256 		else {
3257 			Adapter->rx_intr_delay = (uint32_t)result;
3258 			E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay);
3259 			if (e1000g_check_acc_handle(
3260 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3261 				ddi_fm_service_impact(Adapter->dip,
3262 				    DDI_SERVICE_DEGRADED);
3263 		}
3264 		return (err);
3265 	}
3266 	if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3267 		if (pr_val == NULL) {
3268 			err = EINVAL;
3269 			return (err);
3270 		}
3271 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3272 		if (result < MIN_RX_INTR_ABS_DELAY ||
3273 		    result > MAX_RX_INTR_ABS_DELAY)
3274 			err = EINVAL;
3275 		else {
3276 			Adapter->rx_intr_abs_delay = (uint32_t)result;
3277 			E1000_WRITE_REG(hw, E1000_RADV,
3278 			    Adapter->rx_intr_abs_delay);
3279 			if (e1000g_check_acc_handle(
3280 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3281 				ddi_fm_service_impact(Adapter->dip,
3282 				    DDI_SERVICE_DEGRADED);
3283 		}
3284 		return (err);
3285 	}
3286 	if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3287 		if (pr_val == NULL) {
3288 			err = EINVAL;
3289 			return (err);
3290 		}
3291 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3292 		if (result < MIN_INTR_THROTTLING ||
3293 		    result > MAX_INTR_THROTTLING)
3294 			err = EINVAL;
3295 		else {
3296 			if (hw->mac.type >= e1000_82540) {
3297 				Adapter->intr_throttling_rate =
3298 				    (uint32_t)result;
3299 				E1000_WRITE_REG(hw, E1000_ITR,
3300 				    Adapter->intr_throttling_rate);
3301 				if (e1000g_check_acc_handle(
3302 				    Adapter->osdep.reg_handle) != DDI_FM_OK)
3303 					ddi_fm_service_impact(Adapter->dip,
3304 					    DDI_SERVICE_DEGRADED);
3305 			} else
3306 				err = EINVAL;
3307 		}
3308 		return (err);
3309 	}
3310 	if (strcmp(pr_name, "_intr_adaptive") == 0) {
3311 		if (pr_val == NULL) {
3312 			err = EINVAL;
3313 			return (err);
3314 		}
3315 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3316 		if (result < 0 || result > 1)
3317 			err = EINVAL;
3318 		else {
3319 			if (hw->mac.type >= e1000_82540) {
3320 				Adapter->intr_adaptive = (result == 1) ?
3321 				    B_TRUE : B_FALSE;
3322 			} else {
3323 				err = EINVAL;
3324 			}
3325 		}
3326 		return (err);
3327 	}
3328 	return (ENOTSUP);
3329 }
3330 
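/*
 * e1000g_get_priv_prop - return the current (or default) value of a
 * driver-private property, formatted as a decimal string.
 */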
3331 static int
3332 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name,
3333     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
3334 {
3335 	int err = ENOTSUP;
3336 	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
3337 	int value;
3338 
3339 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
3340 		*perm = MAC_PROP_PERM_READ;
3341 		if (is_default)
3342 			goto done;
3343 		value = Adapter->param_adv_pause;
3344 		err = 0;
3345 		goto done;
3346 	}
3347 	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
3348 		*perm = MAC_PROP_PERM_READ;
3349 		if (is_default)
3350 			goto done;
3351 		value = Adapter->param_adv_asym_pause;
3352 		err = 0;
3353 		goto done;
3354 	}
3355 	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3356 		value = (is_default ? DEFAULT_TX_BCOPY_THRESHOLD :
3357 		    Adapter->tx_bcopy_thresh);
3358 		err = 0;
3359 		goto done;
3360 	}
3361 	if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3362 		value = (is_default ? DEFAULT_TX_INTR_ENABLE :
3363 		    Adapter->tx_intr_enable);
3364 		err = 0;
3365 		goto done;
3366 	}
3367 	if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3368 		value = (is_default ? DEFAULT_TX_INTR_DELAY :
3369 		    Adapter->tx_intr_delay);
3370 		err = 0;
3371 		goto done;
3372 	}
3373 	if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3374 		value = (is_default ? DEFAULT_TX_INTR_ABS_DELAY :
3375 		    Adapter->tx_intr_abs_delay);
3376 		err = 0;
3377 		goto done;
3378 	}
3379 	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3380 		value = (is_default ? DEFAULT_RX_BCOPY_THRESHOLD :
3381 		    Adapter->rx_bcopy_thresh);
3382 		err = 0;
3383 		goto done;
3384 	}
3385 	if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3386 		value = (is_default ? DEFAULT_RX_LIMIT_ON_INTR :
3387 		    Adapter->rx_limit_onintr);
3388 		err = 0;
3389 		goto done;
3390 	}
3391 	if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3392 		value = (is_default ? DEFAULT_RX_INTR_DELAY :
3393 		    Adapter->rx_intr_delay);
3394 		err = 0;
3395 		goto done;
3396 	}
3397 	if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3398 		value = (is_default ? DEFAULT_RX_INTR_ABS_DELAY :
3399 		    Adapter->rx_intr_abs_delay);
3400 		err = 0;
3401 		goto done;
3402 	}
3403 	if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3404 		value = (is_default ? DEFAULT_INTR_THROTTLING :
3405 		    Adapter->intr_throttling_rate);
3406 		err = 0;
3407 		goto done;
3408 	}
3409 	if (strcmp(pr_name, "_intr_adaptive") == 0) {
3410 		value = (is_default ? 1 : Adapter->intr_adaptive);
3411 		err = 0;
3412 		goto done;
3413 	}
3414 done:
3415 	if (err == 0) {
3416 		(void) snprintf(pr_val, pr_valsize, "%d", value);
3417 	}
3418 	return (err);
3419 }
3420 
3421 /*
3422  * e1000g_get_conf - get configurations set in e1000g.conf
3423  * This routine gets user-configured values out of the configuration
3424  * file e1000g.conf.
3425  *
3426  * For each configurable value, there is a minimum, a maximum, and a
3427  * default.
3428  * If user does not configure a value, use the default.
3429  * If user configures below the minimum, use the minimum.
3430  * If user configures above the maximum, use the maximum.
3431  */
3432 static void
3433 e1000g_get_conf(struct e1000g *Adapter)
3434 {
3435 	struct e1000_hw *hw = &Adapter->shared;
3436 	boolean_t tbi_compatibility = B_FALSE;
3437 
3438 	/*
3439 	 * get each configurable property from e1000g.conf
3440 	 */
3441 
3442 	/*
3443 	 * NumTxDescriptors
3444 	 */
3445 	Adapter->tx_desc_num =
3446 	    e1000g_get_prop(Adapter, "NumTxDescriptors",
3447 	    MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR,
3448 	    DEFAULT_NUM_TX_DESCRIPTOR);
3449 
3450 	/*
3451 	 * NumRxDescriptors
3452 	 */
3453 	Adapter->rx_desc_num =
3454 	    e1000g_get_prop(Adapter, "NumRxDescriptors",
3455 	    MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR,
3456 	    DEFAULT_NUM_RX_DESCRIPTOR);
3457 
3458 	/*
3459 	 * NumRxFreeList
3460 	 */
3461 	Adapter->rx_freelist_num =
3462 	    e1000g_get_prop(Adapter, "NumRxFreeList",
3463 	    MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST,
3464 	    DEFAULT_NUM_RX_FREELIST);
3465 
3466 	/*
3467 	 * NumTxPacketList
3468 	 */
3469 	Adapter->tx_freelist_num =
3470 	    e1000g_get_prop(Adapter, "NumTxPacketList",
3471 	    MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST,
3472 	    DEFAULT_NUM_TX_FREELIST);
3473 
3474 	/*
3475 	 * FlowControl
3476 	 */
3477 	hw->fc.send_xon = B_TRUE;
3478 	hw->fc.type =
3479 	    e1000g_get_prop(Adapter, "FlowControl",
3480 	    e1000_fc_none, 4, DEFAULT_FLOW_CONTROL);
3481 	/* 4 is the setting that says "let the eeprom decide" */
3482 	if (hw->fc.type == 4)
3483 		hw->fc.type = e1000_fc_default;
3484 
3485 	/*
3486 	 * Max Num Receive Packets on Interrupt
3487 	 */
3488 	Adapter->rx_limit_onintr =
3489 	    e1000g_get_prop(Adapter, "MaxNumReceivePackets",
3490 	    MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR,
3491 	    DEFAULT_RX_LIMIT_ON_INTR);
3492 
3493 	/*
3494 	 * PHY master slave setting
3495 	 */
3496 	hw->phy.ms_type =
3497 	    e1000g_get_prop(Adapter, "SetMasterSlave",
3498 	    e1000_ms_hw_default, e1000_ms_auto,
3499 	    e1000_ms_hw_default);
3500 
3501 	/*
3502 	 * Parameter which controls TBI mode workaround, which is only
3503 	 * needed on certain switches such as Cisco 6500/Foundry
3504 	 */
3505 	tbi_compatibility =
3506 	    e1000g_get_prop(Adapter, "TbiCompatibilityEnable",
3507 	    0, 1, DEFAULT_TBI_COMPAT_ENABLE);
3508 	e1000_set_tbi_compatibility_82543(hw, tbi_compatibility);
3509 
3510 	/*
3511 	 * MSI Enable
3512 	 */
3513 	Adapter->msi_enable =
3514 	    e1000g_get_prop(Adapter, "MSIEnable",
3515 	    0, 1, DEFAULT_MSI_ENABLE);
3516 
3517 	/*
3518 	 * Interrupt Throttling Rate
3519 	 */
3520 	Adapter->intr_throttling_rate =
3521 	    e1000g_get_prop(Adapter, "intr_throttling_rate",
3522 	    MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
3523 	    DEFAULT_INTR_THROTTLING);
3524 
3525 	/*
3526 	 * Adaptive Interrupt Blanking Enable/Disable
3527 	 * It is enabled by default
3528 	 */
3529 	Adapter->intr_adaptive =
3530 	    (e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1) == 1) ?
3531 	    B_TRUE : B_FALSE;
3532 
3533 	/*
3534 	 * Hardware checksum enable/disable parameter
3535 	 */
3536 	Adapter->tx_hcksum_enable =
3537 	    e1000g_get_prop(Adapter, "tx_hcksum_enable",
3538 	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3539 	/*
3540 	 * Checksum on/off selection via global parameters.
3541 	 *
3542 	 * If the chip is flagged as not capable of (correctly)
3543 	 * handling checksumming, we don't enable it on either
3544 	 * Rx or Tx side.  Otherwise, we take this chip's settings
3545 	 * from the patchable global defaults.
3546 	 *
3547 	 * We advertise our capabilities only if TX offload is
3548 	 * enabled.  On receive, the stack will accept checksummed
3549 	 * packets anyway, even if we haven't said we can deliver
3550 	 * them.
3551 	 */
3552 	switch (hw->mac.type) {
3553 		case e1000_82540:
3554 		case e1000_82544:
3555 		case e1000_82545:
3556 		case e1000_82545_rev_3:
3557 		case e1000_82546:
3558 		case e1000_82546_rev_3:
3559 		case e1000_82571:
3560 		case e1000_82572:
3561 		case e1000_82573:
3562 		case e1000_80003es2lan:
3563 			break;
3564 		/*
3565 		 * For the following Intel PRO/1000 chipsets, we have not
3566 		 * tested the hardware checksum offload capability, so we
3567 		 * disable the capability for them.
3568 		 *	e1000_82542,
3569 		 *	e1000_82543,
3570 		 *	e1000_82541,
3571 		 *	e1000_82541_rev_2,
3572 		 *	e1000_82547,
3573 		 *	e1000_82547_rev_2,
3574 		 */
3575 		default:
3576 			Adapter->tx_hcksum_enable = B_FALSE;
3577 	}
3578 
3579 	/*
3580 	 * Large Send Offloading(LSO) Enable/Disable
3581 	 * If the tx hardware checksum is not enabled, LSO should be
3582 	 * disabled.
3583 	 */
3584 	Adapter->lso_enable =
3585 	    e1000g_get_prop(Adapter, "lso_enable",
3586 	    0, 1, DEFAULT_LSO_ENABLE);
3587 
3588 	switch (hw->mac.type) {
3589 		case e1000_82546:
3590 		case e1000_82546_rev_3:
3591 			if (Adapter->lso_enable)
3592 				Adapter->lso_premature_issue = B_TRUE;
3593 			/* FALLTHRU */
3594 		case e1000_82571:
3595 		case e1000_82572:
3596 		case e1000_82573:
3597 		case e1000_80003es2lan:
3598 			break;
3599 		default:
3600 			Adapter->lso_enable = B_FALSE;
3601 	}
3602 
3603 	if (!Adapter->tx_hcksum_enable) {
3604 		Adapter->lso_premature_issue = B_FALSE;
3605 		Adapter->lso_enable = B_FALSE;
3606 	}
3607 
3608 	/*
3609 	 * If mem_workaround_82546 is enabled, the rx buffers allocated for
3610 	 * e1000_82545, e1000_82546 and e1000_82546_rev_3
3611 	 * will not cross a 64k boundary.
3612 	 */
3613 	Adapter->mem_workaround_82546 =
3614 	    e1000g_get_prop(Adapter, "mem_workaround_82546",
3615 	    0, 1, DEFAULT_MEM_WORKAROUND_82546);
3616 }
3617 
3618 /*
3619  * e1000g_get_prop - routine to read properties
3620  *
3621  * Get a user-configured property value out of the configuration
3622  * file e1000g.conf.
3623  *
3624  * Caller provides name of the property, a default value, a minimum
3625  * value, and a maximum value.
3626  *
3627  * Return configured value of the property, with default, minimum and
3628  * maximum properly applied.
3629  */
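/*
 * For illustration (a hypothetical e1000g.conf fragment; the property name
 * is real, the values are examples only):
 *
 *	MaxFrameSize = 1,1,0;
 *
 * supplies one array element per driver instance (instances 0 and 1 get
 * the value 1, instance 2 gets 0); any instance beyond the array length
 * falls back to the default value.
 */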
3630 static int
3631 e1000g_get_prop(struct e1000g *Adapter,	/* pointer to per-adapter structure */
3632     char *propname,		/* name of the property */
3633     int minval,			/* minimum acceptable value */
3634     int maxval,			/* maximum acceptable value */
3635     int defval)			/* default value */
3636 {
3637 	int propval;		/* value returned for requested property */
3638 	int *props;		/* pointer to array of properties returned */
3639 	uint_t nprops;		/* number of property values returned */
3640 
3641 	/*
3642 	 * get the array of properties from the config file
3643 	 */
3644 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip,
3645 	    DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) {
3646 		/* got some properties, test if we got enough */
3647 		if (Adapter->instance < nprops) {
3648 			propval = props[Adapter->instance];
3649 		} else {
3650 			/* not enough properties configured */
3651 			propval = defval;
3652 			E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
3653 			    "Not Enough %s values found in e1000g.conf"
3654 			    " - set to %d\n",
3655 			    propname, propval);
3656 		}
3657 
3658 		/* free memory allocated for properties */
3659 		ddi_prop_free(props);
3660 
3661 	} else {
3662 		propval = defval;
3663 	}
3664 
3665 	/*
3666 	 * enforce limits
3667 	 */
3668 	if (propval > maxval) {
3669 		propval = maxval;
3670 		E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
3671 		    "Too High %s value in e1000g.conf - set to %d\n",
3672 		    propname, propval);
3673 	}
3674 
3675 	if (propval < minval) {
3676 		propval = minval;
3677 		E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
3678 		    "Too Low %s value in e1000g.conf - set to %d\n",
3679 		    propname, propval);
3680 	}
3681 
3682 	return (propval);
3683 }
3684 
3685 static boolean_t
3686 e1000g_link_check(struct e1000g *Adapter)
3687 {
3688 	uint16_t speed, duplex, phydata;
3689 	boolean_t link_changed = B_FALSE;
3690 	struct e1000_hw *hw;
3691 	uint32_t reg_tarc;
3692 
3693 	hw = &Adapter->shared;
3694 
3695 	if (e1000g_link_up(Adapter)) {
3696 		/*
3697 		 * The link is up; check whether it was marked as down earlier.
3698 		 */
3699 		if (Adapter->link_state != LINK_STATE_UP) {
3700 			(void) e1000_get_speed_and_duplex(hw, &speed, &duplex);
3701 			Adapter->link_speed = speed;
3702 			Adapter->link_duplex = duplex;
3703 			Adapter->link_state = LINK_STATE_UP;
3704 			link_changed = B_TRUE;
3705 
3706 			Adapter->tx_link_down_timeout = 0;
3707 
3708 			if ((hw->mac.type == e1000_82571) ||
3709 			    (hw->mac.type == e1000_82572)) {
3710 				reg_tarc = E1000_READ_REG(hw, E1000_TARC(0));
3711 				if (speed == SPEED_1000)
3712 					reg_tarc |= (1 << 21);
3713 				else
3714 					reg_tarc &= ~(1 << 21);
3715 				E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc);
3716 			}
3717 		}
3718 		Adapter->smartspeed = 0;
3719 	} else {
3720 		if (Adapter->link_state != LINK_STATE_DOWN) {
3721 			Adapter->link_speed = 0;
3722 			Adapter->link_duplex = 0;
3723 			Adapter->link_state = LINK_STATE_DOWN;
3724 			link_changed = B_TRUE;
3725 
3726 			/*
3727 			 * SmartSpeed workaround for Tabor/TanaX. When the
3728 			 * driver loses link, disable auto master/slave
3729 			 * resolution.
3730 			 */
3731 			if (hw->phy.type == e1000_phy_igp) {
3732 				(void) e1000_read_phy_reg(hw,
3733 				    PHY_1000T_CTRL, &phydata);
3734 				phydata |= CR_1000T_MS_ENABLE;
3735 				(void) e1000_write_phy_reg(hw,
3736 				    PHY_1000T_CTRL, phydata);
3737 			}
3738 		} else {
3739 			e1000g_smartspeed(Adapter);
3740 		}
3741 
3742 		if (Adapter->chip_state == E1000G_START) {
3743 			if (Adapter->tx_link_down_timeout <
3744 			    MAX_TX_LINK_DOWN_TIMEOUT) {
3745 				Adapter->tx_link_down_timeout++;
3746 			} else if (Adapter->tx_link_down_timeout ==
3747 			    MAX_TX_LINK_DOWN_TIMEOUT) {
3748 				e1000g_tx_clean(Adapter);
3749 				Adapter->tx_link_down_timeout++;
3750 			}
3751 		}
3752 	}
3753 
3754 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
3755 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
3756 
3757 	return (link_changed);
3758 }
3759 
3760 /*
3761  * e1000g_reset_link - Using the link properties to setup the link
3762  */
3763 int
3764 e1000g_reset_link(struct e1000g *Adapter)
3765 {
3766 	struct e1000_mac_info *mac;
3767 	struct e1000_phy_info *phy;
3768 	boolean_t invalid;
3769 
3770 	mac = &Adapter->shared.mac;
3771 	phy = &Adapter->shared.phy;
3772 	invalid = B_FALSE;
3773 
3774 	if (Adapter->param_adv_autoneg == 1) {
3775 		mac->autoneg = B_TRUE;
3776 		phy->autoneg_advertised = 0;
3777 
3778 		/*
3779 		 * 1000hdx is not supported for autonegotiation
3780 		 */
3781 		if (Adapter->param_adv_1000fdx == 1)
3782 			phy->autoneg_advertised |= ADVERTISE_1000_FULL;
3783 
3784 		if (Adapter->param_adv_100fdx == 1)
3785 			phy->autoneg_advertised |= ADVERTISE_100_FULL;
3786 
3787 		if (Adapter->param_adv_100hdx == 1)
3788 			phy->autoneg_advertised |= ADVERTISE_100_HALF;
3789 
3790 		if (Adapter->param_adv_10fdx == 1)
3791 			phy->autoneg_advertised |= ADVERTISE_10_FULL;
3792 
3793 		if (Adapter->param_adv_10hdx == 1)
3794 			phy->autoneg_advertised |= ADVERTISE_10_HALF;
3795 
3796 		if (phy->autoneg_advertised == 0)
3797 			invalid = B_TRUE;
3798 	} else {
3799 		mac->autoneg = B_FALSE;
3800 
3801 		/*
3802 		 * 1000fdx and 1000hdx are not supported for forced link
3803 		 */
3804 		if (Adapter->param_adv_100fdx == 1)
3805 			mac->forced_speed_duplex = ADVERTISE_100_FULL;
3806 		else if (Adapter->param_adv_100hdx == 1)
3807 			mac->forced_speed_duplex = ADVERTISE_100_HALF;
3808 		else if (Adapter->param_adv_10fdx == 1)
3809 			mac->forced_speed_duplex = ADVERTISE_10_FULL;
3810 		else if (Adapter->param_adv_10hdx == 1)
3811 			mac->forced_speed_duplex = ADVERTISE_10_HALF;
3812 		else
3813 			invalid = B_TRUE;
3814 
3815 	}
3816 
3817 	if (invalid) {
3818 		e1000g_log(Adapter, CE_WARN,
3819 		    "Invalid link settings. Setting up link to "
3820 		    "support autonegotiation with all link capabilities.");
3821 		mac->autoneg = B_TRUE;
3822 		phy->autoneg_advertised = ADVERTISE_1000_FULL |
3823 		    ADVERTISE_100_FULL | ADVERTISE_100_HALF |
3824 		    ADVERTISE_10_FULL | ADVERTISE_10_HALF;
3825 	}
3826 
3827 	return (e1000_setup_link(&Adapter->shared));
3828 }
3829 
3830 static void
3831 e1000g_timer_tx_resched(struct e1000g *Adapter)
3832 {
3833 	e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
3834 
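	/*
	 * Ask the MAC layer to retry transmits only if a reschedule was
	 * requested, the request has been pending for more than one second,
	 * the chip is running, and enough tx descriptors are now free.
	 */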
3835 	if (tx_ring->resched_needed &&
3836 	    ((ddi_get_lbolt() - tx_ring->resched_timestamp) >
3837 	    drv_usectohz(1000000)) &&
3838 	    (Adapter->chip_state == E1000G_START) &&
3839 	    (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) {
3840 		tx_ring->resched_needed = B_FALSE;
3841 		mac_tx_update(Adapter->mh);
3842 		E1000G_STAT(tx_ring->stat_reschedule);
3843 		E1000G_STAT(tx_ring->stat_timer_reschedule);
3844 	}
3845 }
3846 
3847 static void
3848 e1000g_local_timer(void *ws)
3849 {
3850 	struct e1000g *Adapter = (struct e1000g *)ws;
3851 	struct e1000_hw *hw;
3852 	e1000g_ether_addr_t ether_addr;
3853 	boolean_t link_changed;
3854 
3855 	hw = &Adapter->shared;
3856 
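	/*
	 * If an earlier fault left the chip in the error state, attempt a
	 * global reset and report the outcome to the FMA framework.
	 */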
3857 	if (Adapter->chip_state == E1000G_ERROR) {
3858 		Adapter->reset_count++;
3859 		if (e1000g_global_reset(Adapter)) {
3860 			ddi_fm_service_impact(Adapter->dip,
3861 			    DDI_SERVICE_RESTORED);
3862 			e1000g_timer_tx_resched(Adapter);
3863 		} else
3864 			ddi_fm_service_impact(Adapter->dip,
3865 			    DDI_SERVICE_LOST);
3866 		return;
3867 	}
3868 
3869 	if (e1000g_stall_check(Adapter)) {
3870 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
3871 		    "Tx stall detected. Activate automatic recovery.\n");
3872 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL);
3873 		Adapter->reset_count++;
3874 		if (e1000g_reset_adapter(Adapter)) {
3875 			ddi_fm_service_impact(Adapter->dip,
3876 			    DDI_SERVICE_RESTORED);
3877 			e1000g_timer_tx_resched(Adapter);
3878 		} else
3879 			ddi_fm_service_impact(Adapter->dip,
3880 			    DDI_SERVICE_LOST);
3881 		return;
3882 	}
3883 
3884 	link_changed = B_FALSE;
3885 	rw_enter(&Adapter->chip_lock, RW_READER);
3886 	if (Adapter->link_complete)
3887 		link_changed = e1000g_link_check(Adapter);
3888 	rw_exit(&Adapter->chip_lock);
3889 
3890 	if (link_changed)
3891 		mac_link_update(Adapter->mh, Adapter->link_state);
3892 
3893 	/*
3894 	 * Workaround for esb2. Data stuck in fifo on a link
3895 	 * down event. Reset the adapter to recover it.
3896 	 */
3897 	if (Adapter->esb2_workaround) {
3898 		Adapter->esb2_workaround = B_FALSE;
3899 		(void) e1000g_reset_adapter(Adapter);
3900 	}
3901 
3902 	/*
3903 	 * With 82571 controllers, any locally administered address will
3904 	 * be overwritten when there is a reset on the other port.
3905 	 * Detect this circumstance and correct it.
3906 	 */
3907 	if ((hw->mac.type == e1000_82571) &&
3908 	    (e1000_get_laa_state_82571(hw) == B_TRUE)) {
3909 		ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0);
3910 		ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1);
3911 
3912 		ether_addr.reg.low = ntohl(ether_addr.reg.low);
3913 		ether_addr.reg.high = ntohl(ether_addr.reg.high);
3914 
3915 		if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) ||
3916 		    (ether_addr.mac.addr[4] != hw->mac.addr[1]) ||
3917 		    (ether_addr.mac.addr[3] != hw->mac.addr[2]) ||
3918 		    (ether_addr.mac.addr[2] != hw->mac.addr[3]) ||
3919 		    (ether_addr.mac.addr[1] != hw->mac.addr[4]) ||
3920 		    (ether_addr.mac.addr[0] != hw->mac.addr[5])) {
3921 			e1000_rar_set(hw, hw->mac.addr, 0);
3922 		}
3923 	}
3924 
3925 	/*
3926 	 * Long TTL workaround for 82541/82547
3927 	 */
3928 	(void) e1000_igp_ttl_workaround_82547(hw);
3929 
3930 	/*
3931 	 * Check the Adaptive IFS settings. If there are lots of collisions,
3932 	 * change the value in steps...
3933 	 * These properties should only be set for 10/100 Mbps links.
3934 	 */
3935 	if ((hw->phy.media_type == e1000_media_type_copper) &&
3936 	    ((Adapter->link_speed == SPEED_100) ||
3937 	    (Adapter->link_speed == SPEED_10))) {
3938 		e1000_update_adaptive(hw);
3939 	}
3940 	/*
3941 	 * Set Timer Interrupts
3942 	 */
3943 	E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
3944 
3945 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
3946 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
3947 	else
3948 		e1000g_timer_tx_resched(Adapter);
3949 
3950 	restart_watchdog_timer(Adapter);
3951 }
3952 
3953 /*
3954  * The function e1000g_link_timer() is called when the link setup timer
3955  * expires, which indicates that link setup has completed. The link state
3956  * is not updated until link setup completes, and it is not reported to the
3957  * upper layer through mac_link_update() in this function. It is updated
3958  * in the local timer routine or the interrupt service routine after the
3959  * interface is started (plumbed).
3960  */
3961 static void
3962 e1000g_link_timer(void *arg)
3963 {
3964 	struct e1000g *Adapter = (struct e1000g *)arg;
3965 
3966 	mutex_enter(&Adapter->link_lock);
3967 	Adapter->link_complete = B_TRUE;
3968 	Adapter->link_tid = 0;
3969 	mutex_exit(&Adapter->link_lock);
3970 }
3971 
3972 /*
3973  * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf
3974  *
3975  * This function reads the forced speed and duplex for 10/100 Mbps speeds
3976  * and also for 1000 Mbps speeds from the e1000g.conf file.
3977  */
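/*
 * For illustration (a hypothetical e1000g.conf setting, not taken from the
 * driver documentation): assigning ForceSpeedDuplex the integer value that
 * corresponds to GDIAG_100_FULL would disable autonegotiation and force
 * 100 Mbps full duplex on that instance, while GDIAG_1000_FULL keeps
 * autonegotiation enabled but advertises only 1000 Mbps full duplex.
 */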
3978 static void
3979 e1000g_force_speed_duplex(struct e1000g *Adapter)
3980 {
3981 	int forced;
3982 	struct e1000_mac_info *mac = &Adapter->shared.mac;
3983 	struct e1000_phy_info *phy = &Adapter->shared.phy;
3984 
3985 	/*
3986 	 * get value out of config file
3987 	 */
3988 	forced = e1000g_get_prop(Adapter, "ForceSpeedDuplex",
3989 	    GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY);
3990 
3991 	switch (forced) {
3992 	case GDIAG_10_HALF:
3993 		/*
3994 		 * Disable Auto Negotiation
3995 		 */
3996 		mac->autoneg = B_FALSE;
3997 		mac->forced_speed_duplex = ADVERTISE_10_HALF;
3998 		break;
3999 	case GDIAG_10_FULL:
4000 		/*
4001 		 * Disable Auto Negotiation
4002 		 */
4003 		mac->autoneg = B_FALSE;
4004 		mac->forced_speed_duplex = ADVERTISE_10_FULL;
4005 		break;
4006 	case GDIAG_100_HALF:
4007 		/*
4008 		 * Disable Auto Negotiation
4009 		 */
4010 		mac->autoneg = B_FALSE;
4011 		mac->forced_speed_duplex = ADVERTISE_100_HALF;
4012 		break;
4013 	case GDIAG_100_FULL:
4014 		/*
4015 		 * Disable Auto Negotiation
4016 		 */
4017 		mac->autoneg = B_FALSE;
4018 		mac->forced_speed_duplex = ADVERTISE_100_FULL;
4019 		break;
4020 	case GDIAG_1000_FULL:
4021 		/*
4022 		 * The gigabit spec requires autonegotiation.  Therefore,
4023 		 * when the user wants to force the speed to 1000Mbps, we
4024 		 * enable AutoNeg, but only allow the hardware to advertise
4025 		 * 1000Mbps.  This is different from 10/100 operation, where
4026 		 * we are allowed to link without any negotiation.
4027 		 */
4028 		mac->autoneg = B_TRUE;
4029 		phy->autoneg_advertised = ADVERTISE_1000_FULL;
4030 		break;
4031 	default:	/* obey the setting of AutoNegAdvertised */
4032 		mac->autoneg = B_TRUE;
4033 		phy->autoneg_advertised =
4034 		    (uint16_t)e1000g_get_prop(Adapter, "AutoNegAdvertised",
4035 		    0, AUTONEG_ADVERTISE_SPEED_DEFAULT,
4036 		    AUTONEG_ADVERTISE_SPEED_DEFAULT);
4037 		break;
4038 	}	/* switch */
4039 }
4040 
4041 /*
4042  * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf
4043  *
4044  * This function reads MaxFrameSize from e1000g.conf
4045  */
4046 static void
4047 e1000g_get_max_frame_size(struct e1000g *Adapter)
4048 {
4049 	int max_frame;
4050 	struct e1000_mac_info *mac = &Adapter->shared.mac;
4051 	struct e1000_phy_info *phy = &Adapter->shared.phy;
4052 
4053 	/*
4054 	 * get value out of config file
4055 	 */
4056 	max_frame = e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0);
4057 
4058 	switch (max_frame) {
4059 	case 0:
4060 		Adapter->default_mtu = ETHERMTU;
4061 		break;
4062 	/*
4063 	 * To avoid excessive memory allocation for rx buffers,
4064 	 * E1000G_IPALIGNPRESERVEROOM bytes are reserved.
4065 	 */
4066 	case 1:
4067 		Adapter->default_mtu = FRAME_SIZE_UPTO_4K -
4068 		    sizeof (struct ether_vlan_header) - ETHERFCSL -
4069 		    E1000G_IPALIGNPRESERVEROOM;
4070 		break;
4071 	case 2:
4072 		Adapter->default_mtu = FRAME_SIZE_UPTO_8K -
4073 		    sizeof (struct ether_vlan_header) - ETHERFCSL -
4074 		    E1000G_IPALIGNPRESERVEROOM;
4075 		break;
4076 	case 3:
4077 		if (mac->type >= e1000_82571)
4078 			Adapter->default_mtu = MAXIMUM_MTU;
4079 		else
4080 			Adapter->default_mtu = FRAME_SIZE_UPTO_16K -
4081 			    sizeof (struct ether_vlan_header) - ETHERFCSL -
4082 			    E1000G_IPALIGNPRESERVEROOM;
4083 		break;
4084 	default:
4085 		Adapter->default_mtu = ETHERMTU;
4086 		break;
4087 	}	/* switch */
4088 
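	/*
	 * Add room for the VLAN-tagged Ethernet header and the FCS. With the
	 * standard 1500-byte MTU this works out to 1500 + 18 + 4 = 1522 bytes
	 * (assuming an 18-byte ether_vlan_header and a 4-byte ETHERFCSL).
	 */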
4089 	Adapter->max_frame_size = Adapter->default_mtu +
4090 	    sizeof (struct ether_vlan_header) + ETHERFCSL;
4091 
4092 	/* ich8 does not do jumbo frames */
4093 	if (mac->type == e1000_ich8lan) {
4094 		Adapter->default_mtu = ETHERMTU;
4095 		Adapter->max_frame_size = ETHERMTU +
4096 		    sizeof (struct ether_vlan_header) + ETHERFCSL;
4097 	}
4098 
4099 	/* ich9 does not do jumbo frames on one phy type */
4100 	if ((mac->type == e1000_ich9lan) &&
4101 	    (phy->type == e1000_phy_ife)) {
4102 		Adapter->default_mtu = ETHERMTU;
4103 		Adapter->max_frame_size = ETHERMTU +
4104 		    sizeof (struct ether_vlan_header) + ETHERFCSL;
4105 	}
4106 }
4107 
4108 static void
4109 arm_watchdog_timer(struct e1000g *Adapter)
4110 {
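	/* Schedule a one-shot local timer callback roughly one second out. */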
4111 	Adapter->watchdog_tid =
4112 	    timeout(e1000g_local_timer,
4113 	    (void *)Adapter, 1 * drv_usectohz(1000000));
4114 }
4115 #pragma inline(arm_watchdog_timer)
4116 
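/*
 * The watchdog is controlled by two flags: watchdog_timer_enabled gates
 * whether the timer may run at all, while watchdog_timer_started tracks
 * whether callbacks are currently being re-armed. The stop/start routines
 * toggle only the latter; enable/disable affect both.
 */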
4117 static void
4118 enable_watchdog_timer(struct e1000g *Adapter)
4119 {
4120 	mutex_enter(&Adapter->watchdog_lock);
4121 
4122 	if (!Adapter->watchdog_timer_enabled) {
4123 		Adapter->watchdog_timer_enabled = B_TRUE;
4124 		Adapter->watchdog_timer_started = B_TRUE;
4125 		arm_watchdog_timer(Adapter);
4126 	}
4127 
4128 	mutex_exit(&Adapter->watchdog_lock);
4129 }
4130 
4131 static void
4132 disable_watchdog_timer(struct e1000g *Adapter)
4133 {
4134 	timeout_id_t tid;
4135 
4136 	mutex_enter(&Adapter->watchdog_lock);
4137 
4138 	Adapter->watchdog_timer_enabled = B_FALSE;
4139 	Adapter->watchdog_timer_started = B_FALSE;
4140 	tid = Adapter->watchdog_tid;
4141 	Adapter->watchdog_tid = 0;
4142 
4143 	mutex_exit(&Adapter->watchdog_lock);
4144 
4145 	if (tid != 0)
4146 		(void) untimeout(tid);
4147 }
4148 
4149 static void
4150 start_watchdog_timer(struct e1000g *Adapter)
4151 {
4152 	mutex_enter(&Adapter->watchdog_lock);
4153 
4154 	if (Adapter->watchdog_timer_enabled) {
4155 		if (!Adapter->watchdog_timer_started) {
4156 			Adapter->watchdog_timer_started = B_TRUE;
4157 			arm_watchdog_timer(Adapter);
4158 		}
4159 	}
4160 
4161 	mutex_exit(&Adapter->watchdog_lock);
4162 }
4163 
4164 static void
4165 restart_watchdog_timer(struct e1000g *Adapter)
4166 {
4167 	mutex_enter(&Adapter->watchdog_lock);
4168 
4169 	if (Adapter->watchdog_timer_started)
4170 		arm_watchdog_timer(Adapter);
4171 
4172 	mutex_exit(&Adapter->watchdog_lock);
4173 }
4174 
4175 static void
4176 stop_watchdog_timer(struct e1000g *Adapter)
4177 {
4178 	timeout_id_t tid;
4179 
4180 	mutex_enter(&Adapter->watchdog_lock);
4181 
4182 	Adapter->watchdog_timer_started = B_FALSE;
4183 	tid = Adapter->watchdog_tid;
4184 	Adapter->watchdog_tid = 0;
4185 
4186 	mutex_exit(&Adapter->watchdog_lock);
4187 
4188 	if (tid != 0)
4189 		(void) untimeout(tid);
4190 }
4191 
4192 static void
4193 stop_link_timer(struct e1000g *Adapter)
4194 {
4195 	timeout_id_t tid;
4196 
4197 	/* Disable the link timer */
4198 	mutex_enter(&Adapter->link_lock);
4199 
4200 	tid = Adapter->link_tid;
4201 	Adapter->link_tid = 0;
4202 
4203 	mutex_exit(&Adapter->link_lock);
4204 
4205 	if (tid != 0)
4206 		(void) untimeout(tid);
4207 }
4208 
4209 static void
4210 stop_82547_timer(e1000g_tx_ring_t *tx_ring)
4211 {
4212 	timeout_id_t tid;
4213 
4214 	/* Disable the tx timer for 82547 chipset */
4215 	mutex_enter(&tx_ring->tx_lock);
4216 
4217 	tx_ring->timer_enable_82547 = B_FALSE;
4218 	tid = tx_ring->timer_id_82547;
4219 	tx_ring->timer_id_82547 = 0;
4220 
4221 	mutex_exit(&tx_ring->tx_lock);
4222 
4223 	if (tid != 0)
4224 		(void) untimeout(tid);
4225 }
4226 
4227 void
4228 e1000g_clear_interrupt(struct e1000g *Adapter)
4229 {
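	/* Disable (mask off) all interrupt causes except RX sequence error. */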
4230 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC,
4231 	    0xffffffff & ~E1000_IMS_RXSEQ);
4232 }
4233 
4234 void
4235 e1000g_mask_interrupt(struct e1000g *Adapter)
4236 {
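	/*
	 * Enable the standard set of interrupt causes, leaving the tx
	 * descriptor write-back interrupt masked unless tx interrupts
	 * have been requested below.
	 */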
4237 	E1000_WRITE_REG(&Adapter->shared, E1000_IMS,
4238 	    IMS_ENABLE_MASK & ~E1000_IMS_TXDW);
4239 
4240 	if (Adapter->tx_intr_enable)
4241 		e1000g_mask_tx_interrupt(Adapter);
4242 }
4243 
4244 /*
4245  * This routine is called by e1000g_quiesce(), therefore must not block.
4246  */
4247 void
4248 e1000g_clear_all_interrupts(struct e1000g *Adapter)
4249 {
4250 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff);
4251 }
4252 
4253 void
4254 e1000g_mask_tx_interrupt(struct e1000g *Adapter)
4255 {
4256 	E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW);
4257 }
4258 
4259 void
4260 e1000g_clear_tx_interrupt(struct e1000g *Adapter)
4261 {
4262 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW);
4263 }
4264 
4265 static void
4266 e1000g_smartspeed(struct e1000g *Adapter)
4267 {
4268 	struct e1000_hw *hw = &Adapter->shared;
4269 	uint16_t phy_status;
4270 	uint16_t phy_ctrl;
4271 
4272 	/*
4273 	 * If we're not T-or-T, or we're not autoneg'ing, or we're not
4274 	 * advertising 1000Full, we don't even use the workaround
4275 	 */
4276 	if ((hw->phy.type != e1000_phy_igp) ||
4277 	    !hw->mac.autoneg ||
4278 	    !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL))
4279 		return;
4280 
4281 	/*
4282 	 * True if this is the first call of this function or after every
4283 	 * 30 seconds of not having link
4284 	 */
4285 	if (Adapter->smartspeed == 0) {
4286 		/*
4287 		 * If Master/Slave config fault is asserted twice, we
4288 		 * assume back-to-back
4289 		 */
4290 		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4291 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4292 			return;
4293 
4294 		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4295 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4296 			return;
4297 		/*
4298 		 * We're assuming back-to-back because our status register
4299 		 * insists there's a fault in the master/slave
4300 		 * relationship that was "negotiated".
4301 		 */
4302 		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4303 		/*
4304 		 * Is the phy configured for manual configuration of
4305 		 * master/slave?
4306 		 */
4307 		if (phy_ctrl & CR_1000T_MS_ENABLE) {
4308 			/*
4309 			 * Yes.  Then disable manual configuration (enable
4310 			 * auto configuration) of master/slave
4311 			 */
4312 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
4313 			(void) e1000_write_phy_reg(hw,
4314 			    PHY_1000T_CTRL, phy_ctrl);
4315 			/*
4316 			 * Effectively starting the clock
4317 			 */
4318 			Adapter->smartspeed++;
4319 			/*
4320 			 * Restart autonegotiation
4321 			 */
4322 			if (!e1000_phy_setup_autoneg(hw) &&
4323 			    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
4324 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4325 				    MII_CR_RESTART_AUTO_NEG);
4326 				(void) e1000_write_phy_reg(hw,
4327 				    PHY_CONTROL, phy_ctrl);
4328 			}
4329 		}
4330 		return;
4331 		/*
4332 		 * Has 6 seconds transpired still without link? Remember,
4333 		 * you should reset the smartspeed counter once you obtain
4334 		 * link
4335 		 */
4336 	} else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4337 		/*
4338 		 * Yes.  Remember, we did at the start determine that
4339 		 * there's a master/slave configuration fault, so we're
4340 		 * still assuming there's someone on the other end, but we
4341 		 * just haven't yet been able to talk to it. We then
4342 		 * re-enable auto configuration of master/slave to see if
4343 		 * we're running 2/3 pair cables.
4344 		 */
4345 		/*
4346 		 * If still no link, perhaps using 2/3 pair cable
4347 		 */
4348 		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4349 		phy_ctrl |= CR_1000T_MS_ENABLE;
4350 		(void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4351 		/*
4352 		 * Restart autoneg with phy enabled for manual
4353 		 * configuration of master/slave
4354 		 */
4355 		if (!e1000_phy_setup_autoneg(hw) &&
4356 		    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
4357 			phy_ctrl |=
4358 			    (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
4359 			(void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
4360 		}
4361 		/*
4362 		 * Hopefully, there are no more faults and we've obtained
4363 		 * link as a result.
4364 		 */
4365 	}
4366 	/*
4367 	 * Restart process after E1000_SMARTSPEED_MAX iterations (30
4368 	 * seconds)
4369 	 */
4370 	if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4371 		Adapter->smartspeed = 0;
4372 }
4373 
4374 static boolean_t
4375 is_valid_mac_addr(uint8_t *mac_addr)
4376 {
4377 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
4378 	const uint8_t addr_test2[6] =
4379 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4380 
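	/* Reject the all-zeros address and the broadcast address. */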
4381 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
4382 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
4383 		return (B_FALSE);
4384 
4385 	return (B_TRUE);
4386 }
4387 
4388 /*
4389  * e1000g_stall_check - check for tx stall
4390  *
4391  * This function checks if the adapter is stalled (in transmit).
4392  *
4393  * It is called each time the watchdog timeout is invoked.
4394  * If the transmit descriptor reclaim continuously fails,
4395  * the watchdog value will increment by 1. If the watchdog
4396  * value exceeds the threshold, the adapter is assumed to
4397  * have stalled and needs to be reset.
4398  */
4399 static boolean_t
4400 e1000g_stall_check(struct e1000g *Adapter)
4401 {
4402 	e1000g_tx_ring_t *tx_ring;
4403 
4404 	tx_ring = Adapter->tx_ring;
4405 
4406 	if (Adapter->link_state != LINK_STATE_UP)
4407 		return (B_FALSE);
4408 
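	/*
	 * Count consecutive watchdog intervals in which tx descriptor
	 * recycling failed.
	 */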
4409 	if (tx_ring->recycle_fail > 0)
4410 		tx_ring->stall_watchdog++;
4411 	else
4412 		tx_ring->stall_watchdog = 0;
4413 
4414 	if (tx_ring->stall_watchdog < E1000G_STALL_WATCHDOG_COUNT)
4415 		return (B_FALSE);
4416 
4417 	tx_ring->stall_watchdog = 0;
4418 	tx_ring->recycle_fail = 0;
4419 
4420 	return (B_TRUE);
4421 }
4422 
4423 #ifdef E1000G_DEBUG
4424 static enum ioc_reply
4425 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp)
4426 {
4427 	void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd);
4428 	e1000g_peekpoke_t *ppd;
4429 	uint64_t mem_va;
4430 	uint64_t maxoff;
4431 	boolean_t peek;
4432 
4433 	switch (iocp->ioc_cmd) {
4434 
4435 	case E1000G_IOC_REG_PEEK:
4436 		peek = B_TRUE;
4437 		break;
4438 
4439 	case E1000G_IOC_REG_POKE:
4440 		peek = B_FALSE;
4441 		break;
4442 
4443 	default:
4444 		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
4445 		    "e1000g_diag_ioctl: invalid ioctl command 0x%X\n",
4446 		    iocp->ioc_cmd);
4447 		return (IOC_INVAL);
4448 	}
4449 
4450 	/*
4451 	 * Validate format of ioctl
4452 	 */
4453 	if (iocp->ioc_count != sizeof (e1000g_peekpoke_t))
4454 		return (IOC_INVAL);
4455 	if (mp->b_cont == NULL)
4456 		return (IOC_INVAL);
4457 
4458 	ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr;
4459 
4460 	/*
4461 	 * Validate request parameters
4462 	 */
4463 	switch (ppd->pp_acc_space) {
4464 
4465 	default:
4466 		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
4467 		    "e1000g_diag_ioctl: invalid access space 0x%X\n",
4468 		    ppd->pp_acc_space);
4469 		return (IOC_INVAL);
4470 
4471 	case E1000G_PP_SPACE_REG:
4472 		/*
4473 		 * Memory-mapped I/O space
4474 		 */
4475 		ASSERT(ppd->pp_acc_size == 4);
4476 		if (ppd->pp_acc_size != 4)
4477 			return (IOC_INVAL);
4478 
4479 		if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
4480 			return (IOC_INVAL);
4481 
4482 		mem_va = 0;
4483 		maxoff = 0x10000;
4484 		ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg;
4485 		break;
4486 
4487 	case E1000G_PP_SPACE_E1000G:
4488 		/*
4489 		 * E1000g data structure!
4490 		 */
4491 		mem_va = (uintptr_t)e1000gp;
4492 		maxoff = sizeof (struct e1000g);
4493 		ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem;
4494 		break;
4495 
4496 	}
4497 
4498 	if (ppd->pp_acc_offset >= maxoff)
4499 		return (IOC_INVAL);
4500 
4501 	if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff)
4502 		return (IOC_INVAL);
4503 
4504 	/*
4505 	 * All OK - go!
4506 	 */
4507 	ppd->pp_acc_offset += mem_va;
4508 	(*ppfn)(e1000gp, ppd);
4509 	return (peek ? IOC_REPLY : IOC_ACK);
4510 }
4511 
4512 static void
4513 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4514 {
4515 	ddi_acc_handle_t handle;
4516 	uint32_t *regaddr;
4517 
4518 	handle = e1000gp->osdep.reg_handle;
4519 	regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
4520 	    (uintptr_t)ppd->pp_acc_offset);
4521 
4522 	ppd->pp_acc_data = ddi_get32(handle, regaddr);
4523 }
4524 
4525 static void
4526 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4527 {
4528 	ddi_acc_handle_t handle;
4529 	uint32_t *regaddr;
4530 	uint32_t value;
4531 
4532 	handle = e1000gp->osdep.reg_handle;
4533 	regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
4534 	    (uintptr_t)ppd->pp_acc_offset);
4535 	value = (uint32_t)ppd->pp_acc_data;
4536 
4537 	ddi_put32(handle, regaddr, value);
4538 }
4539 
4540 static void
4541 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4542 {
4543 	uint64_t value;
4544 	void *vaddr;
4545 
4546 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
4547 
4548 	switch (ppd->pp_acc_size) {
4549 	case 1:
4550 		value = *(uint8_t *)vaddr;
4551 		break;
4552 
4553 	case 2:
4554 		value = *(uint16_t *)vaddr;
4555 		break;
4556 
4557 	case 4:
4558 		value = *(uint32_t *)vaddr;
4559 		break;
4560 
4561 	case 8:
4562 		value = *(uint64_t *)vaddr;
4563 		break;
4564 	}
4565 
4566 	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
4567 	    "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n",
4568 	    (void *)e1000gp, (void *)ppd, value, vaddr);
4569 
4570 	ppd->pp_acc_data = value;
4571 }
4572 
4573 static void
4574 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4575 {
4576 	uint64_t value;
4577 	void *vaddr;
4578 
4579 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
4580 	value = ppd->pp_acc_data;
4581 
4582 	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
4583 	    "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n",
4584 	    (void *)e1000gp, (void *)ppd, value, vaddr);
4585 
4586 	switch (ppd->pp_acc_size) {
4587 	case 1:
4588 		*(uint8_t *)vaddr = (uint8_t)value;
4589 		break;
4590 
4591 	case 2:
4592 		*(uint16_t *)vaddr = (uint16_t)value;
4593 		break;
4594 
4595 	case 4:
4596 		*(uint32_t *)vaddr = (uint32_t)value;
4597 		break;
4598 
4599 	case 8:
4600 		*(uint64_t *)vaddr = (uint64_t)value;
4601 		break;
4602 	}
4603 }
4604 #endif
4605 
4606 /*
4607  * Loopback Support
4608  */
4609 static lb_property_t lb_normal =
4610 	{ normal,	"normal",	E1000G_LB_NONE		};
4611 static lb_property_t lb_external1000 =
4612 	{ external,	"1000Mbps",	E1000G_LB_EXTERNAL_1000	};
4613 static lb_property_t lb_external100 =
4614 	{ external,	"100Mbps",	E1000G_LB_EXTERNAL_100	};
4615 static lb_property_t lb_external10 =
4616 	{ external,	"10Mbps",	E1000G_LB_EXTERNAL_10	};
4617 static lb_property_t lb_phy =
4618 	{ internal,	"PHY",		E1000G_LB_INTERNAL_PHY	};
4619 
4620 static enum ioc_reply
4621 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp)
4622 {
4623 	lb_info_sz_t *lbsp;
4624 	lb_property_t *lbpp;
4625 	struct e1000_hw *hw;
4626 	uint32_t *lbmp;
4627 	uint32_t size;
4628 	uint32_t value;
4629 
4630 	hw = &Adapter->shared;
4631 
4632 	if (mp->b_cont == NULL)
4633 		return (IOC_INVAL);
4634 
4635 	if (!e1000g_check_loopback_support(hw)) {
4636 		e1000g_log(NULL, CE_WARN,
4637 		    "Loopback is not supported on e1000g%d", Adapter->instance);
4638 		return (IOC_INVAL);
4639 	}
4640 
4641 	switch (iocp->ioc_cmd) {
4642 	default:
4643 		return (IOC_INVAL);
4644 
4645 	case LB_GET_INFO_SIZE:
4646 		size = sizeof (lb_info_sz_t);
4647 		if (iocp->ioc_count != size)
4648 			return (IOC_INVAL);
4649 
4650 		rw_enter(&Adapter->chip_lock, RW_WRITER);
4651 		e1000g_get_phy_state(Adapter);
4652 
4653 		/*
4654 		 * Workaround for hardware faults. In order to get a stable
4655 		 * state of the PHY, we wait for a specific interval and
4656 		 * try again. The time delay is an empirical value based
4657 		 * on our testing.
4658 		 */
4659 		msec_delay(100);
4660 		e1000g_get_phy_state(Adapter);
4661 		rw_exit(&Adapter->chip_lock);
4662 
4663 		value = sizeof (lb_normal);
4664 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
4665 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
4666 		    (hw->phy.media_type == e1000_media_type_fiber) ||
4667 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
4668 			value += sizeof (lb_phy);
4669 			switch (hw->mac.type) {
4670 			case e1000_82571:
4671 			case e1000_82572:
4672 			case e1000_80003es2lan:
4673 				value += sizeof (lb_external1000);
4674 				break;
4675 			}
4676 		}
4677 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
4678 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
4679 			value += sizeof (lb_external100);
4680 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
4681 			value += sizeof (lb_external10);
4682 
4683 		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
4684 		*lbsp = value;
4685 		break;
4686 
4687 	case LB_GET_INFO:
4688 		value = sizeof (lb_normal);
4689 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
4690 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
4691 		    (hw->phy.media_type == e1000_media_type_fiber) ||
4692 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
4693 			value += sizeof (lb_phy);
4694 			switch (hw->mac.type) {
4695 			case e1000_82571:
4696 			case e1000_82572:
4697 			case e1000_80003es2lan:
4698 				value += sizeof (lb_external1000);
4699 				break;
4700 			}
4701 		}
4702 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
4703 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
4704 			value += sizeof (lb_external100);
4705 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
4706 			value += sizeof (lb_external10);
4707 
4708 		size = value;
4709 		if (iocp->ioc_count != size)
4710 			return (IOC_INVAL);
4711 
4712 		value = 0;
4713 		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
4714 		lbpp[value++] = lb_normal;
4715 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
4716 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
4717 		    (hw->phy.media_type == e1000_media_type_fiber) ||
4718 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
4719 			lbpp[value++] = lb_phy;
4720 			switch (hw->mac.type) {
4721 			case e1000_82571:
4722 			case e1000_82572:
4723 			case e1000_80003es2lan:
4724 				lbpp[value++] = lb_external1000;
4725 				break;
4726 			}
4727 		}
4728 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
4729 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
4730 			lbpp[value++] = lb_external100;
4731 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
4732 			lbpp[value++] = lb_external10;
4733 		break;
4734 
4735 	case LB_GET_MODE:
4736 		size = sizeof (uint32_t);
4737 		if (iocp->ioc_count != size)
4738 			return (IOC_INVAL);
4739 
4740 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
4741 		*lbmp = Adapter->loopback_mode;
4742 		break;
4743 
4744 	case LB_SET_MODE:
4745 		size = 0;
4746 		if (iocp->ioc_count != sizeof (uint32_t))
4747 			return (IOC_INVAL);
4748 
4749 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
4750 		if (!e1000g_set_loopback_mode(Adapter, *lbmp))
4751 			return (IOC_INVAL);
4752 		break;
4753 	}
4754 
4755 	iocp->ioc_count = size;
4756 	iocp->ioc_error = 0;
4757 
4758 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
4759 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4760 		return (IOC_INVAL);
4761 	}
4762 
4763 	return (IOC_REPLY);
4764 }
4765 
4766 static boolean_t
4767 e1000g_check_loopback_support(struct e1000_hw *hw)
4768 {
4769 	switch (hw->mac.type) {
4770 	case e1000_82540:
4771 	case e1000_82545:
4772 	case e1000_82545_rev_3:
4773 	case e1000_82546:
4774 	case e1000_82546_rev_3:
4775 	case e1000_82541:
4776 	case e1000_82541_rev_2:
4777 	case e1000_82547:
4778 	case e1000_82547_rev_2:
4779 	case e1000_82571:
4780 	case e1000_82572:
4781 	case e1000_82573:
4782 	case e1000_80003es2lan:
4783 		return (B_TRUE);
4784 	}
4785 	return (B_FALSE);
4786 }
4787 
4788 static boolean_t
4789 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode)
4790 {
4791 	struct e1000_hw *hw;
4792 	int i, times;
4793 	boolean_t link_up;
4794 
4795 	if (mode == Adapter->loopback_mode)
4796 		return (B_TRUE);
4797 
4798 	hw = &Adapter->shared;
4799 	times = 0;
4800 
4801 	Adapter->loopback_mode = mode;
4802 
4803 	if (mode == E1000G_LB_NONE) {
4804 		/* Reset the chip */
4805 		hw->phy.autoneg_wait_to_complete = B_TRUE;
4806 		(void) e1000g_reset_adapter(Adapter);
4807 		hw->phy.autoneg_wait_to_complete = B_FALSE;
4808 		return (B_TRUE);
4809 	}
4810 
4811 again:
4812 
4813 	rw_enter(&Adapter->chip_lock, RW_WRITER);
4814 
4815 	switch (mode) {
4816 	default:
4817 		rw_exit(&Adapter->chip_lock);
4818 		return (B_FALSE);
4819 
4820 	case E1000G_LB_EXTERNAL_1000:
4821 		e1000g_set_external_loopback_1000(Adapter);
4822 		break;
4823 
4824 	case E1000G_LB_EXTERNAL_100:
4825 		e1000g_set_external_loopback_100(Adapter);
4826 		break;
4827 
4828 	case E1000G_LB_EXTERNAL_10:
4829 		e1000g_set_external_loopback_10(Adapter);
4830 		break;
4831 
4832 	case E1000G_LB_INTERNAL_PHY:
4833 		e1000g_set_internal_loopback(Adapter);
4834 		break;
4835 	}
4836 
4837 	times++;
4838 
4839 	rw_exit(&Adapter->chip_lock);
4840 
4841 	/* Wait for link up */
4842 	for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--)
4843 		msec_delay(100);
4844 
4845 	rw_enter(&Adapter->chip_lock, RW_WRITER);
4846 
4847 	link_up = e1000g_link_up(Adapter);
4848 
4849 	rw_exit(&Adapter->chip_lock);
4850 
4851 	if (!link_up) {
4852 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
4853 		    "Failed to get the link up");
4854 		if (times < 2) {
4855 			/* Reset the link */
4856 			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
4857 			    "Reset the link ...");
4858 			(void) e1000g_reset_adapter(Adapter);
4859 			goto again;
4860 		}
4861 	}
4862 
4863 	return (B_TRUE);
4864 }
4865 
4866 /*
4867  * The following loopback settings are from Intel's technical
4868  * document - "How To Loopback". All the register settings and
4869  * time delay values are directly inherited from the document
4870  * time delay values are taken directly from the document;
4871  * no further explanation is available.
4872 static void
4873 e1000g_set_internal_loopback(struct e1000g *Adapter)
4874 {
4875 	struct e1000_hw *hw;
4876 	uint32_t ctrl;
4877 	uint32_t status;
4878 	uint16_t phy_ctrl;
4879 	uint32_t txcw;
4880 
4881 	hw = &Adapter->shared;
4882 
4883 	/* Disable Smart Power Down */
4884 	phy_spd_state(hw, B_FALSE);
4885 
4886 	(void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
4887 	phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10);
4888 	phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000;
4889 
4890 	switch (hw->mac.type) {
4891 	case e1000_82540:
4892 	case e1000_82545:
4893 	case e1000_82545_rev_3:
4894 	case e1000_82546:
4895 	case e1000_82546_rev_3:
4896 	case e1000_82573:
4897 		/* Auto-MDI/MDIX off */
4898 		(void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
4899 		/* Reset PHY to update Auto-MDI/MDIX */
4900 		(void) e1000_write_phy_reg(hw, PHY_CONTROL,
4901 		    phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN);
4902 		/* Reset PHY to auto-neg off and force 1000 */
4903 		(void) e1000_write_phy_reg(hw, PHY_CONTROL,
4904 		    phy_ctrl | MII_CR_RESET);
4905 		/*
4906 		 * Disable PHY receiver for 82540/545/546 and 82573 Family.
4907 		 * See comments above e1000g_set_internal_loopback() for the
4908 		 * background.
4909 		 */
4910 		(void) e1000_write_phy_reg(hw, 29, 0x001F);
4911 		(void) e1000_write_phy_reg(hw, 30, 0x8FFC);
4912 		(void) e1000_write_phy_reg(hw, 29, 0x001A);
4913 		(void) e1000_write_phy_reg(hw, 30, 0x8FF0);
4914 		break;
4915 	case e1000_80003es2lan:
4916 		/* Force Link Up */
4917 		(void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
4918 		    0x1CC);
4919 		/* Sets PCS loopback at 1Gbs */
4920 		(void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
4921 		    0x1046);
4922 		break;
4923 	}
4924 
4925 	/* Set loopback */
4926 	(void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK);
4927 
4928 	msec_delay(250);
4929 
4930 	/* Now set up the MAC to the same speed/duplex as the PHY. */
4931 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4932 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
4933 	ctrl |= (E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
4934 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
4935 	    E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
4936 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
4937 
4938 	switch (hw->mac.type) {
4939 	case e1000_82540:
4940 	case e1000_82545:
4941 	case e1000_82545_rev_3:
4942 	case e1000_82546:
4943 	case e1000_82546_rev_3:
4944 		/*
4945 		 * For some serdes we'll need to commit the writes now
4946 		 * so that the status is updated on link
4947 		 */
4948 		if (hw->phy.media_type == e1000_media_type_internal_serdes) {
4949 			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4950 			msec_delay(100);
4951 			ctrl = E1000_READ_REG(hw, E1000_CTRL);
4952 		}
4953 
4954 		if (hw->phy.media_type == e1000_media_type_copper) {
4955 			/* Invert Loss of Signal */
4956 			ctrl |= E1000_CTRL_ILOS;
4957 		} else {
4958 			/* Set ILOS on fiber nic if half duplex is detected */
4959 			status = E1000_READ_REG(hw, E1000_STATUS);
4960 			if ((status & E1000_STATUS_FD) == 0)
4961 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
4962 		}
4963 		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4964 		break;
4965 
4966 	case e1000_82571:
4967 	case e1000_82572:
4968 		/*
4969 		 * The fiber/SerDes versions of this adapter do not contain an
4970 		 * accessible PHY. Therefore, loopback beyond MAC must be done
4971 		 * using SerDes analog loopback.
4972 		 */
4973 		if (hw->phy.media_type != e1000_media_type_copper) {
4974 			status = E1000_READ_REG(hw, E1000_STATUS);
4975 			/* Set ILOS on fiber nic if half duplex is detected */
4976 			if (((status & E1000_STATUS_LU) == 0) ||
4977 			    ((status & E1000_STATUS_FD) == 0) ||
4978 			    (hw->phy.media_type ==
4979 			    e1000_media_type_internal_serdes))
4980 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
4981 
4982 			/* Disable autoneg by setting bit 31 of TXCW to zero */
4983 			txcw = E1000_READ_REG(hw, E1000_TXCW);
4984 			txcw &= ~((uint32_t)1 << 31);
4985 			E1000_WRITE_REG(hw, E1000_TXCW, txcw);
4986 
4987 			/*
4988 			 * Write 0x410 to Serdes Control register
4989 			 * to enable Serdes analog loopback
4990 			 */
4991 			E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
4992 			msec_delay(10);
4993 		}
4994 		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
4995 		break;
4996 
4997 	case e1000_82573:
4998 		ctrl |= E1000_CTRL_ILOS;
4999 		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5000 		break;
5001 	}
5002 }
5003 
5004 static void
5005 e1000g_set_external_loopback_1000(struct e1000g *Adapter)
5006 {
5007 	struct e1000_hw *hw;
5008 	uint32_t rctl;
5009 	uint32_t ctrl_ext;
5010 	uint32_t ctrl;
5011 	uint32_t status;
5012 	uint32_t txcw;
5013 	uint16_t phydata;
5014 
5015 	hw = &Adapter->shared;
5016 
5017 	/* Disable Smart Power Down */
5018 	phy_spd_state(hw, B_FALSE);
5019 
5020 	switch (hw->mac.type) {
5021 	case e1000_82571:
5022 	case e1000_82572:
5023 		switch (hw->phy.media_type) {
5024 		case e1000_media_type_copper:
5025 			/* Force link up (Must be done before the PHY writes) */
5026 			ctrl = E1000_READ_REG(hw, E1000_CTRL);
5027 			ctrl |= E1000_CTRL_SLU;	/* Force Link Up */
5028 			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5029 
5030 			rctl = E1000_READ_REG(hw, E1000_RCTL);
5031 			rctl |= (E1000_RCTL_EN |
5032 			    E1000_RCTL_SBP |
5033 			    E1000_RCTL_UPE |
5034 			    E1000_RCTL_MPE |
5035 			    E1000_RCTL_LPE |
5036 			    E1000_RCTL_BAM);		/* 0x803E */
5037 			E1000_WRITE_REG(hw, E1000_RCTL, rctl);
5038 
5039 			ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5040 			ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA |
5041 			    E1000_CTRL_EXT_SDP6_DATA |
5042 			    E1000_CTRL_EXT_SDP7_DATA |
5043 			    E1000_CTRL_EXT_SDP4_DIR |
5044 			    E1000_CTRL_EXT_SDP6_DIR |
5045 			    E1000_CTRL_EXT_SDP7_DIR);	/* 0x0DD0 */
5046 			E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5047 
5048 			/*
5049 			 * This sequence tunes the PHY's SDP; it contains no
5050 			 * customer-settable values. For background, see the
5051 			 * comments above e1000g_set_internal_loopback().
5052 			 */
5053 			(void) e1000_write_phy_reg(hw, 0x0, 0x140);
5054 			msec_delay(10);
5055 			(void) e1000_write_phy_reg(hw, 0x9, 0x1A00);
5056 			(void) e1000_write_phy_reg(hw, 0x12, 0xC10);
5057 			(void) e1000_write_phy_reg(hw, 0x12, 0x1C10);
5058 			(void) e1000_write_phy_reg(hw, 0x1F37, 0x76);
5059 			(void) e1000_write_phy_reg(hw, 0x1F33, 0x1);
5060 			(void) e1000_write_phy_reg(hw, 0x1F33, 0x0);
5061 
5062 			(void) e1000_write_phy_reg(hw, 0x1F35, 0x65);
5063 			(void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C);
5064 			(void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC);
5065 			(void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C);
5066 			(void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC);
5067 
5068 			msec_delay(50);
5069 			break;
5070 		case e1000_media_type_fiber:
5071 		case e1000_media_type_internal_serdes:
5072 			status = E1000_READ_REG(hw, E1000_STATUS);
5073 			if (((status & E1000_STATUS_LU) == 0) ||
5074 			    (hw->phy.media_type ==
5075 			    e1000_media_type_internal_serdes)) {
5076 				ctrl = E1000_READ_REG(hw, E1000_CTRL);
5077 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5078 				E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5079 			}
5080 
5081 			/* Disable autoneg by setting bit 31 of TXCW to zero */
5082 			txcw = E1000_READ_REG(hw, E1000_TXCW);
5083 			txcw &= ~((uint32_t)1 << 31);
5084 			E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5085 
5086 			/*
5087 			 * Write 0x410 to Serdes Control register
5088 			 * to enable Serdes analog loopback
5089 			 */
5090 			E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5091 			msec_delay(10);
5092 			break;
5093 		default:
5094 			break;
5095 		}
5096 		break;
5097 	case e1000_80003es2lan:
5098 		(void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata);
5099 		(void) e1000_write_phy_reg(hw, GG82563_REG(6, 16),
5100 		    phydata | (1 << 5));
5101 		Adapter->param_adv_autoneg = 1;
5102 		Adapter->param_adv_1000fdx = 1;
5103 		(void) e1000g_reset_link(Adapter);
5104 		break;
5105 	}
5106 }
5107 
5108 static void
5109 e1000g_set_external_loopback_100(struct e1000g *Adapter)
5110 {
5111 	struct e1000_hw *hw;
5112 	uint32_t ctrl;
5113 	uint16_t phy_ctrl;
5114 
5115 	hw = &Adapter->shared;
5116 
5117 	/* Disable Smart Power Down */
5118 	phy_spd_state(hw, B_FALSE);
5119 
5120 	phy_ctrl = (MII_CR_FULL_DUPLEX |
5121 	    MII_CR_SPEED_100);
5122 
5123 	/* Force 100/FD, reset PHY */
5124 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5125 	    phy_ctrl | MII_CR_RESET);	/* 0xA100 */
5126 	msec_delay(10);
5127 
5128 	/* Force 100/FD */
5129 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5130 	    phy_ctrl);			/* 0x2100 */
5131 	msec_delay(10);
5132 
5133 	/* Now setup the MAC to the same speed/duplex as the PHY. */
5134 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5135 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
5136 	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
5137 	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
5138 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
5139 	    E1000_CTRL_SPD_100 |	/* Force Speed to 100 */
5140 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
5141 
5142 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5143 }
5144 
5145 static void
5146 e1000g_set_external_loopback_10(struct e1000g *Adapter)
5147 {
5148 	struct e1000_hw *hw;
5149 	uint32_t ctrl;
5150 	uint16_t phy_ctrl;
5151 
5152 	hw = &Adapter->shared;
5153 
5154 	/* Disable Smart Power Down */
5155 	phy_spd_state(hw, B_FALSE);
5156 
5157 	phy_ctrl = (MII_CR_FULL_DUPLEX |
5158 	    MII_CR_SPEED_10);
5159 
5160 	/* Force 10/FD, reset PHY */
5161 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5162 	    phy_ctrl | MII_CR_RESET);	/* 0x8100 */
5163 	msec_delay(10);
5164 
5165 	/* Force 10/FD */
5166 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5167 	    phy_ctrl);			/* 0x0100 */
5168 	msec_delay(10);
5169 
5170 	/* Now setup the MAC to the same speed/duplex as the PHY. */
5171 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5172 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
5173 	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
5174 	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
5175 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
5176 	    E1000_CTRL_SPD_10 |		/* Force Speed to 10 */
5177 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
5178 
5179 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5180 }
5181 
5182 #ifdef __sparc
5183 static boolean_t
5184 e1000g_find_mac_address(struct e1000g *Adapter)
5185 {
5186 	struct e1000_hw *hw = &Adapter->shared;
5187 	uchar_t *bytes;
5188 	struct ether_addr sysaddr;
5189 	uint_t nelts;
5190 	int err;
5191 	boolean_t found = B_FALSE;
5192 
5193 	/*
5194 	 * The "vendor's factory-set address" may already have
5195 	 * been extracted from the chip, but if the property
5196 	 * "local-mac-address" is set we use that instead.
5197 	 *
5198 	 * We check whether it looks like an array of 6
5199 	 * bytes (which it should, if OBP set it).  If we can't
5200 	 * make sense of it this way, we'll ignore it.
5201 	 */
5202 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
5203 	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
5204 	if (err == DDI_PROP_SUCCESS) {
5205 		if (nelts == ETHERADDRL) {
5206 			while (nelts--)
5207 				hw->mac.addr[nelts] = bytes[nelts];
5208 			found = B_TRUE;
5209 		}
5210 		ddi_prop_free(bytes);
5211 	}
5212 
5213 	/*
5214 	 * Look up the OBP property "local-mac-address?". If the user has set
5215 	 * 'local-mac-address? = false', use "the system address" instead.
5216 	 */
5217 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0,
5218 	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
5219 		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
5220 			if (localetheraddr(NULL, &sysaddr) != 0) {
5221 				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
5222 				found = B_TRUE;
5223 			}
5224 		}
5225 		ddi_prop_free(bytes);
5226 	}
5227 
5228 	/*
5229 	 * Finally(!), if there's a valid "mac-address" property (created
5230 	 * if we netbooted from this interface), we must use this instead
5231 	 * of any of the above to ensure that the NFS/install server doesn't
5232 	 * get confused by the address changing as Solaris takes over!
5233 	 */
5234 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
5235 	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
5236 	if (err == DDI_PROP_SUCCESS) {
5237 		if (nelts == ETHERADDRL) {
5238 			while (nelts--)
5239 				hw->mac.addr[nelts] = bytes[nelts];
5240 			found = B_TRUE;
5241 		}
5242 		ddi_prop_free(bytes);
5243 	}
5244 
5245 	if (found) {
5246 		bcopy(hw->mac.addr, hw->mac.perm_addr,
5247 		    ETHERADDRL);
5248 	}
5249 
5250 	return (found);
5251 }
5252 #endif
5253 
5254 static int
5255 e1000g_add_intrs(struct e1000g *Adapter)
5256 {
5257 	dev_info_t *devinfo;
5258 	int intr_types;
5259 	int rc;
5260 
5261 	devinfo = Adapter->dip;
5262 
5263 	/* Get supported interrupt types */
5264 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
5265 
5266 	if (rc != DDI_SUCCESS) {
5267 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5268 		    "Get supported interrupt types failed: %d\n", rc);
5269 		return (DDI_FAILURE);
5270 	}
5271 
5272 	/*
5273 	 * Based on Intel Technical Advisory document (TA-160), there are some
5274 	 * cases where some older Intel PCI-X NICs may "advertise" to the OS
5275 	 * that they support MSI, but in fact have problems.
5276 	 * So we should only enable MSI for PCI-E NICs and disable MSI for old
5277 	 * PCI/PCI-X NICs.
5278 	 */
5279 	if (Adapter->shared.mac.type < e1000_82571)
5280 		Adapter->msi_enable = B_FALSE;
5281 
5282 	if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) {
5283 		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI);
5284 
5285 		if (rc != DDI_SUCCESS) {
5286 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5287 			    "Add MSI failed, trying Legacy interrupts\n");
5288 		} else {
5289 			Adapter->intr_type = DDI_INTR_TYPE_MSI;
5290 		}
5291 	}
5292 
5293 	if ((Adapter->intr_type == 0) &&
5294 	    (intr_types & DDI_INTR_TYPE_FIXED)) {
5295 		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED);
5296 
5297 		if (rc != DDI_SUCCESS) {
5298 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5299 			    "Add Legacy interrupts failed\n");
5300 			return (DDI_FAILURE);
5301 		}
5302 
5303 		Adapter->intr_type = DDI_INTR_TYPE_FIXED;
5304 	}
5305 
5306 	if (Adapter->intr_type == 0) {
5307 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5308 		    "No interrupts registered\n");
5309 		return (DDI_FAILURE);
5310 	}
5311 
5312 	return (DDI_SUCCESS);
5313 }
5314 
5315 /*
5316  * e1000g_intr_add() handles MSI/Legacy interrupts
5317  */
5318 static int
5319 e1000g_intr_add(struct e1000g *Adapter, int intr_type)
5320 {
5321 	dev_info_t *devinfo;
5322 	int count, avail, actual;
5323 	int x, y, rc, inum = 0;
5324 	int flag;
5325 	ddi_intr_handler_t *intr_handler;
5326 
5327 	devinfo = Adapter->dip;
5328 
5329 	/* get number of interrupts */
5330 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
5331 	if ((rc != DDI_SUCCESS) || (count == 0)) {
5332 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5333 		    "Get interrupt number failed. Return: %d, count: %d\n",
5334 		    rc, count);
5335 		return (DDI_FAILURE);
5336 	}
5337 
5338 	/* get number of available interrupts */
5339 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
5340 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
5341 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5342 		    "Get interrupt available number failed. "
5343 		    "Return: %d, available: %d\n", rc, avail);
5344 		return (DDI_FAILURE);
5345 	}
5346 
5347 	if (avail < count) {
5348 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5349 		    "Interrupts count: %d, available: %d\n",
5350 		    count, avail);
5351 	}
5352 
5353 	/* Allocate an array of interrupt handles */
5354 	Adapter->intr_size = count * sizeof (ddi_intr_handle_t);
5355 	Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP);
5356 
5357 	/* Set NORMAL behavior for both MSI and FIXED interrupt */
5358 	flag = DDI_INTR_ALLOC_NORMAL;
5359 
5360 	/* call ddi_intr_alloc() */
5361 	rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum,
5362 	    count, &actual, flag);
5363 
5364 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
5365 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5366 		    "Allocate interrupts failed: %d\n", rc);
5367 
5368 		kmem_free(Adapter->htable, Adapter->intr_size);
5369 		return (DDI_FAILURE);
5370 	}
5371 
5372 	if (actual < count) {
5373 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5374 		    "Interrupts requested: %d, received: %d\n",
5375 		    count, actual);
5376 	}
5377 
5378 	Adapter->intr_cnt = actual;
5379 
5380 	/* Get priority for first msi, assume remaining are all the same */
5381 	rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri);
5382 
5383 	if (rc != DDI_SUCCESS) {
5384 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5385 		    "Get interrupt priority failed: %d\n", rc);
5386 
5387 		/* Free already allocated intr */
5388 		for (y = 0; y < actual; y++)
5389 			(void) ddi_intr_free(Adapter->htable[y]);
5390 
5391 		kmem_free(Adapter->htable, Adapter->intr_size);
5392 		return (DDI_FAILURE);
5393 	}
5394 
5395 	/*
5396 	 * In Legacy Interrupt mode, for PCI-Express adapters, we should
5397 	 * use the interrupt service routine e1000g_intr_pciexpress()
5398 	 * to avoid interrupt stealing when sharing interrupt with other
5399 	 * to avoid interrupt stealing when sharing an interrupt with other
5400 	 */
5401 	if (Adapter->shared.mac.type < e1000_82571)
5402 		intr_handler = (ddi_intr_handler_t *)e1000g_intr;
5403 	else
5404 		intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress;
5405 
5406 	/* Call ddi_intr_add_handler() */
5407 	for (x = 0; x < actual; x++) {
5408 		rc = ddi_intr_add_handler(Adapter->htable[x],
5409 		    intr_handler, (caddr_t)Adapter, NULL);
5410 
5411 		if (rc != DDI_SUCCESS) {
5412 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5413 			    "Add interrupt handler failed: %d\n", rc);
5414 
5415 			/* Remove already added handler */
5416 			for (y = 0; y < x; y++)
5417 				(void) ddi_intr_remove_handler(
5418 				    Adapter->htable[y]);
5419 
5420 			/* Free already allocated intr */
5421 			for (y = 0; y < actual; y++)
5422 				(void) ddi_intr_free(Adapter->htable[y]);
5423 
5424 			kmem_free(Adapter->htable, Adapter->intr_size);
5425 			return (DDI_FAILURE);
5426 		}
5427 	}
5428 
5429 	rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap);
5430 
5431 	if (rc != DDI_SUCCESS) {
5432 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5433 		    "Get interrupt cap failed: %d\n", rc);
5434 
5435 		/* Free already allocated intr */
5436 		for (y = 0; y < actual; y++) {
5437 			(void) ddi_intr_remove_handler(Adapter->htable[y]);
5438 			(void) ddi_intr_free(Adapter->htable[y]);
5439 		}
5440 
5441 		kmem_free(Adapter->htable, Adapter->intr_size);
5442 		return (DDI_FAILURE);
5443 	}
5444 
5445 	return (DDI_SUCCESS);
5446 }
5447 
5448 static int
5449 e1000g_rem_intrs(struct e1000g *Adapter)
5450 {
5451 	int x;
5452 	int rc;
5453 
5454 	for (x = 0; x < Adapter->intr_cnt; x++) {
5455 		rc = ddi_intr_remove_handler(Adapter->htable[x]);
5456 		if (rc != DDI_SUCCESS) {
5457 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5458 			    "Remove intr handler failed: %d\n", rc);
5459 			return (DDI_FAILURE);
5460 		}
5461 
5462 		rc = ddi_intr_free(Adapter->htable[x]);
5463 		if (rc != DDI_SUCCESS) {
5464 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5465 			    "Free intr failed: %d\n", rc);
5466 			return (DDI_FAILURE);
5467 		}
5468 	}
5469 
5470 	kmem_free(Adapter->htable, Adapter->intr_size);
5471 
5472 	return (DDI_SUCCESS);
5473 }
5474 
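/*
 * e1000g_enable_intrs - enable the allocated interrupts, using block
 * enable when the DDI_INTR_FLAG_BLOCK capability is present.
 */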
5475 static int
5476 e1000g_enable_intrs(struct e1000g *Adapter)
5477 {
5478 	int x;
5479 	int rc;
5480 
5481 	/* Enable interrupts */
5482 	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
5483 		/* Call ddi_intr_block_enable() for MSI */
5484 		rc = ddi_intr_block_enable(Adapter->htable,
5485 		    Adapter->intr_cnt);
5486 		if (rc != DDI_SUCCESS) {
5487 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5488 			    "Enable block intr failed: %d\n", rc);
5489 			return (DDI_FAILURE);
5490 		}
5491 	} else {
5492 		/* Call ddi_intr_enable() for legacy or non-block-capable MSI interrupts */
5493 		for (x = 0; x < Adapter->intr_cnt; x++) {
5494 			rc = ddi_intr_enable(Adapter->htable[x]);
5495 			if (rc != DDI_SUCCESS) {
5496 				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5497 				    "Enable intr failed: %d\n", rc);
5498 				return (DDI_FAILURE);
5499 			}
5500 		}
5501 	}
5502 
5503 	return (DDI_SUCCESS);
5504 }
5505 
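/*
 * e1000g_disable_intrs - disable the allocated interrupts, using block
 * disable when the DDI_INTR_FLAG_BLOCK capability is present.
 */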
5506 static int
5507 e1000g_disable_intrs(struct e1000g *Adapter)
5508 {
5509 	int x;
5510 	int rc;
5511 
5512 	/* Disable all interrupts */
5513 	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
5514 		rc = ddi_intr_block_disable(Adapter->htable,
5515 		    Adapter->intr_cnt);
5516 		if (rc != DDI_SUCCESS) {
5517 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5518 			    "Disable block intr failed: %d\n", rc);
5519 			return (DDI_FAILURE);
5520 		}
5521 	} else {
5522 		for (x = 0; x < Adapter->intr_cnt; x++) {
5523 			rc = ddi_intr_disable(Adapter->htable[x]);
5524 			if (rc != DDI_SUCCESS) {
5525 				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5526 				    "Disable intr failed: %d\n", rc);
5527 				return (DDI_FAILURE);
5528 			}
5529 		}
5530 	}
5531 
5532 	return (DDI_SUCCESS);
5533 }
5534 
5535 /*
5536  * e1000g_get_phy_state - get the PHY register state and save it in the adapter
5537  */
5538 static void
5539 e1000g_get_phy_state(struct e1000g *Adapter)
5540 {
5541 	struct e1000_hw *hw = &Adapter->shared;
5542 
5543 	(void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl);
5544 	(void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status);
5545 	(void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &Adapter->phy_an_adv);
5546 	(void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &Adapter->phy_an_exp);
5547 	(void) e1000_read_phy_reg(hw, PHY_EXT_STATUS, &Adapter->phy_ext_status);
5548 	(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &Adapter->phy_1000t_ctrl);
5549 	(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS,
5550 	    &Adapter->phy_1000t_status);
5551 	(void) e1000_read_phy_reg(hw, PHY_LP_ABILITY, &Adapter->phy_lp_able);
5552 
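	/* Capabilities reported by the local PHY */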
5553 	Adapter->param_autoneg_cap =
5554 	    (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
5555 	Adapter->param_pause_cap =
5556 	    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
5557 	Adapter->param_asym_pause_cap =
5558 	    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
5559 	Adapter->param_1000fdx_cap =
5560 	    ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5561 	    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
5562 	Adapter->param_1000hdx_cap =
5563 	    ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
5564 	    (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
5565 	Adapter->param_100t4_cap =
5566 	    (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
5567 	Adapter->param_100fdx_cap =
5568 	    ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5569 	    (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
5570 	Adapter->param_100hdx_cap =
5571 	    ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
5572 	    (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
5573 	Adapter->param_10fdx_cap =
5574 	    (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
5575 	Adapter->param_10hdx_cap =
5576 	    (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
5577 
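	/* Link parameters currently advertised by the local PHY */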
5578 	Adapter->param_adv_autoneg = hw->mac.autoneg;
5579 	Adapter->param_adv_pause =
5580 	    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
5581 	Adapter->param_adv_asym_pause =
5582 	    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
5583 	Adapter->param_adv_1000hdx =
5584 	    (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
5585 	Adapter->param_adv_100t4 =
5586 	    (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
5587 	if (Adapter->param_adv_autoneg == 1) {
5588 		Adapter->param_adv_1000fdx =
5589 		    (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS) ? 1 : 0;
5590 		Adapter->param_adv_100fdx =
5591 		    (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS) ? 1 : 0;
5592 		Adapter->param_adv_100hdx =
5593 		    (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS) ? 1 : 0;
5594 		Adapter->param_adv_10fdx =
5595 		    (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
5596 		Adapter->param_adv_10hdx =
5597 		    (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
5598 	}
5599 
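	/* Abilities reported by the link partner */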
5600 	Adapter->param_lp_autoneg =
5601 	    (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
5602 	Adapter->param_lp_pause =
5603 	    (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
5604 	Adapter->param_lp_asym_pause =
5605 	    (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
5606 	Adapter->param_lp_1000fdx =
5607 	    (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
5608 	Adapter->param_lp_1000hdx =
5609 	    (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
5610 	Adapter->param_lp_100t4 =
5611 	    (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
5612 	Adapter->param_lp_100fdx =
5613 	    (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
5614 	Adapter->param_lp_100hdx =
5615 	    (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
5616 	Adapter->param_lp_10fdx =
5617 	    (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
5618 	Adapter->param_lp_10hdx =
5619 	    (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
5620 }
5621 
5622 /*
5623  * FMA support
5624  */
5625 
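/*
 * e1000g_check_acc_handle - check and clear the FMA error state of a
 * register access handle; returns the fault status.
 */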
5626 int
5627 e1000g_check_acc_handle(ddi_acc_handle_t handle)
5628 {
5629 	ddi_fm_error_t de;
5630 
5631 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5632 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
5633 	return (de.fme_status);
5634 }
5635 
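/*
 * e1000g_check_dma_handle - check the FMA error state of a DMA handle;
 * returns the fault status.
 */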
5636 int
5637 e1000g_check_dma_handle(ddi_dma_handle_t handle)
5638 {
5639 	ddi_fm_error_t de;
5640 
5641 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
5642 	return (de.fme_status);
5643 }
5644 
5645 /*
5646  * The IO fault service error handling callback function
5647  */
5648 /* ARGSUSED2 */
5649 static int
5650 e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
5651 {
5652 	/*
5653 	 * As the driver can always deal with an error in any DMA or
5654 	 * access handle, we can just return the fme_status value.
5655 	 */
5656 	pci_ereport_post(dip, err, NULL);
5657 	return (err->fme_status);
5658 }
5659 
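/*
 * e1000g_fm_init - register the configured FMA capabilities (access
 * checking, DMA checking, ereports and the error callback) with the
 * IO Fault Services framework.
 */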
5660 static void
5661 e1000g_fm_init(struct e1000g *Adapter)
5662 {
5663 	ddi_iblock_cookie_t iblk;
5664 	int fma_acc_flag, fma_dma_flag;
5665 
5666 	/* Only register with IO Fault Services if we have some capability */
5667 	if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
5668 		e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
5669 		fma_acc_flag = 1;
5670 	} else {
5671 		e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
5672 		fma_acc_flag = 0;
5673 	}
5674 
5675 	if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
5676 		fma_dma_flag = 1;
5677 	} else {
5678 		fma_dma_flag = 0;
5679 	}
5680 
5681 	(void) e1000g_set_fma_flags(Adapter, fma_acc_flag, fma_dma_flag);
5682 
5683 	if (Adapter->fm_capabilities) {
5684 
5685 		/* Register capabilities with IO Fault Services */
5686 		ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk);
5687 
5688 		/*
5689 		 * Initialize pci ereport capabilities if ereport capable
5690 		 */
5691 		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
5692 		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5693 			pci_ereport_setup(Adapter->dip);
5694 
5695 		/*
5696 		 * Register error callback if error callback capable
5697 		 */
5698 		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5699 			ddi_fm_handler_register(Adapter->dip,
5700 			    e1000g_fm_error_cb, (void*) Adapter);
5701 	}
5702 }
5703 
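/*
 * e1000g_fm_fini - release the resources registered by e1000g_fm_init()
 * and unregister from the IO Fault Services framework.
 */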
5704 static void
5705 e1000g_fm_fini(struct e1000g *Adapter)
5706 {
5707 	/* Only unregister FMA capabilities if we registered some */
5708 	if (Adapter->fm_capabilities) {
5709 
5710 		/*
5711 		 * Release any resources allocated by pci_ereport_setup()
5712 		 */
5713 		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
5714 		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5715 			pci_ereport_teardown(Adapter->dip);
5716 
5717 		/*
5718 		 * Un-register error callback if error callback capable
5719 		 */
5720 		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5721 			ddi_fm_handler_unregister(Adapter->dip);
5722 
5723 		/* Unregister from IO Fault Services */
5724 		ddi_fm_fini(Adapter->dip);
5725 	}
5726 }
5727 
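/*
 * e1000g_fm_ereport - post an ereport for the given device fault class
 * if the driver is ereport capable.
 */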
5728 void
5729 e1000g_fm_ereport(struct e1000g *Adapter, char *detail)
5730 {
5731 	uint64_t ena;
5732 	char buf[FM_MAX_CLASS];
5733 
5734 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
5735 	ena = fm_ena_generate(0, FM_ENA_FMT1);
5736 	if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) {
5737 		ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP,
5738 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
5739 	}
5740 }
5741 
5742 /*
5743  * quiesce(9E) entry point.
5744  *
5745  * This function is called when the system is single-threaded at high
5746  * PIL with preemption disabled. Therefore, this function must not be
5747  * PIL with preemption disabled. Therefore, this function must not
5748  * block.
5749  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5750  * DDI_FAILURE indicates an error condition and should almost never happen.
5751  */
5752 static int
5753 e1000g_quiesce(dev_info_t *devinfo)
5754 {
5755 	struct e1000g *Adapter;
5756 
5757 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
5758 
5759 	if (Adapter == NULL)
5760 		return (DDI_FAILURE);
5761 
5762 	e1000g_clear_all_interrupts(Adapter);
5763 
5764 	(void) e1000_reset_hw(&Adapter->shared);
5765 
5766 	/* Setup our HW Tx Head & Tail descriptor pointers */
5767 	E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
5768 	E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
5769 
5770 	/* Setup our HW Rx Head & Tail descriptor pointers */
5771 	E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0);
5772 	E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0);
5773 
5774 	return (DDI_SUCCESS);
5775 }
5776 
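/*
 * e1000g_get_def_val - get the default value of a MAC property, based
 * mainly on the PHY capabilities saved in the adapter.
 */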
5777 static int
5778 e1000g_get_def_val(struct e1000g *Adapter, mac_prop_id_t pr_num,
5779     uint_t pr_valsize, void *pr_val)
5780 {
5781 	link_flowctrl_t fl;
5782 	int err = 0;
5783 
5784 	ASSERT(pr_valsize > 0);
5785 	switch (pr_num) {
5786 	case MAC_PROP_AUTONEG:
5787 		*(uint8_t *)pr_val =
5788 		    ((Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0);
5789 		break;
5790 	case MAC_PROP_FLOWCTRL:
5791 		if (pr_valsize < sizeof (link_flowctrl_t))
5792 			return (EINVAL);
5793 		fl = LINK_FLOWCTRL_BI;
5794 		bcopy(&fl, pr_val, sizeof (fl));
5795 		break;
5796 	case MAC_PROP_ADV_1000FDX_CAP:
5797 	case MAC_PROP_EN_1000FDX_CAP:
5798 		*(uint8_t *)pr_val =
5799 		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5800 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
5801 		break;
5802 	case MAC_PROP_ADV_1000HDX_CAP:
5803 	case MAC_PROP_EN_1000HDX_CAP:
5804 		*(uint8_t *)pr_val =
5805 		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
5806 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
5807 		break;
5808 	case MAC_PROP_ADV_100FDX_CAP:
5809 	case MAC_PROP_EN_100FDX_CAP:
5810 		*(uint8_t *)pr_val =
5811 		    ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5812 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
5813 		break;
5814 	case MAC_PROP_ADV_100HDX_CAP:
5815 	case MAC_PROP_EN_100HDX_CAP:
5816 		*(uint8_t *)pr_val =
5817 		    ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
5818 		    (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
5819 		break;
5820 	case MAC_PROP_ADV_10FDX_CAP:
5821 	case MAC_PROP_EN_10FDX_CAP:
5822 		*(uint8_t *)pr_val =
5823 		    (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
5824 		break;
5825 	case MAC_PROP_ADV_10HDX_CAP:
5826 	case MAC_PROP_EN_10HDX_CAP:
5827 		*(uint8_t *)pr_val =
5828 		    (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
5829 		break;
5830 	default:
5831 		err = ENOTSUP;
5832 		break;
5833 	}
5834 	return (err);
5835 }
5836 
5837 /*
5838  * e1000g_param_sync - synchronize the adv* and en* parameters.
5839  *
5840  * See comments in <sys/dld.h> for details of the *_en_*
5841  * parameters. Setting the adv parameters via ndd synchronizes all
5842  * the en parameters with the e1000g parameters, implicitly
5843  * disabling any settings made via dladm.
5844  */
5845 static void
5846 e1000g_param_sync(struct e1000g *Adapter)
5847 {
5848 	Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx;
5849 	Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx;
5850 	Adapter->param_en_100fdx = Adapter->param_adv_100fdx;
5851 	Adapter->param_en_100hdx = Adapter->param_adv_100hdx;
5852 	Adapter->param_en_10fdx = Adapter->param_adv_10fdx;
5853 	Adapter->param_en_10hdx = Adapter->param_adv_10hdx;
5854 }
5855 
5856 /*
5857  * e1000g_get_driver_control - tell manageability firmware that the driver
5858  * has control.
5859  */
5860 static void
5861 e1000g_get_driver_control(struct e1000_hw *hw)
5862 {
5863 	uint32_t ctrl_ext;
5864 	uint32_t swsm;
5865 
5866 	/* tell manageability firmware the driver has taken over */
5867 	switch (hw->mac.type) {
5868 	case e1000_82573:
5869 		swsm = E1000_READ_REG(hw, E1000_SWSM);
5870 		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);
5871 		break;
5872 	case e1000_82571:
5873 	case e1000_82572:
5874 	case e1000_82574:
5875 	case e1000_80003es2lan:
5876 	case e1000_ich8lan:
5877 	case e1000_ich9lan:
5878 	case e1000_ich10lan:
5879 		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5880 		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
5881 		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
5882 		break;
5883 	default:
5884 		/* no manageability firmware: do nothing */
5885 		break;
5886 	}
5887 }
5888 
5889 /*
5890  * e1000g_release_driver_control - tell manageability firmware that the driver
5891  * has released control.
5892  */
5893 static void
5894 e1000g_release_driver_control(struct e1000_hw *hw)
5895 {
5896 	uint32_t ctrl_ext;
5897 	uint32_t swsm;
5898 
5899 	/* tell manageability firmware the driver has released control */
5900 	switch (hw->mac.type) {
5901 	case e1000_82573:
5902 		swsm = E1000_READ_REG(hw, E1000_SWSM);
5903 		E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
5904 		break;
5905 	case e1000_82571:
5906 	case e1000_82572:
5907 	case e1000_82574:
5908 	case e1000_80003es2lan:
5909 	case e1000_ich8lan:
5910 	case e1000_ich9lan:
5911 	case e1000_ich10lan:
5912 		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5913 		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
5914 		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
5915 		break;
5916 	default:
5917 		/* no manageability firmware: do nothing */
5918 		break;
5919 	}
5920 }
5921 
5922 /*
5923  * Restore e1000g promiscuous mode.
5924  */
5925 static void
5926 e1000g_restore_promisc(struct e1000g *Adapter)
5927 {
5928 	if (Adapter->e1000g_promisc) {
5929 		uint32_t rctl;
5930 
5931 		rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
5932 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
5933 		E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
5934 	}
5935 }
5936