xref: /titanic_52/usr/src/uts/common/io/e1000g/e1000g_main.c (revision 870ad75a2b67a92c3449d93b4fef8a0baa982b4a)
1 /*
2  * This file is provided under a CDDLv1 license.  When using or
3  * redistributing this file, you may do so under this license.
4  * In redistributing this file this license must be included
5  * and no other modification of this header file is permitted.
6  *
7  * CDDL LICENSE SUMMARY
8  *
9  * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
10  *
11  * The contents of this file are subject to the terms of Version
12  * 1.0 of the Common Development and Distribution License (the "License").
13  *
14  * You should have received a copy of the License with this software.
15  * You can obtain a copy of the License at
16  *	http://www.opensolaris.org/os/licensing.
17  * See the License for the specific language governing permissions
18  * and limitations under the License.
19  */
20 
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * **********************************************************************
28  *									*
29  * Module Name:								*
30  *   e1000g_main.c							*
31  *									*
32  * Abstract:								*
33  *   This file contains the interface routines for the Solaris OS.	*
34  *   It has all DDI entry point routines and GLD entry point routines.	*
35  *									*
36  *   This file also contains the routines that take care of		*
37  *   initialization, uninit and interrupt handling.			*
38  *									*
39  * **********************************************************************
40  */
41 
42 #include <sys/dlpi.h>
43 #include <sys/mac.h>
44 #include "e1000g_sw.h"
45 #include "e1000g_debug.h"
46 
47 static char ident[] = "Intel PRO/1000 Ethernet";
48 static char e1000g_string[] = "Intel(R) PRO/1000 Network Connection";
49 static char e1000g_version[] = "Driver Ver. 5.3.9";
50 
51 /*
52  * Prototypes for DDI entry points
53  */
54 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
55 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);
56 static int e1000g_quiesce(dev_info_t *);
57 
58 /*
59  * init and intr routine prototypes
60  */
61 static int e1000g_resume(dev_info_t *);
62 static int e1000g_suspend(dev_info_t *);
63 static uint_t e1000g_intr_pciexpress(caddr_t);
64 static uint_t e1000g_intr(caddr_t);
65 static void e1000g_intr_work(struct e1000g *, uint32_t);
66 #pragma inline(e1000g_intr_work)
67 static int e1000g_init(struct e1000g *);
68 static int e1000g_start(struct e1000g *, boolean_t);
69 static void e1000g_stop(struct e1000g *, boolean_t);
70 static int e1000g_m_start(void *);
71 static void e1000g_m_stop(void *);
72 static int e1000g_m_promisc(void *, boolean_t);
73 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
74 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
75 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
76 static int e1000g_m_setprop(void *, const char *, mac_prop_id_t,
77     uint_t, const void *);
78 static int e1000g_m_getprop(void *, const char *, mac_prop_id_t,
79     uint_t, uint_t, void *, uint_t *);
80 static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t,
81     const void *);
82 static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t,
83     uint_t, void *, uint_t *);
84 static void e1000g_init_locks(struct e1000g *);
85 static void e1000g_destroy_locks(struct e1000g *);
86 static int e1000g_identify_hardware(struct e1000g *);
87 static int e1000g_regs_map(struct e1000g *);
88 static int e1000g_set_driver_params(struct e1000g *);
89 static void e1000g_set_bufsize(struct e1000g *);
90 static int e1000g_register_mac(struct e1000g *);
91 static boolean_t e1000g_rx_drain(struct e1000g *);
92 static boolean_t e1000g_tx_drain(struct e1000g *);
93 static void e1000g_init_unicst(struct e1000g *);
94 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int);
95 static int e1000g_alloc_rx_data(struct e1000g *);
96 
97 /*
98  * Local routines
99  */
100 static boolean_t e1000g_reset_adapter(struct e1000g *);
101 static void e1000g_tx_clean(struct e1000g *);
102 static void e1000g_rx_clean(struct e1000g *);
103 static void e1000g_link_timer(void *);
104 static void e1000g_local_timer(void *);
105 static boolean_t e1000g_link_check(struct e1000g *);
106 static boolean_t e1000g_stall_check(struct e1000g *);
107 static void e1000g_smartspeed(struct e1000g *);
108 static void e1000g_get_conf(struct e1000g *);
109 static int e1000g_get_prop(struct e1000g *, char *, int, int, int);
110 static void enable_watchdog_timer(struct e1000g *);
111 static void disable_watchdog_timer(struct e1000g *);
112 static void start_watchdog_timer(struct e1000g *);
113 static void restart_watchdog_timer(struct e1000g *);
114 static void stop_watchdog_timer(struct e1000g *);
115 static void stop_link_timer(struct e1000g *);
116 static void stop_82547_timer(e1000g_tx_ring_t *);
117 static void e1000g_force_speed_duplex(struct e1000g *);
118 static void e1000g_get_max_frame_size(struct e1000g *);
119 static boolean_t is_valid_mac_addr(uint8_t *);
120 static void e1000g_unattach(dev_info_t *, struct e1000g *);
121 #ifdef E1000G_DEBUG
122 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *);
123 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *);
124 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *);
125 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *);
126 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *,
127     struct iocblk *, mblk_t *);
128 #endif
129 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *,
130     struct iocblk *, mblk_t *);
131 static boolean_t e1000g_check_loopback_support(struct e1000_hw *);
132 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t);
133 static void e1000g_set_internal_loopback(struct e1000g *);
134 static void e1000g_set_external_loopback_1000(struct e1000g *);
135 static void e1000g_set_external_loopback_100(struct e1000g *);
136 static void e1000g_set_external_loopback_10(struct e1000g *);
137 static int e1000g_add_intrs(struct e1000g *);
138 static int e1000g_intr_add(struct e1000g *, int);
139 static int e1000g_rem_intrs(struct e1000g *);
140 static int e1000g_enable_intrs(struct e1000g *);
141 static int e1000g_disable_intrs(struct e1000g *);
142 static boolean_t e1000g_link_up(struct e1000g *);
143 #ifdef __sparc
144 static boolean_t e1000g_find_mac_address(struct e1000g *);
145 #endif
146 static void e1000g_get_phy_state(struct e1000g *);
147 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
148     const void *impl_data);
149 static void e1000g_fm_init(struct e1000g *Adapter);
150 static void e1000g_fm_fini(struct e1000g *Adapter);
151 static int e1000g_get_def_val(struct e1000g *, mac_prop_id_t, uint_t, void *);
152 static void e1000g_param_sync(struct e1000g *);
153 static void e1000g_get_driver_control(struct e1000_hw *);
154 static void e1000g_release_driver_control(struct e1000_hw *);
155 static void e1000g_restore_promisc(struct e1000g *Adapter);
156 
157 mac_priv_prop_t e1000g_priv_props[] = {
158 	{"_tx_bcopy_threshold", MAC_PROP_PERM_RW},
159 	{"_tx_interrupt_enable", MAC_PROP_PERM_RW},
160 	{"_tx_intr_delay", MAC_PROP_PERM_RW},
161 	{"_tx_intr_abs_delay", MAC_PROP_PERM_RW},
162 	{"_rx_bcopy_threshold", MAC_PROP_PERM_RW},
163 	{"_max_num_rcv_packets", MAC_PROP_PERM_RW},
164 	{"_rx_intr_delay", MAC_PROP_PERM_RW},
165 	{"_rx_intr_abs_delay", MAC_PROP_PERM_RW},
166 	{"_intr_throttling_rate", MAC_PROP_PERM_RW},
167 	{"_intr_adaptive", MAC_PROP_PERM_RW},
168 	{"_adv_pause_cap", MAC_PROP_PERM_READ},
169 	{"_adv_asym_pause_cap", MAC_PROP_PERM_READ},
170 };
171 #define	E1000G_MAX_PRIV_PROPS	\
172 	(sizeof (e1000g_priv_props)/sizeof (mac_priv_prop_t))
173 
174 
175 static struct cb_ops cb_ws_ops = {
176 	nulldev,		/* cb_open */
177 	nulldev,		/* cb_close */
178 	nodev,			/* cb_strategy */
179 	nodev,			/* cb_print */
180 	nodev,			/* cb_dump */
181 	nodev,			/* cb_read */
182 	nodev,			/* cb_write */
183 	nodev,			/* cb_ioctl */
184 	nodev,			/* cb_devmap */
185 	nodev,			/* cb_mmap */
186 	nodev,			/* cb_segmap */
187 	nochpoll,		/* cb_chpoll */
188 	ddi_prop_op,		/* cb_prop_op */
189 	NULL,			/* cb_stream */
190 	D_MP | D_HOTPLUG,	/* cb_flag */
191 	CB_REV,			/* cb_rev */
192 	nodev,			/* cb_aread */
193 	nodev			/* cb_awrite */
194 };
195 
196 static struct dev_ops ws_ops = {
197 	DEVO_REV,		/* devo_rev */
198 	0,			/* devo_refcnt */
199 	NULL,			/* devo_getinfo */
200 	nulldev,		/* devo_identify */
201 	nulldev,		/* devo_probe */
202 	e1000g_attach,		/* devo_attach */
203 	e1000g_detach,		/* devo_detach */
204 	nodev,			/* devo_reset */
205 	&cb_ws_ops,		/* devo_cb_ops */
206 	NULL,			/* devo_bus_ops */
207 	ddi_power,		/* devo_power */
208 	e1000g_quiesce		/* devo_quiesce */
209 };
210 
211 static struct modldrv modldrv = {
212 	&mod_driverops,		/* Type of module.  This one is a driver */
213 	ident,			/* Description string */
214 	&ws_ops,		/* driver ops */
215 };
216 
217 static struct modlinkage modlinkage = {
218 	MODREV_1, &modldrv, NULL
219 };
220 
221 /* Access attributes for register mapping */
222 static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
223 	DDI_DEVICE_ATTR_V0,
224 	DDI_STRUCTURE_LE_ACC,
225 	DDI_STRICTORDER_ACC,
226 	DDI_FLAGERR_ACC
227 };
228 
229 #define	E1000G_M_CALLBACK_FLAGS \
230 	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
231 
232 static mac_callbacks_t e1000g_m_callbacks = {
233 	E1000G_M_CALLBACK_FLAGS,
234 	e1000g_m_stat,
235 	e1000g_m_start,
236 	e1000g_m_stop,
237 	e1000g_m_promisc,
238 	e1000g_m_multicst,
239 	NULL,
240 	e1000g_m_tx,
241 	e1000g_m_ioctl,
242 	e1000g_m_getcapab,
243 	NULL,
244 	NULL,
245 	e1000g_m_setprop,
246 	e1000g_m_getprop
247 };
248 
249 /*
250  * Global variables
251  */
252 uint32_t e1000g_mblks_pending = 0;
253 /*
254  * Workaround for Dynamic Reconfiguration support, for x86 platform only.
255  * Here we maintain a private dev_info list if e1000g_force_detach is
256  * enabled. If we force the driver to detach while there are still some
257  * rx buffers retained in the upper layer, we have to keep a copy of the
258  * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
259  * structure will be freed after the driver is detached. However, when we
260  * finally free those rx buffers released by the upper layer, we need to
261  * refer to the dev_info to free the dma buffers. So we save a copy of
262  * the dev_info for this purpose. On x86 platform, we assume this copy
263  * of dev_info is always valid, but on SPARC platform, it could be invalid
264  * after the system board level DR operation. For this reason, the global
265  * variable e1000g_force_detach must be B_FALSE on SPARC platform.
266  */
267 #ifdef __sparc
268 boolean_t e1000g_force_detach = B_FALSE;
269 #else
270 boolean_t e1000g_force_detach = B_TRUE;
271 #endif
272 private_devi_list_t *e1000g_private_devi_list = NULL;
273 
274 /*
275  * The mutex e1000g_rx_detach_lock is defined to protect the processing of
276  * the private dev_info list, and to serialize the processing of rx buffer
277  * freeing and rx buffer recycling.
278  */
279 kmutex_t e1000g_rx_detach_lock;
280 /*
281  * The rwlock e1000g_dma_type_lock is defined to protect the global flag
282  * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
283  * If there are many e1000g instances, the system may run out of DVMA
284  * resources during the initialization of the instances, then the flag will
285  * be changed to "USE_DMA". Because different e1000g instances are initialized
286  * in parallel, we need to use this lock to protect the flag.
287  */
288 krwlock_t e1000g_dma_type_lock;
289 
290 /*
291  * The 82546 chipset is a dual-port device, both the ports share one eeprom.
292  * Based on the information from Intel, the 82546 chipset has a hardware
293  * problem. When one port is being reset and the other port is trying to
294  * access the eeprom, it can cause a system hang or panic. To work around this
295  * hardware problem, we use a global mutex to prevent such operations from
296  * happening simultaneously on different instances. This workaround is applied
297  * to all the devices supported by this driver.
298  */
299 kmutex_t e1000g_nvm_lock;
300 
301 /*
302  * Loadable module configuration entry points for the driver
303  */
304 
305 /*
306  * _init - module initialization
307  */
308 int
309 _init(void)
310 {
311 	int status;
312 
313 	mac_init_ops(&ws_ops, WSNAME);
314 	status = mod_install(&modlinkage);
315 	if (status != DDI_SUCCESS)
316 		mac_fini_ops(&ws_ops);
317 	else {
318 		mutex_init(&e1000g_rx_detach_lock, NULL, MUTEX_DRIVER, NULL);
319 		rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL);
320 		mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL);
321 	}
322 
323 	return (status);
324 }
325 
326 /*
327  * _fini - module finalization
328  */
329 int
330 _fini(void)
331 {
332 	int status;
333 
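	/* Refuse to unload while rx buffers are still held by upper layers */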
334 	if (e1000g_mblks_pending != 0)
335 		return (EBUSY);
336 
337 	status = mod_remove(&modlinkage);
338 	if (status == DDI_SUCCESS) {
339 		mac_fini_ops(&ws_ops);
340 
341 		if (e1000g_force_detach) {
342 			private_devi_list_t *devi_node;
343 
344 			mutex_enter(&e1000g_rx_detach_lock);
345 			while (e1000g_private_devi_list != NULL) {
346 				devi_node = e1000g_private_devi_list;
347 				e1000g_private_devi_list =
348 				    e1000g_private_devi_list->next;
349 
350 				kmem_free(devi_node->priv_dip,
351 				    sizeof (struct dev_info));
352 				kmem_free(devi_node,
353 				    sizeof (private_devi_list_t));
354 			}
355 			mutex_exit(&e1000g_rx_detach_lock);
356 		}
357 
358 		mutex_destroy(&e1000g_rx_detach_lock);
359 		rw_destroy(&e1000g_dma_type_lock);
360 		mutex_destroy(&e1000g_nvm_lock);
361 	}
362 
363 	return (status);
364 }
365 
366 /*
367  * _info - module information
368  */
369 int
370 _info(struct modinfo *modinfop)
371 {
372 	return (mod_info(&modlinkage, modinfop));
373 }
374 
375 /*
376  * e1000g_attach - driver attach
377  *
378  * This function is the device-specific initialization entry
379  * point. This entry point is required and must be written.
380  * The DDI_ATTACH command must be provided in the attach entry
381  * point. When attach() is called with cmd set to DDI_ATTACH,
382  * all normal kernel services (such as kmem_alloc(9F)) are
383  * available for use by the driver.
384  *
385  * The attach() function will be called once for each instance
386  * of  the  device  on  the  system with cmd set to DDI_ATTACH.
387  * Until attach() succeeds, the only driver entry points which
388  * may be called are open(9E) and getinfo(9E).
389  */
390 static int
391 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
392 {
393 	struct e1000g *Adapter;
394 	struct e1000_hw *hw;
395 	struct e1000g_osdep *osdep;
396 	int instance;
397 
398 	switch (cmd) {
399 	default:
400 		e1000g_log(NULL, CE_WARN,
401 		    "Unsupported command sent to e1000g_attach... ");
402 		return (DDI_FAILURE);
403 
404 	case DDI_RESUME:
405 		return (e1000g_resume(devinfo));
406 
407 	case DDI_ATTACH:
408 		break;
409 	}
410 
411 	/*
412 	 * get device instance number
413 	 */
414 	instance = ddi_get_instance(devinfo);
415 
416 	/*
417 	 * Allocate soft data structure
418 	 */
419 	Adapter =
420 	    (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP);
421 
422 	Adapter->dip = devinfo;
423 	Adapter->instance = instance;
424 	Adapter->tx_ring->adapter = Adapter;
425 	Adapter->rx_ring->adapter = Adapter;
426 
427 	hw = &Adapter->shared;
428 	osdep = &Adapter->osdep;
429 	hw->back = osdep;
430 	osdep->adapter = Adapter;
431 
432 	ddi_set_driver_private(devinfo, (caddr_t)Adapter);
433 
434 	/*
435 	 * Initialize for fma support
436 	 */
437 	Adapter->fm_capabilities = e1000g_get_prop(Adapter, "fm-capable",
438 	    0, 0x0f,
439 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
440 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
441 	e1000g_fm_init(Adapter);
442 	Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT;
443 
444 	/*
445 	 * PCI Configure
446 	 */
447 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
448 		e1000g_log(Adapter, CE_WARN, "PCI configuration failed");
449 		goto attach_fail;
450 	}
451 	Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
452 
453 	/*
454 	 * Setup hardware
455 	 */
456 	if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) {
457 		e1000g_log(Adapter, CE_WARN, "Identify hardware failed");
458 		goto attach_fail;
459 	}
460 
461 	/*
462 	 * Map in the device registers.
463 	 */
464 	if (e1000g_regs_map(Adapter) != DDI_SUCCESS) {
465 		e1000g_log(Adapter, CE_WARN, "Mapping registers failed");
466 		goto attach_fail;
467 	}
468 	Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
469 
470 	/*
471 	 * Initialize driver parameters
472 	 */
473 	if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) {
474 		goto attach_fail;
475 	}
476 	Adapter->attach_progress |= ATTACH_PROGRESS_SETUP;
477 
478 	if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
479 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
480 		goto attach_fail;
481 	}
482 
483 	/*
484 	 * Initialize interrupts
485 	 */
486 	if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) {
487 		e1000g_log(Adapter, CE_WARN, "Add interrupts failed");
488 		goto attach_fail;
489 	}
490 	Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
491 
492 	/*
493 	 * Initialize mutexes for this device.
494 	 * Do this before enabling the interrupt handler and
495 	 * registering the softint, to avoid the condition where the
496 	 * interrupt handler could try to use an uninitialized mutex.
497 	 */
498 	e1000g_init_locks(Adapter);
499 	Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS;
500 
501 	/*
502 	 * Initialize Driver Counters
503 	 */
504 	if (e1000g_init_stats(Adapter) != DDI_SUCCESS) {
505 		e1000g_log(Adapter, CE_WARN, "Init stats failed");
506 		goto attach_fail;
507 	}
508 	Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS;
509 
510 	/*
511 	 * Initialize chip hardware and software structures
512 	 */
513 	rw_enter(&Adapter->chip_lock, RW_WRITER);
514 	if (e1000g_init(Adapter) != DDI_SUCCESS) {
515 		rw_exit(&Adapter->chip_lock);
516 		e1000g_log(Adapter, CE_WARN, "Adapter initialization failed");
517 		goto attach_fail;
518 	}
519 	rw_exit(&Adapter->chip_lock);
520 	Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
521 
522 	/*
523 	 * Register the driver to the MAC
524 	 */
525 	if (e1000g_register_mac(Adapter) != DDI_SUCCESS) {
526 		e1000g_log(Adapter, CE_WARN, "Register MAC failed");
527 		goto attach_fail;
528 	}
529 	Adapter->attach_progress |= ATTACH_PROGRESS_MAC;
530 
531 	/*
532 	 * Now that mutex locks are initialized, and the chip is also
533 	 * initialized, enable interrupts.
534 	 */
535 	if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) {
536 		e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed");
537 		goto attach_fail;
538 	}
539 	Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
540 
541 	/*
542 	 * If e1000g_force_detach is enabled, create a new entry in the
543 	 * global private dip list that maintains the priv_dip for DR
544 	 * support after the driver is detached.
545 	 */
546 	if (e1000g_force_detach) {
547 		private_devi_list_t *devi_node;
548 
549 		Adapter->priv_dip =
550 		    kmem_zalloc(sizeof (struct dev_info), KM_SLEEP);
551 		bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip),
552 		    sizeof (struct dev_info));
553 
554 		devi_node =
555 		    kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP);
556 
557 		mutex_enter(&e1000g_rx_detach_lock);
558 		devi_node->priv_dip = Adapter->priv_dip;
559 		devi_node->flag = E1000G_PRIV_DEVI_ATTACH;
560 		devi_node->pending_rx_count = 0;
561 
562 		Adapter->priv_devi_node = devi_node;
563 
564 		if (e1000g_private_devi_list == NULL) {
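		/* Insert the new node at the head of the global list */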
565 			devi_node->prev = NULL;
566 			devi_node->next = NULL;
567 			e1000g_private_devi_list = devi_node;
568 		} else {
569 			devi_node->prev = NULL;
570 			devi_node->next = e1000g_private_devi_list;
571 			e1000g_private_devi_list->prev = devi_node;
572 			e1000g_private_devi_list = devi_node;
573 		}
574 		mutex_exit(&e1000g_rx_detach_lock);
575 	}
576 
577 	cmn_err(CE_CONT, "!%s, %s\n", e1000g_string, e1000g_version);
578 	Adapter->e1000g_state = E1000G_INITIALIZED;
579 
580 	return (DDI_SUCCESS);
581 
582 attach_fail:
583 	e1000g_unattach(devinfo, Adapter);
584 	return (DDI_FAILURE);
585 }
586 
587 static int
588 e1000g_register_mac(struct e1000g *Adapter)
589 {
590 	struct e1000_hw *hw = &Adapter->shared;
591 	mac_register_t *mac;
592 	int err;
593 
594 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
595 		return (DDI_FAILURE);
596 
597 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
598 	mac->m_driver = Adapter;
599 	mac->m_dip = Adapter->dip;
600 	mac->m_src_addr = hw->mac.addr;
601 	mac->m_callbacks = &e1000g_m_callbacks;
602 	mac->m_min_sdu = 0;
603 	mac->m_max_sdu = Adapter->default_mtu;
604 	mac->m_margin = VLAN_TAGSZ;
605 	mac->m_priv_props = e1000g_priv_props;
606 	mac->m_priv_prop_count = E1000G_MAX_PRIV_PROPS;
607 	mac->m_v12n = MAC_VIRT_LEVEL1;
608 
609 	err = mac_register(mac, &Adapter->mh);
610 	mac_free(mac);
611 
612 	return (err == 0 ? DDI_SUCCESS : DDI_FAILURE);
613 }
614 
615 static int
616 e1000g_identify_hardware(struct e1000g *Adapter)
617 {
618 	struct e1000_hw *hw = &Adapter->shared;
619 	struct e1000g_osdep *osdep = &Adapter->osdep;
620 
621 	/* Get the device id */
622 	hw->vendor_id =
623 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
624 	hw->device_id =
625 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
626 	hw->revision_id =
627 	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
628 	hw->subsystem_device_id =
629 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
630 	hw->subsystem_vendor_id =
631 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
632 
633 	if (e1000_set_mac_type(hw) != E1000_SUCCESS) {
634 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
635 		    "MAC type could not be set properly.");
636 		return (DDI_FAILURE);
637 	}
638 
639 	return (DDI_SUCCESS);
640 }
641 
642 static int
643 e1000g_regs_map(struct e1000g *Adapter)
644 {
645 	dev_info_t *devinfo = Adapter->dip;
646 	struct e1000_hw *hw = &Adapter->shared;
647 	struct e1000g_osdep *osdep = &Adapter->osdep;
648 	off_t mem_size;
649 
650 	/* Get size of adapter register memory */
651 	if (ddi_dev_regsize(devinfo, ADAPTER_REG_SET, &mem_size) !=
652 	    DDI_SUCCESS) {
653 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
654 		    "ddi_dev_regsize for registers failed");
655 		return (DDI_FAILURE);
656 	}
657 
658 	/* Map adapter register memory */
659 	if ((ddi_regs_map_setup(devinfo, ADAPTER_REG_SET,
660 	    (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr,
661 	    &osdep->reg_handle)) != DDI_SUCCESS) {
662 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
663 		    "ddi_regs_map_setup for registers failed");
664 		goto regs_map_fail;
665 	}
666 
667 	/* ICH needs to map flash memory */
668 	if (hw->mac.type == e1000_ich8lan ||
669 	    hw->mac.type == e1000_ich9lan ||
670 	    hw->mac.type == e1000_ich10lan) {
671 		/* get flash size */
672 		if (ddi_dev_regsize(devinfo, ICH_FLASH_REG_SET,
673 		    &mem_size) != DDI_SUCCESS) {
674 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
675 			    "ddi_dev_regsize for ICH flash failed");
676 			goto regs_map_fail;
677 		}
678 
679 		/* map flash in */
680 		if (ddi_regs_map_setup(devinfo, ICH_FLASH_REG_SET,
681 		    (caddr_t *)&hw->flash_address, 0,
682 		    mem_size, &e1000g_regs_acc_attr,
683 		    &osdep->ich_flash_handle) != DDI_SUCCESS) {
684 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
685 			    "ddi_regs_map_setup for ICH flash failed");
686 			goto regs_map_fail;
687 		}
688 	}
689 
690 	return (DDI_SUCCESS);
691 
692 regs_map_fail:
693 	if (osdep->reg_handle != NULL)
694 		ddi_regs_map_free(&osdep->reg_handle);
695 
696 	return (DDI_FAILURE);
697 }
698 
699 static int
700 e1000g_set_driver_params(struct e1000g *Adapter)
701 {
702 	struct e1000_hw *hw;
703 	uint32_t mem_bar, io_bar, bar64;
704 
705 	hw = &Adapter->shared;
706 
707 	/* Set MAC type and initialize hardware functions */
708 	if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) {
709 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
710 		    "Could not setup hardware functions");
711 		return (DDI_FAILURE);
712 	}
713 
714 	/* Get bus information */
715 	if (e1000_get_bus_info(hw) != E1000_SUCCESS) {
716 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
717 		    "Could not get bus information");
718 		return (DDI_FAILURE);
719 	}
720 
721 	/* get mem_base addr */
722 	mem_bar = pci_config_get32(Adapter->osdep.cfg_handle, PCI_CONF_BASE0);
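	/* bar64 is non-zero when the memory BAR is a 64-bit BAR */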
723 	bar64 = mem_bar & PCI_BASE_TYPE_ALL;
724 
725 	/* get io_base addr */
726 	if (hw->mac.type >= e1000_82544) {
727 		if (bar64) {
728 			/* IO BAR is different for 64 bit BAR mode */
729 			io_bar = pci_config_get32(Adapter->osdep.cfg_handle,
730 			    PCI_CONF_BASE4);
731 		} else {
732 			/* normal 32-bit BAR mode */
733 			io_bar = pci_config_get32(Adapter->osdep.cfg_handle,
734 			    PCI_CONF_BASE2);
735 		}
736 		hw->io_base = io_bar & PCI_BASE_IO_ADDR_M;
737 	} else {
738 		/* no I/O access for adapters prior to 82544 */
739 		hw->io_base = 0x0;
740 	}
741 
742 	e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word);
743 
744 	hw->mac.autoneg_failed = B_TRUE;
745 
746 	/* Set the autoneg_wait_to_complete flag to B_FALSE */
747 	hw->phy.autoneg_wait_to_complete = B_FALSE;
748 
749 	/* Adaptive IFS related changes */
750 	hw->mac.adaptive_ifs = B_TRUE;
751 
752 	/* Enable phy init script for IGP phy of 82541/82547 */
753 	if ((hw->mac.type == e1000_82547) ||
754 	    (hw->mac.type == e1000_82541) ||
755 	    (hw->mac.type == e1000_82547_rev_2) ||
756 	    (hw->mac.type == e1000_82541_rev_2))
757 		e1000_init_script_state_82541(hw, B_TRUE);
758 
759 	/* Enable the TTL workaround for 82541/82547 */
760 	e1000_set_ttl_workaround_state_82541(hw, B_TRUE);
761 
762 #ifdef __sparc
763 	Adapter->strip_crc = B_TRUE;
764 #else
765 	Adapter->strip_crc = B_FALSE;
766 #endif
767 
768 	/* Get conf file properties */
769 	e1000g_get_conf(Adapter);
770 
771 	/* Get speed/duplex settings in conf file */
772 	hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
773 	hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
774 	e1000g_force_speed_duplex(Adapter);
775 
776 	/* Get Jumbo Frames settings in conf file */
777 	e1000g_get_max_frame_size(Adapter);
778 
779 	/* Set Rx/Tx buffer size */
780 	e1000g_set_bufsize(Adapter);
781 
782 	/* Master Latency Timer */
783 	Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER;
784 
785 	/* copper options */
786 	if (hw->phy.media_type == e1000_media_type_copper) {
787 		hw->phy.mdix = 0;	/* AUTO_ALL_MODES */
788 		hw->phy.disable_polarity_correction = B_FALSE;
789 		hw->phy.ms_type = e1000_ms_hw_default;	/* E1000_MASTER_SLAVE */
790 	}
791 
792 	/* The initial link state should be "unknown" */
793 	Adapter->link_state = LINK_STATE_UNKNOWN;
794 
795 	/* Initialize rx parameters */
796 	Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY;
797 	Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY;
798 
799 	/* Initialize tx parameters */
800 	Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE;
801 	Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD;
802 	Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY;
803 	Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY;
804 
805 	/* Initialize rx parameters */
806 	Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD;
807 
808 	return (DDI_SUCCESS);
809 }
810 
811 static void
812 e1000g_set_bufsize(struct e1000g *Adapter)
813 {
814 	struct e1000_mac_info *mac = &Adapter->shared.mac;
815 	uint64_t rx_size;
816 	uint64_t tx_size;
817 
818 	dev_info_t *devinfo = Adapter->dip;
819 #ifdef __sparc
820 	ulong_t iommu_pagesize;
821 #endif
822 	/* Get the system page size */
823 	Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1);
824 
825 #ifdef __sparc
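	/*
	 * Constrain the page size used for DVMA binding by the IOMMU page
	 * size: never exceed the IOMMU page size, and cap it at 16K.
	 */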
826 	iommu_pagesize = dvma_pagesize(devinfo);
827 	if (iommu_pagesize != 0) {
828 		if (Adapter->sys_page_sz == iommu_pagesize) {
829 			if (iommu_pagesize > 0x4000)
830 				Adapter->sys_page_sz = 0x4000;
831 		} else {
832 			if (Adapter->sys_page_sz > iommu_pagesize)
833 				Adapter->sys_page_sz = iommu_pagesize;
834 		}
835 	}
836 	if (Adapter->lso_enable) {
837 		Adapter->dvma_page_num = E1000_LSO_MAXLEN /
838 		    Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
839 	} else {
840 		Adapter->dvma_page_num = Adapter->max_frame_size /
841 		    Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
842 	}
843 	ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM);
844 #endif
845 
846 	Adapter->min_frame_size = ETHERMIN + ETHERFCSL;
847 
848 	if (Adapter->mem_workaround_82546 &&
849 	    ((mac->type == e1000_82545) ||
850 	    (mac->type == e1000_82546) ||
851 	    (mac->type == e1000_82546_rev_3))) {
852 		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
853 	} else {
854 		rx_size = Adapter->max_frame_size + E1000G_IPALIGNPRESERVEROOM;
855 		if ((rx_size > FRAME_SIZE_UPTO_2K) &&
856 		    (rx_size <= FRAME_SIZE_UPTO_4K))
857 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
858 		else if ((rx_size > FRAME_SIZE_UPTO_4K) &&
859 		    (rx_size <= FRAME_SIZE_UPTO_8K))
860 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
861 		else if ((rx_size > FRAME_SIZE_UPTO_8K) &&
862 		    (rx_size <= FRAME_SIZE_UPTO_16K))
863 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
864 		else
865 			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
866 	}
867 
868 	tx_size = Adapter->max_frame_size;
869 	if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K))
870 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
871 	else if ((tx_size > FRAME_SIZE_UPTO_4K) &&
872 	    (tx_size <= FRAME_SIZE_UPTO_8K))
873 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
874 	else if ((tx_size > FRAME_SIZE_UPTO_8K) &&
875 	    (tx_size <= FRAME_SIZE_UPTO_16K))
876 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
877 	else
878 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;
879 
880 	/*
881 	 * Wiseman adapters require receive buffers aligned on a 256-byte
882 	 * boundary. Livengood does not require this, and forcing the
883 	 * alignment for all hardware would have performance implications,
884 	 * so it is applied only to Wiseman and to jumbo-frame mode; the
885 	 * rest of the time normal frames are fine. Unaligned buffers do
886 	 * carry a potential risk of data loss, so all Wiseman boards get
887 	 * 256-byte aligned buffers.
890 	 */
891 	if (mac->type < e1000_82543)
892 		Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
893 	else
894 		Adapter->rx_buf_align = 1;
895 }
896 
897 /*
898  * e1000g_detach - driver detach
899  *
900  * The detach() function is the complement of the attach routine.
901  * If cmd is set to DDI_DETACH, detach() is used to remove  the
902  * state  associated  with  a  given  instance of a device node
903  * prior to the removal of that instance from the system.
904  *
905  * The detach() function will be called once for each  instance
906  * of the device for which there has been a successful attach()
907  * once there are no longer  any  opens  on  the  device.
908  *
909  * The interrupt routines are disabled, and all memory allocated by
910  * this driver is freed.
911  */
912 static int
913 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
914 {
915 	struct e1000g *Adapter;
916 	boolean_t rx_drain;
917 
918 	switch (cmd) {
919 	default:
920 		return (DDI_FAILURE);
921 
922 	case DDI_SUSPEND:
923 		return (e1000g_suspend(devinfo));
924 
925 	case DDI_DETACH:
926 		break;
927 	}
928 
929 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
930 	if (Adapter == NULL)
931 		return (DDI_FAILURE);
932 
933 	rx_drain = e1000g_rx_drain(Adapter);
934 	if (!rx_drain && !e1000g_force_detach)
935 		return (DDI_FAILURE);
936 
937 	if (mac_unregister(Adapter->mh) != 0) {
938 		e1000g_log(Adapter, CE_WARN, "Unregister MAC failed");
939 		return (DDI_FAILURE);
940 	}
941 	Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC;
942 
943 	ASSERT(!(Adapter->e1000g_state & E1000G_STARTED));
944 
945 	if (!e1000g_force_detach && !rx_drain)
946 		return (DDI_FAILURE);
947 
948 	e1000g_unattach(devinfo, Adapter);
949 
950 	return (DDI_SUCCESS);
951 }
952 
953 /*
954  * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance
955  */
956 void
957 e1000g_free_priv_devi_node(private_devi_list_t *devi_node)
958 {
959 	ASSERT(e1000g_private_devi_list != NULL);
960 	ASSERT(devi_node != NULL);
961 
962 	if (devi_node->prev != NULL)
963 		devi_node->prev->next = devi_node->next;
964 	if (devi_node->next != NULL)
965 		devi_node->next->prev = devi_node->prev;
966 	if (devi_node == e1000g_private_devi_list)
967 		e1000g_private_devi_list = devi_node->next;
968 
969 	kmem_free(devi_node->priv_dip,
970 	    sizeof (struct dev_info));
971 	kmem_free(devi_node,
972 	    sizeof (private_devi_list_t));
973 }
974 
975 static void
976 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter)
977 {
978 	private_devi_list_t *devi_node;
979 	int result;
980 
981 	if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
982 		(void) e1000g_disable_intrs(Adapter);
983 	}
984 
985 	if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) {
986 		(void) mac_unregister(Adapter->mh);
987 	}
988 
989 	if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
990 		(void) e1000g_rem_intrs(Adapter);
991 	}
992 
993 	if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) {
994 		(void) ddi_prop_remove_all(devinfo);
995 	}
996 
997 	if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) {
998 		kstat_delete((kstat_t *)Adapter->e1000g_ksp);
999 	}
1000 
1001 	if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) {
1002 		stop_link_timer(Adapter);
1003 
1004 		mutex_enter(&e1000g_nvm_lock);
1005 		result = e1000_reset_hw(&Adapter->shared);
1006 		mutex_exit(&e1000g_nvm_lock);
1007 
1008 		if (result != E1000_SUCCESS) {
1009 			e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1010 			ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1011 		}
1012 	}
1013 
1014 	if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
1015 		if (Adapter->osdep.reg_handle != NULL)
1016 			ddi_regs_map_free(&Adapter->osdep.reg_handle);
1017 		if (Adapter->osdep.ich_flash_handle != NULL)
1018 			ddi_regs_map_free(&Adapter->osdep.ich_flash_handle);
1019 	}
1020 
1021 	if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
1022 		if (Adapter->osdep.cfg_handle != NULL)
1023 			pci_config_teardown(&Adapter->osdep.cfg_handle);
1024 	}
1025 
1026 	if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) {
1027 		e1000g_destroy_locks(Adapter);
1028 	}
1029 
1030 	if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) {
1031 		e1000g_fm_fini(Adapter);
1032 	}
1033 
1034 	mutex_enter(&e1000g_rx_detach_lock);
1035 	if (e1000g_force_detach && (Adapter->priv_devi_node != NULL)) {
1036 		devi_node = Adapter->priv_devi_node;
1037 		devi_node->flag |= E1000G_PRIV_DEVI_DETACH;
1038 
1039 		if (devi_node->pending_rx_count == 0) {
1040 			e1000g_free_priv_devi_node(devi_node);
1041 		}
1042 	}
1043 	mutex_exit(&e1000g_rx_detach_lock);
1044 
1045 	kmem_free((caddr_t)Adapter, sizeof (struct e1000g));
1046 
1047 	/*
1048 	 * Another hotplug spec requirement:
1049 	 * call ddi_set_driver_private(devinfo, NULL);
1050 	 */
1051 	ddi_set_driver_private(devinfo, NULL);
1052 }
1053 
1054 static void
1055 e1000g_init_locks(struct e1000g *Adapter)
1056 {
1057 	e1000g_tx_ring_t *tx_ring;
1058 	e1000g_rx_ring_t *rx_ring;
1059 
1060 	rw_init(&Adapter->chip_lock, NULL,
1061 	    RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1062 	mutex_init(&Adapter->link_lock, NULL,
1063 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1064 	mutex_init(&Adapter->watchdog_lock, NULL,
1065 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1066 
1067 	tx_ring = Adapter->tx_ring;
1068 
1069 	mutex_init(&tx_ring->tx_lock, NULL,
1070 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1071 	mutex_init(&tx_ring->usedlist_lock, NULL,
1072 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1073 	mutex_init(&tx_ring->freelist_lock, NULL,
1074 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1075 
1076 	rx_ring = Adapter->rx_ring;
1077 
1078 	mutex_init(&rx_ring->rx_lock, NULL,
1079 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1080 }
1081 
1082 static void
1083 e1000g_destroy_locks(struct e1000g *Adapter)
1084 {
1085 	e1000g_tx_ring_t *tx_ring;
1086 	e1000g_rx_ring_t *rx_ring;
1087 
1088 	tx_ring = Adapter->tx_ring;
1089 	mutex_destroy(&tx_ring->tx_lock);
1090 	mutex_destroy(&tx_ring->usedlist_lock);
1091 	mutex_destroy(&tx_ring->freelist_lock);
1092 
1093 	rx_ring = Adapter->rx_ring;
1094 	mutex_destroy(&rx_ring->rx_lock);
1095 
1096 	mutex_destroy(&Adapter->link_lock);
1097 	mutex_destroy(&Adapter->watchdog_lock);
1098 	rw_destroy(&Adapter->chip_lock);
1099 }
1100 
1101 static int
1102 e1000g_resume(dev_info_t *devinfo)
1103 {
1104 	struct e1000g *Adapter;
1105 
1106 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1107 	if (Adapter == NULL)
1108 		e1000g_log(Adapter, CE_PANIC,
1109 		    "Instance pointer is null\n");
1110 
1111 	if (Adapter->dip != devinfo)
1112 		e1000g_log(Adapter, CE_PANIC,
1113 		    "Devinfo is not the same as saved devinfo\n");
1114 
1115 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1116 
1117 	if (Adapter->e1000g_state & E1000G_STARTED) {
1118 		if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
1119 			rw_exit(&Adapter->chip_lock);
1120 			/*
1121 			 * We note the failure, but return success, as the
1122 			 * system is still usable without this controller.
1123 			 */
1124 			e1000g_log(Adapter, CE_WARN,
1125 			    "e1000g_resume: failed to restart controller\n");
1126 			return (DDI_SUCCESS);
1127 		}
1128 		/* Enable and start the watchdog timer */
1129 		enable_watchdog_timer(Adapter);
1130 	}
1131 
1132 	Adapter->e1000g_state &= ~E1000G_SUSPENDED;
1133 
1134 	rw_exit(&Adapter->chip_lock);
1135 
1136 	return (DDI_SUCCESS);
1137 }
1138 
1139 static int
1140 e1000g_suspend(dev_info_t *devinfo)
1141 {
1142 	struct e1000g *Adapter;
1143 
1144 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1145 	if (Adapter == NULL)
1146 		return (DDI_FAILURE);
1147 
1148 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1149 
1150 	Adapter->e1000g_state |= E1000G_SUSPENDED;
1151 
1152 	/* if the port isn't plumbed, we can simply return */
1153 	if (!(Adapter->e1000g_state & E1000G_STARTED)) {
1154 		rw_exit(&Adapter->chip_lock);
1155 		return (DDI_SUCCESS);
1156 	}
1157 
1158 	e1000g_stop(Adapter, B_FALSE);
1159 
1160 	rw_exit(&Adapter->chip_lock);
1161 
1162 	/* Disable and stop all the timers */
1163 	disable_watchdog_timer(Adapter);
1164 	stop_link_timer(Adapter);
1165 	stop_82547_timer(Adapter->tx_ring);
1166 
1167 	return (DDI_SUCCESS);
1168 }
1169 
1170 static int
1171 e1000g_init(struct e1000g *Adapter)
1172 {
1173 	uint32_t pba;
1174 	uint32_t high_water;
1175 	struct e1000_hw *hw;
1176 	clock_t link_timeout;
1177 	int result;
1178 
1179 	hw = &Adapter->shared;
1180 
1181 	/*
1182 	 * reset to put the hardware in a known state
1183 	 * before we try to do anything with the eeprom
1184 	 */
1185 	mutex_enter(&e1000g_nvm_lock);
1186 	result = e1000_reset_hw(hw);
1187 	mutex_exit(&e1000g_nvm_lock);
1188 
1189 	if (result != E1000_SUCCESS) {
1190 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1191 		goto init_fail;
1192 	}
1193 
1194 	mutex_enter(&e1000g_nvm_lock);
1195 	result = e1000_validate_nvm_checksum(hw);
1196 	if (result < E1000_SUCCESS) {
1197 		/*
1198 		 * Some PCI-E parts fail the first check due to
1199 		 * the link being in sleep state.  Call it again;
1200 		 * if it fails a second time, it's a real issue.
1201 		 */
1202 		result = e1000_validate_nvm_checksum(hw);
1203 	}
1204 	mutex_exit(&e1000g_nvm_lock);
1205 
1206 	if (result < E1000_SUCCESS) {
1207 		e1000g_log(Adapter, CE_WARN,
1208 		    "Invalid NVM checksum. Please contact "
1209 		    "the vendor to update the NVM.");
1210 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1211 		goto init_fail;
1212 	}
1213 
1214 	result = 0;
1215 #ifdef __sparc
1216 	/*
1217 	 * First, we try to get the local ethernet address from OBP. If
1218 	 * that fails, we get it from the NIC's EEPROM.
1219 	 */
1220 	result = e1000g_find_mac_address(Adapter);
1221 #endif
1222 	/* Get the local ethernet address. */
1223 	if (!result) {
1224 		mutex_enter(&e1000g_nvm_lock);
1225 		result = e1000_read_mac_addr(hw);
1226 		mutex_exit(&e1000g_nvm_lock);
1227 	}
1228 
1229 	if (result < E1000_SUCCESS) {
1230 		e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
1231 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1232 		goto init_fail;
1233 	}
1234 
1235 	/* check for valid mac address */
1236 	if (!is_valid_mac_addr(hw->mac.addr)) {
1237 		e1000g_log(Adapter, CE_WARN, "Invalid mac addr");
1238 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1239 		goto init_fail;
1240 	}
1241 
1242 	/* Set LAA state for 82571 chipset */
1243 	e1000_set_laa_state_82571(hw, B_TRUE);
1244 
1245 	/* Master Latency Timer implementation */
1246 	if (Adapter->master_latency_timer) {
1247 		pci_config_put8(Adapter->osdep.cfg_handle,
1248 		    PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer);
1249 	}
1250 
1251 	if (hw->mac.type < e1000_82547) {
1252 		/*
1253 		 * Total FIFO is 64K
1254 		 */
1255 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1256 			pba = E1000_PBA_40K;	/* 40K for Rx, 24K for Tx */
1257 		else
1258 			pba = E1000_PBA_48K;	/* 48K for Rx, 16K for Tx */
1259 	} else if ((hw->mac.type == e1000_82571) ||
1260 	    (hw->mac.type == e1000_82572) ||
1261 	    (hw->mac.type == e1000_80003es2lan)) {
1262 		/*
1263 		 * Total FIFO is 48K
1264 		 */
1265 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1266 			pba = E1000_PBA_30K;	/* 30K for Rx, 18K for Tx */
1267 		else
1268 			pba = E1000_PBA_38K;	/* 38K for Rx, 10K for Tx */
1269 	} else if (hw->mac.type == e1000_82573) {
1270 		pba = E1000_PBA_20K;		/* 20K for Rx, 12K for Tx */
1271 	} else if (hw->mac.type == e1000_82574) {
1272 		/* Keep adapter default: 20K for Rx, 20K for Tx */
1273 		pba = E1000_READ_REG(hw, E1000_PBA);
1274 	} else if (hw->mac.type == e1000_ich8lan) {
1275 		pba = E1000_PBA_8K;		/* 8K for Rx, 12K for Tx */
1276 	} else if (hw->mac.type == e1000_ich9lan) {
1277 		pba = E1000_PBA_10K;
1278 	} else if (hw->mac.type == e1000_ich10lan) {
1279 		pba = E1000_PBA_10K;
1280 	} else {
1281 		/*
1282 		 * Total FIFO is 40K
1283 		 */
1284 		if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1285 			pba = E1000_PBA_22K;	/* 22K for Rx, 18K for Tx */
1286 		else
1287 			pba = E1000_PBA_30K;	/* 30K for Rx, 10K for Tx */
1288 	}
1289 	E1000_WRITE_REG(hw, E1000_PBA, pba);
1290 
1291 	/*
1292 	 * These parameters set thresholds for the adapter's generation (Tx)
1293 	 * and response (Rx) to Ethernet PAUSE frames.  These are just threshold
1294 	 * settings.  Flow control is enabled or disabled in the configuration
1295 	 * file.
1296 	 * High-water mark is set down from the top of the rx fifo (not
1297 	 * sensitive to max_frame_size) and low-water is set just below
1298 	 * high-water mark.
1299 	 * The high water mark must be low enough to fit one full frame above
1300 	 * it in the rx FIFO.  Should be the lower of:
1301 	 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early
1302 	 * receive size (assuming ERT set to E1000_ERT_2048), or the full
1303 	 * Rx FIFO size minus one full frame.
1304 	 */
1305 	high_water = min(((pba << 10) * 9 / 10),
1306 	    ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574 ||
1307 	    hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_ich10lan) ?
1308 	    ((pba << 10) - (E1000_ERT_2048 << 3)) :
1309 	    ((pba << 10) - Adapter->max_frame_size)));
1310 
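	/*
	 * Round the high-water mark down to an 8-byte boundary and set the
	 * low-water mark 8 bytes below it.
	 */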
1311 	hw->fc.high_water = high_water & 0xFFF8;
1312 	hw->fc.low_water = hw->fc.high_water - 8;
1313 
1314 	if (hw->mac.type == e1000_80003es2lan)
1315 		hw->fc.pause_time = 0xFFFF;
1316 	else
1317 		hw->fc.pause_time = E1000_FC_PAUSE_TIME;
1318 	hw->fc.send_xon = B_TRUE;
1319 
1320 	/*
1321 	 * Reset the adapter hardware the second time.
1322 	 */
1323 	mutex_enter(&e1000g_nvm_lock);
1324 	result = e1000_reset_hw(hw);
1325 	mutex_exit(&e1000g_nvm_lock);
1326 
1327 	if (result != E1000_SUCCESS) {
1328 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1329 		goto init_fail;
1330 	}
1331 
1332 	/* disable wakeup control by default */
1333 	if (hw->mac.type >= e1000_82544)
1334 		E1000_WRITE_REG(hw, E1000_WUC, 0);
1335 
1336 	/*
1337 	 * MWI should be disabled on 82546.
1338 	 */
1339 	if (hw->mac.type == e1000_82546)
1340 		e1000_pci_clear_mwi(hw);
1341 	else
1342 		e1000_pci_set_mwi(hw);
1343 
1344 	/*
1345 	 * Configure/Initialize hardware
1346 	 */
1347 	mutex_enter(&e1000g_nvm_lock);
1348 	result = e1000_init_hw(hw);
1349 	mutex_exit(&e1000g_nvm_lock);
1350 
1351 	if (result < E1000_SUCCESS) {
1352 		e1000g_log(Adapter, CE_WARN, "Initialize hw failed");
1353 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1354 		goto init_fail;
1355 	}
1356 
1357 	/*
1358 	 * Restore LED settings to the default from EEPROM
1359 	 * to meet the standard for Sun platforms.
1360 	 */
1361 	if ((hw->mac.type != e1000_82541) &&
1362 	    (hw->mac.type != e1000_82541_rev_2) &&
1363 	    (hw->mac.type != e1000_82547) &&
1364 	    (hw->mac.type != e1000_82547_rev_2))
1365 		(void) e1000_cleanup_led(hw);
1366 
1367 	/* Disable Smart Power Down */
1368 	phy_spd_state(hw, B_FALSE);
1369 
1370 	/* Make sure driver has control */
1371 	e1000g_get_driver_control(hw);
1372 
1373 	/*
1374 	 * Initialize unicast addresses.
1375 	 */
1376 	e1000g_init_unicst(Adapter);
1377 
1378 	/*
1379 	 * Setup and initialize the mctable structures.  After this routine
1380 	 * Set up and initialize the mctable structures.  After this routine
1381 	 * completes, the multicast table will be set.
1382 	e1000g_setup_multicast(Adapter);
1383 	msec_delay(5);
1384 
1385 	/*
1386 	 * Implement Adaptive IFS
1387 	 */
1388 	e1000_reset_adaptive(hw);
1389 
1390 	/* Setup Interrupt Throttling Register */
1391 	if (hw->mac.type >= e1000_82540) {
1392 		E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate);
1393 	} else
1394 		Adapter->intr_adaptive = B_FALSE;
1395 
1396 	/* Start the timer for link setup */
1397 	if (hw->mac.autoneg)
1398 		link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000);
1399 	else
1400 		link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000);
1401 
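	/*
	 * If the hardware waits for auto-negotiation to complete, link setup
	 * is already resolved here; otherwise arm a timer so link setup is
	 * considered complete once the negotiation time limit expires.
	 */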
1402 	mutex_enter(&Adapter->link_lock);
1403 	if (hw->phy.autoneg_wait_to_complete) {
1404 		Adapter->link_complete = B_TRUE;
1405 	} else {
1406 		Adapter->link_complete = B_FALSE;
1407 		Adapter->link_tid = timeout(e1000g_link_timer,
1408 		    (void *)Adapter, link_timeout);
1409 	}
1410 	mutex_exit(&Adapter->link_lock);
1411 
1412 	/* Enable PCI-Ex master */
1413 	if (hw->bus.type == e1000_bus_type_pci_express) {
1414 		e1000_enable_pciex_master(hw);
1415 	}
1416 
1417 	/* Save the state of the phy */
1418 	e1000g_get_phy_state(Adapter);
1419 
1420 	e1000g_param_sync(Adapter);
1421 
1422 	Adapter->init_count++;
1423 
1424 	if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
1425 		goto init_fail;
1426 	}
1427 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1428 		goto init_fail;
1429 	}
1430 
1431 	Adapter->poll_mode = e1000g_poll_mode;
1432 
1433 	return (DDI_SUCCESS);
1434 
1435 init_fail:
1436 	ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1437 	return (DDI_FAILURE);
1438 }
1439 
1440 static int
1441 e1000g_alloc_rx_data(struct e1000g *Adapter)
1442 {
1443 	e1000g_rx_ring_t *rx_ring;
1444 	e1000g_rx_data_t *rx_data;
1445 
1446 	rx_ring = Adapter->rx_ring;
1447 
1448 	rx_data = kmem_zalloc(sizeof (e1000g_rx_data_t), KM_NOSLEEP);
1449 
1450 	if (rx_data == NULL)
1451 		return (DDI_FAILURE);
1452 
1453 	rx_data->priv_devi_node = Adapter->priv_devi_node;
1454 	rx_data->rx_ring = rx_ring;
1455 
1456 	mutex_init(&rx_data->freelist_lock, NULL,
1457 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1458 	mutex_init(&rx_data->recycle_lock, NULL,
1459 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1460 
1461 	rx_ring->rx_data = rx_data;
1462 
1463 	return (DDI_SUCCESS);
1464 }
1465 
1466 void
1467 e1000g_free_rx_pending_buffers(e1000g_rx_data_t *rx_data)
1468 {
1469 	rx_sw_packet_t *packet, *next_packet;
1470 
1471 	if (rx_data == NULL)
1472 		return;
1473 
1474 	packet = rx_data->packet_area;
1475 	while (packet != NULL) {
1476 		next_packet = packet->next;
1477 		e1000g_free_rx_sw_packet(packet, B_TRUE);
1478 		packet = next_packet;
1479 	}
1480 	rx_data->packet_area = NULL;
1481 }
1482 
1483 void
1484 e1000g_free_rx_data(e1000g_rx_data_t *rx_data)
1485 {
1486 	if (rx_data == NULL)
1487 		return;
1488 
1489 	mutex_destroy(&rx_data->freelist_lock);
1490 	mutex_destroy(&rx_data->recycle_lock);
1491 
1492 	kmem_free(rx_data, sizeof (e1000g_rx_data_t));
1493 }
1494 
1495 /*
1496  * Check if the link is up
1497  */
1498 static boolean_t
1499 e1000g_link_up(struct e1000g *Adapter)
1500 {
1501 	struct e1000_hw *hw;
1502 	boolean_t link_up;
1503 
1504 	hw = &Adapter->shared;
1505 
1506 	(void) e1000_check_for_link(hw);
1507 
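	/*
	 * The link is considered up if the STATUS register reports link-up,
	 * if an 82543 has no link-status check pending, or if an internal
	 * serdes interface reports that the serdes link is up.
	 */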
1508 	if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU) ||
1509 	    ((!hw->mac.get_link_status) && (hw->mac.type == e1000_82543)) ||
1510 	    ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
1511 	    (hw->mac.serdes_has_link))) {
1512 		link_up = B_TRUE;
1513 	} else {
1514 		link_up = B_FALSE;
1515 	}
1516 
1517 	return (link_up);
1518 }
1519 
1520 static void
1521 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
1522 {
1523 	struct iocblk *iocp;
1524 	struct e1000g *e1000gp;
1525 	enum ioc_reply status;
1526 
1527 	iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
1528 	iocp->ioc_error = 0;
1529 	e1000gp = (struct e1000g *)arg;
1530 
1531 	ASSERT(e1000gp);
1532 	if (e1000gp == NULL) {
1533 		miocnak(q, mp, 0, EINVAL);
1534 		return;
1535 	}
1536 
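	/* Reject ioctls while the device is suspended */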
1537 	rw_enter(&e1000gp->chip_lock, RW_READER);
1538 	if (e1000gp->e1000g_state & E1000G_SUSPENDED) {
1539 		rw_exit(&e1000gp->chip_lock);
1540 		miocnak(q, mp, 0, EINVAL);
1541 		return;
1542 	}
1543 	rw_exit(&e1000gp->chip_lock);
1544 
1545 	switch (iocp->ioc_cmd) {
1546 
1547 	case LB_GET_INFO_SIZE:
1548 	case LB_GET_INFO:
1549 	case LB_GET_MODE:
1550 	case LB_SET_MODE:
1551 		status = e1000g_loopback_ioctl(e1000gp, iocp, mp);
1552 		break;
1553 
1554 
1555 #ifdef E1000G_DEBUG
1556 	case E1000G_IOC_REG_PEEK:
1557 	case E1000G_IOC_REG_POKE:
1558 		status = e1000g_pp_ioctl(e1000gp, iocp, mp);
1559 		break;
1560 	case E1000G_IOC_CHIP_RESET:
1561 		e1000gp->reset_count++;
1562 		if (e1000g_reset_adapter(e1000gp))
1563 			status = IOC_ACK;
1564 		else
1565 			status = IOC_INVAL;
1566 		break;
1567 #endif
1568 	default:
1569 		status = IOC_INVAL;
1570 		break;
1571 	}
1572 
1573 	/*
1574 	 * Decide how to reply
1575 	 */
1576 	switch (status) {
1577 	default:
1578 	case IOC_INVAL:
1579 		/*
1580 		 * Error, reply with a NAK and EINVAL or the specified error
1581 		 */
1582 		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
1583 		    EINVAL : iocp->ioc_error);
1584 		break;
1585 
1586 	case IOC_DONE:
1587 		/*
1588 		 * OK, reply already sent
1589 		 */
1590 		break;
1591 
1592 	case IOC_ACK:
1593 		/*
1594 		 * OK, reply with an ACK
1595 		 */
1596 		miocack(q, mp, 0, 0);
1597 		break;
1598 
1599 	case IOC_REPLY:
1600 		/*
1601 		 * OK, send prepared reply as ACK or NAK
1602 		 */
1603 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
1604 		    M_IOCACK : M_IOCNAK;
1605 		qreply(q, mp);
1606 		break;
1607 	}
1608 }
1609 
1610 /*
1611  * The default value of e1000g_poll_mode == 0 assumes that the NIC is
1612  * capable of supporting only one interrupt and we shouldn't disable
1613  * the physical interrupt. In this case we let the interrupt come and
1614  * we queue the packets in the rx ring itself in case we are in polling
1615  * mode (better latency but slightly lower performance and a very
1616  * high interrupt count in mpstat, which is harmless).
1617  *
1618  * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt
1619  * which can be disabled in poll mode. This gives better overall
1620  * throughput (compared to the mode above), shows very low interrupt
1621  * count but has slightly higher latency since we pick the packets when
1622  * the poll thread does polling.
1623  *
1624  * Currently, this flag should be enabled only while doing performance
1625  * measurement or when it can be guaranteed that the entire NIC going
1626  * into poll mode will not harm any traffic such as cluster heartbeat, etc.
1627  */
1628 int e1000g_poll_mode = 0;
1629 
1630 /*
1631  * Called from the upper layers when the driver is in polling mode to
1632  * pick up any queued packets. Care should be taken to not block
1633  * this thread.
1634  */
1635 static mblk_t *
e1000g_poll_ring(void *arg, int bytes_to_pickup)
1636 {
1637 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)arg;
1638 	mblk_t			*mp = NULL;
1639 	mblk_t			*tail;
1640 	struct e1000g 		*adapter;
1641 
1642 	adapter = rx_ring->adapter;
1643 
1644 	rw_enter(&adapter->chip_lock, RW_READER);
1645 
1646 	if (adapter->e1000g_state & E1000G_SUSPENDED) {
1647 		rw_exit(&adapter->chip_lock);
1648 		return (NULL);
1649 	}
1650 
1651 	mutex_enter(&rx_ring->rx_lock);
1652 	mp = e1000g_receive(rx_ring, &tail, bytes_to_pickup);
1653 	mutex_exit(&rx_ring->rx_lock);
1654 	rw_exit(&adapter->chip_lock);
1655 	return (mp);
1656 }
1657 
1658 static int
1659 e1000g_m_start(void *arg)
1660 {
1661 	struct e1000g *Adapter = (struct e1000g *)arg;
1662 
1663 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1664 
1665 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
1666 		rw_exit(&Adapter->chip_lock);
1667 		return (ECANCELED);
1668 	}
1669 
1670 	if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
1671 		rw_exit(&Adapter->chip_lock);
1672 		return (ENOTACTIVE);
1673 	}
1674 
1675 	Adapter->e1000g_state |= E1000G_STARTED;
1676 
1677 	rw_exit(&Adapter->chip_lock);
1678 
1679 	/* Enable and start the watchdog timer */
1680 	enable_watchdog_timer(Adapter);
1681 
1682 	return (0);
1683 }
1684 
1685 static int
1686 e1000g_start(struct e1000g *Adapter, boolean_t global)
1687 {
1688 	e1000g_rx_data_t *rx_data;
1689 
1690 	if (global) {
1691 		if (e1000g_alloc_rx_data(Adapter) != DDI_SUCCESS) {
1692 			e1000g_log(Adapter, CE_WARN, "Allocate rx data failed");
1693 			goto start_fail;
1694 		}
1695 
1696 		/* Allocate dma resources for descriptors and buffers */
1697 		if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) {
1698 			e1000g_log(Adapter, CE_WARN,
1699 			    "Alloc DMA resources failed");
1700 			goto start_fail;
1701 		}
1702 		Adapter->rx_buffer_setup = B_FALSE;
1703 	}
1704 
1705 	if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) {
1706 		if (e1000g_init(Adapter) != DDI_SUCCESS) {
1707 			e1000g_log(Adapter, CE_WARN,
1708 			    "Adapter initialization failed");
1709 			goto start_fail;
1710 		}
1711 	}
1712 
1713 	/* Setup and initialize the transmit structures */
1714 	e1000g_tx_setup(Adapter);
1715 	msec_delay(5);
1716 
1717 	/* Setup and initialize the receive structures */
1718 	e1000g_rx_setup(Adapter);
1719 	msec_delay(5);
1720 
1721 	/* Restore the e1000g promiscuous mode */
1722 	e1000g_restore_promisc(Adapter);
1723 
1724 	e1000g_mask_interrupt(Adapter);
1725 
1726 	Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
1727 
1728 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1729 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1730 		goto start_fail;
1731 	}
1732 
1733 	return (DDI_SUCCESS);
1734 
1735 start_fail:
1736 	rx_data = Adapter->rx_ring->rx_data;
1737 
1738 	if (global) {
1739 		e1000g_release_dma_resources(Adapter);
1740 		e1000g_free_rx_pending_buffers(rx_data);
1741 		e1000g_free_rx_data(rx_data);
1742 	}
1743 
1744 	mutex_enter(&e1000g_nvm_lock);
1745 	(void) e1000_reset_hw(&Adapter->shared);
1746 	mutex_exit(&e1000g_nvm_lock);
1747 
1748 	return (DDI_FAILURE);
1749 }
1750 
1751 static void
1752 e1000g_m_stop(void *arg)
1753 {
1754 	struct e1000g *Adapter = (struct e1000g *)arg;
1755 
1756 	/* Drain tx sessions */
1757 	(void) e1000g_tx_drain(Adapter);
1758 
1759 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1760 
1761 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
1762 		rw_exit(&Adapter->chip_lock);
1763 		return;
1764 	}
1765 	Adapter->e1000g_state &= ~E1000G_STARTED;
1766 	e1000g_stop(Adapter, B_TRUE);
1767 
1768 	rw_exit(&Adapter->chip_lock);
1769 
1770 	/* Disable and stop all the timers */
1771 	disable_watchdog_timer(Adapter);
1772 	stop_link_timer(Adapter);
1773 	stop_82547_timer(Adapter->tx_ring);
1774 }
1775 
1776 static void
1777 e1000g_stop(struct e1000g *Adapter, boolean_t global)
1778 {
1779 	private_devi_list_t *devi_node;
1780 	e1000g_rx_data_t *rx_data;
1781 	int result;
1782 
1783 	Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT;
1784 
1785 	/* Stop the chip and release pending resources */
1786 
1787 	/* Tell firmware driver is no longer in control */
1788 	e1000g_release_driver_control(&Adapter->shared);
1789 
1790 	e1000g_clear_all_interrupts(Adapter);
1791 
1792 	mutex_enter(&e1000g_nvm_lock);
1793 	result = e1000_reset_hw(&Adapter->shared);
1794 	mutex_exit(&e1000g_nvm_lock);
1795 
1796 	if (result != E1000_SUCCESS) {
1797 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1798 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1799 	}
1800 
1801 	/* Release resources still held by the TX descriptors */
1802 	e1000g_tx_clean(Adapter);
1803 
1804 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
1805 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1806 
1807 	/* Clean the pending rx jumbo packet fragment */
1808 	e1000g_rx_clean(Adapter);
1809 
1810 	if (global) {
1811 		e1000g_release_dma_resources(Adapter);
1812 
1813 		mutex_enter(&e1000g_rx_detach_lock);
1814 		rx_data = Adapter->rx_ring->rx_data;
1815 		rx_data->flag |= E1000G_RX_STOPPED;
1816 
1817 		if (rx_data->pending_count == 0) {
1818 			e1000g_free_rx_pending_buffers(rx_data);
1819 			e1000g_free_rx_data(rx_data);
1820 		} else {
1821 			devi_node = rx_data->priv_devi_node;
1822 			if (devi_node != NULL)
1823 				atomic_inc_32(&devi_node->pending_rx_count);
1824 			else
1825 				atomic_inc_32(&Adapter->pending_rx_count);
1826 		}
1827 		mutex_exit(&e1000g_rx_detach_lock);
1828 	}
1829 
1830 	if (Adapter->link_state == LINK_STATE_UP) {
1831 		Adapter->link_state = LINK_STATE_UNKNOWN;
1832 		mac_link_update(Adapter->mh, Adapter->link_state);
1833 	}
1834 }
1835 
1836 static void
1837 e1000g_rx_clean(struct e1000g *Adapter)
1838 {
1839 	e1000g_rx_data_t *rx_data = Adapter->rx_ring->rx_data;
1840 
1841 	if (rx_data == NULL)
1842 		return;
1843 
1844 	if (rx_data->rx_mblk != NULL) {
1845 		freemsg(rx_data->rx_mblk);
1846 		rx_data->rx_mblk = NULL;
1847 		rx_data->rx_mblk_tail = NULL;
1848 		rx_data->rx_mblk_len = 0;
1849 	}
1850 }
1851 
1852 static void
1853 e1000g_tx_clean(struct e1000g *Adapter)
1854 {
1855 	e1000g_tx_ring_t *tx_ring;
1856 	p_tx_sw_packet_t packet;
1857 	mblk_t *mp;
1858 	mblk_t *nmp;
1859 	uint32_t packet_count;
1860 
1861 	tx_ring = Adapter->tx_ring;
1862 
1863 	/*
1864 	 * Here we don't need to protect the lists using
1865 	 * the usedlist_lock and freelist_lock, for they
1866 	 * have been protected by the chip_lock.
1867 	 */
1868 	mp = NULL;
1869 	nmp = NULL;
1870 	packet_count = 0;
1871 	packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list);
1872 	while (packet != NULL) {
1873 		if (packet->mp != NULL) {
1874 			/* Assemble the message chain */
1875 			if (mp == NULL) {
1876 				mp = packet->mp;
1877 				nmp = packet->mp;
1878 			} else {
1879 				nmp->b_next = packet->mp;
1880 				nmp = packet->mp;
1881 			}
1882 			/* Disconnect the message from the sw packet */
1883 			packet->mp = NULL;
1884 		}
1885 
1886 		e1000g_free_tx_swpkt(packet);
1887 		packet_count++;
1888 
1889 		packet = (p_tx_sw_packet_t)
1890 		    QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link);
1891 	}
1892 
1893 	if (mp != NULL)
1894 		freemsgchain(mp);
1895 
1896 	if (packet_count > 0) {
1897 		QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list);
1898 		QUEUE_INIT_LIST(&tx_ring->used_list);
1899 
1900 		/* Setup TX descriptor pointers */
1901 		tx_ring->tbd_next = tx_ring->tbd_first;
1902 		tx_ring->tbd_oldest = tx_ring->tbd_first;
1903 
1904 		/* Setup our HW Tx Head & Tail descriptor pointers */
1905 		E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
1906 		E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
1907 	}
1908 }
1909 
1910 static boolean_t
1911 e1000g_tx_drain(struct e1000g *Adapter)
1912 {
1913 	int i;
1914 	boolean_t done;
1915 	e1000g_tx_ring_t *tx_ring;
1916 
1917 	tx_ring = Adapter->tx_ring;
1918 
1919 	/* Allow up to TX_DRAIN_TIME for pending transmits to complete. */
1920 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1921 		mutex_enter(&tx_ring->usedlist_lock);
1922 		done = IS_QUEUE_EMPTY(&tx_ring->used_list);
1923 		mutex_exit(&tx_ring->usedlist_lock);
1924 
1925 		if (done)
1926 			break;
1927 
1928 		msec_delay(1);
1929 	}
1930 
1931 	return (done);
1932 }
1933 
1934 static boolean_t
1935 e1000g_rx_drain(struct e1000g *Adapter)
1936 {
1937 	int i;
1938 	boolean_t done;
1939 
1940 	/*
1941 	 * Allow up to RX_DRAIN_TIME for pending received packets to complete.
1942 	 */
1943 	for (i = 0; i < RX_DRAIN_TIME; i++) {
1944 		done = (Adapter->pending_rx_count == 0);
1945 
1946 		if (done)
1947 			break;
1948 
1949 		msec_delay(1);
1950 	}
1951 
1952 	return (done);
1953 }
1954 
1955 static boolean_t
1956 e1000g_reset_adapter(struct e1000g *Adapter)
1957 {
1958 	/* Disable and stop all the timers */
1959 	disable_watchdog_timer(Adapter);
1960 	stop_link_timer(Adapter);
1961 	stop_82547_timer(Adapter->tx_ring);
1962 
1963 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1964 
1965 	e1000g_stop(Adapter, B_FALSE);
1966 
1967 	if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
1968 		rw_exit(&Adapter->chip_lock);
1969 		e1000g_log(Adapter, CE_WARN, "Reset failed");
1970 		return (B_FALSE);
1971 	}
1972 
1973 	rw_exit(&Adapter->chip_lock);
1974 
1975 	/* Enable and start the watchdog timer */
1976 	enable_watchdog_timer(Adapter);
1977 
1978 	return (B_TRUE);
1979 }
1980 
1981 boolean_t
1982 e1000g_global_reset(struct e1000g *Adapter)
1983 {
1984 	/* Disable and stop all the timers */
1985 	disable_watchdog_timer(Adapter);
1986 	stop_link_timer(Adapter);
1987 	stop_82547_timer(Adapter->tx_ring);
1988 
1989 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1990 
1991 	e1000g_stop(Adapter, B_TRUE);
1992 
1993 	Adapter->init_count = 0;
1994 
1995 	if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
1996 		rw_exit(&Adapter->chip_lock);
1997 		e1000g_log(Adapter, CE_WARN, "Reset failed");
1998 		return (B_FALSE);
1999 	}
2000 
2001 	rw_exit(&Adapter->chip_lock);
2002 
2003 	/* Enable and start the watchdog timer */
2004 	enable_watchdog_timer(Adapter);
2005 
2006 	return (B_TRUE);
2007 }
2008 
2009 /*
2010  * e1000g_intr_pciexpress - ISR for PCI Express chipsets
2011  *
2012  * This interrupt service routine is for PCI-Express adapters.
2013  * The ICR contents are valid only when the E1000_ICR_INT_ASSERTED
2014  * bit is set.
2015  */
2016 static uint_t
2017 e1000g_intr_pciexpress(caddr_t arg)
2018 {
2019 	struct e1000g *Adapter;
2020 	uint32_t icr;
2021 
2022 	Adapter = (struct e1000g *)(uintptr_t)arg;
2023 	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2024 
2025 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2026 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2027 
2028 	if (icr & E1000_ICR_INT_ASSERTED) {
2029 		/*
2030 		 * E1000_ICR_INT_ASSERTED bit was set:
2031 		 * Read(Clear) the ICR, claim this interrupt,
2032 		 * look for work to do.
2033 		 */
2034 		e1000g_intr_work(Adapter, icr);
2035 		return (DDI_INTR_CLAIMED);
2036 	} else {
2037 		/*
2038 		 * E1000_ICR_INT_ASSERTED bit was not set:
2039 		 * Don't claim this interrupt, return immediately.
2040 		 */
2041 		return (DDI_INTR_UNCLAIMED);
2042 	}
2043 }
2044 
2045 /*
2046  * e1000g_intr - ISR for PCI/PCI-X chipsets
2047  *
2048  * This interrupt service routine is for PCI/PCI-X adapters.
2049  * We check the ICR contents regardless of whether the
2050  * E1000_ICR_INT_ASSERTED bit is set.
2051  */
2052 static uint_t
2053 e1000g_intr(caddr_t arg)
2054 {
2055 	struct e1000g *Adapter;
2056 	uint32_t icr;
2057 
2058 	Adapter = (struct e1000g *)(uintptr_t)arg;
2059 	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2060 
2061 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2062 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2063 
2064 	if (icr) {
2065 		/*
2066 		 * Any bit was set in ICR:
2067 		 * Read(Clear) the ICR, claim this interrupt,
2068 		 * look for work to do.
2069 		 */
2070 		e1000g_intr_work(Adapter, icr);
2071 		return (DDI_INTR_CLAIMED);
2072 	} else {
2073 		/*
2074 		 * No bit was set in ICR:
2075 		 * Don't claim this interrupt, return immediately.
2076 		 */
2077 		return (DDI_INTR_UNCLAIMED);
2078 	}
2079 }
2080 
2081 /*
2082  * e1000g_intr_work - actual processing of ISR
2083  *
2084  * Read(clear) the ICR contents and call appropriate interrupt
2085  * processing routines.
2086  */
2087 static void
2088 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr)
2089 {
2090 	struct e1000_hw *hw = &Adapter->shared;
2091 	e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
2093 
2094 	Adapter->rx_pkt_cnt = 0;
2095 	Adapter->tx_pkt_cnt = 0;
2096 
2097 	rw_enter(&Adapter->chip_lock, RW_READER);
2098 
2099 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2100 		rw_exit(&Adapter->chip_lock);
2101 		return;
2102 	}
2103 	/*
2104 	 * Check the e1000g_state flag while holding the chip_lock to ensure
2105 	 * the receive routine will not execute while the adapter is being
2106 	 * reset.
2107 	 */
2108 	if (!(Adapter->e1000g_state & E1000G_STARTED)) {
2109 		rw_exit(&Adapter->chip_lock);
2110 		return;
2111 	}
2112 
2113 	if (icr & E1000_ICR_RXT0) {
2114 		mblk_t			*mp = NULL;
2115 		mblk_t			*tail = NULL;
2116 		e1000g_rx_ring_t	*rx_ring;
2117 
2118 		rx_ring = Adapter->rx_ring;
2119 		mutex_enter(&rx_ring->rx_lock);
2120 		/*
2121 		 * With legacy interrupts it is possible that a single
2122 		 * interrupt covers both Rx and Tx. In that case, if the
2123 		 * poll flag is set, we should not be doing Rx processing
2124 		 * here.
2125 		 */
2126 		if (!rx_ring->poll_flag)
2127 			mp = e1000g_receive(rx_ring, &tail,
2128 			    E1000G_CHAIN_NO_LIMIT);
2129 		mutex_exit(&rx_ring->rx_lock);
2130 		rw_exit(&Adapter->chip_lock);
2131 		if (mp != NULL)
2132 			mac_rx_ring(Adapter->mh, rx_ring->mrh,
2133 			    mp, rx_ring->ring_gen_num);
2134 	} else
2135 		rw_exit(&Adapter->chip_lock);
2136 
2137 	if (icr & E1000_ICR_TXDW) {
2138 		if (!Adapter->tx_intr_enable)
2139 			e1000g_clear_tx_interrupt(Adapter);
2140 
2141 		/* Recycle the tx descriptors */
2142 		rw_enter(&Adapter->chip_lock, RW_READER);
2143 		(void) e1000g_recycle(tx_ring);
2144 		E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr);
2145 		rw_exit(&Adapter->chip_lock);
2146 
2147 		if (tx_ring->resched_needed &&
2148 		    (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) {
2149 			tx_ring->resched_needed = B_FALSE;
2150 			mac_tx_update(Adapter->mh);
2151 			E1000G_STAT(tx_ring->stat_reschedule);
2152 		}
2153 	}
2154 
2155 	/*
2156 	 * The receive sequence error (RXSEQ) and link status change (LSC)
2157 	 * interrupts are checked to detect that the cable has been pulled
2158 	 * out. For the Wiseman 2.0 silicon, the receive sequence error
2159 	 * interrupt is an indication that the cable is not connected.
2160 	 */
2161 	if ((icr & E1000_ICR_RXSEQ) ||
2162 	    (icr & E1000_ICR_LSC) ||
2163 	    (icr & E1000_ICR_GPI_EN1)) {
2164 		boolean_t link_changed;
2165 		timeout_id_t tid = 0;
2166 
2167 		stop_watchdog_timer(Adapter);
2168 
2169 		rw_enter(&Adapter->chip_lock, RW_WRITER);
2170 
2171 		/*
2172 		 * Because we got a link-status-change interrupt, force
2173 		 * e1000_check_for_link() to look at phy
2174 		 */
2175 		Adapter->shared.mac.get_link_status = B_TRUE;
2176 
2177 		/* e1000g_link_check takes care of link status change */
2178 		link_changed = e1000g_link_check(Adapter);
2179 
2180 		/* Get new phy state */
2181 		e1000g_get_phy_state(Adapter);
2182 
2183 		/*
2184 		 * If the link timer has not timed out, we'll not notify
2185 		 * the upper layer with any link state until the link is up.
2186 		 */
2187 		if (link_changed && !Adapter->link_complete) {
2188 			if (Adapter->link_state == LINK_STATE_UP) {
2189 				mutex_enter(&Adapter->link_lock);
2190 				Adapter->link_complete = B_TRUE;
2191 				tid = Adapter->link_tid;
2192 				Adapter->link_tid = 0;
2193 				mutex_exit(&Adapter->link_lock);
2194 			} else {
2195 				link_changed = B_FALSE;
2196 			}
2197 		}
2198 		rw_exit(&Adapter->chip_lock);
2199 
2200 		if (link_changed) {
2201 			if (tid != 0)
2202 				(void) untimeout(tid);
2203 
2204 			/*
2205 			 * Workaround for esb2. Data stuck in fifo on a link
2206 			 * down event. Stop receiver here and reset in watchdog.
2207 			 */
2208 			if ((Adapter->link_state == LINK_STATE_DOWN) &&
2209 			    (Adapter->shared.mac.type == e1000_80003es2lan)) {
2210 				uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
2211 				E1000_WRITE_REG(hw, E1000_RCTL,
2212 				    rctl & ~E1000_RCTL_EN);
2213 				e1000g_log(Adapter, CE_WARN,
2214 				    "ESB2 receiver disabled");
2215 				Adapter->esb2_workaround = B_TRUE;
2216 			}
2217 			if (!Adapter->reset_flag)
2218 				mac_link_update(Adapter->mh,
2219 				    Adapter->link_state);
2220 			if (Adapter->link_state == LINK_STATE_UP)
2221 				Adapter->reset_flag = B_FALSE;
2222 		}
2223 
2224 		start_watchdog_timer(Adapter);
2225 	}
2226 }
2227 
2228 static void
2229 e1000g_init_unicst(struct e1000g *Adapter)
2230 {
2231 	struct e1000_hw *hw;
2232 	int slot;
2233 
2234 	hw = &Adapter->shared;
2235 
2236 	if (Adapter->init_count == 0) {
2237 		/* Initialize the multiple unicast addresses */
2238 		Adapter->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
2239 
2240 		/* Workaround for an erratum of the 82571 chipset */
2241 		if ((hw->mac.type == e1000_82571) &&
2242 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2243 			Adapter->unicst_total--;
2244 
2245 		Adapter->unicst_avail = Adapter->unicst_total;
2246 
2247 		for (slot = 0; slot < Adapter->unicst_total; slot++) {
2248 			/* Clear both the flag and MAC address */
2249 			Adapter->unicst_addr[slot].reg.high = 0;
2250 			Adapter->unicst_addr[slot].reg.low = 0;
2251 		}
2252 	} else {
2253 		/* Workaround for an erratum of the 82571 chipset */
2254 		if ((hw->mac.type == e1000_82571) &&
2255 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2256 			e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY);
2257 
2258 		/* Re-configure the RAR registers */
2259 		for (slot = 0; slot < Adapter->unicst_total; slot++)
2260 			if (Adapter->unicst_addr[slot].mac.set == 1)
2261 				e1000_rar_set(hw,
2262 				    Adapter->unicst_addr[slot].mac.addr, slot);
2263 	}
2264 
2265 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2266 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2267 }
2268 
2269 static int
2270 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr,
2271     int slot)
2272 {
2273 	struct e1000_hw *hw;
2274 
2275 	hw = &Adapter->shared;
2276 
2277 	/*
2278 	 * The first revision of Wiseman silicon (rev 2.0) has an errata
2279 	 * that requires the receiver to be in reset when any of the
2280 	 * receive address registers (RAR regs) are accessed.  The first
2281 	 * rev of Wiseman silicon also requires MWI to be disabled when
2282 	 * a global reset or a receive reset is issued.  So before we
2283 	 * initialize the RARs, we check the rev of the Wiseman controller
2284 	 * and work around any necessary HW errata.
2285 	 */
2286 	if ((hw->mac.type == e1000_82542) &&
2287 	    (hw->revision_id == E1000_REVISION_2)) {
2288 		e1000_pci_clear_mwi(hw);
2289 		E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2290 		msec_delay(5);
2291 	}
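	/*
	 * Each receive address (RAR) slot is a pair of 32-bit registers in
	 * the RA array: index (slot << 1) holds the low address word and
	 * index (slot << 1) + 1 holds the high word with the valid bit, so
	 * clearing a slot means zeroing both halves.
	 */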
2292 	if (mac_addr == NULL) {
2293 		E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0);
2294 		E1000_WRITE_FLUSH(hw);
2295 		E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0);
2296 		E1000_WRITE_FLUSH(hw);
2297 		/* Clear both the flag and MAC address */
2298 		Adapter->unicst_addr[slot].reg.high = 0;
2299 		Adapter->unicst_addr[slot].reg.low = 0;
2300 	} else {
2301 		bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr,
2302 		    ETHERADDRL);
2303 		e1000_rar_set(hw, (uint8_t *)mac_addr, slot);
2304 		Adapter->unicst_addr[slot].mac.set = 1;
2305 	}
2306 
2307 	/* Workaround for an erratum of the 82571 chipset */
2308 	if (slot == 0) {
2309 		if ((hw->mac.type == e1000_82571) &&
2310 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2311 			if (mac_addr == NULL) {
2312 				E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2313 				    slot << 1, 0);
2314 				E1000_WRITE_FLUSH(hw);
2315 				E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2316 				    (slot << 1) + 1, 0);
2317 				E1000_WRITE_FLUSH(hw);
2318 			} else {
2319 				e1000_rar_set(hw, (uint8_t *)mac_addr,
2320 				    LAST_RAR_ENTRY);
2321 			}
2322 	}
2323 
2324 	/*
2325 	 * If we are using Wiseman rev 2.0 silicon, we will have previously
2326 	 * put the receiver in reset, and disabled MWI, to work around some
2327 	 * HW errata.  Now we should take the receiver out of reset, and
2328 	 * re-enable MWI if it was previously enabled by the PCI BIOS.
2329 	 */
2330 	if ((hw->mac.type == e1000_82542) &&
2331 	    (hw->revision_id == E1000_REVISION_2)) {
2332 		E1000_WRITE_REG(hw, E1000_RCTL, 0);
2333 		msec_delay(1);
2334 		if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2335 			e1000_pci_set_mwi(hw);
2336 		e1000g_rx_setup(Adapter);
2337 	}
2338 
2339 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2340 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2341 		return (EIO);
2342 	}
2343 
2344 	return (0);
2345 }
2346 
2347 static int
2348 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr)
2349 {
2350 	struct e1000_hw *hw = &Adapter->shared;
2351 	int res = 0;
2352 
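	/*
	 * The low-order bit of the first address octet is the IEEE 802.3
	 * group bit; it must be set for a valid multicast address.
	 */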
2353 	if ((multiaddr[0] & 01) == 0) {
2354 		res = EINVAL;
2355 		goto done;
2356 	}
2357 
2358 	if (Adapter->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2359 		res = ENOENT;
2360 		goto done;
2361 	}
2362 
2363 	bcopy(multiaddr,
2364 	    &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL);
2365 	Adapter->mcast_count++;
2366 
2367 	/*
2368 	 * Update the MC table in the hardware
2369 	 */
2370 	e1000g_clear_interrupt(Adapter);
2371 
2372 	e1000g_setup_multicast(Adapter);
2373 
2374 	if ((hw->mac.type == e1000_82542) &&
2375 	    (hw->revision_id == E1000_REVISION_2))
2376 		e1000g_rx_setup(Adapter);
2377 
2378 	e1000g_mask_interrupt(Adapter);
2379 
2380 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2381 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2382 		res = EIO;
2383 	}
2384 
2385 done:
2386 	return (res);
2387 }
2388 
2389 static int
2390 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr)
2391 {
2392 	struct e1000_hw *hw = &Adapter->shared;
2393 	unsigned i;
2394 
2395 	for (i = 0; i < Adapter->mcast_count; i++) {
2396 		if (bcmp(multiaddr, &Adapter->mcast_table[i],
2397 		    ETHERADDRL) == 0) {
2398 			for (i++; i < Adapter->mcast_count; i++) {
2399 				Adapter->mcast_table[i - 1] =
2400 				    Adapter->mcast_table[i];
2401 			}
2402 			Adapter->mcast_count--;
2403 			break;
2404 		}
2405 	}
2406 
2407 	/*
2408 	 * Update the MC table in the hardware
2409 	 */
2410 	e1000g_clear_interrupt(Adapter);
2411 
2412 	e1000g_setup_multicast(Adapter);
2413 
2414 	if ((hw->mac.type == e1000_82542) &&
2415 	    (hw->revision_id == E1000_REVISION_2))
2416 		e1000g_rx_setup(Adapter);
2417 
2418 	e1000g_mask_interrupt(Adapter);
2419 
2420 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2421 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2422 		return (EIO);
2423 	}
2424 
2425 	return (0);
2426 }
2427 
2428 /*
2429  * e1000g_setup_multicast - setup multicast data structures
2430  *
2431  * This routine initializes all of the multicast related structures.
2432  */
2433 void
2434 e1000g_setup_multicast(struct e1000g *Adapter)
2435 {
2436 	uint8_t *mc_addr_list;
2437 	uint32_t mc_addr_count;
2438 	uint32_t rctl;
2439 	struct e1000_hw *hw;
2440 
2441 	hw = &Adapter->shared;
2442 
2443 	/*
2444 	 * The e1000g has the ability to do perfect filtering of 16
2445 	 * addresses. The driver uses one of the e1000g's 16 receive
2446 	 * address registers for its node/network/mac/individual address.
2447 	 * So, we have room for up to 15 multicast addresses in the CAM,
2448 	 * additional MC addresses are handled by the MTA (Multicast Table
2449 	 * Array).
2450 	 */
2451 
2452 	rctl = E1000_READ_REG(hw, E1000_RCTL);
2453 
2454 	mc_addr_list = (uint8_t *)Adapter->mcast_table;
2455 
2456 	if (Adapter->mcast_count > MAX_NUM_MULTICAST_ADDRESSES) {
2457 		E1000G_DEBUGLOG_1(Adapter, CE_WARN,
2458 		    "Adapter requested more than %d MC Addresses.\n",
2459 		    MAX_NUM_MULTICAST_ADDRESSES);
2460 		mc_addr_count = MAX_NUM_MULTICAST_ADDRESSES;
2461 	} else {
2462 		/*
2463 		 * Set the number of MC addresses that we are being
2464 		 * requested to use
2465 		 */
2466 		mc_addr_count = Adapter->mcast_count;
2467 	}
2468 	/*
2469 	 * The Wiseman 2.0 silicon has an erratum by which the receiver will
2470 	 * hang while writing to the receive address registers if the receiver
2471 	 * is not in reset before writing to the registers. Updating the RAR
2472 	 * is done during the setting up of the multicast table, hence the
2473 	 * receiver has to be put in reset before updating the multicast table
2474 	 * and then taken out of reset at the end.
2475 	 */
2476 	/*
2477 	 * If MWI was enabled then disable it before issuing the global
2478 	 * reset to the hardware.
2479 	 */
2480 	/*
2481 	 * Only required for WISEMAN_2_0
2482 	 */
2483 	if ((hw->mac.type == e1000_82542) &&
2484 	    (hw->revision_id == E1000_REVISION_2)) {
2485 		e1000_pci_clear_mwi(hw);
2486 		/*
2487 		 * The e1000g must be in reset before changing any RA
2488 		 * registers. Reset receive unit.  The chip will remain in
2489 		 * the reset state until software explicitly restarts it.
2490 		 */
2491 		E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2492 		/* Allow the receiver time to go into reset */
2493 		msec_delay(5);
2494 	}
2495 
2496 	e1000_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2497 	    Adapter->unicst_total, hw->mac.rar_entry_count);
2498 
2499 	/*
2500 	 * Only for Wiseman_2_0
2501 	 * If MWI was enabled then re-enable it after issuing (as we
2502 	 * disabled it above) the receive reset command.
2503 	 * Wainwright does not have a receive reset command; the only thing
2504 	 * close to it is a global reset, which also requires tx setup.
2505 	 */
2506 	if ((hw->mac.type == e1000_82542) &&
2507 	    (hw->revision_id == E1000_REVISION_2)) {
2508 		/*
2509 		 * If MWI was enabled then re-enable it after issuing the
2510 		 * global or receive reset to the hardware.
2511 		 */
2512 
2513 		/*
2514 		 * Take receiver out of reset
2515 		 * clear E1000_RCTL_RST bit (and all others)
2516 		 */
2517 		E1000_WRITE_REG(hw, E1000_RCTL, 0);
2518 		msec_delay(5);
2519 		if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2520 			e1000_pci_set_mwi(hw);
2521 	}
2522 
2523 	/*
2524 	 * Restore original value
2525 	 */
2526 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2527 }
2528 
2529 int
2530 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
2531 {
2532 	struct e1000g *Adapter = (struct e1000g *)arg;
2533 	int result;
2534 
2535 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2536 
2537 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2538 		result = ECANCELED;
2539 		goto done;
2540 	}
2541 
2542 	result = (add) ? multicst_add(Adapter, addr)
2543 	    : multicst_remove(Adapter, addr);
2544 
2545 done:
2546 	rw_exit(&Adapter->chip_lock);
2547 	return (result);
2548 
2549 }
2550 
2551 int
2552 e1000g_m_promisc(void *arg, boolean_t on)
2553 {
2554 	struct e1000g *Adapter = (struct e1000g *)arg;
2555 	uint32_t rctl;
2556 
2557 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2558 
2559 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2560 		rw_exit(&Adapter->chip_lock);
2561 		return (ECANCELED);
2562 	}
2563 
2564 	rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
2565 
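	/*
	 * UPE accepts all unicast frames, MPE accepts all multicast frames
	 * and BAM accepts all broadcast frames.
	 */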
2566 	if (on)
2567 		rctl |=
2568 		    (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
2569 	else
2570 		rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
2571 
2572 	E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
2573 
2574 	Adapter->e1000g_promisc = on;
2575 
2576 	rw_exit(&Adapter->chip_lock);
2577 
2578 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2579 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2580 		return (EIO);
2581 	}
2582 
2583 	return (0);
2584 }
2585 
2586 /*
2587  * Entry points to enable and disable interrupts at the granularity of
2588  * a group.
2589  * Turns the poll_mode for the whole adapter on and off to enable or
2590  * override the ring level polling control over the hardware interrupts.
2591  */
2592 static int
2593 e1000g_rx_group_intr_enable(mac_intr_handle_t arg)
2594 {
2595 	struct e1000g		*adapter = (struct e1000g *)arg;
2596 	e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2597 
2598 	/*
2599 	 * Later interrupts at the granularity of this ring will
2600 	 * invoke mac_rx() with NULL, indicating the need for another
2601 	 * software classification.
2602 	 * We have a single ring usable per adapter now, so we only need to
2603 	 * reset the rx handle for that one.
2604 	 * When more RX rings can be used, we should update each one of them.
2605 	 */
2606 	mutex_enter(&rx_ring->rx_lock);
2607 	rx_ring->mrh = NULL;
2608 	adapter->poll_mode = B_FALSE;
2609 	mutex_exit(&rx_ring->rx_lock);
2610 	return (0);
2611 }
2612 
2613 static int
2614 e1000g_rx_group_intr_disable(mac_intr_handle_t arg)
2615 {
2616 	struct e1000g *adapter = (struct e1000g *)arg;
2617 	e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2618 
2619 	mutex_enter(&rx_ring->rx_lock);
2620 
2621 	/*
2622 	 * Later interrupts at the granularity of this ring will
2623 	 * invoke mac_rx() with the handle for this ring.
2624 	 */
2625 	adapter->poll_mode = B_TRUE;
2626 	rx_ring->mrh = rx_ring->mrh_init;
2627 	mutex_exit(&rx_ring->rx_lock);
2628 	return (0);
2629 }
2630 
2631 /*
2632  * Entry points to enable and disable interrupts at the granularity of
2633  * a ring.
2634  * The adapter's poll_mode controls whether we actually proceed with
2635  * hardware interrupt toggling.
2636  */
2637 static int
2638 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh)
2639 {
2640 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)intrh;
2641 	struct e1000g 		*adapter = rx_ring->adapter;
2642 	struct e1000_hw 	*hw = &adapter->shared;
2643 	uint32_t		intr_mask;
2644 
2645 	rw_enter(&adapter->chip_lock, RW_READER);
2646 
2647 	if (adapter->e1000g_state & E1000G_SUSPENDED) {
2648 		rw_exit(&adapter->chip_lock);
2649 		return (0);
2650 	}
2651 
2652 	mutex_enter(&rx_ring->rx_lock);
2653 	rx_ring->poll_flag = 0;
2654 	mutex_exit(&rx_ring->rx_lock);
2655 
2656 	/* Rx interrupt enabling for MSI and legacy */
2657 	intr_mask = E1000_READ_REG(hw, E1000_IMS);
2658 	intr_mask |= E1000_IMS_RXT0;
2659 	E1000_WRITE_REG(hw, E1000_IMS, intr_mask);
2660 	E1000_WRITE_FLUSH(hw);
2661 
2662 	/* Trigger a Rx interrupt to check Rx ring */
2663 	E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
2664 	E1000_WRITE_FLUSH(hw);
2665 
2666 	rw_exit(&adapter->chip_lock);
2667 	return (0);
2668 }
2669 
2670 static int
2671 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh)
2672 {
2673 	e1000g_rx_ring_t	*rx_ring = (e1000g_rx_ring_t *)intrh;
2674 	struct e1000g 		*adapter = rx_ring->adapter;
2675 	struct e1000_hw 	*hw = &adapter->shared;
2676 
2677 	rw_enter(&adapter->chip_lock, RW_READER);
2678 
2679 	if (adapter->e1000g_state & E1000G_SUSPENDED) {
2680 		rw_exit(&adapter->chip_lock);
2681 		return (0);
2682 	}
2683 	mutex_enter(&rx_ring->rx_lock);
2684 	rx_ring->poll_flag = 1;
2685 	mutex_exit(&rx_ring->rx_lock);
2686 
2687 	/* Rx interrupt disabling for MSI and legacy */
2688 	E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
2689 	E1000_WRITE_FLUSH(hw);
2690 
2691 	rw_exit(&adapter->chip_lock);
2692 	return (0);
2693 }
2694 
2695 /*
2696  * e1000g_unicst_find - Find the slot for the specified unicast address
2697  */
2698 static int
2699 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr)
2700 {
2701 	int slot;
2702 
2703 	for (slot = 0; slot < Adapter->unicst_total; slot++) {
2704 		if ((Adapter->unicst_addr[slot].mac.set == 1) &&
2705 		    (bcmp(Adapter->unicst_addr[slot].mac.addr,
2706 		    mac_addr, ETHERADDRL) == 0))
2707 				return (slot);
2708 	}
2709 
2710 	return (-1);
2711 }
2712 
2713 /*
2714  * Entry points to add a MAC address to and remove it from a ring group.
2715  * The caller takes care of adding and removing the MAC addresses
2716  * to and from the filter via these two routines.
2717  */
2718 
2719 static int
2720 e1000g_addmac(void *arg, const uint8_t *mac_addr)
2721 {
2722 	struct e1000g *Adapter = (struct e1000g *)arg;
2723 	int slot, err;
2724 
2725 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2726 
2727 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2728 		rw_exit(&Adapter->chip_lock);
2729 		return (ECANCELED);
2730 	}
2731 
2732 	if (e1000g_unicst_find(Adapter, mac_addr) != -1) {
2733 		/* The same address is already in slot */
2734 		rw_exit(&Adapter->chip_lock);
2735 		return (0);
2736 	}
2737 
2738 	if (Adapter->unicst_avail == 0) {
2739 		/* no slots available */
2740 		rw_exit(&Adapter->chip_lock);
2741 		return (ENOSPC);
2742 	}
2743 
2744 	/* Search for a free slot */
2745 	for (slot = 0; slot < Adapter->unicst_total; slot++) {
2746 		if (Adapter->unicst_addr[slot].mac.set == 0)
2747 			break;
2748 	}
2749 	ASSERT(slot < Adapter->unicst_total);
2750 
2751 	err = e1000g_unicst_set(Adapter, mac_addr, slot);
2752 	if (err == 0)
2753 		Adapter->unicst_avail--;
2754 
2755 	rw_exit(&Adapter->chip_lock);
2756 
2757 	return (err);
2758 }
2759 
2760 static int
2761 e1000g_remmac(void *arg, const uint8_t *mac_addr)
2762 {
2763 	struct e1000g *Adapter = (struct e1000g *)arg;
2764 	int slot, err;
2765 
2766 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2767 
2768 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2769 		rw_exit(&Adapter->chip_lock);
2770 		return (ECANCELED);
2771 	}
2772 
2773 	slot = e1000g_unicst_find(Adapter, mac_addr);
2774 	if (slot == -1) {
2775 		rw_exit(&Adapter->chip_lock);
2776 		return (EINVAL);
2777 	}
2778 
2779 	ASSERT(Adapter->unicst_addr[slot].mac.set);
2780 
2781 	/* Clear this slot */
2782 	err = e1000g_unicst_set(Adapter, NULL, slot);
2783 	if (err == 0)
2784 		Adapter->unicst_avail++;
2785 
2786 	rw_exit(&Adapter->chip_lock);
2787 
2788 	return (err);
2789 }
2790 
2791 static int
2792 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
2793 {
2794 	e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh;
2795 
2796 	mutex_enter(&rx_ring->rx_lock);
2797 	rx_ring->ring_gen_num = mr_gen_num;
2798 	mutex_exit(&rx_ring->rx_lock);
2799 	return (0);
2800 }
2801 
2802 /*
2803  * Callback function for the MAC layer to register all rings.
2804  *
2805  * The hardware supports a single group with currently only one ring
2806  * available.
2807  * Though not offering virtualization ability per se, exposing the
2808  * group/ring still enables the polling and interrupt toggling.
2809  */
2810 void
2811 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index,
2812     const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
2813 {
2814 	struct e1000g *Adapter = (struct e1000g *)arg;
2815 	e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
2816 	mac_intr_t *mintr;
2817 
2818 	/*
2819 	 * We advertised only RX group/rings, so the MAC framework shouldn't
2820 	 * ask for anything else.
2821 	 */
2822 	ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0);
2823 
2824 	rx_ring->mrh = rx_ring->mrh_init = rh;
2825 	infop->mri_driver = (mac_ring_driver_t)rx_ring;
2826 	infop->mri_start = e1000g_ring_start;
2827 	infop->mri_stop = NULL;
2828 	infop->mri_poll = e1000g_poll_ring;
2829 
2830 	/* Ring level interrupts */
2831 	mintr = &infop->mri_intr;
2832 	mintr->mi_handle = (mac_intr_handle_t)rx_ring;
2833 	mintr->mi_enable = e1000g_rx_ring_intr_enable;
2834 	mintr->mi_disable = e1000g_rx_ring_intr_disable;
2835 }
2836 
2837 static void
2838 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index,
2839     mac_group_info_t *infop, mac_group_handle_t gh)
2840 {
2841 	struct e1000g *Adapter = (struct e1000g *)arg;
2842 	mac_intr_t *mintr;
2843 
2844 	/*
2845 	 * We advertised a single RX ring. Getting a request for anything else
2846 	 * signifies a bug in the MAC framework.
2847 	 */
2848 	ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0);
2849 
2850 	Adapter->rx_group = gh;
2851 
2852 	infop->mgi_driver = (mac_group_driver_t)Adapter;
2853 	infop->mgi_start = NULL;
2854 	infop->mgi_stop = NULL;
2855 	infop->mgi_addmac = e1000g_addmac;
2856 	infop->mgi_remmac = e1000g_remmac;
2857 	infop->mgi_count = 1;
2858 
2859 	/* Group level interrupts */
2860 	mintr = &infop->mgi_intr;
2861 	mintr->mi_handle = (mac_intr_handle_t)Adapter;
2862 	mintr->mi_enable = e1000g_rx_group_intr_enable;
2863 	mintr->mi_disable = e1000g_rx_group_intr_disable;
2864 }
2865 
2866 static boolean_t
2867 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2868 {
2869 	struct e1000g *Adapter = (struct e1000g *)arg;
2870 
2871 	switch (cap) {
2872 	case MAC_CAPAB_HCKSUM: {
2873 		uint32_t *txflags = cap_data;
2874 
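		/*
		 * Advertise IPv4 header checksum offload and partial
		 * (one's-complement) TCP/UDP checksum offload.
		 */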
2875 		if (Adapter->tx_hcksum_enable)
2876 			*txflags = HCKSUM_IPHDRCKSUM |
2877 			    HCKSUM_INET_PARTIAL;
2878 		else
2879 			return (B_FALSE);
2880 		break;
2881 	}
2882 
2883 	case MAC_CAPAB_LSO: {
2884 		mac_capab_lso_t *cap_lso = cap_data;
2885 
2886 		if (Adapter->lso_enable) {
2887 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
2888 			cap_lso->lso_basic_tcp_ipv4.lso_max =
2889 			    E1000_LSO_MAXLEN;
2890 		} else
2891 			return (B_FALSE);
2892 		break;
2893 	}
2894 	case MAC_CAPAB_RINGS: {
2895 		mac_capab_rings_t *cap_rings = cap_data;
2896 
2897 		/* No TX rings exposed yet */
2898 		if (cap_rings->mr_type != MAC_RING_TYPE_RX)
2899 			return (B_FALSE);
2900 
2901 		cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2902 		cap_rings->mr_rnum = 1;
2903 		cap_rings->mr_gnum = 1;
2904 		cap_rings->mr_rget = e1000g_fill_ring;
2905 		cap_rings->mr_gget = e1000g_fill_group;
2906 		break;
2907 	}
2908 	default:
2909 		return (B_FALSE);
2910 	}
2911 	return (B_TRUE);
2912 }
2913 
2914 static boolean_t
2915 e1000g_param_locked(mac_prop_id_t pr_num)
2916 {
2917 	/*
2918 	 * All en_* parameters are locked (read-only) while
2919 	 * the device is in any sort of loopback mode ...
2920 	 */
2921 	switch (pr_num) {
2922 		case MAC_PROP_EN_1000FDX_CAP:
2923 		case MAC_PROP_EN_1000HDX_CAP:
2924 		case MAC_PROP_EN_100FDX_CAP:
2925 		case MAC_PROP_EN_100HDX_CAP:
2926 		case MAC_PROP_EN_10FDX_CAP:
2927 		case MAC_PROP_EN_10HDX_CAP:
2928 		case MAC_PROP_AUTONEG:
2929 		case MAC_PROP_FLOWCTRL:
2930 			return (B_TRUE);
2931 	}
2932 	return (B_FALSE);
2933 }
2934 
2935 /*
2936  * callback function for set/get of properties
2937  */
2938 static int
2939 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
2940     uint_t pr_valsize, const void *pr_val)
2941 {
2942 	struct e1000g *Adapter = arg;
2943 	struct e1000_mac_info *mac = &Adapter->shared.mac;
2944 	struct e1000_phy_info *phy = &Adapter->shared.phy;
2945 	struct e1000_fc_info *fc = &Adapter->shared.fc;
2946 	int err = 0;
2947 	link_flowctrl_t flowctrl;
2948 	uint32_t cur_mtu, new_mtu;
2949 	uint64_t tmp = 0;
2950 
2951 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2952 
2953 	if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2954 		rw_exit(&Adapter->chip_lock);
2955 		return (ECANCELED);
2956 	}
2957 
2958 	if (Adapter->loopback_mode != E1000G_LB_NONE &&
2959 	    e1000g_param_locked(pr_num)) {
2960 		/*
2961 		 * All en_* parameters are locked (read-only)
2962 		 * while the device is in any sort of loopback mode.
2963 		 */
2964 		rw_exit(&Adapter->chip_lock);
2965 		return (EBUSY);
2966 	}
2967 
2968 	switch (pr_num) {
2969 		case MAC_PROP_EN_1000FDX_CAP:
2970 			Adapter->param_en_1000fdx = *(uint8_t *)pr_val;
2971 			Adapter->param_adv_1000fdx = *(uint8_t *)pr_val;
2972 			goto reset;
2973 		case MAC_PROP_EN_100FDX_CAP:
2974 			Adapter->param_en_100fdx = *(uint8_t *)pr_val;
2975 			Adapter->param_adv_100fdx = *(uint8_t *)pr_val;
2976 			goto reset;
2977 		case MAC_PROP_EN_100HDX_CAP:
2978 			Adapter->param_en_100hdx = *(uint8_t *)pr_val;
2979 			Adapter->param_adv_100hdx = *(uint8_t *)pr_val;
2980 			goto reset;
2981 		case MAC_PROP_EN_10FDX_CAP:
2982 			Adapter->param_en_10fdx = *(uint8_t *)pr_val;
2983 			Adapter->param_adv_10fdx = *(uint8_t *)pr_val;
2984 			goto reset;
2985 		case MAC_PROP_EN_10HDX_CAP:
2986 			Adapter->param_en_10hdx = *(uint8_t *)pr_val;
2987 			Adapter->param_adv_10hdx = *(uint8_t *)pr_val;
2988 			goto reset;
2989 		case MAC_PROP_AUTONEG:
2990 			Adapter->param_adv_autoneg = *(uint8_t *)pr_val;
2991 			goto reset;
2992 		case MAC_PROP_FLOWCTRL:
2993 			fc->send_xon = B_TRUE;
2994 			bcopy(pr_val, &flowctrl, sizeof (flowctrl));
2995 
2996 			switch (flowctrl) {
2997 			default:
2998 				err = EINVAL;
2999 				break;
3000 			case LINK_FLOWCTRL_NONE:
3001 				fc->requested_mode = e1000_fc_none;
3002 				break;
3003 			case LINK_FLOWCTRL_RX:
3004 				fc->requested_mode = e1000_fc_rx_pause;
3005 				break;
3006 			case LINK_FLOWCTRL_TX:
3007 				fc->requested_mode = e1000_fc_tx_pause;
3008 				break;
3009 			case LINK_FLOWCTRL_BI:
3010 				fc->requested_mode = e1000_fc_full;
3011 				break;
3012 			}
3013 reset:
3014 			if (err == 0) {
3015 				if (e1000g_reset_link(Adapter) != DDI_SUCCESS)
3016 					err = EINVAL;
3017 			}
3018 			break;
3019 		case MAC_PROP_ADV_1000FDX_CAP:
3020 		case MAC_PROP_ADV_1000HDX_CAP:
3021 		case MAC_PROP_ADV_100FDX_CAP:
3022 		case MAC_PROP_ADV_100HDX_CAP:
3023 		case MAC_PROP_ADV_10FDX_CAP:
3024 		case MAC_PROP_ADV_10HDX_CAP:
3025 		case MAC_PROP_EN_1000HDX_CAP:
3026 		case MAC_PROP_STATUS:
3027 		case MAC_PROP_SPEED:
3028 		case MAC_PROP_DUPLEX:
3029 			err = ENOTSUP; /* read-only prop. Can't set this. */
3030 			break;
3031 		case MAC_PROP_MTU:
3032 			cur_mtu = Adapter->default_mtu;
3033 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3034 			if (new_mtu == cur_mtu) {
3035 				err = 0;
3036 				break;
3037 			}
3038 
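			/*
			 * The required frame size is the new MTU plus the
			 * VLAN Ethernet header and the FCS.
			 */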
3039 			tmp = new_mtu + sizeof (struct ether_vlan_header) +
3040 			    ETHERFCSL;
3041 			if ((tmp < DEFAULT_FRAME_SIZE) ||
3042 			    (tmp > MAXIMUM_FRAME_SIZE)) {
3043 				err = EINVAL;
3044 				break;
3045 			}
3046 
3047 			/* ich8 does not support jumbo frames */
3048 			if ((mac->type == e1000_ich8lan) &&
3049 			    (tmp > DEFAULT_FRAME_SIZE)) {
3050 				err = EINVAL;
3051 				break;
3052 			}
3053 			/* ich9 does not do jumbo frames on one phy type */
3054 			if ((mac->type == e1000_ich9lan) &&
3055 			    (phy->type == e1000_phy_ife) &&
3056 			    (tmp > DEFAULT_FRAME_SIZE)) {
3057 				err = EINVAL;
3058 				break;
3059 			}
3060 			if (Adapter->e1000g_state & E1000G_STARTED) {
3061 				err = EBUSY;
3062 				break;
3063 			}
3064 
3065 			err = mac_maxsdu_update(Adapter->mh, new_mtu);
3066 			if (err == 0) {
3067 				Adapter->max_frame_size = (uint32_t)tmp;
3068 				Adapter->default_mtu = new_mtu;
3069 				e1000g_set_bufsize(Adapter);
3070 			}
3071 			break;
3072 		case MAC_PROP_PRIVATE:
3073 			err = e1000g_set_priv_prop(Adapter, pr_name,
3074 			    pr_valsize, pr_val);
3075 			break;
3076 		default:
3077 			err = ENOTSUP;
3078 			break;
3079 	}
3080 	rw_exit(&Adapter->chip_lock);
3081 	return (err);
3082 }
3083 
3084 static int
3085 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3086     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
3087 {
3088 	struct e1000g *Adapter = arg;
3089 	struct e1000_fc_info *fc = &Adapter->shared.fc;
3090 	int err = 0;
3091 	link_flowctrl_t flowctrl;
3092 	uint64_t tmp = 0;
3093 
3094 	if (pr_valsize == 0)
3095 		return (EINVAL);
3096 
3097 	*perm = MAC_PROP_PERM_RW;
3098 
3099 	bzero(pr_val, pr_valsize);
3100 	if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) {
3101 		return (e1000g_get_def_val(Adapter, pr_num,
3102 		    pr_valsize, pr_val));
3103 	}
3104 
3105 	switch (pr_num) {
3106 		case MAC_PROP_DUPLEX:
3107 			*perm = MAC_PROP_PERM_READ;
3108 			if (pr_valsize >= sizeof (link_duplex_t)) {
3109 				bcopy(&Adapter->link_duplex, pr_val,
3110 				    sizeof (link_duplex_t));
3111 			} else
3112 				err = EINVAL;
3113 			break;
3114 		case MAC_PROP_SPEED:
3115 			*perm = MAC_PROP_PERM_READ;
3116 			if (pr_valsize >= sizeof (uint64_t)) {
3117 				tmp = Adapter->link_speed * 1000000ull;
3118 				bcopy(&tmp, pr_val, sizeof (tmp));
3119 			} else
3120 				err = EINVAL;
3121 			break;
3122 		case MAC_PROP_AUTONEG:
3123 			*(uint8_t *)pr_val = Adapter->param_adv_autoneg;
3124 			break;
3125 		case MAC_PROP_FLOWCTRL:
3126 			if (pr_valsize >= sizeof (link_flowctrl_t)) {
3127 				switch (fc->current_mode) {
3128 					case e1000_fc_none:
3129 						flowctrl = LINK_FLOWCTRL_NONE;
3130 						break;
3131 					case e1000_fc_rx_pause:
3132 						flowctrl = LINK_FLOWCTRL_RX;
3133 						break;
3134 					case e1000_fc_tx_pause:
3135 						flowctrl = LINK_FLOWCTRL_TX;
3136 						break;
3137 					case e1000_fc_full:
3138 						flowctrl = LINK_FLOWCTRL_BI;
3139 						break;
3140 				}
3141 				bcopy(&flowctrl, pr_val, sizeof (flowctrl));
3142 			} else
3143 				err = EINVAL;
3144 			break;
3145 		case MAC_PROP_ADV_1000FDX_CAP:
3146 			*perm = MAC_PROP_PERM_READ;
3147 			*(uint8_t *)pr_val = Adapter->param_adv_1000fdx;
3148 			break;
3149 		case MAC_PROP_EN_1000FDX_CAP:
3150 			*(uint8_t *)pr_val = Adapter->param_en_1000fdx;
3151 			break;
3152 		case MAC_PROP_ADV_1000HDX_CAP:
3153 			*perm = MAC_PROP_PERM_READ;
3154 			*(uint8_t *)pr_val = Adapter->param_adv_1000hdx;
3155 			break;
3156 		case MAC_PROP_EN_1000HDX_CAP:
3157 			*perm = MAC_PROP_PERM_READ;
3158 			*(uint8_t *)pr_val = Adapter->param_en_1000hdx;
3159 			break;
3160 		case MAC_PROP_ADV_100FDX_CAP:
3161 			*perm = MAC_PROP_PERM_READ;
3162 			*(uint8_t *)pr_val = Adapter->param_adv_100fdx;
3163 			break;
3164 		case MAC_PROP_EN_100FDX_CAP:
3165 			*(uint8_t *)pr_val = Adapter->param_en_100fdx;
3166 			break;
3167 		case MAC_PROP_ADV_100HDX_CAP:
3168 			*perm = MAC_PROP_PERM_READ;
3169 			*(uint8_t *)pr_val = Adapter->param_adv_100hdx;
3170 			break;
3171 		case MAC_PROP_EN_100HDX_CAP:
3172 			*(uint8_t *)pr_val = Adapter->param_en_100hdx;
3173 			break;
3174 		case MAC_PROP_ADV_10FDX_CAP:
3175 			*perm = MAC_PROP_PERM_READ;
3176 			*(uint8_t *)pr_val = Adapter->param_adv_10fdx;
3177 			break;
3178 		case MAC_PROP_EN_10FDX_CAP:
3179 			*(uint8_t *)pr_val = Adapter->param_en_10fdx;
3180 			break;
3181 		case MAC_PROP_ADV_10HDX_CAP:
3182 			*perm = MAC_PROP_PERM_READ;
3183 			*(uint8_t *)pr_val = Adapter->param_adv_10hdx;
3184 			break;
3185 		case MAC_PROP_EN_10HDX_CAP:
3186 			*(uint8_t *)pr_val = Adapter->param_en_10hdx;
3187 			break;
3188 		case MAC_PROP_ADV_100T4_CAP:
3189 		case MAC_PROP_EN_100T4_CAP:
3190 			*perm = MAC_PROP_PERM_READ;
3191 			*(uint8_t *)pr_val = Adapter->param_adv_100t4;
3192 			break;
3193 		case MAC_PROP_PRIVATE:
3194 			err = e1000g_get_priv_prop(Adapter, pr_name,
3195 			    pr_flags, pr_valsize, pr_val, perm);
3196 			break;
3197 		case MAC_PROP_MTU: {
3198 			struct e1000_mac_info *mac = &Adapter->shared.mac;
3199 			struct e1000_phy_info *phy = &Adapter->shared.phy;
3200 			mac_propval_range_t range;
3201 
3202 			if (!(pr_flags & MAC_PROP_POSSIBLE))
3203 				return (ENOTSUP);
3204 			if (pr_valsize < sizeof (mac_propval_range_t))
3205 				return (EINVAL);
3206 			range.mpr_count = 1;
3207 			range.mpr_type = MAC_PROPVAL_UINT32;
3208 			range.range_uint32[0].mpur_min = DEFAULT_MTU;
3209 			range.range_uint32[0].mpur_max = MAXIMUM_MTU;
3210 			/* the following MAC types do not support jumbo frames */
3211 			if ((mac->type == e1000_ich8lan) ||
3212 			    ((mac->type == e1000_ich9lan) && (phy->type ==
3213 			    e1000_phy_ife))) {
3214 				range.range_uint32[0].mpur_max = DEFAULT_MTU;
3215 			}
3216 			bcopy(&range, pr_val, sizeof (range));
3217 			break;
3218 		}
3219 		default:
3220 			err = ENOTSUP;
3221 			break;
3222 	}
3223 	return (err);
3224 }
3225 
3226 /* ARGSUSED2 */
3227 static int
3228 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name,
3229     uint_t pr_valsize, const void *pr_val)
3230 {
3231 	int err = 0;
3232 	long result;
3233 	struct e1000_hw *hw = &Adapter->shared;
3234 
3235 	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3236 		if (pr_val == NULL) {
3237 			err = EINVAL;
3238 			return (err);
3239 		}
3240 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3241 		if (result < MIN_TX_BCOPY_THRESHOLD ||
3242 		    result > MAX_TX_BCOPY_THRESHOLD)
3243 			err = EINVAL;
3244 		else {
3245 			Adapter->tx_bcopy_thresh = (uint32_t)result;
3246 		}
3247 		return (err);
3248 	}
3249 	if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3250 		if (pr_val == NULL) {
3251 			err = EINVAL;
3252 			return (err);
3253 		}
3254 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3255 		if (result < 0 || result > 1)
3256 			err = EINVAL;
3257 		else {
3258 			Adapter->tx_intr_enable = (result == 1) ?
3259 			    B_TRUE: B_FALSE;
3260 			if (Adapter->tx_intr_enable)
3261 				e1000g_mask_tx_interrupt(Adapter);
3262 			else
3263 				e1000g_clear_tx_interrupt(Adapter);
3264 			if (e1000g_check_acc_handle(
3265 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3266 				ddi_fm_service_impact(Adapter->dip,
3267 				    DDI_SERVICE_DEGRADED);
3268 		}
3269 		return (err);
3270 	}
3271 	if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3272 		if (pr_val == NULL) {
3273 			err = EINVAL;
3274 			return (err);
3275 		}
3276 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3277 		if (result < MIN_TX_INTR_DELAY ||
3278 		    result > MAX_TX_INTR_DELAY)
3279 			err = EINVAL;
3280 		else {
3281 			Adapter->tx_intr_delay = (uint32_t)result;
3282 			E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay);
3283 			if (e1000g_check_acc_handle(
3284 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3285 				ddi_fm_service_impact(Adapter->dip,
3286 				    DDI_SERVICE_DEGRADED);
3287 		}
3288 		return (err);
3289 	}
3290 	if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3291 		if (pr_val == NULL) {
3292 			err = EINVAL;
3293 			return (err);
3294 		}
3295 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3296 		if (result < MIN_TX_INTR_ABS_DELAY ||
3297 		    result > MAX_TX_INTR_ABS_DELAY)
3298 			err = EINVAL;
3299 		else {
3300 			Adapter->tx_intr_abs_delay = (uint32_t)result;
3301 			E1000_WRITE_REG(hw, E1000_TADV,
3302 			    Adapter->tx_intr_abs_delay);
3303 			if (e1000g_check_acc_handle(
3304 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3305 				ddi_fm_service_impact(Adapter->dip,
3306 				    DDI_SERVICE_DEGRADED);
3307 		}
3308 		return (err);
3309 	}
3310 	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3311 		if (pr_val == NULL) {
3312 			err = EINVAL;
3313 			return (err);
3314 		}
3315 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3316 		if (result < MIN_RX_BCOPY_THRESHOLD ||
3317 		    result > MAX_RX_BCOPY_THRESHOLD)
3318 			err = EINVAL;
3319 		else
3320 			Adapter->rx_bcopy_thresh = (uint32_t)result;
3321 		return (err);
3322 	}
3323 	if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3324 		if (pr_val == NULL) {
3325 			err = EINVAL;
3326 			return (err);
3327 		}
3328 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3329 		if (result < MIN_RX_LIMIT_ON_INTR ||
3330 		    result > MAX_RX_LIMIT_ON_INTR)
3331 			err = EINVAL;
3332 		else
3333 			Adapter->rx_limit_onintr = (uint32_t)result;
3334 		return (err);
3335 	}
3336 	if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3337 		if (pr_val == NULL) {
3338 			err = EINVAL;
3339 			return (err);
3340 		}
3341 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3342 		if (result < MIN_RX_INTR_DELAY ||
3343 		    result > MAX_RX_INTR_DELAY)
3344 			err = EINVAL;
3345 		else {
3346 			Adapter->rx_intr_delay = (uint32_t)result;
3347 			E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay);
3348 			if (e1000g_check_acc_handle(
3349 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3350 				ddi_fm_service_impact(Adapter->dip,
3351 				    DDI_SERVICE_DEGRADED);
3352 		}
3353 		return (err);
3354 	}
3355 	if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3356 		if (pr_val == NULL) {
3357 			err = EINVAL;
3358 			return (err);
3359 		}
3360 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3361 		if (result < MIN_RX_INTR_ABS_DELAY ||
3362 		    result > MAX_RX_INTR_ABS_DELAY)
3363 			err = EINVAL;
3364 		else {
3365 			Adapter->rx_intr_abs_delay = (uint32_t)result;
3366 			E1000_WRITE_REG(hw, E1000_RADV,
3367 			    Adapter->rx_intr_abs_delay);
3368 			if (e1000g_check_acc_handle(
3369 			    Adapter->osdep.reg_handle) != DDI_FM_OK)
3370 				ddi_fm_service_impact(Adapter->dip,
3371 				    DDI_SERVICE_DEGRADED);
3372 		}
3373 		return (err);
3374 	}
3375 	if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3376 		if (pr_val == NULL) {
3377 			err = EINVAL;
3378 			return (err);
3379 		}
3380 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3381 		if (result < MIN_INTR_THROTTLING ||
3382 		    result > MAX_INTR_THROTTLING)
3383 			err = EINVAL;
3384 		else {
3385 			if (hw->mac.type >= e1000_82540) {
3386 				Adapter->intr_throttling_rate =
3387 				    (uint32_t)result;
3388 				E1000_WRITE_REG(hw, E1000_ITR,
3389 				    Adapter->intr_throttling_rate);
3390 				if (e1000g_check_acc_handle(
3391 				    Adapter->osdep.reg_handle) != DDI_FM_OK)
3392 					ddi_fm_service_impact(Adapter->dip,
3393 					    DDI_SERVICE_DEGRADED);
3394 			} else
3395 				err = EINVAL;
3396 		}
3397 		return (err);
3398 	}
3399 	if (strcmp(pr_name, "_intr_adaptive") == 0) {
3400 		if (pr_val == NULL) {
3401 			err = EINVAL;
3402 			return (err);
3403 		}
3404 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3405 		if (result < 0 || result > 1)
3406 			err = EINVAL;
3407 		else {
3408 			if (hw->mac.type >= e1000_82540) {
3409 				Adapter->intr_adaptive = (result == 1) ?
3410 				    B_TRUE : B_FALSE;
3411 			} else {
3412 				err = EINVAL;
3413 			}
3414 		}
3415 		return (err);
3416 	}
3417 	return (ENOTSUP);
3418 }
3419 
3420 static int
3421 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name,
3422     uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm)
3423 {
3424 	int err = ENOTSUP;
3425 	boolean_t is_default = (pr_flags & MAC_PROP_DEFAULT);
3426 	int value;
3427 
3428 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
3429 		*perm = MAC_PROP_PERM_READ;
3430 		if (is_default)
3431 			goto done;
3432 		value = Adapter->param_adv_pause;
3433 		err = 0;
3434 		goto done;
3435 	}
3436 	if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
3437 		*perm = MAC_PROP_PERM_READ;
3438 		if (is_default)
3439 			goto done;
3440 		value = Adapter->param_adv_asym_pause;
3441 		err = 0;
3442 		goto done;
3443 	}
3444 	if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3445 		value = (is_default ? DEFAULT_TX_BCOPY_THRESHOLD :
3446 		    Adapter->tx_bcopy_thresh);
3447 		err = 0;
3448 		goto done;
3449 	}
3450 	if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3451 		value = (is_default ? DEFAULT_TX_INTR_ENABLE :
3452 		    Adapter->tx_intr_enable);
3453 		err = 0;
3454 		goto done;
3455 	}
3456 	if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3457 		value = (is_default ? DEFAULT_TX_INTR_DELAY :
3458 		    Adapter->tx_intr_delay);
3459 		err = 0;
3460 		goto done;
3461 	}
3462 	if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3463 		value = (is_default ? DEFAULT_TX_INTR_ABS_DELAY :
3464 		    Adapter->tx_intr_abs_delay);
3465 		err = 0;
3466 		goto done;
3467 	}
3468 	if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3469 		value = (is_default ? DEFAULT_RX_BCOPY_THRESHOLD :
3470 		    Adapter->rx_bcopy_thresh);
3471 		err = 0;
3472 		goto done;
3473 	}
3474 	if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3475 		value = (is_default ? DEFAULT_RX_LIMIT_ON_INTR :
3476 		    Adapter->rx_limit_onintr);
3477 		err = 0;
3478 		goto done;
3479 	}
3480 	if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3481 		value = (is_default ? DEFAULT_RX_INTR_DELAY :
3482 		    Adapter->rx_intr_delay);
3483 		err = 0;
3484 		goto done;
3485 	}
3486 	if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3487 		value = (is_default ? DEFAULT_RX_INTR_ABS_DELAY :
3488 		    Adapter->rx_intr_abs_delay);
3489 		err = 0;
3490 		goto done;
3491 	}
3492 	if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3493 		value = (is_default ? DEFAULT_INTR_THROTTLING :
3494 		    Adapter->intr_throttling_rate);
3495 		err = 0;
3496 		goto done;
3497 	}
3498 	if (strcmp(pr_name, "_intr_adaptive") == 0) {
3499 		value = (is_default ? 1 : Adapter->intr_adaptive);
3500 		err = 0;
3501 		goto done;
3502 	}
3503 done:
3504 	if (err == 0) {
3505 		(void) snprintf(pr_val, pr_valsize, "%d", value);
3506 	}
3507 	return (err);
3508 }
3509 
3510 /*
3511  * e1000g_get_conf - get configurations set in e1000g.conf
3512  * This routine gets user-configured values out of the configuration
3513  * file e1000g.conf.
3514  *
3515  * For each configurable value, there is a minimum, a maximum, and a
3516  * default.
3517  * If the user does not configure a value, use the default.
3518  * If the user configures below the minimum, use the minimum.
3519  * If the user configures above the maximum, use the maximum.
3520  */
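/*
 * As an illustration only (the values below are hypothetical), a
 * configuration override in e1000g.conf could look like:
 *
 *	NumTxDescriptors=2048;
 *	MaxNumReceivePackets=256;
 *
 * A property may also be given as a comma-separated list with one value
 * per driver instance; see e1000g_get_prop() below.
 */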
3521 static void
3522 e1000g_get_conf(struct e1000g *Adapter)
3523 {
3524 	struct e1000_hw *hw = &Adapter->shared;
3525 	boolean_t tbi_compatibility = B_FALSE;
3526 
3527 	/*
3528 	 * get each configurable property from e1000g.conf
3529 	 */
3530 
3531 	/*
3532 	 * NumTxDescriptors
3533 	 */
3534 	Adapter->tx_desc_num =
3535 	    e1000g_get_prop(Adapter, "NumTxDescriptors",
3536 	    MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR,
3537 	    DEFAULT_NUM_TX_DESCRIPTOR);
3538 
3539 	/*
3540 	 * NumRxDescriptors
3541 	 */
3542 	Adapter->rx_desc_num =
3543 	    e1000g_get_prop(Adapter, "NumRxDescriptors",
3544 	    MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR,
3545 	    DEFAULT_NUM_RX_DESCRIPTOR);
3546 
3547 	/*
3548 	 * NumRxFreeList
3549 	 */
3550 	Adapter->rx_freelist_num =
3551 	    e1000g_get_prop(Adapter, "NumRxFreeList",
3552 	    MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST,
3553 	    DEFAULT_NUM_RX_FREELIST);
3554 
3555 	/*
3556 	 * NumTxPacketList
3557 	 */
3558 	Adapter->tx_freelist_num =
3559 	    e1000g_get_prop(Adapter, "NumTxPacketList",
3560 	    MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST,
3561 	    DEFAULT_NUM_TX_FREELIST);
3562 
3563 	/*
3564 	 * FlowControl
3565 	 */
3566 	hw->fc.send_xon = B_TRUE;
3567 	hw->fc.requested_mode =
3568 	    e1000g_get_prop(Adapter, "FlowControl",
3569 	    e1000_fc_none, 4, DEFAULT_FLOW_CONTROL);
3570 	/* 4 is the setting that says "let the eeprom decide" */
3571 	if (hw->fc.requested_mode == 4)
3572 		hw->fc.requested_mode = e1000_fc_default;
3573 
3574 	/*
3575 	 * Max Num Receive Packets on Interrupt
3576 	 */
3577 	Adapter->rx_limit_onintr =
3578 	    e1000g_get_prop(Adapter, "MaxNumReceivePackets",
3579 	    MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR,
3580 	    DEFAULT_RX_LIMIT_ON_INTR);
3581 
3582 	/*
3583 	 * PHY master slave setting
3584 	 */
3585 	hw->phy.ms_type =
3586 	    e1000g_get_prop(Adapter, "SetMasterSlave",
3587 	    e1000_ms_hw_default, e1000_ms_auto,
3588 	    e1000_ms_hw_default);
3589 
3590 	/*
3591 	 * Parameter which controls TBI mode workaround, which is only
3592 	 * needed on certain switches such as Cisco 6500/Foundry
3593 	 */
3594 	tbi_compatibility =
3595 	    e1000g_get_prop(Adapter, "TbiCompatibilityEnable",
3596 	    0, 1, DEFAULT_TBI_COMPAT_ENABLE);
3597 	e1000_set_tbi_compatibility_82543(hw, tbi_compatibility);
3598 
3599 	/*
3600 	 * MSI Enable
3601 	 */
3602 	Adapter->msi_enable =
3603 	    e1000g_get_prop(Adapter, "MSIEnable",
3604 	    0, 1, DEFAULT_MSI_ENABLE);
3605 
3606 	/*
3607 	 * Interrupt Throttling Rate
3608 	 */
3609 	Adapter->intr_throttling_rate =
3610 	    e1000g_get_prop(Adapter, "intr_throttling_rate",
3611 	    MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
3612 	    DEFAULT_INTR_THROTTLING);
3613 
3614 	/*
3615 	 * Adaptive Interrupt Blanking Enable/Disable
3616 	 * It is enabled by default
3617 	 */
3618 	Adapter->intr_adaptive =
3619 	    (e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1) == 1) ?
3620 	    B_TRUE : B_FALSE;
3621 
3622 	/*
3623 	 * Hardware checksum enable/disable parameter
3624 	 */
3625 	Adapter->tx_hcksum_enable =
3626 	    e1000g_get_prop(Adapter, "tx_hcksum_enable",
3627 	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3628 	/*
3629 	 * Checksum on/off selection via global parameters.
3630 	 *
3631 	 * If the chip is flagged as not capable of (correctly)
3632 	 * handling checksumming, we don't enable it on either
3633 	 * Rx or Tx side.  Otherwise, we take this chip's settings
3634 	 * from the patchable global defaults.
3635 	 *
3636 	 * We advertise our capabilities only if TX offload is
3637 	 * enabled.  On receive, the stack will accept checksummed
3638 	 * packets anyway, even if we haven't said we can deliver
3639 	 * them.
3640 	 */
3641 	switch (hw->mac.type) {
3642 		case e1000_82540:
3643 		case e1000_82544:
3644 		case e1000_82545:
3645 		case e1000_82545_rev_3:
3646 		case e1000_82546:
3647 		case e1000_82546_rev_3:
3648 		case e1000_82571:
3649 		case e1000_82572:
3650 		case e1000_82573:
3651 		case e1000_80003es2lan:
3652 			break;
3653 		/*
3654 		 * For the following Intel PRO/1000 chipsets, we have not
3655 		 * tested the hardware checksum offload capability, so we
3656 		 * disable the capability for them.
3657 		 *	e1000_82542,
3658 		 *	e1000_82543,
3659 		 *	e1000_82541,
3660 		 *	e1000_82541_rev_2,
3661 		 *	e1000_82547,
3662 		 *	e1000_82547_rev_2,
3663 		 */
3664 		default:
3665 			Adapter->tx_hcksum_enable = B_FALSE;
3666 	}
3667 
3668 	/*
3669 	 * Large Send Offload (LSO) Enable/Disable
3670 	 * If the tx hardware checksum is not enabled, LSO should be
3671 	 * disabled.
3672 	 */
3673 	Adapter->lso_enable =
3674 	    e1000g_get_prop(Adapter, "lso_enable",
3675 	    0, 1, DEFAULT_LSO_ENABLE);
3676 
3677 	switch (hw->mac.type) {
3678 		case e1000_82546:
3679 		case e1000_82546_rev_3:
3680 			if (Adapter->lso_enable)
3681 				Adapter->lso_premature_issue = B_TRUE;
3682 			/* FALLTHRU */
3683 		case e1000_82571:
3684 		case e1000_82572:
3685 		case e1000_82573:
3686 		case e1000_80003es2lan:
3687 			break;
3688 		default:
3689 			Adapter->lso_enable = B_FALSE;
3690 	}
3691 
3692 	if (!Adapter->tx_hcksum_enable) {
3693 		Adapter->lso_premature_issue = B_FALSE;
3694 		Adapter->lso_enable = B_FALSE;
3695 	}
3696 
3697 	/*
3698 	 * If mem_workaround_82546 is enabled, the rx buffers allocated by
3699 	 * e1000_82545, e1000_82546 and e1000_82546_rev_3
3700 	 * will not cross a 64k boundary.
3701 	 */
3702 	Adapter->mem_workaround_82546 =
3703 	    e1000g_get_prop(Adapter, "mem_workaround_82546",
3704 	    0, 1, DEFAULT_MEM_WORKAROUND_82546);
3705 }
3706 
3707 /*
3708  * e1000g_get_prop - routine to read properties
3709  *
3710  * Get a user-configured property value out of the configuration
3711  * file e1000g.conf.
3712  *
3713  * Caller provides name of the property, a default value, a minimum
3714  * value, and a maximum value.
3715  *
3716  * Return configured value of the property, with default, minimum and
3717  * maximum properly applied.
3718  */
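/*
 * Illustrative only: e1000g.conf entries are integer arrays indexed by
 * driver instance, in the usual driver.conf syntax, e.g.
 *
 *	MaxFrameSize=0,0,2,0;
 *	intr_throttling_rate=550,550,550,550;
 *
 * Property names match the e1000g_get_prop() calls above; the values
 * shown here are examples, not recommendations.
 */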
3719 static int
3720 e1000g_get_prop(struct e1000g *Adapter,	/* pointer to per-adapter structure */
3721     char *propname,		/* name of the property */
3722     int minval,			/* minimum acceptable value */
3723     int maxval,			/* maximum acceptable value */
3724     int defval)			/* default value */
3725 {
3726 	int propval;		/* value returned for requested property */
3727 	int *props;		/* pointer to array of properties returned */
3728 	uint_t nprops;		/* number of property values returned */
3729 
3730 	/*
3731 	 * get the array of properties from the config file
3732 	 */
3733 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip,
3734 	    DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) {
3735 		/* got some properties, test if we got enough */
3736 		if (Adapter->instance < nprops) {
3737 			propval = props[Adapter->instance];
3738 		} else {
3739 			/* not enough properties configured */
3740 			propval = defval;
3741 			E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
3742 			    "Not Enough %s values found in e1000g.conf"
3743 			    " - set to %d\n",
3744 			    propname, propval);
3745 		}
3746 
3747 		/* free memory allocated for properties */
3748 		ddi_prop_free(props);
3749 
3750 	} else {
3751 		propval = defval;
3752 	}
3753 
3754 	/*
3755 	 * enforce limits
3756 	 */
3757 	if (propval > maxval) {
3758 		propval = maxval;
3759 		E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
3760 		    "Too High %s value in e1000g.conf - set to %d\n",
3761 		    propname, propval);
3762 	}
3763 
3764 	if (propval < minval) {
3765 		propval = minval;
3766 		E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
3767 		    "Too Low %s value in e1000g.conf - set to %d\n",
3768 		    propname, propval);
3769 	}
3770 
3771 	return (propval);
3772 }
3773 
3774 static boolean_t
3775 e1000g_link_check(struct e1000g *Adapter)
3776 {
3777 	uint16_t speed, duplex, phydata;
3778 	boolean_t link_changed = B_FALSE;
3779 	struct e1000_hw *hw;
3780 	uint32_t reg_tarc;
3781 
3782 	hw = &Adapter->shared;
3783 
3784 	if (e1000g_link_up(Adapter)) {
3785 		/*
3786 		 * The Link is up, check whether it was marked as down earlier
3787 		 */
3788 		if (Adapter->link_state != LINK_STATE_UP) {
3789 			(void) e1000_get_speed_and_duplex(hw, &speed, &duplex);
3790 			Adapter->link_speed = speed;
3791 			Adapter->link_duplex = duplex;
3792 			Adapter->link_state = LINK_STATE_UP;
3793 			link_changed = B_TRUE;
3794 
3795 			if (Adapter->link_speed == SPEED_1000)
3796 				Adapter->stall_threshold = TX_STALL_TIME_2S;
3797 			else
3798 				Adapter->stall_threshold = TX_STALL_TIME_8S;
3799 
3800 			Adapter->tx_link_down_timeout = 0;
3801 
3802 			if ((hw->mac.type == e1000_82571) ||
3803 			    (hw->mac.type == e1000_82572)) {
3804 				reg_tarc = E1000_READ_REG(hw, E1000_TARC(0));
3805 				if (speed == SPEED_1000)
3806 					reg_tarc |= (1 << 21);
3807 				else
3808 					reg_tarc &= ~(1 << 21);
3809 				E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc);
3810 			}
3811 		}
3812 		Adapter->smartspeed = 0;
3813 	} else {
3814 		if (Adapter->link_state != LINK_STATE_DOWN) {
3815 			Adapter->link_speed = 0;
3816 			Adapter->link_duplex = 0;
3817 			Adapter->link_state = LINK_STATE_DOWN;
3818 			link_changed = B_TRUE;
3819 
3820 			/*
3821 			 * SmartSpeed workaround for Tabor/TanaX: when the
3822 			 * driver loses link, disable auto master/slave
3823 			 * resolution.
3824 			 */
3825 			if (hw->phy.type == e1000_phy_igp) {
3826 				(void) e1000_read_phy_reg(hw,
3827 				    PHY_1000T_CTRL, &phydata);
3828 				phydata |= CR_1000T_MS_ENABLE;
3829 				(void) e1000_write_phy_reg(hw,
3830 				    PHY_1000T_CTRL, phydata);
3831 			}
3832 		} else {
3833 			e1000g_smartspeed(Adapter);
3834 		}
3835 
3836 		if (Adapter->e1000g_state & E1000G_STARTED) {
3837 			if (Adapter->tx_link_down_timeout <
3838 			    MAX_TX_LINK_DOWN_TIMEOUT) {
3839 				Adapter->tx_link_down_timeout++;
3840 			} else if (Adapter->tx_link_down_timeout ==
3841 			    MAX_TX_LINK_DOWN_TIMEOUT) {
3842 				e1000g_tx_clean(Adapter);
3843 				Adapter->tx_link_down_timeout++;
3844 			}
3845 		}
3846 	}
3847 
3848 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
3849 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
3850 
3851 	return (link_changed);
3852 }
3853 
3854 /*
3855  * e1000g_reset_link - Using the link properties to setup the link
3856  */
3857 int
3858 e1000g_reset_link(struct e1000g *Adapter)
3859 {
3860 	struct e1000_mac_info *mac;
3861 	struct e1000_phy_info *phy;
3862 	boolean_t invalid;
3863 
3864 	mac = &Adapter->shared.mac;
3865 	phy = &Adapter->shared.phy;
3866 	invalid = B_FALSE;
3867 
3868 	if (Adapter->param_adv_autoneg == 1) {
3869 		mac->autoneg = B_TRUE;
3870 		phy->autoneg_advertised = 0;
3871 
3872 		/*
3873 		 * 1000hdx is not supported for autonegotiation
3874 		 */
3875 		if (Adapter->param_adv_1000fdx == 1)
3876 			phy->autoneg_advertised |= ADVERTISE_1000_FULL;
3877 
3878 		if (Adapter->param_adv_100fdx == 1)
3879 			phy->autoneg_advertised |= ADVERTISE_100_FULL;
3880 
3881 		if (Adapter->param_adv_100hdx == 1)
3882 			phy->autoneg_advertised |= ADVERTISE_100_HALF;
3883 
3884 		if (Adapter->param_adv_10fdx == 1)
3885 			phy->autoneg_advertised |= ADVERTISE_10_FULL;
3886 
3887 		if (Adapter->param_adv_10hdx == 1)
3888 			phy->autoneg_advertised |= ADVERTISE_10_HALF;
3889 
3890 		if (phy->autoneg_advertised == 0)
3891 			invalid = B_TRUE;
3892 	} else {
3893 		mac->autoneg = B_FALSE;
3894 
3895 		/*
3896 		 * 1000fdx and 1000hdx are not supported for forced link
3897 		 */
3898 		if (Adapter->param_adv_100fdx == 1)
3899 			mac->forced_speed_duplex = ADVERTISE_100_FULL;
3900 		else if (Adapter->param_adv_100hdx == 1)
3901 			mac->forced_speed_duplex = ADVERTISE_100_HALF;
3902 		else if (Adapter->param_adv_10fdx == 1)
3903 			mac->forced_speed_duplex = ADVERTISE_10_FULL;
3904 		else if (Adapter->param_adv_10hdx == 1)
3905 			mac->forced_speed_duplex = ADVERTISE_10_HALF;
3906 		else
3907 			invalid = B_TRUE;
3908 
3909 	}
3910 
3911 	if (invalid) {
3912 		e1000g_log(Adapter, CE_WARN,
3913 		    "Invalid link settings. Setting up link to "
3914 		    "support autonegotiation with all link capabilities.");
3915 		mac->autoneg = B_TRUE;
3916 		phy->autoneg_advertised = ADVERTISE_1000_FULL |
3917 		    ADVERTISE_100_FULL | ADVERTISE_100_HALF |
3918 		    ADVERTISE_10_FULL | ADVERTISE_10_HALF;
3919 	}
3920 
3921 	return (e1000_setup_link(&Adapter->shared));
3922 }
3923 
3924 static void
3925 e1000g_timer_tx_resched(struct e1000g *Adapter)
3926 {
3927 	e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
3928 
3929 	rw_enter(&Adapter->chip_lock, RW_READER);
3930 
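	/*
	 * Only ask the MAC layer to retry transmits if a reschedule was
	 * requested more than a second ago, the interface is still started,
	 * and enough tx descriptors have been reclaimed to make progress.
	 */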
3931 	if (tx_ring->resched_needed &&
3932 	    ((ddi_get_lbolt() - tx_ring->resched_timestamp) >
3933 	    drv_usectohz(1000000)) &&
3934 	    (Adapter->e1000g_state & E1000G_STARTED) &&
3935 	    (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) {
3936 		tx_ring->resched_needed = B_FALSE;
3937 		mac_tx_update(Adapter->mh);
3938 		E1000G_STAT(tx_ring->stat_reschedule);
3939 		E1000G_STAT(tx_ring->stat_timer_reschedule);
3940 	}
3941 
3942 	rw_exit(&Adapter->chip_lock);
3943 }
3944 
3945 static void
3946 e1000g_local_timer(void *ws)
3947 {
3948 	struct e1000g *Adapter = (struct e1000g *)ws;
3949 	struct e1000_hw *hw;
3950 	e1000g_ether_addr_t ether_addr;
3951 	boolean_t link_changed;
3952 
3953 	hw = &Adapter->shared;
3954 
3955 	if (Adapter->e1000g_state & E1000G_ERROR) {
3956 		rw_enter(&Adapter->chip_lock, RW_WRITER);
3957 		Adapter->e1000g_state &= ~E1000G_ERROR;
3958 		rw_exit(&Adapter->chip_lock);
3959 
3960 		Adapter->reset_count++;
3961 		if (e1000g_global_reset(Adapter)) {
3962 			ddi_fm_service_impact(Adapter->dip,
3963 			    DDI_SERVICE_RESTORED);
3964 			e1000g_timer_tx_resched(Adapter);
3965 		} else
3966 			ddi_fm_service_impact(Adapter->dip,
3967 			    DDI_SERVICE_LOST);
3968 		return;
3969 	}
3970 
3971 	if (e1000g_stall_check(Adapter)) {
3972 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
3973 		    "Tx stall detected. Activate automatic recovery.\n");
3974 		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL);
3975 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
3976 		Adapter->reset_count++;
3977 		if (e1000g_reset_adapter(Adapter)) {
3978 			ddi_fm_service_impact(Adapter->dip,
3979 			    DDI_SERVICE_RESTORED);
3980 			e1000g_timer_tx_resched(Adapter);
3981 		}
3982 		return;
3983 	}
3984 
3985 	link_changed = B_FALSE;
3986 	rw_enter(&Adapter->chip_lock, RW_READER);
3987 	if (Adapter->link_complete)
3988 		link_changed = e1000g_link_check(Adapter);
3989 	rw_exit(&Adapter->chip_lock);
3990 
3991 	if (link_changed) {
3992 		if (!Adapter->reset_flag)
3993 			mac_link_update(Adapter->mh, Adapter->link_state);
3994 		if (Adapter->link_state == LINK_STATE_UP)
3995 			Adapter->reset_flag = B_FALSE;
3996 	}
3997 	/*
3998 	 * Workaround for esb2. Data stuck in fifo on a link
3999 	 * down event. Reset the adapter to recover it.
4000 	 */
4001 	if (Adapter->esb2_workaround) {
4002 		Adapter->esb2_workaround = B_FALSE;
4003 		(void) e1000g_reset_adapter(Adapter);
4004 		return;
4005 	}
4006 
4007 	/*
4008 	 * With 82571 controllers, any locally administered address will
4009 	 * be overwritten when there is a reset on the other port.
4010 	 * Detect this circumstance and correct it.
4011 	 */
4012 	if ((hw->mac.type == e1000_82571) &&
4013 	    (e1000_get_laa_state_82571(hw) == B_TRUE)) {
4014 		ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0);
4015 		ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1);
4016 
4017 		ether_addr.reg.low = ntohl(ether_addr.reg.low);
4018 		ether_addr.reg.high = ntohl(ether_addr.reg.high);
4019 
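		/*
		 * Note the byte order: after the swap above, the address
		 * bytes read back from the RA registers are compared against
		 * hw->mac.addr in reverse index order.
		 */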
4020 		if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) ||
4021 		    (ether_addr.mac.addr[4] != hw->mac.addr[1]) ||
4022 		    (ether_addr.mac.addr[3] != hw->mac.addr[2]) ||
4023 		    (ether_addr.mac.addr[2] != hw->mac.addr[3]) ||
4024 		    (ether_addr.mac.addr[1] != hw->mac.addr[4]) ||
4025 		    (ether_addr.mac.addr[0] != hw->mac.addr[5])) {
4026 			e1000_rar_set(hw, hw->mac.addr, 0);
4027 		}
4028 	}
4029 
4030 	/*
4031 	 * Long TTL workaround for 82541/82547
4032 	 */
4033 	(void) e1000_igp_ttl_workaround_82547(hw);
4034 
4035 	/*
4036 	 * Check for Adaptive IFS settings. If there are lots of collisions,
4037 	 * change the value in steps.
4038 	 * These properties should only be set for 10/100.
4039 	 */
4040 	if ((hw->phy.media_type == e1000_media_type_copper) &&
4041 	    ((Adapter->link_speed == SPEED_100) ||
4042 	    (Adapter->link_speed == SPEED_10))) {
4043 		e1000_update_adaptive(hw);
4044 	}
4045 	/*
4046 	 * Set Timer Interrupts
4047 	 */
4048 	E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
4049 
4050 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
4051 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4052 	else
4053 		e1000g_timer_tx_resched(Adapter);
4054 
4055 	restart_watchdog_timer(Adapter);
4056 }
4057 
4058 /*
4059  * The function e1000g_link_timer() is called when the timer for link setup
4060  * has expired, which indicates that the link setup is complete. The link
4061  * state is not updated until the link setup is complete, and it is not
4062  * sent to the upper layer through mac_link_update() in this function.
4063  * Instead, it will be updated in the local timer routine or the
4064  * interrupt service routine after the interface is started (plumbed).
4065  */
4066 static void
4067 e1000g_link_timer(void *arg)
4068 {
4069 	struct e1000g *Adapter = (struct e1000g *)arg;
4070 
4071 	mutex_enter(&Adapter->link_lock);
4072 	Adapter->link_complete = B_TRUE;
4073 	Adapter->link_tid = 0;
4074 	mutex_exit(&Adapter->link_lock);
4075 }
4076 
4077 /*
4078  * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf
4079  *
4080  * This function reads the forced speed and duplex for 10/100 Mbps speeds
4081  * and also for 1000 Mbps speeds from the e1000g.conf file.
4082  */
4083 static void
4084 e1000g_force_speed_duplex(struct e1000g *Adapter)
4085 {
4086 	int forced;
4087 	struct e1000_mac_info *mac = &Adapter->shared.mac;
4088 	struct e1000_phy_info *phy = &Adapter->shared.phy;
4089 
4090 	/*
4091 	 * get value out of config file
4092 	 */
4093 	forced = e1000g_get_prop(Adapter, "ForceSpeedDuplex",
4094 	    GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY);
4095 
4096 	switch (forced) {
4097 	case GDIAG_10_HALF:
4098 		/*
4099 		 * Disable Auto Negotiation
4100 		 */
4101 		mac->autoneg = B_FALSE;
4102 		mac->forced_speed_duplex = ADVERTISE_10_HALF;
4103 		break;
4104 	case GDIAG_10_FULL:
4105 		/*
4106 		 * Disable Auto Negotiation
4107 		 */
4108 		mac->autoneg = B_FALSE;
4109 		mac->forced_speed_duplex = ADVERTISE_10_FULL;
4110 		break;
4111 	case GDIAG_100_HALF:
4112 		/*
4113 		 * Disable Auto Negotiation
4114 		 */
4115 		mac->autoneg = B_FALSE;
4116 		mac->forced_speed_duplex = ADVERTISE_100_HALF;
4117 		break;
4118 	case GDIAG_100_FULL:
4119 		/*
4120 		 * Disable Auto Negotiation
4121 		 */
4122 		mac->autoneg = B_FALSE;
4123 		mac->forced_speed_duplex = ADVERTISE_100_FULL;
4124 		break;
4125 	case GDIAG_1000_FULL:
4126 		/*
4127 		 * The gigabit spec requires autonegotiation.  Therefore,
4128 		 * when the user wants to force the speed to 1000Mbps, we
4129 		 * enable AutoNeg, but only allow the hardware to advertise
4130 		 * 1000Mbps.  This is different from 10/100 operation, where
4131 		 * we are allowed to link without any negotiation.
4132 		 */
4133 		mac->autoneg = B_TRUE;
4134 		phy->autoneg_advertised = ADVERTISE_1000_FULL;
4135 		break;
4136 	default:	/* obey the setting of AutoNegAdvertised */
4137 		mac->autoneg = B_TRUE;
4138 		phy->autoneg_advertised =
4139 		    (uint16_t)e1000g_get_prop(Adapter, "AutoNegAdvertised",
4140 		    0, AUTONEG_ADVERTISE_SPEED_DEFAULT,
4141 		    AUTONEG_ADVERTISE_SPEED_DEFAULT);
4142 		break;
4143 	}	/* switch */
4144 }
4145 
4146 /*
4147  * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf
4148  *
4149  * This function reads MaxFrameSize from e1000g.conf
4150  */
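/*
 * MaxFrameSize selects the default MTU (see the switch below):
 *	0 - standard ETHERMTU (1500), the default
 *	1 - jumbo MTU derived from a 4K frame
 *	2 - jumbo MTU derived from an 8K frame
 *	3 - jumbo MTU derived from a 16K frame (or MAXIMUM_MTU on 82571
 *	    and later)
 */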
4151 static void
4152 e1000g_get_max_frame_size(struct e1000g *Adapter)
4153 {
4154 	int max_frame;
4155 	struct e1000_mac_info *mac = &Adapter->shared.mac;
4156 	struct e1000_phy_info *phy = &Adapter->shared.phy;
4157 
4158 	/*
4159 	 * get value out of config file
4160 	 */
4161 	max_frame = e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0);
4162 
4163 	switch (max_frame) {
4164 	case 0:
4165 		Adapter->default_mtu = ETHERMTU;
4166 		break;
4167 	/*
4168 	 * To avoid excessive memory allocation for rx buffers,
4169 	 * E1000G_IPALIGNPRESERVEROOM bytes are reserved.
4170 	 */
4171 	case 1:
4172 		Adapter->default_mtu = FRAME_SIZE_UPTO_4K -
4173 		    sizeof (struct ether_vlan_header) - ETHERFCSL -
4174 		    E1000G_IPALIGNPRESERVEROOM;
4175 		break;
4176 	case 2:
4177 		Adapter->default_mtu = FRAME_SIZE_UPTO_8K -
4178 		    sizeof (struct ether_vlan_header) - ETHERFCSL -
4179 		    E1000G_IPALIGNPRESERVEROOM;
4180 		break;
4181 	case 3:
4182 		if (mac->type >= e1000_82571)
4183 			Adapter->default_mtu = MAXIMUM_MTU;
4184 		else
4185 			Adapter->default_mtu = FRAME_SIZE_UPTO_16K -
4186 			    sizeof (struct ether_vlan_header) - ETHERFCSL -
4187 			    E1000G_IPALIGNPRESERVEROOM;
4188 		break;
4189 	default:
4190 		Adapter->default_mtu = ETHERMTU;
4191 		break;
4192 	}	/* switch */
4193 
4194 	Adapter->max_frame_size = Adapter->default_mtu +
4195 	    sizeof (struct ether_vlan_header) + ETHERFCSL;
4196 
4197 	/* ich8 does not do jumbo frames */
4198 	if (mac->type == e1000_ich8lan) {
4199 		Adapter->default_mtu = ETHERMTU;
4200 		Adapter->max_frame_size = ETHERMTU +
4201 		    sizeof (struct ether_vlan_header) + ETHERFCSL;
4202 	}
4203 
4204 	/* ich9 does not do jumbo frames on one phy type */
4205 	if ((mac->type == e1000_ich9lan) &&
4206 	    (phy->type == e1000_phy_ife)) {
4207 		Adapter->default_mtu = ETHERMTU;
4208 		Adapter->max_frame_size = ETHERMTU +
4209 		    sizeof (struct ether_vlan_header) + ETHERFCSL;
4210 	}
4211 }
4212 
4213 static void
4214 arm_watchdog_timer(struct e1000g *Adapter)
4215 {
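	/* Re-arm the local (watchdog) timer to fire one second from now. */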
4216 	Adapter->watchdog_tid =
4217 	    timeout(e1000g_local_timer,
4218 	    (void *)Adapter, 1 * drv_usectohz(1000000));
4219 }
4220 #pragma inline(arm_watchdog_timer)
4221 
4222 static void
4223 enable_watchdog_timer(struct e1000g *Adapter)
4224 {
4225 	mutex_enter(&Adapter->watchdog_lock);
4226 
4227 	if (!Adapter->watchdog_timer_enabled) {
4228 		Adapter->watchdog_timer_enabled = B_TRUE;
4229 		Adapter->watchdog_timer_started = B_TRUE;
4230 		arm_watchdog_timer(Adapter);
4231 	}
4232 
4233 	mutex_exit(&Adapter->watchdog_lock);
4234 }
4235 
4236 static void
4237 disable_watchdog_timer(struct e1000g *Adapter)
4238 {
4239 	timeout_id_t tid;
4240 
4241 	mutex_enter(&Adapter->watchdog_lock);
4242 
4243 	Adapter->watchdog_timer_enabled = B_FALSE;
4244 	Adapter->watchdog_timer_started = B_FALSE;
4245 	tid = Adapter->watchdog_tid;
4246 	Adapter->watchdog_tid = 0;
4247 
4248 	mutex_exit(&Adapter->watchdog_lock);
4249 
4250 	if (tid != 0)
4251 		(void) untimeout(tid);
4252 }
4253 
4254 static void
4255 start_watchdog_timer(struct e1000g *Adapter)
4256 {
4257 	mutex_enter(&Adapter->watchdog_lock);
4258 
4259 	if (Adapter->watchdog_timer_enabled) {
4260 		if (!Adapter->watchdog_timer_started) {
4261 			Adapter->watchdog_timer_started = B_TRUE;
4262 			arm_watchdog_timer(Adapter);
4263 		}
4264 	}
4265 
4266 	mutex_exit(&Adapter->watchdog_lock);
4267 }
4268 
4269 static void
4270 restart_watchdog_timer(struct e1000g *Adapter)
4271 {
4272 	mutex_enter(&Adapter->watchdog_lock);
4273 
4274 	if (Adapter->watchdog_timer_started)
4275 		arm_watchdog_timer(Adapter);
4276 
4277 	mutex_exit(&Adapter->watchdog_lock);
4278 }
4279 
4280 static void
4281 stop_watchdog_timer(struct e1000g *Adapter)
4282 {
4283 	timeout_id_t tid;
4284 
4285 	mutex_enter(&Adapter->watchdog_lock);
4286 
4287 	Adapter->watchdog_timer_started = B_FALSE;
4288 	tid = Adapter->watchdog_tid;
4289 	Adapter->watchdog_tid = 0;
4290 
4291 	mutex_exit(&Adapter->watchdog_lock);
4292 
4293 	if (tid != 0)
4294 		(void) untimeout(tid);
4295 }
4296 
4297 static void
4298 stop_link_timer(struct e1000g *Adapter)
4299 {
4300 	timeout_id_t tid;
4301 
4302 	/* Disable the link timer */
4303 	mutex_enter(&Adapter->link_lock);
4304 
4305 	tid = Adapter->link_tid;
4306 	Adapter->link_tid = 0;
4307 
4308 	mutex_exit(&Adapter->link_lock);
4309 
4310 	if (tid != 0)
4311 		(void) untimeout(tid);
4312 }
4313 
4314 static void
4315 stop_82547_timer(e1000g_tx_ring_t *tx_ring)
4316 {
4317 	timeout_id_t tid;
4318 
4319 	/* Disable the tx timer for 82547 chipset */
4320 	mutex_enter(&tx_ring->tx_lock);
4321 
4322 	tx_ring->timer_enable_82547 = B_FALSE;
4323 	tid = tx_ring->timer_id_82547;
4324 	tx_ring->timer_id_82547 = 0;
4325 
4326 	mutex_exit(&tx_ring->tx_lock);
4327 
4328 	if (tid != 0)
4329 		(void) untimeout(tid);
4330 }
4331 
4332 void
4333 e1000g_clear_interrupt(struct e1000g *Adapter)
4334 {
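	/* Disable (mask) every interrupt cause except RXSEQ. */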
4335 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC,
4336 	    0xffffffff & ~E1000_IMS_RXSEQ);
4337 }
4338 
4339 void
4340 e1000g_mask_interrupt(struct e1000g *Adapter)
4341 {
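	/*
	 * Despite the name, writing IMS enables (unmasks) the standard
	 * interrupt causes; TXDW is enabled separately below only when
	 * tx interrupts are in use.
	 */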
4342 	E1000_WRITE_REG(&Adapter->shared, E1000_IMS,
4343 	    IMS_ENABLE_MASK & ~E1000_IMS_TXDW);
4344 
4345 	if (Adapter->tx_intr_enable)
4346 		e1000g_mask_tx_interrupt(Adapter);
4347 }
4348 
4349 /*
4350  * This routine is called by e1000g_quiesce(), and therefore must not block.
4351  */
4352 void
4353 e1000g_clear_all_interrupts(struct e1000g *Adapter)
4354 {
4355 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff);
4356 }
4357 
4358 void
4359 e1000g_mask_tx_interrupt(struct e1000g *Adapter)
4360 {
4361 	E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW);
4362 }
4363 
4364 void
4365 e1000g_clear_tx_interrupt(struct e1000g *Adapter)
4366 {
4367 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW);
4368 }
4369 
4370 static void
4371 e1000g_smartspeed(struct e1000g *Adapter)
4372 {
4373 	struct e1000_hw *hw = &Adapter->shared;
4374 	uint16_t phy_status;
4375 	uint16_t phy_ctrl;
4376 
4377 	/*
4378 	 * If this isn't a Tabor/TanaX part (IGP PHY), or we're not autoneg'ing,
4379 	 * or we're not advertising 1000Full, we don't even use the workaround.
4380 	 */
4381 	if ((hw->phy.type != e1000_phy_igp) ||
4382 	    !hw->mac.autoneg ||
4383 	    !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL))
4384 		return;
4385 
4386 	/*
4387 	 * True if this is the first call of this function or after every
4388 	 * 30 seconds of not having link
4389 	 */
4390 	if (Adapter->smartspeed == 0) {
4391 		/*
4392 		 * If Master/Slave config fault is asserted twice, we
4393 		 * assume back-to-back
4394 		 */
4395 		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4396 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4397 			return;
4398 
4399 		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4400 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4401 			return;
4402 		/*
4403 		 * We're assuming back-to-back because our status register
4404 		 * insists there's a fault in the master/slave
4405 		 * relationship that was "negotiated".
4406 		 */
4407 		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4408 		/*
4409 		 * Is the phy configured for manual configuration of
4410 		 * master/slave?
4411 		 */
4412 		if (phy_ctrl & CR_1000T_MS_ENABLE) {
4413 			/*
4414 			 * Yes.  Then disable manual configuration (enable
4415 			 * auto configuration) of master/slave
4416 			 */
4417 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
4418 			(void) e1000_write_phy_reg(hw,
4419 			    PHY_1000T_CTRL, phy_ctrl);
4420 			/*
4421 			 * Effectively starting the clock
4422 			 */
4423 			Adapter->smartspeed++;
4424 			/*
4425 			 * Restart autonegotiation
4426 			 */
4427 			if (!e1000_phy_setup_autoneg(hw) &&
4428 			    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
4429 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4430 				    MII_CR_RESTART_AUTO_NEG);
4431 				(void) e1000_write_phy_reg(hw,
4432 				    PHY_CONTROL, phy_ctrl);
4433 			}
4434 		}
4435 		return;
4436 		/*
4437 		 * Has 6 seconds transpired still without link? Remember,
4438 		 * you should reset the smartspeed counter once you obtain
4439 		 * link
4440 		 */
4441 	} else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4442 		/*
4443 		 * Yes.  Remember, we did at the start determine that
4444 		 * there's a master/slave configuration fault, so we're
4445 		 * still assuming there's someone on the other end, but we
4446 		 * just haven't yet been able to talk to it. We then
4447 		 * re-enable auto configuration of master/slave to see if
4448 		 * we're running 2/3 pair cables.
4449 		 */
4450 		/*
4451 		 * If still no link, perhaps using 2/3 pair cable
4452 		 */
4453 		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4454 		phy_ctrl |= CR_1000T_MS_ENABLE;
4455 		(void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4456 		/*
4457 		 * Restart autoneg with phy enabled for manual
4458 		 * configuration of master/slave
4459 		 */
4460 		if (!e1000_phy_setup_autoneg(hw) &&
4461 		    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
4462 			phy_ctrl |=
4463 			    (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
4464 			(void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
4465 		}
4466 		/*
4467 		 * Hopefully, there are no more faults and we've obtained
4468 		 * link as a result.
4469 		 */
4470 	}
4471 	/*
4472 	 * Restart process after E1000_SMARTSPEED_MAX iterations (30
4473 	 * seconds)
4474 	 */
4475 	if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4476 		Adapter->smartspeed = 0;
4477 }
4478 
4479 static boolean_t
4480 is_valid_mac_addr(uint8_t *mac_addr)
4481 {
4482 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
4483 	const uint8_t addr_test2[6] =
4484 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4485 
4486 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
4487 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
4488 		return (B_FALSE);
4489 
4490 	return (B_TRUE);
4491 }
4492 
4493 /*
4494  * e1000g_stall_check - check for tx stall
4495  *
4496  * This function checks if the adapter is stalled (in transmit).
4497  *
4498  * It is called each time the watchdog timeout is invoked.
4499  * If the transmit descriptor reclaim continuously fails,
4500  * the watchdog value will increment by 1. If the watchdog
4501  * value exceeds the threshold, the adapter is assumed to
4502  * have stalled and need to be reset.
4503  */
4504 static boolean_t
4505 e1000g_stall_check(struct e1000g *Adapter)
4506 {
4507 	e1000g_tx_ring_t *tx_ring;
4508 
4509 	tx_ring = Adapter->tx_ring;
4510 
4511 	if (Adapter->link_state != LINK_STATE_UP)
4512 		return (B_FALSE);
4513 
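	/*
	 * Reclaim completed tx descriptors; the recycle routine is expected
	 * to set stall_flag when reclamation has been blocked for longer
	 * than stall_threshold.
	 */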
4514 	(void) e1000g_recycle(tx_ring);
4515 
4516 	if (Adapter->stall_flag) {
4517 		Adapter->stall_flag = B_FALSE;
4518 		Adapter->reset_flag = B_TRUE;
4519 		return (B_TRUE);
4520 	}
4521 
4522 	return (B_FALSE);
4523 }
4524 
4525 #ifdef E1000G_DEBUG
4526 static enum ioc_reply
4527 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp)
4528 {
4529 	void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd);
4530 	e1000g_peekpoke_t *ppd;
4531 	uint64_t mem_va;
4532 	uint64_t maxoff;
4533 	boolean_t peek;
4534 
4535 	switch (iocp->ioc_cmd) {
4536 
4537 	case E1000G_IOC_REG_PEEK:
4538 		peek = B_TRUE;
4539 		break;
4540 
4541 	case E1000G_IOC_REG_POKE:
4542 		peek = B_FALSE;
4543 		break;
4544 
4545 	default:
4546 		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
4547 		    "e1000g_diag_ioctl: invalid ioctl command 0x%X\n",
4548 		    iocp->ioc_cmd);
4549 		return (IOC_INVAL);
4550 	}
4551 
4552 	/*
4553 	 * Validate format of ioctl
4554 	 */
4555 	if (iocp->ioc_count != sizeof (e1000g_peekpoke_t))
4556 		return (IOC_INVAL);
4557 	if (mp->b_cont == NULL)
4558 		return (IOC_INVAL);
4559 
4560 	ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr;
4561 
4562 	/*
4563 	 * Validate request parameters
4564 	 */
4565 	switch (ppd->pp_acc_space) {
4566 
4567 	default:
4568 		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
4569 		    "e1000g_diag_ioctl: invalid access space 0x%X\n",
4570 		    ppd->pp_acc_space);
4571 		return (IOC_INVAL);
4572 
4573 	case E1000G_PP_SPACE_REG:
4574 		/*
4575 		 * Memory-mapped I/O space
4576 		 */
4577 		ASSERT(ppd->pp_acc_size == 4);
4578 		if (ppd->pp_acc_size != 4)
4579 			return (IOC_INVAL);
4580 
4581 		if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
4582 			return (IOC_INVAL);
4583 
4584 		mem_va = 0;
4585 		maxoff = 0x10000;
4586 		ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg;
4587 		break;
4588 
4589 	case E1000G_PP_SPACE_E1000G:
4590 		/*
4591 		 * E1000g data structure!
4592 		 */
4593 		mem_va = (uintptr_t)e1000gp;
4594 		maxoff = sizeof (struct e1000g);
4595 		ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem;
4596 		break;
4597 
4598 	}
4599 
4600 	if (ppd->pp_acc_offset >= maxoff)
4601 		return (IOC_INVAL);
4602 
4603 	if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff)
4604 		return (IOC_INVAL);
4605 
4606 	/*
4607 	 * All OK - go!
4608 	 */
4609 	ppd->pp_acc_offset += mem_va;
4610 	(*ppfn)(e1000gp, ppd);
4611 	return (peek ? IOC_REPLY : IOC_ACK);
4612 }
4613 
4614 static void
4615 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4616 {
4617 	ddi_acc_handle_t handle;
4618 	uint32_t *regaddr;
4619 
4620 	handle = e1000gp->osdep.reg_handle;
4621 	regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
4622 	    (uintptr_t)ppd->pp_acc_offset);
4623 
4624 	ppd->pp_acc_data = ddi_get32(handle, regaddr);
4625 }
4626 
4627 static void
4628 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4629 {
4630 	ddi_acc_handle_t handle;
4631 	uint32_t *regaddr;
4632 	uint32_t value;
4633 
4634 	handle = e1000gp->osdep.reg_handle;
4635 	regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
4636 	    (uintptr_t)ppd->pp_acc_offset);
4637 	value = (uint32_t)ppd->pp_acc_data;
4638 
4639 	ddi_put32(handle, regaddr, value);
4640 }
4641 
4642 static void
4643 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4644 {
4645 	uint64_t value;
4646 	void *vaddr;
4647 
4648 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
4649 
4650 	switch (ppd->pp_acc_size) {
4651 	case 1:
4652 		value = *(uint8_t *)vaddr;
4653 		break;
4654 
4655 	case 2:
4656 		value = *(uint16_t *)vaddr;
4657 		break;
4658 
4659 	case 4:
4660 		value = *(uint32_t *)vaddr;
4661 		break;
4662 
4663 	case 8:
4664 		value = *(uint64_t *)vaddr;
4665 		break;
4666 	}
4667 
4668 	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
4669 	    "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n",
4670 	    (void *)e1000gp, (void *)ppd, value, vaddr);
4671 
4672 	ppd->pp_acc_data = value;
4673 }
4674 
4675 static void
4676 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
4677 {
4678 	uint64_t value;
4679 	void *vaddr;
4680 
4681 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
4682 	value = ppd->pp_acc_data;
4683 
4684 	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
4685 	    "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n",
4686 	    (void *)e1000gp, (void *)ppd, value, vaddr);
4687 
4688 	switch (ppd->pp_acc_size) {
4689 	case 1:
4690 		*(uint8_t *)vaddr = (uint8_t)value;
4691 		break;
4692 
4693 	case 2:
4694 		*(uint16_t *)vaddr = (uint16_t)value;
4695 		break;
4696 
4697 	case 4:
4698 		*(uint32_t *)vaddr = (uint32_t)value;
4699 		break;
4700 
4701 	case 8:
4702 		*(uint64_t *)vaddr = (uint64_t)value;
4703 		break;
4704 	}
4705 }
4706 #endif
4707 
4708 /*
4709  * Loopback Support
4710  */
4711 static lb_property_t lb_normal =
4712 	{ normal,	"normal",	E1000G_LB_NONE		};
4713 static lb_property_t lb_external1000 =
4714 	{ external,	"1000Mbps",	E1000G_LB_EXTERNAL_1000	};
4715 static lb_property_t lb_external100 =
4716 	{ external,	"100Mbps",	E1000G_LB_EXTERNAL_100	};
4717 static lb_property_t lb_external10 =
4718 	{ external,	"10Mbps",	E1000G_LB_EXTERNAL_10	};
4719 static lb_property_t lb_phy =
4720 	{ internal,	"PHY",		E1000G_LB_INTERNAL_PHY	};
4721 
4722 static enum ioc_reply
4723 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp)
4724 {
4725 	lb_info_sz_t *lbsp;
4726 	lb_property_t *lbpp;
4727 	struct e1000_hw *hw;
4728 	uint32_t *lbmp;
4729 	uint32_t size;
4730 	uint32_t value;
4731 
4732 	hw = &Adapter->shared;
4733 
4734 	if (mp->b_cont == NULL)
4735 		return (IOC_INVAL);
4736 
4737 	if (!e1000g_check_loopback_support(hw)) {
4738 		e1000g_log(NULL, CE_WARN,
4739 		    "Loopback is not supported on e1000g%d", Adapter->instance);
4740 		return (IOC_INVAL);
4741 	}
4742 
4743 	switch (iocp->ioc_cmd) {
4744 	default:
4745 		return (IOC_INVAL);
4746 
4747 	case LB_GET_INFO_SIZE:
4748 		size = sizeof (lb_info_sz_t);
4749 		if (iocp->ioc_count != size)
4750 			return (IOC_INVAL);
4751 
4752 		rw_enter(&Adapter->chip_lock, RW_WRITER);
4753 		e1000g_get_phy_state(Adapter);
4754 
4755 		/*
4756 		 * Workaround for hardware faults. In order to get a stable
4757 		 * state of the phy, we will wait for a specific interval and
4758 		 * try again. The time delay is an empirical value based
4759 		 * on our testing.
4760 		 */
4761 		msec_delay(100);
4762 		e1000g_get_phy_state(Adapter);
4763 		rw_exit(&Adapter->chip_lock);
4764 
4765 		value = sizeof (lb_normal);
4766 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
4767 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
4768 		    (hw->phy.media_type == e1000_media_type_fiber) ||
4769 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
4770 			value += sizeof (lb_phy);
4771 			switch (hw->mac.type) {
4772 			case e1000_82571:
4773 			case e1000_82572:
4774 			case e1000_80003es2lan:
4775 				value += sizeof (lb_external1000);
4776 				break;
4777 			}
4778 		}
4779 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
4780 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
4781 			value += sizeof (lb_external100);
4782 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
4783 			value += sizeof (lb_external10);
4784 
4785 		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
4786 		*lbsp = value;
4787 		break;
4788 
4789 	case LB_GET_INFO:
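		/*
		 * The capability walk below must mirror the size computation
		 * in the LB_GET_INFO_SIZE case above.
		 */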
4790 		value = sizeof (lb_normal);
4791 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
4792 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
4793 		    (hw->phy.media_type == e1000_media_type_fiber) ||
4794 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
4795 			value += sizeof (lb_phy);
4796 			switch (hw->mac.type) {
4797 			case e1000_82571:
4798 			case e1000_82572:
4799 			case e1000_80003es2lan:
4800 				value += sizeof (lb_external1000);
4801 				break;
4802 			}
4803 		}
4804 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
4805 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
4806 			value += sizeof (lb_external100);
4807 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
4808 			value += sizeof (lb_external10);
4809 
4810 		size = value;
4811 		if (iocp->ioc_count != size)
4812 			return (IOC_INVAL);
4813 
4814 		value = 0;
4815 		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
4816 		lbpp[value++] = lb_normal;
4817 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
4818 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
4819 		    (hw->phy.media_type == e1000_media_type_fiber) ||
4820 		    (hw->phy.media_type == e1000_media_type_internal_serdes)) {
4821 			lbpp[value++] = lb_phy;
4822 			switch (hw->mac.type) {
4823 			case e1000_82571:
4824 			case e1000_82572:
4825 			case e1000_80003es2lan:
4826 				lbpp[value++] = lb_external1000;
4827 				break;
4828 			}
4829 		}
4830 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
4831 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
4832 			lbpp[value++] = lb_external100;
4833 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
4834 			lbpp[value++] = lb_external10;
4835 		break;
4836 
4837 	case LB_GET_MODE:
4838 		size = sizeof (uint32_t);
4839 		if (iocp->ioc_count != size)
4840 			return (IOC_INVAL);
4841 
4842 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
4843 		*lbmp = Adapter->loopback_mode;
4844 		break;
4845 
4846 	case LB_SET_MODE:
4847 		size = 0;
4848 		if (iocp->ioc_count != sizeof (uint32_t))
4849 			return (IOC_INVAL);
4850 
4851 		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
4852 		if (!e1000g_set_loopback_mode(Adapter, *lbmp))
4853 			return (IOC_INVAL);
4854 		break;
4855 	}
4856 
4857 	iocp->ioc_count = size;
4858 	iocp->ioc_error = 0;
4859 
4860 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
4861 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4862 		return (IOC_INVAL);
4863 	}
4864 
4865 	return (IOC_REPLY);
4866 }
4867 
4868 static boolean_t
4869 e1000g_check_loopback_support(struct e1000_hw *hw)
4870 {
4871 	switch (hw->mac.type) {
4872 	case e1000_82540:
4873 	case e1000_82545:
4874 	case e1000_82545_rev_3:
4875 	case e1000_82546:
4876 	case e1000_82546_rev_3:
4877 	case e1000_82541:
4878 	case e1000_82541_rev_2:
4879 	case e1000_82547:
4880 	case e1000_82547_rev_2:
4881 	case e1000_82571:
4882 	case e1000_82572:
4883 	case e1000_82573:
4884 	case e1000_82574:
4885 	case e1000_80003es2lan:
4886 	case e1000_ich9lan:
4887 	case e1000_ich10lan:
4888 		return (B_TRUE);
4889 	}
4890 	return (B_FALSE);
4891 }
4892 
4893 static boolean_t
4894 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode)
4895 {
4896 	struct e1000_hw *hw;
4897 	int i, times;
4898 	boolean_t link_up;
4899 
4900 	if (mode == Adapter->loopback_mode)
4901 		return (B_TRUE);
4902 
4903 	hw = &Adapter->shared;
4904 	times = 0;
4905 
4906 	Adapter->loopback_mode = mode;
4907 
4908 	if (mode == E1000G_LB_NONE) {
4909 		/* Reset the chip */
4910 		hw->phy.autoneg_wait_to_complete = B_TRUE;
4911 		(void) e1000g_reset_adapter(Adapter);
4912 		hw->phy.autoneg_wait_to_complete = B_FALSE;
4913 		return (B_TRUE);
4914 	}
4915 
4916 again:
4917 
4918 	rw_enter(&Adapter->chip_lock, RW_WRITER);
4919 
4920 	switch (mode) {
4921 	default:
4922 		rw_exit(&Adapter->chip_lock);
4923 		return (B_FALSE);
4924 
4925 	case E1000G_LB_EXTERNAL_1000:
4926 		e1000g_set_external_loopback_1000(Adapter);
4927 		break;
4928 
4929 	case E1000G_LB_EXTERNAL_100:
4930 		e1000g_set_external_loopback_100(Adapter);
4931 		break;
4932 
4933 	case E1000G_LB_EXTERNAL_10:
4934 		e1000g_set_external_loopback_10(Adapter);
4935 		break;
4936 
4937 	case E1000G_LB_INTERNAL_PHY:
4938 		e1000g_set_internal_loopback(Adapter);
4939 		break;
4940 	}
4941 
4942 	times++;
4943 
4944 	rw_exit(&Adapter->chip_lock);
4945 
4946 	/* Wait for link up */
4947 	for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--)
4948 		msec_delay(100);
4949 
4950 	rw_enter(&Adapter->chip_lock, RW_WRITER);
4951 
4952 	link_up = e1000g_link_up(Adapter);
4953 
4954 	rw_exit(&Adapter->chip_lock);
4955 
4956 	if (!link_up) {
4957 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
4958 		    "Failed to get the link up");
4959 		if (times < 2) {
4960 			/* Reset the link */
4961 			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
4962 			    "Reset the link ...");
4963 			(void) e1000g_reset_adapter(Adapter);
4964 			goto again;
4965 		}
4966 	}
4967 
4968 	return (B_TRUE);
4969 }
4970 
4971 /*
4972  * The following loopback settings are from Intel's technical
4973  * document - "How To Loopback". All the register settings and
4974  * time delay values are taken directly from the document;
4975  * no further explanation is available.
4976  */
4977 static void
4978 e1000g_set_internal_loopback(struct e1000g *Adapter)
4979 {
4980 	struct e1000_hw *hw;
4981 	uint32_t ctrl;
4982 	uint32_t status;
4983 	uint16_t phy_ctrl;
4984 	uint16_t phy_reg;
4985 	uint32_t txcw;
4986 
4987 	hw = &Adapter->shared;
4988 
4989 	/* Disable Smart Power Down */
4990 	phy_spd_state(hw, B_FALSE);
4991 
4992 	(void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
4993 	phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10);
4994 	phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000;
4995 
4996 	switch (hw->mac.type) {
4997 	case e1000_82540:
4998 	case e1000_82545:
4999 	case e1000_82545_rev_3:
5000 	case e1000_82546:
5001 	case e1000_82546_rev_3:
5002 	case e1000_82573:
5003 		/* Auto-MDI/MDIX off */
5004 		(void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
5005 		/* Reset PHY to update Auto-MDI/MDIX */
5006 		(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5007 		    phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN);
5008 		/* Reset PHY to auto-neg off and force 1000 */
5009 		(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5010 		    phy_ctrl | MII_CR_RESET);
5011 		/*
5012 		 * Disable PHY receiver for 82540/545/546 and 82573 Family.
5013 		 * See comments above e1000g_set_internal_loopback() for the
5014 		 * background.
5015 		 */
5016 		(void) e1000_write_phy_reg(hw, 29, 0x001F);
5017 		(void) e1000_write_phy_reg(hw, 30, 0x8FFC);
5018 		(void) e1000_write_phy_reg(hw, 29, 0x001A);
5019 		(void) e1000_write_phy_reg(hw, 30, 0x8FF0);
5020 		break;
5021 	case e1000_80003es2lan:
5022 		/* Force Link Up */
5023 		(void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
5024 		    0x1CC);
5025 		/* Sets PCS loopback at 1Gbs */
5026 		(void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
5027 		    0x1046);
5028 		break;
5029 	}
5030 
5031 	/*
5032 	 * The following registers should be set for e1000_phy_bm phy type.
5033 	 * e1000_82574, e1000_ich10lan and some e1000_ich9lan use this phy.
5034 	 * For others, we do not need to set these registers.
5035 	 */
5036 	if (hw->phy.type == e1000_phy_bm) {
5037 		/* Set Default MAC Interface speed to 1GB */
5038 		(void) e1000_read_phy_reg(hw, PHY_REG(2, 21), &phy_reg);
5039 		phy_reg &= ~0x0007;
5040 		phy_reg |= 0x006;
5041 		(void) e1000_write_phy_reg(hw, PHY_REG(2, 21), phy_reg);
5042 		/* Assert SW reset for above settings to take effect */
5043 		(void) e1000_phy_commit(hw);
5044 		msec_delay(1);
5045 		/* Force Full Duplex */
5046 		(void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5047 		(void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5048 		    phy_reg | 0x000C);
5049 		/* Set Link Up (in force link) */
5050 		(void) e1000_read_phy_reg(hw, PHY_REG(776, 16), &phy_reg);
5051 		(void) e1000_write_phy_reg(hw, PHY_REG(776, 16),
5052 		    phy_reg | 0x0040);
5053 		/* Force Link */
5054 		(void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5055 		(void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5056 		    phy_reg | 0x0040);
5057 		/* Set Early Link Enable */
5058 		(void) e1000_read_phy_reg(hw, PHY_REG(769, 20), &phy_reg);
5059 		(void) e1000_write_phy_reg(hw, PHY_REG(769, 20),
5060 		    phy_reg | 0x0400);
5061 	}
5062 
5063 	/* Set loopback */
5064 	(void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK);
5065 
5066 	msec_delay(250);
5067 
5068 	/* Now set up the MAC to the same speed/duplex as the PHY. */
5069 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5070 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
5071 	ctrl |= (E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
5072 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
5073 	    E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
5074 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
5075 
5076 	switch (hw->mac.type) {
5077 	case e1000_82540:
5078 	case e1000_82545:
5079 	case e1000_82545_rev_3:
5080 	case e1000_82546:
5081 	case e1000_82546_rev_3:
5082 		/*
5083 		 * For some serdes we'll need to commit the writes now
5084 		 * so that the status is updated on link
5085 		 */
5086 		if (hw->phy.media_type == e1000_media_type_internal_serdes) {
5087 			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5088 			msec_delay(100);
5089 			ctrl = E1000_READ_REG(hw, E1000_CTRL);
5090 		}
5091 
5092 		if (hw->phy.media_type == e1000_media_type_copper) {
5093 			/* Invert Loss of Signal */
5094 			ctrl |= E1000_CTRL_ILOS;
5095 		} else {
5096 			/* Set ILOS on fiber nic if half duplex is detected */
5097 			status = E1000_READ_REG(hw, E1000_STATUS);
5098 			if ((status & E1000_STATUS_FD) == 0)
5099 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5100 		}
5101 		break;
5102 
5103 	case e1000_82571:
5104 	case e1000_82572:
5105 		/*
5106 		 * The fiber/SerDes versions of this adapter do not contain an
5107 		 * accessible PHY. Therefore, loopback beyond MAC must be done
5108 		 * using SerDes analog loopback.
5109 		 */
5110 		if (hw->phy.media_type != e1000_media_type_copper) {
5111 			/* Disable autoneg by setting bit 31 of TXCW to zero */
5112 			txcw = E1000_READ_REG(hw, E1000_TXCW);
5113 			txcw &= ~((uint32_t)1 << 31);
5114 			E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5115 
5116 			/*
5117 			 * Write 0x410 to Serdes Control register
5118 			 * to enable Serdes analog loopback
5119 			 */
5120 			E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5121 			msec_delay(10);
5122 		}
5123 
5124 		status = E1000_READ_REG(hw, E1000_STATUS);
5125 		/* Set ILOS on fiber nic if half duplex is detected */
5126 		if ((hw->phy.media_type == e1000_media_type_fiber) &&
5127 		    ((status & E1000_STATUS_FD) == 0 ||
5128 		    (status & E1000_STATUS_LU) == 0))
5129 			ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5130 		else if (hw->phy.media_type == e1000_media_type_internal_serdes)
5131 			ctrl |= E1000_CTRL_SLU;
5132 		break;
5133 
5134 	case e1000_82573:
5135 		ctrl |= E1000_CTRL_ILOS;
5136 		break;
5137 	case e1000_ich9lan:
5138 	case e1000_ich10lan:
5139 		ctrl |= E1000_CTRL_SLU;
5140 		break;
5141 	}
5142 	if (hw->phy.type == e1000_phy_bm)
5143 		ctrl |= E1000_CTRL_SLU | E1000_CTRL_ILOS;
5144 
5145 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5146 }
5147 
5148 static void
5149 e1000g_set_external_loopback_1000(struct e1000g *Adapter)
5150 {
5151 	struct e1000_hw *hw;
5152 	uint32_t rctl;
5153 	uint32_t ctrl_ext;
5154 	uint32_t ctrl;
5155 	uint32_t status;
5156 	uint32_t txcw;
5157 	uint16_t phydata;
5158 
5159 	hw = &Adapter->shared;
5160 
5161 	/* Disable Smart Power Down */
5162 	phy_spd_state(hw, B_FALSE);
5163 
5164 	switch (hw->mac.type) {
5165 	case e1000_82571:
5166 	case e1000_82572:
5167 		switch (hw->phy.media_type) {
5168 		case e1000_media_type_copper:
5169 			/* Force link up (Must be done before the PHY writes) */
5170 			ctrl = E1000_READ_REG(hw, E1000_CTRL);
5171 			ctrl |= E1000_CTRL_SLU;	/* Force Link Up */
5172 			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5173 
5174 			rctl = E1000_READ_REG(hw, E1000_RCTL);
5175 			rctl |= (E1000_RCTL_EN |
5176 			    E1000_RCTL_SBP |
5177 			    E1000_RCTL_UPE |
5178 			    E1000_RCTL_MPE |
5179 			    E1000_RCTL_LPE |
5180 			    E1000_RCTL_BAM);		/* 0x803E */
5181 			E1000_WRITE_REG(hw, E1000_RCTL, rctl);
5182 
5183 			ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5184 			ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA |
5185 			    E1000_CTRL_EXT_SDP6_DATA |
5186 			    E1000_CTRL_EXT_SDP7_DATA |
5187 			    E1000_CTRL_EXT_SDP4_DIR |
5188 			    E1000_CTRL_EXT_SDP6_DIR |
5189 			    E1000_CTRL_EXT_SDP7_DIR);	/* 0x0DD0 */
5190 			E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5191 
5192 			/*
5193 			 * This sequence tunes the PHY's SDP and other values
5194 			 * that are not customer settable. For background, see
5195 			 * the comments above e1000g_set_internal_loopback().
5196 			 */
5197 			(void) e1000_write_phy_reg(hw, 0x0, 0x140);
5198 			msec_delay(10);
5199 			(void) e1000_write_phy_reg(hw, 0x9, 0x1A00);
5200 			(void) e1000_write_phy_reg(hw, 0x12, 0xC10);
5201 			(void) e1000_write_phy_reg(hw, 0x12, 0x1C10);
5202 			(void) e1000_write_phy_reg(hw, 0x1F37, 0x76);
5203 			(void) e1000_write_phy_reg(hw, 0x1F33, 0x1);
5204 			(void) e1000_write_phy_reg(hw, 0x1F33, 0x0);
5205 
5206 			(void) e1000_write_phy_reg(hw, 0x1F35, 0x65);
5207 			(void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C);
5208 			(void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC);
5209 			(void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C);
5210 			(void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC);
5211 
5212 			msec_delay(50);
5213 			break;
5214 		case e1000_media_type_fiber:
5215 		case e1000_media_type_internal_serdes:
5216 			status = E1000_READ_REG(hw, E1000_STATUS);
5217 			if (((status & E1000_STATUS_LU) == 0) ||
5218 			    (hw->phy.media_type ==
5219 			    e1000_media_type_internal_serdes)) {
5220 				ctrl = E1000_READ_REG(hw, E1000_CTRL);
5221 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5222 				E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5223 			}
5224 
5225 			/* Disable autoneg by setting bit 31 of TXCW to zero */
5226 			txcw = E1000_READ_REG(hw, E1000_TXCW);
5227 			txcw &= ~((uint32_t)1 << 31);
5228 			E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5229 
5230 			/*
5231 			 * Write 0x410 to Serdes Control register
5232 			 * to enable Serdes analog loopback
5233 			 */
5234 			E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5235 			msec_delay(10);
5236 			break;
5237 		default:
5238 			break;
5239 		}
5240 		break;
5241 	case e1000_82574:
5242 	case e1000_80003es2lan:
5243 	case e1000_ich9lan:
5244 	case e1000_ich10lan:
5245 		(void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata);
5246 		(void) e1000_write_phy_reg(hw, GG82563_REG(6, 16),
5247 		    phydata | (1 << 5));
5248 		Adapter->param_adv_autoneg = 1;
5249 		Adapter->param_adv_1000fdx = 1;
5250 		(void) e1000g_reset_link(Adapter);
5251 		break;
5252 	}
5253 }
5254 
5255 static void
5256 e1000g_set_external_loopback_100(struct e1000g *Adapter)
5257 {
5258 	struct e1000_hw *hw;
5259 	uint32_t ctrl;
5260 	uint16_t phy_ctrl;
5261 
5262 	hw = &Adapter->shared;
5263 
5264 	/* Disable Smart Power Down */
5265 	phy_spd_state(hw, B_FALSE);
5266 
5267 	phy_ctrl = (MII_CR_FULL_DUPLEX |
5268 	    MII_CR_SPEED_100);
5269 
5270 	/* Force 100/FD, reset PHY */
5271 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5272 	    phy_ctrl | MII_CR_RESET);	/* 0xA100 */
5273 	msec_delay(10);
5274 
5275 	/* Force 100/FD */
5276 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5277 	    phy_ctrl);			/* 0x2100 */
5278 	msec_delay(10);
5279 
5280 	/* Now setup the MAC to the same speed/duplex as the PHY. */
5281 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5282 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
5283 	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
5284 	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
5285 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
5286 	    E1000_CTRL_SPD_100 |	/* Force Speed to 100 */
5287 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
5288 
5289 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5290 }
5291 
5292 static void
5293 e1000g_set_external_loopback_10(struct e1000g *Adapter)
5294 {
5295 	struct e1000_hw *hw;
5296 	uint32_t ctrl;
5297 	uint16_t phy_ctrl;
5298 
5299 	hw = &Adapter->shared;
5300 
5301 	/* Disable Smart Power Down */
5302 	phy_spd_state(hw, B_FALSE);
5303 
5304 	phy_ctrl = (MII_CR_FULL_DUPLEX |
5305 	    MII_CR_SPEED_10);
5306 
5307 	/* Force 10/FD, reset PHY */
5308 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5309 	    phy_ctrl | MII_CR_RESET);	/* 0x8100 */
5310 	msec_delay(10);
5311 
5312 	/* Force 10/FD */
5313 	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
5314 	    phy_ctrl);			/* 0x0100 */
5315 	msec_delay(10);
5316 
5317 	/* Now setup the MAC to the same speed/duplex as the PHY. */
5318 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5319 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
5320 	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
5321 	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
5322 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
5323 	    E1000_CTRL_SPD_10 |		/* Force Speed to 10 */
5324 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
5325 
5326 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5327 }
5328 
5329 #ifdef __sparc
5330 static boolean_t
5331 e1000g_find_mac_address(struct e1000g *Adapter)
5332 {
5333 	struct e1000_hw *hw = &Adapter->shared;
5334 	uchar_t *bytes;
5335 	struct ether_addr sysaddr;
5336 	uint_t nelts;
5337 	int err;
5338 	boolean_t found = B_FALSE;
5339 
5340 	/*
5341 	 * The "vendor's factory-set address" may already have
5342 	 * been extracted from the chip, but if the property
5343 	 * "local-mac-address" is set we use that instead.
5344 	 *
5345 	 * We check whether it looks like an array of 6
5346 	 * bytes (which it should, if OBP set it).  If we can't
5347 	 * make sense of it this way, we'll ignore it.
5348 	 */
5349 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
5350 	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
5351 	if (err == DDI_PROP_SUCCESS) {
5352 		if (nelts == ETHERADDRL) {
5353 			while (nelts--)
5354 				hw->mac.addr[nelts] = bytes[nelts];
5355 			found = B_TRUE;
5356 		}
5357 		ddi_prop_free(bytes);
5358 	}
5359 
5360 	/*
5361 	 * Look up the OBP property "local-mac-address?". If the user has set
5362 	 * 'local-mac-address? = false', use "the system address" instead.
5363 	 */
5364 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0,
5365 	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
5366 		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
5367 			if (localetheraddr(NULL, &sysaddr) != 0) {
5368 				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
5369 				found = B_TRUE;
5370 			}
5371 		}
5372 		ddi_prop_free(bytes);
5373 	}
5374 
5375 	/*
5376 	 * Finally(!), if there's a valid "mac-address" property (created
5377 	 * if we netbooted from this interface), we must use this instead
5378 	 * of any of the above to ensure that the NFS/install server doesn't
5379 	 * get confused by the address changing as Solaris takes over!
5380 	 */
5381 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
5382 	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
5383 	if (err == DDI_PROP_SUCCESS) {
5384 		if (nelts == ETHERADDRL) {
5385 			while (nelts--)
5386 				hw->mac.addr[nelts] = bytes[nelts];
5387 			found = B_TRUE;
5388 		}
5389 		ddi_prop_free(bytes);
5390 	}
5391 
5392 	if (found) {
5393 		bcopy(hw->mac.addr, hw->mac.perm_addr,
5394 		    ETHERADDRL);
5395 	}
5396 
5397 	return (found);
5398 }
5399 #endif
5400 
5401 static int
5402 e1000g_add_intrs(struct e1000g *Adapter)
5403 {
5404 	dev_info_t *devinfo;
5405 	int intr_types;
5406 	int rc;
5407 
5408 	devinfo = Adapter->dip;
5409 
5410 	/* Get supported interrupt types */
5411 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
5412 
5413 	if (rc != DDI_SUCCESS) {
5414 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5415 		    "Get supported interrupt types failed: %d\n", rc);
5416 		return (DDI_FAILURE);
5417 	}
5418 
5419 	/*
5420 	 * Based on Intel Technical Advisory document (TA-160), there are some
5421 	 * cases where some older Intel PCI-X NICs may "advertise" to the OS
5422 	 * that they support MSI, but in fact have problems.
5423 	 * So we should only enable MSI for PCI-E NICs and disable MSI for old
5424 	 * PCI/PCI-X NICs.
5425 	 */
5426 	if (Adapter->shared.mac.type < e1000_82571)
5427 		Adapter->msi_enable = B_FALSE;
5428 
5429 	if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) {
5430 		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI);
5431 
5432 		if (rc != DDI_SUCCESS) {
5433 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5434 			    "Add MSI failed, trying Legacy interrupts\n");
5435 		} else {
5436 			Adapter->intr_type = DDI_INTR_TYPE_MSI;
5437 		}
5438 	}
5439 
5440 	if ((Adapter->intr_type == 0) &&
5441 	    (intr_types & DDI_INTR_TYPE_FIXED)) {
5442 		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED);
5443 
5444 		if (rc != DDI_SUCCESS) {
5445 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5446 			    "Add Legacy interrupts failed\n");
5447 			return (DDI_FAILURE);
5448 		}
5449 
5450 		Adapter->intr_type = DDI_INTR_TYPE_FIXED;
5451 	}
5452 
5453 	if (Adapter->intr_type == 0) {
5454 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
5455 		    "No interrupts registered\n");
5456 		return (DDI_FAILURE);
5457 	}
5458 
5459 	return (DDI_SUCCESS);
5460 }
5461 
5462 /*
5463  * e1000g_intr_add() handles MSI/Legacy interrupts
5464  */
5465 static int
5466 e1000g_intr_add(struct e1000g *Adapter, int intr_type)
5467 {
5468 	dev_info_t *devinfo;
5469 	int count, avail, actual;
5470 	int x, y, rc, inum = 0;
5471 	int flag;
5472 	ddi_intr_handler_t *intr_handler;
5473 
5474 	devinfo = Adapter->dip;
5475 
5476 	/* get number of interrupts */
5477 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
5478 	if ((rc != DDI_SUCCESS) || (count == 0)) {
5479 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5480 		    "Get interrupt number failed. Return: %d, count: %d\n",
5481 		    rc, count);
5482 		return (DDI_FAILURE);
5483 	}
5484 
5485 	/* get number of available interrupts */
5486 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
5487 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
5488 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5489 		    "Get interrupt available number failed. "
5490 		    "Return: %d, available: %d\n", rc, avail);
5491 		return (DDI_FAILURE);
5492 	}
5493 
5494 	if (avail < count) {
5495 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5496 		    "Interrupts count: %d, available: %d\n",
5497 		    count, avail);
5498 	}
5499 
5500 	/* Allocate an array of interrupt handles */
5501 	Adapter->intr_size = count * sizeof (ddi_intr_handle_t);
5502 	Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP);
5503 
5504 	/* Set NORMAL behavior for both MSI and FIXED interrupts */
5505 	flag = DDI_INTR_ALLOC_NORMAL;
5506 
5507 	/* call ddi_intr_alloc() */
5508 	rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum,
5509 	    count, &actual, flag);
5510 
5511 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
5512 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5513 		    "Allocate interrupts failed: %d\n", rc);
5514 
5515 		kmem_free(Adapter->htable, Adapter->intr_size);
5516 		return (DDI_FAILURE);
5517 	}
5518 
5519 	if (actual < count) {
5520 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
5521 		    "Interrupts requested: %d, received: %d\n",
5522 		    count, actual);
5523 	}
5524 
5525 	Adapter->intr_cnt = actual;
5526 
5527 	/* Get priority of the first vector; assume the rest are the same */
5528 	rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri);
5529 
5530 	if (rc != DDI_SUCCESS) {
5531 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5532 		    "Get interrupt priority failed: %d\n", rc);
5533 
5534 		/* Free already allocated intr */
5535 		for (y = 0; y < actual; y++)
5536 			(void) ddi_intr_free(Adapter->htable[y]);
5537 
5538 		kmem_free(Adapter->htable, Adapter->intr_size);
5539 		return (DDI_FAILURE);
5540 	}
5541 
5542 	/*
5543 	 * In Legacy Interrupt mode, for PCI-Express adapters, we should
5544 	 * use the interrupt service routine e1000g_intr_pciexpress()
5545 	 * to avoid interrupt stealing when sharing interrupt with other
5546 	 * devices.
5547 	 */
5548 	if (Adapter->shared.mac.type < e1000_82571)
5549 		intr_handler = (ddi_intr_handler_t *)e1000g_intr;
5550 	else
5551 		intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress;
5552 
5553 	/* Call ddi_intr_add_handler() */
5554 	for (x = 0; x < actual; x++) {
5555 		rc = ddi_intr_add_handler(Adapter->htable[x],
5556 		    intr_handler, (caddr_t)Adapter, NULL);
5557 
5558 		if (rc != DDI_SUCCESS) {
5559 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5560 			    "Add interrupt handler failed: %d\n", rc);
5561 
5562 			/* Remove already added handler */
5563 			for (y = 0; y < x; y++)
5564 				(void) ddi_intr_remove_handler(
5565 				    Adapter->htable[y]);
5566 
5567 			/* Free already allocated intr */
5568 			for (y = 0; y < actual; y++)
5569 				(void) ddi_intr_free(Adapter->htable[y]);
5570 
5571 			kmem_free(Adapter->htable, Adapter->intr_size);
5572 			return (DDI_FAILURE);
5573 		}
5574 	}
5575 
5576 	rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap);
5577 
5578 	if (rc != DDI_SUCCESS) {
5579 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5580 		    "Get interrupt cap failed: %d\n", rc);
5581 
5582 		/* Free already allocated intr */
5583 		for (y = 0; y < actual; y++) {
5584 			(void) ddi_intr_remove_handler(Adapter->htable[y]);
5585 			(void) ddi_intr_free(Adapter->htable[y]);
5586 		}
5587 
5588 		kmem_free(Adapter->htable, Adapter->intr_size);
5589 		return (DDI_FAILURE);
5590 	}
5591 
5592 	return (DDI_SUCCESS);
5593 }
5594 
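/*
 * e1000g_rem_intrs - remove the interrupt handlers added by e1000g_intr_add()
 * and free the allocated interrupt handles and handle table.
 */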
5595 static int
5596 e1000g_rem_intrs(struct e1000g *Adapter)
5597 {
5598 	int x;
5599 	int rc;
5600 
5601 	for (x = 0; x < Adapter->intr_cnt; x++) {
5602 		rc = ddi_intr_remove_handler(Adapter->htable[x]);
5603 		if (rc != DDI_SUCCESS) {
5604 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5605 			    "Remove intr handler failed: %d\n", rc);
5606 			return (DDI_FAILURE);
5607 		}
5608 
5609 		rc = ddi_intr_free(Adapter->htable[x]);
5610 		if (rc != DDI_SUCCESS) {
5611 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5612 			    "Free intr failed: %d\n", rc);
5613 			return (DDI_FAILURE);
5614 		}
5615 	}
5616 
5617 	kmem_free(Adapter->htable, Adapter->intr_size);
5618 
5619 	return (DDI_SUCCESS);
5620 }
5621 
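/*
 * e1000g_enable_intrs - enable all allocated interrupts, using block enable
 * when the DDI_INTR_FLAG_BLOCK capability is present and per-vector enable
 * otherwise.
 */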
5622 static int
5623 e1000g_enable_intrs(struct e1000g *Adapter)
5624 {
5625 	int x;
5626 	int rc;
5627 
5628 	/* Enable interrupts */
5629 	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
5630 		/* Call ddi_intr_block_enable() for MSI */
5631 		rc = ddi_intr_block_enable(Adapter->htable,
5632 		    Adapter->intr_cnt);
5633 		if (rc != DDI_SUCCESS) {
5634 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5635 			    "Enable block intr failed: %d\n", rc);
5636 			return (DDI_FAILURE);
5637 		}
5638 	} else {
5639 		/* Call ddi_intr_enable() for Legacy/MSI non block enable */
5640 		for (x = 0; x < Adapter->intr_cnt; x++) {
5641 			rc = ddi_intr_enable(Adapter->htable[x]);
5642 			if (rc != DDI_SUCCESS) {
5643 				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5644 				    "Enable intr failed: %d\n", rc);
5645 				return (DDI_FAILURE);
5646 			}
5647 		}
5648 	}
5649 
5650 	return (DDI_SUCCESS);
5651 }
5652 
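/*
 * e1000g_disable_intrs - disable all allocated interrupts, mirroring
 * e1000g_enable_intrs(): block disable when DDI_INTR_FLAG_BLOCK is set,
 * per-vector disable otherwise.
 */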
5653 static int
5654 e1000g_disable_intrs(struct e1000g *Adapter)
5655 {
5656 	int x;
5657 	int rc;
5658 
5659 	/* Disable all interrupts */
5660 	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
5661 		rc = ddi_intr_block_disable(Adapter->htable,
5662 		    Adapter->intr_cnt);
5663 		if (rc != DDI_SUCCESS) {
5664 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5665 			    "Disable block intr failed: %d\n", rc);
5666 			return (DDI_FAILURE);
5667 		}
5668 	} else {
5669 		for (x = 0; x < Adapter->intr_cnt; x++) {
5670 			rc = ddi_intr_disable(Adapter->htable[x]);
5671 			if (rc != DDI_SUCCESS) {
5672 				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
5673 				    "Disable intr failed: %d\n", rc);
5674 				return (DDI_FAILURE);
5675 			}
5676 		}
5677 	}
5678 
5679 	return (DDI_SUCCESS);
5680 }
5681 
5682 /*
5683  * e1000g_get_phy_state - get the state of PHY registers, save in the adapter
5684  */
5685 static void
5686 e1000g_get_phy_state(struct e1000g *Adapter)
5687 {
5688 	struct e1000_hw *hw = &Adapter->shared;
5689 
5690 	(void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl);
5691 	(void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status);
5692 	(void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &Adapter->phy_an_adv);
5693 	(void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &Adapter->phy_an_exp);
5694 	(void) e1000_read_phy_reg(hw, PHY_EXT_STATUS, &Adapter->phy_ext_status);
5695 	(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &Adapter->phy_1000t_ctrl);
5696 	(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS,
5697 	    &Adapter->phy_1000t_status);
5698 	(void) e1000_read_phy_reg(hw, PHY_LP_ABILITY, &Adapter->phy_lp_able);
5699 
5700 	Adapter->param_autoneg_cap =
5701 	    (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
5702 	Adapter->param_pause_cap =
5703 	    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
5704 	Adapter->param_asym_pause_cap =
5705 	    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
5706 	Adapter->param_1000fdx_cap =
5707 	    ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5708 	    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
5709 	Adapter->param_1000hdx_cap =
5710 	    ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
5711 	    (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
5712 	Adapter->param_100t4_cap =
5713 	    (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
5714 	Adapter->param_100fdx_cap =
5715 	    ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5716 	    (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
5717 	Adapter->param_100hdx_cap =
5718 	    ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
5719 	    (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
5720 	Adapter->param_10fdx_cap =
5721 	    (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
5722 	Adapter->param_10hdx_cap =
5723 	    (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
5724 
5725 	Adapter->param_adv_autoneg = hw->mac.autoneg;
5726 	Adapter->param_adv_pause =
5727 	    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
5728 	Adapter->param_adv_asym_pause =
5729 	    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
5730 	Adapter->param_adv_1000hdx =
5731 	    (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
5732 	Adapter->param_adv_100t4 =
5733 	    (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
5734 	if (Adapter->param_adv_autoneg == 1) {
5735 		Adapter->param_adv_1000fdx =
5736 		    (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS) ? 1 : 0;
5737 		Adapter->param_adv_100fdx =
5738 		    (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS) ? 1 : 0;
5739 		Adapter->param_adv_100hdx =
5740 		    (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS) ? 1 : 0;
5741 		Adapter->param_adv_10fdx =
5742 		    (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
5743 		Adapter->param_adv_10hdx =
5744 		    (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
5745 	}
5746 
5747 	Adapter->param_lp_autoneg =
5748 	    (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
5749 	Adapter->param_lp_pause =
5750 	    (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
5751 	Adapter->param_lp_asym_pause =
5752 	    (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
5753 	Adapter->param_lp_1000fdx =
5754 	    (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
5755 	Adapter->param_lp_1000hdx =
5756 	    (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
5757 	Adapter->param_lp_100t4 =
5758 	    (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
5759 	Adapter->param_lp_100fdx =
5760 	    (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
5761 	Adapter->param_lp_100hdx =
5762 	    (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
5763 	Adapter->param_lp_10fdx =
5764 	    (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
5765 	Adapter->param_lp_10hdx =
5766 	    (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
5767 }
5768 
5769 /*
5770  * FMA support
5771  */
5772 
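/*
 * e1000g_check_acc_handle - retrieve and clear the FMA error status of a
 * register access handle.
 */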
5773 int
5774 e1000g_check_acc_handle(ddi_acc_handle_t handle)
5775 {
5776 	ddi_fm_error_t de;
5777 
5778 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5779 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
5780 	return (de.fme_status);
5781 }
5782 
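/*
 * e1000g_check_dma_handle - retrieve the FMA error status of a DMA handle.
 */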
5783 int
5784 e1000g_check_dma_handle(ddi_dma_handle_t handle)
5785 {
5786 	ddi_fm_error_t de;
5787 
5788 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
5789 	return (de.fme_status);
5790 }
5791 
5792 /*
5793  * The IO fault service error handling callback function
5794  */
5795 /* ARGSUSED2 */
5796 static int
5797 e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
5798 {
5799 	/*
5800 	 * As the driver can always deal with an error in any DMA or
5801 	 * access handle, we can just return the fme_status value.
5802 	 */
5803 	pci_ereport_post(dip, err, NULL);
5804 	return (err->fme_status);
5805 }
5806 
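/*
 * e1000g_fm_init - register with IO Fault Services according to the
 * configured fm_capabilities: select the access/DMA error-checking flags,
 * set up PCI ereports and install the error callback as appropriate.
 */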
5807 static void
5808 e1000g_fm_init(struct e1000g *Adapter)
5809 {
5810 	ddi_iblock_cookie_t iblk;
5811 	int fma_acc_flag, fma_dma_flag;
5812 
5813 	/* Only register with IO Fault Services if we have some capability */
5814 	if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
5815 		e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
5816 		fma_acc_flag = 1;
5817 	} else {
5818 		e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
5819 		fma_acc_flag = 0;
5820 	}
5821 
5822 	if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
5823 		fma_dma_flag = 1;
5824 	} else {
5825 		fma_dma_flag = 0;
5826 	}
5827 
5828 	(void) e1000g_set_fma_flags(Adapter, fma_acc_flag, fma_dma_flag);
5829 
5830 	if (Adapter->fm_capabilities) {
5831 
5832 		/* Register capabilities with IO Fault Services */
5833 		ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk);
5834 
5835 		/*
5836 		 * Initialize pci ereport capabilities if ereport capable
5837 		 */
5838 		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
5839 		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5840 			pci_ereport_setup(Adapter->dip);
5841 
5842 		/*
5843 		 * Register error callback if error callback capable
5844 		 */
5845 		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5846 			ddi_fm_handler_register(Adapter->dip,
5847 			    e1000g_fm_error_cb, (void*) Adapter);
5848 	}
5849 }
5850 
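/*
 * e1000g_fm_fini - undo e1000g_fm_init(): tear down PCI ereports, unregister
 * the error callback and unregister from IO Fault Services.
 */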
5851 static void
5852 e1000g_fm_fini(struct e1000g *Adapter)
5853 {
5854 	/* Only unregister FMA capabilities if we registered some */
5855 	if (Adapter->fm_capabilities) {
5856 
5857 		/*
5858 		 * Release any resources allocated by pci_ereport_setup()
5859 		 */
5860 		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
5861 		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5862 			pci_ereport_teardown(Adapter->dip);
5863 
5864 		/*
5865 		 * Un-register error callback if error callback capable
5866 		 */
5867 		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
5868 			ddi_fm_handler_unregister(Adapter->dip);
5869 
5870 		/* Unregister from IO Fault Services */
5871 		mutex_enter(&e1000g_rx_detach_lock);
5872 		ddi_fm_fini(Adapter->dip);
5873 		if (Adapter->priv_dip != NULL) {
5874 			DEVI(Adapter->priv_dip)->devi_fmhdl = NULL;
5875 		}
5876 		mutex_exit(&e1000g_rx_detach_lock);
5877 	}
5878 }
5879 
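/*
 * e1000g_fm_ereport - post a device ereport with the given detail class,
 * provided the driver is ereport capable.
 */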
5880 void
5881 e1000g_fm_ereport(struct e1000g *Adapter, char *detail)
5882 {
5883 	uint64_t ena;
5884 	char buf[FM_MAX_CLASS];
5885 
5886 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
5887 	ena = fm_ena_generate(0, FM_ENA_FMT1);
5888 	if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) {
5889 		ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP,
5890 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
5891 	}
5892 }
5893 
5894 /*
5895  * quiesce(9E) entry point.
5896  *
5897  * This function is called when the system is single-threaded at high
5898  * PIL with preemption disabled. Therefore, this function must not be
5899  * blocked.
5900  *
5901  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5902  * DDI_FAILURE indicates an error condition and should almost never happen.
5903  */
5904 static int
5905 e1000g_quiesce(dev_info_t *devinfo)
5906 {
5907 	struct e1000g *Adapter;
5908 
5909 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
5910 
5911 	if (Adapter == NULL)
5912 		return (DDI_FAILURE);
5913 
5914 	e1000g_clear_all_interrupts(Adapter);
5915 
5916 	(void) e1000_reset_hw(&Adapter->shared);
5917 
5918 	/* Setup our HW Tx Head & Tail descriptor pointers */
5919 	E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
5920 	E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
5921 
5922 	/* Setup our HW Rx Head & Tail descriptor pointers */
5923 	E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0);
5924 	E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0);
5925 
5926 	return (DDI_SUCCESS);
5927 }
5928 
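/*
 * e1000g_get_def_val - return the default value of a MAC property, derived
 * from the PHY capability registers saved by e1000g_get_phy_state().
 */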
5929 static int
5930 e1000g_get_def_val(struct e1000g *Adapter, mac_prop_id_t pr_num,
5931     uint_t pr_valsize, void *pr_val)
5932 {
5933 	link_flowctrl_t fl;
5934 	int err = 0;
5935 
5936 	ASSERT(pr_valsize > 0);
5937 	switch (pr_num) {
5938 	case MAC_PROP_AUTONEG:
5939 		*(uint8_t *)pr_val =
5940 		    ((Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0);
5941 		break;
5942 	case MAC_PROP_FLOWCTRL:
5943 		if (pr_valsize < sizeof (link_flowctrl_t))
5944 			return (EINVAL);
5945 		fl = LINK_FLOWCTRL_BI;
5946 		bcopy(&fl, pr_val, sizeof (fl));
5947 		break;
5948 	case MAC_PROP_ADV_1000FDX_CAP:
5949 	case MAC_PROP_EN_1000FDX_CAP:
5950 		*(uint8_t *)pr_val =
5951 		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5952 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
5953 		break;
5954 	case MAC_PROP_ADV_1000HDX_CAP:
5955 	case MAC_PROP_EN_1000HDX_CAP:
5956 		*(uint8_t *)pr_val =
5957 		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
5958 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
5959 		break;
5960 	case MAC_PROP_ADV_100FDX_CAP:
5961 	case MAC_PROP_EN_100FDX_CAP:
5962 		*(uint8_t *)pr_val =
5963 		    ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5964 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
5965 		break;
5966 	case MAC_PROP_ADV_100HDX_CAP:
5967 	case MAC_PROP_EN_100HDX_CAP:
5968 		*(uint8_t *)pr_val =
5969 		    ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
5970 		    (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
5971 		break;
5972 	case MAC_PROP_ADV_10FDX_CAP:
5973 	case MAC_PROP_EN_10FDX_CAP:
5974 		*(uint8_t *)pr_val =
5975 		    (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
5976 		break;
5977 	case MAC_PROP_ADV_10HDX_CAP:
5978 	case MAC_PROP_EN_10HDX_CAP:
5979 		*(uint8_t *)pr_val =
5980 		    (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
5981 		break;
5982 	default:
5983 		err = ENOTSUP;
5984 		break;
5985 	}
5986 	return (err);
5987 }
5988 
5989 /*
5990  * synchronize the adv* and en* parameters.
5991  *
5992  * See comments in <sys/dld.h> for details of the *_en_*
5993  * parameters. The usage of ndd for setting adv parameters will
5994  * synchronize all the en parameters with the e1000g parameters,
5995  * implicitly disabling any settings made via dladm.
5996  */
5997 static void
5998 e1000g_param_sync(struct e1000g *Adapter)
5999 {
6000 	Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx;
6001 	Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx;
6002 	Adapter->param_en_100fdx = Adapter->param_adv_100fdx;
6003 	Adapter->param_en_100hdx = Adapter->param_adv_100hdx;
6004 	Adapter->param_en_10fdx = Adapter->param_adv_10fdx;
6005 	Adapter->param_en_10hdx = Adapter->param_adv_10hdx;
6006 }
6007 
6008 /*
6009  * e1000g_get_driver_control - tell manageability firmware that the driver
6010  * has control.
6011  */
6012 static void
6013 e1000g_get_driver_control(struct e1000_hw *hw)
6014 {
6015 	uint32_t ctrl_ext;
6016 	uint32_t swsm;
6017 
6018 	/* tell manageability firmware the driver has taken over */
6019 	switch (hw->mac.type) {
6020 	case e1000_82573:
6021 		swsm = E1000_READ_REG(hw, E1000_SWSM);
6022 		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);
6023 		break;
6024 	case e1000_82571:
6025 	case e1000_82572:
6026 	case e1000_82574:
6027 	case e1000_80003es2lan:
6028 	case e1000_ich8lan:
6029 	case e1000_ich9lan:
6030 	case e1000_ich10lan:
6031 		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6032 		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6033 		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
6034 		break;
6035 	default:
6036 		/* no manageability firmware: do nothing */
6037 		break;
6038 	}
6039 }
6040 
6041 /*
6042  * e1000g_release_driver_control - tell manageability firmware that the driver
6043  * has released control.
6044  */
6045 static void
6046 e1000g_release_driver_control(struct e1000_hw *hw)
6047 {
6048 	uint32_t ctrl_ext;
6049 	uint32_t swsm;
6050 
6051 	/* tell manageability firmware the driver has released control */
6052 	switch (hw->mac.type) {
6053 	case e1000_82573:
6054 		swsm = E1000_READ_REG(hw, E1000_SWSM);
6055 		E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
6056 		break;
6057 	case e1000_82571:
6058 	case e1000_82572:
6059 	case e1000_82574:
6060 	case e1000_80003es2lan:
6061 	case e1000_ich8lan:
6062 	case e1000_ich9lan:
6063 	case e1000_ich10lan:
6064 		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6065 		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6066 		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
6067 		break;
6068 	default:
6069 		/* no manageability firmware: do nothing */
6070 		break;
6071 	}
6072 }
6073 
6074 /*
6075  * Restore e1000g promiscuous mode.
6076  */
6077 static void
6078 e1000g_restore_promisc(struct e1000g *Adapter)
6079 {
6080 	if (Adapter->e1000g_promisc) {
6081 		uint32_t rctl;
6082 
6083 		rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
6084 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
6085 		E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
6086 	}
6087 }
6088