xref: /titanic_44/usr/src/uts/common/io/e1000g/e1000g_main.c (revision dd4eeefdb8e4583c47e28a7f315db6087931ef06)
1 /*
2  * This file is provided under a CDDLv1 license.  When using or
3  * redistributing this file, you may do so under this license.
4  * In redistributing this file this license must be included
5  * and no other modification of this header file is permitted.
6  *
7  * CDDL LICENSE SUMMARY
8  *
9  * Copyright(c) 1999 - 2007 Intel Corporation. All rights reserved.
10  *
11  * The contents of this file are subject to the terms of Version
12  * 1.0 of the Common Development and Distribution License (the "License").
13  *
14  * You should have received a copy of the License with this software.
15  * You can obtain a copy of the License at
16  *	http://www.opensolaris.org/os/licensing.
17  * See the License for the specific language governing permissions
18  * and limitations under the License.
19  */
20 
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms of the CDDLv1.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * **********************************************************************
30  *									*
31  * Module Name:								*
32  *   e1000g_main.c							*
33  *									*
34  * Abstract:								*
35  *   This file contains the interface routines for the Solaris OS.	*
36  *   It has all DDI entry point routines and GLD entry point routines.	*
37  *									*
38  *   This file also contains routines that take care of initialization,	*
39  *   the uninit routine and the interrupt routine.			*
40  *									*
41  * **********************************************************************
42  */
43 
44 #include <sys/dlpi.h>
45 #include <sys/mac.h>
46 #include "e1000g_sw.h"
47 #include "e1000g_debug.h"
48 
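/*
 * Default rx interrupt blanking time and packet count handed to the
 * MAC layer as the MAC_RX_FIFO resource in e1000g_m_resources() below.
 */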
49 #define	E1000_RX_INTPT_TIME	128
50 #define	E1000_RX_PKT_CNT	8
51 
52 static char ident[] = "Intel PRO/1000 Ethernet 5.2.2";
53 static char e1000g_string[] = "Intel(R) PRO/1000 Network Connection";
54 static char e1000g_version[] = "Driver Ver. 5.2.2";
55 
56 /*
57  * Prototypes for DDI entry points
58  */
59 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
60 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);
61 
62 /*
63  * Prototypes for the init and intr routines
64  */
65 static int e1000g_resume(dev_info_t *);
66 static int e1000g_suspend(dev_info_t *);
67 static uint_t e1000g_intr_pciexpress(caddr_t);
68 static uint_t e1000g_intr(caddr_t);
69 static void e1000g_intr_work(struct e1000g *, uint32_t);
70 #pragma inline(e1000g_intr_work)
71 static int e1000g_init(struct e1000g *);
72 static int e1000g_start(struct e1000g *, boolean_t);
73 static void e1000g_stop(struct e1000g *, boolean_t);
74 static int e1000g_m_start(void *);
75 static void e1000g_m_stop(void *);
76 static int e1000g_m_promisc(void *, boolean_t);
77 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
78 static int e1000g_m_unicst(void *, const uint8_t *);
79 static int e1000g_m_unicst_add(void *, mac_multi_addr_t *);
80 static int e1000g_m_unicst_remove(void *, mac_addr_slot_t);
81 static int e1000g_m_unicst_modify(void *, mac_multi_addr_t *);
82 static int e1000g_m_unicst_get(void *, mac_multi_addr_t *);
83 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
84 static void e1000g_m_blank(void *, time_t, uint32_t);
85 static void e1000g_m_resources(void *);
86 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
87 static void e1000g_init_locks(struct e1000g *);
88 static void e1000g_destroy_locks(struct e1000g *);
89 static int e1000g_identify_hardware(struct e1000g *);
90 static int e1000g_regs_map(struct e1000g *);
91 static int e1000g_set_driver_params(struct e1000g *);
92 static int e1000g_register_mac(struct e1000g *);
93 static boolean_t e1000g_rx_drain(struct e1000g *);
94 static boolean_t e1000g_tx_drain(struct e1000g *);
95 static void e1000g_init_unicst(struct e1000g *);
96 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, mac_addr_slot_t);
97 
98 /*
99  * Local routines
100  */
101 static void e1000g_tx_clean(struct e1000g *);
102 static void e1000g_rx_clean(struct e1000g *);
103 static void e1000g_link_timer(void *);
104 static void e1000g_local_timer(void *);
105 static boolean_t e1000g_link_check(struct e1000g *);
106 static boolean_t e1000g_stall_check(struct e1000g *);
107 static void e1000g_smartspeed(struct e1000g *);
108 static void e1000g_get_conf(struct e1000g *);
109 static int e1000g_get_prop(struct e1000g *, char *, int, int, int);
110 static void enable_watchdog_timer(struct e1000g *);
111 static void disable_watchdog_timer(struct e1000g *);
112 static void start_watchdog_timer(struct e1000g *);
113 static void restart_watchdog_timer(struct e1000g *);
114 static void stop_watchdog_timer(struct e1000g *);
115 static void stop_link_timer(struct e1000g *);
116 static void stop_82547_timer(e1000g_tx_ring_t *);
117 static void e1000g_force_speed_duplex(struct e1000g *);
118 static void e1000g_get_max_frame_size(struct e1000g *);
119 static boolean_t is_valid_mac_addr(uint8_t *);
120 static void e1000g_unattach(dev_info_t *, struct e1000g *);
121 #ifdef E1000G_DEBUG
122 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *);
123 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *);
124 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *);
125 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *);
126 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *,
127     struct iocblk *, mblk_t *);
128 #endif
129 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *,
130     struct iocblk *, mblk_t *);
131 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t);
132 static void e1000g_set_internal_loopback(struct e1000g *);
133 static void e1000g_set_external_loopback_1000(struct e1000g *);
134 static void e1000g_set_external_loopback_100(struct e1000g *);
135 static void e1000g_set_external_loopback_10(struct e1000g *);
136 static int e1000g_add_intrs(struct e1000g *);
137 static int e1000g_intr_add(struct e1000g *, int);
138 static int e1000g_rem_intrs(struct e1000g *);
139 static int e1000g_enable_intrs(struct e1000g *);
140 static int e1000g_disable_intrs(struct e1000g *);
141 static boolean_t e1000g_link_up(struct e1000g *);
142 #ifdef __sparc
143 static boolean_t e1000g_find_mac_address(struct e1000g *);
144 #endif
145 static void e1000g_get_phy_state(struct e1000g *);
146 static void e1000g_free_priv_devi_node(struct e1000g *, boolean_t);
147 
148 static struct cb_ops cb_ws_ops = {
149 	nulldev,		/* cb_open */
150 	nulldev,		/* cb_close */
151 	nodev,			/* cb_strategy */
152 	nodev,			/* cb_print */
153 	nodev,			/* cb_dump */
154 	nodev,			/* cb_read */
155 	nodev,			/* cb_write */
156 	nodev,			/* cb_ioctl */
157 	nodev,			/* cb_devmap */
158 	nodev,			/* cb_mmap */
159 	nodev,			/* cb_segmap */
160 	nochpoll,		/* cb_chpoll */
161 	ddi_prop_op,		/* cb_prop_op */
162 	NULL,			/* cb_stream */
163 	D_MP | D_HOTPLUG,	/* cb_flag */
164 	CB_REV,			/* cb_rev */
165 	nodev,			/* cb_aread */
166 	nodev			/* cb_awrite */
167 };
168 
169 static struct dev_ops ws_ops = {
170 	DEVO_REV,		/* devo_rev */
171 	0,			/* devo_refcnt */
172 	NULL,			/* devo_getinfo */
173 	nulldev,		/* devo_identify */
174 	nulldev,		/* devo_probe */
175 	e1000g_attach,		/* devo_attach */
176 	e1000g_detach,		/* devo_detach */
177 	nodev,			/* devo_reset */
178 	&cb_ws_ops,		/* devo_cb_ops */
179 	NULL,			/* devo_bus_ops */
180 	ddi_power		/* devo_power */
181 };
182 
183 static struct modldrv modldrv = {
184 	&mod_driverops,		/* Type of module.  This one is a driver */
185 	ident,			/* Description string */
186 	&ws_ops,		/* driver ops */
187 };
188 
189 static struct modlinkage modlinkage = {
190 	MODREV_1, &modldrv, NULL
191 };
192 
193 /* Access attributes for register mapping */
194 static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
195 	DDI_DEVICE_ATTR_V0,
196 	DDI_STRUCTURE_LE_ACC,
197 	DDI_STRICTORDER_ACC,
198 };
199 
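/*
 * Flags advertising which optional mac callbacks (m_resources, m_ioctl
 * and m_getcapab) are provided in e1000g_m_callbacks below.
 */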
200 #define	E1000G_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)
201 
202 static mac_callbacks_t e1000g_m_callbacks = {
203 	E1000G_M_CALLBACK_FLAGS,
204 	e1000g_m_stat,
205 	e1000g_m_start,
206 	e1000g_m_stop,
207 	e1000g_m_promisc,
208 	e1000g_m_multicst,
209 	e1000g_m_unicst,
210 	e1000g_m_tx,
211 	e1000g_m_resources,
212 	e1000g_m_ioctl,
213 	e1000g_m_getcapab
214 };
215 
216 /*
217  * Global variables
218  */
219 
220 uint32_t e1000g_mblks_pending = 0;
221 /*
222  * Workaround for Dynamic Reconfiguration support, for the x86 platform only.
223  * Here we maintain a private dev_info list if e1000g_force_detach is
224  * enabled. If we force the driver to detach while some rx buffers are
225  * still retained in the upper layer, we have to keep a copy of the
226  * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
227  * structure will be freed after the driver is detached. However, when we
228  * finally free those rx buffers released by the upper layer, we need to
229  * refer to the dev_info to free the dma buffers. So we save a copy of
230  * the dev_info for this purpose. On the x86 platform, we assume this copy
231  * of the dev_info is always valid, but on the SPARC platform, it could be
232  * invalid after a system-board-level DR operation. For this reason, the
233  * global variable e1000g_force_detach must be B_FALSE on SPARC.
234  */
235 #ifdef __sparc
236 boolean_t e1000g_force_detach = B_FALSE;
237 #else
238 boolean_t e1000g_force_detach = B_TRUE;
239 #endif
240 private_devi_list_t *e1000g_private_devi_list = NULL;
241 
242 /*
243  * This rwlock protects rx recycling and the release of rx packets in
244  * detach processing, making the two mutually exclusive.
245  * Rx recycling handles different rx packets in different threads, so
246  * it takes the lock as RW_READER and does not block any other rx
247  * recycling threads.
248  * Detach processing, by contrast, takes the lock as RW_WRITER, which
249  * makes it mutually exclusive with rx recycling.
251  */
252 krwlock_t e1000g_rx_detach_lock;
253 /*
254  * The rwlock e1000g_dma_type_lock is defined to protect the global flag
255  * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
256  * If there are many e1000g instances, the system may run out of DVMA
257  * resources during the initialization of the instances; the flag will then
258  * be changed to "USE_DMA". Because different e1000g instances are initialized
259  * in parallel, we need to use this lock to protect the flag.
260  */
261 krwlock_t e1000g_dma_type_lock;
262 
263 
264 /*
265  * Loadable module configuration entry points for the driver
266  */
267 
268 /*
269  * _init - module initialization
270  */
271 int
272 _init(void)
273 {
274 	int status;
275 
276 	mac_init_ops(&ws_ops, WSNAME);
277 	status = mod_install(&modlinkage);
278 	if (status != DDI_SUCCESS)
279 		mac_fini_ops(&ws_ops);
280 	else {
281 		rw_init(&e1000g_rx_detach_lock, NULL, RW_DRIVER, NULL);
282 		rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL);
283 	}
284 
285 	return (status);
286 }
287 
288 /*
289  * _fini - module finalization
290  */
291 int
292 _fini(void)
293 {
294 	int status;
295 
296 	rw_enter(&e1000g_rx_detach_lock, RW_READER);
297 	if (e1000g_mblks_pending != 0) {
298 		rw_exit(&e1000g_rx_detach_lock);
299 		return (EBUSY);
300 	}
301 	rw_exit(&e1000g_rx_detach_lock);
302 
303 	status = mod_remove(&modlinkage);
304 	if (status == DDI_SUCCESS) {
305 		mac_fini_ops(&ws_ops);
306 
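		/*
		 * Free any private dev_info copies left on the global
		 * list (see the e1000g_force_detach comment above).
		 */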
307 		if (e1000g_force_detach) {
308 			private_devi_list_t *devi_node;
309 
310 			rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
311 			while (e1000g_private_devi_list != NULL) {
312 				devi_node = e1000g_private_devi_list;
313 				e1000g_private_devi_list =
314 				    e1000g_private_devi_list->next;
315 
316 				kmem_free(devi_node->priv_dip,
317 				    sizeof (struct dev_info));
318 				kmem_free(devi_node,
319 				    sizeof (private_devi_list_t));
320 			}
321 			rw_exit(&e1000g_rx_detach_lock);
322 		}
323 
324 		rw_destroy(&e1000g_rx_detach_lock);
325 		rw_destroy(&e1000g_dma_type_lock);
326 	}
327 
328 	return (status);
329 }
330 
331 /*
332  * _info - module information
333  */
334 int
335 _info(struct modinfo *modinfop)
336 {
337 	return (mod_info(&modlinkage, modinfop));
338 }
339 
340 /*
341  * e1000g_attach - driver attach
342  *
343  * This function is the device-specific initialization entry
344  * point. This entry point is required and must be written.
345  * The DDI_ATTACH command must be provided in the attach entry
346  * point. When attach() is called with cmd set to DDI_ATTACH,
347  * all normal kernel services (such as kmem_alloc(9F)) are
348  * available for use by the driver.
349  *
350  * The attach() function will be called once for each instance
351  * of  the  device  on  the  system with cmd set to DDI_ATTACH.
352  * Until attach() succeeds, the only driver entry points which
353  * may be called are open(9E) and getinfo(9E).
354  */
355 static int
356 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
357 {
358 	struct e1000g *Adapter;
359 	struct e1000_hw *hw;
360 	struct e1000g_osdep *osdep;
361 	int instance;
362 
363 	switch (cmd) {
364 	default:
365 		e1000g_log(NULL, CE_WARN,
366 		    "Unsupported command sent to e1000g_attach... ");
367 		return (DDI_FAILURE);
368 
369 	case DDI_RESUME:
370 		return (e1000g_resume(devinfo));
371 
372 	case DDI_ATTACH:
373 		break;
374 	}
375 
376 	/*
377 	 * get device instance number
378 	 */
379 	instance = ddi_get_instance(devinfo);
380 
381 	/*
382 	 * Allocate soft data structure
383 	 */
384 	Adapter =
385 	    (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP);
386 
387 	Adapter->dip = devinfo;
388 	Adapter->instance = instance;
389 	Adapter->tx_ring->adapter = Adapter;
390 	Adapter->rx_ring->adapter = Adapter;
391 
392 	hw = &Adapter->shared;
393 	osdep = &Adapter->osdep;
394 	hw->back = osdep;
395 	osdep->adapter = Adapter;
396 
397 	ddi_set_driver_private(devinfo, (caddr_t)Adapter);
398 
399 	/*
400 	 * PCI Configure
401 	 */
402 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
403 		e1000g_log(Adapter, CE_WARN, "PCI configuration failed");
404 		goto attach_fail;
405 	}
406 	Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
407 
408 	/*
409 	 * Setup hardware
410 	 */
411 	if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) {
412 		e1000g_log(Adapter, CE_WARN, "Identify hardware failed");
413 		goto attach_fail;
414 	}
415 
416 	/*
417 	 * Map in the device registers.
418 	 */
419 	if (e1000g_regs_map(Adapter) != DDI_SUCCESS) {
420 		e1000g_log(Adapter, CE_WARN, "Mapping registers failed");
421 		goto attach_fail;
422 	}
423 	Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
424 
425 	/*
426 	 * Initialize driver parameters
427 	 */
428 	if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) {
429 		goto attach_fail;
430 	}
431 	Adapter->attach_progress |= ATTACH_PROGRESS_SETUP;
432 
433 	/*
434 	 * Initialize interrupts
435 	 */
436 	if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) {
437 		e1000g_log(Adapter, CE_WARN, "Add interrupts failed");
438 		goto attach_fail;
439 	}
440 	Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
441 
442 	/*
443 	 * Initialize mutexes for this device.
444 	 * Do this before enabling the interrupt handler and
445 	 * registering the softint, to avoid the condition where the
446 	 * interrupt handler could try to use an uninitialized mutex.
447 	 */
448 	e1000g_init_locks(Adapter);
449 	Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS;
450 
451 	Adapter->tx_softint_pri = DDI_INTR_SOFTPRI_MAX;
452 	if (ddi_intr_add_softint(devinfo,
453 	    &Adapter->tx_softint_handle, Adapter->tx_softint_pri,
454 	    e1000g_tx_softint_worker, (caddr_t)Adapter) != DDI_SUCCESS) {
455 		e1000g_log(Adapter, CE_WARN, "Add soft intr failed");
456 		goto attach_fail;
457 	}
458 	Adapter->attach_progress |= ATTACH_PROGRESS_SOFT_INTR;
459 
460 	/*
461 	 * Initialize Driver Counters
462 	 */
463 	if (e1000g_init_stats(Adapter) != DDI_SUCCESS) {
464 		e1000g_log(Adapter, CE_WARN, "Init stats failed");
465 		goto attach_fail;
466 	}
467 	Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS;
468 
469 	/*
470 	 * Initialize chip hardware and software structures
471 	 */
472 	if (e1000g_init(Adapter) != DDI_SUCCESS) {
473 		e1000g_log(Adapter, CE_WARN, "Adapter initialization failed");
474 		goto attach_fail;
475 	}
476 	Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
477 
478 	/*
479 	 * Initialize NDD parameters
480 	 */
481 	if (e1000g_nd_init(Adapter) != DDI_SUCCESS) {
482 		e1000g_log(Adapter, CE_WARN, "Init ndd failed");
483 		goto attach_fail;
484 	}
485 	Adapter->attach_progress |= ATTACH_PROGRESS_NDD;
486 
487 	/*
488 	 * Register the driver to the MAC
489 	 */
490 	if (e1000g_register_mac(Adapter) != DDI_SUCCESS) {
491 		e1000g_log(Adapter, CE_WARN, "Register MAC failed");
492 		goto attach_fail;
493 	}
494 	Adapter->attach_progress |= ATTACH_PROGRESS_MAC;
495 
496 	/*
497 	 * Now that mutex locks are initialized, and the chip is also
498 	 * initialized, enable interrupts.
499 	 */
500 	if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) {
501 		e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed");
502 		goto attach_fail;
503 	}
504 	Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
505 
506 	/*
507 	 * If e1000g_force_detach is enabled, create a new entry in the
508 	 * global private dip list. The entry maintains the priv_dip so
509 	 * DR is supported after the driver is detached.
510 	 */
511 	if (e1000g_force_detach) {
512 		private_devi_list_t *devi_node;
513 
514 		Adapter->priv_dip =
515 		    kmem_zalloc(sizeof (struct dev_info), KM_SLEEP);
516 		bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip),
517 		    sizeof (struct dev_info));
518 
519 		devi_node =
520 		    kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP);
521 
522 		rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
523 		devi_node->priv_dip = Adapter->priv_dip;
524 		devi_node->flag = E1000G_PRIV_DEVI_ATTACH;
525 		devi_node->next = e1000g_private_devi_list;
526 		e1000g_private_devi_list = devi_node;
527 		rw_exit(&e1000g_rx_detach_lock);
528 	}
529 
530 	cmn_err(CE_CONT, "!%s, %s\n", e1000g_string, e1000g_version);
531 
532 	return (DDI_SUCCESS);
533 
534 attach_fail:
535 	e1000g_unattach(devinfo, Adapter);
536 	return (DDI_FAILURE);
537 }
538 
539 static int
540 e1000g_register_mac(struct e1000g *Adapter)
541 {
542 	struct e1000_hw *hw = &Adapter->shared;
543 	mac_register_t *mac;
544 	int err;
545 
546 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
547 		return (DDI_FAILURE);
548 
549 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
550 	mac->m_driver = Adapter;
551 	mac->m_dip = Adapter->dip;
552 	mac->m_src_addr = hw->mac.addr;
553 	mac->m_callbacks = &e1000g_m_callbacks;
554 	mac->m_min_sdu = 0;
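	/*
	 * Derive the advertised max SDU from the hardware max frame size:
	 * frames above 8K reserve 256 bytes of headroom, other non-default
	 * frame sizes subtract 24 bytes of frame overhead, and the
	 * standard case advertises ETHERMTU.
	 */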
555 	mac->m_max_sdu =
556 	    (hw->mac.max_frame_size > FRAME_SIZE_UPTO_8K) ?
557 	    hw->mac.max_frame_size - 256 :
558 	    (hw->mac.max_frame_size != ETHERMAX) ?
559 	    hw->mac.max_frame_size - 24 : ETHERMTU;
560 
561 	err = mac_register(mac, &Adapter->mh);
562 	mac_free(mac);
563 
564 	return (err == 0 ? DDI_SUCCESS : DDI_FAILURE);
565 }
566 
567 static int
568 e1000g_identify_hardware(struct e1000g *Adapter)
569 {
570 	struct e1000_hw *hw = &Adapter->shared;
571 	struct e1000g_osdep *osdep = &Adapter->osdep;
572 
573 	/* Get the device id */
574 	hw->vendor_id =
575 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
576 	hw->device_id =
577 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
578 	hw->revision_id =
579 	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
580 	hw->subsystem_device_id =
581 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
582 	hw->subsystem_vendor_id =
583 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
584 
585 	if (e1000_set_mac_type(hw) != E1000_SUCCESS) {
586 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
587 		    "MAC type could not be set properly.");
588 		return (DDI_FAILURE);
589 	}
590 
591 	return (DDI_SUCCESS);
592 }
593 
594 static int
595 e1000g_regs_map(struct e1000g *Adapter)
596 {
597 	dev_info_t *devinfo = Adapter->dip;
598 	struct e1000_hw *hw = &Adapter->shared;
599 	struct e1000g_osdep *osdep = &Adapter->osdep;
600 	off_t mem_size;
601 
602 	/*
603 	 * First get the size of the device register set to be mapped. The
604 	 * second parameter is the register set we are interested in. In our
605 	 * wiseman hardware, 0 is for config registers and 1 is for memory
606 	 * mapped registers; mem_size should hold the memory mapped region size.
607 	 */
608 	if (ddi_dev_regsize(devinfo, 1, &mem_size) != DDI_SUCCESS) {
609 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
610 		    "ddi_dev_regsize for registers failed");
611 		return (DDI_FAILURE);
612 	}
613 
614 	if ((ddi_regs_map_setup(devinfo, 1, /* register of interest */
615 	    (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr,
616 	    &osdep->reg_handle)) != DDI_SUCCESS) {
617 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
618 		    "ddi_regs_map_setup for registers failed");
619 		goto regs_map_fail;
620 	}
621 
622 	/* ICH needs to map flash memory */
623 	if (hw->mac.type == e1000_ich8lan || hw->mac.type == e1000_ich9lan) {
624 		/* get flash size */
625 		if (ddi_dev_regsize(devinfo, ICH_FLASH_REG_SET,
626 		    &mem_size) != DDI_SUCCESS) {
627 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
628 			    "ddi_dev_regsize for ICH flash failed");
629 			goto regs_map_fail;
630 		}
631 
632 		/* map flash in */
633 		if (ddi_regs_map_setup(devinfo, ICH_FLASH_REG_SET,
634 		    (caddr_t *)&hw->flash_address, 0,
635 		    mem_size, &e1000g_regs_acc_attr,
636 		    &osdep->ich_flash_handle) != DDI_SUCCESS) {
637 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
638 			    "ddi_regs_map_setup for ICH flash failed");
639 			goto regs_map_fail;
640 		}
641 	}
642 
643 	return (DDI_SUCCESS);
644 
645 regs_map_fail:
646 	if (osdep->reg_handle != NULL)
647 		ddi_regs_map_free(&osdep->reg_handle);
648 
649 	return (DDI_FAILURE);
650 }
651 
652 static int
653 e1000g_set_driver_params(struct e1000g *Adapter)
654 {
655 	struct e1000_hw *hw;
656 	e1000g_tx_ring_t *tx_ring;
657 	uint32_t mem_bar, io_bar, bar64;
658 #ifdef __sparc
659 	dev_info_t *devinfo = Adapter->dip;
660 	ulong_t iommu_pagesize;
661 #endif
662 
663 	hw = &Adapter->shared;
664 
665 	/* Set MAC type and initialize hardware functions */
666 	if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) {
667 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
668 		    "Could not setup hardware functions");
669 		return (DDI_FAILURE);
670 	}
671 
672 	/* Get bus information */
673 	if (e1000_get_bus_info(hw) != E1000_SUCCESS) {
674 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
675 		    "Could not get bus information");
676 		return (DDI_FAILURE);
677 	}
678 
679 	/* get mem_base addr */
680 	mem_bar = pci_config_get32(Adapter->osdep.cfg_handle, PCI_CONF_BASE0);
681 	bar64 = mem_bar & PCI_BASE_TYPE_ALL;
682 
683 	/* get io_base addr */
684 	if (hw->mac.type >= e1000_82544) {
685 		if (bar64) {
686 			/* IO BAR is different for 64 bit BAR mode */
687 			io_bar = pci_config_get32(Adapter->osdep.cfg_handle,
688 			    PCI_CONF_BASE4);
689 		} else {
690 			/* normal 32-bit BAR mode */
691 			io_bar = pci_config_get32(Adapter->osdep.cfg_handle,
692 			    PCI_CONF_BASE2);
693 		}
694 		hw->io_base = io_bar & PCI_BASE_IO_ADDR_M;
695 	} else {
696 		/* no I/O access for adapters prior to 82544 */
697 		hw->io_base = 0x0;
698 	}
699 
700 	e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word);
701 
702 	hw->mac.autoneg_failed = B_TRUE;
703 
704 	/* Set the wait_for_link flag to B_FALSE */
705 	hw->phy.wait_for_link = B_FALSE;
706 
707 	/* Adaptive IFS related changes */
708 	hw->mac.adaptive_ifs = B_TRUE;
709 
710 	/* Enable phy init script for IGP phy of 82541/82547 */
711 	if ((hw->mac.type == e1000_82547) ||
712 	    (hw->mac.type == e1000_82541) ||
713 	    (hw->mac.type == e1000_82547_rev_2) ||
714 	    (hw->mac.type == e1000_82541_rev_2))
715 		e1000_init_script_state_82541(hw, B_TRUE);
716 
717 	/* Enable the TTL workaround for 82541/82547 */
718 	e1000_set_ttl_workaround_state_82541(hw, B_TRUE);
719 
720 #ifdef __sparc
721 	Adapter->strip_crc = B_TRUE;
722 #else
723 	Adapter->strip_crc = B_FALSE;
724 #endif
725 
726 	/* Get conf file properties */
727 	e1000g_get_conf(Adapter);
728 
729 	/* Get speed/duplex settings in conf file */
730 	hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
731 	hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
732 	e1000g_force_speed_duplex(Adapter);
733 
734 	/* Get Jumbo Frames settings in conf file */
735 	e1000g_get_max_frame_size(Adapter);
736 	hw->mac.min_frame_size =
737 	    MINIMUM_ETHERNET_PACKET_SIZE + CRC_LENGTH;
738 
739 #ifdef __sparc
740 	/* Get the system page size */
741 	Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1);
742 	iommu_pagesize = dvma_pagesize(devinfo);
743 	if (iommu_pagesize != 0) {
744 		if (Adapter->sys_page_sz == iommu_pagesize) {
745 			if (iommu_pagesize > 0x4000)
746 				Adapter->sys_page_sz = 0x4000;
747 		} else {
748 			if (Adapter->sys_page_sz > iommu_pagesize)
749 				Adapter->sys_page_sz = iommu_pagesize;
750 		}
751 	}
752 	Adapter->dvma_page_num = hw->mac.max_frame_size /
753 	    Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
754 	ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM);
755 #endif
756 
757 	/* Set Rx/Tx buffer size */
758 	switch (hw->mac.max_frame_size) {
759 	case ETHERMAX:
760 		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
761 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;
762 		break;
763 	case FRAME_SIZE_UPTO_4K:
764 		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
765 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
766 		break;
767 	case FRAME_SIZE_UPTO_8K:
768 		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
769 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
770 		break;
771 	case FRAME_SIZE_UPTO_9K:
772 	case FRAME_SIZE_UPTO_16K:
773 		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
774 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
775 		break;
776 	default:
777 		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
778 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;
779 		break;
780 	}
781 	Adapter->rx_buffer_size += E1000G_IPALIGNPRESERVEROOM;
782 
783 #ifndef NO_82542_SUPPORT
784 	/*
785 	 * Wiseman adapters have a requirement that receive buffers be
786 	 * aligned on a 256 byte boundary. Since Livengood does not require
787 	 * this, and forcing it for all hardware would have performance
788 	 * implications, the requirement is applied only to Wiseman and to
789 	 * jumbo-frames-enabled mode; the rest of the time it is okay to
790 	 * have normal frames. There is, however, a potential risk of
791 	 * losing data if a buffer is not aligned, so all Wiseman boards
792 	 * get 256 byte aligned buffers.
794 	 */
795 	if (hw->mac.type < e1000_82543)
796 		Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
797 	else
798 		Adapter->rx_buf_align = 1;
799 #endif
800 
801 	/* Master Latency Timer */
802 	Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER;
803 
804 	/* copper options */
805 	if (hw->media_type == e1000_media_type_copper) {
806 		hw->phy.mdix = 0;	/* AUTO_ALL_MODES */
807 		hw->phy.disable_polarity_correction = B_FALSE;
808 		hw->phy.ms_type = e1000_ms_hw_default;	/* E1000_MASTER_SLAVE */
809 	}
810 
811 	/* The initial link state should be "unknown" */
812 	Adapter->link_state = LINK_STATE_UNKNOWN;
813 
814 	/* Initialize tx parameters */
815 	Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE;
816 	Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD;
817 
818 	tx_ring = Adapter->tx_ring;
819 	tx_ring->recycle_low_water = DEFAULT_TX_RECYCLE_LOW_WATER;
820 	tx_ring->recycle_num = DEFAULT_TX_RECYCLE_NUM;
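	/*
	 * Limit the number of tx fragments handled per packet, derived
	 * from the max frame size and the bcopy threshold, and capped at
	 * half of MAX_TX_DESC_PER_PACKET.
	 */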
821 	tx_ring->frags_limit =
822 	    (hw->mac.max_frame_size / Adapter->tx_bcopy_thresh) + 2;
823 	if (tx_ring->frags_limit > (MAX_TX_DESC_PER_PACKET >> 1))
824 		tx_ring->frags_limit = (MAX_TX_DESC_PER_PACKET >> 1);
825 
826 	/* Initialize rx parameters */
827 	Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD;
828 
829 	return (DDI_SUCCESS);
830 }
831 
832 /*
833  * e1000g_detach - driver detach
834  *
835  * The detach() function is the complement of the attach routine.
836  * If cmd is set to DDI_DETACH, detach() is used to remove  the
837  * state  associated  with  a  given  instance of a device node
838  * prior to the removal of that instance from the system.
839  *
840  * The detach() function will be called once for each  instance
841  * of the device for which there has been a successful attach()
842  * once there are no longer  any  opens  on  the  device.
843  *
844  * The interrupt routines are disabled, and all memory allocated by
845  * this driver is freed.
846  */
847 static int
848 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
849 {
850 	struct e1000g *Adapter;
851 	boolean_t rx_drain;
852 
853 	switch (cmd) {
854 	default:
855 		return (DDI_FAILURE);
856 
857 	case DDI_SUSPEND:
858 		return (e1000g_suspend(devinfo));
859 
860 	case DDI_DETACH:
861 		break;
862 	}
863 
864 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
865 	if (Adapter == NULL)
866 		return (DDI_FAILURE);
867 
868 	if (mac_unregister(Adapter->mh) != 0) {
869 		e1000g_log(Adapter, CE_WARN, "Unregister MAC failed");
870 		return (DDI_FAILURE);
871 	}
872 	Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC;
873 
874 	if (Adapter->started)
875 		e1000g_stop(Adapter, B_TRUE);
876 
877 	rx_drain = e1000g_rx_drain(Adapter);
878 
879 	/*
880 	 * If e1000g_force_detach is enabled, driver detach is safe.
881 	 * We will let the e1000g_free_priv_devi_node routine determine
882 	 * whether we need to free the priv_dip entry for the current
883 	 * driver instance.
884 	 */
885 	if (e1000g_force_detach) {
886 		e1000g_free_priv_devi_node(Adapter, rx_drain);
887 	} else {
888 		if (!rx_drain)
889 			return (DDI_FAILURE);
890 	}
891 
892 	e1000g_unattach(devinfo, Adapter);
893 
894 	return (DDI_SUCCESS);
895 }
896 
897 /*
898  * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance
899  *
900  * If free_flag is true, the upper layer is not holding the rx buffers,
901  * so we can free the priv_dip entry safely.
902  *
903  * Otherwise, we have to keep this entry even after the driver is detached,
904  * and we also need to mark it with the E1000G_PRIV_DEVI_DETACH flag, so
905  * that the driver can free it once all of the rx buffers have been
906  * returned by the upper layer.
907  */
908 static void
909 e1000g_free_priv_devi_node(struct e1000g *Adapter, boolean_t free_flag)
910 {
911 	private_devi_list_t *devi_node, *devi_del;
912 
913 	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
914 	ASSERT(e1000g_private_devi_list != NULL);
915 	ASSERT(Adapter->priv_dip != NULL);
916 
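	/* First check whether our entry is at the head of the list. */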
917 	devi_node = e1000g_private_devi_list;
918 	if (devi_node->priv_dip == Adapter->priv_dip) {
919 		if (free_flag) {
920 			e1000g_private_devi_list =
921 			    devi_node->next;
922 			kmem_free(devi_node->priv_dip,
923 			    sizeof (struct dev_info));
924 			kmem_free(devi_node,
925 			    sizeof (private_devi_list_t));
926 		} else {
927 			ASSERT(e1000g_mblks_pending != 0);
928 			devi_node->flag =
929 			    E1000G_PRIV_DEVI_DETACH;
930 		}
931 		rw_exit(&e1000g_rx_detach_lock);
932 		return;
933 	}
934 
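	/* Otherwise, search the remainder of the list for our entry. */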
935 	devi_node = e1000g_private_devi_list;
936 	while (devi_node->next != NULL) {
937 		if (devi_node->next->priv_dip == Adapter->priv_dip) {
938 			if (free_flag) {
939 				devi_del = devi_node->next;
940 				devi_node->next = devi_del->next;
941 				kmem_free(devi_del->priv_dip,
942 				    sizeof (struct dev_info));
943 				kmem_free(devi_del,
944 				    sizeof (private_devi_list_t));
945 			} else {
946 				ASSERT(e1000g_mblks_pending != 0);
947 				devi_node->next->flag =
948 				    E1000G_PRIV_DEVI_DETACH;
949 			}
950 			break;
951 		}
952 		devi_node = devi_node->next;
953 	}
954 	rw_exit(&e1000g_rx_detach_lock);
955 }
956 
957 static void
958 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter)
959 {
960 	if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
961 		(void) e1000g_disable_intrs(Adapter);
962 	}
963 
964 	if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) {
965 		(void) mac_unregister(Adapter->mh);
966 	}
967 
968 	if (Adapter->attach_progress & ATTACH_PROGRESS_NDD) {
969 		e1000g_nd_cleanup(Adapter);
970 	}
971 
972 	if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
973 		(void) e1000g_rem_intrs(Adapter);
974 	}
975 
976 	if (Adapter->attach_progress & ATTACH_PROGRESS_SOFT_INTR) {
977 		(void) ddi_intr_remove_softint(Adapter->tx_softint_handle);
978 	}
979 
980 	if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) {
981 		(void) ddi_prop_remove_all(devinfo);
982 	}
983 
984 	if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) {
985 		kstat_delete((kstat_t *)Adapter->e1000g_ksp);
986 	}
987 
988 	if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) {
989 		stop_link_timer(Adapter);
990 		e1000_reset_hw(&Adapter->shared);
991 	}
992 
993 	if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
994 		if (Adapter->osdep.reg_handle != NULL)
995 			ddi_regs_map_free(&Adapter->osdep.reg_handle);
996 		if (Adapter->osdep.ich_flash_handle != NULL)
997 			ddi_regs_map_free(&Adapter->osdep.ich_flash_handle);
998 	}
999 
1000 	if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
1001 		if (Adapter->osdep.cfg_handle != NULL)
1002 			pci_config_teardown(&Adapter->osdep.cfg_handle);
1003 	}
1004 
1005 	if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) {
1006 		e1000g_destroy_locks(Adapter);
1007 	}
1008 
1009 	e1000_remove_device(&Adapter->shared);
1010 
1011 	kmem_free((caddr_t)Adapter, sizeof (struct e1000g));
1012 
1013 	/*
1014 	 * Another hotplug spec requirement:
1015 	 * run ddi_set_driver_private(devinfo, NULL);
1016 	 */
1017 	ddi_set_driver_private(devinfo, NULL);
1018 }
1019 
1020 static void
1021 e1000g_init_locks(struct e1000g *Adapter)
1022 {
1023 	e1000g_tx_ring_t *tx_ring;
1024 	e1000g_rx_ring_t *rx_ring;
1025 
1026 	rw_init(&Adapter->chip_lock, NULL,
1027 	    RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1028 	mutex_init(&Adapter->link_lock, NULL,
1029 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1030 	mutex_init(&Adapter->watchdog_lock, NULL,
1031 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1032 
1033 	tx_ring = Adapter->tx_ring;
1034 
1035 	mutex_init(&tx_ring->tx_lock, NULL,
1036 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1037 	mutex_init(&tx_ring->usedlist_lock, NULL,
1038 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1039 	mutex_init(&tx_ring->freelist_lock, NULL,
1040 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1041 	mutex_init(&tx_ring->mblks_lock, NULL,
1042 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1043 
1044 	rx_ring = Adapter->rx_ring;
1045 
1046 	mutex_init(&rx_ring->rx_lock, NULL,
1047 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1048 	mutex_init(&rx_ring->freelist_lock, NULL,
1049 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1050 }
1051 
1052 static void
1053 e1000g_destroy_locks(struct e1000g *Adapter)
1054 {
1055 	e1000g_tx_ring_t *tx_ring;
1056 	e1000g_rx_ring_t *rx_ring;
1057 
1058 	tx_ring = Adapter->tx_ring;
1059 	mutex_destroy(&tx_ring->tx_lock);
1060 	mutex_destroy(&tx_ring->usedlist_lock);
1061 	mutex_destroy(&tx_ring->freelist_lock);
1062 	mutex_destroy(&tx_ring->mblks_lock);
1063 
1064 	rx_ring = Adapter->rx_ring;
1065 	mutex_destroy(&rx_ring->rx_lock);
1066 	mutex_destroy(&rx_ring->freelist_lock);
1067 
1068 	mutex_destroy(&Adapter->link_lock);
1069 	mutex_destroy(&Adapter->watchdog_lock);
1070 	rw_destroy(&Adapter->chip_lock);
1071 }
1072 
1073 static int
1074 e1000g_resume(dev_info_t *devinfo)
1075 {
1076 	struct e1000g *Adapter;
1077 
1078 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1079 	if (Adapter == NULL)
1080 		return (DDI_FAILURE);
1081 
1082 	if (e1000g_start(Adapter, B_TRUE))
1083 		return (DDI_FAILURE);
1084 
1085 	return (DDI_SUCCESS);
1086 }
1087 
1088 static int
1089 e1000g_suspend(dev_info_t *devinfo)
1090 {
1091 	struct e1000g *Adapter;
1092 
1093 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1094 	if (Adapter == NULL)
1095 		return (DDI_FAILURE);
1096 
1097 	e1000g_stop(Adapter, B_TRUE);
1098 
1099 	return (DDI_SUCCESS);
1100 }
1101 
1102 static int
1103 e1000g_init(struct e1000g *Adapter)
1104 {
1105 	uint32_t pba;
1106 	uint32_t high_water;
1107 	struct e1000_hw *hw;
1108 	clock_t link_timeout;
1109 
1110 	hw = &Adapter->shared;
1111 
1112 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1113 
1114 	/*
1115 	 * reset to put the hardware in a known state
1116 	 * before we try to do anything with the eeprom
1117 	 */
1118 	(void) e1000_reset_hw(hw);
1119 
1120 	if (e1000_validate_nvm_checksum(hw) < 0) {
1121 		/*
1122 		 * Some PCI-E parts fail the first check due to
1123 		 * the link being in sleep state.  Call it again;
1124 		 * if it fails a second time, it's a real issue.
1125 		 */
1126 		if (e1000_validate_nvm_checksum(hw) < 0) {
1127 			e1000g_log(Adapter, CE_WARN,
1128 			    "Invalid NVM checksum. Please contact "
1129 			    "the vendor to update the NVM.");
1130 			goto init_fail;
1131 		}
1132 	}
1133 
1134 #ifdef __sparc
1135 	/*
1136 	 * First, we try to get the local ethernet address from OBP. If
1137 	 * that fails, we read it from the EEPROM of the NIC card.
1138 	 */
1139 	if (!e1000g_find_mac_address(Adapter)) {
1140 		if (e1000_read_mac_addr(hw) < 0) {
1141 			e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
1142 			goto init_fail;
1143 		}
1144 	}
1145 #else
1146 	/* Get the local ethernet address. */
1147 	if (e1000_read_mac_addr(hw) < 0) {
1148 		e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
1149 		goto init_fail;
1150 	}
1151 #endif
1152 
1153 	/* check for valid mac address */
1154 	if (!is_valid_mac_addr(hw->mac.addr)) {
1155 		e1000g_log(Adapter, CE_WARN, "Invalid mac addr");
1156 		goto init_fail;
1157 	}
1158 
1159 	/* Set LAA state for 82571 chipset */
1160 	e1000_set_laa_state_82571(hw, B_TRUE);
1161 
1162 	/* Master Latency Timer implementation */
1163 	if (Adapter->master_latency_timer) {
1164 		pci_config_put8(Adapter->osdep.cfg_handle,
1165 		    PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer);
1166 	}
1167 
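	/*
	 * Select the Packet Buffer Allocation (PBA), which splits the
	 * on-chip packet buffer between the Rx and Tx FIFOs based on the
	 * MAC type and on whether jumbo frames are in use.
	 */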
1168 	if (hw->mac.type < e1000_82547) {
1169 		/*
1170 		 * Total FIFO is 64K
1171 		 */
1172 		if (hw->mac.max_frame_size > FRAME_SIZE_UPTO_8K)
1173 			pba = E1000_PBA_40K;	/* 40K for Rx, 24K for Tx */
1174 		else
1175 			pba = E1000_PBA_48K;	/* 48K for Rx, 16K for Tx */
1176 	} else if (hw->mac.type >= e1000_82571 &&
1177 	    hw->mac.type <= e1000_82572) {
1178 		/*
1179 		 * Total FIFO is 48K
1180 		 */
1181 		if (hw->mac.max_frame_size > FRAME_SIZE_UPTO_8K)
1182 			pba = E1000_PBA_30K;	/* 30K for Rx, 18K for Tx */
1183 		else
1184 			pba = E1000_PBA_38K;	/* 38K for Rx, 10K for Tx */
1185 	} else if (hw->mac.type == e1000_ich8lan) {
1186 		pba = E1000_PBA_8K;		/* 8K for Rx, 12K for Tx */
1187 	} else if (hw->mac.type == e1000_ich9lan) {
1188 		pba = E1000_PBA_12K;
1189 	} else {
1190 		/*
1191 		 * Total FIFO is 40K
1192 		 */
1193 		if (hw->mac.max_frame_size > FRAME_SIZE_UPTO_8K)
1194 			pba = E1000_PBA_22K;	/* 22K for Rx, 18K for Tx */
1195 		else
1196 			pba = E1000_PBA_30K;	/* 30K for Rx, 10K for Tx */
1197 	}
1198 	E1000_WRITE_REG(hw, E1000_PBA, pba);
1199 
1200 	/*
1201 	 * These parameters set thresholds for the adapter's generation(Tx)
1202 	 * and response(Rx) to Ethernet PAUSE frames.  These are just threshold
1203 	 * settings.  Flow control is enabled or disabled in the configuration
1204 	 * file.
1205 	 * High-water mark is set down from the top of the rx fifo (not
1206 	 * sensitive to max_frame_size) and low-water is set just below
1207 	 * high-water mark.
1208 	 * The high water mark must be low enough to fit one full frame above
1209 	 * it in the rx FIFO.  Should be the lower of:
1210 	 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early
1211 	 * receive size (assuming ERT set to E1000_ERT_2048), or the full
1212 	 * Rx FIFO size minus one full frame.
1213 	 */
1214 	high_water = min(((pba << 10) * 9 / 10),
1215 	    ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_ich9lan) ?
1216 	    ((pba << 10) - (E1000_ERT_2048 << 3)) :
1217 	    ((pba << 10) - hw->mac.max_frame_size)));
1218 
1219 	hw->mac.fc_high_water = high_water & 0xFFF8;
1220 	hw->mac.fc_low_water = hw->mac.fc_high_water - 8;
1221 
1222 	if (hw->mac.type == e1000_80003es2lan)
1223 		hw->mac.fc_pause_time = 0xFFFF;
1224 	else
1225 		hw->mac.fc_pause_time = E1000_FC_PAUSE_TIME;
1226 	hw->mac.fc_send_xon = B_TRUE;
1227 	hw->mac.fc = hw->mac.original_fc;
1228 
1229 	/*
1230 	 * Reset the adapter hardware the second time.
1231 	 */
1232 	(void) e1000_reset_hw(hw);
1233 
1234 	/* disable wakeup control by default */
1235 	if (hw->mac.type >= e1000_82544)
1236 		E1000_WRITE_REG(hw, E1000_WUC, 0);
1237 
1238 	/* MWI setup */
1239 	e1000_pci_set_mwi(hw);
1240 
1241 	/*
1242 	 * Configure/Initialize hardware
1243 	 */
1244 	if (e1000_init_hw(hw) < 0) {
1245 		e1000g_log(Adapter, CE_WARN, "Initialize hw failed");
1246 		goto init_fail;
1247 	}
1248 
1249 	/* Disable Smart Power Down */
1250 	phy_spd_state(hw, B_FALSE);
1251 
1252 	/* Make sure driver has control */
1253 	e1000g_get_driver_control(hw);
1254 
1255 	/*
1256 	 * Initialize unicast addresses.
1257 	 */
1258 	e1000g_init_unicst(Adapter);
1259 
1260 	/*
1261 	 * Setup and initialize the mctable structures.  After this routine
1262 	 * completes, the multicast table will be set.
1263 	 */
1264 	e1000g_setup_multicast(Adapter);
1265 	msec_delay(5);
1266 
1267 	/*
1268 	 * Implement Adaptive IFS
1269 	 */
1270 	e1000_reset_adaptive(hw);
1271 
1272 	/* Setup Interrupt Throttling Register */
1273 	E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate);
1274 
1275 	/* Start the timer for link setup */
1276 	if (hw->mac.autoneg)
1277 		link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000);
1278 	else
1279 		link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000);
1280 
1281 	mutex_enter(&Adapter->link_lock);
1282 	if (hw->phy.wait_for_link) {
1283 		Adapter->link_complete = B_TRUE;
1284 	} else {
1285 		Adapter->link_complete = B_FALSE;
1286 		Adapter->link_tid = timeout(e1000g_link_timer,
1287 		    (void *)Adapter, link_timeout);
1288 	}
1289 	mutex_exit(&Adapter->link_lock);
1290 
1291 	/* Enable PCI-Ex master */
1292 	if (hw->bus.type == e1000_bus_type_pci_express) {
1293 		e1000_enable_pciex_master(hw);
1294 	}
1295 
1296 	/* Save the state of the phy */
1297 	e1000g_get_phy_state(Adapter);
1298 
1299 	Adapter->init_count++;
1300 
1301 	rw_exit(&Adapter->chip_lock);
1302 
1303 	return (DDI_SUCCESS);
1304 
1305 init_fail:
1306 	rw_exit(&Adapter->chip_lock);
1307 	return (DDI_FAILURE);
1308 }
1309 
1310 /*
1311  * Check if the link is up
1312  */
1313 static boolean_t
1314 e1000g_link_up(struct e1000g *Adapter)
1315 {
1316 	struct e1000_hw *hw;
1317 	boolean_t link_up;
1318 
1319 	hw = &Adapter->shared;
1320 
1321 	e1000_check_for_link(hw);
1322 
1323 	if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU) ||
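	/*
	 * The link is considered up if the STATUS register reports link-up,
	 * if an 82543 has no outstanding link-status change to resolve, or
	 * if a serdes-based adapter reports link on the serdes interface.
	 */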
1324 	    ((!hw->mac.get_link_status) && (hw->mac.type == e1000_82543)) ||
1325 	    ((hw->media_type == e1000_media_type_internal_serdes) &&
1326 	    (hw->mac.serdes_has_link))) {
1327 		link_up = B_TRUE;
1328 	} else {
1329 		link_up = B_FALSE;
1330 	}
1331 
1332 	return (link_up);
1333 }
1334 
1335 static void
1336 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
1337 {
1338 	struct iocblk *iocp;
1339 	struct e1000g *e1000gp;
1340 	enum ioc_reply status;
1341 	int err;
1342 
1343 	iocp = (struct iocblk *)mp->b_rptr;
1344 	iocp->ioc_error = 0;
1345 	e1000gp = (struct e1000g *)arg;
1346 
1347 	ASSERT(e1000gp);
1348 	if (e1000gp == NULL) {
1349 		miocnak(q, mp, 0, EINVAL);
1350 		return;
1351 	}
1352 
1353 	switch (iocp->ioc_cmd) {
1354 
1355 	case LB_GET_INFO_SIZE:
1356 	case LB_GET_INFO:
1357 	case LB_GET_MODE:
1358 	case LB_SET_MODE:
1359 		status = e1000g_loopback_ioctl(e1000gp, iocp, mp);
1360 		break;
1361 
1362 	case ND_GET:
1363 	case ND_SET:
1364 		status = e1000g_nd_ioctl(e1000gp, q, mp, iocp);
1365 		break;
1366 
1367 #ifdef E1000G_DEBUG
1368 	case E1000G_IOC_REG_PEEK:
1369 	case E1000G_IOC_REG_POKE:
1370 		status = e1000g_pp_ioctl(e1000gp, iocp, mp);
1371 		break;
1372 	case E1000G_IOC_CHIP_RESET:
1373 		e1000gp->reset_count++;
1374 		if (e1000g_reset(e1000gp))
1375 			status = IOC_ACK;
1376 		else
1377 			status = IOC_INVAL;
1378 		break;
1379 #endif
1380 	default:
1381 		status = IOC_INVAL;
1382 		break;
1383 	}
1384 
1385 	/*
1386 	 * Decide how to reply
1387 	 */
1388 	switch (status) {
1389 	default:
1390 	case IOC_INVAL:
1391 		/*
1392 		 * Error, reply with a NAK and EINVAL or the specified error
1393 		 */
1394 		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
1395 		    EINVAL : iocp->ioc_error);
1396 		break;
1397 
1398 	case IOC_DONE:
1399 		/*
1400 		 * OK, reply already sent
1401 		 */
1402 		break;
1403 
1404 	case IOC_ACK:
1405 		/*
1406 		 * OK, reply with an ACK
1407 		 */
1408 		miocack(q, mp, 0, 0);
1409 		break;
1410 
1411 	case IOC_REPLY:
1412 		/*
1413 		 * OK, send prepared reply as ACK or NAK
1414 		 */
1415 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
1416 		    M_IOCACK : M_IOCNAK;
1417 		qreply(q, mp);
1418 		break;
1419 	}
1420 }
1421 
1422 static void e1000g_m_blank(void *arg, time_t ticks, uint32_t count)
1423 {
1424 	struct e1000g *Adapter;
1425 
1426 	Adapter = (struct e1000g *)arg;
1427 
1428 	/*
1429 	 * Adjust ITR (Interrupt Throttling Register) to coalesce
1430 	 * interrupts. This formula and its coefficient come from
1431 	 * our experiments.
1432 	 */
1433 	if (Adapter->intr_adaptive) {
1434 		Adapter->intr_throttling_rate = count << 5;
1435 		E1000_WRITE_REG(&Adapter->shared, E1000_ITR,
1436 		    Adapter->intr_throttling_rate);
1437 	}
1438 }
1439 
1440 static void
1441 e1000g_m_resources(void *arg)
1442 {
1443 	struct e1000g *adapter = (struct e1000g *)arg;
1444 	mac_rx_fifo_t mrf;
1445 
1446 	mrf.mrf_type = MAC_RX_FIFO;
1447 	mrf.mrf_blank = e1000g_m_blank;
1448 	mrf.mrf_arg = (void *)adapter;
1449 	mrf.mrf_normal_blank_time = E1000_RX_INTPT_TIME;
1450 	mrf.mrf_normal_pkt_count = E1000_RX_PKT_CNT;
1451 
1452 	adapter->mrh = mac_resource_add(adapter->mh, (mac_resource_t *)&mrf);
1453 }
1454 
1455 static int
1456 e1000g_m_start(void *arg)
1457 {
1458 	struct e1000g *Adapter = (struct e1000g *)arg;
1459 
1460 	return (e1000g_start(Adapter, B_TRUE));
1461 }
1462 
1463 static int
1464 e1000g_start(struct e1000g *Adapter, boolean_t global)
1465 {
1466 	if (global) {
1467 		/* Allocate dma resources for descriptors and buffers */
1468 		if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) {
1469 			e1000g_log(Adapter, CE_WARN,
1470 			    "Alloc DMA resources failed");
1471 			return (ENOTACTIVE);
1472 		}
1473 		Adapter->rx_buffer_setup = B_FALSE;
1474 	}
1475 
1476 	if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) {
1477 		if (e1000g_init(Adapter) != DDI_SUCCESS) {
1478 			e1000g_log(Adapter, CE_WARN,
1479 			    "Adapter initialization failed");
1480 			if (global)
1481 				e1000g_release_dma_resources(Adapter);
1482 			return (ENOTACTIVE);
1483 		}
1484 	}
1485 
1486 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1487 
1488 	/* Setup and initialize the transmit structures */
1489 	e1000g_tx_setup(Adapter);
1490 	msec_delay(5);
1491 
1492 	/* Setup and initialize the receive structures */
1493 	e1000g_rx_setup(Adapter);
1494 	msec_delay(5);
1495 
1496 	e1000g_mask_interrupt(Adapter);
1497 	if (Adapter->tx_intr_enable)
1498 		e1000g_mask_tx_interrupt(Adapter);
1499 
1500 	Adapter->started = B_TRUE;
1501 	Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
1502 
1503 	rw_exit(&Adapter->chip_lock);
1504 
1505 	/* Enable and start the watchdog timer */
1506 	enable_watchdog_timer(Adapter);
1507 
1508 	return (0);
1509 }
1510 
1511 static void
1512 e1000g_m_stop(void *arg)
1513 {
1514 	struct e1000g *Adapter = (struct e1000g *)arg;
1515 
1516 	e1000g_stop(Adapter, B_TRUE);
1517 }
1518 
1519 static void
1520 e1000g_stop(struct e1000g *Adapter, boolean_t global)
1521 {
1522 	/* Set stop flags */
1523 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1524 
1525 	Adapter->started = B_FALSE;
1526 	Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT;
1527 
1528 	rw_exit(&Adapter->chip_lock);
1529 
1530 	/* Drain tx sessions */
1531 	(void) e1000g_tx_drain(Adapter);
1532 
1533 	/* Disable and stop all the timers */
1534 	disable_watchdog_timer(Adapter);
1535 	stop_link_timer(Adapter);
1536 	stop_82547_timer(Adapter->tx_ring);
1537 
1538 	/* Stop the chip and release pending resources */
1539 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1540 
1541 	e1000g_clear_all_interrupts(Adapter);
1542 	e1000_reset_hw(&Adapter->shared);
1543 
1544 	/* Release resources still held by the TX descriptors */
1545 	e1000g_tx_clean(Adapter);
1546 
1547 	/* Clean the pending rx jumbo packet fragment */
1548 	e1000g_rx_clean(Adapter);
1549 
1550 	rw_exit(&Adapter->chip_lock);
1551 
1552 	if (global)
1553 		e1000g_release_dma_resources(Adapter);
1554 }
1555 
1556 static void
1557 e1000g_rx_clean(struct e1000g *Adapter)
1558 {
1559 	e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
1560 
1561 	if (rx_ring->rx_mblk != NULL) {
1562 		freemsg(rx_ring->rx_mblk);
1563 		rx_ring->rx_mblk = NULL;
1564 		rx_ring->rx_mblk_tail = NULL;
1565 		rx_ring->rx_mblk_len = 0;
1566 	}
1567 }
1568 
1569 static void
1570 e1000g_tx_clean(struct e1000g *Adapter)
1571 {
1572 	e1000g_tx_ring_t *tx_ring;
1573 	p_tx_sw_packet_t packet;
1574 	mblk_t *mp;
1575 	mblk_t *nmp;
1576 	uint32_t packet_count;
1577 
1578 	tx_ring = Adapter->tx_ring;
1579 
1580 	/*
1581 	 * Here we don't need to protect the lists using
1582 	 * the usedlist_lock and freelist_lock, because they
1583 	 * are already protected by the chip_lock.
1584 	 */
1585 	mp = NULL;
1586 	nmp = NULL;
1587 	packet_count = 0;
1588 	packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list);
1589 	while (packet != NULL) {
1590 		if (packet->mp != NULL) {
1591 			/* Assemble the message chain */
1592 			if (mp == NULL) {
1593 				mp = packet->mp;
1594 				nmp = packet->mp;
1595 			} else {
1596 				nmp->b_next = packet->mp;
1597 				nmp = packet->mp;
1598 			}
1599 			/* Disconnect the message from the sw packet */
1600 			packet->mp = NULL;
1601 		}
1602 
1603 		e1000g_free_tx_swpkt(packet);
1604 		packet_count++;
1605 
1606 		packet = (p_tx_sw_packet_t)
1607 		    QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link);
1608 	}
1609 
1610 	if (mp != NULL) {
1611 		mutex_enter(&tx_ring->mblks_lock);
1612 		if (tx_ring->mblks.head == NULL) {
1613 			tx_ring->mblks.head = mp;
1614 			tx_ring->mblks.tail = nmp;
1615 		} else {
1616 			tx_ring->mblks.tail->b_next = mp;
1617 			tx_ring->mblks.tail = nmp;
1618 		}
1619 		mutex_exit(&tx_ring->mblks_lock);
1620 	}
1621 
1622 	ddi_intr_trigger_softint(Adapter->tx_softint_handle, NULL);
1623 
1624 	if (packet_count > 0) {
1625 		QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list);
1626 		QUEUE_INIT_LIST(&tx_ring->used_list);
1627 
1628 		/* Setup TX descriptor pointers */
1629 		tx_ring->tbd_next = tx_ring->tbd_first;
1630 		tx_ring->tbd_oldest = tx_ring->tbd_first;
1631 
1632 		/* Setup our HW Tx Head & Tail descriptor pointers */
1633 		E1000_WRITE_REG(&Adapter->shared, E1000_TDH, 0);
1634 		E1000_WRITE_REG(&Adapter->shared, E1000_TDT, 0);
1635 	}
1636 }
1637 
1638 static boolean_t
1639 e1000g_tx_drain(struct e1000g *Adapter)
1640 {
1641 	int i;
1642 	boolean_t done;
1643 	e1000g_tx_ring_t *tx_ring;
1644 
1645 	tx_ring = Adapter->tx_ring;
1646 
1647 	/* Allow up to TX_DRAIN_TIME (in msec) for pending transmits to complete. */
1648 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1649 		mutex_enter(&tx_ring->usedlist_lock);
1650 		done = IS_QUEUE_EMPTY(&tx_ring->used_list);
1651 		mutex_exit(&tx_ring->usedlist_lock);
1652 
1653 		if (done)
1654 			break;
1655 
1656 		msec_delay(1);
1657 	}
1658 
1659 	return (done);
1660 }
1661 
1662 static boolean_t
1663 e1000g_rx_drain(struct e1000g *Adapter)
1664 {
1665 	e1000g_rx_ring_t *rx_ring;
1666 	p_rx_sw_packet_t packet;
1667 	boolean_t done;
1668 
1669 	rx_ring = Adapter->rx_ring;
1670 	done = B_TRUE;
1671 
1672 	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
1673 
1674 	while (rx_ring->pending_list != NULL) {
1675 		packet = rx_ring->pending_list;
1676 		rx_ring->pending_list =
1677 		    rx_ring->pending_list->next;
1678 
1679 		if (packet->flag == E1000G_RX_SW_STOP) {
1680 			packet->flag = E1000G_RX_SW_DETACH;
1681 			done = B_FALSE;
1682 		} else {
1683 			ASSERT(packet->flag == E1000G_RX_SW_FREE);
1684 			ASSERT(packet->mp == NULL);
1685 			e1000g_free_rx_sw_packet(packet);
1686 		}
1687 	}
1688 
1689 	rw_exit(&e1000g_rx_detach_lock);
1690 
1691 	return (done);
1692 }
1693 
1694 boolean_t
1695 e1000g_reset(struct e1000g *Adapter)
1696 {
1697 	e1000g_stop(Adapter, B_FALSE);
1698 
1699 	if (e1000g_start(Adapter, B_FALSE)) {
1700 		e1000g_log(Adapter, CE_WARN, "Reset failed");
1701 		return (B_FALSE);
1702 	}
1703 
1704 	return (B_TRUE);
1705 }
1706 
1707 /*
1708  * e1000g_intr_pciexpress - ISR for PCI Express chipsets
1709  *
1710  * This interrupt service routine is for PCI-Express adapters.
1711  * The ICR contents are valid only when the E1000_ICR_INT_ASSERTED
1712  * bit is set.
1713  */
1714 static uint_t
1715 e1000g_intr_pciexpress(caddr_t arg)
1716 {
1717 	struct e1000g *Adapter;
1718 	uint32_t icr;
1719 
1720 	Adapter = (struct e1000g *)arg;
1721 	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
1722 
1723 	if (icr & E1000_ICR_INT_ASSERTED) {
1724 		/*
1725 		 * E1000_ICR_INT_ASSERTED bit was set:
1726 		 * Read(Clear) the ICR, claim this interrupt,
1727 		 * look for work to do.
1728 		 */
1729 		e1000g_intr_work(Adapter, icr);
1730 		return (DDI_INTR_CLAIMED);
1731 	} else {
1732 		/*
1733 		 * E1000_ICR_INT_ASSERTED bit was not set:
1734 		 * Don't claim this interrupt, return immediately.
1735 		 */
1736 		return (DDI_INTR_UNCLAIMED);
1737 	}
1738 }
1739 
1740 /*
1741  * e1000g_intr - ISR for PCI/PCI-X chipsets
1742  *
1743  * This interrupt service routine is for PCI/PCI-X adapters.
1744  * We check the ICR contents regardless of whether the
1745  * E1000_ICR_INT_ASSERTED bit is set or not.
1746  */
1747 static uint_t
1748 e1000g_intr(caddr_t arg)
1749 {
1750 	struct e1000g *Adapter;
1751 	uint32_t icr;
1752 
1753 	Adapter = (struct e1000g *)arg;
1754 	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
1755 
1756 	if (icr) {
1757 		/*
1758 		 * Any bit was set in ICR:
1759 		 * Read(Clear) the ICR, claim this interrupt,
1760 		 * look for work to do.
1761 		 */
1762 		e1000g_intr_work(Adapter, icr);
1763 		return (DDI_INTR_CLAIMED);
1764 	} else {
1765 		/*
1766 		 * No bit was set in ICR:
1767 		 * Don't claim this interrupt, return immediately.
1768 		 */
1769 		return (DDI_INTR_UNCLAIMED);
1770 	}
1771 }
1772 
1773 /*
1774  * e1000g_intr_work - actual processing of ISR
1775  *
1776  * Read(clear) the ICR contents and call appropriate interrupt
1777  * processing routines.
1778  */
1779 static void
1780 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr)
1781 {
1782 	rw_enter(&Adapter->chip_lock, RW_READER);
1783 	/*
1784 	 * Here we need to check the "started" flag within the chip_lock to
1785 	 * ensure the receive routine will not execute when the adapter is
1786 	 * being reset.
1787 	 */
1788 	if (!Adapter->started) {
1789 		rw_exit(&Adapter->chip_lock);
1790 		return;
1791 	}
1792 
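	/* Receive timer interrupt: packets are waiting in the rx ring. */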
1793 	if (icr & E1000_ICR_RXT0) {
1794 		mblk_t *mp;
1795 
1796 		mutex_enter(&Adapter->rx_ring->rx_lock);
1797 		mp = e1000g_receive(Adapter);
1798 		mutex_exit(&Adapter->rx_ring->rx_lock);
1799 
1800 		rw_exit(&Adapter->chip_lock);
1801 
1802 		if (mp != NULL)
1803 			mac_rx(Adapter->mh, Adapter->mrh, mp);
1804 	} else
1805 		rw_exit(&Adapter->chip_lock);
1806 
1807 	/*
1808 	 * The Receive Sequence errors RXSEQ and the link status change LSC
1809 	 * are checked to detect that the cable has been pulled out. For
1810 	 * the Wiseman 2.0 silicon, the receive sequence errors interrupt
1811 	 * are an indication that cable is not connected.
1812 	 */
1813 	if ((icr & E1000_ICR_RXSEQ) ||
1814 	    (icr & E1000_ICR_LSC) ||
1815 	    (icr & E1000_ICR_GPI_EN1)) {
1816 		boolean_t link_changed;
1817 		timeout_id_t tid = 0;
1818 
1819 		stop_watchdog_timer(Adapter);
1820 
1821 		rw_enter(&Adapter->chip_lock, RW_WRITER);
1822 
1823 		/*
1824 		 * Because we got a link-status-change interrupt, force
1825 		 * e1000_check_for_link() to look at phy
1826 		 */
1827 		Adapter->shared.mac.get_link_status = B_TRUE;
1828 
1829 		/* e1000g_link_check takes care of link status change */
1830 		link_changed = e1000g_link_check(Adapter);
1831 
1832 		/* Get new phy state */
1833 		e1000g_get_phy_state(Adapter);
1834 
1835 		/*
1836 		 * If the link timer has not timed out, we'll not notify
1837 		 * the upper layer with any link state until the link is up.
1838 		 */
1839 		if (link_changed && !Adapter->link_complete) {
1840 			if (Adapter->link_state == LINK_STATE_UP) {
1841 				mutex_enter(&Adapter->link_lock);
1842 				Adapter->link_complete = B_TRUE;
1843 				tid = Adapter->link_tid;
1844 				Adapter->link_tid = 0;
1845 				mutex_exit(&Adapter->link_lock);
1846 			} else {
1847 				link_changed = B_FALSE;
1848 			}
1849 		}
1850 		rw_exit(&Adapter->chip_lock);
1851 
1852 		if (link_changed) {
1853 			if (tid != 0)
1854 				(void) untimeout(tid);
1855 
1856 			/*
1857 			 * Workaround for esb2. Data stuck in fifo on a link
1858 			 * down event. Reset the adapter to recover it.
1859 			 */
1860 			if ((Adapter->link_state == LINK_STATE_DOWN) &&
1861 			    (Adapter->shared.mac.type == e1000_80003es2lan))
1862 				(void) e1000g_reset(Adapter);
1863 
1864 			mac_link_update(Adapter->mh, Adapter->link_state);
1865 		}
1866 
1867 		start_watchdog_timer(Adapter);
1868 	}
1869 
1870 	if (icr & E1000G_ICR_TX_INTR) {
1871 		e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
1872 
1873 		if (!Adapter->tx_intr_enable)
1874 			e1000g_clear_tx_interrupt(Adapter);
1875 		/* Schedule the re-transmit */
1876 		if (tx_ring->resched_needed) {
1877 			E1000G_STAT(tx_ring->stat_reschedule);
1878 			tx_ring->resched_needed = B_FALSE;
1879 			mac_tx_update(Adapter->mh);
1880 		}
1881 		if (Adapter->tx_intr_enable) {
1882 			/* Recycle the tx descriptors */
1883 			rw_enter(&Adapter->chip_lock, RW_READER);
1884 			E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr);
1885 			e1000g_recycle(tx_ring);
1886 			rw_exit(&Adapter->chip_lock);
1887 			/* Free the recycled messages */
1888 			ddi_intr_trigger_softint(Adapter->tx_softint_handle,
1889 			    NULL);
1890 		}
1891 	}
1892 }
1893 
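/*
 * e1000g_init_unicst - initialize the unicast (RAR) address slots
 *
 * On the first initialization, record the number of available address
 * slots and program the default MAC address into RAR slot 0; on later
 * initializations (after a reset), restore the default address and
 * re-program the RAR registers from the saved per-slot addresses.
 */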
1894 static void
1895 e1000g_init_unicst(struct e1000g *Adapter)
1896 {
1897 	struct e1000_hw *hw;
1898 	int slot;
1899 
1900 	hw = &Adapter->shared;
1901 
1902 	if (Adapter->init_count == 0) {
1903 		/* Initialize the multiple unicast addresses */
1904 		Adapter->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
1905 
1906 		if ((hw->mac.type == e1000_82571) &&
1907 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
1908 			Adapter->unicst_total--;
1909 
1910 		Adapter->unicst_avail = Adapter->unicst_total - 1;
1911 
1912 		/* Store the default mac address */
1913 		e1000_rar_set(hw, hw->mac.addr, 0);
1914 		if ((hw->mac.type == e1000_82571) &&
1915 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
1916 			e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY);
1917 
1918 		bcopy(hw->mac.addr, Adapter->unicst_addr[0].mac.addr,
1919 		    ETHERADDRL);
1920 		Adapter->unicst_addr[0].mac.set = 1;
1921 
1922 		for (slot = 1; slot < Adapter->unicst_total; slot++)
1923 			Adapter->unicst_addr[slot].mac.set = 0;
1924 	} else {
1925 		/* Recover the default mac address */
1926 		bcopy(Adapter->unicst_addr[0].mac.addr, hw->mac.addr,
1927 		    ETHERADDRL);
1928 
1929 		/* Store the default mac address */
1930 		e1000_rar_set(hw, hw->mac.addr, 0);
1931 		if ((hw->mac.type == e1000_82571) &&
1932 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
1933 			e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY);
1934 
1935 		/* Re-configure the RAR registers */
1936 		for (slot = 1; slot < Adapter->unicst_total; slot++)
1937 			e1000_rar_set(hw,
1938 			    Adapter->unicst_addr[slot].mac.addr, slot);
1939 	}
1940 }
1941 
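/*
 * e1000g_m_unicst - GLD entry point to set the primary unicast address
 *
 * The new address is saved as the default MAC address and programmed
 * into address slot 0 via e1000g_unicst_set().
 */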
1942 static int
1943 e1000g_m_unicst(void *arg, const uint8_t *mac_addr)
1944 {
1945 	struct e1000g *Adapter;
1946 
1947 	Adapter = (struct e1000g *)arg;
1948 
1949 	/* Store the default MAC address */
1950 	bcopy(mac_addr, Adapter->shared.mac.addr, ETHERADDRL);
1951 
1952 	/* Set MAC address in address slot 0, which is the default address */
1953 	return (e1000g_unicst_set(Adapter, mac_addr, 0));
1954 }
1955 
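/*
 * e1000g_unicst_set - program a MAC address into the given RAR slot
 *
 * For 82542 rev 2.0 silicon the receiver must be held in reset (and MWI
 * disabled) while the receive address registers are updated. When slot 0
 * is programmed on an 82571 using a locally administered address, the
 * address is also written to the last RAR entry.
 */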
1956 static int
1957 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr,
1958     mac_addr_slot_t slot)
1959 {
1960 	struct e1000_hw *hw;
1961 
1962 	hw = &Adapter->shared;
1963 
1964 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1965 
1966 #ifndef NO_82542_SUPPORT
1967 	/*
1968 	 * The first revision of Wiseman silicon (rev 2.0) has an errata
1969 	 * that requires the receiver to be in reset when any of the
1970 	 * receive address registers (RAR regs) are accessed.  The first
1971 	 * rev of Wiseman silicon also requires MWI to be disabled when
1972 	 * a global reset or a receive reset is issued.  So before we
1973 	 * initialize the RARs, we check the rev of the Wiseman controller
1974 	 * and work around any necessary HW errata.
1975 	 */
1976 	if ((hw->mac.type == e1000_82542) &&
1977 	    (hw->revision_id == E1000_REVISION_2)) {
1978 		e1000_pci_clear_mwi(hw);
1979 		E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
1980 		msec_delay(5);
1981 	}
1982 #endif
1983 
1984 	bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr, ETHERADDRL);
1985 	e1000_rar_set(hw, (uint8_t *)mac_addr, slot);
1986 
1987 	if (slot == 0) {
1988 		if ((hw->mac.type == e1000_82571) &&
1989 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
1990 			e1000_rar_set(hw, (uint8_t *)mac_addr, LAST_RAR_ENTRY);
1991 	}
1992 
1993 #ifndef NO_82542_SUPPORT
1994 	/*
1995 	 * If we are using Wiseman rev 2.0 silicon, we will have previously
1996 	 * put the receiver in reset, and disabled MWI, to work around some
1997 	 * HW errata.  Now we should take the receiver out of reset, and
1998 	 * re-enable MWI if it was previously enabled by the PCI BIOS.
1999 	 */
2000 	if ((hw->mac.type == e1000_82542) &&
2001 	    (hw->revision_id == E1000_REVISION_2)) {
2002 		E1000_WRITE_REG(hw, E1000_RCTL, 0);
2003 		msec_delay(1);
2004 		if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2005 			e1000_pci_set_mwi(hw);
2006 		e1000g_rx_setup(Adapter);
2007 	}
2008 #endif
2009 
2010 	rw_exit(&Adapter->chip_lock);
2011 
2012 	return (0);
2013 }
2014 
2015 /*
2016  * e1000g_m_unicst_add() - will find an unused address slot, set the
2017  * address value to the one specified, reserve that slot and enable
2018  * the NIC to start filtering on the new MAC address.
2019  * Returns 0 on success.
2020  */
2021 static int
2022 e1000g_m_unicst_add(void *arg, mac_multi_addr_t *maddr)
2023 {
2024 	struct e1000g *Adapter = (struct e1000g *)arg;
2025 	mac_addr_slot_t slot;
2026 	int err;
2027 
2028 	if (mac_unicst_verify(Adapter->mh,
2029 	    maddr->mma_addr, maddr->mma_addrlen) == B_FALSE)
2030 		return (EINVAL);
2031 
2032 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2033 	if (Adapter->unicst_avail == 0) {
2034 		/* no slots available */
2035 		rw_exit(&Adapter->chip_lock);
2036 		return (ENOSPC);
2037 	}
2038 
2039 	/*
2040 	 * The primary/default address is in slot 0; the remaining slots
2041 	 * hold the multiple MAC addresses. Multiple MAC address 0 is in
2042 	 * slot 1, address 1 in slot 2, and so on, so the first multiple
2043 	 * MAC address resides in slot 1.
2044 	 */
2045 	for (slot = 1; slot < Adapter->unicst_total; slot++) {
2046 		if (Adapter->unicst_addr[slot].mac.set == 0) {
2047 			Adapter->unicst_addr[slot].mac.set = 1;
2048 			break;
2049 		}
2050 	}
2051 
2052 	ASSERT((slot > 0) && (slot < Adapter->unicst_total));
2053 
2054 	Adapter->unicst_avail--;
2055 	rw_exit(&Adapter->chip_lock);
2056 
2057 	maddr->mma_slot = slot;
2058 
2059 	if ((err = e1000g_unicst_set(Adapter, maddr->mma_addr, slot)) != 0) {
2060 		rw_enter(&Adapter->chip_lock, RW_WRITER);
2061 		Adapter->unicst_addr[slot].mac.set = 0;
2062 		Adapter->unicst_avail++;
2063 		rw_exit(&Adapter->chip_lock);
2064 	}
2065 
2066 	return (err);
2067 }
2068 
2069 /*
2070  * e1000g_m_unicst_remove() - removes a MAC address that was added by a
2071  * call to e1000g_m_unicst_add(). The slot number that was returned in
2072  * e1000g_m_unicst_add() is passed in the call to remove the address.
2073  * Returns 0 on success.
2074  */
2075 static int
2076 e1000g_m_unicst_remove(void *arg, mac_addr_slot_t slot)
2077 {
2078 	struct e1000g *Adapter = (struct e1000g *)arg;
2079 	int err;
2080 
2081 	if ((slot <= 0) || (slot >= Adapter->unicst_total))
2082 		return (EINVAL);
2083 
2084 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2085 	if (Adapter->unicst_addr[slot].mac.set == 1) {
2086 		Adapter->unicst_addr[slot].mac.set = 0;
2087 		Adapter->unicst_avail++;
2088 		rw_exit(&Adapter->chip_lock);
2089 
2090 		/* Copy the default address to the passed slot */
2091 		if ((err = e1000g_unicst_set(Adapter,
2092 		    Adapter->unicst_addr[0].mac.addr, slot)) != 0) {
2093 			rw_enter(&Adapter->chip_lock, RW_WRITER);
2094 			Adapter->unicst_addr[slot].mac.set = 1;
2095 			Adapter->unicst_avail--;
2096 			rw_exit(&Adapter->chip_lock);
2097 		}
2098 		return (err);
2099 	}
2100 	rw_exit(&Adapter->chip_lock);
2101 
2102 	return (EINVAL);
2103 }
2104 
2105 /*
2106  * e1000g_m_unicst_modify() - modifies the value of an address that
2107  * has been added by e1000g_m_unicst_add(). The new address, address
2108  * length and the slot number that was returned in the call to add
2109  * should be passed to e1000g_m_unicst_modify(). mma_flags should be
2110  * set to 0. Returns 0 on success.
2111  */
2112 static int
2113 e1000g_m_unicst_modify(void *arg, mac_multi_addr_t *maddr)
2114 {
2115 	struct e1000g *Adapter = (struct e1000g *)arg;
2116 	mac_addr_slot_t slot;
2117 
2118 	if (mac_unicst_verify(Adapter->mh,
2119 	    maddr->mma_addr, maddr->mma_addrlen) == B_FALSE)
2120 		return (EINVAL);
2121 
2122 	slot = maddr->mma_slot;
2123 
2124 	if ((slot <= 0) || (slot >= Adapter->unicst_total))
2125 		return (EINVAL);
2126 
2127 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2128 	if (Adapter->unicst_addr[slot].mac.set == 1) {
2129 		rw_exit(&Adapter->chip_lock);
2130 
2131 		return (e1000g_unicst_set(Adapter, maddr->mma_addr, slot));
2132 	}
2133 	rw_exit(&Adapter->chip_lock);
2134 
2135 	return (EINVAL);
2136 }
2137 
2138 /*
2139  * e1000g_m_unicst_get() - will get the MAC address and all other
2140  * information related to the address slot passed in mac_multi_addr_t.
2141  * mma_flags should be set to 0 in the call.
2142  * On return, mma_flags can take the following values:
2143  * 1) MMAC_SLOT_UNUSED
2144  * 2) MMAC_SLOT_USED | MMAC_VENDOR_ADDR
2145  * 3) MMAC_SLOT_UNUSED | MMAC_VENDOR_ADDR
2146  * 4) MMAC_SLOT_USED
2147  */
2148 static int
2149 e1000g_m_unicst_get(void *arg, mac_multi_addr_t *maddr)
2150 {
2151 	struct e1000g *Adapter = (struct e1000g *)arg;
2152 	mac_addr_slot_t slot;
2153 
2154 	slot = maddr->mma_slot;
2155 
2156 	if ((slot <= 0) || (slot >= Adapter->unicst_total))
2157 		return (EINVAL);
2158 
2159 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2160 	if (Adapter->unicst_addr[slot].mac.set == 1) {
2161 		bcopy(Adapter->unicst_addr[slot].mac.addr,
2162 		    maddr->mma_addr, ETHERADDRL);
2163 		maddr->mma_flags = MMAC_SLOT_USED;
2164 	} else {
2165 		maddr->mma_flags = MMAC_SLOT_UNUSED;
2166 	}
2167 	rw_exit(&Adapter->chip_lock);
2168 
2169 	return (0);
2170 }
2171 
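/*
 * multicst_add - add a multicast address to the driver's table
 *
 * The address must have the multicast bit set and there must be a free
 * entry in the table; the hardware multicast filters are then refreshed
 * via e1000g_setup_multicast().
 */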
2172 static int
2173 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr)
2174 {
2175 	struct e1000_hw *hw = &Adapter->shared;
2176 	unsigned i;
2177 	int res = 0;
2178 
2179 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2180 
2181 	if ((multiaddr[0] & 01) == 0) {
2182 		res = EINVAL;
2183 		goto done;
2184 	}
2185 
2186 	if (Adapter->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2187 		res = ENOENT;
2188 		goto done;
2189 	}
2190 
2191 	bcopy(multiaddr,
2192 	    &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL);
2193 	Adapter->mcast_count++;
2194 
2195 	/*
2196 	 * Update the MC table in the hardware
2197 	 */
2198 	e1000g_clear_interrupt(Adapter);
2199 
2200 	e1000g_setup_multicast(Adapter);
2201 
2202 #ifndef NO_82542_SUPPORT
2203 	if ((hw->mac.type == e1000_82542) &&
2204 	    (hw->revision_id == E1000_REVISION_2))
2205 		e1000g_rx_setup(Adapter);
2206 #endif
2207 
2208 	e1000g_mask_interrupt(Adapter);
2209 
2210 done:
2211 	rw_exit(&Adapter->chip_lock);
2212 	return (res);
2213 }
2214 
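/*
 * multicst_remove - remove a multicast address from the driver's table
 *
 * The remaining entries are compacted and the hardware multicast
 * filters are refreshed via e1000g_setup_multicast().
 */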
2215 static int
2216 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr)
2217 {
2218 	struct e1000_hw *hw = &Adapter->shared;
2219 	unsigned i;
2220 
2221 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2222 
2223 	for (i = 0; i < Adapter->mcast_count; i++) {
2224 		if (bcmp(multiaddr, &Adapter->mcast_table[i],
2225 		    ETHERADDRL) == 0) {
2226 			for (i++; i < Adapter->mcast_count; i++) {
2227 				Adapter->mcast_table[i - 1] =
2228 				    Adapter->mcast_table[i];
2229 			}
2230 			Adapter->mcast_count--;
2231 			break;
2232 		}
2233 	}
2234 
2235 	/*
2236 	 * Update the MC table in the hardware
2237 	 */
2238 	e1000g_clear_interrupt(Adapter);
2239 
2240 	e1000g_setup_multicast(Adapter);
2241 
2242 #ifndef NO_82542_SUPPORT
2243 	if ((hw->mac.type == e1000_82542) &&
2244 	    (hw->revision_id == E1000_REVISION_2))
2245 		e1000g_rx_setup(Adapter);
2246 #endif
2247 
2248 	e1000g_mask_interrupt(Adapter);
2249 
2250 done:
2251 	rw_exit(&Adapter->chip_lock);
2252 	return (0);
2253 }
2254 
2255 /*
2256  * e1000g_setup_multicast - setup multicast data structures
2257  *
2258  * This routine initializes all of the multicast related structures.
2259  */
2260 void
2261 e1000g_setup_multicast(struct e1000g *Adapter)
2262 {
2263 	uint8_t *mc_addr_list;
2264 	uint32_t mc_addr_count;
2265 	uint32_t rctl;
2266 	struct e1000_hw *hw;
2267 
2268 	hw = &Adapter->shared;
2269 
2270 	/*
2271 	 * The e1000g has the ability to do perfect filtering of 16
2272 	 * addresses. The driver uses one of the e1000g's 16 receive
2273 	 * address registers for its node/network/mac/individual address.
2274 	 * So, we have room for up to 15 multicast addresses in the CAM,
2275 	 * additional MC addresses are handled by the MTA (Multicast Table
2276 	 * Array)
2277 	 */
2278 
2279 	rctl = E1000_READ_REG(hw, E1000_RCTL);
2280 
2281 	mc_addr_list = (uint8_t *)Adapter->mcast_table;
2282 
2283 	if (Adapter->mcast_count > MAX_NUM_MULTICAST_ADDRESSES) {
2284 		E1000G_DEBUGLOG_1(Adapter, CE_WARN,
2285 		    "Adapter requested more than %d MC Addresses.\n",
2286 		    MAX_NUM_MULTICAST_ADDRESSES);
2287 		mc_addr_count = MAX_NUM_MULTICAST_ADDRESSES;
2288 	} else {
2289 		/*
2290 		 * Set the number of MC addresses that we are being
2291 		 * requested to use
2292 		 */
2293 		mc_addr_count = Adapter->mcast_count;
2294 	}
2295 #ifndef NO_82542_SUPPORT
2296 	/*
2297 	 * The Wiseman 2.0 silicon has an errata by which the receiver will
2298 	 * hang while writing to the receive address registers if the receiver
2299 	 * is not in reset before writing to the registers. Updating the RAR
2300 	 * is done during the setting up of the multicast table, hence the
2301 	 * receiver has to be put in reset before updating the multicast table
2302 	 * and then taken out of reset at the end
2303 	 */
2304 	/*
2305 	 * If MWI was enabled, disable it before issuing the global
2306 	 * reset to the hardware.
2307 	 */
2308 	/*
2309 	 * Only required for WISEMAN_2_0
2310 	 */
2311 	if ((hw->mac.type == e1000_82542) &&
2312 	    (hw->revision_id == E1000_REVISION_2)) {
2313 		e1000_pci_clear_mwi(hw);
2314 		/*
2315 		 * The e1000g must be in reset before changing any RA
2316 		 * registers. Reset receive unit.  The chip will remain in
2317 		 * the reset state until software explicitly restarts it.
2318 		 */
2319 		E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2320 		/* Allow receiver time to go into reset */
2321 		msec_delay(5);
2322 	}
2323 #endif
2324 
2325 	e1000_mc_addr_list_update(hw, mc_addr_list, mc_addr_count,
2326 	    Adapter->unicst_total, hw->mac.rar_entry_count);
2327 
2328 #ifndef NO_82542_SUPPORT
2329 	/*
2330 	 * Only for Wiseman_2_0:
2331 	 * If MWI was enabled, re-enable it after issuing the receive reset
2332 	 * command (we disabled it above).
2333 	 * Wainwright does not have a receive reset command; the only thing
2334 	 * close to it is a global reset, which would require tx setup as well.
2335 	 */
2336 	if ((hw->mac.type == e1000_82542) &&
2337 	    (hw->revision_id == E1000_REVISION_2)) {
2338 		/*
2339 		 * If MWI was enabled, re-enable it after issuing the
2340 		 * global or receive reset to the hardware.
2341 		 */
2342 
2343 		/*
2344 		 * Take receiver out of reset
2345 		 * clear E1000_RCTL_RST bit (and all others)
2346 		 */
2347 		E1000_WRITE_REG(hw, E1000_RCTL, 0);
2348 		msec_delay(5);
2349 		if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2350 			e1000_pci_set_mwi(hw);
2351 	}
2352 #endif
2353 
2354 	/*
2355 	 * Restore original value
2356 	 */
2357 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2358 }
2359 
2360 int
2361 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
2362 {
2363 	struct e1000g *Adapter = (struct e1000g *)arg;
2364 
2365 	return ((add) ? multicst_add(Adapter, addr)
2366 	    : multicst_remove(Adapter, addr));
2367 }
2368 
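/*
 * e1000g_m_promisc - GLD entry point to enable/disable promiscuous mode
 *
 * Promiscuous mode is implemented by setting or clearing the unicast
 * and multicast promiscuous bits (UPE/MPE) in the receive control
 * register (broadcast acceptance is also enabled when turning it on).
 */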
2369 int
2370 e1000g_m_promisc(void *arg, boolean_t on)
2371 {
2372 	struct e1000g *Adapter = (struct e1000g *)arg;
2373 	uint32_t rctl;
2374 
2375 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2376 
2377 	rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
2378 
2379 	if (on)
2380 		rctl |=
2381 		    (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
2382 	else
2383 		rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
2384 
2385 	E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
2386 
2387 	Adapter->e1000g_promisc = on;
2388 
2389 	rw_exit(&Adapter->chip_lock);
2390 
2391 	return (0);
2392 }
2393 
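/*
 * e1000g_m_getcapab - GLD entry point to report driver capabilities
 *
 * Reports hardware checksum offload (only on chipsets where it has been
 * validated), polling support, and the multiple-unicast-address
 * capability with its add/remove/modify/get entry points.
 */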
2394 static boolean_t
2395 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2396 {
2397 	struct e1000g *Adapter = (struct e1000g *)arg;
2398 	struct e1000_hw *hw = &Adapter->shared;
2399 
2400 	switch (cap) {
2401 	case MAC_CAPAB_HCKSUM: {
2402 		uint32_t *txflags = cap_data;
2403 		/*
2404 		 * Checksum on/off selection via global parameters.
2405 		 *
2406 		 * If the chip is flagged as not capable of (correctly)
2407 		 * handling checksumming, we don't enable it on either
2408 		 * Rx or Tx side.  Otherwise, we take this chip's settings
2409 		 * from the patchable global defaults.
2410 		 *
2411 		 * We advertise our capabilities only if TX offload is
2412 		 * enabled.  On receive, the stack will accept checksummed
2413 		 * packets anyway, even if we haven't said we can deliver
2414 		 * them.
2415 		 */
2416 		switch (hw->mac.type) {
2417 		case e1000_82540:
2418 		case e1000_82544:
2419 		case e1000_82545:
2420 		case e1000_82545_rev_3:
2421 		case e1000_82546:
2422 		case e1000_82546_rev_3:
2423 		case e1000_82571:
2424 		case e1000_82572:
2425 		case e1000_82573:
2426 		case e1000_80003es2lan:
2427 			*txflags = HCKSUM_IPHDRCKSUM | HCKSUM_INET_PARTIAL;
2428 			break;
2429 
2430 		/*
2431 		 * For the following Intel PRO/1000 chipsets, we have not
2432 		 * tested the hardware checksum offload capability, so we
2433 		 * disable the capability for them.
2434 		 *	e1000_82542,
2435 		 *	e1000_82543,
2436 		 *	e1000_82541,
2437 		 *	e1000_82541_rev_2,
2438 		 *	e1000_82547,
2439 		 *	e1000_82547_rev_2,
2440 		 */
2441 		default:
2442 			return (B_FALSE);
2443 		}
2444 
2445 		break;
2446 	}
2447 	case MAC_CAPAB_POLL:
2448 		/*
2449 		 * There's nothing for us to fill in; simply returning
2450 		 * B_TRUE to state that we support polling is sufficient.
2451 		 */
2452 		break;
2453 
2454 	case MAC_CAPAB_MULTIADDRESS: {
2455 		multiaddress_capab_t *mmacp = cap_data;
2456 
2457 		/*
2458 		 * The number of MAC addresses made available by
2459 		 * this capability is one less than the total as
2460 		 * the primary address in slot 0 is counted in
2461 		 * the total.
2462 		 */
2463 		mmacp->maddr_naddr = Adapter->unicst_total - 1;
2464 		mmacp->maddr_naddrfree = Adapter->unicst_avail;
2465 		/* No multiple factory addresses, set mma_flag to 0 */
2466 		mmacp->maddr_flag = 0;
2467 		mmacp->maddr_handle = Adapter;
2468 		mmacp->maddr_add = e1000g_m_unicst_add;
2469 		mmacp->maddr_remove = e1000g_m_unicst_remove;
2470 		mmacp->maddr_modify = e1000g_m_unicst_modify;
2471 		mmacp->maddr_get = e1000g_m_unicst_get;
2472 		mmacp->maddr_reserve = NULL;
2473 		break;
2474 	}
2475 	default:
2476 		return (B_FALSE);
2477 	}
2478 	return (B_TRUE);
2479 }
2480 
2481 /*
2482  * e1000g_get_conf - get configurations set in e1000g.conf
2483  *
2484  * This routine gets user-configured values out of the configuration
2485  * file e1000g.conf.
2486  *
2487  * For each configurable value, there is a minimum, a maximum, and a
2488  * default.
2489  * If user does not configure a value, use the default.
2490  * If user configures below the minimum, use the minimum.
2491  * If user configures above the maximum, use the maximum.
2492  */
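/*
 * For illustration only, a per-instance entry in e1000g.conf is an
 * integer array indexed by instance number, e.g. (values shown here are
 * examples, not recommendations):
 *
 *	NumTxDescriptors=2048,2048;
 *	MaxFrameSize=0,0;
 */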
2493 static void
2494 e1000g_get_conf(struct e1000g *Adapter)
2495 {
2496 	struct e1000_hw *hw = &Adapter->shared;
2497 	boolean_t tbi_compatibility = B_FALSE;
2498 
2499 	/*
2500 	 * get each configurable property from e1000g.conf
2501 	 */
2502 
2503 	/*
2504 	 * NumTxDescriptors
2505 	 */
2506 	Adapter->tx_desc_num =
2507 	    e1000g_get_prop(Adapter, "NumTxDescriptors",
2508 	    MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR,
2509 	    DEFAULT_NUM_TX_DESCRIPTOR);
2510 
2511 	/*
2512 	 * NumRxDescriptors
2513 	 */
2514 	Adapter->rx_desc_num =
2515 	    e1000g_get_prop(Adapter, "NumRxDescriptors",
2516 	    MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR,
2517 	    DEFAULT_NUM_RX_DESCRIPTOR);
2518 
2519 	/*
2520 	 * NumRxFreeList
2521 	 */
2522 	Adapter->rx_freelist_num =
2523 	    e1000g_get_prop(Adapter, "NumRxFreeList",
2524 	    MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST,
2525 	    DEFAULT_NUM_RX_FREELIST);
2526 
2527 	/*
2528 	 * NumTxPacketList
2529 	 */
2530 	Adapter->tx_freelist_num =
2531 	    e1000g_get_prop(Adapter, "NumTxPacketList",
2532 	    MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST,
2533 	    DEFAULT_NUM_TX_FREELIST);
2534 
2535 	/*
2536 	 * FlowControl
2537 	 */
2538 	hw->mac.fc_send_xon = B_TRUE;
2539 	hw->mac.fc =
2540 	    e1000g_get_prop(Adapter, "FlowControl",
2541 	    e1000_fc_none, 4, DEFAULT_FLOW_CONTROL);
2542 	/* 4 is the setting that says "let the eeprom decide" */
2543 	if (hw->mac.fc == 4)
2544 		hw->mac.fc = e1000_fc_default;
2545 
2546 	/*
2547 	 * Max Num Receive Packets on Interrupt
2548 	 */
2549 	Adapter->rx_limit_onintr =
2550 	    e1000g_get_prop(Adapter, "MaxNumReceivePackets",
2551 	    MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR,
2552 	    DEFAULT_RX_LIMIT_ON_INTR);
2553 
2554 	/*
2555 	 * PHY master slave setting
2556 	 */
2557 	hw->phy.ms_type =
2558 	    e1000g_get_prop(Adapter, "SetMasterSlave",
2559 	    e1000_ms_hw_default, e1000_ms_auto,
2560 	    e1000_ms_hw_default);
2561 
2562 	/*
2563 	 * Parameter which controls TBI mode workaround, which is only
2564 	 * needed on certain switches such as Cisco 6500/Foundry
2565 	 */
2566 	tbi_compatibility =
2567 	    e1000g_get_prop(Adapter, "TbiCompatibilityEnable",
2568 	    0, 1, DEFAULT_TBI_COMPAT_ENABLE);
2569 	e1000_set_tbi_compatibility_82543(hw, tbi_compatibility);
2570 
2571 	/*
2572 	 * MSI Enable
2573 	 */
2574 	Adapter->msi_enabled =
2575 	    e1000g_get_prop(Adapter, "MSIEnable",
2576 	    0, 1, DEFAULT_MSI_ENABLE);
2577 
2578 	/*
2579 	 * Interrupt Throttling Rate
2580 	 */
2581 	Adapter->intr_throttling_rate =
2582 	    e1000g_get_prop(Adapter, "intr_throttling_rate",
2583 	    MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
2584 	    DEFAULT_INTR_THROTTLING);
2585 
2586 	/*
2587 	 * Adaptive Interrupt Blanking Enable/Disable
2588 	 * It is enabled by default
2589 	 */
2590 	Adapter->intr_adaptive =
2591 	    (e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1) == 1) ?
2592 	    B_TRUE : B_FALSE;
2593 }
2594 
2595 /*
2596  * e1000g_get_prop - routine to read properties
2597  *
2598  * Get a user-configured property value out of the configuration
2599  * file e1000g.conf.
2600  *
2601  * Caller provides name of the property, a default value, a minimum
2602  * value, and a maximum value.
2603  *
2604  * Return configured value of the property, with default, minimum and
2605  * maximum properly applied.
2606  */
2607 static int
2608 e1000g_get_prop(struct e1000g *Adapter,	/* pointer to per-adapter structure */
2609     char *propname,		/* name of the property */
2610     int minval,			/* minimum acceptable value */
2611     int maxval,			/* maximum acceptable value */
2612     int defval)			/* default value */
2613 {
2614 	int propval;		/* value returned for requested property */
2615 	int *props;		/* pointer to array of properties returned */
2616 	uint_t nprops;		/* number of property values returned */
2617 
2618 	/*
2619 	 * get the array of properties from the config file
2620 	 */
2621 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip,
2622 	    DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) {
2623 		/* got some properties, test if we got enough */
2624 		if (Adapter->instance < nprops) {
2625 			propval = props[Adapter->instance];
2626 		} else {
2627 			/* not enough properties configured */
2628 			propval = defval;
2629 			E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
2630 			    "Not Enough %s values found in e1000g.conf"
2631 			    " - set to %d\n",
2632 			    propname, propval);
2633 		}
2634 
2635 		/* free memory allocated for properties */
2636 		ddi_prop_free(props);
2637 
2638 	} else {
2639 		propval = defval;
2640 	}
2641 
2642 	/*
2643 	 * enforce limits
2644 	 */
2645 	if (propval > maxval) {
2646 		propval = maxval;
2647 		E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
2648 		    "Too High %s value in e1000g.conf - set to %d\n",
2649 		    propname, propval);
2650 	}
2651 
2652 	if (propval < minval) {
2653 		propval = minval;
2654 		E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
2655 		    "Too Low %s value in e1000g.conf - set to %d\n",
2656 		    propname, propval);
2657 	}
2658 
2659 	return (propval);
2660 }
2661 
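/*
 * e1000g_link_check - check for a change in link state
 *
 * If the link has come up, record the negotiated speed/duplex and
 * adjust TARC0 on 82571/82572 adapters according to the speed; if the
 * link has gone down, apply the IGP PHY master/slave workaround (and
 * SmartSpeed on subsequent checks) and, once the tx link-down timeout
 * expires, clean up the pending tx packets. Returns B_TRUE when the
 * link state changed.
 */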
2662 static boolean_t
2663 e1000g_link_check(struct e1000g *Adapter)
2664 {
2665 	uint16_t speed, duplex, phydata;
2666 	boolean_t link_changed = B_FALSE;
2667 	struct e1000_hw *hw;
2668 	uint32_t reg_tarc;
2669 
2670 	hw = &Adapter->shared;
2671 
2672 	if (e1000g_link_up(Adapter)) {
2673 		/*
2674 		 * The Link is up, check whether it was marked as down earlier
2675 		 */
2676 		if (Adapter->link_state != LINK_STATE_UP) {
2677 			e1000_get_speed_and_duplex(hw, &speed, &duplex);
2678 			Adapter->link_speed = speed;
2679 			Adapter->link_duplex = duplex;
2680 			Adapter->link_state = LINK_STATE_UP;
2681 			link_changed = B_TRUE;
2682 
2683 			Adapter->tx_link_down_timeout = 0;
2684 
2685 			if ((hw->mac.type == e1000_82571) ||
2686 			    (hw->mac.type == e1000_82572)) {
2687 				reg_tarc = E1000_READ_REG(hw, E1000_TARC0);
2688 				if (speed == SPEED_1000)
2689 					reg_tarc |= (1 << 21);
2690 				else
2691 					reg_tarc &= ~(1 << 21);
2692 				E1000_WRITE_REG(hw, E1000_TARC0, reg_tarc);
2693 			}
2694 		}
2695 		Adapter->smartspeed = 0;
2696 	} else {
2697 		if (Adapter->link_state != LINK_STATE_DOWN) {
2698 			Adapter->link_speed = 0;
2699 			Adapter->link_duplex = 0;
2700 			Adapter->link_state = LINK_STATE_DOWN;
2701 			link_changed = B_TRUE;
2702 
2703 			/*
2704 			 * SmartSpeed workaround for Tabor/TanaX: when the
2705 			 * driver loses link, disable auto master/slave
2706 			 * resolution.
2707 			 */
2708 			if (hw->phy.type == e1000_phy_igp) {
2709 				e1000_read_phy_reg(hw,
2710 				    PHY_1000T_CTRL, &phydata);
2711 				phydata |= CR_1000T_MS_ENABLE;
2712 				e1000_write_phy_reg(hw,
2713 				    PHY_1000T_CTRL, phydata);
2714 			}
2715 		} else {
2716 			e1000g_smartspeed(Adapter);
2717 		}
2718 
2719 		if (Adapter->started) {
2720 			if (Adapter->tx_link_down_timeout <
2721 			    MAX_TX_LINK_DOWN_TIMEOUT) {
2722 				Adapter->tx_link_down_timeout++;
2723 			} else if (Adapter->tx_link_down_timeout ==
2724 			    MAX_TX_LINK_DOWN_TIMEOUT) {
2725 				e1000g_tx_clean(Adapter);
2726 				Adapter->tx_link_down_timeout++;
2727 			}
2728 		}
2729 	}
2730 
2731 	return (link_changed);
2732 }
2733 
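/*
 * e1000g_local_timer - watchdog timer routine (runs every second)
 *
 * Frees recycled tx messages, resets the adapter if a tx stall is
 * detected, checks for link state changes, restores a locally
 * administered address on 82571 if the other port overwrote it, runs
 * the 82541/82547 long TTL workaround and the adaptive IFS update, and
 * then re-arms itself.
 */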
2734 static void
2735 e1000g_local_timer(void *ws)
2736 {
2737 	struct e1000g *Adapter = (struct e1000g *)ws;
2738 	struct e1000_hw *hw;
2739 	e1000g_ether_addr_t ether_addr;
2740 	boolean_t link_changed;
2741 
2742 	hw = &Adapter->shared;
2743 
2744 	(void) e1000g_tx_freemsg(Adapter->tx_ring);
2745 
2746 	if (e1000g_stall_check(Adapter)) {
2747 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
2748 		    "Tx stall detected. Activate automatic recovery.\n");
2749 		Adapter->reset_count++;
2750 		(void) e1000g_reset(Adapter);
2751 	}
2752 
2753 	link_changed = B_FALSE;
2754 	rw_enter(&Adapter->chip_lock, RW_READER);
2755 	if (Adapter->link_complete)
2756 		link_changed = e1000g_link_check(Adapter);
2757 	rw_exit(&Adapter->chip_lock);
2758 
2759 	if (link_changed) {
2760 		/*
2761 		 * Workaround for esb2. Data stuck in fifo on a link
2762 		 * down event. Reset the adapter to recover it.
2763 		 */
2764 		if ((Adapter->link_state == LINK_STATE_DOWN) &&
2765 		    (hw->mac.type == e1000_80003es2lan))
2766 			(void) e1000g_reset(Adapter);
2767 
2768 		mac_link_update(Adapter->mh, Adapter->link_state);
2769 	}
2770 
2771 	/*
2772 	 * With 82571 controllers, any locally administered address will
2773 	 * be overwritten when there is a reset on the other port.
2774 	 * Detect this circumstance and correct it.
2775 	 */
2776 	if ((hw->mac.type == e1000_82571) &&
2777 	    (e1000_get_laa_state_82571(hw) == B_TRUE)) {
2778 		ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0);
2779 		ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1);
2780 
2781 		ether_addr.reg.low = ntohl(ether_addr.reg.low);
2782 		ether_addr.reg.high = ntohl(ether_addr.reg.high);
2783 
2784 		if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) ||
2785 		    (ether_addr.mac.addr[4] != hw->mac.addr[1]) ||
2786 		    (ether_addr.mac.addr[3] != hw->mac.addr[2]) ||
2787 		    (ether_addr.mac.addr[2] != hw->mac.addr[3]) ||
2788 		    (ether_addr.mac.addr[1] != hw->mac.addr[4]) ||
2789 		    (ether_addr.mac.addr[0] != hw->mac.addr[5])) {
2790 			e1000_rar_set(hw, hw->mac.addr, 0);
2791 		}
2792 	}
2793 
2794 	/*
2795 	 * Long TTL workaround for 82541/82547
2796 	 */
2797 	e1000_igp_ttl_workaround_82547(hw);
2798 
2799 	/*
2800 	 * Check the Adaptive IFS settings. If there are lots of collisions,
2801 	 * change the value in steps.
2802 	 * These properties should only be set for 10/100.
2803 	 */
2804 	if ((hw->media_type == e1000_media_type_copper) &&
2805 	    ((Adapter->link_speed == SPEED_100) ||
2806 	    (Adapter->link_speed == SPEED_10))) {
2807 		e1000_update_adaptive(hw);
2808 	}
2809 	/*
2810 	 * Set Timer Interrupts
2811 	 */
2812 	E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
2813 
2814 	restart_watchdog_timer(Adapter);
2815 }
2816 
2817 /*
2818  * The function e1000g_link_timer() is called when the link setup timer
2819  * expires, which indicates that the link setup is complete. The link
2820  * state is not updated until the link setup has completed, and it is
2821  * not sent to the upper layer through mac_link_update() in this
2822  * function; it will be reported from the local timer routine or the
2823  * interrupt service routine after the interface is started (plumbed).
2824  */
2825 static void
2826 e1000g_link_timer(void *arg)
2827 {
2828 	struct e1000g *Adapter = (struct e1000g *)arg;
2829 
2830 	mutex_enter(&Adapter->link_lock);
2831 	Adapter->link_complete = B_TRUE;
2832 	Adapter->link_tid = 0;
2833 	mutex_exit(&Adapter->link_lock);
2834 }
2835 
2836 /*
2837  * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf
2838  *
2839  * This function reads the forced speed and duplex settings for 10/100 Mbps
2840  * and 1000 Mbps operation from the e1000g.conf file.
2841  */
2842 static void
2843 e1000g_force_speed_duplex(struct e1000g *Adapter)
2844 {
2845 	int forced;
2846 	struct e1000_mac_info *mac = &Adapter->shared.mac;
2847 	struct e1000_phy_info *phy = &Adapter->shared.phy;
2848 
2849 	/*
2850 	 * get value out of config file
2851 	 */
2852 	forced = e1000g_get_prop(Adapter, "ForceSpeedDuplex",
2853 	    GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY);
2854 
2855 	switch (forced) {
2856 	case GDIAG_10_HALF:
2857 		/*
2858 		 * Disable Auto Negotiation
2859 		 */
2860 		mac->autoneg = B_FALSE;
2861 		mac->forced_speed_duplex = ADVERTISE_10_HALF;
2862 		break;
2863 	case GDIAG_10_FULL:
2864 		/*
2865 		 * Disable Auto Negotiation
2866 		 */
2867 		mac->autoneg = B_FALSE;
2868 		mac->forced_speed_duplex = ADVERTISE_10_FULL;
2869 		break;
2870 	case GDIAG_100_HALF:
2871 		/*
2872 		 * Disable Auto Negotiation
2873 		 */
2874 		mac->autoneg = B_FALSE;
2875 		mac->forced_speed_duplex = ADVERTISE_100_HALF;
2876 		break;
2877 	case GDIAG_100_FULL:
2878 		/*
2879 		 * Disable Auto Negotiation
2880 		 */
2881 		mac->autoneg = B_FALSE;
2882 		mac->forced_speed_duplex = ADVERTISE_100_FULL;
2883 		break;
2884 	case GDIAG_1000_FULL:
2885 		/*
2886 		 * The gigabit spec requires autonegotiation.  Therefore,
2887 		 * when the user wants to force the speed to 1000Mbps, we
2888 		 * enable AutoNeg, but only allow the hardware to advertise
2889 		 * 1000Mbps.  This is different from 10/100 operation, where
2890 		 * we are allowed to link without any negotiation.
2891 		 */
2892 		mac->autoneg = B_TRUE;
2893 		phy->autoneg_advertised = ADVERTISE_1000_FULL;
2894 		break;
2895 	default:	/* obey the setting of AutoNegAdvertised */
2896 		mac->autoneg = B_TRUE;
2897 		phy->autoneg_advertised =
2898 		    (uint16_t)e1000g_get_prop(Adapter, "AutoNegAdvertised",
2899 		    0, AUTONEG_ADVERTISE_SPEED_DEFAULT,
2900 		    AUTONEG_ADVERTISE_SPEED_DEFAULT);
2901 		break;
2902 	}	/* switch */
2903 }
2904 
2905 /*
2906  * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf
2907  *
2908  * This function reads MaxFrameSize from e1000g.conf
2909  */
2910 static void
2911 e1000g_get_max_frame_size(struct e1000g *Adapter)
2912 {
2913 	int max_frame;
2914 	struct e1000_mac_info *mac = &Adapter->shared.mac;
2915 	struct e1000_phy_info *phy = &Adapter->shared.phy;
2916 
2917 	/*
2918 	 * get value out of config file
2919 	 */
2920 	max_frame = e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0);
2921 
2922 	switch (max_frame) {
2923 	case 0:
2924 		mac->max_frame_size = ETHERMAX;
2925 		break;
2926 	case 1:
2927 		mac->max_frame_size = FRAME_SIZE_UPTO_4K;
2928 		break;
2929 	case 2:
2930 		mac->max_frame_size = FRAME_SIZE_UPTO_8K;
2931 		break;
2932 	case 3:
2933 		if (mac->type < e1000_82571)
2934 			mac->max_frame_size = FRAME_SIZE_UPTO_16K;
2935 		else
2936 			mac->max_frame_size = FRAME_SIZE_UPTO_9K;
2937 		break;
2938 	default:
2939 		mac->max_frame_size = ETHERMAX;
2940 		break;
2941 	}	/* switch */
2942 
2943 	/* ich8 does not do jumbo frames */
2944 	if (mac->type == e1000_ich8lan) {
2945 		mac->max_frame_size = ETHERMAX;
2946 	}
2947 
2948 	/* ich9 does not do jumbo frames on one phy type */
2949 	if ((mac->type == e1000_ich9lan) &&
2950 	    (phy->type == e1000_phy_ife)) {
2951 		mac->max_frame_size = ETHERMAX;
2952 	}
2953 }
2954 
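/*
 * Watchdog timer helpers: enable/disable control whether the watchdog
 * is allowed to run at all, while start/stop/restart control whether a
 * timeout is currently pending. All state transitions are protected by
 * watchdog_lock; a pending timeout is cancelled only after the lock has
 * been dropped.
 */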
2955 static void
2956 arm_watchdog_timer(struct e1000g *Adapter)
2957 {
2958 	Adapter->watchdog_tid =
2959 	    timeout(e1000g_local_timer,
2960 	    (void *)Adapter, 1 * drv_usectohz(1000000));
2961 }
2962 #pragma inline(arm_watchdog_timer)
2963 
2964 static void
2965 enable_watchdog_timer(struct e1000g *Adapter)
2966 {
2967 	mutex_enter(&Adapter->watchdog_lock);
2968 
2969 	if (!Adapter->watchdog_timer_enabled) {
2970 		Adapter->watchdog_timer_enabled = B_TRUE;
2971 		Adapter->watchdog_timer_started = B_TRUE;
2972 		arm_watchdog_timer(Adapter);
2973 	}
2974 
2975 	mutex_exit(&Adapter->watchdog_lock);
2976 }
2977 
2978 static void
2979 disable_watchdog_timer(struct e1000g *Adapter)
2980 {
2981 	timeout_id_t tid;
2982 
2983 	mutex_enter(&Adapter->watchdog_lock);
2984 
2985 	Adapter->watchdog_timer_enabled = B_FALSE;
2986 	Adapter->watchdog_timer_started = B_FALSE;
2987 	tid = Adapter->watchdog_tid;
2988 	Adapter->watchdog_tid = 0;
2989 
2990 	mutex_exit(&Adapter->watchdog_lock);
2991 
2992 	if (tid != 0)
2993 		(void) untimeout(tid);
2994 }
2995 
2996 static void
2997 start_watchdog_timer(struct e1000g *Adapter)
2998 {
2999 	mutex_enter(&Adapter->watchdog_lock);
3000 
3001 	if (Adapter->watchdog_timer_enabled) {
3002 		if (!Adapter->watchdog_timer_started) {
3003 			Adapter->watchdog_timer_started = B_TRUE;
3004 			arm_watchdog_timer(Adapter);
3005 		}
3006 	}
3007 
3008 	mutex_exit(&Adapter->watchdog_lock);
3009 }
3010 
3011 static void
3012 restart_watchdog_timer(struct e1000g *Adapter)
3013 {
3014 	mutex_enter(&Adapter->watchdog_lock);
3015 
3016 	if (Adapter->watchdog_timer_started)
3017 		arm_watchdog_timer(Adapter);
3018 
3019 	mutex_exit(&Adapter->watchdog_lock);
3020 }
3021 
3022 static void
3023 stop_watchdog_timer(struct e1000g *Adapter)
3024 {
3025 	timeout_id_t tid;
3026 
3027 	mutex_enter(&Adapter->watchdog_lock);
3028 
3029 	Adapter->watchdog_timer_started = B_FALSE;
3030 	tid = Adapter->watchdog_tid;
3031 	Adapter->watchdog_tid = 0;
3032 
3033 	mutex_exit(&Adapter->watchdog_lock);
3034 
3035 	if (tid != 0)
3036 		(void) untimeout(tid);
3037 }
3038 
3039 static void
3040 stop_link_timer(struct e1000g *Adapter)
3041 {
3042 	timeout_id_t tid;
3043 
3044 	/* Disable the link timer */
3045 	mutex_enter(&Adapter->link_lock);
3046 
3047 	tid = Adapter->link_tid;
3048 	Adapter->link_tid = 0;
3049 
3050 	mutex_exit(&Adapter->link_lock);
3051 
3052 	if (tid != 0)
3053 		(void) untimeout(tid);
3054 }
3055 
3056 static void
3057 stop_82547_timer(e1000g_tx_ring_t *tx_ring)
3058 {
3059 	timeout_id_t tid;
3060 
3061 	/* Disable the tx timer for 82547 chipset */
3062 	mutex_enter(&tx_ring->tx_lock);
3063 
3064 	tx_ring->timer_enable_82547 = B_FALSE;
3065 	tid = tx_ring->timer_id_82547;
3066 	tx_ring->timer_id_82547 = 0;
3067 
3068 	mutex_exit(&tx_ring->tx_lock);
3069 
3070 	if (tid != 0)
3071 		(void) untimeout(tid);
3072 }
3073 
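/*
 * Interrupt mask helpers. Note that "mask" here means enabling causes
 * via IMS and "clear" means disabling them via IMC:
 * e1000g_mask_interrupt() enables the normal causes except the tx
 * write-back/queue-empty causes, e1000g_clear_interrupt() disables all
 * causes except RXSEQ, e1000g_clear_all_interrupts() disables every
 * cause, and the tx variants enable/disable only the tx causes.
 */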
3074 void
3075 e1000g_clear_interrupt(struct e1000g *Adapter)
3076 {
3077 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC,
3078 	    0xffffffff & ~E1000_IMS_RXSEQ);
3079 }
3080 
3081 void
3082 e1000g_mask_interrupt(struct e1000g *Adapter)
3083 {
3084 	E1000_WRITE_REG(&Adapter->shared, E1000_IMS,
3085 	    IMS_ENABLE_MASK & ~E1000_IMS_TXDW & ~E1000_IMS_TXQE);
3086 }
3087 
3088 void
3089 e1000g_clear_all_interrupts(struct e1000g *Adapter)
3090 {
3091 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff);
3092 }
3093 
3094 void
3095 e1000g_mask_tx_interrupt(struct e1000g *Adapter)
3096 {
3097 	E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000G_IMS_TX_INTR);
3098 }
3099 
3100 void
3101 e1000g_clear_tx_interrupt(struct e1000g *Adapter)
3102 {
3103 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000G_IMS_TX_INTR);
3104 }
3105 
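/*
 * e1000g_smartspeed - workaround for PHY master/slave negotiation faults
 *
 * When an IGP PHY that is autonegotiating 1000Full repeatedly reports a
 * master/slave configuration fault, toggle manual master/slave
 * configuration and restart autonegotiation so that a link can still be
 * established (for example over 2/3-pair cabling). The counter restarts
 * the whole sequence after E1000_SMARTSPEED_MAX iterations.
 */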
3106 static void
3107 e1000g_smartspeed(struct e1000g *Adapter)
3108 {
3109 	struct e1000_hw *hw = &Adapter->shared;
3110 	uint16_t phy_status;
3111 	uint16_t phy_ctrl;
3112 
3113 	/*
3114 	 * If we're not T-or-T, or we're not autoneg'ing, or we're not
3115 	 * advertising 1000Full, we don't even use the workaround
3116 	 */
3117 	if ((hw->phy.type != e1000_phy_igp) ||
3118 	    !hw->mac.autoneg ||
3119 	    !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL))
3120 		return;
3121 
3122 	/*
3123 	 * True if this is the first call of this function or after every
3124 	 * 30 seconds of not having link
3125 	 */
3126 	if (Adapter->smartspeed == 0) {
3127 		/*
3128 		 * If Master/Slave config fault is asserted twice, we
3129 		 * assume back-to-back
3130 		 */
3131 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
3132 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
3133 			return;
3134 
3135 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
3136 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
3137 			return;
3138 		/*
3139 		 * We're assuming back-to-back because our status register
3140 		 * insists there's a fault in the master/slave
3141 		 * relationship that was "negotiated".
3142 		 */
3143 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
3144 		/*
3145 		 * Is the phy configured for manual configuration of
3146 		 * master/slave?
3147 		 */
3148 		if (phy_ctrl & CR_1000T_MS_ENABLE) {
3149 			/*
3150 			 * Yes.  Then disable manual configuration (enable
3151 			 * auto configuration) of master/slave
3152 			 */
3153 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
3154 			e1000_write_phy_reg(hw,
3155 			    PHY_1000T_CTRL, phy_ctrl);
3156 			/*
3157 			 * Effectively starting the clock
3158 			 */
3159 			Adapter->smartspeed++;
3160 			/*
3161 			 * Restart autonegotiation
3162 			 */
3163 			if (!e1000_phy_setup_autoneg(hw) &&
3164 			    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
3165 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
3166 				    MII_CR_RESTART_AUTO_NEG);
3167 				e1000_write_phy_reg(hw,
3168 				    PHY_CONTROL, phy_ctrl);
3169 			}
3170 		}
3171 		return;
3172 		/*
3173 		 * Has 6 seconds transpired still without link? Remember,
3174 		 * you should reset the smartspeed counter once you obtain
3175 		 * link
3176 		 */
3177 	} else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
3178 		/*
3179 		 * Yes.  Remember, we did at the start determine that
3180 		 * there's a master/slave configuration fault, so we're
3181 		 * still assuming there's someone on the other end, but we
3182 		 * just haven't yet been able to talk to it. We then
3183 		 * re-enable auto configuration of master/slave to see if
3184 		 * we're running 2/3 pair cables.
3185 		 */
3186 		/*
3187 		 * If still no link, perhaps using 2/3 pair cable
3188 		 */
3189 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
3190 		phy_ctrl |= CR_1000T_MS_ENABLE;
3191 		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
3192 		/*
3193 		 * Restart autoneg with phy enabled for manual
3194 		 * configuration of master/slave
3195 		 */
3196 		if (!e1000_phy_setup_autoneg(hw) &&
3197 		    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
3198 			phy_ctrl |=
3199 			    (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
3200 			e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
3201 		}
3202 		/*
3203 		 * Hopefully, there are no more faults and we've obtained
3204 		 * link as a result.
3205 		 */
3206 	}
3207 	/*
3208 	 * Restart process after E1000_SMARTSPEED_MAX iterations (30
3209 	 * seconds)
3210 	 */
3211 	if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
3212 		Adapter->smartspeed = 0;
3213 }
3214 
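/*
 * is_valid_mac_addr - reject the all-zeros and broadcast addresses
 */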
3215 static boolean_t
3216 is_valid_mac_addr(uint8_t *mac_addr)
3217 {
3218 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
3219 	const uint8_t addr_test2[6] =
3220 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3221 
3222 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
3223 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
3224 		return (B_FALSE);
3225 
3226 	return (B_TRUE);
3227 }
3228 
3229 /*
3230  * e1000g_stall_check - check for tx stall
3231  *
3232  * This function checks if the adapter is stalled (in transmit).
3233  *
3234  * It is called each time the watchdog timeout is invoked.
3235  * If the transmit descriptor reclaim continuously fails,
3236  * the watchdog value will increment by 1. If the watchdog
3237  * value exceeds the threshold, the adapter is assumed to
3238  * have stalled and need to be reset.
3239  */
3240 static boolean_t
3241 e1000g_stall_check(struct e1000g *Adapter)
3242 {
3243 	e1000g_tx_ring_t *tx_ring;
3244 
3245 	tx_ring = Adapter->tx_ring;
3246 
3247 	if (Adapter->link_state != LINK_STATE_UP)
3248 		return (B_FALSE);
3249 
3250 	if (tx_ring->recycle_fail > 0)
3251 		tx_ring->stall_watchdog++;
3252 	else
3253 		tx_ring->stall_watchdog = 0;
3254 
3255 	if (tx_ring->stall_watchdog < E1000G_STALL_WATCHDOG_COUNT)
3256 		return (B_FALSE);
3257 
3258 	tx_ring->stall_watchdog = 0;
3259 	tx_ring->recycle_fail = 0;
3260 
3261 	return (B_TRUE);
3262 }
3263 
3264 #ifdef E1000G_DEBUG
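/*
 * e1000g_pp_ioctl - debug-only peek/poke ioctl handler
 *
 * Validates an e1000g_peekpoke_t request and dispatches it to the
 * register or driver-memory peek/poke routines below.
 */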
3265 static enum ioc_reply
3266 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp)
3267 {
3268 	void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd);
3269 	e1000g_peekpoke_t *ppd;
3270 	uint64_t mem_va;
3271 	uint64_t maxoff;
3272 	boolean_t peek;
3273 
3274 	switch (iocp->ioc_cmd) {
3275 
3276 	case E1000G_IOC_REG_PEEK:
3277 		peek = B_TRUE;
3278 		break;
3279 
3280 	case E1000G_IOC_REG_POKE:
3281 		peek = B_FALSE;
3282 		break;
3283 
3284 	default:
3285 		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
3286 		    "e1000g_diag_ioctl: invalid ioctl command 0x%X\n",
3287 		    iocp->ioc_cmd);
3288 		return (IOC_INVAL);
3289 	}
3290 
3291 	/*
3292 	 * Validate format of ioctl
3293 	 */
3294 	if (iocp->ioc_count != sizeof (e1000g_peekpoke_t))
3295 		return (IOC_INVAL);
3296 	if (mp->b_cont == NULL)
3297 		return (IOC_INVAL);
3298 
3299 	ppd = (e1000g_peekpoke_t *)mp->b_cont->b_rptr;
3300 
3301 	/*
3302 	 * Validate request parameters
3303 	 */
3304 	switch (ppd->pp_acc_space) {
3305 
3306 	default:
3307 		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
3308 		    "e1000g_diag_ioctl: invalid access space 0x%X\n",
3309 		    ppd->pp_acc_space);
3310 		return (IOC_INVAL);
3311 
3312 	case E1000G_PP_SPACE_REG:
3313 		/*
3314 		 * Memory-mapped I/O space
3315 		 */
3316 		ASSERT(ppd->pp_acc_size == 4);
3317 		if (ppd->pp_acc_size != 4)
3318 			return (IOC_INVAL);
3319 
3320 		if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
3321 			return (IOC_INVAL);
3322 
3323 		mem_va = 0;
3324 		maxoff = 0x10000;
3325 		ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg;
3326 		break;
3327 
3328 	case E1000G_PP_SPACE_E1000G:
3329 		/*
3330 		 * E1000g data structure!
3331 		 */
3332 		mem_va = (uintptr_t)e1000gp;
3333 		maxoff = sizeof (struct e1000g);
3334 		ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem;
3335 		break;
3336 
3337 	}
3338 
3339 	if (ppd->pp_acc_offset >= maxoff)
3340 		return (IOC_INVAL);
3341 
3342 	if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff)
3343 		return (IOC_INVAL);
3344 
3345 	/*
3346 	 * All OK - go!
3347 	 */
3348 	ppd->pp_acc_offset += mem_va;
3349 	(*ppfn)(e1000gp, ppd);
3350 	return (peek ? IOC_REPLY : IOC_ACK);
3351 }
3352 
3353 static void
3354 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
3355 {
3356 	ddi_acc_handle_t handle;
3357 	uint32_t *regaddr;
3358 
3359 	handle = e1000gp->osdep.reg_handle;
3360 	regaddr =
3361 	    (uint32_t *)(e1000gp->shared.hw_addr + ppd->pp_acc_offset);
3362 
3363 	ppd->pp_acc_data = ddi_get32(handle, regaddr);
3364 }
3365 
3366 static void
3367 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
3368 {
3369 	ddi_acc_handle_t handle;
3370 	uint32_t *regaddr;
3371 	uint32_t value;
3372 
3373 	handle = e1000gp->osdep.reg_handle;
3374 	regaddr =
3375 	    (uint32_t *)(e1000gp->shared.hw_addr + ppd->pp_acc_offset);
3376 	value = (uint32_t)ppd->pp_acc_data;
3377 
3378 	ddi_put32(handle, regaddr, value);
3379 }
3380 
3381 static void
3382 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
3383 {
3384 	uint64_t value;
3385 	void *vaddr;
3386 
3387 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
3388 
3389 	switch (ppd->pp_acc_size) {
3390 	case 1:
3391 		value = *(uint8_t *)vaddr;
3392 		break;
3393 
3394 	case 2:
3395 		value = *(uint16_t *)vaddr;
3396 		break;
3397 
3398 	case 4:
3399 		value = *(uint32_t *)vaddr;
3400 		break;
3401 
3402 	case 8:
3403 		value = *(uint64_t *)vaddr;
3404 		break;
3405 	}
3406 
3407 	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
3408 	    "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n",
3409 	    (void *)e1000gp, (void *)ppd, value, vaddr);
3410 
3411 	ppd->pp_acc_data = value;
3412 }
3413 
3414 static void
3415 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
3416 {
3417 	uint64_t value;
3418 	void *vaddr;
3419 
3420 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
3421 	value = ppd->pp_acc_data;
3422 
3423 	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
3424 	    "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n",
3425 	    (void *)e1000gp, (void *)ppd, value, vaddr);
3426 
3427 	switch (ppd->pp_acc_size) {
3428 	case 1:
3429 		*(uint8_t *)vaddr = (uint8_t)value;
3430 		break;
3431 
3432 	case 2:
3433 		*(uint16_t *)vaddr = (uint16_t)value;
3434 		break;
3435 
3436 	case 4:
3437 		*(uint32_t *)vaddr = (uint32_t)value;
3438 		break;
3439 
3440 	case 8:
3441 		*(uint64_t *)vaddr = (uint64_t)value;
3442 		break;
3443 	}
3444 }
3445 #endif
3446 
3447 /*
3448  * Loopback Support
3449  */
3450 static lb_property_t lb_normal =
3451 	{ normal,	"normal",	E1000G_LB_NONE		};
3452 static lb_property_t lb_external1000 =
3453 	{ external,	"1000Mbps",	E1000G_LB_EXTERNAL_1000	};
3454 static lb_property_t lb_external100 =
3455 	{ external,	"100Mbps",	E1000G_LB_EXTERNAL_100	};
3456 static lb_property_t lb_external10 =
3457 	{ external,	"10Mbps",	E1000G_LB_EXTERNAL_10	};
3458 static lb_property_t lb_phy =
3459 	{ internal,	"PHY",		E1000G_LB_INTERNAL_PHY	};
3460 
3461 static enum ioc_reply
3462 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp)
3463 {
3464 	lb_info_sz_t *lbsp;
3465 	lb_property_t *lbpp;
3466 	struct e1000_hw *hw;
3467 	uint32_t *lbmp;
3468 	uint32_t size;
3469 	uint32_t value;
3470 
3471 	hw = &Adapter->shared;
3472 
3473 	if (mp->b_cont == NULL)
3474 		return (IOC_INVAL);
3475 
3476 	switch (iocp->ioc_cmd) {
3477 	default:
3478 		return (IOC_INVAL);
3479 
3480 	case LB_GET_INFO_SIZE:
3481 		size = sizeof (lb_info_sz_t);
3482 		if (iocp->ioc_count != size)
3483 			return (IOC_INVAL);
3484 
3485 		rw_enter(&Adapter->chip_lock, RW_WRITER);
3486 		e1000g_get_phy_state(Adapter);
3487 
3488 		/*
3489 		 * Workaround for hardware faults. In order to get a stable
3490 		 * state of the phy, we wait for a specific interval and
3491 		 * try again. The time delay is an empirical value based
3492 		 * on our testing.
3493 		 */
3494 		msec_delay(100);
3495 		e1000g_get_phy_state(Adapter);
3496 		rw_exit(&Adapter->chip_lock);
3497 
3498 		value = sizeof (lb_normal);
3499 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
3500 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
3501 		    (hw->media_type == e1000_media_type_fiber) ||
3502 		    (hw->media_type == e1000_media_type_internal_serdes)) {
3503 			value += sizeof (lb_phy);
3504 			switch (hw->mac.type) {
3505 			case e1000_82571:
3506 			case e1000_82572:
3507 				value += sizeof (lb_external1000);
3508 				break;
3509 			}
3510 		}
3511 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
3512 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
3513 			value += sizeof (lb_external100);
3514 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
3515 			value += sizeof (lb_external10);
3516 
3517 		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
3518 		*lbsp = value;
3519 		break;
3520 
3521 	case LB_GET_INFO:
3522 		value = sizeof (lb_normal);
3523 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
3524 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
3525 		    (hw->media_type == e1000_media_type_fiber) ||
3526 		    (hw->media_type == e1000_media_type_internal_serdes)) {
3527 			value += sizeof (lb_phy);
3528 			switch (hw->mac.type) {
3529 			case e1000_82571:
3530 			case e1000_82572:
3531 				value += sizeof (lb_external1000);
3532 				break;
3533 			}
3534 		}
3535 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
3536 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
3537 			value += sizeof (lb_external100);
3538 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
3539 			value += sizeof (lb_external10);
3540 
3541 		size = value;
3542 		if (iocp->ioc_count != size)
3543 			return (IOC_INVAL);
3544 
3545 		value = 0;
3546 		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
3547 		lbpp[value++] = lb_normal;
3548 		if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
3549 		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
3550 		    (hw->media_type == e1000_media_type_fiber) ||
3551 		    (hw->media_type == e1000_media_type_internal_serdes)) {
3552 			lbpp[value++] = lb_phy;
3553 			switch (hw->mac.type) {
3554 			case e1000_82571:
3555 			case e1000_82572:
3556 				lbpp[value++] = lb_external1000;
3557 				break;
3558 			}
3559 		}
3560 		if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
3561 		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
3562 			lbpp[value++] = lb_external100;
3563 		if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
3564 			lbpp[value++] = lb_external10;
3565 		break;
3566 
3567 	case LB_GET_MODE:
3568 		size = sizeof (uint32_t);
3569 		if (iocp->ioc_count != size)
3570 			return (IOC_INVAL);
3571 
3572 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
3573 		*lbmp = Adapter->loopback_mode;
3574 		break;
3575 
3576 	case LB_SET_MODE:
3577 		size = 0;
3578 		if (iocp->ioc_count != sizeof (uint32_t))
3579 			return (IOC_INVAL);
3580 
3581 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
3582 		if (!e1000g_set_loopback_mode(Adapter, *lbmp))
3583 			return (IOC_INVAL);
3584 		break;
3585 	}
3586 
3587 	iocp->ioc_count = size;
3588 	iocp->ioc_error = 0;
3589 
3590 	return (IOC_REPLY);
3591 }
3592 
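/*
 * e1000g_set_loopback_mode - switch the adapter into or out of loopback
 *
 * E1000G_LB_NONE simply resets the chip; the other modes reset the chip,
 * apply the mode-specific register settings, and wait for the link to
 * come up, retrying the reset once if it does not.
 */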
3593 static boolean_t
3594 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode)
3595 {
3596 	struct e1000_hw *hw;
3597 	int i, times;
3598 	boolean_t link_up;
3599 
3600 	if (mode == Adapter->loopback_mode)
3601 		return (B_TRUE);
3602 
3603 	hw = &Adapter->shared;
3604 	times = 0;
3605 
3606 	Adapter->loopback_mode = mode;
3607 
3608 	if (mode == E1000G_LB_NONE) {
3609 		/* Reset the chip */
3610 		hw->phy.wait_for_link = B_TRUE;
3611 		(void) e1000g_reset(Adapter);
3612 		hw->phy.wait_for_link = B_FALSE;
3613 		return (B_TRUE);
3614 	}
3615 
3616 again:
3617 
3618 	(void) e1000g_reset(Adapter);
3619 
3620 	rw_enter(&Adapter->chip_lock, RW_WRITER);
3621 
3622 	switch (mode) {
3623 	default:
3624 		rw_exit(&Adapter->chip_lock);
3625 		return (B_FALSE);
3626 
3627 	case E1000G_LB_EXTERNAL_1000:
3628 		e1000g_set_external_loopback_1000(Adapter);
3629 		break;
3630 
3631 	case E1000G_LB_EXTERNAL_100:
3632 		e1000g_set_external_loopback_100(Adapter);
3633 		break;
3634 
3635 	case E1000G_LB_EXTERNAL_10:
3636 		e1000g_set_external_loopback_10(Adapter);
3637 		break;
3638 
3639 	case E1000G_LB_INTERNAL_PHY:
3640 		e1000g_set_internal_loopback(Adapter);
3641 		break;
3642 	}
3643 
3644 	times++;
3645 
3646 	/* Wait for link up */
3647 	for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--)
3648 		msec_delay(100);
3649 
3650 	link_up = e1000g_link_up(Adapter);
3651 
3652 	rw_exit(&Adapter->chip_lock);
3653 
3654 	if (!link_up) {
3655 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
3656 		    "Failed to get the link up");
3657 		if (times < 2) {
3658 			/* Reset the link */
3659 			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
3660 			    "Reset the link ...");
3661 			(void) e1000g_reset(Adapter);
3662 			goto again;
3663 		}
3664 	}
3665 
3666 	return (B_TRUE);
3667 }
3668 
3669 /*
3670  * The following loopback settings are from Intel's technical
3671  * document - "How To Loopback". All the register settings and
3672  * time delay values are taken directly from the document;
3673  * no further explanation is available.
3674  */
3675 static void
3676 e1000g_set_internal_loopback(struct e1000g *Adapter)
3677 {
3678 	struct e1000_hw *hw;
3679 	uint32_t ctrl;
3680 	uint32_t status;
3681 	uint16_t phy_ctrl;
3682 	uint32_t txcw;
3683 
3684 	hw = &Adapter->shared;
3685 
3686 	/* Disable Smart Power Down */
3687 	phy_spd_state(hw, B_FALSE);
3688 
3689 	e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
3690 	phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10);
3691 	phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000;
3692 
3693 	switch (hw->mac.type) {
3694 	case e1000_82540:
3695 	case e1000_82545:
3696 	case e1000_82545_rev_3:
3697 	case e1000_82546:
3698 	case e1000_82546_rev_3:
3699 	case e1000_82573:
3700 		/* Auto-MDI/MDIX off */
3701 		e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
3702 		/* Reset PHY to update Auto-MDI/MDIX */
3703 		e1000_write_phy_reg(hw, PHY_CONTROL,
3704 		    phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN);
3705 		/* Reset PHY to auto-neg off and force 1000 */
3706 		e1000_write_phy_reg(hw, PHY_CONTROL,
3707 		    phy_ctrl | MII_CR_RESET);
3708 		/*
3709 		 * Disable PHY receiver for 82540/545/546 and 82573 Family.
3710 		 * See comments above e1000g_set_internal_loopback() for the
3711 		 * background.
3712 		 */
3713 		e1000_write_phy_reg(hw, 29, 0x001F);
3714 		e1000_write_phy_reg(hw, 30, 0x8FFC);
3715 		e1000_write_phy_reg(hw, 29, 0x001A);
3716 		e1000_write_phy_reg(hw, 30, 0x8FF0);
3717 		break;
3718 	}
3719 
3720 	/* Set loopback */
3721 	e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK);
3722 
3723 	msec_delay(250);
3724 
3725 	/* Now set up the MAC to the same speed/duplex as the PHY. */
3726 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3727 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
3728 	ctrl |= (E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
3729 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
3730 	    E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
3731 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
3732 
3733 	switch (hw->mac.type) {
3734 	case e1000_82540:
3735 	case e1000_82545:
3736 	case e1000_82545_rev_3:
3737 	case e1000_82546:
3738 	case e1000_82546_rev_3:
3739 		/*
3740 		 * For internal SerDes we need to commit the CTRL write now
3741 		 * so that the status read below reflects the link state
3742 		 */
3743 		if (hw->media_type == e1000_media_type_internal_serdes) {
3744 			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3745 			msec_delay(100);
3746 			ctrl = E1000_READ_REG(hw, E1000_CTRL);
3747 		}
3748 
3749 		if (hw->media_type == e1000_media_type_copper) {
3750 			/* Invert Loss of Signal */
3751 			ctrl |= E1000_CTRL_ILOS;
3752 		} else {
3753 			/* Set ILOS on fiber nic if half duplex is detected */
3754 			status = E1000_READ_REG(hw, E1000_STATUS);
3755 			if ((status & E1000_STATUS_FD) == 0)
3756 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
3757 		}
3758 		break;
3759 
3760 	case e1000_82571:
3761 	case e1000_82572:
3762 		/*
3763 		 * The fiber/SerDes versions of this adapter do not contain an
3764 		 * accessible PHY. Therefore, loopback beyond the MAC must be done
3765 		 * using SerDes analog loopback.
3766 		 */
3767 		if (hw->media_type != e1000_media_type_copper) {
3768 			status = E1000_READ_REG(hw, E1000_STATUS);
3769 			/* Set ILOS on fiber nic if half duplex is detected */
3770 			if (((status & E1000_STATUS_LU) == 0) ||
3771 			    ((status & E1000_STATUS_FD) == 0) ||
3772 			    (hw->media_type ==
3773 			    e1000_media_type_internal_serdes))
3774 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
3775 
3776 			/* Disable autoneg by setting bit 31 of TXCW to zero */
3777 			txcw = E1000_READ_REG(hw, E1000_TXCW);
3778 			txcw &= ~((uint32_t)1 << 31);
3779 			E1000_WRITE_REG(hw, E1000_TXCW, txcw);
3780 
3781 			/*
3782 			 * Write 0x410 to Serdes Control register
3783 			 * to enable Serdes analog loopback
3784 			 */
3785 			E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
3786 			msec_delay(10);
3787 		}
3788 		break;
3789 
3790 	case e1000_82573:
3791 		ctrl |= E1000_CTRL_ILOS;
3792 		break;
3793 	}
3794 
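	/* Commit the forced speed/duplex settings to the MAC */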
3795 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3796 
3797 }
3798 
3799 static void
3800 e1000g_set_external_loopback_1000(struct e1000g *Adapter)
3801 {
3802 	struct e1000_hw *hw;
3803 	uint32_t rctl;
3804 	uint32_t ctrl_ext;
3805 	uint32_t ctrl;
3806 	uint32_t status;
3807 	uint32_t txcw;
3808 
3809 	hw = &Adapter->shared;
3810 
3811 	/* Disable Smart Power Down */
3812 	phy_spd_state(hw, B_FALSE);
3813 
3814 	switch (hw->media_type) {
3815 	case e1000_media_type_copper:
3816 		/* Force link up (Must be done before the PHY writes) */
3817 		ctrl = E1000_READ_REG(hw, E1000_CTRL);
3818 		ctrl |= E1000_CTRL_SLU;	/* Force Link Up */
3819 		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3820 
3821 		rctl = E1000_READ_REG(hw, E1000_RCTL);
3822 		rctl |= (E1000_RCTL_EN |
3823 		    E1000_RCTL_SBP |
3824 		    E1000_RCTL_UPE |
3825 		    E1000_RCTL_MPE |
3826 		    E1000_RCTL_LPE |
3827 		    E1000_RCTL_BAM);		/* 0x803E */
3828 		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
3829 
3830 		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3831 		ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA |
3832 		    E1000_CTRL_EXT_SDP6_DATA |
3833 		    E1000_CTRL_EXT_SDP7_DATA |
3834 		    E1000_CTRL_EXT_SDP4_DIR |
3835 		    E1000_CTRL_EXT_SDP6_DIR |
3836 		    E1000_CTRL_EXT_SDP7_DIR);	/* 0x0DD0 */
3837 		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3838 
3839 		/*
3840 		 * This sequence tunes the PHY's SDP; the values are not
3841 		 * customer-settable. For background, see the comment above
3842 		 * e1000g_set_internal_loopback().
3843 		 */
3844 		e1000_write_phy_reg(hw, 0x0, 0x140);
3845 		msec_delay(10);
3846 		e1000_write_phy_reg(hw, 0x9, 0x1A00);
3847 		e1000_write_phy_reg(hw, 0x12, 0xC10);
3848 		e1000_write_phy_reg(hw, 0x12, 0x1C10);
3849 		e1000_write_phy_reg(hw, 0x1F37, 0x76);
3850 		e1000_write_phy_reg(hw, 0x1F33, 0x1);
3851 		e1000_write_phy_reg(hw, 0x1F33, 0x0);
3852 
3853 		e1000_write_phy_reg(hw, 0x1F35, 0x65);
3854 		e1000_write_phy_reg(hw, 0x1837, 0x3F7C);
3855 		e1000_write_phy_reg(hw, 0x1437, 0x3FDC);
3856 		e1000_write_phy_reg(hw, 0x1237, 0x3F7C);
3857 		e1000_write_phy_reg(hw, 0x1137, 0x3FDC);
3858 
3859 		msec_delay(50);
3860 		break;
3861 	case e1000_media_type_fiber:
3862 	case e1000_media_type_internal_serdes:
3863 		status = E1000_READ_REG(hw, E1000_STATUS);
3864 		if (((status & E1000_STATUS_LU) == 0) ||
3865 		    (hw->media_type == e1000_media_type_internal_serdes)) {
3866 			ctrl = E1000_READ_REG(hw, E1000_CTRL);
3867 			ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
3868 			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3869 		}
3870 
3871 		/* Disable autoneg by setting bit 31 of TXCW to zero */
3872 		txcw = E1000_READ_REG(hw, E1000_TXCW);
3873 		txcw &= ~((uint32_t)1 << 31);
3874 		E1000_WRITE_REG(hw, E1000_TXCW, txcw);
3875 
3876 		/*
3877 		 * Write 0x410 to Serdes Control register
3878 		 * to enable Serdes analog loopback
3879 		 */
3880 		E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
3881 		msec_delay(10);
3882 		break;
3883 	default:
3884 		break;
3885 	}
3886 }
3887 
3888 static void
3889 e1000g_set_external_loopback_100(struct e1000g *Adapter)
3890 {
3891 	struct e1000_hw *hw;
3892 	uint32_t ctrl;
3893 	uint16_t phy_ctrl;
3894 
3895 	hw = &Adapter->shared;
3896 
3897 	/* Disable Smart Power Down */
3898 	phy_spd_state(hw, B_FALSE);
3899 
3900 	phy_ctrl = (MII_CR_FULL_DUPLEX |
3901 	    MII_CR_SPEED_100);
3902 
3903 	/* Force 100/FD, reset PHY */
3904 	e1000_write_phy_reg(hw, PHY_CONTROL,
3905 	    phy_ctrl | MII_CR_RESET);	/* 0xA100 */
3906 	msec_delay(10);
3907 
3908 	/* Force 100/FD */
3909 	e1000_write_phy_reg(hw, PHY_CONTROL,
3910 	    phy_ctrl);			/* 0x2100 */
3911 	msec_delay(10);
3912 
3913 	/* Now set up the MAC to the same speed/duplex as the PHY. */
3914 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3915 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
3916 	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
3917 	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
3918 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
3919 	    E1000_CTRL_SPD_100 |	/* Force Speed to 100 */
3920 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
3921 
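	/* Apply the forced 100 Mb/s full-duplex configuration to the MAC */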
3922 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3923 }
3924 
3925 static void
3926 e1000g_set_external_loopback_10(struct e1000g *Adapter)
3927 {
3928 	struct e1000_hw *hw;
3929 	uint32_t ctrl;
3930 	uint16_t phy_ctrl;
3931 
3932 	hw = &Adapter->shared;
3933 
3934 	/* Disable Smart Power Down */
3935 	phy_spd_state(hw, B_FALSE);
3936 
3937 	phy_ctrl = (MII_CR_FULL_DUPLEX |
3938 	    MII_CR_SPEED_10);
3939 
3940 	/* Force 10/FD, reset PHY */
3941 	e1000_write_phy_reg(hw, PHY_CONTROL,
3942 	    phy_ctrl | MII_CR_RESET);	/* 0x8100 */
3943 	msec_delay(10);
3944 
3945 	/* Force 10/FD */
3946 	e1000_write_phy_reg(hw, PHY_CONTROL,
3947 	    phy_ctrl);			/* 0x0100 */
3948 	msec_delay(10);
3949 
3950 	/* Now set up the MAC to the same speed/duplex as the PHY. */
3951 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3952 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
3953 	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
3954 	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
3955 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
3956 	    E1000_CTRL_SPD_10 |		/* Force Speed to 10 */
3957 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
3958 
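	/* Apply the forced 10 Mb/s full-duplex configuration to the MAC */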
3959 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3960 }
3961 
3962 #ifdef __sparc
3963 static boolean_t
3964 e1000g_find_mac_address(struct e1000g *Adapter)
3965 {
3966 	struct e1000_hw *hw = &Adapter->shared;
3967 	uchar_t *bytes;
3968 	struct ether_addr sysaddr;
3969 	uint_t nelts;
3970 	int err;
3971 	boolean_t found = B_FALSE;
3972 
3973 	/*
3974 	 * The "vendor's factory-set address" may already have
3975 	 * been extracted from the chip, but if the property
3976 	 * "local-mac-address" is set we use that instead.
3977 	 *
3978 	 * We check whether it looks like an array of 6
3979 	 * bytes (which it should, if OBP set it).  If we can't
3980 	 * make sense of it this way, we'll ignore it.
3981 	 */
3982 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
3983 	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
3984 	if (err == DDI_PROP_SUCCESS) {
3985 		if (nelts == ETHERADDRL) {
3986 			while (nelts--)
3987 				hw->mac.addr[nelts] = bytes[nelts];
3988 			found = B_TRUE;
3989 		}
3990 		ddi_prop_free(bytes);
3991 	}
3992 
3993 	/*
3994 	 * Look up the OBP property "local-mac-address?". If the user has set
3995 	 * 'local-mac-address? = false', use the system-wide MAC address instead.
3996 	 */
3997 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0,
3998 	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
3999 		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
4000 			if (localetheraddr(NULL, &sysaddr) != 0) {
4001 				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
4002 				found = B_TRUE;
4003 			}
4004 		}
4005 		ddi_prop_free(bytes);
4006 	}
4007 
4008 	/*
4009 	 * Finally(!), if there's a valid "mac-address" property (created
4010 	 * if we netbooted from this interface), we must use this instead
4011 	 * of any of the above to ensure that the NFS/install server doesn't
4012 	 * get confused by the address changing as Solaris takes over!
4013 	 */
4014 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
4015 	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
4016 	if (err == DDI_PROP_SUCCESS) {
4017 		if (nelts == ETHERADDRL) {
4018 			while (nelts--)
4019 				hw->mac.addr[nelts] = bytes[nelts];
4020 			found = B_TRUE;
4021 		}
4022 		ddi_prop_free(bytes);
4023 	}
4024 
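	/* Record any override as the permanent address as well */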
4025 	if (found) {
4026 		bcopy(hw->mac.addr, hw->mac.perm_addr,
4027 		    ETHERADDRL);
4028 	}
4029 
4030 	return (found);
4031 }
4032 #endif
4033 
4034 static int
4035 e1000g_add_intrs(struct e1000g *Adapter)
4036 {
4037 	dev_info_t *devinfo;
4038 	int intr_types;
4039 	int rc;
4040 
4041 	devinfo = Adapter->dip;
4042 
4043 	/* Get supported interrupt types */
4044 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
4045 
4046 	if (rc != DDI_SUCCESS) {
4047 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4048 		    "Get supported interrupt types failed: %d\n", rc);
4049 		return (DDI_FAILURE);
4050 	}
4051 
4052 	/*
4053 	 * Per the Intel Technical Advisory document (TA-160), there are
4054 	 * cases where some older Intel PCI-X NICs may "advertise" to the OS
4055 	 * that they support MSI but in fact have problems with it.
4056 	 * Therefore we enable MSI only for PCI-E NICs and disable it for
4057 	 * older PCI/PCI-X NICs.
4058 	 */
4059 	if (Adapter->shared.mac.type < e1000_82571)
4060 		Adapter->msi_enabled = B_FALSE;
4061 
4062 	if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enabled) {
4063 		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI);
4064 
4065 		if (rc != DDI_SUCCESS) {
4066 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
4067 			    "Add MSI failed, trying Legacy interrupts\n");
4068 		} else {
4069 			Adapter->intr_type = DDI_INTR_TYPE_MSI;
4070 		}
4071 	}
4072 
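	/* Fall back to fixed (legacy) interrupts if MSI was not registered */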
4073 	if ((Adapter->intr_type == 0) &&
4074 	    (intr_types & DDI_INTR_TYPE_FIXED)) {
4075 		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED);
4076 
4077 		if (rc != DDI_SUCCESS) {
4078 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
4079 			    "Add Legacy interrupts failed\n");
4080 			return (DDI_FAILURE);
4081 		}
4082 
4083 		Adapter->intr_type = DDI_INTR_TYPE_FIXED;
4084 	}
4085 
4086 	if (Adapter->intr_type == 0) {
4087 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
4088 		    "No interrupts registered\n");
4089 		return (DDI_FAILURE);
4090 	}
4091 
4092 	return (DDI_SUCCESS);
4093 }
4094 
4095 /*
4096  * e1000g_intr_add() handles MSI/Legacy interrupts
4097  */
4098 static int
4099 e1000g_intr_add(struct e1000g *Adapter, int intr_type)
4100 {
4101 	dev_info_t *devinfo;
4102 	int count, avail, actual;
4103 	int x, y, rc, inum = 0;
4104 	int flag;
4105 	ddi_intr_handler_t *intr_handler;
4106 
4107 	devinfo = Adapter->dip;
4108 
4109 	/* get number of interrupts */
4110 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
4111 	if ((rc != DDI_SUCCESS) || (count == 0)) {
4112 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
4113 		    "Get interrupt number failed. Return: %d, count: %d\n",
4114 		    rc, count);
4115 		return (DDI_FAILURE);
4116 	}
4117 
4118 	/* get number of available interrupts */
4119 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
4120 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
4121 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
4122 		    "Get interrupt available number failed. "
4123 		    "Return: %d, available: %d\n", rc, avail);
4124 		return (DDI_FAILURE);
4125 	}
4126 
4127 	if (avail < count) {
4128 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
4129 		    "Interrupts count: %d, available: %d\n",
4130 		    count, avail);
4131 	}
4132 
4133 	/* Allocate an array of interrupt handles */
4134 	Adapter->intr_size = count * sizeof (ddi_intr_handle_t);
4135 	Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP);
4136 
4137 	/* Set NORMAL behavior for both MSI and FIXED interrupts */
4138 	flag = DDI_INTR_ALLOC_NORMAL;
4139 
4140 	/* call ddi_intr_alloc() */
4141 	rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum,
4142 	    count, &actual, flag);
4143 
4144 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
4145 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4146 		    "Allocate interrupts failed: %d\n", rc);
4147 
4148 		kmem_free(Adapter->htable, Adapter->intr_size);
4149 		return (DDI_FAILURE);
4150 	}
4151 
4152 	if (actual < count) {
4153 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
4154 		    "Interrupts requested: %d, received: %d\n",
4155 		    count, actual);
4156 	}
4157 
4158 	Adapter->intr_cnt = actual;
4159 
4160 	/* Get the priority of the first interrupt; assume the rest match */
4161 	rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri);
4162 
4163 	if (rc != DDI_SUCCESS) {
4164 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4165 		    "Get interrupt priority failed: %d\n", rc);
4166 
4167 		/* Free already allocated intr */
4168 		for (y = 0; y < actual; y++)
4169 			(void) ddi_intr_free(Adapter->htable[y]);
4170 
4171 		kmem_free(Adapter->htable, Adapter->intr_size);
4172 		return (DDI_FAILURE);
4173 	}
4174 
4175 	/*
4176 	 * In Legacy Interrupt mode, for PCI-Express adapters, we should
4177 	 * use the interrupt service routine e1000g_intr_pciexpress()
4178 	 * to avoid interrupt stealing when sharing an interrupt with other
4179 	 * devices.
4180 	 */
4181 	if (Adapter->shared.mac.type < e1000_82571)
4182 		intr_handler = (ddi_intr_handler_t *)e1000g_intr;
4183 	else
4184 		intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress;
4185 
4186 	/* Call ddi_intr_add_handler() */
4187 	for (x = 0; x < actual; x++) {
4188 		rc = ddi_intr_add_handler(Adapter->htable[x],
4189 		    intr_handler, (caddr_t)Adapter, NULL);
4190 
4191 		if (rc != DDI_SUCCESS) {
4192 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4193 			    "Add interrupt handler failed: %d\n", rc);
4194 
4195 			/* Remove already added handler */
4196 			for (y = 0; y < x; y++)
4197 				(void) ddi_intr_remove_handler(
4198 				    Adapter->htable[y]);
4199 
4200 			/* Free already allocated intr */
4201 			for (y = 0; y < actual; y++)
4202 				(void) ddi_intr_free(Adapter->htable[y]);
4203 
4204 			kmem_free(Adapter->htable, Adapter->intr_size);
4205 			return (DDI_FAILURE);
4206 		}
4207 	}
4208 
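	/* DDI_INTR_FLAG_BLOCK in intr_cap selects block enable/disable */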
4209 	rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap);
4210 
4211 	if (rc != DDI_SUCCESS) {
4212 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4213 		    "Get interrupt cap failed: %d\n", rc);
4214 
4215 		/* Free already allocated intr */
4216 		for (y = 0; y < actual; y++) {
4217 			(void) ddi_intr_remove_handler(Adapter->htable[y]);
4218 			(void) ddi_intr_free(Adapter->htable[y]);
4219 		}
4220 
4221 		kmem_free(Adapter->htable, Adapter->intr_size);
4222 		return (DDI_FAILURE);
4223 	}
4224 
4225 	return (DDI_SUCCESS);
4226 }
4227 
4228 static int
4229 e1000g_rem_intrs(struct e1000g *Adapter)
4230 {
4231 	int x;
4232 	int rc;
4233 
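	/* Remove the handler from, and free, each allocated interrupt */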
4234 	for (x = 0; x < Adapter->intr_cnt; x++) {
4235 		rc = ddi_intr_remove_handler(Adapter->htable[x]);
4236 		if (rc != DDI_SUCCESS) {
4237 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4238 			    "Remove intr handler failed: %d\n", rc);
4239 			return (DDI_FAILURE);
4240 		}
4241 
4242 		rc = ddi_intr_free(Adapter->htable[x]);
4243 		if (rc != DDI_SUCCESS) {
4244 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4245 			    "Free intr failed: %d\n", rc);
4246 			return (DDI_FAILURE);
4247 		}
4248 	}
4249 
4250 	kmem_free(Adapter->htable, Adapter->intr_size);
4251 
4252 	return (DDI_SUCCESS);
4253 }
4254 
4255 static int
4256 e1000g_enable_intrs(struct e1000g *Adapter)
4257 {
4258 	int x;
4259 	int rc;
4260 
4261 	/* Enable interrupts */
4262 	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
4263 		/* Call ddi_intr_block_enable() for MSI */
4264 		rc = ddi_intr_block_enable(Adapter->htable,
4265 		    Adapter->intr_cnt);
4266 		if (rc != DDI_SUCCESS) {
4267 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4268 			    "Enable block intr failed: %d\n", rc);
4269 			return (DDI_FAILURE);
4270 		}
4271 	} else {
4272 		/* Call ddi_intr_enable() for Legacy/MSI non block enable */
4273 		for (x = 0; x < Adapter->intr_cnt; x++) {
4274 			rc = ddi_intr_enable(Adapter->htable[x]);
4275 			if (rc != DDI_SUCCESS) {
4276 				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4277 				    "Enable intr failed: %d\n", rc);
4278 				return (DDI_FAILURE);
4279 			}
4280 		}
4281 	}
4282 
4283 	return (DDI_SUCCESS);
4284 }
4285 
4286 static int
4287 e1000g_disable_intrs(struct e1000g *Adapter)
4288 {
4289 	int x;
4290 	int rc;
4291 
4292 	/* Disable all interrupts */
4293 	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
4294 		rc = ddi_intr_block_disable(Adapter->htable,
4295 		    Adapter->intr_cnt);
4296 		if (rc != DDI_SUCCESS) {
4297 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4298 			    "Disable block intr failed: %d\n", rc);
4299 			return (DDI_FAILURE);
4300 		}
4301 	} else {
4302 		for (x = 0; x < Adapter->intr_cnt; x++) {
4303 			rc = ddi_intr_disable(Adapter->htable[x]);
4304 			if (rc != DDI_SUCCESS) {
4305 				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4306 				    "Disable intr failed: %d\n", rc);
4307 				return (DDI_FAILURE);
4308 			}
4309 		}
4310 	}
4311 
4312 	return (DDI_SUCCESS);
4313 }
4314 
4315 /*
4316  * e1000g_get_phy_state - get the state of PHY registers, save in the adapter
4317  */
4318 static void
4319 e1000g_get_phy_state(struct e1000g *Adapter)
4320 {
4321 	struct e1000_hw *hw = &Adapter->shared;
4322 
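	/* Snapshot the standard MII/GMII registers into the soft state */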
4323 	e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl);
4324 	e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status);
4325 	e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &Adapter->phy_an_adv);
4326 	e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &Adapter->phy_an_exp);
4327 	e1000_read_phy_reg(hw, PHY_EXT_STATUS, &Adapter->phy_ext_status);
4328 	e1000_read_phy_reg(hw, PHY_1000T_CTRL, &Adapter->phy_1000t_ctrl);
4329 	e1000_read_phy_reg(hw, PHY_1000T_STATUS, &Adapter->phy_1000t_status);
4330 	e1000_read_phy_reg(hw, PHY_LP_ABILITY, &Adapter->phy_lp_able);
4331 }
4332