xref: /titanic_44/usr/src/uts/common/io/e1000g/e1000g_main.c (revision ee5416c9d7e449233197d5d20bc6b81e4ff091b2)
1 /*
2  * This file is provided under a CDDLv1 license.  When using or
3  * redistributing this file, you may do so under this license.
4  * In redistributing this file this license must be included
5  * and no other modification of this header file is permitted.
6  *
7  * CDDL LICENSE SUMMARY
8  *
9  * Copyright(c) 1999 - 2007 Intel Corporation. All rights reserved.
10  *
11  * The contents of this file are subject to the terms of Version
12  * 1.0 of the Common Development and Distribution License (the "License").
13  *
14  * You should have received a copy of the License with this software.
15  * You can obtain a copy of the License at
16  *	http://www.opensolaris.org/os/licensing.
17  * See the License for the specific language governing permissions
18  * and limitations under the License.
19  */
20 
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms of the CDDLv1.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * **********************************************************************
30  *									*
31  * Module Name:								*
32  *   e1000g_main.c							*
33  *									*
34  * Abstract:								*
35  *   This file contains the interface routines for the Solaris OS.	*
36  *   It has all DDI entry point routines and GLD entry point routines.	*
37  *									*
38  *   This file also contains routines that take care of initialization,	*
39  *   uninitialization, and interrupt handling.				*
40  *									*
41  * **********************************************************************
42  */
43 
44 #include <sys/dlpi.h>
45 #include <sys/mac.h>
46 #include "e1000g_sw.h"
47 #include "e1000g_debug.h"
48 
49 #define	E1000_RX_INTPT_TIME	128
50 #define	E1000_RX_PKT_CNT	8
51 
52 static char ident[] = "Intel PRO/1000 Ethernet 5.2.1";
53 static char e1000g_string[] = "Intel(R) PRO/1000 Network Connection";
54 static char e1000g_version[] = "Driver Ver. 5.2.1";
55 
56 /*
57  * Prototypes for DDI entry points
58  */
59 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
60 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);
61 
62 /*
63  * Prototypes for the init and interrupt routines
64  */
65 static int e1000g_resume(dev_info_t *);
66 static int e1000g_suspend(dev_info_t *);
67 static uint_t e1000g_intr_pciexpress(caddr_t);
68 static uint_t e1000g_intr(caddr_t);
69 static void e1000g_intr_work(struct e1000g *, uint32_t);
70 #pragma inline(e1000g_intr_work)
71 static int e1000g_init(struct e1000g *);
72 static int e1000g_start(struct e1000g *, boolean_t);
73 static void e1000g_stop(struct e1000g *, boolean_t);
74 static int e1000g_m_start(void *);
75 static void e1000g_m_stop(void *);
76 static int e1000g_m_promisc(void *, boolean_t);
77 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
78 static int e1000g_m_unicst(void *, const uint8_t *);
79 static int e1000g_m_unicst_add(void *, mac_multi_addr_t *);
80 static int e1000g_m_unicst_remove(void *, mac_addr_slot_t);
81 static int e1000g_m_unicst_modify(void *, mac_multi_addr_t *);
82 static int e1000g_m_unicst_get(void *, mac_multi_addr_t *);
83 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
84 static void e1000g_m_blank(void *, time_t, uint32_t);
85 static void e1000g_m_resources(void *);
86 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
87 static void e1000g_init_locks(struct e1000g *);
88 static void e1000g_destroy_locks(struct e1000g *);
89 static int e1000g_identify_hardware(struct e1000g *);
90 static int e1000g_regs_map(struct e1000g *);
91 static int e1000g_set_driver_params(struct e1000g *);
92 static int e1000g_register_mac(struct e1000g *);
93 static boolean_t e1000g_rx_drain(struct e1000g *);
94 static boolean_t e1000g_tx_drain(struct e1000g *);
95 static void e1000g_init_unicst(struct e1000g *);
96 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, mac_addr_slot_t);
97 
98 /*
99  * Local routines
100  */
101 static void e1000g_tx_clean(struct e1000g *);
102 static void e1000g_rx_clean(struct e1000g *);
103 static void e1000g_link_timer(void *);
104 static void e1000g_local_timer(void *);
105 static boolean_t e1000g_link_check(struct e1000g *);
106 static boolean_t e1000g_stall_check(struct e1000g *);
107 static void e1000g_smartspeed(struct e1000g *);
108 static void e1000g_get_conf(struct e1000g *);
109 static int e1000g_get_prop(struct e1000g *, char *, int, int, int);
110 static void enable_watchdog_timer(struct e1000g *);
111 static void disable_watchdog_timer(struct e1000g *);
112 static void start_watchdog_timer(struct e1000g *);
113 static void restart_watchdog_timer(struct e1000g *);
114 static void stop_watchdog_timer(struct e1000g *);
115 static void stop_link_timer(struct e1000g *);
116 static void stop_82547_timer(e1000g_tx_ring_t *);
117 static void e1000g_force_speed_duplex(struct e1000g *);
118 static void e1000g_get_max_frame_size(struct e1000g *);
119 static boolean_t is_valid_mac_addr(uint8_t *);
120 static void e1000g_unattach(dev_info_t *, struct e1000g *);
121 #ifdef E1000G_DEBUG
122 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *);
123 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *);
124 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *);
125 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *);
126 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *,
127     struct iocblk *, mblk_t *);
128 #endif
129 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *,
130     struct iocblk *, mblk_t *);
131 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t);
132 static void e1000g_set_internal_loopback(struct e1000g *);
133 static void e1000g_set_external_loopback_1000(struct e1000g *);
134 static void e1000g_set_external_loopback_100(struct e1000g *);
135 static void e1000g_set_external_loopback_10(struct e1000g *);
136 static int e1000g_add_intrs(struct e1000g *);
137 static int e1000g_intr_add(struct e1000g *, int);
138 static int e1000g_rem_intrs(struct e1000g *);
139 static int e1000g_enable_intrs(struct e1000g *);
140 static int e1000g_disable_intrs(struct e1000g *);
141 static boolean_t e1000g_link_up(struct e1000g *);
142 #ifdef __sparc
143 static boolean_t e1000g_find_mac_address(struct e1000g *);
144 #endif
145 static void e1000g_free_priv_devi_node(struct e1000g *, boolean_t);
146 
147 static struct cb_ops cb_ws_ops = {
148 	nulldev,		/* cb_open */
149 	nulldev,		/* cb_close */
150 	nodev,			/* cb_strategy */
151 	nodev,			/* cb_print */
152 	nodev,			/* cb_dump */
153 	nodev,			/* cb_read */
154 	nodev,			/* cb_write */
155 	nodev,			/* cb_ioctl */
156 	nodev,			/* cb_devmap */
157 	nodev,			/* cb_mmap */
158 	nodev,			/* cb_segmap */
159 	nochpoll,		/* cb_chpoll */
160 	ddi_prop_op,		/* cb_prop_op */
161 	NULL,			/* cb_stream */
162 	D_MP | D_HOTPLUG,	/* cb_flag */
163 	CB_REV,			/* cb_rev */
164 	nodev,			/* cb_aread */
165 	nodev			/* cb_awrite */
166 };
167 
168 static struct dev_ops ws_ops = {
169 	DEVO_REV,		/* devo_rev */
170 	0,			/* devo_refcnt */
171 	NULL,			/* devo_getinfo */
172 	nulldev,		/* devo_identify */
173 	nulldev,		/* devo_probe */
174 	e1000g_attach,		/* devo_attach */
175 	e1000g_detach,		/* devo_detach */
176 	nodev,			/* devo_reset */
177 	&cb_ws_ops,		/* devo_cb_ops */
178 	NULL,			/* devo_bus_ops */
179 	ddi_power		/* devo_power */
180 };
181 
182 static struct modldrv modldrv = {
183 	&mod_driverops,		/* Type of module.  This one is a driver */
184 	ident,			/* Description string */
185 	&ws_ops,		/* driver ops */
186 };
187 
188 static struct modlinkage modlinkage = {
189 	MODREV_1, &modldrv, NULL
190 };
191 
192 /* Access attributes for register mapping */
193 static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
194 	DDI_DEVICE_ATTR_V0,
195 	DDI_STRUCTURE_LE_ACC,
196 	DDI_STRICTORDER_ACC,
197 };
198 
199 #define	E1000G_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)
200 
201 static mac_callbacks_t e1000g_m_callbacks = {
202 	E1000G_M_CALLBACK_FLAGS,
203 	e1000g_m_stat,
204 	e1000g_m_start,
205 	e1000g_m_stop,
206 	e1000g_m_promisc,
207 	e1000g_m_multicst,
208 	e1000g_m_unicst,
209 	e1000g_m_tx,
210 	e1000g_m_resources,
211 	e1000g_m_ioctl,
212 	e1000g_m_getcapab
213 };
214 
215 /*
216  * Global variables
217  */
218 
219 uint32_t e1000g_mblks_pending = 0;
220 /*
221  * Workaround for Dynamic Reconfiguration support, for the x86 platform only.
222  * Here we maintain a private dev_info list if e1000g_force_detach is
223  * enabled. If we force the driver to detach while there are still some
224  * rx buffers retained in the upper layer, we have to keep a copy of the
225  * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
226  * structure will be freed after the driver is detached. However, when we
227  * finally free those rx buffers released by the upper layer, we need to
228  * refer to the dev_info to free the dma buffers. So we save a copy of
229  * the dev_info for this purpose. On the x86 platform, we assume this copy
230  * of dev_info is always valid, but on the SPARC platform it could become
231  * invalid after a system-board-level DR operation. For this reason, the
232  * global variable e1000g_force_detach must be B_FALSE on the SPARC platform.
233  */
234 #ifdef __sparc
235 boolean_t e1000g_force_detach = B_FALSE;
236 #else
237 boolean_t e1000g_force_detach = B_TRUE;
238 #endif
239 private_devi_list_t *e1000g_private_devi_list = NULL;
240 
241 /*
242  * The rwlock is defined to protect rx recycling and the release of rx
243  * packets during detach processing, making the two operations mutually
244  * exclusive.
245  * Rx recycling handles different rx packets in different threads, so it
246  * takes the lock as RW_READER and therefore does not block other rx
247  * recycling threads.
248  * Detach processing, on the other hand, takes the lock as RW_WRITER to
249  * make it mutually exclusive with rx recycling.
250  */
251 krwlock_t e1000g_rx_detach_lock;
252 /*
253  * The rwlock e1000g_dma_type_lock is defined to protect the global flag
254  * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
255  * If there are many e1000g instances, the system may run out of DVMA
256  * resources during the initialization of the instances; the flag will then
257  * be changed to "USE_DMA". Because different e1000g instances are initialized
258  * in parallel, we need to use this lock to protect the flag.
259  */
260 krwlock_t e1000g_dma_type_lock;
261 
262 
263 /*
264  * Loadable module configuration entry points for the driver
265  */
266 
267 /*
268  * _init - module initialization
269  */
270 int
271 _init(void)
272 {
273 	int status;
274 
275 	mac_init_ops(&ws_ops, WSNAME);
276 	status = mod_install(&modlinkage);
277 	if (status != DDI_SUCCESS)
278 		mac_fini_ops(&ws_ops);
279 	else {
280 		rw_init(&e1000g_rx_detach_lock, NULL, RW_DRIVER, NULL);
281 		rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL);
282 	}
283 
284 	return (status);
285 }
286 
287 /*
288  * _fini - module finalization
289  */
290 int
291 _fini(void)
292 {
293 	int status;
294 
295 	rw_enter(&e1000g_rx_detach_lock, RW_READER);
296 	if (e1000g_mblks_pending != 0) {
297 		rw_exit(&e1000g_rx_detach_lock);
298 		return (EBUSY);
299 	}
300 	rw_exit(&e1000g_rx_detach_lock);
301 
302 	status = mod_remove(&modlinkage);
303 	if (status == DDI_SUCCESS) {
304 		mac_fini_ops(&ws_ops);
305 
306 		if (e1000g_force_detach) {
307 			private_devi_list_t *devi_node;
308 
309 			rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
310 			while (e1000g_private_devi_list != NULL) {
311 				devi_node = e1000g_private_devi_list;
312 				e1000g_private_devi_list =
313 				    e1000g_private_devi_list->next;
314 
315 				kmem_free(devi_node->priv_dip,
316 				    sizeof (struct dev_info));
317 				kmem_free(devi_node,
318 				    sizeof (private_devi_list_t));
319 			}
320 			rw_exit(&e1000g_rx_detach_lock);
321 		}
322 
323 		rw_destroy(&e1000g_rx_detach_lock);
324 		rw_destroy(&e1000g_dma_type_lock);
325 	}
326 
327 	return (status);
328 }
329 
330 /*
331  * _info - module information
332  */
333 int
334 _info(struct modinfo *modinfop)
335 {
336 	return (mod_info(&modlinkage, modinfop));
337 }
338 
339 /*
340  * e1000g_attach - driver attach
341  *
342  * This function is the device-specific initialization entry
343  * point. This entry point is required and must be written.
344  * The DDI_ATTACH command must be provided in the attach entry
345  * point. When attach() is called with cmd set to DDI_ATTACH,
346  * all normal kernel services (such as kmem_alloc(9F)) are
347  * available for use by the driver.
348  *
349  * The attach() function will be called once for each instance
350  * of the device on the system with cmd set to DDI_ATTACH.
351  * Until attach() succeeds, the only driver entry points which
352  * may be called are open(9E) and getinfo(9E).
353  */
354 static int
355 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
356 {
357 	struct e1000g *Adapter;
358 	struct e1000_hw *hw;
359 	struct e1000g_osdep *osdep;
360 	int instance;
361 
362 	switch (cmd) {
363 	default:
364 		e1000g_log(NULL, CE_WARN,
365 		    "Unsupported command sent to e1000g_attach... ");
366 		return (DDI_FAILURE);
367 
368 	case DDI_RESUME:
369 		return (e1000g_resume(devinfo));
370 
371 	case DDI_ATTACH:
372 		break;
373 	}
374 
375 	/*
376 	 * get device instance number
377 	 */
378 	instance = ddi_get_instance(devinfo);
379 
380 	/*
381 	 * Allocate soft data structure
382 	 */
383 	Adapter =
384 	    (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP);
385 
386 	Adapter->dip = devinfo;
387 	Adapter->instance = instance;
388 	Adapter->tx_ring->adapter = Adapter;
389 	Adapter->rx_ring->adapter = Adapter;
390 
391 	hw = &Adapter->shared;
392 	osdep = &Adapter->osdep;
393 	hw->back = osdep;
394 	osdep->adapter = Adapter;
395 
396 	ddi_set_driver_private(devinfo, (caddr_t)Adapter);
397 
398 	/*
399 	 * PCI Configure
400 	 */
401 	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
402 		e1000g_log(Adapter, CE_WARN, "PCI configuration failed");
403 		goto attach_fail;
404 	}
405 	Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
406 
407 	/*
408 	 * Setup hardware
409 	 */
410 	if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) {
411 		e1000g_log(Adapter, CE_WARN, "Identify hardware failed");
412 		goto attach_fail;
413 	}
414 
415 	/*
416 	 * Map in the device registers.
417 	 */
418 	if (e1000g_regs_map(Adapter) != DDI_SUCCESS) {
419 		e1000g_log(Adapter, CE_WARN, "Mapping registers failed");
420 		goto attach_fail;
421 	}
422 	Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
423 
424 	/*
425 	 * Initialize driver parameters
426 	 */
427 	if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) {
428 		goto attach_fail;
429 	}
430 	Adapter->attach_progress |= ATTACH_PROGRESS_SETUP;
431 
432 	/*
433 	 * Initialize interrupts
434 	 */
435 	if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) {
436 		e1000g_log(Adapter, CE_WARN, "Add interrupts failed");
437 		goto attach_fail;
438 	}
439 	Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
440 
441 	/*
442 	 * Initialize mutexes for this device.
443 	 * Do this before enabling the interrupt handler and
444 	 * registering the softint, to avoid the condition where the
445 	 * interrupt handler could try to use an uninitialized mutex.
446 	 */
447 	e1000g_init_locks(Adapter);
448 	Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS;
449 
450 	Adapter->tx_softint_pri = DDI_INTR_SOFTPRI_MAX;
451 	if (ddi_intr_add_softint(devinfo,
452 	    &Adapter->tx_softint_handle, Adapter->tx_softint_pri,
453 	    e1000g_tx_softint_worker, (caddr_t)Adapter) != DDI_SUCCESS) {
454 		e1000g_log(Adapter, CE_WARN, "Add soft intr failed");
455 		goto attach_fail;
456 	}
457 	Adapter->attach_progress |= ATTACH_PROGRESS_SOFT_INTR;
458 
459 	/*
460 	 * Initialize Driver Counters
461 	 */
462 	if (e1000g_init_stats(Adapter) != DDI_SUCCESS) {
463 		e1000g_log(Adapter, CE_WARN, "Init stats failed");
464 		goto attach_fail;
465 	}
466 	Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS;
467 
468 	/*
469 	 * Initialize chip hardware and software structures
470 	 */
471 	if (e1000g_init(Adapter) != DDI_SUCCESS) {
472 		e1000g_log(Adapter, CE_WARN, "Adapter initialization failed");
473 		goto attach_fail;
474 	}
475 	Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
476 
477 	/*
478 	 * Initialize NDD parameters
479 	 */
480 	if (e1000g_nd_init(Adapter) != DDI_SUCCESS) {
481 		e1000g_log(Adapter, CE_WARN, "Init ndd failed");
482 		goto attach_fail;
483 	}
484 	Adapter->attach_progress |= ATTACH_PROGRESS_NDD;
485 
486 	/*
487 	 * Register the driver to the MAC
488 	 */
489 	if (e1000g_register_mac(Adapter) != DDI_SUCCESS) {
490 		e1000g_log(Adapter, CE_WARN, "Register MAC failed");
491 		goto attach_fail;
492 	}
493 	Adapter->attach_progress |= ATTACH_PROGRESS_MAC;
494 
495 	/*
496 	 * Now that mutex locks are initialized, and the chip is also
497 	 * initialized, enable interrupts.
498 	 */
499 	if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) {
500 		e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed");
501 		goto attach_fail;
502 	}
503 	Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
504 
505 	/*
506 	 * If e1000g_force_detach is enabled, create a new entry in the global
507 	 * private dip list, which maintains the priv_dip for DR support after
508 	 * the driver is detached.
509 	 */
510 	if (e1000g_force_detach) {
511 		private_devi_list_t *devi_node;
512 
513 		Adapter->priv_dip =
514 		    kmem_zalloc(sizeof (struct dev_info), KM_SLEEP);
515 		bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip),
516 		    sizeof (struct dev_info));
517 
518 		devi_node =
519 		    kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP);
520 
521 		rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
522 		devi_node->priv_dip = Adapter->priv_dip;
523 		devi_node->flag = E1000G_PRIV_DEVI_ATTACH;
524 		devi_node->next = e1000g_private_devi_list;
525 		e1000g_private_devi_list = devi_node;
526 		rw_exit(&e1000g_rx_detach_lock);
527 	}
528 
529 	cmn_err(CE_CONT, "!%s, %s\n", e1000g_string, e1000g_version);
530 
531 	return (DDI_SUCCESS);
532 
533 attach_fail:
534 	e1000g_unattach(devinfo, Adapter);
535 	return (DDI_FAILURE);
536 }
537 
538 static int
539 e1000g_register_mac(struct e1000g *Adapter)
540 {
541 	struct e1000_hw *hw = &Adapter->shared;
542 	mac_register_t *mac;
543 	int err;
544 
545 	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
546 		return (DDI_FAILURE);
547 
548 	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
549 	mac->m_driver = Adapter;
550 	mac->m_dip = Adapter->dip;
551 	mac->m_src_addr = hw->mac.addr;
552 	mac->m_callbacks = &e1000g_m_callbacks;
553 	mac->m_min_sdu = 0;
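	/*
	 * Derive the maximum SDU from the configured maximum frame size:
	 * frames larger than 8K report max_frame_size minus 256, other
	 * non-standard sizes report max_frame_size minus 24, and the
	 * standard frame size maps to ETHERMTU.
	 */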
554 	mac->m_max_sdu =
555 	    (hw->mac.max_frame_size > FRAME_SIZE_UPTO_8K) ?
556 	    hw->mac.max_frame_size - 256 :
557 	    (hw->mac.max_frame_size != ETHERMAX) ?
558 	    hw->mac.max_frame_size - 24 : ETHERMTU;
559 
560 	err = mac_register(mac, &Adapter->mh);
561 	mac_free(mac);
562 
563 	return (err == 0 ? DDI_SUCCESS : DDI_FAILURE);
564 }
565 
566 static int
567 e1000g_identify_hardware(struct e1000g *Adapter)
568 {
569 	struct e1000_hw *hw = &Adapter->shared;
570 	struct e1000g_osdep *osdep = &Adapter->osdep;
571 
572 	/* Get the PCI vendor, device, revision and subsystem IDs */
573 	hw->vendor_id =
574 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
575 	hw->device_id =
576 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
577 	hw->revision_id =
578 	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
579 	hw->subsystem_device_id =
580 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
581 	hw->subsystem_vendor_id =
582 	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
583 
584 	if (e1000_set_mac_type(hw) != E1000_SUCCESS) {
585 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
586 		    "MAC type could not be set properly.");
587 		return (DDI_FAILURE);
588 	}
589 
590 	return (DDI_SUCCESS);
591 }
592 
593 static int
594 e1000g_regs_map(struct e1000g *Adapter)
595 {
596 	dev_info_t *devinfo = Adapter->dip;
597 	struct e1000_hw *hw = &Adapter->shared;
598 	struct e1000g_osdep *osdep = &Adapter->osdep;
599 	off_t mem_size;
600 
601 	/*
602 	 * First get the size of the device register set to be mapped. The
603 	 * second parameter is the register set we are interested in. In our
604 	 * Wiseman hardware, 0 is for config registers and 1 is for memory-
605 	 * mapped registers; mem_size will hold the memory-mapped region size.
606 	 */
607 	if (ddi_dev_regsize(devinfo, 1, &mem_size) != DDI_SUCCESS) {
608 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
609 		    "ddi_dev_regsize for registers failed");
610 		return (DDI_FAILURE);
611 	}
612 
613 	if ((ddi_regs_map_setup(devinfo, 1, /* register of interest */
614 	    (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr,
615 	    &osdep->reg_handle)) != DDI_SUCCESS) {
616 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
617 		    "ddi_regs_map_setup for registers failed");
618 		goto regs_map_fail;
619 	}
620 
621 	/* ICH needs to map flash memory */
622 	if (hw->mac.type == e1000_ich8lan || hw->mac.type == e1000_ich9lan) {
623 		/* get flash size */
624 		if (ddi_dev_regsize(devinfo, ICH_FLASH_REG_SET,
625 		    &mem_size) != DDI_SUCCESS) {
626 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
627 			    "ddi_dev_regsize for ICH flash failed");
628 			goto regs_map_fail;
629 		}
630 
631 		/* map flash in */
632 		if (ddi_regs_map_setup(devinfo, ICH_FLASH_REG_SET,
633 		    (caddr_t *)&hw->flash_address, 0,
634 		    mem_size, &e1000g_regs_acc_attr,
635 		    &osdep->ich_flash_handle) != DDI_SUCCESS) {
636 			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
637 			    "ddi_regs_map_setup for ICH flash failed");
638 			goto regs_map_fail;
639 		}
640 	}
641 
642 	return (DDI_SUCCESS);
643 
644 regs_map_fail:
645 	if (osdep->reg_handle != NULL)
646 		ddi_regs_map_free(&osdep->reg_handle);
647 
648 	return (DDI_FAILURE);
649 }
650 
651 static int
652 e1000g_set_driver_params(struct e1000g *Adapter)
653 {
654 	struct e1000_hw *hw;
655 	e1000g_tx_ring_t *tx_ring;
656 	uint32_t mem_bar, io_bar, bar64;
657 #ifdef __sparc
658 	dev_info_t *devinfo = Adapter->dip;
659 	ulong_t iommu_pagesize;
660 #endif
661 
662 	hw = &Adapter->shared;
663 
664 	/* Set MAC type and initialize hardware functions */
665 	if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) {
666 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
667 		    "Could not setup hardware functions");
668 		return (DDI_FAILURE);
669 	}
670 
671 	/* Get bus information */
672 	if (e1000_get_bus_info(hw) != E1000_SUCCESS) {
673 		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
674 		    "Could not get bus information");
675 		return (DDI_FAILURE);
676 	}
677 
678 	/* get mem_base addr */
679 	mem_bar = pci_config_get32(Adapter->osdep.cfg_handle, PCI_CONF_BASE0);
680 	bar64 = mem_bar & PCI_BASE_TYPE_ALL;
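	/*
	 * bar64 is non-zero when the memory BAR at BASE0 is not a plain
	 * 32-bit BAR (64-bit BAR mode); the I/O BAR then lives at a
	 * different config-space offset, handled below.
	 */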
681 
682 	/* get io_base addr */
683 	if (hw->mac.type >= e1000_82544) {
684 		if (bar64) {
685 			/* IO BAR is different for 64 bit BAR mode */
686 			io_bar = pci_config_get32(Adapter->osdep.cfg_handle,
687 			    PCI_CONF_BASE4);
688 		} else {
689 			/* normal 32-bit BAR mode */
690 			io_bar = pci_config_get32(Adapter->osdep.cfg_handle,
691 			    PCI_CONF_BASE2);
692 		}
693 		hw->io_base = io_bar & PCI_BASE_IO_ADDR_M;
694 	} else {
695 		/* no I/O access for adapters prior to 82544 */
696 		hw->io_base = 0x0;
697 	}
698 
699 	e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word);
700 
701 	hw->mac.autoneg_failed = B_TRUE;
702 
703 	/* Set the wait_for_link flag to B_FALSE */
704 	hw->phy.wait_for_link = B_FALSE;
705 
706 	/* Adaptive IFS related changes */
707 	hw->mac.adaptive_ifs = B_TRUE;
708 
709 	/* Enable phy init script for IGP phy of 82541/82547 */
710 	if ((hw->mac.type == e1000_82547) ||
711 	    (hw->mac.type == e1000_82541) ||
712 	    (hw->mac.type == e1000_82547_rev_2) ||
713 	    (hw->mac.type == e1000_82541_rev_2))
714 		e1000_init_script_state_82541(hw, B_TRUE);
715 
716 	/* Enable the TTL workaround for 82541/82547 */
717 	e1000_set_ttl_workaround_state_82541(hw, B_TRUE);
718 
719 #ifdef __sparc
720 	Adapter->strip_crc = B_TRUE;
721 #else
722 	Adapter->strip_crc = B_FALSE;
723 #endif
724 
725 	/* Get conf file properties */
726 	e1000g_get_conf(Adapter);
727 
728 	/* Get speed/duplex settings in conf file */
729 	hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
730 	hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
731 	e1000g_force_speed_duplex(Adapter);
732 
733 	/* Get Jumbo Frames settings in conf file */
734 	e1000g_get_max_frame_size(Adapter);
735 	hw->mac.min_frame_size =
736 	    MINIMUM_ETHERNET_PACKET_SIZE + CRC_LENGTH;
737 
738 #ifdef __sparc
739 	/* Get the system page size */
740 	Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1);
741 	iommu_pagesize = dvma_pagesize(devinfo);
742 	if (iommu_pagesize != 0) {
743 		if (Adapter->sys_page_sz == iommu_pagesize) {
744 			if (iommu_pagesize > 0x4000)
745 				Adapter->sys_page_sz = 0x4000;
746 		} else {
747 			if (Adapter->sys_page_sz > iommu_pagesize)
748 				Adapter->sys_page_sz = iommu_pagesize;
749 		}
750 	}
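	/*
	 * Number of DVMA pages needed to map one maximum-sized frame, plus
	 * a reserve of E1000G_DEFAULT_DVMA_PAGE_NUM pages.
	 */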
751 	Adapter->dvma_page_num = hw->mac.max_frame_size /
752 	    Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
753 	ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM);
754 #endif
755 
756 	/* Set Rx/Tx buffer size */
757 	switch (hw->mac.max_frame_size) {
758 	case ETHERMAX:
759 		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
760 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;
761 		break;
762 	case FRAME_SIZE_UPTO_4K:
763 		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
764 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
765 		break;
766 	case FRAME_SIZE_UPTO_8K:
767 		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
768 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
769 		break;
770 	case FRAME_SIZE_UPTO_9K:
771 	case FRAME_SIZE_UPTO_16K:
772 		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
773 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
774 		break;
775 	default:
776 		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
777 		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;
778 		break;
779 	}
780 	Adapter->rx_buffer_size += E1000G_IPALIGNPRESERVEROOM;
781 
782 #ifndef NO_82542_SUPPORT
783 	/*
784 	 * For Wiseman adapters we have a requirement that receive buffers
785 	 * be aligned on a 256-byte boundary. Since Livengood does not
786 	 * require this, and forcing it for all hardware would have
787 	 * performance implications, the alignment is applied only to
788 	 * Wiseman and to the jumbo-frames-enabled mode; the rest of the
789 	 * time it is okay to have normal frames. It does, however,
790 	 * involve a potential risk of losing data if a buffer is not
791 	 * properly aligned, so all Wiseman boards get 256-byte-aligned
792 	 * buffers.
793 	 */
794 	if (hw->mac.type < e1000_82543)
795 		Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
796 	else
797 		Adapter->rx_buf_align = 1;
798 #endif
799 
800 	/* Master Latency Timer */
801 	Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER;
802 
803 	/* copper options */
804 	if (hw->media_type == e1000_media_type_copper) {
805 		hw->phy.mdix = 0;	/* AUTO_ALL_MODES */
806 		hw->phy.disable_polarity_correction = B_FALSE;
807 		hw->phy.ms_type = e1000_ms_hw_default;	/* E1000_MASTER_SLAVE */
808 	}
809 
810 	/* The initial link state should be "unknown" */
811 	Adapter->link_state = LINK_STATE_UNKNOWN;
812 
813 	/* Initialize tx parameters */
814 	Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE;
815 	Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD;
816 
817 	tx_ring = Adapter->tx_ring;
818 	tx_ring->recycle_low_water = DEFAULT_TX_RECYCLE_LOW_WATER;
819 	tx_ring->recycle_num = DEFAULT_TX_RECYCLE_NUM;
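	/*
	 * frags_limit is derived from the maximum frame size and the tx
	 * bcopy threshold, and is capped at half of the per-packet
	 * descriptor limit (MAX_TX_DESC_PER_PACKET).
	 */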
820 	tx_ring->frags_limit =
821 	    (hw->mac.max_frame_size / Adapter->tx_bcopy_thresh) + 2;
822 	if (tx_ring->frags_limit > (MAX_TX_DESC_PER_PACKET >> 1))
823 		tx_ring->frags_limit = (MAX_TX_DESC_PER_PACKET >> 1);
824 
825 	/* Initialize rx parameters */
826 	Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD;
827 
828 	return (DDI_SUCCESS);
829 }
830 
831 /*
832  * e1000g_detach - driver detach
833  *
834  * The detach() function is the complement of the attach routine.
835  * If cmd is set to DDI_DETACH, detach() is used to remove  the
836  * state  associated  with  a  given  instance of a device node
837  * prior to the removal of that instance from the system.
838  *
839  * The detach() function will be called once for each  instance
840  * of the device for which there has been a successful attach()
841  * once there are no longer  any  opens  on  the  device.
842  *
843  * Interrupts routine are disabled, All memory allocated by this
844  * driver are freed.
845  */
846 static int
847 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
848 {
849 	struct e1000g *Adapter;
850 	boolean_t rx_drain;
851 
852 	switch (cmd) {
853 	default:
854 		return (DDI_FAILURE);
855 
856 	case DDI_SUSPEND:
857 		return (e1000g_suspend(devinfo));
858 
859 	case DDI_DETACH:
860 		break;
861 	}
862 
863 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
864 	if (Adapter == NULL)
865 		return (DDI_FAILURE);
866 
867 	if (mac_unregister(Adapter->mh) != 0) {
868 		e1000g_log(Adapter, CE_WARN, "Unregister MAC failed");
869 		return (DDI_FAILURE);
870 	}
871 	Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC;
872 
873 	if (Adapter->started)
874 		e1000g_stop(Adapter, B_TRUE);
875 
876 	rx_drain = e1000g_rx_drain(Adapter);
877 
878 	/*
879 	 * If e1000g_force_detach is enabled, driver detach is safe.
880 	 * We will let the e1000g_free_priv_devi_node routine determine
881 	 * whether we need to free the priv_dip entry for the current
882 	 * driver instance.
883 	 */
884 	if (e1000g_force_detach) {
885 		e1000g_free_priv_devi_node(Adapter, rx_drain);
886 	} else {
887 		if (!rx_drain)
888 			return (DDI_FAILURE);
889 	}
890 
891 	e1000g_unattach(devinfo, Adapter);
892 
893 	return (DDI_SUCCESS);
894 }
895 
896 /*
897  * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance
898  *
899  * If free_flag is true, the upper layer is not holding any rx buffers
900  * and we can free the priv_dip entry safely.
901  *
902  * Otherwise, we have to keep this entry even after the driver is
903  * detached, and mark it with the E1000G_PRIV_DEVI_DETACH flag, so that
904  * the driver can free it later, once all rx buffers have been returned
905  * by the upper layer.
906  */
907 static void
908 e1000g_free_priv_devi_node(struct e1000g *Adapter, boolean_t free_flag)
909 {
910 	private_devi_list_t *devi_node, *devi_del;
911 
912 	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
913 	ASSERT(e1000g_private_devi_list != NULL);
914 	ASSERT(Adapter->priv_dip != NULL);
915 
916 	devi_node = e1000g_private_devi_list;
917 	if (devi_node->priv_dip == Adapter->priv_dip) {
918 		if (free_flag) {
919 			e1000g_private_devi_list =
920 			    devi_node->next;
921 			kmem_free(devi_node->priv_dip,
922 			    sizeof (struct dev_info));
923 			kmem_free(devi_node,
924 			    sizeof (private_devi_list_t));
925 		} else {
926 			ASSERT(e1000g_mblks_pending != 0);
927 			devi_node->flag =
928 			    E1000G_PRIV_DEVI_DETACH;
929 		}
930 		rw_exit(&e1000g_rx_detach_lock);
931 		return;
932 	}
933 
934 	devi_node = e1000g_private_devi_list;
935 	while (devi_node->next != NULL) {
936 		if (devi_node->next->priv_dip == Adapter->priv_dip) {
937 			if (free_flag) {
938 				devi_del = devi_node->next;
939 				devi_node->next = devi_del->next;
940 				kmem_free(devi_del->priv_dip,
941 				    sizeof (struct dev_info));
942 				kmem_free(devi_del,
943 				    sizeof (private_devi_list_t));
944 			} else {
945 				ASSERT(e1000g_mblks_pending != 0);
946 				devi_node->next->flag =
947 				    E1000G_PRIV_DEVI_DETACH;
948 			}
949 			break;
950 		}
951 		devi_node = devi_node->next;
952 	}
953 	rw_exit(&e1000g_rx_detach_lock);
954 }
955 
956 static void
957 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter)
958 {
959 	if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
960 		(void) e1000g_disable_intrs(Adapter);
961 	}
962 
963 	if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) {
964 		(void) mac_unregister(Adapter->mh);
965 	}
966 
967 	if (Adapter->attach_progress & ATTACH_PROGRESS_NDD) {
968 		e1000g_nd_cleanup(Adapter);
969 	}
970 
971 	if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
972 		(void) e1000g_rem_intrs(Adapter);
973 	}
974 
975 	if (Adapter->attach_progress & ATTACH_PROGRESS_SOFT_INTR) {
976 		(void) ddi_intr_remove_softint(Adapter->tx_softint_handle);
977 	}
978 
979 	if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) {
980 		(void) ddi_prop_remove_all(devinfo);
981 	}
982 
983 	if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) {
984 		kstat_delete((kstat_t *)Adapter->e1000g_ksp);
985 	}
986 
987 	if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) {
988 		stop_link_timer(Adapter);
989 		e1000_reset_hw(&Adapter->shared);
990 	}
991 
992 	if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
993 		if (Adapter->osdep.reg_handle != NULL)
994 			ddi_regs_map_free(&Adapter->osdep.reg_handle);
995 		if (Adapter->osdep.ich_flash_handle != NULL)
996 			ddi_regs_map_free(&Adapter->osdep.ich_flash_handle);
997 	}
998 
999 	if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
1000 		if (Adapter->osdep.cfg_handle != NULL)
1001 			pci_config_teardown(&Adapter->osdep.cfg_handle);
1002 	}
1003 
1004 	if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) {
1005 		e1000g_destroy_locks(Adapter);
1006 	}
1007 
1008 	e1000_remove_device(&Adapter->shared);
1009 
1010 	kmem_free((caddr_t)Adapter, sizeof (struct e1000g));
1011 
1012 	/*
1013 	 * Another hotplug spec requirement:
1014 	 * run ddi_set_driver_private(devinfo, NULL);
1015 	 */
1016 	ddi_set_driver_private(devinfo, NULL);
1017 }
1018 
1019 static void
1020 e1000g_init_locks(struct e1000g *Adapter)
1021 {
1022 	e1000g_tx_ring_t *tx_ring;
1023 	e1000g_rx_ring_t *rx_ring;
1024 
1025 	rw_init(&Adapter->chip_lock, NULL,
1026 	    RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1027 	mutex_init(&Adapter->link_lock, NULL,
1028 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1029 	mutex_init(&Adapter->watchdog_lock, NULL,
1030 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1031 
1032 	tx_ring = Adapter->tx_ring;
1033 
1034 	mutex_init(&tx_ring->tx_lock, NULL,
1035 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1036 	mutex_init(&tx_ring->usedlist_lock, NULL,
1037 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1038 	mutex_init(&tx_ring->freelist_lock, NULL,
1039 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1040 	mutex_init(&tx_ring->mblks_lock, NULL,
1041 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1042 
1043 	rx_ring = Adapter->rx_ring;
1044 
1045 	mutex_init(&rx_ring->rx_lock, NULL,
1046 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1047 	mutex_init(&rx_ring->freelist_lock, NULL,
1048 	    MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1049 }
1050 
1051 static void
1052 e1000g_destroy_locks(struct e1000g *Adapter)
1053 {
1054 	e1000g_tx_ring_t *tx_ring;
1055 	e1000g_rx_ring_t *rx_ring;
1056 
1057 	tx_ring = Adapter->tx_ring;
1058 	mutex_destroy(&tx_ring->tx_lock);
1059 	mutex_destroy(&tx_ring->usedlist_lock);
1060 	mutex_destroy(&tx_ring->freelist_lock);
1061 	mutex_destroy(&tx_ring->mblks_lock);
1062 
1063 	rx_ring = Adapter->rx_ring;
1064 	mutex_destroy(&rx_ring->rx_lock);
1065 	mutex_destroy(&rx_ring->freelist_lock);
1066 
1067 	mutex_destroy(&Adapter->link_lock);
1068 	mutex_destroy(&Adapter->watchdog_lock);
1069 	rw_destroy(&Adapter->chip_lock);
1070 }
1071 
1072 static int
1073 e1000g_resume(dev_info_t *devinfo)
1074 {
1075 	struct e1000g *Adapter;
1076 
1077 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1078 	if (Adapter == NULL)
1079 		return (DDI_FAILURE);
1080 
1081 	if (e1000g_start(Adapter, B_TRUE))
1082 		return (DDI_FAILURE);
1083 
1084 	return (DDI_SUCCESS);
1085 }
1086 
1087 static int
1088 e1000g_suspend(dev_info_t *devinfo)
1089 {
1090 	struct e1000g *Adapter;
1091 
1092 	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1093 	if (Adapter == NULL)
1094 		return (DDI_FAILURE);
1095 
1096 	e1000g_stop(Adapter, B_TRUE);
1097 
1098 	return (DDI_SUCCESS);
1099 }
1100 
1101 static int
1102 e1000g_init(struct e1000g *Adapter)
1103 {
1104 	uint32_t pba;
1105 	uint32_t high_water;
1106 	struct e1000_hw *hw;
1107 	clock_t link_timeout;
1108 
1109 	hw = &Adapter->shared;
1110 
1111 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1112 
1113 	/*
1114 	 * reset to put the hardware in a known state
1115 	 * before we try to do anything with the eeprom
1116 	 */
1117 	(void) e1000_reset_hw(hw);
1118 
1119 	if (e1000_validate_nvm_checksum(hw) < 0) {
1120 		/*
1121 		 * Some PCI-E parts fail the first check due to
1122 		 * the link being in a sleep state.  Call it again;
1123 		 * if it fails a second time, it's a real issue.
1124 		 */
1125 		if (e1000_validate_nvm_checksum(hw) < 0) {
1126 			e1000g_log(Adapter, CE_WARN,
1127 			    "Invalid NVM checksum. Please contact "
1128 			    "the vendor to update the NVM.");
1129 			goto init_fail;
1130 		}
1131 	}
1132 
1133 #ifdef __sparc
1134 	/*
1135 	 * First, we try to get the local Ethernet address from OBP. If
1136 	 * that fails, we read it from the EEPROM on the NIC.
1137 	 */
1138 	if (!e1000g_find_mac_address(Adapter)) {
1139 		if (e1000_read_mac_addr(hw) < 0) {
1140 			e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
1141 			goto init_fail;
1142 		}
1143 	}
1144 #else
1145 	/* Get the local ethernet address. */
1146 	if (e1000_read_mac_addr(hw) < 0) {
1147 		e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
1148 		goto init_fail;
1149 	}
1150 #endif
1151 
1152 	/* check for valid mac address */
1153 	if (!is_valid_mac_addr(hw->mac.addr)) {
1154 		e1000g_log(Adapter, CE_WARN, "Invalid mac addr");
1155 		goto init_fail;
1156 	}
1157 
1158 	/* Set LAA state for 82571 chipset */
1159 	e1000_set_laa_state_82571(hw, B_TRUE);
1160 
1161 	/* Master Latency Timer implementation */
1162 	if (Adapter->master_latency_timer) {
1163 		pci_config_put8(Adapter->osdep.cfg_handle,
1164 		    PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer);
1165 	}
1166 
1167 	if (hw->mac.type < e1000_82547) {
1168 		/*
1169 		 * Total FIFO is 64K
1170 		 */
1171 		if (hw->mac.max_frame_size > FRAME_SIZE_UPTO_8K)
1172 			pba = E1000_PBA_40K;	/* 40K for Rx, 24K for Tx */
1173 		else
1174 			pba = E1000_PBA_48K;	/* 48K for Rx, 16K for Tx */
1175 	} else if (hw->mac.type >= e1000_82571 &&
1176 	    hw->mac.type <= e1000_82572) {
1177 		/*
1178 		 * Total FIFO is 48K
1179 		 */
1180 		if (hw->mac.max_frame_size > FRAME_SIZE_UPTO_8K)
1181 			pba = E1000_PBA_30K;	/* 30K for Rx, 18K for Tx */
1182 		else
1183 			pba = E1000_PBA_38K;	/* 38K for Rx, 10K for Tx */
1184 	} else if (hw->mac.type == e1000_ich8lan) {
1185 		pba = E1000_PBA_8K;		/* 8K for Rx, 12K for Tx */
1186 	} else if (hw->mac.type == e1000_ich9lan) {
1187 		pba = E1000_PBA_12K;
1188 	} else {
1189 		/*
1190 		 * Total FIFO is 40K
1191 		 */
1192 		if (hw->mac.max_frame_size > FRAME_SIZE_UPTO_8K)
1193 			pba = E1000_PBA_22K;	/* 22K for Rx, 18K for Tx */
1194 		else
1195 			pba = E1000_PBA_30K;	/* 30K for Rx, 10K for Tx */
1196 	}
1197 	E1000_WRITE_REG(hw, E1000_PBA, pba);
1198 
1199 	/*
1200 	 * These parameters set thresholds for the adapter's generation (Tx)
1201 	 * and response (Rx) to Ethernet PAUSE frames.  These are just threshold
1202 	 * settings.  Flow control is enabled or disabled in the configuration
1203 	 * file.
1204 	 * High-water mark is set down from the top of the rx fifo (not
1205 	 * sensitive to max_frame_size) and low-water is set just below
1206 	 * high-water mark.
1207 	 * The high water mark must be low enough to fit one full frame above
1208 	 * it in the rx FIFO.  Should be the lower of:
1209 	 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early
1210 	 * receive size (assuming ERT set to E1000_ERT_2048), or the full
1211 	 * Rx FIFO size minus one full frame.
1212 	 */
1213 	high_water = min(((pba << 10) * 9 / 10),
1214 	    ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_ich9lan) ?
1215 	    ((pba << 10) - (E1000_ERT_2048 << 3)) :
1216 	    ((pba << 10) - hw->mac.max_frame_size)));
1217 
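	/*
	 * Round the high-water mark down to an 8-byte boundary; the
	 * low-water mark sits 8 bytes below it.
	 */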
1218 	hw->mac.fc_high_water = high_water & 0xFFF8;
1219 	hw->mac.fc_low_water = hw->mac.fc_high_water - 8;
1220 
1221 	if (hw->mac.type == e1000_80003es2lan)
1222 		hw->mac.fc_pause_time = 0xFFFF;
1223 	else
1224 		hw->mac.fc_pause_time = E1000_FC_PAUSE_TIME;
1225 	hw->mac.fc_send_xon = B_TRUE;
1226 	hw->mac.fc = hw->mac.original_fc;
1227 
1228 	/*
1229 	 * Reset the adapter hardware for the second time.
1230 	 */
1231 	(void) e1000_reset_hw(hw);
1232 
1233 	/* disable wakeup control by default */
1234 	if (hw->mac.type >= e1000_82544)
1235 		E1000_WRITE_REG(hw, E1000_WUC, 0);
1236 
1237 	/* MWI setup */
1238 	e1000_pci_set_mwi(hw);
1239 
1240 	/*
1241 	 * Configure/Initialize hardware
1242 	 */
1243 	if (e1000_init_hw(hw) < 0) {
1244 		e1000g_log(Adapter, CE_WARN, "Initialize hw failed");
1245 		goto init_fail;
1246 	}
1247 
1248 	/* Disable Smart Power Down */
1249 	phy_spd_state(hw, B_FALSE);
1250 
1251 	/*
1252 	 * Initialize unicast addresses.
1253 	 */
1254 	e1000g_init_unicst(Adapter);
1255 
1256 	/*
1257 	 * Set up and initialize the mctable structures.  After this routine
1258 	 * completes, the multicast table will be set.
1259 	 */
1260 	e1000g_setup_multicast(Adapter);
1261 	msec_delay(5);
1262 
1263 	/*
1264 	 * Implement Adaptive IFS
1265 	 */
1266 	e1000_reset_adaptive(hw);
1267 
1268 	/* Setup Interrupt Throttling Register */
1269 	E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate);
1270 
1271 	/* Start the timer for link setup */
1272 	if (hw->mac.autoneg)
1273 		link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000);
1274 	else
1275 		link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000);
1276 
1277 	mutex_enter(&Adapter->link_lock);
1278 	if (hw->phy.wait_for_link) {
1279 		Adapter->link_complete = B_TRUE;
1280 	} else {
1281 		Adapter->link_complete = B_FALSE;
1282 		Adapter->link_tid = timeout(e1000g_link_timer,
1283 		    (void *)Adapter, link_timeout);
1284 	}
1285 	mutex_exit(&Adapter->link_lock);
1286 
1287 	/* Enable PCI-Ex master */
1288 	if (hw->bus.type == e1000_bus_type_pci_express) {
1289 		e1000_enable_pciex_master(hw);
1290 	}
1291 
1292 	Adapter->init_count++;
1293 
1294 	rw_exit(&Adapter->chip_lock);
1295 
1296 	return (DDI_SUCCESS);
1297 
1298 init_fail:
1299 	rw_exit(&Adapter->chip_lock);
1300 	return (DDI_FAILURE);
1301 }
1302 
1303 /*
1304  * Check if the link is up
1305  */
1306 static boolean_t
1307 e1000g_link_up(struct e1000g *Adapter)
1308 {
1309 	struct e1000_hw *hw;
1310 	boolean_t link_up;
1311 
1312 	hw = &Adapter->shared;
1313 
1314 	/* Ensure this is set to get accurate copper link status */
1315 	hw->mac.get_link_status = B_TRUE;
1316 
1317 	e1000_check_for_link(hw);
1318 
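	/*
	 * The link is reported up when the STATUS register indicates
	 * link-up, when an 82543 has already resolved its link status,
	 * or when a serdes-based adapter reports serdes link.
	 */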
1319 	if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU) ||
1320 	    ((!hw->mac.get_link_status) && (hw->mac.type == e1000_82543)) ||
1321 	    ((hw->media_type == e1000_media_type_internal_serdes) &&
1322 	    (hw->mac.serdes_has_link))) {
1323 		link_up = B_TRUE;
1324 	} else {
1325 		link_up = B_FALSE;
1326 	}
1327 
1328 	return (link_up);
1329 }
1330 
1331 static void
1332 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
1333 {
1334 	struct iocblk *iocp;
1335 	struct e1000g *e1000gp;
1336 	enum ioc_reply status;
1337 	int err;
1338 
1339 	iocp = (struct iocblk *)mp->b_rptr;
1340 	iocp->ioc_error = 0;
1341 	e1000gp = (struct e1000g *)arg;
1342 
1343 	ASSERT(e1000gp);
1344 	if (e1000gp == NULL) {
1345 		miocnak(q, mp, 0, EINVAL);
1346 		return;
1347 	}
1348 
1349 	switch (iocp->ioc_cmd) {
1350 
1351 	case LB_GET_INFO_SIZE:
1352 	case LB_GET_INFO:
1353 	case LB_GET_MODE:
1354 	case LB_SET_MODE:
1355 		status = e1000g_loopback_ioctl(e1000gp, iocp, mp);
1356 		break;
1357 
1358 	case ND_GET:
1359 	case ND_SET:
1360 		status = e1000g_nd_ioctl(e1000gp, q, mp, iocp);
1361 		break;
1362 
1363 #ifdef E1000G_DEBUG
1364 	case E1000G_IOC_REG_PEEK:
1365 	case E1000G_IOC_REG_POKE:
1366 		status = e1000g_pp_ioctl(e1000gp, iocp, mp);
1367 		break;
1368 	case E1000G_IOC_CHIP_RESET:
1369 		e1000gp->reset_count++;
1370 		if (e1000g_reset(e1000gp))
1371 			status = IOC_ACK;
1372 		else
1373 			status = IOC_INVAL;
1374 		break;
1375 #endif
1376 	default:
1377 		status = IOC_INVAL;
1378 		break;
1379 	}
1380 
1381 	/*
1382 	 * Decide how to reply
1383 	 */
1384 	switch (status) {
1385 	default:
1386 	case IOC_INVAL:
1387 		/*
1388 		 * Error, reply with a NAK and EINVAL or the specified error
1389 		 */
1390 		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
1391 		    EINVAL : iocp->ioc_error);
1392 		break;
1393 
1394 	case IOC_DONE:
1395 		/*
1396 		 * OK, reply already sent
1397 		 */
1398 		break;
1399 
1400 	case IOC_ACK:
1401 		/*
1402 		 * OK, reply with an ACK
1403 		 */
1404 		miocack(q, mp, 0, 0);
1405 		break;
1406 
1407 	case IOC_REPLY:
1408 		/*
1409 		 * OK, send prepared reply as ACK or NAK
1410 		 */
1411 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
1412 		    M_IOCACK : M_IOCNAK;
1413 		qreply(q, mp);
1414 		break;
1415 	}
1416 }
1417 
1418 static void
e1000g_m_blank(void *arg, time_t ticks, uint32_t count)
1419 {
1420 	struct e1000g *Adapter;
1421 
1422 	Adapter = (struct e1000g *)arg;
1423 
1424 	/*
1425 	 * Adjust ITR (Interrupt Throttling Register) to coalesce
1426 	 * interrupts. This formula and its coefficient come from
1427 	 * our experiments.
1428 	 */
1429 	if (Adapter->intr_adaptive) {
1430 		Adapter->intr_throttling_rate = count << 5;
1431 		E1000_WRITE_REG(&Adapter->shared, E1000_ITR,
1432 		    Adapter->intr_throttling_rate);
1433 	}
1434 }
1435 
1436 static void
1437 e1000g_m_resources(void *arg)
1438 {
1439 	struct e1000g *adapter = (struct e1000g *)arg;
1440 	mac_rx_fifo_t mrf;
1441 
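	/*
	 * Describe our rx FIFO to the MAC layer so it can tune interrupt
	 * blanking through the e1000g_m_blank() callback, using the
	 * default blanking time and packet count below.
	 */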
1442 	mrf.mrf_type = MAC_RX_FIFO;
1443 	mrf.mrf_blank = e1000g_m_blank;
1444 	mrf.mrf_arg = (void *)adapter;
1445 	mrf.mrf_normal_blank_time = E1000_RX_INTPT_TIME;
1446 	mrf.mrf_normal_pkt_count = E1000_RX_PKT_CNT;
1447 
1448 	adapter->mrh = mac_resource_add(adapter->mh, (mac_resource_t *)&mrf);
1449 }
1450 
1451 static int
1452 e1000g_m_start(void *arg)
1453 {
1454 	struct e1000g *Adapter = (struct e1000g *)arg;
1455 
1456 	return (e1000g_start(Adapter, B_TRUE));
1457 }
1458 
1459 static int
1460 e1000g_start(struct e1000g *Adapter, boolean_t global)
1461 {
1462 	if (global) {
1463 		/* Allocate dma resources for descriptors and buffers */
1464 		if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) {
1465 			e1000g_log(Adapter, CE_WARN,
1466 			    "Alloc DMA resources failed");
1467 			return (ENOTACTIVE);
1468 		}
1469 		Adapter->rx_buffer_setup = B_FALSE;
1470 	}
1471 
1472 	if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) {
1473 		if (e1000g_init(Adapter) != DDI_SUCCESS) {
1474 			e1000g_log(Adapter, CE_WARN,
1475 			    "Adapter initialization failed");
1476 			if (global)
1477 				e1000g_release_dma_resources(Adapter);
1478 			return (ENOTACTIVE);
1479 		}
1480 	}
1481 
1482 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1483 
1484 	/* Setup and initialize the transmit structures */
1485 	e1000g_tx_setup(Adapter);
1486 	msec_delay(5);
1487 
1488 	/* Setup and initialize the receive structures */
1489 	e1000g_rx_setup(Adapter);
1490 	msec_delay(5);
1491 
1492 	e1000g_mask_interrupt(Adapter);
1493 	if (Adapter->tx_intr_enable)
1494 		e1000g_mask_tx_interrupt(Adapter);
1495 
1496 	Adapter->started = B_TRUE;
1497 	Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
1498 
1499 	rw_exit(&Adapter->chip_lock);
1500 
1501 	/* Enable and start the watchdog timer */
1502 	enable_watchdog_timer(Adapter);
1503 
1504 	return (0);
1505 }
1506 
1507 static void
1508 e1000g_m_stop(void *arg)
1509 {
1510 	struct e1000g *Adapter = (struct e1000g *)arg;
1511 
1512 	e1000g_stop(Adapter, B_TRUE);
1513 }
1514 
1515 static void
1516 e1000g_stop(struct e1000g *Adapter, boolean_t global)
1517 {
1518 	/* Set stop flags */
1519 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1520 
1521 	Adapter->started = B_FALSE;
1522 	Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT;
1523 
1524 	rw_exit(&Adapter->chip_lock);
1525 
1526 	/* Drain tx sessions */
1527 	(void) e1000g_tx_drain(Adapter);
1528 
1529 	/* Disable and stop all the timers */
1530 	disable_watchdog_timer(Adapter);
1531 	stop_link_timer(Adapter);
1532 	stop_82547_timer(Adapter->tx_ring);
1533 
1534 	/* Stop the chip and release pending resources */
1535 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1536 
1537 	e1000g_clear_all_interrupts(Adapter);
1538 	e1000_reset_hw(&Adapter->shared);
1539 
1540 	/* Release resources still held by the TX descriptors */
1541 	e1000g_tx_clean(Adapter);
1542 
1543 	/* Clean the pending rx jumbo packet fragment */
1544 	e1000g_rx_clean(Adapter);
1545 
1546 	rw_exit(&Adapter->chip_lock);
1547 
1548 	if (global)
1549 		e1000g_release_dma_resources(Adapter);
1550 }
1551 
1552 static void
1553 e1000g_rx_clean(struct e1000g *Adapter)
1554 {
1555 	e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
1556 
1557 	if (rx_ring->rx_mblk != NULL) {
1558 		freemsg(rx_ring->rx_mblk);
1559 		rx_ring->rx_mblk = NULL;
1560 		rx_ring->rx_mblk_tail = NULL;
1561 		rx_ring->rx_mblk_len = 0;
1562 	}
1563 }
1564 
1565 static void
1566 e1000g_tx_clean(struct e1000g *Adapter)
1567 {
1568 	e1000g_tx_ring_t *tx_ring;
1569 	p_tx_sw_packet_t packet;
1570 	mblk_t *mp;
1571 	mblk_t *nmp;
1572 	uint32_t packet_count;
1573 
1574 	tx_ring = Adapter->tx_ring;
1575 
1576 	/*
1577 	 * Here we don't need to protect the lists with
1578 	 * the usedlist_lock and freelist_lock, because they
1579 	 * are already protected by the chip_lock.
1580 	 */
1581 	mp = NULL;
1582 	nmp = NULL;
1583 	packet_count = 0;
1584 	packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list);
1585 	while (packet != NULL) {
1586 		if (packet->mp != NULL) {
1587 			/* Assemble the message chain */
1588 			if (mp == NULL) {
1589 				mp = packet->mp;
1590 				nmp = packet->mp;
1591 			} else {
1592 				nmp->b_next = packet->mp;
1593 				nmp = packet->mp;
1594 			}
1595 			/* Disconnect the message from the sw packet */
1596 			packet->mp = NULL;
1597 		}
1598 
1599 		e1000g_free_tx_swpkt(packet);
1600 		packet_count++;
1601 
1602 		packet = (p_tx_sw_packet_t)
1603 		    QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link);
1604 	}
1605 
1606 	if (mp != NULL) {
1607 		mutex_enter(&tx_ring->mblks_lock);
1608 		if (tx_ring->mblks.head == NULL) {
1609 			tx_ring->mblks.head = mp;
1610 			tx_ring->mblks.tail = nmp;
1611 		} else {
1612 			tx_ring->mblks.tail->b_next = mp;
1613 			tx_ring->mblks.tail = nmp;
1614 		}
1615 		mutex_exit(&tx_ring->mblks_lock);
1616 	}
1617 
1618 	ddi_intr_trigger_softint(Adapter->tx_softint_handle, NULL);
1619 
1620 	if (packet_count > 0) {
1621 		QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list);
1622 		QUEUE_INIT_LIST(&tx_ring->used_list);
1623 
1624 		/* Setup TX descriptor pointers */
1625 		tx_ring->tbd_next = tx_ring->tbd_first;
1626 		tx_ring->tbd_oldest = tx_ring->tbd_first;
1627 
1628 		/* Setup our HW Tx Head & Tail descriptor pointers */
1629 		E1000_WRITE_REG(&Adapter->shared, E1000_TDH, 0);
1630 		E1000_WRITE_REG(&Adapter->shared, E1000_TDT, 0);
1631 	}
1632 }
1633 
1634 static boolean_t
1635 e1000g_tx_drain(struct e1000g *Adapter)
1636 {
1637 	int i;
1638 	boolean_t done;
1639 	e1000g_tx_ring_t *tx_ring;
1640 
1641 	tx_ring = Adapter->tx_ring;
1642 
1643 	/* Allow up to TX_DRAIN_TIME ms for pending transmits to complete. */
1644 	for (i = 0; i < TX_DRAIN_TIME; i++) {
1645 		mutex_enter(&tx_ring->usedlist_lock);
1646 		done = IS_QUEUE_EMPTY(&tx_ring->used_list);
1647 		mutex_exit(&tx_ring->usedlist_lock);
1648 
1649 		if (done)
1650 			break;
1651 
1652 		msec_delay(1);
1653 	}
1654 
1655 	return (done);
1656 }
1657 
1658 static boolean_t
1659 e1000g_rx_drain(struct e1000g *Adapter)
1660 {
1661 	e1000g_rx_ring_t *rx_ring;
1662 	p_rx_sw_packet_t packet;
1663 	boolean_t done;
1664 
1665 	rx_ring = Adapter->rx_ring;
1666 	done = B_TRUE;
1667 
1668 	rw_enter(&e1000g_rx_detach_lock, RW_WRITER);
1669 
1670 	while (rx_ring->pending_list != NULL) {
1671 		packet = rx_ring->pending_list;
1672 		rx_ring->pending_list =
1673 		    rx_ring->pending_list->next;
1674 
1675 		if (packet->flag == E1000G_RX_SW_STOP) {
1676 			packet->flag = E1000G_RX_SW_DETACH;
1677 			done = B_FALSE;
1678 		} else {
1679 			ASSERT(packet->flag == E1000G_RX_SW_FREE);
1680 			ASSERT(packet->mp == NULL);
1681 			e1000g_free_rx_sw_packet(packet);
1682 		}
1683 	}
1684 
1685 	rw_exit(&e1000g_rx_detach_lock);
1686 
1687 	return (done);
1688 }
1689 
1690 boolean_t
1691 e1000g_reset(struct e1000g *Adapter)
1692 {
1693 	e1000g_stop(Adapter, B_FALSE);
1694 
1695 	if (e1000g_start(Adapter, B_FALSE)) {
1696 		e1000g_log(Adapter, CE_WARN, "Reset failed");
1697 		return (B_FALSE);
1698 	}
1699 
1700 	return (B_TRUE);
1701 }
1702 
1703 /*
1704  * e1000g_intr_pciexpress - ISR for PCI Express chipsets
1705  *
1706  * This interrupt service routine is for PCI-Express adapters.
1707  * The ICR contents are valid only when the E1000_ICR_INT_ASSERTED
1708  * bit is set.
1709  */
1710 static uint_t
1711 e1000g_intr_pciexpress(caddr_t arg)
1712 {
1713 	struct e1000g *Adapter;
1714 	uint32_t icr;
1715 
1716 	Adapter = (struct e1000g *)arg;
1717 	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
1718 
1719 	if (icr & E1000_ICR_INT_ASSERTED) {
1720 		/*
1721 		 * E1000_ICR_INT_ASSERTED bit was set:
1722 		 * Read(Clear) the ICR, claim this interrupt,
1723 		 * look for work to do.
1724 		 */
1725 		e1000g_intr_work(Adapter, icr);
1726 		return (DDI_INTR_CLAIMED);
1727 	} else {
1728 		/*
1729 		 * E1000_ICR_INT_ASSERTED bit was not set:
1730 		 * Don't claim this interrupt, return immediately.
1731 		 */
1732 		return (DDI_INTR_UNCLAIMED);
1733 	}
1734 }
1735 
1736 /*
1737  * e1000g_intr - ISR for PCI/PCI-X chipsets
1738  *
1739  * This interrupt service routine is for PCI/PCI-X adapters.
1740  * We check the ICR contents regardless of whether the
1741  * E1000_ICR_INT_ASSERTED bit is set.
1742  */
1743 static uint_t
1744 e1000g_intr(caddr_t arg)
1745 {
1746 	struct e1000g *Adapter;
1747 	uint32_t icr;
1748 
1749 	Adapter = (struct e1000g *)arg;
1750 	icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
1751 
1752 	if (icr) {
1753 		/*
1754 		 * Any bit was set in ICR:
1755 		 * Read(Clear) the ICR, claim this interrupt,
1756 		 * look for work to do.
1757 		 */
1758 		e1000g_intr_work(Adapter, icr);
1759 		return (DDI_INTR_CLAIMED);
1760 	} else {
1761 		/*
1762 		 * No bit was set in ICR:
1763 		 * Don't claim this interrupt, return immediately.
1764 		 */
1765 		return (DDI_INTR_UNCLAIMED);
1766 	}
1767 }
1768 
1769 /*
1770  * e1000g_intr_work - actual processing of ISR
1771  *
1772  * Read(clear) the ICR contents and call appropriate interrupt
1773  * processing routines.
1774  */
1775 static void
1776 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr)
1777 {
1778 	rw_enter(&Adapter->chip_lock, RW_READER);
1779 	/*
1780 	 * Here we need to check the "started" flag within the chip_lock to
1781 	 * ensure the receive routine will not execute when the adapter is
1782 	 * being reset.
1783 	 */
1784 	if (!Adapter->started) {
1785 		rw_exit(&Adapter->chip_lock);
1786 		return;
1787 	}
1788 
1789 	if (icr & E1000_ICR_RXT0) {
1790 		mblk_t *mp;
1791 
1792 		mutex_enter(&Adapter->rx_ring->rx_lock);
1793 		mp = e1000g_receive(Adapter);
1794 		mutex_exit(&Adapter->rx_ring->rx_lock);
1795 
1796 		rw_exit(&Adapter->chip_lock);
1797 
1798 		if (mp != NULL)
1799 			mac_rx(Adapter->mh, Adapter->mrh, mp);
1800 	} else
1801 		rw_exit(&Adapter->chip_lock);
1802 
1803 	/*
1804 	 * The receive sequence error (RXSEQ) and link status change (LSC)
1805 	 * interrupts are checked to detect that the cable has been pulled
1806 	 * out. For the Wiseman 2.0 silicon, a receive sequence error
1807 	 * interrupt is an indication that the cable is not connected.
1808 	 */
1809 	if ((icr & E1000_ICR_RXSEQ) ||
1810 	    (icr & E1000_ICR_LSC) ||
1811 	    (icr & E1000_ICR_GPI_EN1)) {
1812 		boolean_t link_changed;
1813 		timeout_id_t tid = 0;
1814 
1815 		stop_watchdog_timer(Adapter);
1816 
1817 		mutex_enter(&Adapter->link_lock);
1818 		/* e1000g_link_check takes care of link status change */
1819 		link_changed = e1000g_link_check(Adapter);
1820 		/*
1821 		 * If the link timer has not timed out, we'll not notify
1822 		 * the upper layer with any link state until the link is up.
1823 		 */
1824 		if (link_changed && !Adapter->link_complete) {
1825 			if (Adapter->link_state == LINK_STATE_UP) {
1826 				Adapter->link_complete = B_TRUE;
1827 				tid = Adapter->link_tid;
1828 				Adapter->link_tid = 0;
1829 			} else {
1830 				link_changed = B_FALSE;
1831 			}
1832 		}
1833 		mutex_exit(&Adapter->link_lock);
1834 
1835 		if (link_changed) {
1836 			if (tid != 0)
1837 				(void) untimeout(tid);
1838 
1839 			/*
1840 			 * Workaround for esb2. Data stuck in fifo on a link
1841 			 * down event. Reset the adapter to recover it.
1842 			 */
1843 			if ((Adapter->link_state == LINK_STATE_DOWN) &&
1844 			    (Adapter->shared.mac.type == e1000_80003es2lan))
1845 				(void) e1000g_reset(Adapter);
1846 
1847 			mac_link_update(Adapter->mh, Adapter->link_state);
1848 		}
1849 
1850 		start_watchdog_timer(Adapter);
1851 	}
1852 
1853 	if (icr & E1000G_ICR_TX_INTR) {
1854 		e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
1855 
1856 		if (!Adapter->tx_intr_enable)
1857 			e1000g_clear_tx_interrupt(Adapter);
1858 		/* Schedule the re-transmit */
1859 		if (tx_ring->resched_needed) {
1860 			E1000G_STAT(tx_ring->stat_reschedule);
1861 			tx_ring->resched_needed = B_FALSE;
1862 			mac_tx_update(Adapter->mh);
1863 		}
1864 		if (Adapter->tx_intr_enable) {
1865 			/* Recycle the tx descriptors */
1866 			rw_enter(&Adapter->chip_lock, RW_READER);
1867 			E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr);
1868 			e1000g_recycle(tx_ring);
1869 			rw_exit(&Adapter->chip_lock);
1870 			/* Free the recycled messages */
1871 			ddi_intr_trigger_softint(Adapter->tx_softint_handle,
1872 			    NULL);
1873 		}
1874 	}
1875 }
1876 
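/*
 * e1000g_init_unicst - initialize the unicast (RAR) address slots
 *
 * On the first initialization, the default MAC address is programmed
 * into receive address register (RAR) slot 0 and the remaining slots
 * are marked unused.  On a re-initialization, the saved default
 * address is restored and the RAR entries are re-programmed from the
 * software slot table.
 */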
1877 static void
1878 e1000g_init_unicst(struct e1000g *Adapter)
1879 {
1880 	struct e1000_hw *hw;
1881 	int slot;
1882 
1883 	hw = &Adapter->shared;
1884 
1885 	if (Adapter->init_count == 0) {
1886 		/* Initialize the multiple unicast addresses */
1887 		Adapter->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
1888 
1889 		if ((hw->mac.type == e1000_82571) &&
1890 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
1891 			Adapter->unicst_total--;
1892 
1893 		Adapter->unicst_avail = Adapter->unicst_total - 1;
1894 
1895 		/* Store the default mac address */
1896 		e1000_rar_set(hw, hw->mac.addr, 0);
1897 		if ((hw->mac.type == e1000_82571) &&
1898 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
1899 			e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY);
1900 
1901 		bcopy(hw->mac.addr, Adapter->unicst_addr[0].mac.addr,
1902 		    ETHERADDRL);
1903 		Adapter->unicst_addr[0].mac.set = 1;
1904 
1905 		for (slot = 1; slot < Adapter->unicst_total; slot++)
1906 			Adapter->unicst_addr[slot].mac.set = 0;
1907 	} else {
1908 		/* Recover the default mac address */
1909 		bcopy(Adapter->unicst_addr[0].mac.addr, hw->mac.addr,
1910 		    ETHERADDRL);
1911 
1912 		/* Store the default mac address */
1913 		e1000_rar_set(hw, hw->mac.addr, 0);
1914 		if ((hw->mac.type == e1000_82571) &&
1915 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
1916 			e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY);
1917 
1918 		/* Re-configure the RAR registers */
1919 		for (slot = 1; slot < Adapter->unicst_total; slot++)
1920 			e1000_rar_set(hw,
1921 			    Adapter->unicst_addr[slot].mac.addr, slot);
1922 	}
1923 }
1924 
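/*
 * e1000g_m_unicst - mac entry point to set the primary unicast address
 *
 * The new address becomes the default address in slot 0.
 */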
1925 static int
1926 e1000g_m_unicst(void *arg, const uint8_t *mac_addr)
1927 {
1928 	struct e1000g *Adapter;
1929 
1930 	Adapter = (struct e1000g *)arg;
1931 
1932 	/* Store the default MAC address */
1933 	bcopy(mac_addr, Adapter->shared.mac.addr, ETHERADDRL);
1934 
1935 	/* Set MAC address in address slot 0, which is the default address */
1936 	return (e1000g_unicst_set(Adapter, mac_addr, 0));
1937 }
1938 
1939 static int
1940 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr,
1941     mac_addr_slot_t slot)
1942 {
1943 	struct e1000_hw *hw;
1944 
1945 	hw = &Adapter->shared;
1946 
1947 	rw_enter(&Adapter->chip_lock, RW_WRITER);
1948 
1949 #ifndef NO_82542_SUPPORT
1950 	/*
1951 	 * The first revision of Wiseman silicon (rev 2.0) has an errata
1952 	 * that requires the receiver to be in reset when any of the
1953 	 * receive address registers (RAR regs) are accessed.  The first
1954 	 * rev of Wiseman silicon also requires MWI to be disabled when
1955 	 * a global reset or a receive reset is issued.  So before we
1956 	 * initialize the RARs, we check the rev of the Wiseman controller
1957 	 * and work around any necessary HW errata.
1958 	 */
1959 	if ((hw->mac.type == e1000_82542) &&
1960 	    (hw->revision_id == E1000_REVISION_2)) {
1961 		e1000_pci_clear_mwi(hw);
1962 		E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
1963 		msec_delay(5);
1964 	}
1965 #endif
1966 
1967 	bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr, ETHERADDRL);
1968 	e1000_rar_set(hw, (uint8_t *)mac_addr, slot);
1969 
1970 	if (slot == 0) {
1971 		if ((hw->mac.type == e1000_82571) &&
1972 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
1973 			e1000_rar_set(hw, (uint8_t *)mac_addr, LAST_RAR_ENTRY);
1974 	}
1975 
1976 #ifndef NO_82542_SUPPORT
1977 	/*
1978 	 * If we are using Wiseman rev 2.0 silicon, we will have previously
1979 	 * put the receiver in reset, and disabled MWI, to work around some
1980 	 * HW errata.  Now we should take the receiver out of reset, and
1981 	 * re-enable MWI if it was previously enabled by the PCI BIOS.
1982 	 */
1983 	if ((hw->mac.type == e1000_82542) &&
1984 	    (hw->revision_id == E1000_REVISION_2)) {
1985 		E1000_WRITE_REG(hw, E1000_RCTL, 0);
1986 		msec_delay(1);
1987 		if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1988 			e1000_pci_set_mwi(hw);
1989 		e1000g_rx_setup(Adapter);
1990 	}
1991 #endif
1992 
1993 	rw_exit(&Adapter->chip_lock);
1994 
1995 	return (0);
1996 }
1997 
1998 /*
1999  * e1000g_m_unicst_add() - will find an unused address slot, set the
2000  * address value to the one specified, reserve that slot and enable
2001  * the NIC to start filtering on the new MAC address.
2002  * Returns 0 on success.
2003  */
2004 static int
2005 e1000g_m_unicst_add(void *arg, mac_multi_addr_t *maddr)
2006 {
2007 	struct e1000g *Adapter = (struct e1000g *)arg;
2008 	mac_addr_slot_t slot;
2009 	int err;
2010 
2011 	if (mac_unicst_verify(Adapter->mh,
2012 	    maddr->mma_addr, maddr->mma_addrlen) == B_FALSE)
2013 		return (EINVAL);
2014 
2015 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2016 	if (Adapter->unicst_avail == 0) {
2017 		/* no slots available */
2018 		rw_exit(&Adapter->chip_lock);
2019 		return (ENOSPC);
2020 	}
2021 
2022 	/*
2023 	 * Primary/default address is in slot 0. The next addresses
2024 	 * are the multiple MAC addresses. So multiple MAC address 0
2025 	 * is in slot 1, 1 in slot 2, and so on. So the first multiple
2026 	 * MAC address resides in slot 1.
2027 	 */
2028 	for (slot = 1; slot < Adapter->unicst_total; slot++) {
2029 		if (Adapter->unicst_addr[slot].mac.set == 0) {
2030 			Adapter->unicst_addr[slot].mac.set = 1;
2031 			break;
2032 		}
2033 	}
2034 
2035 	ASSERT((slot > 0) && (slot < Adapter->unicst_total));
2036 
2037 	Adapter->unicst_avail--;
2038 	rw_exit(&Adapter->chip_lock);
2039 
2040 	maddr->mma_slot = slot;
2041 
2042 	if ((err = e1000g_unicst_set(Adapter, maddr->mma_addr, slot)) != 0) {
2043 		rw_enter(&Adapter->chip_lock, RW_WRITER);
2044 		Adapter->unicst_addr[slot].mac.set = 0;
2045 		Adapter->unicst_avail++;
2046 		rw_exit(&Adapter->chip_lock);
2047 	}
2048 
2049 	return (err);
2050 }
2051 
2052 /*
2053  * e1000g_m_unicst_remove() - removes a MAC address that was added by a
2054  * call to e1000g_m_unicst_add(). The slot number that was returned in
2055  * e1000g_m_unicst_add() is passed in the call to remove the address.
2056  * Returns 0 on success.
2057  */
2058 static int
2059 e1000g_m_unicst_remove(void *arg, mac_addr_slot_t slot)
2060 {
2061 	struct e1000g *Adapter = (struct e1000g *)arg;
2062 	int err;
2063 
2064 	if ((slot <= 0) || (slot >= Adapter->unicst_total))
2065 		return (EINVAL);
2066 
2067 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2068 	if (Adapter->unicst_addr[slot].mac.set == 1) {
2069 		Adapter->unicst_addr[slot].mac.set = 0;
2070 		Adapter->unicst_avail++;
2071 		rw_exit(&Adapter->chip_lock);
2072 
2073 		/* Copy the default address to the passed slot */
2074 		if ((err = e1000g_unicst_set(Adapter,
2075 		    Adapter->unicst_addr[0].mac.addr, slot)) != 0) {
2076 			rw_enter(&Adapter->chip_lock, RW_WRITER);
2077 			Adapter->unicst_addr[slot].mac.set = 1;
2078 			Adapter->unicst_avail--;
2079 			rw_exit(&Adapter->chip_lock);
2080 		}
2081 		return (err);
2082 	}
2083 	rw_exit(&Adapter->chip_lock);
2084 
2085 	return (EINVAL);
2086 }
2087 
2088 /*
2089  * e1000g_m_unicst_modify() - modifies the value of an address that
2090  * has been added by e1000g_m_unicst_add(). The new address, address
2091  * length and the slot number that was returned in the call to add
2092  * should be passed to e1000g_m_unicst_modify(). mma_flags should be
2093  * set to 0. Returns 0 on success.
2094  */
2095 static int
2096 e1000g_m_unicst_modify(void *arg, mac_multi_addr_t *maddr)
2097 {
2098 	struct e1000g *Adapter = (struct e1000g *)arg;
2099 	mac_addr_slot_t slot;
2100 
2101 	if (mac_unicst_verify(Adapter->mh,
2102 	    maddr->mma_addr, maddr->mma_addrlen) == B_FALSE)
2103 		return (EINVAL);
2104 
2105 	slot = maddr->mma_slot;
2106 
2107 	if ((slot <= 0) || (slot >= Adapter->unicst_total))
2108 		return (EINVAL);
2109 
2110 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2111 	if (Adapter->unicst_addr[slot].mac.set == 1) {
2112 		rw_exit(&Adapter->chip_lock);
2113 
2114 		return (e1000g_unicst_set(Adapter, maddr->mma_addr, slot));
2115 	}
2116 	rw_exit(&Adapter->chip_lock);
2117 
2118 	return (EINVAL);
2119 }
2120 
2121 /*
2122  * e1000g_m_unicst_get() - will get the MAC address and all other
2123  * information related to the address slot passed in mac_multi_addr_t.
2124  * mma_flags should be set to 0 in the call.
2125  * On return, mma_flags can take the following values:
2126  * 1) MMAC_SLOT_UNUSED
2127  * 2) MMAC_SLOT_USED | MMAC_VENDOR_ADDR
2128  * 3) MMAC_SLOT_UNUSED | MMAC_VENDOR_ADDR
2129  * 4) MMAC_SLOT_USED
2130  */
2131 static int
2132 e1000g_m_unicst_get(void *arg, mac_multi_addr_t *maddr)
2133 {
2134 	struct e1000g *Adapter = (struct e1000g *)arg;
2135 	mac_addr_slot_t slot;
2136 
2137 	slot = maddr->mma_slot;
2138 
2139 	if ((slot <= 0) || (slot >= Adapter->unicst_total))
2140 		return (EINVAL);
2141 
2142 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2143 	if (Adapter->unicst_addr[slot].mac.set == 1) {
2144 		bcopy(Adapter->unicst_addr[slot].mac.addr,
2145 		    maddr->mma_addr, ETHERADDRL);
2146 		maddr->mma_flags = MMAC_SLOT_USED;
2147 	} else {
2148 		maddr->mma_flags = MMAC_SLOT_UNUSED;
2149 	}
2150 	rw_exit(&Adapter->chip_lock);
2151 
2152 	return (0);
2153 }
2154 
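/*
 * multicst_add - add a multicast address to the software table
 *
 * The address must have the multicast bit set and the table must not
 * be full.  After the table is updated, the hardware multicast
 * filters are re-programmed with interrupts masked off.
 */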
2155 static int
2156 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr)
2157 {
2158 	struct e1000_hw *hw = &Adapter->shared;
2159 	unsigned i;
2160 	int res = 0;
2161 
2162 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2163 
2164 	if ((multiaddr[0] & 01) == 0) {
2165 		res = EINVAL;
2166 		goto done;
2167 	}
2168 
2169 	if (Adapter->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2170 		res = ENOENT;
2171 		goto done;
2172 	}
2173 
2174 	bcopy(multiaddr,
2175 	    &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL);
2176 	Adapter->mcast_count++;
2177 
2178 	/*
2179 	 * Update the MC table in the hardware
2180 	 */
2181 	e1000g_clear_interrupt(Adapter);
2182 
2183 	e1000g_setup_multicast(Adapter);
2184 
2185 #ifndef NO_82542_SUPPORT
2186 	if ((hw->mac.type == e1000_82542) &&
2187 	    (hw->revision_id == E1000_REVISION_2))
2188 		e1000g_rx_setup(Adapter);
2189 #endif
2190 
2191 	e1000g_mask_interrupt(Adapter);
2192 
2193 done:
2194 	rw_exit(&Adapter->chip_lock);
2195 	return (res);
2196 }
2197 
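/*
 * multicst_remove - remove a multicast address from the software table
 *
 * The matching entry is removed by shifting the later entries down,
 * and the hardware multicast filters are then re-programmed with
 * interrupts masked off.
 */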
2198 static int
2199 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr)
2200 {
2201 	struct e1000_hw *hw = &Adapter->shared;
2202 	unsigned i;
2203 
2204 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2205 
2206 	for (i = 0; i < Adapter->mcast_count; i++) {
2207 		if (bcmp(multiaddr, &Adapter->mcast_table[i],
2208 		    ETHERADDRL) == 0) {
2209 			for (i++; i < Adapter->mcast_count; i++) {
2210 				Adapter->mcast_table[i - 1] =
2211 				    Adapter->mcast_table[i];
2212 			}
2213 			Adapter->mcast_count--;
2214 			break;
2215 		}
2216 	}
2217 
2218 	/*
2219 	 * Update the MC table in the hardware
2220 	 */
2221 	e1000g_clear_interrupt(Adapter);
2222 
2223 	e1000g_setup_multicast(Adapter);
2224 
2225 #ifndef NO_82542_SUPPORT
2226 	if ((hw->mac.type == e1000_82542) &&
2227 	    (hw->revision_id == E1000_REVISION_2))
2228 		e1000g_rx_setup(Adapter);
2229 #endif
2230 
2231 	e1000g_mask_interrupt(Adapter);
2232 
2233 done:
2234 	rw_exit(&Adapter->chip_lock);
2235 	return (0);
2236 }
2237 
2238 /*
2239  * e1000g_setup_multicast - setup multicast data structures
2240  *
2241  * This routine initializes all of the multicast related structures.
2242  */
2243 void
2244 e1000g_setup_multicast(struct e1000g *Adapter)
2245 {
2246 	uint8_t *mc_addr_list;
2247 	uint32_t mc_addr_count;
2248 	uint32_t rctl;
2249 	struct e1000_hw *hw;
2250 
2251 	hw = &Adapter->shared;
2252 
2253 	/*
2254 	 * The e1000g has the ability to do perfect filtering of 16
2255 	 * addresses. The driver uses one of the e1000g's 16 receive
2256 	 * address registers for its node/network/mac/individual address.
2257 	 * So, we have room for up to 15 multicast addresses in the CAM;
2258 	 * additional MC addresses are handled by the MTA (Multicast Table
2259 	 * Array).
2260 	 */
2261 
2262 	rctl = E1000_READ_REG(hw, E1000_RCTL);
2263 
2264 	mc_addr_list = (uint8_t *)Adapter->mcast_table;
2265 
2266 	if (Adapter->mcast_count > MAX_NUM_MULTICAST_ADDRESSES) {
2267 		E1000G_DEBUGLOG_1(Adapter, CE_WARN,
2268 		    "Adapter requested more than %d MC Addresses.\n",
2269 		    MAX_NUM_MULTICAST_ADDRESSES);
2270 		mc_addr_count = MAX_NUM_MULTICAST_ADDRESSES;
2271 	} else {
2272 		/*
2273 		 * Set the number of MC addresses that we are being
2274 		 * requested to use
2275 		 */
2276 		mc_addr_count = Adapter->mcast_count;
2277 	}
2278 #ifndef NO_82542_SUPPORT
2279 	/*
2280 	 * The Wiseman 2.0 silicon has an errata by which the receiver will
2281 	 * hang while writing to the receive address registers if the receiver
2282 	 * is not in reset before writing to the registers. Updating the RAR
2283 	 * is done during the setting up of the multicast table, hence the
2284 	 * receiver has to be put in reset before updating the multicast table
2285 	 * and then taken out of reset at the end.
2286 	 */
2287 	/*
2288 	 * If MWI was enabled, disable it before issuing the global
2289 	 * reset to the hardware.
2290 	 */
2291 	/*
2292 	 * Only required for WISEMAN_2_0
2293 	 */
2294 	if ((hw->mac.type == e1000_82542) &&
2295 	    (hw->revision_id == E1000_REVISION_2)) {
2296 		e1000_pci_clear_mwi(hw);
2297 		/*
2298 		 * The e1000g must be in reset before changing any RA
2299 		 * registers. Reset receive unit.  The chip will remain in
2300 		 * the reset state until software explicitly restarts it.
2301 		 */
2302 		E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2303 		/* Allow receiver time to go in to reset */
2304 		msec_delay(5);
2305 	}
2306 #endif
2307 
2308 	e1000_mc_addr_list_update(hw, mc_addr_list, mc_addr_count,
2309 	    Adapter->unicst_total, hw->mac.rar_entry_count);
2310 
2311 #ifndef NO_82542_SUPPORT
2312 	/*
2313 	 * Only for Wiseman_2_0
2314 	 * If MWI was enabled, re-enable it after issuing (as we
2315 	 * disabled it above) the receive reset command.
2316 	 * Wainwright does not have a receive reset command; the only thing
2317 	 * close to it is a global reset, which would require tx setup as well.
2318 	 */
2319 	if ((hw->mac.type == e1000_82542) &&
2320 	    (hw->revision_id == E1000_REVISION_2)) {
2321 		/*
2322 		 * If MWI was enabled, re-enable it after issuing the
2323 		 * global or receive reset to the hardware.
2324 		 */
2325 
2326 		/*
2327 		 * Take receiver out of reset
2328 		 * clear E1000_RCTL_RST bit (and all others)
2329 		 */
2330 		E1000_WRITE_REG(hw, E1000_RCTL, 0);
2331 		msec_delay(5);
2332 		if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2333 			e1000_pci_set_mwi(hw);
2334 	}
2335 #endif
2336 
2337 	/*
2338 	 * Restore original value
2339 	 */
2340 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2341 }
2342 
2343 int
2344 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
2345 {
2346 	struct e1000g *Adapter = (struct e1000g *)arg;
2347 
2348 	return ((add) ? multicst_add(Adapter, addr)
2349 	    : multicst_remove(Adapter, addr));
2350 }
2351 
2352 int
2353 e1000g_m_promisc(void *arg, boolean_t on)
2354 {
2355 	struct e1000g *Adapter = (struct e1000g *)arg;
2356 	uint32_t rctl;
2357 
2358 	rw_enter(&Adapter->chip_lock, RW_WRITER);
2359 
2360 	rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
2361 
2362 	if (on)
2363 		rctl |=
2364 		    (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
2365 	else
2366 		rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
2367 
2368 	E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
2369 
2370 	Adapter->e1000g_promisc = on;
2371 
2372 	rw_exit(&Adapter->chip_lock);
2373 
2374 	return (0);
2375 }
2376 
2377 static boolean_t
2378 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2379 {
2380 	struct e1000g *Adapter = (struct e1000g *)arg;
2381 	struct e1000_hw *hw = &Adapter->shared;
2382 
2383 	switch (cap) {
2384 	case MAC_CAPAB_HCKSUM: {
2385 		uint32_t *txflags = cap_data;
2386 		/*
2387 		 * Checksum on/off selection via global parameters.
2388 		 *
2389 		 * If the chip is flagged as not capable of (correctly)
2390 		 * handling checksumming, we don't enable it on either
2391 		 * Rx or Tx side.  Otherwise, we take this chip's settings
2392 		 * from the patchable global defaults.
2393 		 *
2394 		 * We advertise our capabilities only if TX offload is
2395 		 * enabled.  On receive, the stack will accept checksummed
2396 		 * packets anyway, even if we haven't said we can deliver
2397 		 * them.
2398 		 */
2399 		switch (hw->mac.type) {
2400 		case e1000_82540:
2401 		case e1000_82544:
2402 		case e1000_82545:
2403 		case e1000_82545_rev_3:
2404 		case e1000_82546:
2405 		case e1000_82546_rev_3:
2406 		case e1000_82571:
2407 		case e1000_82572:
2408 		case e1000_82573:
2409 		case e1000_80003es2lan:
2410 			*txflags = HCKSUM_IPHDRCKSUM | HCKSUM_INET_PARTIAL;
2411 			break;
2412 
2413 		/*
2414 		 * For the following Intel PRO/1000 chipsets, we have not
2415 		 * tested the hardware checksum offload capability, so we
2416 		 * disable the capability for them.
2417 		 *	e1000_82542,
2418 		 *	e1000_82543,
2419 		 *	e1000_82541,
2420 		 *	e1000_82541_rev_2,
2421 		 *	e1000_82547,
2422 		 *	e1000_82547_rev_2,
2423 		 */
2424 		default:
2425 			return (B_FALSE);
2426 		}
2427 
2428 		break;
2429 	}
2430 	case MAC_CAPAB_POLL:
2431 		/*
2432 		 * There's nothing for us to fill in; simply returning
2433 		 * B_TRUE to state that we support polling is sufficient.
2434 		 */
2435 		break;
2436 
2437 	case MAC_CAPAB_MULTIADDRESS: {
2438 		multiaddress_capab_t *mmacp = cap_data;
2439 
2440 		/*
2441 		 * The number of MAC addresses made available by
2442 		 * this capability is one less than the total as
2443 		 * the primary address in slot 0 is counted in
2444 		 * the total.
2445 		 */
2446 		mmacp->maddr_naddr = Adapter->unicst_total - 1;
2447 		mmacp->maddr_naddrfree = Adapter->unicst_avail;
2448 		/* No multiple factory addresses, set mma_flag to 0 */
2449 		mmacp->maddr_flag = 0;
2450 		mmacp->maddr_handle = Adapter;
2451 		mmacp->maddr_add = e1000g_m_unicst_add;
2452 		mmacp->maddr_remove = e1000g_m_unicst_remove;
2453 		mmacp->maddr_modify = e1000g_m_unicst_modify;
2454 		mmacp->maddr_get = e1000g_m_unicst_get;
2455 		mmacp->maddr_reserve = NULL;
2456 		break;
2457 	}
2458 	default:
2459 		return (B_FALSE);
2460 	}
2461 	return (B_TRUE);
2462 }
2463 
2464 /*
2465  * e1000g_get_conf - get configurations set in e1000g.conf
2466  *
2467  * This routine gets user-configured values out of the configuration
2468  * file e1000g.conf.
2469  *
2470  * For each configurable value, there is a minimum, a maximum, and a
2471  * default.
2472  * If the user does not configure a value, use the default.
2473  * If the user configures below the minimum, use the minimum.
2474  * If the user configures above the maximum, use the maximum.
2475  */
2476 static void
2477 e1000g_get_conf(struct e1000g *Adapter)
2478 {
2479 	struct e1000_hw *hw = &Adapter->shared;
2480 	boolean_t tbi_compatibility = B_FALSE;
2481 
2482 	/*
2483 	 * get each configurable property from e1000g.conf
2484 	 */
2485 
2486 	/*
2487 	 * NumTxDescriptors
2488 	 */
2489 	Adapter->tx_desc_num =
2490 	    e1000g_get_prop(Adapter, "NumTxDescriptors",
2491 	    MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR,
2492 	    DEFAULT_NUM_TX_DESCRIPTOR);
2493 
2494 	/*
2495 	 * NumRxDescriptors
2496 	 */
2497 	Adapter->rx_desc_num =
2498 	    e1000g_get_prop(Adapter, "NumRxDescriptors",
2499 	    MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR,
2500 	    DEFAULT_NUM_RX_DESCRIPTOR);
2501 
2502 	/*
2503 	 * NumRxFreeList
2504 	 */
2505 	Adapter->rx_freelist_num =
2506 	    e1000g_get_prop(Adapter, "NumRxFreeList",
2507 	    MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST,
2508 	    DEFAULT_NUM_RX_FREELIST);
2509 
2510 	/*
2511 	 * NumTxPacketList
2512 	 */
2513 	Adapter->tx_freelist_num =
2514 	    e1000g_get_prop(Adapter, "NumTxPacketList",
2515 	    MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST,
2516 	    DEFAULT_NUM_TX_FREELIST);
2517 
2518 	/*
2519 	 * FlowControl
2520 	 */
2521 	hw->mac.fc_send_xon = B_TRUE;
2522 	hw->mac.fc =
2523 	    e1000g_get_prop(Adapter, "FlowControl",
2524 	    e1000_fc_none, 4, DEFAULT_FLOW_CONTROL);
2525 	/* 4 is the setting that says "let the eeprom decide" */
2526 	if (hw->mac.fc == 4)
2527 		hw->mac.fc = e1000_fc_default;
2528 
2529 	/*
2530 	 * Max Num Receive Packets on Interrupt
2531 	 */
2532 	Adapter->rx_limit_onintr =
2533 	    e1000g_get_prop(Adapter, "MaxNumReceivePackets",
2534 	    MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR,
2535 	    DEFAULT_RX_LIMIT_ON_INTR);
2536 
2537 	/*
2538 	 * PHY master slave setting
2539 	 */
2540 	hw->phy.ms_type =
2541 	    e1000g_get_prop(Adapter, "SetMasterSlave",
2542 	    e1000_ms_hw_default, e1000_ms_auto,
2543 	    e1000_ms_hw_default);
2544 
2545 	/*
2546 	 * Parameter which controls TBI mode workaround, which is only
2547 	 * needed on certain switches such as Cisco 6500/Foundry
2548 	 */
2549 	tbi_compatibility =
2550 	    e1000g_get_prop(Adapter, "TbiCompatibilityEnable",
2551 	    0, 1, DEFAULT_TBI_COMPAT_ENABLE);
2552 	e1000_set_tbi_compatibility_82543(hw, tbi_compatibility);
2553 
2554 	/*
2555 	 * MSI Enable
2556 	 */
2557 	Adapter->msi_enabled =
2558 	    e1000g_get_prop(Adapter, "MSIEnable",
2559 	    0, 1, DEFAULT_MSI_ENABLE);
2560 
2561 	/*
2562 	 * Interrupt Throttling Rate
2563 	 */
2564 	Adapter->intr_throttling_rate =
2565 	    e1000g_get_prop(Adapter, "intr_throttling_rate",
2566 	    MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
2567 	    DEFAULT_INTR_THROTTLING);
2568 
2569 	/*
2570 	 * Adaptive Interrupt Blanking Enable/Disable
2571 	 * It is enabled by default
2572 	 */
2573 	Adapter->intr_adaptive =
2574 	    (e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1) == 1) ?
2575 	    B_TRUE : B_FALSE;
2576 }
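
/*
 * For illustration only: the properties read above come from the
 * configuration file e1000g.conf, with one value per driver instance
 * (see e1000g_get_prop() below).  The values here are hypothetical,
 * not recommended settings:
 *
 *	NumTxDescriptors=2048,2048;
 *	NumRxDescriptors=2048,2048;
 *	FlowControl=3;
 *	MaxNumReceivePackets=256;
 *	intr_throttling_rate=550;
 */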
2577 
2578 /*
2579  * e1000g_get_prop - routine to read properties
2580  *
2581  * Get a user-configured property value out of the configuration
2582  * file e1000g.conf.
2583  *
2584  * Caller provides name of the property, a default value, a minimum
2585  * value, and a maximum value.
2586  *
2587  * Return configured value of the property, with default, minimum and
2588  * maximum properly applied.
2589  */
2590 static int
2591 e1000g_get_prop(struct e1000g *Adapter,	/* pointer to per-adapter structure */
2592     char *propname,		/* name of the property */
2593     int minval,			/* minimum acceptable value */
2594     int maxval,			/* maximum acceptable value */
2595     int defval)			/* default value */
2596 {
2597 	int propval;		/* value returned for requested property */
2598 	int *props;		/* pointer to array of properties returned */
2599 	uint_t nprops;		/* number of property values returned */
2600 
2601 	/*
2602 	 * get the array of properties from the config file
2603 	 */
2604 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip,
2605 	    DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) {
2606 		/* got some properties, test if we got enough */
2607 		if (Adapter->instance < nprops) {
2608 			propval = props[Adapter->instance];
2609 		} else {
2610 			/* not enough properties configured */
2611 			propval = defval;
2612 			E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
2613 			    "Not Enough %s values found in e1000g.conf"
2614 			    " - set to %d\n",
2615 			    propname, propval);
2616 		}
2617 
2618 		/* free memory allocated for properties */
2619 		ddi_prop_free(props);
2620 
2621 	} else {
2622 		propval = defval;
2623 	}
2624 
2625 	/*
2626 	 * enforce limits
2627 	 */
2628 	if (propval > maxval) {
2629 		propval = maxval;
2630 		E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
2631 		    "Too High %s value in e1000g.conf - set to %d\n",
2632 		    propname, propval);
2633 	}
2634 
2635 	if (propval < minval) {
2636 		propval = minval;
2637 		E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
2638 		    "Too Low %s value in e1000g.conf - set to %d\n",
2639 		    propname, propval);
2640 	}
2641 
2642 	return (propval);
2643 }
2644 
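/*
 * e1000g_link_check - check for a change in the link state
 *
 * Returns B_TRUE when the link state has changed since the last check.
 * On a link-up transition the negotiated speed and duplex are cached;
 * while the link stays down the SmartSpeed workaround is run, and once
 * the adapter is started the tx ring is cleaned after the link has
 * been down for too long.
 */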
2645 static boolean_t
2646 e1000g_link_check(struct e1000g *Adapter)
2647 {
2648 	uint16_t speed, duplex, phydata;
2649 	boolean_t link_changed = B_FALSE;
2650 	struct e1000_hw *hw;
2651 	uint32_t reg_tarc;
2652 
2653 	hw = &Adapter->shared;
2654 
2655 	if (e1000g_link_up(Adapter)) {
2656 		/*
2657 		 * The Link is up, check whether it was marked as down earlier
2658 		 */
2659 		if (Adapter->link_state != LINK_STATE_UP) {
2660 			e1000_get_speed_and_duplex(hw, &speed, &duplex);
2661 			Adapter->link_speed = speed;
2662 			Adapter->link_duplex = duplex;
2663 			Adapter->link_state = LINK_STATE_UP;
2664 			link_changed = B_TRUE;
2665 
2666 			Adapter->tx_link_down_timeout = 0;
2667 
2668 			if ((hw->mac.type == e1000_82571) ||
2669 			    (hw->mac.type == e1000_82572)) {
2670 				reg_tarc = E1000_READ_REG(hw, E1000_TARC0);
2671 				if (speed == SPEED_1000)
2672 					reg_tarc |= (1 << 21);
2673 				else
2674 					reg_tarc &= ~(1 << 21);
2675 				E1000_WRITE_REG(hw, E1000_TARC0, reg_tarc);
2676 			}
2677 		}
2678 		Adapter->smartspeed = 0;
2679 	} else {
2680 		if (Adapter->link_state != LINK_STATE_DOWN) {
2681 			Adapter->link_speed = 0;
2682 			Adapter->link_duplex = 0;
2683 			Adapter->link_state = LINK_STATE_DOWN;
2684 			link_changed = B_TRUE;
2685 
2686 			/*
2687 			 * SmartSpeed workaround for Tabor/TanaX: when the
2688 			 * driver loses link, disable auto master/slave
2689 			 * resolution.
2690 			 */
2691 			if (hw->phy.type == e1000_phy_igp) {
2692 				e1000_read_phy_reg(hw,
2693 				    PHY_1000T_CTRL, &phydata);
2694 				phydata |= CR_1000T_MS_ENABLE;
2695 				e1000_write_phy_reg(hw,
2696 				    PHY_1000T_CTRL, phydata);
2697 			}
2698 		} else {
2699 			e1000g_smartspeed(Adapter);
2700 		}
2701 
2702 		if (Adapter->started) {
2703 			if (Adapter->tx_link_down_timeout <
2704 			    MAX_TX_LINK_DOWN_TIMEOUT) {
2705 				Adapter->tx_link_down_timeout++;
2706 			} else if (Adapter->tx_link_down_timeout ==
2707 			    MAX_TX_LINK_DOWN_TIMEOUT) {
2708 				rw_enter(&Adapter->chip_lock, RW_WRITER);
2709 				e1000g_tx_clean(Adapter);
2710 				rw_exit(&Adapter->chip_lock);
2711 				Adapter->tx_link_down_timeout++;
2712 			}
2713 		}
2714 	}
2715 
2716 	return (link_changed);
2717 }
2718 
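/*
 * e1000g_local_timer - one-second watchdog routine
 *
 * Frees recycled tx messages, detects and recovers from tx stalls,
 * reports link state changes, restores a locally administered address
 * on 82571 controllers, runs PHY workarounds and re-arms the watchdog
 * timer.
 */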
2719 static void
2720 e1000g_local_timer(void *ws)
2721 {
2722 	struct e1000g *Adapter = (struct e1000g *)ws;
2723 	struct e1000_hw *hw;
2724 	e1000g_ether_addr_t ether_addr;
2725 	boolean_t link_changed;
2726 
2727 	hw = &Adapter->shared;
2728 
2729 	(void) e1000g_tx_freemsg(Adapter->tx_ring);
2730 
2731 	if (e1000g_stall_check(Adapter)) {
2732 		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
2733 		    "Tx stall detected. Activate automatic recovery.\n");
2734 		Adapter->reset_count++;
2735 		(void) e1000g_reset(Adapter);
2736 	}
2737 
2738 	link_changed = B_FALSE;
2739 	mutex_enter(&Adapter->link_lock);
2740 	if (Adapter->link_complete)
2741 		link_changed = e1000g_link_check(Adapter);
2742 	mutex_exit(&Adapter->link_lock);
2743 
2744 	if (link_changed) {
2745 		/*
2746 		 * Workaround for esb2. Data stuck in fifo on a link
2747 		 * down event. Reset the adapter to recover it.
2748 		 */
2749 		if ((Adapter->link_state == LINK_STATE_DOWN) &&
2750 		    (hw->mac.type == e1000_80003es2lan))
2751 			(void) e1000g_reset(Adapter);
2752 
2753 		mac_link_update(Adapter->mh, Adapter->link_state);
2754 	}
2755 
2756 	/*
2757 	 * With 82571 controllers, any locally administered address will
2758 	 * be overwritten when there is a reset on the other port.
2759 	 * Detect this circumstance and correct it.
2760 	 */
2761 	if ((hw->mac.type == e1000_82571) &&
2762 	    (e1000_get_laa_state_82571(hw) == B_TRUE)) {
2763 		ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0);
2764 		ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1);
2765 
2766 		ether_addr.reg.low = ntohl(ether_addr.reg.low);
2767 		ether_addr.reg.high = ntohl(ether_addr.reg.high);
2768 
2769 		if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) ||
2770 		    (ether_addr.mac.addr[4] != hw->mac.addr[1]) ||
2771 		    (ether_addr.mac.addr[3] != hw->mac.addr[2]) ||
2772 		    (ether_addr.mac.addr[2] != hw->mac.addr[3]) ||
2773 		    (ether_addr.mac.addr[1] != hw->mac.addr[4]) ||
2774 		    (ether_addr.mac.addr[0] != hw->mac.addr[5])) {
2775 			e1000_rar_set(hw, hw->mac.addr, 0);
2776 		}
2777 	}
2778 
2779 	/*
2780 	 * Long TTL workaround for 82541/82547
2781 	 */
2782 	e1000_igp_ttl_workaround_82547(hw);
2783 
2784 	/*
2785 	 * Check for Adaptive IFS settings. If there are lots of collisions,
2786 	 * change the value in steps...
2787 	 * These properties should only be set for 10/100
2788 	 */
2789 	if ((hw->media_type == e1000_media_type_copper) &&
2790 	    ((Adapter->link_speed == SPEED_100) ||
2791 	    (Adapter->link_speed == SPEED_10))) {
2792 		e1000_update_adaptive(hw);
2793 	}
2794 	/*
2795 	 * Set Timer Interrupts
2796 	 */
2797 	E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
2798 
2799 	restart_watchdog_timer(Adapter);
2800 }
2801 
2802 /*
2803  * The function e1000g_link_timer() is called when the timer for link setup
2804  * has expired, which indicates the completion of the link setup. The link
2805  * state will not be updated until the link setup is completed, and the
2806  * link state will not be sent to the upper layer through mac_link_update()
2807  * in this function. It will be updated in the local timer routine or the
2808  * interrupt service routine after the interface is started (plumbed).
2809  */
2810 static void
2811 e1000g_link_timer(void *arg)
2812 {
2813 	struct e1000g *Adapter = (struct e1000g *)arg;
2814 
2815 	mutex_enter(&Adapter->link_lock);
2816 	Adapter->link_complete = B_TRUE;
2817 	Adapter->link_tid = 0;
2818 	mutex_exit(&Adapter->link_lock);
2819 }
2820 
2821 /*
2822  * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf
2823  *
2824  * This function reads the forced speed and duplex for 10/100 Mbps speeds
2825  * and also for 1000 Mbps speeds from the e1000g.conf file
2826  */
2827 static void
2828 e1000g_force_speed_duplex(struct e1000g *Adapter)
2829 {
2830 	int forced;
2831 	struct e1000_mac_info *mac = &Adapter->shared.mac;
2832 	struct e1000_phy_info *phy = &Adapter->shared.phy;
2833 
2834 	/*
2835 	 * get value out of config file
2836 	 */
2837 	forced = e1000g_get_prop(Adapter, "ForceSpeedDuplex",
2838 	    GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY);
2839 
2840 	switch (forced) {
2841 	case GDIAG_10_HALF:
2842 		/*
2843 		 * Disable Auto Negotiation
2844 		 */
2845 		mac->autoneg = B_FALSE;
2846 		mac->forced_speed_duplex = ADVERTISE_10_HALF;
2847 		break;
2848 	case GDIAG_10_FULL:
2849 		/*
2850 		 * Disable Auto Negotiation
2851 		 */
2852 		mac->autoneg = B_FALSE;
2853 		mac->forced_speed_duplex = ADVERTISE_10_FULL;
2854 		break;
2855 	case GDIAG_100_HALF:
2856 		/*
2857 		 * Disable Auto Negotiation
2858 		 */
2859 		mac->autoneg = B_FALSE;
2860 		mac->forced_speed_duplex = ADVERTISE_100_HALF;
2861 		break;
2862 	case GDIAG_100_FULL:
2863 		/*
2864 		 * Disable Auto Negotiation
2865 		 */
2866 		mac->autoneg = B_FALSE;
2867 		mac->forced_speed_duplex = ADVERTISE_100_FULL;
2868 		break;
2869 	case GDIAG_1000_FULL:
2870 		/*
2871 		 * The gigabit spec requires autonegotiation.  Therefore,
2872 		 * when the user wants to force the speed to 1000Mbps, we
2873 		 * enable AutoNeg, but only allow the hardware to advertise
2874 		 * 1000Mbps.  This is different from 10/100 operation, where
2875 		 * we are allowed to link without any negotiation.
2876 		 */
2877 		mac->autoneg = B_TRUE;
2878 		phy->autoneg_advertised = ADVERTISE_1000_FULL;
2879 		break;
2880 	default:	/* obey the setting of AutoNegAdvertised */
2881 		mac->autoneg = B_TRUE;
2882 		phy->autoneg_advertised =
2883 		    (uint16_t)e1000g_get_prop(Adapter, "AutoNegAdvertised",
2884 		    0, AUTONEG_ADVERTISE_SPEED_DEFAULT,
2885 		    AUTONEG_ADVERTISE_SPEED_DEFAULT);
2886 		break;
2887 	}	/* switch */
2888 }
2889 
2890 /*
2891  * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf
2892  *
2893  * This function reads MaxFrameSize from e1000g.conf
2894  */
2895 static void
2896 e1000g_get_max_frame_size(struct e1000g *Adapter)
2897 {
2898 	int max_frame;
2899 	struct e1000_mac_info *mac = &Adapter->shared.mac;
2900 	struct e1000_phy_info *phy = &Adapter->shared.phy;
2901 
2902 	/*
2903 	 * get value out of config file
2904 	 */
2905 	max_frame = e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0);
2906 
2907 	switch (max_frame) {
2908 	case 0:
2909 		mac->max_frame_size = ETHERMAX;
2910 		break;
2911 	case 1:
2912 		mac->max_frame_size = FRAME_SIZE_UPTO_4K;
2913 		break;
2914 	case 2:
2915 		mac->max_frame_size = FRAME_SIZE_UPTO_8K;
2916 		break;
2917 	case 3:
2918 		if (mac->type < e1000_82571)
2919 			mac->max_frame_size = FRAME_SIZE_UPTO_16K;
2920 		else
2921 			mac->max_frame_size = FRAME_SIZE_UPTO_9K;
2922 		break;
2923 	default:
2924 		mac->max_frame_size = ETHERMAX;
2925 		break;
2926 	}	/* switch */
2927 
2928 	/* ich8 does not do jumbo frames */
2929 	if (mac->type == e1000_ich8lan) {
2930 		mac->max_frame_size = ETHERMAX;
2931 	}
2932 
2933 	/* ich9 does not do jumbo frames on one phy type */
2934 	if ((mac->type == e1000_ich9lan) &&
2935 	    (phy->type == e1000_phy_ife)) {
2936 		mac->max_frame_size = ETHERMAX;
2937 	}
2938 }
2939 
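/*
 * Watchdog timer helpers
 *
 * The watchdog runs e1000g_local_timer() once per second.  The
 * enable/disable routines control whether the timer may run at all;
 * the start/stop/restart routines control the currently scheduled
 * timeout.
 */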
2940 static void
2941 arm_watchdog_timer(struct e1000g *Adapter)
2942 {
2943 	Adapter->watchdog_tid =
2944 	    timeout(e1000g_local_timer,
2945 	    (void *)Adapter, 1 * drv_usectohz(1000000));
2946 }
2947 #pragma inline(arm_watchdog_timer)
2948 
2949 static void
2950 enable_watchdog_timer(struct e1000g *Adapter)
2951 {
2952 	mutex_enter(&Adapter->watchdog_lock);
2953 
2954 	if (!Adapter->watchdog_timer_enabled) {
2955 		Adapter->watchdog_timer_enabled = B_TRUE;
2956 		Adapter->watchdog_timer_started = B_TRUE;
2957 		arm_watchdog_timer(Adapter);
2958 	}
2959 
2960 	mutex_exit(&Adapter->watchdog_lock);
2961 }
2962 
2963 static void
2964 disable_watchdog_timer(struct e1000g *Adapter)
2965 {
2966 	timeout_id_t tid;
2967 
2968 	mutex_enter(&Adapter->watchdog_lock);
2969 
2970 	Adapter->watchdog_timer_enabled = B_FALSE;
2971 	Adapter->watchdog_timer_started = B_FALSE;
2972 	tid = Adapter->watchdog_tid;
2973 	Adapter->watchdog_tid = 0;
2974 
2975 	mutex_exit(&Adapter->watchdog_lock);
2976 
2977 	if (tid != 0)
2978 		(void) untimeout(tid);
2979 }
2980 
2981 static void
2982 start_watchdog_timer(struct e1000g *Adapter)
2983 {
2984 	mutex_enter(&Adapter->watchdog_lock);
2985 
2986 	if (Adapter->watchdog_timer_enabled) {
2987 		if (!Adapter->watchdog_timer_started) {
2988 			Adapter->watchdog_timer_started = B_TRUE;
2989 			arm_watchdog_timer(Adapter);
2990 		}
2991 	}
2992 
2993 	mutex_exit(&Adapter->watchdog_lock);
2994 }
2995 
2996 static void
2997 restart_watchdog_timer(struct e1000g *Adapter)
2998 {
2999 	mutex_enter(&Adapter->watchdog_lock);
3000 
3001 	if (Adapter->watchdog_timer_started)
3002 		arm_watchdog_timer(Adapter);
3003 
3004 	mutex_exit(&Adapter->watchdog_lock);
3005 }
3006 
3007 static void
3008 stop_watchdog_timer(struct e1000g *Adapter)
3009 {
3010 	timeout_id_t tid;
3011 
3012 	mutex_enter(&Adapter->watchdog_lock);
3013 
3014 	Adapter->watchdog_timer_started = B_FALSE;
3015 	tid = Adapter->watchdog_tid;
3016 	Adapter->watchdog_tid = 0;
3017 
3018 	mutex_exit(&Adapter->watchdog_lock);
3019 
3020 	if (tid != 0)
3021 		(void) untimeout(tid);
3022 }
3023 
3024 static void
3025 stop_link_timer(struct e1000g *Adapter)
3026 {
3027 	timeout_id_t tid;
3028 
3029 	/* Disable the link timer */
3030 	mutex_enter(&Adapter->link_lock);
3031 
3032 	tid = Adapter->link_tid;
3033 	Adapter->link_tid = 0;
3034 
3035 	mutex_exit(&Adapter->link_lock);
3036 
3037 	if (tid != 0)
3038 		(void) untimeout(tid);
3039 }
3040 
3041 static void
3042 stop_82547_timer(e1000g_tx_ring_t *tx_ring)
3043 {
3044 	timeout_id_t tid;
3045 
3046 	/* Disable the tx timer for 82547 chipset */
3047 	mutex_enter(&tx_ring->tx_lock);
3048 
3049 	tx_ring->timer_enable_82547 = B_FALSE;
3050 	tid = tx_ring->timer_id_82547;
3051 	tx_ring->timer_id_82547 = 0;
3052 
3053 	mutex_exit(&tx_ring->tx_lock);
3054 
3055 	if (tid != 0)
3056 		(void) untimeout(tid);
3057 }
3058 
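/*
 * Interrupt mask helpers
 *
 * Bits written to IMS enable the corresponding interrupt causes and
 * bits written to IMC disable them.  e1000g_clear_interrupt() disables
 * every cause except RXSEQ, while e1000g_mask_interrupt() enables the
 * normal causes minus the transmit-done interrupts (TXDW/TXQE).
 */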
3059 void
3060 e1000g_clear_interrupt(struct e1000g *Adapter)
3061 {
3062 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC,
3063 	    0xffffffff & ~E1000_IMS_RXSEQ);
3064 }
3065 
3066 void
3067 e1000g_mask_interrupt(struct e1000g *Adapter)
3068 {
3069 	E1000_WRITE_REG(&Adapter->shared, E1000_IMS,
3070 	    IMS_ENABLE_MASK & ~E1000_IMS_TXDW & ~E1000_IMS_TXQE);
3071 }
3072 
3073 void
3074 e1000g_clear_all_interrupts(struct e1000g *Adapter)
3075 {
3076 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff);
3077 }
3078 
3079 void
3080 e1000g_mask_tx_interrupt(struct e1000g *Adapter)
3081 {
3082 	E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000G_IMS_TX_INTR);
3083 }
3084 
3085 void
3086 e1000g_clear_tx_interrupt(struct e1000g *Adapter)
3087 {
3088 	E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000G_IMS_TX_INTR);
3089 }
3090 
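/*
 * e1000g_smartspeed - workaround for marginal gigabit links
 *
 * When master/slave negotiation keeps faulting, alternate between
 * automatic and manual master/slave resolution while restarting
 * autonegotiation, so a link can still be established (for example
 * over 2/3-pair cabling).
 */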
3091 static void
3092 e1000g_smartspeed(struct e1000g *Adapter)
3093 {
3094 	struct e1000_hw *hw = &Adapter->shared;
3095 	uint16_t phy_status;
3096 	uint16_t phy_ctrl;
3097 
3098 	/*
3099 	 * If we're not T-or-T, or we're not autoneg'ing, or we're not
3100 	 * advertising 1000Full, we don't even use the workaround
3101 	 */
3102 	if ((hw->phy.type != e1000_phy_igp) ||
3103 	    !hw->mac.autoneg ||
3104 	    !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL))
3105 		return;
3106 
3107 	/*
3108 	 * True if this is the first call of this function or after every
3109 	 * 30 seconds of not having link
3110 	 */
3111 	if (Adapter->smartspeed == 0) {
3112 		/*
3113 		 * If Master/Slave config fault is asserted twice, we
3114 		 * assume back-to-back
3115 		 */
3116 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
3117 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
3118 			return;
3119 
3120 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
3121 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
3122 			return;
3123 		/*
3124 		 * We're assuming back-to-back because our status register
3125 		 * insists there's a fault in the master/slave
3126 		 * relationship that was "negotiated".
3127 		 */
3128 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
3129 		/*
3130 		 * Is the phy configured for manual configuration of
3131 		 * master/slave?
3132 		 */
3133 		if (phy_ctrl & CR_1000T_MS_ENABLE) {
3134 			/*
3135 			 * Yes.  Then disable manual configuration (enable
3136 			 * auto configuration) of master/slave
3137 			 */
3138 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
3139 			e1000_write_phy_reg(hw,
3140 			    PHY_1000T_CTRL, phy_ctrl);
3141 			/*
3142 			 * Effectively starting the clock
3143 			 */
3144 			Adapter->smartspeed++;
3145 			/*
3146 			 * Restart autonegotiation
3147 			 */
3148 			if (!e1000_phy_setup_autoneg(hw) &&
3149 			    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
3150 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
3151 				    MII_CR_RESTART_AUTO_NEG);
3152 				e1000_write_phy_reg(hw,
3153 				    PHY_CONTROL, phy_ctrl);
3154 			}
3155 		}
3156 		return;
3157 		/*
3158 		 * Has 6 seconds transpired still without link? Remember,
3159 		 * you should reset the smartspeed counter once you obtain
3160 		 * link
3161 		 */
3162 	} else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
3163 		/*
3164 		 * Yes.  Remember, we did at the start determine that
3165 		 * there's a master/slave configuration fault, so we're
3166 		 * still assuming there's someone on the other end, but we
3167 		 * just haven't yet been able to talk to it. We then
3168 		 * re-enable auto configuration of master/slave to see if
3169 		 * we're running 2/3 pair cables.
3170 		 */
3171 		/*
3172 		 * If still no link, perhaps using 2/3 pair cable
3173 		 */
3174 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
3175 		phy_ctrl |= CR_1000T_MS_ENABLE;
3176 		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
3177 		/*
3178 		 * Restart autoneg with phy enabled for manual
3179 		 * configuration of master/slave
3180 		 */
3181 		if (!e1000_phy_setup_autoneg(hw) &&
3182 		    !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
3183 			phy_ctrl |=
3184 			    (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
3185 			e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
3186 		}
3187 		/*
3188 		 * Hopefully, there are no more faults and we've obtained
3189 		 * link as a result.
3190 		 */
3191 	}
3192 	/*
3193 	 * Restart process after E1000_SMARTSPEED_MAX iterations (30
3194 	 * seconds)
3195 	 */
3196 	if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
3197 		Adapter->smartspeed = 0;
3198 }
3199 
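/*
 * is_valid_mac_addr - reject the all-zeros and broadcast addresses
 */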
3200 static boolean_t
3201 is_valid_mac_addr(uint8_t *mac_addr)
3202 {
3203 	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
3204 	const uint8_t addr_test2[6] =
3205 	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3206 
3207 	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
3208 	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
3209 		return (B_FALSE);
3210 
3211 	return (B_TRUE);
3212 }
3213 
3214 /*
3215  * e1000g_stall_check - check for tx stall
3216  *
3217  * This function checks if the adapter is stalled (in transmit).
3218  *
3219  * It is called each time the watchdog timeout is invoked.
3220  * If the transmit descriptor reclaim continuously fails,
3221  * the watchdog value will increment by 1. If the watchdog
3222  * value exceeds the threshold, the adapter is assumed to
3223  * have stalled and needs to be reset.
3224  */
3225 static boolean_t
3226 e1000g_stall_check(struct e1000g *Adapter)
3227 {
3228 	e1000g_tx_ring_t *tx_ring;
3229 
3230 	tx_ring = Adapter->tx_ring;
3231 
3232 	if (Adapter->link_state != LINK_STATE_UP)
3233 		return (B_FALSE);
3234 
3235 	if (tx_ring->recycle_fail > 0)
3236 		tx_ring->stall_watchdog++;
3237 	else
3238 		tx_ring->stall_watchdog = 0;
3239 
3240 	if (tx_ring->stall_watchdog < E1000G_STALL_WATCHDOG_COUNT)
3241 		return (B_FALSE);
3242 
3243 	tx_ring->stall_watchdog = 0;
3244 	tx_ring->recycle_fail = 0;
3245 
3246 	return (B_TRUE);
3247 }
3248 
3249 #ifdef E1000G_DEBUG
3250 static enum ioc_reply
3251 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp)
3252 {
3253 	void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd);
3254 	e1000g_peekpoke_t *ppd;
3255 	uint64_t mem_va;
3256 	uint64_t maxoff;
3257 	boolean_t peek;
3258 
3259 	switch (iocp->ioc_cmd) {
3260 
3261 	case E1000G_IOC_REG_PEEK:
3262 		peek = B_TRUE;
3263 		break;
3264 
3265 	case E1000G_IOC_REG_POKE:
3266 		peek = B_FALSE;
3267 		break;
3268 
3269 	default:
3270 		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
3271 		    "e1000g_diag_ioctl: invalid ioctl command 0x%X\n",
3272 		    iocp->ioc_cmd);
3273 		return (IOC_INVAL);
3274 	}
3275 
3276 	/*
3277 	 * Validate format of ioctl
3278 	 */
3279 	if (iocp->ioc_count != sizeof (e1000g_peekpoke_t))
3280 		return (IOC_INVAL);
3281 	if (mp->b_cont == NULL)
3282 		return (IOC_INVAL);
3283 
3284 	ppd = (e1000g_peekpoke_t *)mp->b_cont->b_rptr;
3285 
3286 	/*
3287 	 * Validate request parameters
3288 	 */
3289 	switch (ppd->pp_acc_space) {
3290 
3291 	default:
3292 		E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
3293 		    "e1000g_diag_ioctl: invalid access space 0x%X\n",
3294 		    ppd->pp_acc_space);
3295 		return (IOC_INVAL);
3296 
3297 	case E1000G_PP_SPACE_REG:
3298 		/*
3299 		 * Memory-mapped I/O space
3300 		 */
3301 		ASSERT(ppd->pp_acc_size == 4);
3302 		if (ppd->pp_acc_size != 4)
3303 			return (IOC_INVAL);
3304 
3305 		if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
3306 			return (IOC_INVAL);
3307 
3308 		mem_va = 0;
3309 		maxoff = 0x10000;
3310 		ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg;
3311 		break;
3312 
3313 	case E1000G_PP_SPACE_E1000G:
3314 		/*
3315 		 * E1000g data structure!
3316 		 */
3317 		mem_va = (uintptr_t)e1000gp;
3318 		maxoff = sizeof (struct e1000g);
3319 		ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem;
3320 		break;
3321 
3322 	}
3323 
3324 	if (ppd->pp_acc_offset >= maxoff)
3325 		return (IOC_INVAL);
3326 
3327 	if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff)
3328 		return (IOC_INVAL);
3329 
3330 	/*
3331 	 * All OK - go!
3332 	 */
3333 	ppd->pp_acc_offset += mem_va;
3334 	(*ppfn)(e1000gp, ppd);
3335 	return (peek ? IOC_REPLY : IOC_ACK);
3336 }
3337 
3338 static void
3339 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
3340 {
3341 	ddi_acc_handle_t handle;
3342 	uint32_t *regaddr;
3343 
3344 	handle = e1000gp->osdep.reg_handle;
3345 	regaddr =
3346 	    (uint32_t *)(e1000gp->shared.hw_addr + ppd->pp_acc_offset);
3347 
3348 	ppd->pp_acc_data = ddi_get32(handle, regaddr);
3349 }
3350 
3351 static void
3352 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
3353 {
3354 	ddi_acc_handle_t handle;
3355 	uint32_t *regaddr;
3356 	uint32_t value;
3357 
3358 	handle = e1000gp->osdep.reg_handle;
3359 	regaddr =
3360 	    (uint32_t *)(e1000gp->shared.hw_addr + ppd->pp_acc_offset);
3361 	value = (uint32_t)ppd->pp_acc_data;
3362 
3363 	ddi_put32(handle, regaddr, value);
3364 }
3365 
3366 static void
3367 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
3368 {
3369 	uint64_t value;
3370 	void *vaddr;
3371 
3372 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
3373 
3374 	switch (ppd->pp_acc_size) {
3375 	case 1:
3376 		value = *(uint8_t *)vaddr;
3377 		break;
3378 
3379 	case 2:
3380 		value = *(uint16_t *)vaddr;
3381 		break;
3382 
3383 	case 4:
3384 		value = *(uint32_t *)vaddr;
3385 		break;
3386 
3387 	case 8:
3388 		value = *(uint64_t *)vaddr;
3389 		break;
3390 	}
3391 
3392 	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
3393 	    "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n",
3394 	    (void *)e1000gp, (void *)ppd, value, vaddr);
3395 
3396 	ppd->pp_acc_data = value;
3397 }
3398 
3399 static void
3400 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
3401 {
3402 	uint64_t value;
3403 	void *vaddr;
3404 
3405 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
3406 	value = ppd->pp_acc_data;
3407 
3408 	E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
3409 	    "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n",
3410 	    (void *)e1000gp, (void *)ppd, value, vaddr);
3411 
3412 	switch (ppd->pp_acc_size) {
3413 	case 1:
3414 		*(uint8_t *)vaddr = (uint8_t)value;
3415 		break;
3416 
3417 	case 2:
3418 		*(uint16_t *)vaddr = (uint16_t)value;
3419 		break;
3420 
3421 	case 4:
3422 		*(uint32_t *)vaddr = (uint32_t)value;
3423 		break;
3424 
3425 	case 8:
3426 		*(uint64_t *)vaddr = (uint64_t)value;
3427 		break;
3428 	}
3429 }
3430 #endif
3431 
3432 /*
3433  * Loopback Support
3434  */
3435 static lb_property_t lb_normal =
3436 	{ normal,	"normal",	E1000G_LB_NONE		};
3437 static lb_property_t lb_external1000 =
3438 	{ external,	"1000Mbps",	E1000G_LB_EXTERNAL_1000	};
3439 static lb_property_t lb_external100 =
3440 	{ external,	"100Mbps",	E1000G_LB_EXTERNAL_100	};
3441 static lb_property_t lb_external10 =
3442 	{ external,	"10Mbps",	E1000G_LB_EXTERNAL_10	};
3443 static lb_property_t lb_phy =
3444 	{ internal,	"PHY",		E1000G_LB_INTERNAL_PHY	};
3445 
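/*
 * e1000g_loopback_ioctl - handle the LB_* loopback ioctls
 *
 * LB_GET_INFO_SIZE returns the size in bytes of the lb_property_t
 * array, LB_GET_INFO fills in the array of supported loopback modes,
 * and LB_GET_MODE/LB_SET_MODE read and change the current mode.
 */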
3446 static enum ioc_reply
3447 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp)
3448 {
3449 	lb_info_sz_t *lbsp;
3450 	lb_property_t *lbpp;
3451 	struct e1000_hw *hw;
3452 	uint32_t *lbmp;
3453 	uint32_t size;
3454 	uint32_t value;
3455 	uint16_t phy_status;
3456 	uint16_t phy_ext_status;
3457 
3458 	hw = &Adapter->shared;
3459 
3460 	if (mp->b_cont == NULL)
3461 		return (IOC_INVAL);
3462 
3463 	switch (iocp->ioc_cmd) {
3464 	default:
3465 		return (IOC_INVAL);
3466 
3467 	case LB_GET_INFO_SIZE:
3468 		size = sizeof (lb_info_sz_t);
3469 		if (iocp->ioc_count != size)
3470 			return (IOC_INVAL);
3471 
3472 		e1000_read_phy_reg(hw, PHY_EXT_STATUS, &phy_ext_status);
3473 		e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
3474 
3475 		value = sizeof (lb_normal);
3476 		if ((phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
3477 		    (phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
3478 		    (hw->media_type == e1000_media_type_fiber) ||
3479 		    (hw->media_type == e1000_media_type_internal_serdes)) {
3480 			value += sizeof (lb_phy);
3481 			switch (hw->mac.type) {
3482 			case e1000_82571:
3483 			case e1000_82572:
3484 				value += sizeof (lb_external1000);
3485 				break;
3486 			}
3487 		}
3488 		if ((phy_status & MII_SR_100X_FD_CAPS) ||
3489 		    (phy_status & MII_SR_100T2_FD_CAPS))
3490 			value += sizeof (lb_external100);
3491 		if (phy_status & MII_SR_10T_FD_CAPS)
3492 			value += sizeof (lb_external10);
3493 
3494 		lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr;
3495 		*lbsp = value;
3496 		break;
3497 
3498 	case LB_GET_INFO:
3499 		e1000_read_phy_reg(hw, PHY_EXT_STATUS, &phy_ext_status);
3500 		e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
3501 
3502 		value = sizeof (lb_normal);
3503 		if ((phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
3504 		    (phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
3505 		    (hw->media_type == e1000_media_type_fiber) ||
3506 		    (hw->media_type == e1000_media_type_internal_serdes)) {
3507 			value += sizeof (lb_phy);
3508 			switch (hw->mac.type) {
3509 			case e1000_82571:
3510 			case e1000_82572:
3511 				value += sizeof (lb_external1000);
3512 				break;
3513 			}
3514 		}
3515 		if ((phy_status & MII_SR_100X_FD_CAPS) ||
3516 		    (phy_status & MII_SR_100T2_FD_CAPS))
3517 			value += sizeof (lb_external100);
3518 		if (phy_status & MII_SR_10T_FD_CAPS)
3519 			value += sizeof (lb_external10);
3520 
3521 		size = value;
3522 		if (iocp->ioc_count != size)
3523 			return (IOC_INVAL);
3524 
3525 		value = 0;
3526 		lbpp = (lb_property_t *)mp->b_cont->b_rptr;
3527 		lbpp[value++] = lb_normal;
3528 		if ((phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
3529 		    (phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
3530 		    (hw->media_type == e1000_media_type_fiber) ||
3531 		    (hw->media_type == e1000_media_type_internal_serdes)) {
3532 			lbpp[value++] = lb_phy;
3533 			switch (hw->mac.type) {
3534 			case e1000_82571:
3535 			case e1000_82572:
3536 				lbpp[value++] = lb_external1000;
3537 				break;
3538 			}
3539 		}
3540 		if ((phy_status & MII_SR_100X_FD_CAPS) ||
3541 		    (phy_status & MII_SR_100T2_FD_CAPS))
3542 			lbpp[value++] = lb_external100;
3543 		if (phy_status & MII_SR_10T_FD_CAPS)
3544 			lbpp[value++] = lb_external10;
3545 		break;
3546 
3547 	case LB_GET_MODE:
3548 		size = sizeof (uint32_t);
3549 		if (iocp->ioc_count != size)
3550 			return (IOC_INVAL);
3551 
3552 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
3553 		*lbmp = Adapter->loopback_mode;
3554 		break;
3555 
3556 	case LB_SET_MODE:
3557 		size = 0;
3558 		if (iocp->ioc_count != sizeof (uint32_t))
3559 			return (IOC_INVAL);
3560 
3561 		lbmp = (uint32_t *)mp->b_cont->b_rptr;
3562 		if (!e1000g_set_loopback_mode(Adapter, *lbmp))
3563 			return (IOC_INVAL);
3564 		break;
3565 	}
3566 
3567 	iocp->ioc_count = size;
3568 	iocp->ioc_error = 0;
3569 
3570 	return (IOC_REPLY);
3571 }
3572 
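/*
 * e1000g_set_loopback_mode - switch the adapter to the given loopback mode
 *
 * E1000G_LB_NONE restores normal operation by resetting the chip; the
 * other modes run the mode-specific setup routine and then wait for
 * link, retrying the setup once if the link does not come up.
 */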
3573 static boolean_t
3574 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode)
3575 {
3576 	struct e1000_hw *hw;
3577 #ifndef __sparc
3578 	uint32_t reg_rctl;
3579 #endif
3580 	int i, times;
3581 
3582 	if (mode == Adapter->loopback_mode)
3583 		return (B_TRUE);
3584 
3585 	hw = &Adapter->shared;
3586 	times = 0;
3587 
3588 again:
3589 	switch (mode) {
3590 	default:
3591 		return (B_FALSE);
3592 
3593 	case E1000G_LB_NONE:
3594 		/* Get original speed and duplex settings */
3595 		e1000g_force_speed_duplex(Adapter);
3596 		/* Reset the chip */
3597 		hw->phy.wait_for_link = B_TRUE;
3598 		(void) e1000g_reset(Adapter);
3599 		hw->phy.wait_for_link = B_FALSE;
3600 		break;
3601 
3602 	case E1000G_LB_EXTERNAL_1000:
3603 		e1000g_set_external_loopback_1000(Adapter);
3604 		break;
3605 
3606 	case E1000G_LB_EXTERNAL_100:
3607 		e1000g_set_external_loopback_100(Adapter);
3608 		break;
3609 
3610 	case E1000G_LB_EXTERNAL_10:
3611 		e1000g_set_external_loopback_10(Adapter);
3612 		break;
3613 
3614 	case E1000G_LB_INTERNAL_PHY:
3615 		e1000g_set_internal_loopback(Adapter);
3616 		break;
3617 	}
3618 
3619 	times++;
3620 
3621 	switch (mode) {
3622 	case E1000G_LB_EXTERNAL_1000:
3623 	case E1000G_LB_EXTERNAL_100:
3624 	case E1000G_LB_EXTERNAL_10:
3625 	case E1000G_LB_INTERNAL_PHY:
3626 		/* Wait for link up */
3627 		for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--)
3628 			msec_delay(100);
3629 
3630 		if (!e1000g_link_up(Adapter)) {
3631 			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
3632 			    "Failed to get the link up");
3633 			if (times < 2) {
3634 				/* Reset the link */
3635 				E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
3636 				    "Reset the link ...");
3637 				(void) e1000g_reset(Adapter);
3638 				goto again;
3639 			}
3640 		}
3641 		break;
3642 	}
3643 
3644 	Adapter->loopback_mode = mode;
3645 
3646 	return (B_TRUE);
3647 }
3648 
3649 /*
3650  * The following loopback settings are from Intel's technical
3651  * document - "How To Loopback". All the register settings and
3652  * time delay values are directly inherited from the document
3653  * without further explanation.
3654  */
3655 static void
3656 e1000g_set_internal_loopback(struct e1000g *Adapter)
3657 {
3658 	struct e1000_hw *hw;
3659 	uint32_t ctrl;
3660 	uint32_t status;
3661 	uint16_t phy_ctrl;
3662 
3663 	hw = &Adapter->shared;
3664 
3665 	/* Disable Smart Power Down */
3666 	phy_spd_state(hw, B_FALSE);
3667 
3668 	e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
3669 	phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10);
3670 	phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000;
3671 
3672 	switch (hw->mac.type) {
3673 	case e1000_82540:
3674 	case e1000_82545:
3675 	case e1000_82545_rev_3:
3676 	case e1000_82546:
3677 	case e1000_82546_rev_3:
3678 	case e1000_82573:
3679 		/* Auto-MDI/MDIX off */
3680 		e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
3681 		/* Reset PHY to update Auto-MDI/MDIX */
3682 		e1000_write_phy_reg(hw, PHY_CONTROL,
3683 		    phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN);
3684 		/* Reset PHY to auto-neg off and force 1000 */
3685 		e1000_write_phy_reg(hw, PHY_CONTROL,
3686 		    phy_ctrl | MII_CR_RESET);
3687 		break;
3688 	}
3689 
3690 	/* Set loopback */
3691 	e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK);
3692 
3693 	msec_delay(250);
3694 
3695 	/* Now set up the MAC to the same speed/duplex as the PHY. */
3696 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3697 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
3698 	ctrl |= (E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
3699 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
3700 	    E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
3701 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
3702 
3703 	switch (hw->mac.type) {
3704 	case e1000_82540:
3705 	case e1000_82545:
3706 	case e1000_82545_rev_3:
3707 	case e1000_82546:
3708 	case e1000_82546_rev_3:
3709 		/*
3710 		 * For some serdes we'll need to commit the writes now
3711 		 * so that the status is updated on link
3712 		 */
3713 		if (hw->media_type == e1000_media_type_internal_serdes) {
3714 			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3715 			msec_delay(100);
3716 			ctrl = E1000_READ_REG(hw, E1000_CTRL);
3717 		}
3718 
3719 		if (hw->media_type == e1000_media_type_copper) {
3720 			/* Invert Loss of Signal */
3721 			ctrl |= E1000_CTRL_ILOS;
3722 		} else {
3723 			/* Set ILOS on fiber nic if half duplex is detected */
3724 			status = E1000_READ_REG(hw, E1000_STATUS);
3725 			if ((status & E1000_STATUS_FD) == 0)
3726 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
3727 		}
3728 		break;
3729 
3730 	case e1000_82571:
3731 	case e1000_82572:
3732 		if (hw->media_type != e1000_media_type_copper) {
3733 			/* Set ILOS on fiber nic if half duplex is detected */
3734 			status = E1000_READ_REG(hw, E1000_STATUS);
3735 			if ((status & E1000_STATUS_FD) == 0)
3736 				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
3737 		}
3738 		break;
3739 
3740 	case e1000_82573:
3741 		ctrl |= E1000_CTRL_ILOS;
3742 		break;
3743 	}
3744 
3745 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3746 
3747 	/*
3748 	 * Disable PHY receiver for 82540/545/546 and 82573 Family.
3749 	 * For background, see comments above e1000g_set_internal_loopback().
3750 	 */
3751 	switch (hw->mac.type) {
3752 	case e1000_82540:
3753 	case e1000_82545:
3754 	case e1000_82545_rev_3:
3755 	case e1000_82546:
3756 	case e1000_82546_rev_3:
3757 	case e1000_82573:
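		/*
		 * Registers 29 and 30 appear to act as an address/data pair
		 * for internal (undocumented) PHY registers; the values below
		 * are taken verbatim from the Intel loopback document
		 * referenced above e1000g_set_internal_loopback().
		 */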
3758 		e1000_write_phy_reg(hw, 29, 0x001F);
3759 		e1000_write_phy_reg(hw, 30, 0x8FFC);
3760 		e1000_write_phy_reg(hw, 29, 0x001A);
3761 		e1000_write_phy_reg(hw, 30, 0x8FF0);
3762 		break;
3763 	}
3764 }
3765 
3766 static void
3767 e1000g_set_external_loopback_1000(struct e1000g *Adapter)
3768 {
3769 	struct e1000_hw *hw;
3770 	uint32_t rctl;
3771 	uint32_t ctrl_ext;
3772 	uint32_t ctrl;
3773 	uint32_t status;
3774 	uint32_t txcw;
3775 
3776 	hw = &Adapter->shared;
3777 
3778 	/* Disable Smart Power Down */
3779 	phy_spd_state(hw, B_FALSE);
3780 
3781 	switch (hw->media_type) {
3782 	case e1000_media_type_copper:
3783 		/* Force link up (Must be done before the PHY writes) */
3784 		ctrl = E1000_READ_REG(hw, E1000_CTRL);
3785 		ctrl |= E1000_CTRL_SLU;	/* Force Link Up */
3786 		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3787 
3788 		rctl = E1000_READ_REG(hw, E1000_RCTL);
3789 		rctl |= (E1000_RCTL_EN |
3790 		    E1000_RCTL_SBP |
3791 		    E1000_RCTL_UPE |
3792 		    E1000_RCTL_MPE |
3793 		    E1000_RCTL_LPE |
3794 		    E1000_RCTL_BAM);		/* 0x803E */
3795 		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
3796 
3797 		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
3798 		ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA |
3799 		    E1000_CTRL_EXT_SDP6_DATA |
3800 		    E1000_CTRL_EXT_SDP7_DATA |
3801 		    E1000_CTRL_EXT_SDP4_DIR |
3802 		    E1000_CTRL_EXT_SDP6_DIR |
3803 		    E1000_CTRL_EXT_SDP7_DIR);	/* 0x0DD0 */
3804 		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
3805 
3806 		/*
3807 		 * This sequence tunes the PHY's SDP registers and contains
3808 		 * no customer-settable values. For background, see comments above
3809 		 * e1000g_set_internal_loopback().
3810 		 */
3811 		e1000_write_phy_reg(hw, 0x0, 0x140);
3812 		msec_delay(10);
3813 		e1000_write_phy_reg(hw, 0x9, 0x1A00);
3814 		e1000_write_phy_reg(hw, 0x12, 0xC10);
3815 		e1000_write_phy_reg(hw, 0x12, 0x1C10);
3816 		e1000_write_phy_reg(hw, 0x1F37, 0x76);
3817 		e1000_write_phy_reg(hw, 0x1F33, 0x1);
3818 		e1000_write_phy_reg(hw, 0x1F33, 0x0);
3819 
3820 		e1000_write_phy_reg(hw, 0x1F35, 0x65);
3821 		e1000_write_phy_reg(hw, 0x1837, 0x3F7C);
3822 		e1000_write_phy_reg(hw, 0x1437, 0x3FDC);
3823 		e1000_write_phy_reg(hw, 0x1237, 0x3F7C);
3824 		e1000_write_phy_reg(hw, 0x1137, 0x3FDC);
3825 
3826 		msec_delay(50);
3827 		break;
3828 	case e1000_media_type_fiber:
3829 	case e1000_media_type_internal_serdes:
3830 		status = E1000_READ_REG(hw, E1000_STATUS);
3831 		if (((status & E1000_STATUS_LU) == 0) ||
3832 		    (hw->media_type == e1000_media_type_internal_serdes)) {
3833 			ctrl = E1000_READ_REG(hw, E1000_CTRL);
3834 			ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
3835 			E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3836 		}
3837 
3838 		/* Disable autoneg by clearing bit 31 (ANE) of TXCW */
3839 		txcw = E1000_READ_REG(hw, E1000_TXCW);
3840 		txcw &= ~((uint32_t)1 << 31);
3841 		E1000_WRITE_REG(hw, E1000_TXCW, txcw);
3842 
3843 		/*
3844 		 * Write 0x410 to Serdes Control register
3845 		 * to enable Serdes analog loopback
3846 		 */
3847 		E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
3848 		msec_delay(10);
3849 		break;
3850 	default:
3851 		break;
3852 	}
3853 }
3854 
3855 static void
3856 e1000g_set_external_loopback_100(struct e1000g *Adapter)
3857 {
3858 	struct e1000_hw *hw;
3859 	uint32_t ctrl;
3860 	uint16_t phy_ctrl;
3861 
3862 	hw = &Adapter->shared;
3863 
3864 	/* Disable Smart Power Down */
3865 	phy_spd_state(hw, B_FALSE);
3866 
3867 	phy_ctrl = (MII_CR_FULL_DUPLEX |
3868 	    MII_CR_SPEED_100);
3869 
3870 	/* Force 100/FD, reset PHY */
3871 	e1000_write_phy_reg(hw, PHY_CONTROL,
3872 	    phy_ctrl | MII_CR_RESET);	/* 0xA100 */
3873 	msec_delay(10);
3874 
3875 	/* Force 100/FD */
3876 	e1000_write_phy_reg(hw, PHY_CONTROL,
3877 	    phy_ctrl);			/* 0x2100 */
3878 	msec_delay(10);
3879 
3880 	/* Now set up the MAC to the same speed/duplex as the PHY. */
3881 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3882 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
3883 	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
3884 	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
3885 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
3886 	    E1000_CTRL_SPD_100 |	/* Force Speed to 100 */
3887 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
3888 
3889 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3890 }
3891 
3892 static void
3893 e1000g_set_external_loopback_10(struct e1000g *Adapter)
3894 {
3895 	struct e1000_hw *hw;
3896 	uint32_t ctrl;
3897 	uint16_t phy_ctrl;
3898 
3899 	hw = &Adapter->shared;
3900 
3901 	/* Disable Smart Power Down */
3902 	phy_spd_state(hw, B_FALSE);
3903 
3904 	phy_ctrl = (MII_CR_FULL_DUPLEX |
3905 	    MII_CR_SPEED_10);
3906 
3907 	/* Force 10/FD, reset PHY */
3908 	e1000_write_phy_reg(hw, PHY_CONTROL,
3909 	    phy_ctrl | MII_CR_RESET);	/* 0x8100 */
3910 	msec_delay(10);
3911 
3912 	/* Force 10/FD */
3913 	e1000_write_phy_reg(hw, PHY_CONTROL,
3914 	    phy_ctrl);			/* 0x0100 */
3915 	msec_delay(10);
3916 
3917 	/* Now set up the MAC to the same speed/duplex as the PHY. */
3918 	ctrl = E1000_READ_REG(hw, E1000_CTRL);
3919 	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
3920 	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
3921 	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
3922 	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
3923 	    E1000_CTRL_SPD_10 |		/* Force Speed to 10 */
3924 	    E1000_CTRL_FD);		/* Force Duplex to FULL */
3925 
3926 	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
3927 }
3928 
3929 #ifdef __sparc
3930 static boolean_t
3931 e1000g_find_mac_address(struct e1000g *Adapter)
3932 {
3933 	struct e1000_hw *hw = &Adapter->shared;
3934 	uchar_t *bytes;
3935 	struct ether_addr sysaddr;
3936 	uint_t nelts;
3937 	int err;
3938 	boolean_t found = B_FALSE;
3939 
3940 	/*
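	/*
	 * Each successful property lookup below simply overwrites
	 * hw->mac.addr, so the effective precedence is: "mac-address"
	 * (set when netbooted) over the system address (when
	 * "local-mac-address?" is false) over "local-mac-address" over
	 * whatever was already read from the chip.
	 */
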
3941 	 * The "vendor's factory-set address" may already have
3942 	 * been extracted from the chip, but if the property
3943 	 * "local-mac-address" is set we use that instead.
3944 	 *
3945 	 * We check whether it looks like an array of 6
3946 	 * bytes (which it should, if OBP set it).  If we can't
3947 	 * make sense of it this way, we'll ignore it.
3948 	 */
3949 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
3950 	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
3951 	if (err == DDI_PROP_SUCCESS) {
3952 		if (nelts == ETHERADDRL) {
3953 			while (nelts--)
3954 				hw->mac.addr[nelts] = bytes[nelts];
3955 			found = B_TRUE;
3956 		}
3957 		ddi_prop_free(bytes);
3958 	}
3959 
3960 	/*
3961 	 * Look up the OBP property "local-mac-address?". If the user has set
3962 	 * 'local-mac-address? = false', use "the system address" instead.
3963 	 * 'local-mac-address? = false', use the system MAC address instead.
3964 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0,
3965 	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
3966 		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
3967 			if (localetheraddr(NULL, &sysaddr) != 0) {
3968 				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
3969 				found = B_TRUE;
3970 			}
3971 		}
3972 		ddi_prop_free(bytes);
3973 	}
3974 
3975 	/*
3976 	 * Finally(!), if there's a valid "mac-address" property (created
3977 	 * if we netbooted from this interface), we must use this instead
3978 	 * of any of the above to ensure that the NFS/install server doesn't
3979 	 * get confused by the address changing as Solaris takes over!
3980 	 */
3981 	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
3982 	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
3983 	if (err == DDI_PROP_SUCCESS) {
3984 		if (nelts == ETHERADDRL) {
3985 			while (nelts--)
3986 				hw->mac.addr[nelts] = bytes[nelts];
3987 			found = B_TRUE;
3988 		}
3989 		ddi_prop_free(bytes);
3990 	}
3991 
3992 	if (found) {
3993 		bcopy(hw->mac.addr, hw->mac.perm_addr,
3994 		    ETHERADDRL);
3995 	}
3996 
3997 	return (found);
3998 }
3999 #endif
4000 
4001 static int
4002 e1000g_add_intrs(struct e1000g *Adapter)
4003 {
4004 	dev_info_t *devinfo;
4005 	int intr_types;
4006 	int rc;
4007 
4008 	devinfo = Adapter->dip;
4009 
4010 	/* Get supported interrupt types */
4011 	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
4012 
4013 	if (rc != DDI_SUCCESS) {
4014 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4015 		    "Get supported interrupt types failed: %d\n", rc);
4016 		return (DDI_FAILURE);
4017 	}
4018 
4019 	/*
4020 	 * Based on Intel Technical Advisory document (TA-160), there are some
4021 	 * cases where some older Intel PCI-X NICs may "advertise" to the OS
4022 	 * that they support MSI, but in fact have problems with it.
4023 	 * So we should only enable MSI for PCI-E NICs and disable MSI for old
4024 	 * PCI/PCI-X NICs.
4025 	 */
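	/*
	 * e1000_82571 is the first PCI Express MAC type in the
	 * e1000_mac_type enumeration, so this comparison separates
	 * older PCI/PCI-X parts from PCI-E parts.
	 */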
4026 	if (Adapter->shared.mac.type < e1000_82571)
4027 		Adapter->msi_enabled = B_FALSE;
4028 
4029 	if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enabled) {
4030 		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI);
4031 
4032 		if (rc != DDI_SUCCESS) {
4033 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
4034 			    "Add MSI failed, trying Legacy interrupts\n");
4035 		} else {
4036 			Adapter->intr_type = DDI_INTR_TYPE_MSI;
4037 		}
4038 	}
4039 
4040 	if ((Adapter->intr_type == 0) &&
4041 	    (intr_types & DDI_INTR_TYPE_FIXED)) {
4042 		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED);
4043 
4044 		if (rc != DDI_SUCCESS) {
4045 			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
4046 			    "Add Legacy interrupts failed\n");
4047 			return (DDI_FAILURE);
4048 		}
4049 
4050 		Adapter->intr_type = DDI_INTR_TYPE_FIXED;
4051 	}
4052 
4053 	if (Adapter->intr_type == 0) {
4054 		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
4055 		    "No interrupts registered\n");
4056 		return (DDI_FAILURE);
4057 	}
4058 
4059 	return (DDI_SUCCESS);
4060 }
4061 
4062 /*
4063  * e1000g_intr_add() handles MSI/Legacy interrupts
4064  */
4065 static int
4066 e1000g_intr_add(struct e1000g *Adapter, int intr_type)
4067 {
4068 	dev_info_t *devinfo;
4069 	int count, avail, actual;
4070 	int x, y, rc, inum = 0;
4071 	int flag;
4072 	ddi_intr_handler_t *intr_handler;
4073 
4074 	devinfo = Adapter->dip;
4075 
4076 	/* get number of interrupts */
4077 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
4078 	if ((rc != DDI_SUCCESS) || (count == 0)) {
4079 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
4080 		    "Get interrupt number failed. Return: %d, count: %d\n",
4081 		    rc, count);
4082 		return (DDI_FAILURE);
4083 	}
4084 
4085 	/* get number of available interrupts */
4086 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
4087 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
4088 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
4089 		    "Get interrupt available number failed. "
4090 		    "Return: %d, available: %d\n", rc, avail);
4091 		return (DDI_FAILURE);
4092 	}
4093 
4094 	if (avail < count) {
4095 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
4096 		    "Interrupts count: %d, available: %d\n",
4097 		    count, avail);
4098 	}
4099 
4100 	/* Allocate an array of interrupt handles */
4101 	Adapter->intr_size = count * sizeof (ddi_intr_handle_t);
4102 	Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP);
4103 
4104 	/* Set NORMAL behavior for both MSI and FIXED interrupt */
4105 	/* Set NORMAL behavior for both MSI and FIXED interrupts */
4106 
4107 	/* call ddi_intr_alloc() */
4108 	rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum,
4109 	    count, &actual, flag);
4110 
4111 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
4112 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4113 		    "Allocate interrupts failed: %d\n", rc);
4114 
4115 		kmem_free(Adapter->htable, Adapter->intr_size);
4116 		return (DDI_FAILURE);
4117 	}
4118 
4119 	if (actual < count) {
4120 		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
4121 		    "Interrupts requested: %d, received: %d\n",
4122 		    count, actual);
4123 	}
4124 
4125 	Adapter->intr_cnt = actual;
4126 
4127 	/* Get priority of the first vector, assume the rest are the same */
4128 	rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri);
4129 
4130 	if (rc != DDI_SUCCESS) {
4131 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4132 		    "Get interrupt priority failed: %d\n", rc);
4133 
4134 		/* Free already allocated intr */
4135 		for (y = 0; y < actual; y++)
4136 			(void) ddi_intr_free(Adapter->htable[y]);
4137 
4138 		kmem_free(Adapter->htable, Adapter->intr_size);
4139 		return (DDI_FAILURE);
4140 	}
4141 
4142 	/*
4143 	 * In Legacy Interrupt mode, for PCI-Express adapters, we should
4144 	 * use the interrupt service routine e1000g_intr_pciexpress()
4145 	 * to avoid interrupt stealing when sharing an interrupt with other
4146 	 * devices.
4147 	 */
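	/*
	 * (e1000g_intr_pciexpress(), defined earlier in this file, only
	 * claims the interrupt after confirming this adapter actually
	 * asserted it, so a shared interrupt belonging to another device
	 * is not acknowledged by mistake.)
	 */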
4148 	if (Adapter->shared.mac.type < e1000_82571)
4149 		intr_handler = (ddi_intr_handler_t *)e1000g_intr;
4150 	else
4151 		intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress;
4152 
4153 	/* Call ddi_intr_add_handler() */
4154 	for (x = 0; x < actual; x++) {
4155 		rc = ddi_intr_add_handler(Adapter->htable[x],
4156 		    intr_handler, (caddr_t)Adapter, NULL);
4157 
4158 		if (rc != DDI_SUCCESS) {
4159 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4160 			    "Add interrupt handler failed: %d\n", rc);
4161 
4162 			/* Remove already added handler */
4163 			for (y = 0; y < x; y++)
4164 				(void) ddi_intr_remove_handler(
4165 				    Adapter->htable[y]);
4166 
4167 			/* Free already allocated intr */
4168 			for (y = 0; y < actual; y++)
4169 				(void) ddi_intr_free(Adapter->htable[y]);
4170 
4171 			kmem_free(Adapter->htable, Adapter->intr_size);
4172 			return (DDI_FAILURE);
4173 		}
4174 	}
4175 
4176 	rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap);
4177 
4178 	if (rc != DDI_SUCCESS) {
4179 		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4180 		    "Get interrupt cap failed: %d\n", rc);
4181 
4182 		/* Free already allocated intr */
4183 		for (y = 0; y < actual; y++) {
4184 			(void) ddi_intr_remove_handler(Adapter->htable[y]);
4185 			(void) ddi_intr_free(Adapter->htable[y]);
4186 		}
4187 
4188 		kmem_free(Adapter->htable, Adapter->intr_size);
4189 		return (DDI_FAILURE);
4190 	}
4191 
4192 	return (DDI_SUCCESS);
4193 }
4194 
4195 static int
4196 e1000g_rem_intrs(struct e1000g *Adapter)
4197 {
4198 	int x;
4199 	int rc;
4200 
4201 	for (x = 0; x < Adapter->intr_cnt; x++) {
4202 		rc = ddi_intr_remove_handler(Adapter->htable[x]);
4203 		if (rc != DDI_SUCCESS) {
4204 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4205 			    "Remove intr handler failed: %d\n", rc);
4206 			return (DDI_FAILURE);
4207 		}
4208 
4209 		rc = ddi_intr_free(Adapter->htable[x]);
4210 		if (rc != DDI_SUCCESS) {
4211 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4212 			    "Free intr failed: %d\n", rc);
4213 			return (DDI_FAILURE);
4214 		}
4215 	}
4216 
4217 	kmem_free(Adapter->htable, Adapter->intr_size);
4218 
4219 	return (DDI_SUCCESS);
4220 }
4221 
4222 static int
4223 e1000g_enable_intrs(struct e1000g *Adapter)
4224 {
4225 	int x;
4226 	int rc;
4227 
4228 	/* Enable interrupts */
4229 	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
4230 		/* Call ddi_intr_block_enable() for MSI */
4231 		rc = ddi_intr_block_enable(Adapter->htable,
4232 		    Adapter->intr_cnt);
4233 		if (rc != DDI_SUCCESS) {
4234 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4235 			    "Enable block intr failed: %d\n", rc);
4236 			return (DDI_FAILURE);
4237 		}
4238 	} else {
4239 		/* Call ddi_intr_enable() for Legacy/MSI non block enable */
4240 		for (x = 0; x < Adapter->intr_cnt; x++) {
4241 			rc = ddi_intr_enable(Adapter->htable[x]);
4242 			if (rc != DDI_SUCCESS) {
4243 				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4244 				    "Enable intr failed: %d\n", rc);
4245 				return (DDI_FAILURE);
4246 			}
4247 		}
4248 	}
4249 
4250 	return (DDI_SUCCESS);
4251 }
4252 
4253 static int
4254 e1000g_disable_intrs(struct e1000g *Adapter)
4255 {
4256 	int x;
4257 	int rc;
4258 
4259 	/* Disable all interrupts */
4260 	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
4261 		rc = ddi_intr_block_disable(Adapter->htable,
4262 		    Adapter->intr_cnt);
4263 		if (rc != DDI_SUCCESS) {
4264 			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4265 			    "Disable block intr failed: %d\n", rc);
4266 			return (DDI_FAILURE);
4267 		}
4268 	} else {
4269 		for (x = 0; x < Adapter->intr_cnt; x++) {
4270 			rc = ddi_intr_disable(Adapter->htable[x]);
4271 			if (rc != DDI_SUCCESS) {
4272 				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
4273 				    "Disable intr failed: %d\n", rc);
4274 				return (DDI_FAILURE);
4275 			}
4276 		}
4277 	}
4278 
4279 	return (DDI_SUCCESS);
4280 }
4281