xref: /titanic_51/usr/src/uts/sun/io/eri/eri.c (revision 5a00db9d04809df47502f8002f0295cb0b7966e0)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * SunOS MT STREAMS ERI(PCI) 10/100 Mb Ethernet Device Driver
28  */
29 
30 #include	<sys/types.h>
31 #include	<sys/debug.h>
32 #include	<sys/stropts.h>
33 #include	<sys/stream.h>
34 #include	<sys/strsubr.h>
35 #include	<sys/kmem.h>
36 #include	<sys/crc32.h>
37 #include	<sys/ddi.h>
38 #include	<sys/sunddi.h>
39 #include	<sys/strsun.h>
40 #include	<sys/stat.h>
41 #include	<sys/cpu.h>
42 #include	<sys/kstat.h>
43 #include	<inet/common.h>
44 #include	<sys/pattr.h>
45 #include	<inet/mi.h>
46 #include	<inet/nd.h>
47 #include	<sys/ethernet.h>
48 #include	<sys/vlan.h>
49 #include	<sys/policy.h>
50 #include	<sys/mac_provider.h>
51 #include	<sys/mac_ether.h>
52 #include	<sys/dlpi.h>
53 
54 #include	<sys/pci.h>
55 
56 #include	"eri_phy.h"
57 #include	"eri_mac.h"
58 #include	"eri.h"
59 #include	"eri_common.h"
60 
61 #include	"eri_msg.h"
62 
63 /*
64  *  ***** Function Prototypes *****
65  */
66 /*
67  * Entry points (man9e)
68  */
69 static	int	eri_attach(dev_info_t *, ddi_attach_cmd_t);
70 static	int	eri_detach(dev_info_t *, ddi_detach_cmd_t);
71 static	uint_t	eri_intr(caddr_t);
72 
73 /*
74  * I/O (Input/Output) Functions
75  */
76 static	boolean_t	eri_send_msg(struct eri *, mblk_t *);
77 static  mblk_t		*eri_read_dma(struct eri *, volatile struct rmd *,
78 			    volatile int, uint64_t flags);
79 
80 /*
81  * Initialization Functions
82  */
83 static  boolean_t	eri_init(struct eri *);
84 static	int	eri_allocthings(struct eri *);
85 static  int	eri_init_xfer_params(struct eri *);
86 static  void	eri_statinit(struct eri *);
87 static	int	eri_burstsize(struct eri *);
88 
89 static	void	eri_setup_mac_address(struct eri *, dev_info_t *);
90 
91 static	uint32_t eri_init_rx_channel(struct eri *);
92 static	void	eri_init_rx(struct eri *);
93 static	void	eri_init_txmac(struct eri *);
94 
95 /*
96  * Un-init Functions
97  */
98 static	uint32_t eri_txmac_disable(struct eri *);
99 static	uint32_t eri_rxmac_disable(struct eri *);
100 static	int	eri_stop(struct eri *);
101 static	void	eri_uninit(struct eri *erip);
102 static	int	eri_freebufs(struct eri *);
103 static	boolean_t	eri_reclaim(struct eri *, uint32_t);
104 
105 /*
106  * Transceiver (xcvr) Functions
107  */
108 static	int	eri_new_xcvr(struct eri *); /* Initializes & detects xcvrs */
109 static	int	eri_reset_xcvr(struct eri *);
110 
111 #ifdef	ERI_10_10_FORCE_SPEED_WORKAROUND
112 static	void	eri_xcvr_force_mode(struct eri *, uint32_t *);
113 #endif
114 
115 static	void	eri_mif_poll(struct eri *, soft_mif_enable_t);
116 static	void	eri_check_link(struct eri *);
117 static	uint32_t eri_check_link_noind(struct eri *);
118 static	link_state_t eri_mif_check(struct eri *, uint16_t, uint16_t);
119 static	void    eri_mii_write(struct eri *, uint8_t, uint16_t);
120 static	uint32_t eri_mii_read(struct eri *, uint8_t, uint16_t *);
121 
122 /*
123  * Reset Functions
124  */
125 static	uint32_t eri_etx_reset(struct eri *);
126 static	uint32_t eri_erx_reset(struct eri *);
127 
128 /*
129  * Error Functions
130  */
131 static	void eri_fatal_err(struct eri *, uint32_t);
132 static	void eri_nonfatal_err(struct eri *, uint32_t);
133 
134 #ifdef	ERI_TX_HUNG
135 static	int eri_check_txhung(struct eri *);
136 #endif
137 
138 /*
139  * Hardening Functions
140  */
141 static void eri_fault_msg(struct eri *, uint_t, msg_t, const char *, ...);
142 
143 /*
144  * Misc Functions
145  */
146 static void	eri_savecntrs(struct eri *);
147 
148 static	void	eri_stop_timer(struct eri *erip);
149 static	void	eri_start_timer(struct eri *erip, fptrv_t func, clock_t msec);
150 
151 static	void eri_bb_force_idle(struct eri *);
152 
153 /*
154  * Utility Functions
155  */
156 static	mblk_t *eri_allocb(size_t size);
157 static	mblk_t *eri_allocb_sp(size_t size);
158 static	int	eri_param_get(queue_t *q, mblk_t *mp, caddr_t cp);
159 static	int	eri_param_set(queue_t *, mblk_t *, char *, caddr_t);
160 
161 /*
162  * Functions to support ndd
163  */
164 static	void	eri_nd_free(caddr_t *nd_pparam);
165 
166 static	boolean_t	eri_nd_load(caddr_t *nd_pparam, char *name,
167 				pfi_t get_pfi, pfi_t set_pfi, caddr_t data);
168 
169 static	int	eri_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp);
170 static	void	eri_param_cleanup(struct eri *);
171 static	int	eri_param_register(struct eri *, param_t *, int);
172 static	void	eri_process_ndd_ioctl(struct eri *, queue_t *, mblk_t *, int);
173 static	int	eri_mk_mblk_tail_space(mblk_t *, mblk_t **, size_t);
174 
175 
176 static	void eri_loopback(struct eri *, queue_t *, mblk_t *);
177 
178 static uint32_t	eri_ladrf_bit(const uint8_t *);
179 
180 
181 /*
182  * Nemo (GLDv3) Functions.
183  */
184 static	int		eri_m_stat(void *, uint_t, uint64_t *);
185 static	int		eri_m_start(void *);
186 static	void		eri_m_stop(void *);
187 static	int		eri_m_promisc(void *, boolean_t);
188 static	int		eri_m_multicst(void *, boolean_t, const uint8_t *);
189 static	int		eri_m_unicst(void *, const uint8_t *);
190 static	void		eri_m_ioctl(void *, queue_t *, mblk_t *);
191 static	boolean_t	eri_m_getcapab(void *, mac_capab_t, void *);
192 static	mblk_t		*eri_m_tx(void *, mblk_t *);
193 
194 static mac_callbacks_t eri_m_callbacks = {
195 	MC_IOCTL | MC_GETCAPAB,
196 	eri_m_stat,
197 	eri_m_start,
198 	eri_m_stop,
199 	eri_m_promisc,
200 	eri_m_multicst,
201 	eri_m_unicst,
202 	eri_m_tx,
203 	eri_m_ioctl,
204 	eri_m_getcapab
205 };
206 
207 /*
208  * Define PHY Vendors: Matches to IEEE
209  * Organizationally Unique Identifier (OUI)
210  */
211 /*
212  * The following is supported as an Internal XCVR
213  */
214 #define	PHY_VENDOR_LUCENT	0x601d
215 
216 #define	PHY_LINK_NONE		0	/* Not attempted yet or retry */
217 #define	PHY_LINK_DOWN		1	/* Not being used	*/
218 #define	PHY_LINK_UP		2	/* Not being used	*/
219 
220 #define	AUTO_SPEED		0
221 #define	FORCE_SPEED		1
222 
223 /*
224  * MIB II broadcast/multicast packets
225  */
226 
227 #define	IS_BROADCAST(pkt) (bcmp(pkt, &etherbroadcastaddr, ETHERADDRL) == 0)
228 #define	IS_MULTICAST(pkt) ((pkt[0] & 01) == 1)
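/*
 * Note that the 01 above is octal: the test examines the Ethernet
 * group bit, the least-significant bit of the first address octet,
 * which is set for both multicast and broadcast addresses.
 */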
229 
230 #define	BUMP_InNUcast(erip, pkt) \
231 		if (IS_BROADCAST(pkt)) { \
232 			HSTAT(erip, brdcstrcv); \
233 		} else if (IS_MULTICAST(pkt)) { \
234 			HSTAT(erip, multircv); \
235 		}
236 
237 #define	BUMP_OutNUcast(erip, pkt) \
238 		if (IS_BROADCAST(pkt)) { \
239 			HSTAT(erip, brdcstxmt); \
240 		} else if (IS_MULTICAST(pkt)) { \
241 			HSTAT(erip, multixmt); \
242 		}
243 
244 #define	NEXTTMDP(tbasep, tmdlimp, tmdp)	(((tmdp) + 1) == tmdlimp	\
245 	? tbasep : ((tmdp) + 1))
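/*
 * An illustration (not part of the original source): with TMDs in
 * [tbasep, tmdlimp), NEXTTMDP() advances one descriptor and wraps at
 * the limit; e.g. for a 4-entry ring, NEXTTMDP(base, base + 4,
 * base + 3) yields base, while NEXTTMDP(base, base + 4, base + 1)
 * yields base + 2.
 */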
246 
247 #define	ETHERHEADER_SIZE (sizeof (struct ether_header))
248 
249 #ifdef	ERI_RCV_CKSUM
250 #define	ERI_PROCESS_READ(erip, bp, sum)				\
251 {								\
252 	t_uscalar_t	type;					\
253 	uint_t	start_offset, end_offset;			\
254 								\
255 	*(bp->b_wptr) = 0;	/* pad byte */			\
256 								\
257 	/*							\
258 	 * update MIB II statistics				\
259 	 */							\
260 	HSTAT(erip, ipackets64);				\
261 	HSTATN(erip, rbytes64, len);				\
262 	BUMP_InNUcast(erip, bp->b_rptr);			\
263 	type = get_ether_type(bp->b_rptr);			\
264 	if (type == ETHERTYPE_IP || type == ETHERTYPE_IPV6) {	\
265 		start_offset = 0;				\
266 		end_offset = MBLKL(bp) - ETHERHEADER_SIZE;	\
267 		(void) hcksum_assoc(bp, NULL, NULL,		\
268 			start_offset, 0, end_offset, sum, 	\
269 			HCK_PARTIALCKSUM, 0);			\
270 	} else {						\
271 		/*						\
272 		 * Strip the PADS for 802.3			\
273 		 */						\
274 		if (type <= ETHERMTU)				\
275 			bp->b_wptr = bp->b_rptr +		\
276 				ETHERHEADER_SIZE + type;	\
277 	}							\
278 }
279 #else
280 
281 #define	ERI_PROCESS_READ(erip, bp)				\
282 {								\
283 	t_uscalar_t	type;					\
284 	type = get_ether_type(bp->b_rptr);			\
285 								\
286 	/*							\
287 	 * update MIB II statistics				\
288 	 */							\
289 	HSTAT(erip, ipackets64);				\
290 	HSTATN(erip, rbytes64, len);				\
291 	BUMP_InNUcast(erip, bp->b_rptr);			\
292 	/*							\
293 	 * Strip the PADS for 802.3				\
294 	 */							\
295 	if (type <= ETHERMTU)					\
296 		bp->b_wptr = bp->b_rptr + ETHERHEADER_SIZE + 	\
297 			type;					\
298 }
299 #endif  /* ERI_RCV_CKSUM */
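
/*
 * A note on the paths above (an inference from this file, not
 * normative): eri_init_rxregs() programs the RX DMA to start its
 * 16-bit one's-complement sum at ETHERHEADER_SIZE, so for IP packets
 * the macro hands that partial sum to the stack via hcksum_assoc()
 * with HCK_PARTIALCKSUM and leaves final verification to IP. For
 * 802.3 frames the type field is really a length, which is why pad
 * stripping keys off type <= ETHERMTU.
 */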
300 
301 /*
302  * TX Interrupt Rate
303  */
304 static	int	tx_interrupt_rate = 16;
305 
306 /*
307  * Ethernet broadcast address definition.
308  */
309 static uint8_t	etherbroadcastaddr[] = {
310 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
311 };
312 
313 /*
314  * The following flags track which resources have been allocated.
315  */
316 #define	ERI_DESC_HANDLE_ALLOC	0x0001
317 #define	ERI_DESC_MEM_ALLOC	0x0002
318 #define	ERI_DESC_MEM_MAP	0x0004
319 #define	ERI_RCV_HANDLE_ALLOC	0x0020
320 #define	ERI_RCV_HANDLE_BIND	0x0040
321 #define	ERI_XMIT_DVMA_ALLOC	0x0100
322 #define	ERI_RCV_DVMA_ALLOC	0x0200
323 #define	ERI_XBUFS_HANDLE_ALLOC  0x0400
324 #define	ERI_XBUFS_KMEM_ALLOC    0x0800
325 #define	ERI_XBUFS_KMEM_DMABIND  0x1000
326 
327 
328 #define	ERI_DONT_STRIP_CRC
329 /*
330  * Translate a kernel virtual address to i/o address.
331  */
332 #define	ERI_IOPBIOADDR(erip, a) \
333 	((erip)->iopbiobase + ((uintptr_t)a - (erip)->iopbkbase))
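
/*
 * For illustration (hypothetical values): if the descriptor area is
 * mapped at kernel virtual address iopbkbase = 0x30001000 and its DMA
 * cookie address is iopbiobase = 0x7d001000, a descriptor at kernel
 * address 0x30001040 translates to i/o address
 * 0x7d001000 + (0x30001040 - 0x30001000) = 0x7d001040.
 */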
334 
335 /*
336  * ERI Configuration Register Value
337  * Used to configure parameters that define DMA burst
338  * and internal arbitration behavior.
339  * For equal TX and RX bursts, set the following in the global
340  * configuration register:
341  * static	int	global_config = 0x42;
342  */
343 
344 /*
345  * ERI ERX Interrupt Blanking Time
346  * Each count is about 16 us (2048 clocks) for 66 MHz PCI.
347  */
348 static	int	intr_blank_time = 6;	/* for about 96 us */
349 static	int	intr_blank_packets = 8;	/* intr after 8 pkts */
350 
351 /*
352  * ERX PAUSE Threshold Register value
353  * The following value is for an OFF Threshold of about 15.5 Kbytes
354  * and an ON Threshold of 4K bytes.
355  */
356 static	int rx_pause_threshold = 0xf8 | (0x40 << 12);
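/*
 * Checking the arithmetic, assuming both threshold fields count
 * 64-byte units: OFF = 0xf8 = 248, and 248 * 64 = 15872 bytes (about
 * 15.5 Kbytes); ON = 0x40 = 64, placed at bit 12, and 64 * 64 = 4096
 * bytes (4 Kbytes), matching the comment above.
 */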
357 static	int eri_reinit_fatal = 0;
358 #ifdef	DEBUG
359 static	int noteri = 0;
360 #endif
361 
362 #ifdef	ERI_TX_HUNG
363 static	int eri_reinit_txhung = 0;
364 #endif
365 
366 #ifdef ERI_HDX_BUG_WORKAROUND
367 /*
368  * By default, enable padding in hdx mode to 97 bytes.
369  * To disable it, add the following line to /etc/system:
370  * set eri:eri_hdx_pad_enable=0
371  */
372 static	uchar_t eri_hdx_pad_enable = 1;
373 #endif
374 
375 /*
376  * Default values to initialize the cache line size and latency timer
377  * registers in the PCI configuration space.
378  * ERI_G_CACHE_LINE_SIZE_16 is defined as 16 since RIO expects the value
379  * in units of 4 bytes.
380  */
381 #ifdef ERI_PM_WORKAROUND_PCI
382 static int eri_pci_cache_line = ERI_G_CACHE_LINE_SIZE_32; /* 128 bytes */
383 static int eri_pci_latency_timer = 0xff;		/* 255 PCI cycles */
384 #else
385 static int eri_pci_cache_line = ERI_G_CACHE_LINE_SIZE_16; /* 64 bytes */
386 static int eri_pci_latency_timer = 0x40;		/* 64 PCI cycles */
387 #endif
388 #define	ERI_CACHE_LINE_SIZE	(eri_pci_cache_line << ERI_G_CACHE_BIT)
389 
390 /*
391  * Claim the device is ultra-capable of burst in the beginning.  Use
392  * the value returned by ddi_dma_burstsizes() to actually set the ERI
393  * global configuration register later.
394  *
395  * PCI_ERI supports Infinite burst or 64-byte-multiple bursts.
396  */
397 #define	ERI_LIMADDRLO	((uint64_t)0x00000000)
398 #define	ERI_LIMADDRHI	((uint64_t)0xffffffff)
399 
400 static ddi_dma_attr_t dma_attr = {
401 	DMA_ATTR_V0,		/* version number. */
402 	(uint64_t)ERI_LIMADDRLO, /* low address */
403 	(uint64_t)ERI_LIMADDRHI, /* high address */
404 	(uint64_t)0x00ffffff,	/* address counter max */
405 	(uint64_t)1,		/* alignment */
406 	(uint_t)0xe000e0,	/* dlim_burstsizes for 32 and 64 bit xfers */
407 	(uint32_t)0x1,		/* minimum transfer size */
408 	(uint64_t)0x7fffffff,	/* maximum transfer size */
409 	(uint64_t)0x00ffffff,	/* maximum segment size */
410 	1,			/* scatter/gather list length */
411 	(uint32_t)1,		/* granularity */
412 	(uint_t)0		/* attribute flags */
413 };
414 
415 static ddi_dma_attr_t desc_dma_attr = {
416 	DMA_ATTR_V0,		/* version number. */
417 	(uint64_t)ERI_LIMADDRLO, /* low address */
418 	(uint64_t)ERI_LIMADDRHI, /* high address */
419 	(uint64_t)0x00ffffff,	/* address counter max */
420 	(uint64_t)8,		/* alignment */
421 	(uint_t)0xe000e0,	/* dlim_burstsizes for 32 and 64 bit xfers */
422 	(uint32_t)0x1,		/* minimum transfer size */
423 	(uint64_t)0x7fffffff,	/* maximum transfer size */
424 	(uint64_t)0x00ffffff,	/* maximum segment size */
425 	1,			/* scatter/gather list length */
426 	16,			/* granularity */
427 	0			/* attribute flags */
428 };
429 
430 static ddi_device_acc_attr_t buf_attr = {
431 	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
432 	DDI_NEVERSWAP_ACC,	/* devacc_attr_endian_flags */
433 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
434 	DDI_DEFAULT_ACC,	/* devacc_attr_access */
435 };
436 
437 ddi_dma_lim_t eri_dma_limits = {
438 	(uint64_t)ERI_LIMADDRLO, /* dlim_addr_lo */
439 	(uint64_t)ERI_LIMADDRHI, /* dlim_addr_hi */
440 	(uint64_t)ERI_LIMADDRHI, /* dlim_cntr_max */
441 	(uint_t)0x00e000e0,	/* dlim_burstsizes for 32 and 64 bit xfers */
442 	(uint32_t)0x1,		/* dlim_minxfer */
443 	1024			/* dlim_speed */
444 };
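
/*
 * A decoding note (standard DDI burstsize encoding, stated here as an
 * aid): each set bit n in 0x00e000e0 requests 2^n-byte bursts, so the
 * low byte 0xe0 asks for 32-, 64- and 128-byte bursts, and the same
 * pattern in bits 16-23 requests them for 64-bit transfers.
 */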
445 
446 /*
447  * Link Configuration variables
448  *
449  * On Motherboard implementations, 10/100 Mbps speeds may be supported
450  * by using both the Serial Link and the MII on Non-serial-link interface.
451  * When both links are present, the driver automatically tries to bring up
452  * both. If both are up, the Gigabit Serial Link is selected for use by
453  * default. The following configuration variable is used to force the selection
454  * of one of the links when both are up.
455  * To change the default selection to the MII link when both the Serial
456  * Link and the MII link are up, change eri_default_link to 1.
457  *
458  * Once a link is in use, the driver will continue to use that link until it
459  * goes down. When it goes down, the driver will look at the status of both the
460  * links again for link selection.
461  *
462  * Currently the standard is not stable w.r.t. gigabit link configuration
463  * using auto-negotiation procedures. Meanwhile, the link may be configured
464  * in "forced" mode using the "autonegotiation enable" bit (bit-12) in the
465  * PCS MII Command Register. In this mode the PCS sends "idles" until it sees
466  * "idles" as initialization, instead of the Link Configuration protocol
467  * where a Config register is exchanged. In this mode, the ERI is programmed
468  * for full-duplex operation with both pauseTX and pauseRX (for flow control)
469  * enabled.
470  */
471 
472 static	int	select_link = 0; /* automatic selection */
473 static	int	default_link = 0; /* Select Serial link if both are up */
474 
475 /*
476  * The following variables are used for configuring link-operation
477  * for all the "eri" interfaces in the system.
478  * Later these parameters may be changed per interface using "ndd" command
479  * These parameters may also be specified as properties using the .conf
480  * file mechanism for each interface.
481  */
482 
483 /*
484  * The following variable value will be overridden by "link-pulse-disabled"
485  * property which may be created by OBP or eri.conf file. This property is
486  * applicable only for 10 Mbps links.
487  */
488 static	int	link_pulse_disabled = 0;	/* link pulse disabled */
489 
490 /* For MII-based FastEthernet links */
491 static	int	adv_autoneg_cap = 1;
492 static	int	adv_100T4_cap = 0;
493 static	int	adv_100fdx_cap = 1;
494 static	int	adv_100hdx_cap = 1;
495 static	int	adv_10fdx_cap = 1;
496 static	int	adv_10hdx_cap = 1;
497 static	int	adv_pauseTX_cap =  0;
498 static	int	adv_pauseRX_cap =  0;
499 
500 /*
501  * The following gap parameters are in terms of byte times.
502  */
503 static	int	ipg0 = 8;
504 static	int	ipg1 = 8;
505 static	int	ipg2 = 4;
506 
507 static	int	lance_mode = 1;		/* to enable LANCE mode */
508 static	int	mifpoll_enable = 0;	/* to enable mif poll */
509 static	int	ngu_enable = 0;		/* to enable Never Give Up mode */
510 
511 static	int	eri_force_mlf = 0; 	/* to enable mif poll */
512 static	int	eri_phy_mintrans = 1;	/* Lu3X31T mintrans algorithm */
513 /*
514  * For the MII interface, the External Transceiver is selected when present.
515  * The following variable is used to select the Internal Transceiver even
516  * when the External Transceiver is present.
517  */
518 static	int	use_int_xcvr = 0;
519 static	int	pace_size = 0;	/* Do not use pacing for now */
520 
521 static	int	eri_use_dvma_rx = 0;	/* =1:use dvma */
522 static	int	eri_rx_bcopy_max = RX_BCOPY_MAX;	/* bcopy() if pkt <= this size */
523 static	int	eri_overflow_reset = 1;	/* global reset if rx_fifo_overflow */
524 static	int	eri_tx_ring_size = 2048; /* number of entries in tx ring */
525 static	int	eri_rx_ring_size = 1024; /* number of entries in rx ring */
526 /*
527  * The following parameters may be configured by the user. If they are not
528  * configured by the user, the values will be based on the capabilities of
529  * the transceiver.
530  * The value "ERI_NOTUSR" is ORed with the parameter value to indicate values
531  * which are NOT configured by the user.
532  */
533 
534 #define	ERI_NOTUSR	0x0f000000
535 #define	ERI_MASK_1BIT	0x1
536 #define	ERI_MASK_2BIT	0x3
537 #define	ERI_MASK_8BIT	0xff
538 
539 
540 /*
541  * Note:
542  * ERI has all of the above capabilities.
543  * Only when an External Transceiver is selected for MII-based FastEthernet
544  * link operation, the capabilities depend upon the capabilities of the
545  * External Transceiver.
546  */
547 
548 /* ------------------------------------------------------------------------- */
549 
550 static  param_t	param_arr[] = {
551 	/* min		max		value	r/w/hidden+name */
552 	{  0,		2,		2,	"-transceiver_inuse"},
553 	{  0,		1,		0,	"-link_status"},
554 	{  0,		1,		0,	"-link_speed"},
555 	{  0,		1,		0,	"-link_mode"},
556 	{  0,		255,		8,	"+ipg1"},
557 	{  0,		255,		4,	"+ipg2"},
558 	{  0,		1,		0,	"+use_int_xcvr"},
559 	{  0,		255,		0,	"+pace_size"},
560 	{  0,		1,		1,	"+adv_autoneg_cap"},
561 	{  0,		1,		1,	"+adv_100T4_cap"},
562 	{  0,		1,		1,	"+adv_100fdx_cap"},
563 	{  0,		1,		1,	"+adv_100hdx_cap"},
564 	{  0,		1,		1,	"+adv_10fdx_cap"},
565 	{  0,		1,		1,	"+adv_10hdx_cap"},
566 	{  0,		1,		1,	"-autoneg_cap"},
567 	{  0,		1,		1,	"-100T4_cap"},
568 	{  0,		1,		1,	"-100fdx_cap"},
569 	{  0,		1,		1,	"-100hdx_cap"},
570 	{  0,		1,		1,	"-10fdx_cap"},
571 	{  0,		1,		1,	"-10hdx_cap"},
572 	{  0,		1,		0,	"-lp_autoneg_cap"},
573 	{  0,		1,		0,	"-lp_100T4_cap"},
574 	{  0,		1,		0,	"-lp_100fdx_cap"},
575 	{  0,		1,		0,	"-lp_100hdx_cap"},
576 	{  0,		1,		0,	"-lp_10fdx_cap"},
577 	{  0,		1,		0,	"-lp_10hdx_cap"},
578 	{  0,		1,		1,	"+lance_mode"},
579 	{  0,		31,		8,	"+ipg0"},
580 	{  0,		127,		6,	"+intr_blank_time"},
581 	{  0,		255,		8,	"+intr_blank_packets"},
582 	{  0,		1,		1,	"!serial-link"},
583 	{  0,		2,		1,	"!non-serial-link"},
584 	{  0,		1,		0,	"%select-link"},
585 	{  0,		1,		0,	"%default-link"},
586 	{  0,		2,		0,	"!link-in-use"},
587 	{  0,		1,		1,	"%adv_asm_dir_cap"},
588 	{  0,		1,		1,	"%adv_pause_cap"},
589 	{  0,		1,		0,	"!asm_dir_cap"},
590 	{  0,		1,		0,	"!pause_cap"},
591 	{  0,		1,		0,	"!lp_asm_dir_cap"},
592 	{  0,		1,		0,	"!lp_pause_cap"},
593 };
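
/*
 * An inference from the "r/w/hidden+name" legend above (not a
 * statement of the ndd API): the leading character of each name
 * appears to encode its visibility, with '+' read-write, '-'
 * read-only, and '!'/'%' hidden variants, as interpreted by
 * eri_param_register().
 */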
594 
595 DDI_DEFINE_STREAM_OPS(eri_dev_ops, nulldev, nulldev, eri_attach, eri_detach,
596 	nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);
597 
598 /*
599  * This is the loadable module wrapper.
600  */
601 #include <sys/modctl.h>
602 
603 /*
604  * Module linkage information for the kernel.
605  */
606 static struct modldrv modldrv = {
607 	&mod_driverops,	/* Type of module.  This one is a driver */
608 	"Sun RIO 10/100 Mb Ethernet",
609 	&eri_dev_ops,	/* driver ops */
610 };
611 
612 static struct modlinkage modlinkage = {
613 	MODREV_1, &modldrv, NULL
614 };
615 
616 /*
617  * Hardware Independent Functions
618  * New Section
619  */
620 
621 int
622 _init(void)
623 {
624 	int	status;
625 
626 	mac_init_ops(&eri_dev_ops, "eri");
627 	if ((status = mod_install(&modlinkage)) != 0) {
628 		mac_fini_ops(&eri_dev_ops);
629 	}
630 	return (status);
631 }
632 
633 int
634 _fini(void)
635 {
636 	int status;
637 
638 	status = mod_remove(&modlinkage);
639 	if (status == 0) {
640 		mac_fini_ops(&eri_dev_ops);
641 	}
642 	return (status);
643 }
644 
645 int
646 _info(struct modinfo *modinfop)
647 {
648 	return (mod_info(&modlinkage, modinfop));
649 }
650 
651 
652 /*
653  * Interface exists: make available by filling in network interface
654  * record.  System will initialize the interface when it is ready
655  * to accept packets.
656  */
657 static int
658 eri_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
659 {
660 	struct eri *erip = NULL;
661 	mac_register_t *macp = NULL;
662 	int	regno;
663 	boolean_t	doinit;
664 	boolean_t	mutex_inited = B_FALSE;
665 	boolean_t	intr_add = B_FALSE;
666 
667 	switch (cmd) {
668 	case DDI_ATTACH:
669 		break;
670 
671 	case DDI_RESUME:
672 		if ((erip = ddi_get_driver_private(dip)) == NULL)
673 			return (DDI_FAILURE);
674 
675 		mutex_enter(&erip->intrlock);
676 		erip->flags &= ~ERI_SUSPENDED;
677 		erip->init_macregs = 1;
678 		param_linkup = 0;
679 		erip->stats.link_up = LINK_STATE_DOWN;
680 		erip->linkcheck = 0;
681 
682 		doinit =  (erip->flags & ERI_STARTED) ? B_TRUE : B_FALSE;
683 		mutex_exit(&erip->intrlock);
684 
685 		if (doinit && !eri_init(erip)) {
686 			return (DDI_FAILURE);
687 		}
688 		return (DDI_SUCCESS);
689 
690 	default:
691 		return (DDI_FAILURE);
692 	}
693 
694 	/*
695 	 * Allocate soft device data structure
696 	 */
697 	erip = kmem_zalloc(sizeof (struct eri), KM_SLEEP);
698 
699 	/*
700 	 * Initialize as many elements as possible.
701 	 */
702 	ddi_set_driver_private(dip, erip);
703 	erip->dip = dip;			/* dip	*/
704 	erip->instance = ddi_get_instance(dip);	/* instance */
705 	erip->flags = 0;
706 	erip->multi_refcnt = 0;
707 	erip->promisc = B_FALSE;
708 
709 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
710 		ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
711 		    "mac_alloc failed");
712 		goto attach_fail;
713 	}
714 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
715 	macp->m_driver = erip;
716 	macp->m_dip = dip;
717 	macp->m_src_addr = erip->ouraddr;
718 	macp->m_callbacks = &eri_m_callbacks;
719 	macp->m_min_sdu = 0;
720 	macp->m_max_sdu = ETHERMTU;
721 	macp->m_margin = VLAN_TAGSZ;
722 
723 	/*
724 	 * Map in the device registers.
725 	 * Separate pointers will be set up for the following
726 	 * register groups within the GEM Register Space:
727 	 * 	Global register set
728 	 * 	ETX register set
729 	 * 	ERX register set
730 	 * 	BigMAC register set.
731 	 * 	MIF register set
732 	 */
733 
734 	if (ddi_dev_nregs(dip, &regno) != (DDI_SUCCESS)) {
735 		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
736 		    "ddi_dev_nregs failed, returned %d", regno);
737 		goto attach_fail;
738 	}
739 
740 	/*
741 	 * Map the PCI config space
742 	 */
743 	if (pci_config_setup(dip, &erip->pci_config_handle) != DDI_SUCCESS) {
744 		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
745 		    "%s pci_config_setup()", config_space_fatal_msg);
746 		goto attach_fail;
747 	}
748 
749 	/*
750 	 * Initialize device attributes structure
751 	 */
752 	erip->dev_attr.devacc_attr_version =	DDI_DEVICE_ATTR_V0;
753 	erip->dev_attr.devacc_attr_dataorder =	DDI_STRICTORDER_ACC;
754 	erip->dev_attr.devacc_attr_endian_flags =	DDI_STRUCTURE_LE_ACC;
755 
756 	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&(erip->globregp), 0, 0,
757 	    &erip->dev_attr, &erip->globregh)) {
758 		goto attach_fail;
759 	}
760 	erip->etxregh =		erip->globregh;
761 	erip->erxregh =		erip->globregh;
762 	erip->bmacregh =	erip->globregh;
763 	erip->mifregh =		erip->globregh;
764 
765 	erip->etxregp =  (void *)(((caddr_t)erip->globregp) + 0x2000);
766 	erip->erxregp =  (void *)(((caddr_t)erip->globregp) + 0x4000);
767 	erip->bmacregp = (void *)(((caddr_t)erip->globregp) + 0x6000);
768 	erip->mifregp =  (void *)(((caddr_t)erip->globregp) + 0x6200);
769 
770 	/*
771 	 * Map the software reset register.
772 	 */
773 	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&(erip->sw_reset_reg),
774 	    0x1010, 4, &erip->dev_attr, &erip->sw_reset_regh)) {
775 		ERI_FAULT_MSG1(erip, SEVERITY_MID, ERI_VERB_MSG,
776 		    mregs_4soft_reset_fail_msg);
777 		goto attach_fail;
778 	}
779 
780 	/*
781 	 * Try to stop the device.
782 	 * This keeps it quiescent until we are ready to handle interrupts.
783 	 */
784 	if (eri_stop(erip))
785 		goto attach_fail;
786 
787 	/*
788 	 * set PCI latency timer register.
789 	 */
790 	pci_config_put8(erip->pci_config_handle, PCI_CONF_LATENCY_TIMER,
791 	    (uchar_t)eri_pci_latency_timer);
792 
793 	if (ddi_intr_hilevel(dip, 0)) {
794 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
795 		    " high-level interrupts are not supported");
796 		goto attach_fail;
797 	}
798 
799 	/*
800 	 * Get the interrupt cookie so the mutexes can be
801 	 * initialized.
802 	 */
803 	if (ddi_get_iblock_cookie(dip, 0, &erip->cookie) != DDI_SUCCESS)
804 		goto attach_fail;
805 
806 	/*
807 	 * Initialize mutexes for this device.
808 	 */
809 	mutex_init(&erip->xmitlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
810 	mutex_init(&erip->intrlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
811 	mutex_init(&erip->linklock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
812 	mutex_init(&erip->xcvrlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
813 
814 	mutex_inited = B_TRUE;
815 
816 	/*
817 	 * Add interrupt to system
818 	 */
819 	if (ddi_add_intr(dip, 0, &erip->cookie, 0, eri_intr, (caddr_t)erip) ==
820 	    DDI_SUCCESS)
821 		intr_add = B_TRUE;
822 	else {
823 		goto attach_fail;
824 	}
825 
826 	/*
827 	 * Set up the ethernet mac address.
828 	 */
829 	(void) eri_setup_mac_address(erip, dip);
830 
831 	if (eri_init_xfer_params(erip))
832 		goto attach_fail;
833 
834 	if (eri_burstsize(erip) == DDI_FAILURE) {
835 		goto attach_fail;
836 	}
837 
838 	/*
839 	 * Set up fewer receive buffers.
840 	 */
841 	ERI_RPENDING = eri_rx_ring_size;
842 	ERI_TPENDING = eri_tx_ring_size;
843 
844 	erip->rpending_mask = ERI_RPENDING - 1;
845 	erip->rmdmax_mask = ERI_RPENDING - 1;
846 	erip->mif_config = (ERI_PHY_BMSR << ERI_MIF_CFGPR_SHIFT);
847 
848 	erip->stats.pmcap = ERI_PMCAP_NONE;
849 	if (pci_report_pmcap(dip, PCI_PM_IDLESPEED, (void *)4000) ==
850 	    DDI_SUCCESS)
851 		erip->stats.pmcap = ERI_PMCAP_4MHZ;
852 
853 	if (mac_register(macp, &erip->mh) != 0)
854 		goto attach_fail;
855 
856 	mac_free(macp);
857 
858 	return (DDI_SUCCESS);
859 
860 attach_fail:
861 	if (erip->pci_config_handle)
862 		(void) pci_config_teardown(&erip->pci_config_handle);
863 
864 	if (mutex_inited) {
865 		mutex_destroy(&erip->xmitlock);
866 		mutex_destroy(&erip->intrlock);
867 		mutex_destroy(&erip->linklock);
868 		mutex_destroy(&erip->xcvrlock);
869 	}
870 
871 	ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, attach_fail_msg);
872 
873 	if (intr_add)
874 		ddi_remove_intr(dip, 0, erip->cookie);
875 
876 	if (erip->globregh)
877 		ddi_regs_map_free(&erip->globregh);
878 
879 	if (macp != NULL)
880 		mac_free(macp);
881 	if (erip != NULL)
882 		kmem_free(erip, sizeof (*erip));
883 
884 	return (DDI_FAILURE);
885 }
886 
887 static int
888 eri_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
889 {
890 	struct eri 	*erip;
891 	int i;
892 
893 	if ((erip = ddi_get_driver_private(dip)) == NULL) {
894 		/*
895 		 * No resources allocated.
896 		 */
897 		return (DDI_FAILURE);
898 	}
899 
900 	switch (cmd) {
901 	case DDI_DETACH:
902 		break;
903 
904 	case DDI_SUSPEND:
905 		erip->flags |= ERI_SUSPENDED;
906 		eri_uninit(erip);
907 		return (DDI_SUCCESS);
908 
909 	default:
910 		return (DDI_FAILURE);
911 	}
912 
913 	if (erip->flags & (ERI_RUNNING | ERI_SUSPENDED)) {
914 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, busy_msg);
915 		return (DDI_FAILURE);
916 	}
917 
918 	if (mac_unregister(erip->mh) != 0) {
919 		return (DDI_FAILURE);
920 	}
921 
922 	/*
923 	 * Make the device quiescent
924 	 */
925 	(void) eri_stop(erip);
926 
927 	/*
928 	 * Remove instance of the intr
929 	 */
930 	ddi_remove_intr(dip, 0, erip->cookie);
931 
932 	if (erip->pci_config_handle)
933 		(void) pci_config_teardown(&erip->pci_config_handle);
934 
935 	/*
936 	 * Destroy all mutexes and data structures allocated during
937 	 * attach time.
938 	 */
939 
940 	if (erip->globregh)
941 		ddi_regs_map_free(&erip->globregh);
942 
943 	erip->etxregh =		NULL;
944 	erip->erxregh =		NULL;
945 	erip->bmacregh =	NULL;
946 	erip->mifregh =		NULL;
947 	erip->globregh =	NULL;
948 
949 	if (erip->sw_reset_regh)
950 		ddi_regs_map_free(&erip->sw_reset_regh);
951 
952 	if (erip->ksp)
953 		kstat_delete(erip->ksp);
954 
955 	eri_stop_timer(erip); /* acquire linklock */
956 	eri_start_timer(erip, eri_check_link, 0);
957 	mutex_destroy(&erip->xmitlock);
958 	mutex_destroy(&erip->intrlock);
959 	mutex_destroy(&erip->linklock);
960 	mutex_destroy(&erip->xcvrlock);
961 
962 	if (erip->md_h) {
963 		if (ddi_dma_unbind_handle(erip->md_h) ==
964 		    DDI_FAILURE)
965 			return (DDI_FAILURE);
966 		ddi_dma_mem_free(&erip->mdm_h);
967 		ddi_dma_free_handle(&erip->md_h);
968 	}
969 
970 	if (eri_freebufs(erip))
971 		return (DDI_FAILURE);
972 
973 	/* dvma handle case */
974 
975 	if (erip->eri_dvmarh) {
976 		(void) dvma_release(erip->eri_dvmarh);
977 		erip->eri_dvmarh = NULL;
978 	}
979 /*
980  *	xmit_dma_mode, erip->ndmaxh[i]=NULL for dvma
981  */
982 	else {
983 		for (i = 0; i < ERI_RPENDING; i++)
984 			if (erip->ndmarh[i])
985 				ddi_dma_free_handle(&erip->ndmarh[i]);
986 	}
987 /*
988  *	Release TX buffer
989  */
990 	if (erip->tbuf_ioaddr != 0) {
991 		(void) ddi_dma_unbind_handle(erip->tbuf_handle);
992 		erip->tbuf_ioaddr = 0;
993 	}
994 	if (erip->tbuf_kaddr != NULL) {
995 		ddi_dma_mem_free(&erip->tbuf_acch);
996 		erip->tbuf_kaddr = NULL;
997 	}
998 	if (erip->tbuf_handle != NULL) {
999 		ddi_dma_free_handle(&erip->tbuf_handle);
1000 		erip->tbuf_handle = NULL;
1001 	}
1002 
1003 	eri_param_cleanup(erip);
1004 
1005 	ddi_set_driver_private(dip, NULL);
1006 	kmem_free((caddr_t)erip, sizeof (struct eri));
1007 
1008 	return (DDI_SUCCESS);
1009 }
1010 
1011 /*
1012  * To set up the mac address for the network interface:
1013  * The adapter card may support a local mac address which is published
1014  * in a device node property "local-mac-address". This mac address is
1015  * treated as the factory-installed mac address for the DLPI interface.
1016  * If the adapter firmware has used the device for diskless boot
1017  * operation it publishes a property called "mac-address" for use by
1018  * inetboot and the device driver.
1019  * If "mac-address" is not found, the system options property
1020  * "local-mac-address" is used to select the mac-address. If this option
1021  * is set to "true", and "local-mac-address" has been found, then
1022  * local-mac-address is used; otherwise the system mac address is used
1023  * by calling the "localetheraddr()" function.
1024  */
1025 
1026 static void
1027 eri_setup_mac_address(struct eri *erip, dev_info_t *dip)
1028 {
1029 	uchar_t			*prop;
1030 	char			*uselocal;
1031 	unsigned		prop_len;
1032 	uint32_t		addrflags = 0;
1033 	struct ether_addr	factaddr;
1034 
1035 	/*
1036 	 * Check if it is an adapter with its own local mac address.
1037 	 * If it is present, save it as the "factory-address"
1038 	 * for this adapter.
1039 	 */
1040 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1041 	    "local-mac-address", &prop, &prop_len) == DDI_PROP_SUCCESS) {
1042 		if (prop_len == ETHERADDRL) {
1043 			addrflags = ERI_FACTADDR_PRESENT;
1044 			bcopy(prop, &factaddr, ETHERADDRL);
1045 			ERI_FAULT_MSG2(erip, SEVERITY_NONE, ERI_VERB_MSG,
1046 			    lether_addr_msg, ether_sprintf(&factaddr));
1047 		}
1048 		ddi_prop_free(prop);
1049 	}
1050 	/*
1051 	 * Check if the adapter has published "mac-address" property.
1052 	 * If it is present, use it as the mac address for this device.
1053 	 */
1054 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1055 	    "mac-address", &prop, &prop_len) == DDI_PROP_SUCCESS) {
1056 		if (prop_len >= ETHERADDRL) {
1057 			bcopy(prop, erip->ouraddr, ETHERADDRL);
1058 			ddi_prop_free(prop);
1059 			return;
1060 		}
1061 		ddi_prop_free(prop);
1062 	}
1063 
1064 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, "local-mac-address?",
1065 	    &uselocal) == DDI_PROP_SUCCESS) {
1066 		if ((strcmp("true", uselocal) == 0) &&
1067 		    (addrflags & ERI_FACTADDR_PRESENT)) {
1068 			addrflags |= ERI_FACTADDR_USE;
1069 			bcopy(&factaddr, erip->ouraddr, ETHERADDRL);
1070 			ddi_prop_free(uselocal);
1071 			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
1072 			    lmac_addr_msg);
1073 			return;
1074 		}
1075 		ddi_prop_free(uselocal);
1076 	}
1077 
1078 	/*
1079 	 * Get the system ethernet address.
1080 	 */
1081 	(void) localetheraddr(NULL, &factaddr);
1082 	bcopy(&factaddr, erip->ouraddr, ETHERADDRL);
1083 }
1084 
1085 
1086 /*
1087  * Calculate the bit in the multicast address filter that selects the given
1088  * address.
1089  * Note: For ERI, the last 8 bits are used.
1090  */
1091 
1092 static uint32_t
1093 eri_ladrf_bit(const uint8_t *addr)
1094 {
1095 	uint32_t crc;
1096 
1097 	CRC32(crc, addr, ETHERADDRL, -1U, crc32_table);
1098 
1099 	/*
1100 	 * Just want the 8 most significant bits.
1101 	 */
1102 	return ((~crc) >> 24);
1103 }
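
/*
 * Usage sketch (illustrative, with assumed LADRF_SET() semantics):
 * the value returned above indexes one of 256 filter bits, which
 * eri_m_multicst() reference-counts and applies with LADRF_SET() and
 * LADRF_CLR(). If the bits are packed 16 per ladrf[] word, bit n
 * lives at word n / 16, mask 1 << (n % 16); e.g. n = 36 selects
 * ladrf[2] with mask 0x0010.
 */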
1104 
1105 static void
1106 eri_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1107 {
1108 	struct	eri	*erip = arg;
1109 	struct	iocblk	*iocp = (void *)mp->b_rptr;
1110 	int	err;
1111 
1112 	ASSERT(erip != NULL);
1113 
1114 	/*
1115 	 * Privilege checks.
1116 	 */
1117 	switch (iocp->ioc_cmd) {
1118 	case ERI_SET_LOOP_MODE:
1119 	case ERI_ND_SET:
1120 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1121 		if (err != 0) {
1122 			miocnak(wq, mp, 0, err);
1123 			return;
1124 		}
1125 		break;
1126 	default:
1127 		break;
1128 	}
1129 
1130 	switch (iocp->ioc_cmd) {
1131 	case ERI_ND_GET:
1132 	case ERI_ND_SET:
1133 		eri_process_ndd_ioctl(erip, wq, mp, iocp->ioc_cmd);
1134 		break;
1135 
1136 	case ERI_SET_LOOP_MODE:
1137 	case ERI_GET_LOOP_MODE:
1138 		/*
1139 		 * XXX: Consider updating this to the new netlb ioctls.
1140 		 */
1141 		eri_loopback(erip, wq, mp);
1142 		break;
1143 
1144 	default:
1145 		miocnak(wq, mp, 0, EINVAL);
1146 		break;
1147 	}
1148 
1149 	ASSERT(!MUTEX_HELD(&erip->linklock));
1150 }
1151 
1152 static void
1153 eri_loopback(struct eri *erip, queue_t *wq, mblk_t *mp)
1154 {
1155 	struct	iocblk	*iocp = (void *)mp->b_rptr;
1156 	loopback_t	*al;
1157 
1158 	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < sizeof (loopback_t)) {
1159 		miocnak(wq, mp, 0, EINVAL);
1160 		return;
1161 	}
1162 
1163 	al = (void *)mp->b_cont->b_rptr;
1164 
1165 	switch (iocp->ioc_cmd) {
1166 	case ERI_SET_LOOP_MODE:
1167 		switch (al->loopback) {
1168 		case ERI_LOOPBACK_OFF:
1169 			erip->flags &= (~ERI_MACLOOPBACK & ~ERI_SERLOOPBACK);
1170 			/* force link status to go down */
1171 			param_linkup = 0;
1172 			erip->stats.link_up = LINK_STATE_DOWN;
1173 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1174 			(void) eri_init(erip);
1175 			break;
1176 
1177 		case ERI_MAC_LOOPBACK_ON:
1178 			erip->flags |= ERI_MACLOOPBACK;
1179 			erip->flags &= ~ERI_SERLOOPBACK;
1180 			param_linkup = 0;
1181 			erip->stats.link_up = LINK_STATE_DOWN;
1182 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1183 			(void) eri_init(erip);
1184 			break;
1185 
1186 		case ERI_PCS_LOOPBACK_ON:
1187 			break;
1188 
1189 		case ERI_SER_LOOPBACK_ON:
1190 			erip->flags |= ERI_SERLOOPBACK;
1191 			erip->flags &= ~ERI_MACLOOPBACK;
1192 			/* force link status to go down */
1193 			param_linkup = 0;
1194 			erip->stats.link_up = LINK_STATE_DOWN;
1195 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1196 			(void) eri_init(erip);
1197 			break;
1198 
1199 		default:
1200 			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
1201 			    loopback_val_default);
1202 			miocnak(wq, mp, 0, EINVAL);
1203 			return;
1204 		}
1205 		miocack(wq, mp, 0, 0);
1206 		break;
1207 
1208 	case ERI_GET_LOOP_MODE:
1209 		al->loopback =	ERI_MAC_LOOPBACK_ON | ERI_PCS_LOOPBACK_ON |
1210 		    ERI_SER_LOOPBACK_ON;
1211 		miocack(wq, mp, sizeof (loopback_t), 0);
1212 		break;
1213 
1214 	default:
1215 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1216 		    loopback_cmd_default);
1217 	}
1218 }
1219 
1220 static int
1221 eri_m_promisc(void *arg, boolean_t on)
1222 {
1223 	struct	eri	*erip = arg;
1224 
1225 	mutex_enter(&erip->intrlock);
1226 	erip->promisc = on;
1227 	eri_init_rx(erip);
1228 	mutex_exit(&erip->intrlock);
1229 	return (0);
1230 }
1231 
1232 /*
1233  * This supports an unlimited number of members
1234  * in multicast groups.
1235  */
1236 static int
1237 eri_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
1238 {
1239 	struct eri		*erip = arg;
1240 	uint32_t 		ladrf_bit;
1241 
1242 	/*
1243 	 * If this address's bit was not already set in the local address
1244 	 * filter, add it and re-initialize the Hardware.
1245 	 */
1246 	ladrf_bit = eri_ladrf_bit(mca);
1247 
1248 	mutex_enter(&erip->intrlock);
1249 	if (add) {
1250 		erip->ladrf_refcnt[ladrf_bit]++;
1251 		if (erip->ladrf_refcnt[ladrf_bit] == 1) {
1252 			LADRF_SET(erip, ladrf_bit);
1253 			erip->multi_refcnt++;
1254 			eri_init_rx(erip);
1255 		}
1256 	} else {
1257 		erip->ladrf_refcnt[ladrf_bit]--;
1258 		if (erip->ladrf_refcnt[ladrf_bit] == 0) {
1259 			LADRF_CLR(erip, ladrf_bit);
1260 			erip->multi_refcnt--;
1261 			eri_init_rx(erip);
1262 		}
1263 	}
1264 	mutex_exit(&erip->intrlock);
1265 	return (0);
1266 }
1267 
1268 static int
1269 eri_m_unicst(void *arg, const uint8_t *macaddr)
1270 {
1271 	struct	eri	*erip = arg;
1272 
1273 	/*
1274 	 * Set new interface local address and re-init device.
1275 	 * This is destructive to any other streams attached
1276 	 * to this device.
1277 	 */
1278 	mutex_enter(&erip->intrlock);
1279 	bcopy(macaddr, &erip->ouraddr, ETHERADDRL);
1280 	eri_init_rx(erip);
1281 	mutex_exit(&erip->intrlock);
1282 	return (0);
1283 }
1284 
1285 /*ARGSUSED*/
1286 static boolean_t
1287 eri_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1288 {
1289 	switch (cap) {
1290 	case MAC_CAPAB_HCKSUM: {
1291 		uint32_t *hcksum_txflags = cap_data;
1292 		*hcksum_txflags = HCKSUM_INET_PARTIAL;
1293 		return (B_TRUE);
1294 	}
1295 	default:
1296 		return (B_FALSE);
1297 	}
1298 }
1299 
1300 static int
1301 eri_m_start(void *arg)
1302 {
1303 	struct eri	*erip = arg;
1304 
1305 	mutex_enter(&erip->intrlock);
1306 	erip->flags |= ERI_STARTED;
1307 	mutex_exit(&erip->intrlock);
1308 
1309 	if (!eri_init(erip)) {
1310 		mutex_enter(&erip->intrlock);
1311 		erip->flags &= ~ERI_STARTED;
1312 		mutex_exit(&erip->intrlock);
1313 		return (EIO);
1314 	}
1315 	return (0);
1316 }
1317 
1318 static void
1319 eri_m_stop(void *arg)
1320 {
1321 	struct eri	*erip = arg;
1322 
1323 	mutex_enter(&erip->intrlock);
1324 	erip->flags &= ~ERI_STARTED;
1325 	mutex_exit(&erip->intrlock);
1326 	eri_uninit(erip);
1327 }
1328 
1329 static int
1330 eri_m_stat(void *arg, uint_t stat, uint64_t *val)
1331 {
1332 	struct eri	*erip = arg;
1333 	struct stats	*esp;
1334 	boolean_t	macupdate = B_FALSE;
1335 
1336 	esp = &erip->stats;
1337 
1338 	mutex_enter(&erip->xmitlock);
1339 	if ((erip->flags & ERI_RUNNING) && (erip->flags & ERI_TXINIT)) {
1340 		erip->tx_completion =
1341 		    GET_ETXREG(tx_completion) & ETX_COMPLETION_MASK;
1342 		macupdate |= eri_reclaim(erip, erip->tx_completion);
1343 	}
1344 	mutex_exit(&erip->xmitlock);
1345 	if (macupdate)
1346 		mac_tx_update(erip->mh);
1347 
1348 	eri_savecntrs(erip);
1349 
1350 	switch (stat) {
1351 	case MAC_STAT_IFSPEED:
1352 		*val = esp->ifspeed * 1000000ULL;
1353 		break;
1354 	case MAC_STAT_MULTIRCV:
1355 		*val = esp->multircv;
1356 		break;
1357 	case MAC_STAT_BRDCSTRCV:
1358 		*val = esp->brdcstrcv;
1359 		break;
1360 	case MAC_STAT_IPACKETS:
1361 		*val = esp->ipackets64;
1362 		break;
1363 	case MAC_STAT_RBYTES:
1364 		*val = esp->rbytes64;
1365 		break;
1366 	case MAC_STAT_OBYTES:
1367 		*val = esp->obytes64;
1368 		break;
1369 	case MAC_STAT_OPACKETS:
1370 		*val = esp->opackets64;
1371 		break;
1372 	case MAC_STAT_IERRORS:
1373 		*val = esp->ierrors;
1374 		break;
1375 	case MAC_STAT_OERRORS:
1376 		*val = esp->oerrors;
1377 		break;
1378 	case MAC_STAT_MULTIXMT:
1379 		*val = esp->multixmt;
1380 		break;
1381 	case MAC_STAT_BRDCSTXMT:
1382 		*val = esp->brdcstxmt;
1383 		break;
1384 	case MAC_STAT_NORCVBUF:
1385 		*val = esp->norcvbuf;
1386 		break;
1387 	case MAC_STAT_NOXMTBUF:
1388 		*val = esp->noxmtbuf;
1389 		break;
1390 	case MAC_STAT_UNDERFLOWS:
1391 		*val = esp->txmac_urun;
1392 		break;
1393 	case MAC_STAT_OVERFLOWS:
1394 		*val = esp->rx_overflow;
1395 		break;
1396 	case MAC_STAT_COLLISIONS:
1397 		*val = esp->collisions;
1398 		break;
1399 	case ETHER_STAT_ALIGN_ERRORS:
1400 		*val = esp->rx_align_err;
1401 		break;
1402 	case ETHER_STAT_FCS_ERRORS:
1403 		*val = esp->rx_crc_err;
1404 		break;
1405 	case ETHER_STAT_EX_COLLISIONS:
1406 		*val = esp->excessive_coll;
1407 		break;
1408 	case ETHER_STAT_TX_LATE_COLLISIONS:
1409 		*val = esp->late_coll;
1410 		break;
1411 	case ETHER_STAT_FIRST_COLLISIONS:
1412 		*val = esp->first_coll;
1413 		break;
1414 	case ETHER_STAT_LINK_DUPLEX:
1415 		*val = esp->link_duplex;
1416 		break;
1417 	case ETHER_STAT_TOOLONG_ERRORS:
1418 		*val = esp->rx_toolong_pkts;
1419 		break;
1420 	case ETHER_STAT_TOOSHORT_ERRORS:
1421 		*val = esp->rx_runt;
1422 		break;
1423 
1424 	case ETHER_STAT_XCVR_ADDR:
1425 		*val = erip->phyad;
1426 		break;
1427 
1428 	case ETHER_STAT_XCVR_INUSE:
1429 		*val = XCVR_100X;	/* should always be 100X for now */
1430 		break;
1431 
1432 	case ETHER_STAT_CAP_100FDX:
1433 		*val = param_bmsr_100fdx;
1434 		break;
1435 	case ETHER_STAT_CAP_100HDX:
1436 		*val = param_bmsr_100hdx;
1437 		break;
1438 	case ETHER_STAT_CAP_10FDX:
1439 		*val = param_bmsr_10fdx;
1440 		break;
1441 	case ETHER_STAT_CAP_10HDX:
1442 		*val = param_bmsr_10hdx;
1443 		break;
1444 	case ETHER_STAT_CAP_AUTONEG:
1445 		*val = param_bmsr_ancap;
1446 		break;
1447 	case ETHER_STAT_CAP_ASMPAUSE:
1448 		*val = param_bmsr_asm_dir;
1449 		break;
1450 	case ETHER_STAT_CAP_PAUSE:
1451 		*val = param_bmsr_pause;
1452 		break;
1453 	case ETHER_STAT_ADV_CAP_100FDX:
1454 		*val = param_anar_100fdx;
1455 		break;
1456 	case ETHER_STAT_ADV_CAP_100HDX:
1457 		*val = param_anar_100hdx;
1458 		break;
1459 	case ETHER_STAT_ADV_CAP_10FDX:
1460 		*val = param_anar_10fdx;
1461 		break;
1462 	case ETHER_STAT_ADV_CAP_10HDX:
1463 		*val = param_anar_10hdx;
1464 		break;
1465 	case ETHER_STAT_ADV_CAP_AUTONEG:
1466 		*val = param_autoneg;
1467 		break;
1468 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
1469 		*val = param_anar_asm_dir;
1470 		break;
1471 	case ETHER_STAT_ADV_CAP_PAUSE:
1472 		*val = param_anar_pause;
1473 		break;
1474 	case ETHER_STAT_LP_CAP_100FDX:
1475 		*val = param_anlpar_100fdx;
1476 		break;
1477 	case ETHER_STAT_LP_CAP_100HDX:
1478 		*val = param_anlpar_100hdx;
1479 		break;
1480 	case ETHER_STAT_LP_CAP_10FDX:
1481 		*val = param_anlpar_10fdx;
1482 		break;
1483 	case ETHER_STAT_LP_CAP_10HDX:
1484 		*val = param_anlpar_10hdx;
1485 		break;
1486 	case ETHER_STAT_LP_CAP_AUTONEG:
1487 		*val = param_aner_lpancap;
1488 		break;
1489 	case ETHER_STAT_LP_CAP_ASMPAUSE:
1490 		*val = param_anlpar_pauseTX;
1491 		break;
1492 	case ETHER_STAT_LP_CAP_PAUSE:
1493 		*val = param_anlpar_pauseRX;
1494 		break;
1495 	case ETHER_STAT_LINK_PAUSE:
1496 		*val = esp->pausing;
1497 		break;
1498 	case ETHER_STAT_LINK_ASMPAUSE:
1499 		*val = param_anar_asm_dir &&
1500 		    param_anlpar_pauseTX &&
1501 		    (param_anar_pause != param_anlpar_pauseRX);
1502 		break;
1503 	case ETHER_STAT_LINK_AUTONEG:
1504 		*val = param_autoneg && param_aner_lpancap;
1505 		break;
1506 	}
1507 	return (0);
1508 }
1509 
1510 /*
1511  * Hardware Functions
1512  * New Section
1513  */
1514 
1515 /*
1516  * Initialize the MAC registers. Some of the MAC registers are initialized
1517  * just once since Global Reset or MAC reset doesn't clear them. Others (like
1518  * Host MAC Address Registers) are cleared on every reset and have to be
1519  * reinitialized.
1520  */
1521 static void
1522 eri_init_macregs_generic(struct eri *erip)
1523 {
1524 	/*
1525 	 * set up the MAC parameter registers once
1526 	 * after power cycle. SUSPEND/RESUME also requires
1527 	 * setting these registers.
1528 	 */
1529 	if ((erip->stats.inits == 1) || (erip->init_macregs)) {
1530 		erip->init_macregs = 0;
1531 		PUT_MACREG(ipg0, param_ipg0);
1532 		PUT_MACREG(ipg1, param_ipg1);
1533 		PUT_MACREG(ipg2, param_ipg2);
1534 		PUT_MACREG(macmin, BMAC_MIN_FRAME_SIZE);
1535 #ifdef	ERI_RX_TAG_ERROR_WORKAROUND
1536 		PUT_MACREG(macmax, BMAC_MAX_FRAME_SIZE_TAG | BMAC_MAX_BURST);
1537 #else
1538 		PUT_MACREG(macmax, BMAC_MAX_FRAME_SIZE | BMAC_MAX_BURST);
1539 #endif
1540 		PUT_MACREG(palen, BMAC_PREAMBLE_SIZE);
1541 		PUT_MACREG(jam, BMAC_JAM_SIZE);
1542 		PUT_MACREG(alimit, BMAC_ATTEMPT_LIMIT);
1543 		PUT_MACREG(macctl_type, BMAC_CONTROL_TYPE);
1544 		PUT_MACREG(rseed,
1545 		    ((erip->ouraddr[0] & 0x3) << 8) | erip->ouraddr[1]);
1546 
1547 		PUT_MACREG(madd3, BMAC_ADDRESS_3);
1548 		PUT_MACREG(madd4, BMAC_ADDRESS_4);
1549 		PUT_MACREG(madd5, BMAC_ADDRESS_5);
1550 
1551 		/* Program MAC Control address */
1552 		PUT_MACREG(madd6, BMAC_ADDRESS_6);
1553 		PUT_MACREG(madd7, BMAC_ADDRESS_7);
1554 		PUT_MACREG(madd8, BMAC_ADDRESS_8);
1555 
1556 		PUT_MACREG(afr0, BMAC_AF_0);
1557 		PUT_MACREG(afr1, BMAC_AF_1);
1558 		PUT_MACREG(afr2, BMAC_AF_2);
1559 		PUT_MACREG(afmr1_2, BMAC_AF21_MASK);
1560 		PUT_MACREG(afmr0, BMAC_AF0_MASK);
1561 	}
1562 
1563 	/* The counters need to be zeroed */
1564 	PUT_MACREG(nccnt, 0);
1565 	PUT_MACREG(fccnt, 0);
1566 	PUT_MACREG(excnt, 0);
1567 	PUT_MACREG(ltcnt, 0);
1568 	PUT_MACREG(dcnt,  0);
1569 	PUT_MACREG(frcnt, 0);
1570 	PUT_MACREG(lecnt, 0);
1571 	PUT_MACREG(aecnt, 0);
1572 	PUT_MACREG(fecnt, 0);
1573 	PUT_MACREG(rxcv,  0);
1574 
1575 	if (erip->pauseTX)
1576 		PUT_MACREG(spcmd, BMAC_SEND_PAUSE_CMD);
1577 	else
1578 		PUT_MACREG(spcmd, 0);
1579 
1580 	/*
1581 	 * Program BigMAC with local individual ethernet address.
1582 	 */
1583 
1584 	PUT_MACREG(madd0, (erip->ouraddr[4] << 8) | erip->ouraddr[5]);
1585 	PUT_MACREG(madd1, (erip->ouraddr[2] << 8) | erip->ouraddr[3]);
1586 	PUT_MACREG(madd2, (erip->ouraddr[0] << 8) | erip->ouraddr[1]);
1587 
1588 	/*
1589 	 * Install multicast address filter.
1590 	 */
1591 
1592 	PUT_MACREG(hash0, erip->ladrf[0]);
1593 	PUT_MACREG(hash1, erip->ladrf[1]);
1594 	PUT_MACREG(hash2, erip->ladrf[2]);
1595 	PUT_MACREG(hash3, erip->ladrf[3]);
1596 	PUT_MACREG(hash4, erip->ladrf[4]);
1597 	PUT_MACREG(hash5, erip->ladrf[5]);
1598 	PUT_MACREG(hash6, erip->ladrf[6]);
1599 	PUT_MACREG(hash7, erip->ladrf[7]);
1600 	PUT_MACREG(hash8, erip->ladrf[8]);
1601 	PUT_MACREG(hash9, erip->ladrf[9]);
1602 	PUT_MACREG(hash10, erip->ladrf[10]);
1603 	PUT_MACREG(hash11, erip->ladrf[11]);
1604 	PUT_MACREG(hash12, erip->ladrf[12]);
1605 	PUT_MACREG(hash13, erip->ladrf[13]);
1606 	PUT_MACREG(hash14, erip->ladrf[14]);
	PUT_MACREG(hash15, erip->ladrf[15]);
1607 }
1608 
1609 static int
1610 eri_flush_rxbufs(struct eri *erip)
1611 {
1612 	uint_t	i;
1613 	int	status = 0;
1614 	/*
1615 	 * Free and dvma_unload pending recv buffers.
1616 	 * Maintaining the 1-to-1 ordered sequence of
1617 	 * dvma_load() followed by dvma_unload() is critical.
1618 	 * Always unload anything before loading it again.
1619 	 * Never unload anything twice.  Always unload
1620 	 * before freeing the buffer.  We satisfy these
1621 	 * requirements by unloading only those descriptors
1622 	 * which currently have an mblk associated with them.
1623 	 */
1624 	for (i = 0; i < ERI_RPENDING; i++) {
1625 		if (erip->rmblkp[i]) {
1626 			if (erip->eri_dvmarh)
1627 				dvma_unload(erip->eri_dvmarh, 2 * i,
1628 				    DDI_DMA_SYNC_FORCPU);
1629 			else if ((ddi_dma_unbind_handle(erip->ndmarh[i]) ==
1630 			    DDI_FAILURE))
1631 				status = -1;
1632 			freeb(erip->rmblkp[i]);
1633 			erip->rmblkp[i] = NULL;
1634 		}
1635 	}
1636 	return (status);
1637 }
1638 
1639 static void
1640 eri_init_txbufs(struct eri *erip)
1641 {
1642 	/*
1643 	 * Clear TX descriptors.
1644 	 */
1645 	bzero((caddr_t)erip->eri_tmdp, ERI_TPENDING * sizeof (struct eri_tmd));
1646 
1647 	/*
1648 	 * sync TXDMA descriptors.
1649 	 */
1650 	ERI_SYNCIOPB(erip, erip->eri_tmdp,
1651 	    (ERI_TPENDING * sizeof (struct eri_tmd)), DDI_DMA_SYNC_FORDEV);
1652 	/*
1653 	 * Reset TMD 'walking' pointers.
1654 	 */
1655 	erip->tcurp = erip->eri_tmdp;
1656 	erip->tnextp = erip->eri_tmdp;
1657 	erip->tx_cur_cnt = 0;
1658 	erip->tx_kick = 0;
1659 	erip->tx_completion = 0;
1660 }
1661 
1662 static int
1663 eri_init_rxbufs(struct eri *erip)
1664 {
1665 
1666 	ddi_dma_cookie_t	dma_cookie;
1667 	mblk_t			*bp;
1668 	int			i, status = 0;
1669 	uint32_t		ccnt;
1670 
1671 	/*
1672 	 * clear rcv descriptors
1673 	 */
1674 	bzero((caddr_t)erip->rmdp, ERI_RPENDING * sizeof (struct rmd));
1675 
1676 	for (i = 0; i < ERI_RPENDING; i++) {
1677 		if ((bp = eri_allocb(ERI_BUFSIZE)) == NULL) {
1678 			status = -1;
1679 			continue;
1680 		}
1681 		/* Load data buffer to DVMA space */
1682 		if (erip->eri_dvmarh)
1683 			dvma_kaddr_load(erip->eri_dvmarh,
1684 			    (caddr_t)bp->b_rptr, ERI_BUFSIZE,
1685 			    2 * i, &dma_cookie);
1686 /*
1687  *		Bind data buffer to DMA handle
1688  */
1689 		else if (ddi_dma_addr_bind_handle(erip->ndmarh[i], NULL,
1690 		    (caddr_t)bp->b_rptr, ERI_BUFSIZE,
1691 		    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1692 		    &dma_cookie, &ccnt) != DDI_DMA_MAPPED)
1693 			status = -1;
1694 
1695 		PUT_RMD((&erip->rmdp[i]), dma_cookie);
1696 		erip->rmblkp[i] = bp;	/* save for later use */
1697 	}
1698 
1699 	/*
1700 	 * sync RXDMA descriptors.
1701 	 */
1702 	ERI_SYNCIOPB(erip, erip->rmdp, (ERI_RPENDING * sizeof (struct rmd)),
1703 	    DDI_DMA_SYNC_FORDEV);
1704 	/*
1705 	 * Reset RMD 'walking' pointers.
1706 	 */
1707 	erip->rnextp = erip->rmdp;
1708 	erip->rx_completion = 0;
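	/*
	 * Keep the initial kick a few descriptors short of a full ring;
	 * the hardware apparently consumes RX kicks in groups of four,
	 * so the kick value is held to a multiple of 4.
	 */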
1709 	erip->rx_kick = ERI_RPENDING - 4;
1710 	return (status);
1711 }
1712 
1713 static uint32_t
1714 eri_txmac_disable(struct eri *erip)
1715 {
1716 	int	n;
1717 
1718 	PUT_MACREG(txcfg, GET_MACREG(txcfg) & ~BMAC_TXCFG_ENAB);
1719 	n = (BMACTXRSTDELAY * 10) / ERI_WAITPERIOD;
1720 
1721 	while (--n > 0) {
1722 		drv_usecwait(ERI_WAITPERIOD);
1723 		if ((GET_MACREG(txcfg) & 1) == 0)
1724 			return (0);
1725 	}
1726 	return (1);
1727 }
1728 
1729 static uint32_t
1730 eri_rxmac_disable(struct eri *erip)
1731 {
1732 	int	n;
1733 	PUT_MACREG(rxcfg, GET_MACREG(rxcfg) & ~BMAC_RXCFG_ENAB);
1734 	n = BMACRXRSTDELAY / ERI_WAITPERIOD;
1735 
1736 	while (--n > 0) {
1737 		drv_usecwait(ERI_WAITPERIOD);
1738 		if ((GET_MACREG(rxcfg) & 1) == 0)
1739 			return (0);
1740 	}
1741 	return (1);
1742 }
1743 
1744 /*
1745  * Return 0 upon success, 1 on failure.
1746  */
1747 static int
1748 eri_stop(struct eri *erip)
1749 {
1750 	(void) eri_erx_reset(erip);
1751 	(void) eri_etx_reset(erip);
1752 
1753 	/*
1754 	 * Set the cache line size to 16, for 64 bytes of PCI burst size.
1755 	 */
1756 	PUT_SWRSTREG(reset, ERI_G_RESET_GLOBAL | ERI_CACHE_LINE_SIZE);
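
	/*
	 * The reset bits written above self-clear while the cache line
	 * size field persists; completion is therefore tested below by
	 * comparing the whole register against ERI_CACHE_LINE_SIZE.
	 */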
1757 
1758 	if (erip->linkcheck) {
1759 		erip->linkcheck = 0;
1760 		erip->global_reset_issued = 2;
1761 	} else {
1762 		param_linkup = 0;
1763 		erip->stats.link_up = LINK_STATE_DOWN;
1764 		erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1765 		erip->global_reset_issued = -1;
1766 	}
1767 
1768 	ERI_DELAY((GET_SWRSTREG(reset) == ERI_CACHE_LINE_SIZE),
1769 	    ERI_MAX_RST_DELAY);
1770 	erip->rx_reset_issued = -1;
1771 	erip->tx_reset_issued = -1;
1772 
1773 	/*
1774 	 * workaround for RIO not resetting the interrupt mask
1775 	 * register to default value 0xffffffff.
1776 	 */
1777 	PUT_GLOBREG(intmask, ERI_G_MASK_ALL);
1778 
1779 	if (GET_SWRSTREG(reset) == ERI_CACHE_LINE_SIZE) {
1780 		return (0);
1781 	} else {
1782 		return (1);
1783 	}
1784 }
1785 
1786 /*
1787  * Reset Just the RX Portion
1788  * Return 0 upon success, 1 on failure.
1789  *
1790  * Resetting the rxdma while there is an rx dma transaction going on the
1791  * bus will cause a bus hang or parity errors. To avoid this, we first
1792  * disable the rxdma by clearing the ENABLE bit (bit 0). To make sure it is
1793  * disabled, we poll it until it really clears. Furthermore, to verify that
1794  * any RX DMA activity has subsided, we delay for 5 msec.
1795  */
1796 static uint32_t
1797 eri_erx_reset(struct eri *erip)
1798 {
1799 	(void) eri_rxmac_disable(erip); /* Disable the RX MAC */
1800 
1801 	/* Disable the RX DMA */
1802 	PUT_ERXREG(config, GET_ERXREG(config) & ~GET_CONFIG_RXDMA_EN);
1803 	ERI_DELAY(((GET_ERXREG(config) &  1) == 0), ERI_MAX_RST_DELAY);
1804 	if ((GET_ERXREG(config) & 1) != 0)
1805 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1806 		    disable_erx_msg);
1807 
1808 	drv_usecwait(5000); /* Delay to ensure no RX DMA activity */
1809 
1810 	PUT_SWRSTREG(reset, ERI_G_RESET_ERX | ERI_CACHE_LINE_SIZE);
1811 	/*
1812 	 * Wait until the reset is complete, as indicated by
1813 	 * the reset bit clearing, or until we time out.
1814 	 */
1815 	ERI_DELAY(((GET_SWRSTREG(reset) & (ERI_G_RESET_ERX)) ==
1816 	    ERI_CACHE_LINE_SIZE), ERI_MAX_RST_DELAY);
1817 	erip->rx_reset_issued = -1;
1818 
1819 	return ((GET_SWRSTREG(reset) & (ERI_G_RESET_ERX)) ? 1 : 0);
1820 }
1821 
1822 /*
1823  * Reset Just the TX Portion
1824  * Return 0 upon success, 1 on failure.
1825  * Resetting the txdma while there is a tx dma transaction on the bus may cause
1826  * a bus hang or parity errors. To avoid this we first disable the txdma by
1827  * clearing the ENABLE bit (bit 0). To make sure it is disabled, we poll
1828  * it until it really clears. Furthermore, to ensure any TX DMA activity has
1829  * subsided, we delay for 5 msec.
1830  */
1831 static uint32_t
1832 eri_etx_reset(struct eri *erip)
1833 {
1834 	(void) eri_txmac_disable(erip);
1835 
1836 	/* Disable the TX DMA */
1837 	PUT_ETXREG(config, GET_ETXREG(config) & ~GET_CONFIG_TXDMA_EN);
1838 #ifdef ORIG
1839 	ERI_DELAY(((GET_ETXREG(config) &  1) == 0), ERI_MAX_RST_DELAY);
1840 	if ((GET_ETXREG(config) &  1) != 0)
1841 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1842 		    disable_etx_msg);
1843 	drv_usecwait(5000); /* Delay to ensure DMA completed (if any). */
1844 #endif
1845 	drv_usecwait(5000); /* Delay to ensure DMA completed (if any). */
1846 	ERI_DELAY(((GET_ETXREG(config) &  1) == 0), ERI_MAX_RST_DELAY);
1847 	if ((GET_ETXREG(config) &  1) != 0)
1848 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1849 		    disable_etx_msg);
1850 
1851 	PUT_SWRSTREG(reset, ERI_G_RESET_ETX | ERI_CACHE_LINE_SIZE);
1852 
1853 	/*
1854 	 * Wait until the reset is complete, as indicated by the reset bit
1855 	 * clearing, or until we time out.
1856 	 */
1857 	ERI_DELAY(((GET_SWRSTREG(reset) & (ERI_G_RESET_ETX)) ==
1858 	    ERI_CACHE_LINE_SIZE), ERI_MAX_RST_DELAY);
1859 	erip->tx_reset_issued = -1;
1860 
1861 	if (GET_SWRSTREG(reset) &  (ERI_G_RESET_ETX)) {
1862 		return (1);
1863 	} else
1864 		return (0);
1865 }
1866 
1867 
1868 /*
1869  * Initialize the TX DMA registers and Enable the TX DMA.
1870  */
1871 static uint32_t
1872 eri_init_txregs(struct eri *erip)
1873 {
1874 
1875 	uint32_t	i;
1876 	uint64_t	tx_ring;
1877 
1878 	/*
1879 	 * Initialize ETX Registers:
1880 	 * config, txring_lo, txring_hi
1881 	 */
1882 	tx_ring = ERI_IOPBIOADDR(erip, erip->eri_tmdp);
1883 	PUT_ETXREG(txring_lo, (uint32_t)(tx_ring));
1884 	PUT_ETXREG(txring_hi, (uint32_t)(tx_ring >> 32));
1885 
1886 	/*
1887 	 * Get TX Ring Size Masks.
1888 	 * The ring size ERI_TPENDING is defined in eri_mac.h.
1889 	 */
1890 	switch (ERI_TPENDING) {
1891 	case 32: i = ETX_RINGSZ_32;
1892 		break;
1893 	case 64: i = ETX_RINGSZ_64;
1894 		break;
1895 	case 128: i = ETX_RINGSZ_128;
1896 		break;
1897 	case 256: i = ETX_RINGSZ_256;
1898 		break;
1899 	case 512: i = ETX_RINGSZ_512;
1900 		break;
1901 	case 1024: i = ETX_RINGSZ_1024;
1902 		break;
1903 	case 2048: i = ETX_RINGSZ_2048;
1904 		break;
1905 	case 4096: i = ETX_RINGSZ_4096;
1906 		break;
1907 	default:
1908 		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
1909 		    unk_tx_descr_sze_msg, ERI_TPENDING);
1910 		return (1);
1911 	}
1912 
1913 	i <<= ERI_TX_RINGSZ_SHIFT;
1914 	PUT_ETXREG(config, ETX_CONFIG_THRESHOLD | i);
1915 	ENABLE_TXDMA(erip);
1916 	ENABLE_MAC(erip);
1917 	return (0);
1918 }
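
/*
 * Editor's sketch (illustrative, not part of the driver): assuming the
 * ETX_RINGSZ_* encodings in eri_mac.h are the consecutive values 0..7
 * for the power-of-two ring sizes 32..4096 -- an assumption worth
 * verifying -- the switch in eri_init_txregs() above could also be
 * computed arithmetically:
 */
#ifdef	ERI_EDITOR_SKETCH
static int
eri_example_ringsz_encode(int npending)
{
	int	enc = 0;

	/* Reject sizes outside 32..4096 or not a power of two. */
	if (npending < 32 || npending > 4096 ||
	    (npending & (npending - 1)) != 0)
		return (-1);
	for (npending >>= 6; npending != 0; npending >>= 1)
		enc++;	/* 32 -> 0, 64 -> 1, ..., 4096 -> 7 */
	return (enc);
}
#endif	/* ERI_EDITOR_SKETCH */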
1919 
1920 
1921 /*
1922  * Initialize the RX DMA registers and Enable the RX DMA.
1923  */
1924 static uint32_t
1925 eri_init_rxregs(struct eri *erip)
1926 {
1927 	int i;
1928 	uint64_t	rx_ring;
1929 
1930 	/*
1931 	 * Initialize ERX Registers:
1932 	 * rxring_lo, rxring_hi, config, rx_blanking, rx_pause_threshold.
1933 	 * Also, rx_kick
1934 	 * Read and save rxfifo_size.
1935 	 * XXX: Use this to properly configure PAUSE threshold values.
1936 	 */
1937 	rx_ring = ERI_IOPBIOADDR(erip, erip->rmdp);
1938 	PUT_ERXREG(rxring_lo, (uint32_t)(rx_ring));
1939 	PUT_ERXREG(rxring_hi, (uint32_t)(rx_ring >> 32));
1940 	PUT_ERXREG(rx_kick, erip->rx_kick);
1941 
1942 	/*
1943 	 * The max ring size, ERI_RMDMAX, is defined in eri_mac.h.
1944 	 * A larger ERI_RPENDING provides better performance but requires
1945 	 * more system DVMA memory.
1946 	 * eri_rx_ring_size can be used to tune this value from /etc/system.
1947 	 * It cannot be made NDD-tunable because changing it at runtime can
1948 	 * cause non-recoverable errors that NDD operations cannot detect.
1949 	 */
1950 
1951 	/*
1952 	 * get the rxring size bits
1953 	 */
1954 	switch (ERI_RPENDING) {
1955 	case 32: i = ERX_RINGSZ_32;
1956 		break;
1957 	case 64: i = ERX_RINGSZ_64;
1958 		break;
1959 	case 128: i = ERX_RINGSZ_128;
1960 		break;
1961 	case 256: i = ERX_RINGSZ_256;
1962 		break;
1963 	case 512: i = ERX_RINGSZ_512;
1964 		break;
1965 	case 1024: i = ERX_RINGSZ_1024;
1966 		break;
1967 	case 2048: i = ERX_RINGSZ_2048;
1968 		break;
1969 	case 4096: i = ERX_RINGSZ_4096;
1970 		break;
1971 	default:
1972 		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
1973 		    unk_rx_descr_sze_msg, ERI_RPENDING);
1974 		return (1);
1975 	}
1976 
1977 	i <<= ERI_RX_RINGSZ_SHIFT;
1978 	i |=  (ERI_FSTBYTE_OFFSET << ERI_RX_CONFIG_FBO_SHIFT) |
1979 	    (ETHERHEADER_SIZE << ERI_RX_CONFIG_RX_CSSTART_SHIFT) |
1980 	    (ERI_RX_FIFOTH_1024 << ERI_RX_CONFIG_RXFIFOTH_SHIFT);
1981 
1982 	PUT_ERXREG(config, i);
1983 	PUT_ERXREG(rx_blanking,
1984 	    (param_intr_blank_time << ERI_RX_BLNK_INTR_TIME_SHIFT) |
1985 	    param_intr_blank_packets);
1986 
1987 	PUT_ERXREG(rx_pause_threshold, rx_pause_threshold);
1988 	erip->rxfifo_size = GET_ERXREG(rxfifo_size);
1989 	ENABLE_RXDMA(erip);
1990 	return (0);
1991 }
1992 
1993 static int
1994 eri_freebufs(struct eri *erip)
1995 {
1996 	int status = 0;
1997 
1998 	status = eri_flush_rxbufs(erip);
1999 	return (status);
2000 }
2001 
2002 static void
2003 eri_update_rxbufs(struct eri *erip)
2004 {
2005 	int		i;
2006 	volatile struct rmd  *rmdp, *rmdpbase;
2007 
2008 	/*
2009 	 * Hang out receive buffers.
2010 	 */
2011 	rmdpbase = erip->rmdp;
2012 	for (i = 0; i < ERI_RPENDING; i++) {
2013 		rmdp = rmdpbase + i;
2014 		UPDATE_RMD(rmdp);
2015 	}
2016 
2017 	/*
2018 	 * sync RXDMA descriptors.
2019 	 */
2020 	ERI_SYNCIOPB(erip, erip->rmdp, (ERI_RPENDING * sizeof (struct rmd)),
2021 	    DDI_DMA_SYNC_FORDEV);
2022 	/*
2023 	 * Reset RMD 'walking' pointers.
2024 	 */
2025 	erip->rnextp =	erip->rmdp;
2026 	erip->rx_completion = 0;
2027 	erip->rx_kick =	ERI_RPENDING - 4;
2028 }
2029 
2030 /*
2031  * This routine is used to reset the RX DMA only. In the case of RX
2032  * failures such as RX Tag Error, RX hang etc... we don't want to
2033  * do a global reset, which takes down the link and clears the FIFOs.
2034  * By doing an RX-only reset, we leave the TX and the link intact.
2035  */
2036 static uint32_t
2037 eri_init_rx_channel(struct eri *erip)
2038 {
2039 	erip->flags &= ~ERI_RXINIT;
2040 	(void) eri_erx_reset(erip);
2041 	eri_update_rxbufs(erip);
2042 	if (eri_init_rxregs(erip))
2043 		return (1);
2044 	PUT_MACREG(rxmask, BMAC_RXINTR_MASK);
2045 	PUT_MACREG(rxcfg, GET_MACREG(rxcfg) | BMAC_RXCFG_ENAB);
2046 	erip->rx_reset_issued = 0;
2047 	HSTAT(erip, rx_inits);
2048 	erip->flags |= ERI_RXINIT;
2049 	return (0);
2050 }
2051 
2052 static void
2053 eri_init_rx(struct eri *erip)
2054 {
2055 	uint16_t	*ladrf;
2056 
2057 	/*
2058 	 * First make sure the Receive MAC is stopped.
2059 	 */
2060 	(void) eri_rxmac_disable(erip); /* Disable the RX MAC */
2061 
2062 	/*
2063 	 * Program BigMAC with local individual ethernet address.
2064 	 */
2065 
2066 	PUT_MACREG(madd0, (erip->ouraddr[4] << 8) | erip->ouraddr[5]);
2067 	PUT_MACREG(madd1, (erip->ouraddr[2] << 8) | erip->ouraddr[3]);
2068 	PUT_MACREG(madd2, (erip->ouraddr[0] << 8) | erip->ouraddr[1]);
2069 
2070 	/*
2071 	 * Set up multicast address filter by passing all multicast
2072 	 * addresses through a crc generator, and then using the
2073 	 * low order 8 bits as an index into the 256-bit logical
2074 	 * address filter. The high order four bits select the word,
2075 	 * while the rest of the bits select the bit within the word.
2076 	 */
2077 
2078 	ladrf = erip->ladrf;
2079 
2080 	PUT_MACREG(hash0, ladrf[0]);
2081 	PUT_MACREG(hash1, ladrf[1]);
2082 	PUT_MACREG(hash2, ladrf[2]);
2083 	PUT_MACREG(hash3, ladrf[3]);
2084 	PUT_MACREG(hash4, ladrf[4]);
2085 	PUT_MACREG(hash5, ladrf[5]);
2086 	PUT_MACREG(hash6, ladrf[6]);
2087 	PUT_MACREG(hash7, ladrf[7]);
2088 	PUT_MACREG(hash8, ladrf[8]);
2089 	PUT_MACREG(hash9, ladrf[9]);
2090 	PUT_MACREG(hash10, ladrf[10]);
2091 	PUT_MACREG(hash11, ladrf[11]);
2092 	PUT_MACREG(hash12, ladrf[12]);
2093 	PUT_MACREG(hash13, ladrf[13]);
2094 	PUT_MACREG(hash14, ladrf[14]);
2095 	PUT_MACREG(hash15, ladrf[15]);
2096 
2097 #ifdef ERI_DONT_STRIP_CRC
2098 	PUT_MACREG(rxcfg,
2099 	    ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2100 	    (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2101 	    BMAC_RXCFG_ENAB));
2102 #else
2103 	PUT_MACREG(rxcfg,
2104 	    ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2105 	    (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2106 	    BMAC_RXCFG_ENAB | BMAC_RXCFG_STRIP_CRC));
2107 #endif
2108 	/* wait after setting Hash Enable bit */
2109 	/* drv_usecwait(10); */
2110 
2111 	HSTAT(erip, rx_inits);
2112 }
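
/*
 * Editor's sketch (illustrative, not part of the driver): one plausible
 * way a multicast address could map into the 256-bit logical address
 * filter programmed by eri_init_rx() above.  The CRC polynomial and
 * which 8 bits of the CRC are used are assumptions; the driver's actual
 * computation lives where erip->ladrf is maintained.
 */
#ifdef	ERI_EDITOR_SKETCH
static void
eri_example_ladrf_set(uint16_t ladrf[16], const uint8_t mca[6])
{
	uint32_t	crc = 0xffffffffU;
	uint8_t		hash;
	int		i, j;

	for (i = 0; i < 6; i++) {	/* bitwise reflected CRC-32 */
		crc ^= mca[i];
		for (j = 0; j < 8; j++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320U : 0);
	}
	hash = crc & 0xff;	/* low-order 8 bits index the filter */
	/* high-order four bits select the word, low four the bit */
	ladrf[hash >> 4] |= (uint16_t)(1 << (hash & 0xf));
}
#endif	/* ERI_EDITOR_SKETCH */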
2113 
2114 /*
2115  * This routine is used to init the TX MAC only.
2116  *	&erip->xmitlock is held before calling this routine.
2117  */
2118 void
2119 eri_init_txmac(struct eri *erip)
2120 {
2121 	uint32_t carrier_ext = 0;
2122 
2123 	erip->flags &= ~ERI_TXINIT;
2124 	/*
2125 	 * Stop the Transmit MAC.
2126 	 */
2127 	(void) eri_txmac_disable(erip);
2128 
2129 	/*
2130 	 * Must be Internal Transceiver
2131 	 */
2132 	if (param_mode)
2133 		PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2134 		    BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE));
2135 	else
2136 		PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2137 		    BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE |
2138 		    BMAC_XIFC_DIS_ECHO));
2139 
2140 	/*
2141 	 * Initialize the interpacket gap registers
2142 	 */
2143 	PUT_MACREG(ipg1, param_ipg1);
2144 	PUT_MACREG(ipg2, param_ipg2);
2145 
2146 	if (erip->ngu_enable)
2147 		PUT_MACREG(txcfg, ((param_mode ? BMAC_TXCFG_FDX: 0) |
2148 		    ((param_lance_mode && (erip->lance_mode_enable)) ?
2149 		    BMAC_TXCFG_ENIPG0 : 0) |
2150 		    (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0) |
2151 		    BMAC_TXCFG_NGU));
2152 	else
2153 		PUT_MACREG(txcfg, ((param_mode ? BMAC_TXCFG_FDX: 0) |
2154 		    ((param_lance_mode && (erip->lance_mode_enable)) ?
2155 		    BMAC_TXCFG_ENIPG0 : 0) |
2156 		    (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0)));
2157 
2158 	ENABLE_TXDMA(erip);
2159 	ENABLE_TXMAC(erip);
2160 
2161 	HSTAT(erip, tx_inits);
2162 	erip->flags |= ERI_TXINIT;
2163 }
2164 
2165 static void
2166 eri_unallocthings(struct eri *erip)
2167 {
2168 	uint32_t	flag;
2169 	uint32_t	i;
2170 
2171 	flag = erip->alloc_flag;
2172 
2173 	if (flag & ERI_DESC_MEM_MAP)
2174 		(void) ddi_dma_unbind_handle(erip->md_h);
2175 
2176 	if (flag & ERI_DESC_MEM_ALLOC) {
2177 		ddi_dma_mem_free(&erip->mdm_h);
2178 		erip->rmdp = NULL;
2179 		erip->eri_tmdp = NULL;
2180 	}
2181 
2182 	if (flag & ERI_DESC_HANDLE_ALLOC)
2183 		ddi_dma_free_handle(&erip->md_h);
2184 
2185 	(void) eri_freebufs(erip);
2186 
2187 	if (flag & ERI_RCV_HANDLE_ALLOC)
2188 		for (i = 0; i < erip->rcv_handle_cnt; i++)
2189 			ddi_dma_free_handle(&erip->ndmarh[i]);
2190 
2191 	if (flag & ERI_RCV_DVMA_ALLOC) {
2192 		(void) dvma_release(erip->eri_dvmarh);
2193 		erip->eri_dvmarh = NULL;
2194 	}
2195 
2196 	if (flag & ERI_XBUFS_KMEM_DMABIND) {
2197 		(void) ddi_dma_unbind_handle(erip->tbuf_handle);
2198 		erip->tbuf_ioaddr = 0;
2199 	}
2200 
2201 	if (flag & ERI_XBUFS_KMEM_ALLOC) {
2202 		ddi_dma_mem_free(&erip->tbuf_acch);
2203 		erip->tbuf_kaddr = NULL;
2204 	}
2205 
2206 	if (flag & ERI_XBUFS_HANDLE_ALLOC) {
2207 		ddi_dma_free_handle(&erip->tbuf_handle);
2208 		erip->tbuf_handle = NULL;
2209 	}
2210 
2211 }
2212 
2213 /*
2214  * Initialize channel.
2215  * Return true on success, false on error.
2216  *
2217  * The recommended sequence for initialization is:
2218  * 1. Issue a Global Reset command to the Ethernet Channel.
2219  * 2. Poll the Global_Reset bits until the execution of the reset has been
2220  *    completed.
2221  * 2(a). Use the MIF Frame/Output register to reset the transceiver.
2222 	 *	 Poll Register 0 until the Reset bit is 0.
2223 	 * 2(b). Use the MIF Frame/Output register to set the PHY in Normal-Op,
2224  *	 100Mbps and Non-Isolated mode. The main point here is to bring the
2225  *	 PHY out of Isolate mode so that it can generate the rx_clk and tx_clk
2226  *	 to the MII interface so that the Bigmac core can correctly reset
2227  *	 upon a software reset.
2228  * 2(c).  Issue another Global Reset command to the Ethernet Channel and poll
2229  *	  the Global_Reset bits till completion.
2230  * 3. Set up all the data structures in the host memory.
2231  * 4. Program the TX_MAC registers/counters (excluding the TX_MAC Configuration
2232  *    Register).
2233  * 5. Program the RX_MAC registers/counters (excluding the RX_MAC Configuration
2234  *    Register).
2235  * 6. Program the Transmit Descriptor Ring Base Address in the ETX.
2236  * 7. Program the Receive Descriptor Ring Base Address in the ERX.
2237  * 8. Program the Global Configuration and the Global Interrupt Mask Registers.
2238  * 9. Program the ETX Configuration register (enable the Transmit DMA channel).
2239  * 10. Program the ERX Configuration register (enable the Receive DMA channel).
2240  * 11. Program the XIF Configuration Register (enable the XIF).
2241  * 12. Program the RX_MAC Configuration Register (Enable the RX_MAC).
2242  * 13. Program the TX_MAC Configuration Register (Enable the TX_MAC).
2243  */
2244 /*
2245  * lock order:
2246  *	intrlock->linklock->xmitlock->xcvrlock
2247  */
2248 static boolean_t
2249 eri_init(struct eri *erip)
2250 {
2251 	uint32_t	init_stat = 0;
2252 	uint32_t	partial_init = 0;
2253 	uint32_t	carrier_ext = 0;
2254 	uint32_t	mac_ctl = 0;
2255 	boolean_t	ret;
2256 	uint32_t 	link_timeout = ERI_LINKCHECK_TIMER;
2257 	link_state_t	linkupdate = LINK_STATE_UNKNOWN;
2258 
2259 	/*
2260 	 * Just return successfully if device is suspended.
2261 	 * eri_init() will be called again from resume.
2262 	 */
2263 	ASSERT(erip != NULL);
2264 
2265 	if (erip->flags & ERI_SUSPENDED) {
2266 		ret = B_TRUE;
2267 		goto init_exit;
2268 	}
2269 
2270 	mutex_enter(&erip->intrlock);
2271 	eri_stop_timer(erip);	/* acquire linklock */
2272 	mutex_enter(&erip->xmitlock);
2273 	erip->flags &= (ERI_DLPI_LINKUP | ERI_STARTED);
2274 	erip->wantw = B_FALSE;
2275 	HSTAT(erip, inits);
2276 	erip->txhung = 0;
2277 
2278 	if ((erip->stats.inits > 1) && (erip->init_macregs == 0))
2279 		eri_savecntrs(erip);
2280 
2281 	mutex_enter(&erip->xcvrlock);
2282 	if (!param_linkup || erip->linkcheck) {
2283 		if (!erip->linkcheck)
2284 			linkupdate = LINK_STATE_DOWN;
2285 		(void) eri_stop(erip);
2286 	}
2287 	if (!(erip->flags & ERI_DLPI_LINKUP) || !param_linkup) {
2288 		erip->flags |= ERI_DLPI_LINKUP;
2289 		eri_mif_poll(erip, MIF_POLL_STOP);
2290 		(void) eri_new_xcvr(erip);
2291 		ERI_DEBUG_MSG1(erip, XCVR_MSG, "New transceiver detected.");
2292 		if (param_transceiver != NO_XCVR) {
2293 			/*
2294 			 * Reset the new PHY and bring up the
2295 			 * link
2296 			 */
2297 			if (eri_reset_xcvr(erip)) {
2298 				ERI_FAULT_MSG1(erip, SEVERITY_NONE,
2299 				    ERI_VERB_MSG, "In Init after reset");
2300 				mutex_exit(&erip->xcvrlock);
2301 				link_timeout = 0;
2302 				goto done;
2303 			}
2304 			if (erip->stats.link_up == LINK_STATE_UP)
2305 				linkupdate = LINK_STATE_UP;
2306 		} else {
2307 			erip->flags |= (ERI_RUNNING | ERI_INITIALIZED);
2308 			param_linkup = 0;
2309 			erip->stats.link_up = LINK_STATE_DOWN;
2310 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2311 			linkupdate = LINK_STATE_DOWN;
2312 			/*
2313 			 * Still go on and complete the MAC initialization,
2314 			 * as the xcvr might show up later; any early return
2315 			 * must preserve the mutex ordering.
2316 			 */
2317 		}
2318 		eri_mif_poll(erip, MIF_POLL_START);
2319 	}
2320 
2321 	mutex_exit(&erip->xcvrlock);
2322 
2323 	/*
2324 	 * Allocate data structures.
2325 	 */
2326 	if (erip->global_reset_issued) {
2327 		if (erip->global_reset_issued == 2) { /* fast path */
2328 
2329 			/*
2330 			 * Hang out/Initialize descriptors and buffers.
2331 			 */
2332 			eri_init_txbufs(erip);
2333 
2334 			eri_update_rxbufs(erip);
2335 		} else {
2336 			init_stat = eri_allocthings(erip);
2337 			if (init_stat)
2338 				goto done;
2339 
2340 			if (eri_freebufs(erip))
2341 				goto done;
2342 			/*
2343 			 * Hang out/Initialize descriptors and buffers.
2344 			 */
2345 			eri_init_txbufs(erip);
2346 			if (eri_init_rxbufs(erip))
2347 				goto done;
2348 		}
2349 	}
2350 
2351 	/*
2352 	 * BigMAC requires that we confirm that tx, rx and hash are in
2353 	 * quiescent state.
2354 	 * MAC will not reset successfully if the transceiver is not reset and
2355 	 * brought out of Isolate mode correctly. TXMAC reset may fail if the
2356 	 * ext. transceiver is just disconnected. If it fails, try again by
2357 	 * checking the transceiver.
2358 	 */
2359 	if (eri_txmac_disable(erip)) {
2360 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
2361 		    disable_txmac_msg);
2362 		param_linkup = 0;	/* force init again */
2363 		erip->stats.link_up = LINK_STATE_DOWN;
2364 		erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2365 		linkupdate = LINK_STATE_DOWN;
2366 		goto done;
2367 	}
2368 
2369 	if (eri_rxmac_disable(erip)) {
2370 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
2371 		    disable_rxmac_msg);
2372 		param_linkup = 0;	/* force init again */
2373 		erip->stats.link_up = LINK_STATE_DOWN;
2374 		erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2375 		linkupdate = LINK_STATE_DOWN;
2376 		goto done;
2377 	}
2378 
2379 	eri_init_macregs_generic(erip);
2380 
2381 	/*
2382 	 * Initialize ERI Global registers :
2383 	 * config
2384 	 * For PCI :  err_mask, bif_cfg
2385 	 *
2386 	 * Use user-configurable parameter for enabling 64-bit transfers.
2387 	 * Note:For PCI, burst sizes are in multiples of 64-bytes.
2388 	 */
2389 
2390 	/*
2391 	 * Significant performance improvements can be achieved by
2392 	 * disabling transmit interrupt. Thus TMD's are reclaimed
2393 	 * only very infrequently.
2394 	 * The PCS Interrupt is masked here. It is enabled only when
2395 	 * a PCS link is brought up because there is no second level
2396 	 * mask for this interrupt.
2397 	 * Init GLOBAL, TXMAC, RXMAC and MACCTL interrupt masks here.
2398 	 */
2399 	if (! partial_init) {
2400 		PUT_GLOBREG(intmask, ERI_G_MASK_INTR);
2401 		erip->tx_int_me = 0;
2402 		PUT_MACREG(txmask, BMAC_TXINTR_MASK);
2403 		PUT_MACREG(rxmask, BMAC_RXINTR_MASK);
2404 		PUT_MACREG(macctl_mask, ERI_MACCTL_INTR_MASK);
2405 	}
2406 
2407 	if (erip->global_reset_issued) {
2408 		/*
2409 		 * Initialize ETX Registers:
2410 		 * config, txring_lo, txring_hi
2411 		 */
2412 		if (eri_init_txregs(erip))
2413 			goto done;
2414 		/*
2415 		 * Initialize ERX Registers:
2416 		 * rxring_lo, rxring_hi, config, rx_blanking,
2417 		 * rx_pause_threshold.  Also, rx_kick
2418 		 * Read and save rxfifo_size.
2419 		 */
2420 		if (eri_init_rxregs(erip))
2421 			goto done;
2422 	}
2423 
2424 	PUT_MACREG(macctl_mask, ERI_MACCTL_INTR_MASK);
2425 
2426 	/*
2427 	 * Set up the slottime, rxconfig, and txconfig without enabling
2428 	 * the latter two at this time
2429 	 */
2430 	PUT_MACREG(slot, BMAC_SLOT_TIME);
2431 	carrier_ext = 0;
2432 
2433 #ifdef ERI_DONT_STRIP_CRC
2434 	PUT_MACREG(rxcfg,
2435 	    ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2436 	    (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2437 	    (carrier_ext ? BMAC_RXCFG_CARR_EXT : 0)));
2438 #else
2439 	PUT_MACREG(rxcfg,
2440 	    ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2441 	    (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2442 	    BMAC_RXCFG_STRIP_CRC |
2443 	    (carrier_ext ? BMAC_RXCFG_CARR_EXT : 0)));
2444 #endif
2445 	drv_usecwait(10);	/* wait after setting Hash Enable bit */
2446 
2447 	if (erip->ngu_enable)
2448 		PUT_MACREG(txcfg,
2449 		    ((param_mode ? BMAC_TXCFG_FDX: 0) |
2450 		    ((param_lance_mode && (erip->lance_mode_enable)) ?
2451 		    BMAC_TXCFG_ENIPG0 : 0) |
2452 		    (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0) |
2453 		    BMAC_TXCFG_NGU));
2454 	else
2455 		PUT_MACREG(txcfg,
2456 		    ((param_mode ? BMAC_TXCFG_FDX: 0) |
2457 		    ((param_lance_mode && (erip->lance_mode_enable)) ?
2458 		    BMAC_TXCFG_ENIPG0 : 0) |
2459 		    (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0)));
2460 
2461 	if (erip->pauseRX)
2462 		mac_ctl = ERI_MCTLCFG_RXPAUSE;
2463 	if (erip->pauseTX)
2464 		mac_ctl |= ERI_MCTLCFG_TXPAUSE;
2465 
2466 	PUT_MACREG(macctl_cfg, mac_ctl);
2467 
2468 	/*
2469 	 * Must be Internal Transceiver
2470 	 */
2471 	if (param_mode)
2472 		PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2473 		    BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE));
2474 	else {
2475 		PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2476 		    BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE |
2477 		    BMAC_XIFC_DIS_ECHO));
2478 
2479 		link_timeout = ERI_CHECK_HANG_TIMER;
2480 	}
2481 
2482 	/*
2483 	 * if MAC int loopback flag is set, put xifc reg in mii loopback
2484 	 * mode {DIAG}
2485 	 */
2486 	if (erip->flags & ERI_MACLOOPBACK) {
2487 		PUT_MACREG(xifc, GET_MACREG(xifc) | BMAC_XIFC_MIILPBK);
2488 	}
2489 
2490 	/*
2491 	 * Enable TX and RX MACs.
2492 	 */
2493 	ENABLE_MAC(erip);
2494 	erip->flags |= (ERI_RUNNING | ERI_INITIALIZED |
2495 	    ERI_TXINIT | ERI_RXINIT);
2496 	mac_tx_update(erip->mh);
2497 	erip->global_reset_issued = 0;
2498 
2499 #ifdef	ERI_10_10_FORCE_SPEED_WORKAROUND
2500 	eri_xcvr_force_mode(erip, &link_timeout);
2501 #endif
2502 
2503 done:
2504 	if (init_stat)
2505 		eri_unallocthings(erip);
2506 
2507 	mutex_exit(&erip->xmitlock);
2508 	eri_start_timer(erip, eri_check_link, link_timeout);
2509 	mutex_exit(&erip->intrlock);
2510 
2511 	if (linkupdate != LINK_STATE_UNKNOWN)
2512 		mac_link_update(erip->mh, linkupdate);
2513 
2514 	ret = (erip->flags & ERI_RUNNING) ? B_TRUE : B_FALSE;
2515 	if (!ret) {
2516 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
2517 		    "eri_init failed");
2518 	}
2519 
2520 init_exit:
2521 	ASSERT(!MUTEX_HELD(&erip->linklock));
2522 	return (ret);
2523 }
2524 
2525 /*
2526  * A burstsizes value of 0 signifies no burst support, so treat it as failure.
2527  */
2528 static int
2529 eri_burstsize(struct eri *erip)
2530 {
2531 	ddi_dma_handle_t handle;
2532 
2533 	if (ddi_dma_alloc_handle(erip->dip, &dma_attr, DDI_DMA_DONTWAIT,
2534 	    NULL, &handle))
2535 		return (DDI_FAILURE);
2536 
2537 	erip->burstsizes = ddi_dma_burstsizes(handle);
2538 	ddi_dma_free_handle(&handle);
2539 
2540 	if (erip->burstsizes)
2541 		return (DDI_SUCCESS);
2542 
2543 	return (DDI_FAILURE);
2544 }
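
/*
 * Editor's sketch (illustrative, not part of the driver):
 * ddi_dma_burstsizes() returns a bitmap in which a set bit N means a
 * 2^N-byte burst is supported, so erip->burstsizes == 0 above means no
 * burst support at all.  Extracting the largest supported burst:
 */
#ifdef	ERI_EDITOR_SKETCH
static int
eri_example_max_burst(int burstsizes)
{
	int	b;

	if (burstsizes == 0)
		return (0);	/* no bursts supported */
	for (b = 1; (burstsizes >> 1) != 0; burstsizes >>= 1)
		b <<= 1;	/* walk up to the highest set bit */
	return (b);
}
#endif	/* ERI_EDITOR_SKETCH */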
2545 
2546 /*
2547  * Un-initialize (STOP) ERI channel.
2548  */
2549 static void
2550 eri_uninit(struct eri *erip)
2551 {
2552 	boolean_t needind;
2553 
2554 	/*
2555 	 * Allow up to 'ERI_DRAINTIME' for pending xmit's to complete.
2556 	 */
2557 	ERI_DELAY((erip->tcurp == erip->tnextp), ERI_DRAINTIME);
2558 
2559 	mutex_enter(&erip->intrlock);
2560 	eri_stop_timer(erip);   /* acquire linklock */
2561 	mutex_enter(&erip->xmitlock);
2562 	mutex_enter(&erip->xcvrlock);
2563 	eri_mif_poll(erip, MIF_POLL_STOP);
2564 	erip->flags &= ~ERI_DLPI_LINKUP;
2565 	mutex_exit(&erip->xcvrlock);
2566 
2567 	needind = !erip->linkcheck;
2568 	(void) eri_stop(erip);
2569 	erip->flags &= ~ERI_RUNNING;
2570 
2571 	mutex_exit(&erip->xmitlock);
2572 	eri_start_timer(erip, eri_check_link, 0);
2573 	mutex_exit(&erip->intrlock);
2574 
2575 	if (needind)
2576 		mac_link_update(erip->mh, LINK_STATE_DOWN);
2577 }
2578 
2579 /*
2580  * Allocate CONSISTENT memory for rmds and tmds with appropriate alignment and
2581  * map it in IO space.
2582  *
2583  * The driver allocates STREAMS buffers which will be mapped in DVMA
2584  * space using DDI DMA resources.
2585  *
2586  */
2587 static int
2588 eri_allocthings(struct eri *erip)
2589 {
2590 
2591 	uintptr_t	a;
2592 	int		size;
2593 	uint32_t	rval;
2594 	int		i;
2595 	size_t		real_len;
2596 	uint32_t	cookiec;
2597 	int		alloc_stat = 0;
2598 	ddi_dma_cookie_t dma_cookie;
2599 
2600 	/*
2601 	 * Return if resources are already allocated.
2602 	 */
2603 	if (erip->rmdp)
2604 		return (alloc_stat);
2605 
2606 	erip->alloc_flag = 0;
2607 
2608 	/*
2609 	 * Allocate the TMD and RMD descriptors and extra for alignments.
2610 	 */
2611 	size = (ERI_RPENDING * sizeof (struct rmd) +
2612 	    ERI_TPENDING * sizeof (struct eri_tmd)) + ERI_GMDALIGN;
2613 
2614 	rval = ddi_dma_alloc_handle(erip->dip, &desc_dma_attr,
2615 	    DDI_DMA_DONTWAIT, 0, &erip->md_h);
2616 	if (rval != DDI_SUCCESS) {
2617 		return (++alloc_stat);
2618 	}
2619 	erip->alloc_flag |= ERI_DESC_HANDLE_ALLOC;
2620 
2621 	rval = ddi_dma_mem_alloc(erip->md_h, size, &erip->dev_attr,
2622 	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
2623 	    (caddr_t *)&erip->iopbkbase, &real_len, &erip->mdm_h);
2624 	if (rval != DDI_SUCCESS) {
2625 		return (++alloc_stat);
2626 	}
2627 	erip->alloc_flag |= ERI_DESC_MEM_ALLOC;
2628 
2629 	rval = ddi_dma_addr_bind_handle(erip->md_h, NULL,
2630 	    (caddr_t)erip->iopbkbase, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2631 	    DDI_DMA_DONTWAIT, 0, &erip->md_c, &cookiec);
2632 
2633 	if (rval != DDI_DMA_MAPPED)
2634 		return (++alloc_stat);
2635 
2636 	erip->alloc_flag |= ERI_DESC_MEM_MAP;
2637 
2638 	if (cookiec != 1)
2639 		return (++alloc_stat);
2640 
2641 	erip->iopbiobase = erip->md_c.dmac_address;
2642 
2643 	a = erip->iopbkbase;
2644 	a = ROUNDUP(a, ERI_GMDALIGN);
2645 	erip->rmdp = (struct rmd *)a;
2646 	a += ERI_RPENDING * sizeof (struct rmd);
2647 	erip->eri_tmdp = (struct eri_tmd *)a;
2648 /*
2649  *	Specifically we reserve 2 * (ERI_TPENDING + ERI_RPENDING)
2650  *	pagetable entries, i.e. 2 ptes for each descriptor. Since an
2651  *	ethernet buffer is at most 1518 bytes, it can cross at most one
2652  *	page boundary and so uses at most 2 ptes.
2653  *	We will do a ddi_dma_addr_setup for each buffer.
2654  */
2655 	/*
2656 	 * In the current implementation, we use the ddi compliant
2657 	 * dma interface. We allocate ERI_RPENDING dma handles for receive
2658 	 * activity. The actual dma mapping is done in the io function
2659 	 * eri_read_dma(), by calling the ddi_dma_addr_bind_handle.
2660 	 * Dma resources are deallocated by calling ddi_dma_unbind_handle
2661 	 * in eri_reclaim() for transmit and eri_read_dma(), for receive io.
2662 	 */
2663 
2664 	if (eri_use_dvma_rx &&
2665 	    (dvma_reserve(erip->dip, &eri_dma_limits, (ERI_RPENDING * 2),
2666 	    &erip->eri_dvmarh)) == DDI_SUCCESS) {
2667 		erip->alloc_flag |= ERI_RCV_DVMA_ALLOC;
2668 	} else {
2669 		erip->eri_dvmarh = NULL;
2670 
2671 		for (i = 0; i < ERI_RPENDING; i++) {
2672 			rval = ddi_dma_alloc_handle(erip->dip,
2673 			    &dma_attr, DDI_DMA_DONTWAIT,
2674 			    0, &erip->ndmarh[i]);
2675 
2676 			if (rval != DDI_SUCCESS) {
2677 				ERI_FAULT_MSG1(erip, SEVERITY_HIGH,
2678 				    ERI_VERB_MSG, alloc_rx_dmah_msg);
2679 				alloc_stat++;
2680 				break;
2681 			}
2682 		}
2683 
2684 		erip->rcv_handle_cnt = i;
2685 
2686 		if (i)
2687 			erip->alloc_flag |= ERI_RCV_HANDLE_ALLOC;
2688 
2689 		if (alloc_stat)
2690 			return (alloc_stat);
2691 
2692 	}
2693 
2694 /*
2695  *	Allocate TX buffer
2696  *	Note: buffers must always be allocated in the native
2697  *	ordering of the CPU (always big-endian for SPARC).
2698  *	ddi_dma_mem_alloc returns memory in the native ordering
2699  *	of the bus (big endian for SBus, little endian for PCI).
2700  *	So we cannot use ddi_dma_mem_alloc(..., &erip->dev_attr, ...)
2701  *	because we'll get little endian memory on PCI.
2702  */
2703 	if (ddi_dma_alloc_handle(erip->dip, &desc_dma_attr, DDI_DMA_DONTWAIT,
2704 	    0, &erip->tbuf_handle) != DDI_SUCCESS) {
2705 		ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
2706 		    alloc_tx_dmah_msg);
2707 		return (++alloc_stat);
2708 	}
2709 	erip->alloc_flag |= ERI_XBUFS_HANDLE_ALLOC;
2710 	size = ERI_TPENDING * ERI_BUFSIZE;
2711 	if (ddi_dma_mem_alloc(erip->tbuf_handle, size, &buf_attr,
2712 	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, &erip->tbuf_kaddr,
2713 	    &real_len, &erip->tbuf_acch) != DDI_SUCCESS) {
2714 		ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
2715 		    alloc_tx_dmah_msg);
2716 		return (++alloc_stat);
2717 	}
2718 	erip->alloc_flag |= ERI_XBUFS_KMEM_ALLOC;
2719 	if (ddi_dma_addr_bind_handle(erip->tbuf_handle, NULL,
2720 	    erip->tbuf_kaddr, size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2721 	    DDI_DMA_DONTWAIT, 0, &dma_cookie, &cookiec) != DDI_DMA_MAPPED) {
2722 		return (++alloc_stat);
2723 	}
2724 	erip->tbuf_ioaddr = dma_cookie.dmac_address;
2725 	erip->alloc_flag |= ERI_XBUFS_KMEM_DMABIND;
2726 	if (cookiec != 1)
2727 		return (++alloc_stat);
2728 
2729 	/*
2730 	 * Keep handy limit values for RMD, TMD, and Buffers.
2731 	 */
2732 	erip->rmdlimp = &((erip->rmdp)[ERI_RPENDING]);
2733 	erip->eri_tmdlimp = &((erip->eri_tmdp)[ERI_TPENDING]);
2734 
2735 	/*
2736 	 * Zero out RCV holders.
2737 	 */
2738 	bzero((caddr_t)erip->rmblkp, sizeof (erip->rmblkp));
2739 	return (alloc_stat);
2740 }
2741 
2742 /* <<<<<<<<<<<<<<<<<	INTERRUPT HANDLING FUNCTION	>>>>>>>>>>>>>>>>>>>> */
2743 /*
2744  *	First check to see if it is our device interrupting.
2745  */
2746 static uint_t
2747 eri_intr(caddr_t arg)
2748 {
2749 	struct eri *erip = (void *)arg;
2750 	uint32_t erisbits;
2751 	uint32_t mif_status;
2752 	uint32_t serviced = DDI_INTR_UNCLAIMED;
2753 	link_state_t linkupdate = LINK_STATE_UNKNOWN;
2754 	boolean_t macupdate = B_FALSE;
2755 	mblk_t *mp;
2756 	mblk_t *head;
2757 	mblk_t **tail;
2758 
2759 	head = NULL;
2760 	tail = &head;
2761 
2762 	mutex_enter(&erip->intrlock);
2763 
2764 	erisbits = GET_GLOBREG(status);
2765 
2766 	/*
2767 	 * Check if it is only the RX_DONE interrupt, which is
2768 	 * the most frequent one.
2769 	 */
2770 	if (((erisbits & ERI_G_STATUS_RX_INT) == ERI_G_STATUS_RX_DONE) &&
2771 	    (erip->flags & ERI_RUNNING)) {
2772 		serviced = DDI_INTR_CLAIMED;
2773 		goto rx_done_int;
2774 	}
2775 
2776 	/* Claim the first interrupt after initialization */
2777 	if (erip->flags & ERI_INITIALIZED) {
2778 		erip->flags &= ~ERI_INITIALIZED;
2779 		serviced = DDI_INTR_CLAIMED;
2780 	}
2781 
2782 	/* Check for interesting events */
2783 	if ((erisbits & ERI_G_STATUS_INTR) == 0) {
2784 #ifdef	ESTAR_WORKAROUND
2785 		uint32_t linkupdate;
2786 #endif
2787 
2788 		ERI_DEBUG_MSG2(erip, DIAG_MSG,
2789 		    "eri_intr: Interrupt Not Claimed gsbits  %X", erisbits);
2790 #ifdef	DEBUG
2791 		noteri++;
2792 #endif
2793 		ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:MIF Config = 0x%X",
2794 		    GET_MIFREG(mif_cfg));
2795 		ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:MIF imask = 0x%X",
2796 		    GET_MIFREG(mif_imask));
2797 		ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:INT imask = 0x%X",
2798 		    GET_GLOBREG(intmask));
2799 		ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:alias %X",
2800 		    GET_GLOBREG(status_alias));
2801 #ifdef	ESTAR_WORKAROUND
2802 		linkupdate = eri_check_link_noind(erip);
2803 #endif
2804 		mutex_exit(&erip->intrlock);
2805 #ifdef	ESTAR_WORKAROUND
2806 		if (linkupdate != LINK_STATE_UNKNOWN)
2807 			mac_link_update(erip->mh, linkupdate);
2808 #endif
2809 		return (serviced);
2810 	}
2811 	serviced = DDI_INTR_CLAIMED;
2812 
2813 	if (!(erip->flags & ERI_RUNNING)) {
2814 		mutex_exit(&erip->intrlock);
2815 		eri_uninit(erip);
2816 		return (serviced);
2817 	}
2818 
2819 	if (erisbits & ERI_G_STATUS_FATAL_ERR) {
2820 		ERI_DEBUG_MSG2(erip, INTR_MSG,
2821 		    "eri_intr: fatal error: erisbits = %X", erisbits);
2822 		(void) eri_fatal_err(erip, erisbits);
2823 		eri_reinit_fatal++;
2824 
2825 		if (erip->rx_reset_issued) {
2826 			erip->rx_reset_issued = 0;
2827 			(void) eri_init_rx_channel(erip);
2828 			mutex_exit(&erip->intrlock);
2829 		} else {
2830 			param_linkup = 0;
2831 			erip->stats.link_up = LINK_STATE_DOWN;
2832 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2833 			DISABLE_MAC(erip);
2834 			mutex_exit(&erip->intrlock);
2835 			(void) eri_init(erip);
2836 		}
2837 		return (serviced);
2838 	}
2839 
2840 	if (erisbits & ERI_G_STATUS_NONFATAL_ERR) {
2841 		ERI_DEBUG_MSG2(erip, INTR_MSG,
2842 		    "eri_intr: non-fatal error: erisbits = %X", erisbits);
2843 		(void) eri_nonfatal_err(erip, erisbits);
2844 		if (erip->linkcheck) {
2845 			mutex_exit(&erip->intrlock);
2846 			(void) eri_init(erip);
2847 			return (serviced);
2848 		}
2849 	}
2850 
2851 	if (erisbits & ERI_G_STATUS_MIF_INT) {
2852 		uint16_t stat;
2853 		ERI_DEBUG_MSG2(erip, XCVR_MSG,
2854 		    "eri_intr:MIF Interrupt:mii_status %X", erip->mii_status);
2855 		eri_stop_timer(erip);   /* acquire linklock */
2856 
2857 		mutex_enter(&erip->xmitlock);
2858 		mutex_enter(&erip->xcvrlock);
2859 #ifdef	ERI_MIF_POLL_STATUS_WORKAROUND
2860 		mif_status = GET_MIFREG(mif_bsts);
2861 		eri_mif_poll(erip, MIF_POLL_STOP);
2862 		ERI_DEBUG_MSG3(erip, XCVR_MSG,
2863 		    "eri_intr: new MIF interrupt status %X XCVR status %X",
2864 		    mif_status, erip->mii_status);
2865 		(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
2866 		linkupdate = eri_mif_check(erip, stat, stat);
2867 
2868 #else
2869 		mif_status = GET_MIFREG(mif_bsts);
2870 		eri_mif_poll(erip, MIF_POLL_STOP);
2871 		linkupdate = eri_mif_check(erip, (uint16_t)mif_status,
2872 		    (uint16_t)(mif_status >> 16));
2873 #endif
2874 		eri_mif_poll(erip, MIF_POLL_START);
2875 		mutex_exit(&erip->xcvrlock);
2876 		mutex_exit(&erip->xmitlock);
2877 
2878 		if (!erip->openloop_autoneg)
2879 			eri_start_timer(erip, eri_check_link,
2880 			    ERI_LINKCHECK_TIMER);
2881 		else
2882 			eri_start_timer(erip, eri_check_link,
2883 			    ERI_P_FAULT_TIMER);
2884 	}
2885 
2886 	ERI_DEBUG_MSG2(erip, INTR_MSG,
2887 	    "eri_intr:May have Read Interrupt status:status %X", erisbits);
2888 
2889 rx_done_int:
2890 	if ((erisbits & (ERI_G_STATUS_TX_INT_ME)) ||
2891 	    (erip->tx_cur_cnt >= tx_interrupt_rate)) {
2892 		mutex_enter(&erip->xmitlock);
2893 		erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
2894 		    ETX_COMPLETION_MASK);
2895 
2896 		macupdate |= eri_reclaim(erip, erip->tx_completion);
2897 		mutex_exit(&erip->xmitlock);
2898 	}
2899 
2900 	if (erisbits & ERI_G_STATUS_RX_DONE) {
2901 		volatile struct	rmd	*rmdp, *rmdpbase;
2902 		volatile uint32_t rmdi;
2903 		uint8_t loop_limit = 0x20;
2904 		uint64_t flags;
2905 		uint32_t rmdmax_mask = erip->rmdmax_mask;
2906 
2907 		rmdpbase = erip->rmdp;
2908 		rmdi = erip->rx_completion;
2909 		rmdp = rmdpbase + rmdi;
2910 
2911 		/*
2912 		 * Sync RMD before looking at it.
2913 		 */
2914 		ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
2915 		    DDI_DMA_SYNC_FORCPU);
2916 		/*
2917 		 * Loop through each RMD.
2918 		 */
2919 
2920 		flags = GET_RMD_FLAGS(rmdp);
2921 		while (((flags & ERI_RMD_OWN) == 0) && (loop_limit)) {
2922 			/* process one packet */
2923 			mp = eri_read_dma(erip, rmdp, rmdi, flags);
2924 			rmdi =  (rmdi + 1) & rmdmax_mask;
2925 			rmdp = rmdpbase + rmdi;
2926 
2927 			if (mp != NULL) {
2928 				*tail = mp;
2929 				tail = &mp->b_next;
2930 			}
2931 
2932 			/*
2933 			 * ERI RCV DMA fetches or updates four descriptors
2934 			 * at a time. Also we don't want to update the
2935 			 * descriptor batch we just received a packet on.
2936 			 * So we update descriptors every 4 packets, and we
2937 			 * update the group of 4 behind the current batch.
2938 			 */
2939 
2940 			if (!(rmdi % 4)) {
2941 				if (eri_overflow_reset &&
2942 				    (GET_GLOBREG(status_alias) &
2943 				    ERI_G_STATUS_NONFATAL_ERR)) {
2944 					loop_limit = 1;
2945 				} else {
2946 					erip->rx_kick =
2947 					    (rmdi + ERI_RPENDING - 4) &
2948 					    rmdmax_mask;
2949 					PUT_ERXREG(rx_kick, erip->rx_kick);
2950 				}
2951 			}
2952 
2953 			/*
2954 			 * Sync the next RMD before looking at it.
2955 			 */
2956 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
2957 			    DDI_DMA_SYNC_FORCPU);
2958 			flags = GET_RMD_FLAGS(rmdp);
2959 			loop_limit--;
2960 		}
2961 		erip->rx_completion = rmdi;
2962 	}
2963 
2964 	erip->wantw = B_FALSE;
2965 
2966 	mutex_exit(&erip->intrlock);
2967 
2968 	if (head)
2969 		mac_rx(erip->mh, NULL, head);
2970 
2971 	if (macupdate)
2972 		mac_tx_update(erip->mh);
2973 
2974 	if (linkupdate != LINK_STATE_UNKNOWN)
2975 		mac_link_update(erip->mh, linkupdate);
2976 
2977 	return (serviced);
2978 }
2979 
2980 /*
2981  * Handle interrupts for fatal errors
2982  * Need reinitialization.
2983  */
2984 #define	PCI_DATA_PARITY_REP	(1 << 8)
2985 #define	PCI_SING_TARGET_ABORT	(1 << 11)
2986 #define	PCI_RCV_TARGET_ABORT	(1 << 12)
2987 #define	PCI_RCV_MASTER_ABORT	(1 << 13)
2988 #define	PCI_SING_SYSTEM_ERR	(1 << 14)
2989 #define	PCI_DATA_PARITY_ERR	(1 << 15)
2990 
2991 /* called with intrlock held */
2992 static void
2993 eri_fatal_err(struct eri *erip, uint32_t erisbits)
2994 {
2995 	uint16_t	pci_status;
2996 	uint32_t	pci_error_int = 0;
2997 
2998 	if (erisbits & ERI_G_STATUS_RX_TAG_ERR) {
2999 		erip->rx_reset_issued = 1;
3000 		HSTAT(erip, rxtag_err);
3001 	} else {
3002 		erip->global_reset_issued = 1;
3003 		if (erisbits & ERI_G_STATUS_BUS_ERR_INT) {
3004 			pci_error_int = 1;
3005 			HSTAT(erip, pci_error_int);
3006 		} else if (erisbits & ERI_G_STATUS_PERR_INT) {
3007 			HSTAT(erip, parity_error);
3008 		} else {
3009 			HSTAT(erip, unknown_fatal);
3010 		}
3011 	}
3012 
3013 	/*
3014 	 * PCI bus error
3015 	 */
3016 	if (pci_error_int && erip->pci_config_handle) {
3017 		pci_status = pci_config_get16(erip->pci_config_handle,
3018 		    PCI_CONF_STAT);
3019 		ERI_DEBUG_MSG2(erip, FATAL_ERR_MSG, "Bus Error Status %x",
3020 		    pci_status);
3021 		if (pci_status & PCI_DATA_PARITY_REP)
3022 			HSTAT(erip, pci_data_parity_err);
3023 		if (pci_status & PCI_SING_TARGET_ABORT)
3024 			HSTAT(erip, pci_signal_target_abort);
3025 		if (pci_status & PCI_RCV_TARGET_ABORT)
3026 			HSTAT(erip, pci_rcvd_target_abort);
3027 		if (pci_status & PCI_RCV_MASTER_ABORT)
3028 			HSTAT(erip, pci_rcvd_master_abort);
3029 		if (pci_status & PCI_SING_SYSTEM_ERR)
3030 			HSTAT(erip, pci_signal_system_err);
3031 		if (pci_status & PCI_DATA_PARITY_ERR)
3032 			HSTAT(erip, pci_data_parity_err);
3033 		/*
3034 		 * clear it by writing the value that was read back.
3035 		 */
3036 		pci_config_put16(erip->pci_config_handle, PCI_CONF_STAT,
3037 		    pci_status);
3038 	}
3039 }
3040 
3041 /*
3042  * Handle interrupts regarding non-fatal events.
3043  * TXMAC, RXMAC and MACCTL events
3044  */
3045 static void
3046 eri_nonfatal_err(struct eri *erip, uint32_t erisbits)
3047 {
3048 
3049 	uint32_t	txmac_sts, rxmac_sts, macctl_sts, pause_time;
3050 
3051 #ifdef ERI_PM_WORKAROUND
3052 	if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
3053 	    PCI_PM_IDLESPEED_NONE) == DDI_SUCCESS)
3054 		erip->stats.pmcap = ERI_PMCAP_NONE;
3055 #endif
3056 
3057 	if (erisbits & ERI_G_STATUS_TX_MAC_INT) {
3058 		txmac_sts = GET_MACREG(txsts);
3059 		if (txmac_sts & BMAC_TXSTS_TX_URUN) {
3060 			erip->linkcheck = 1;
3061 			HSTAT(erip, txmac_urun);
3062 			HSTAT(erip, oerrors);
3063 		}
3064 
3065 		if (txmac_sts & BMAC_TXSTS_MAXPKT_ERR) {
3066 			erip->linkcheck = 1;
3067 			HSTAT(erip, txmac_maxpkt_err);
3068 			HSTAT(erip, oerrors);
3069 		}
3070 		if (txmac_sts & BMAC_TXSTS_NCC_EXP) {
3071 			erip->stats.collisions += 0x10000;
3072 		}
3073 
3074 		if (txmac_sts & BMAC_TXSTS_ECC_EXP) {
3075 			erip->stats.excessive_coll += 0x10000;
3076 		}
3077 
3078 		if (txmac_sts & BMAC_TXSTS_LCC_EXP) {
3079 			erip->stats.late_coll += 0x10000;
3080 		}
3081 
3082 		if (txmac_sts & BMAC_TXSTS_FCC_EXP) {
3083 			erip->stats.first_coll += 0x10000;
3084 		}
3085 
3086 		if (txmac_sts & BMAC_TXSTS_DEFER_EXP) {
3087 			HSTAT(erip, defer_timer_exp);
3088 		}
3089 
3090 		if (txmac_sts & BMAC_TXSTS_PEAK_EXP) {
3091 			erip->stats.peak_attempt_cnt += 0x100;
3092 		}
3093 	}
3094 
3095 	if (erisbits & ERI_G_STATUS_RX_NO_BUF) {
3096 		ERI_DEBUG_MSG1(erip, NONFATAL_MSG, "rx dropped/no free desc");
3097 
3098 		if (eri_overflow_reset)
3099 			erip->linkcheck = 1;
3100 
3101 		HSTAT(erip, no_free_rx_desc);
3102 		HSTAT(erip, ierrors);
3103 	}
3104 	if (erisbits & ERI_G_STATUS_RX_MAC_INT) {
3105 		rxmac_sts = GET_MACREG(rxsts);
3106 		if (rxmac_sts & BMAC_RXSTS_RX_OVF) {
3107 #ifndef ERI_RMAC_HANG_WORKAROUND
3108 			eri_stop_timer(erip);   /* acquire linklock */
3109 			erip->check_rmac_hang++;
3110 			erip->check2_rmac_hang = 0;
3111 			erip->rxfifo_wr_ptr = GET_ERXREG(rxfifo_wr_ptr);
3112 			erip->rxfifo_rd_ptr = GET_ERXREG(rxfifo_rd_ptr);
3113 
3114 			ERI_DEBUG_MSG5(erip, NONFATAL_MSG,
3115 			    "overflow intr %d: %8x wr:%2x rd:%2x",
3116 			    erip->check_rmac_hang,
3117 			    GET_MACREG(macsm),
3118 			    GET_ERXREG(rxfifo_wr_ptr),
3119 			    GET_ERXREG(rxfifo_rd_ptr));
3120 
3121 			eri_start_timer(erip, eri_check_link,
3122 			    ERI_CHECK_HANG_TIMER);
3123 #endif
3124 			if (eri_overflow_reset)
3125 				erip->linkcheck = 1;
3126 
3127 			HSTAT(erip, rx_overflow);
3128 			HSTAT(erip, ierrors);
3129 		}
3130 
3131 		if (rxmac_sts & BMAC_RXSTS_ALE_EXP) {
3132 			erip->stats.rx_align_err += 0x10000;
3133 			erip->stats.ierrors += 0x10000;
3134 		}
3135 
3136 		if (rxmac_sts & BMAC_RXSTS_CRC_EXP) {
3137 			erip->stats.rx_crc_err += 0x10000;
3138 			erip->stats.ierrors += 0x10000;
3139 		}
3140 
3141 		if (rxmac_sts & BMAC_RXSTS_LEN_EXP) {
3142 			erip->stats.rx_length_err += 0x10000;
3143 			erip->stats.ierrors += 0x10000;
3144 		}
3145 
3146 		if (rxmac_sts & BMAC_RXSTS_CVI_EXP) {
3147 			erip->stats.rx_code_viol_err += 0x10000;
3148 			erip->stats.ierrors += 0x10000;
3149 		}
3150 	}
3151 
3152 	if (erisbits & ERI_G_STATUS_MAC_CTRL_INT) {
3153 
3154 		macctl_sts = GET_MACREG(macctl_sts);
3155 		if (macctl_sts & ERI_MCTLSTS_PAUSE_RCVD) {
3156 			pause_time = ((macctl_sts &
3157 			    ERI_MCTLSTS_PAUSE_TIME) >> 16);
3158 			ERI_DEBUG_MSG2(erip, NONFATAL_MSG,
3159 			    "PAUSE Received. pause time = %X slot_times",
3160 			    pause_time);
3161 			HSTAT(erip, pause_rxcount);
3162 			erip->stats.pause_time_count += pause_time;
3163 		}
3164 
3165 		if (macctl_sts & ERI_MCTLSTS_PAUSE_STATE) {
3166 			HSTAT(erip, pause_oncount);
3167 			erip->stats.pausing = 1;
3168 		}
3169 
3170 		if (macctl_sts & ERI_MCTLSTS_NONPAUSE) {
3171 			HSTAT(erip, pause_offcount);
3172 			erip->stats.pausing = 0;
3173 		}
3174 	}
3175 
3176 }
3177 
3178 /*
3179  * if this is the first init do not bother to save the
3180  * counters.
3181  */
3182 static void
3183 eri_savecntrs(struct eri *erip)
3184 {
3185 	uint32_t	fecnt, aecnt, lecnt, rxcv;
3186 	uint32_t	ltcnt, excnt, fccnt;
3187 
3188 	/* XXX What all gets added in ierrors and oerrors? */
3189 	fecnt = GET_MACREG(fecnt);
3190 	HSTATN(erip, rx_crc_err, fecnt);
3191 	PUT_MACREG(fecnt, 0);
3192 
3193 	aecnt = GET_MACREG(aecnt);
3194 	HSTATN(erip, rx_align_err, aecnt);
3195 	PUT_MACREG(aecnt, 0);
3196 
3197 	lecnt = GET_MACREG(lecnt);
3198 	HSTATN(erip, rx_length_err, lecnt);
3199 	PUT_MACREG(lecnt, 0);
3200 
3201 	rxcv = GET_MACREG(rxcv);
3202 	HSTATN(erip, rx_code_viol_err, rxcv);
3203 	PUT_MACREG(rxcv, 0);
3204 
3205 	ltcnt = GET_MACREG(ltcnt);
3206 	HSTATN(erip, late_coll, ltcnt);
3207 	PUT_MACREG(ltcnt, 0);
3208 
3209 	erip->stats.collisions += (GET_MACREG(nccnt) + ltcnt);
3210 	PUT_MACREG(nccnt, 0);
3211 
3212 	excnt = GET_MACREG(excnt);
3213 	HSTATN(erip, excessive_coll, excnt);
3214 	PUT_MACREG(excnt, 0);
3215 
3216 	fccnt = GET_MACREG(fccnt);
3217 	HSTATN(erip, first_coll, fccnt);
3218 	PUT_MACREG(fccnt, 0);
3219 
3220 	/*
3221 	 * Do not add code violations to input errors.
3222 	 * They are already counted in CRC errors
3223 	 */
3224 	HSTATN(erip, ierrors, (fecnt + aecnt + lecnt));
3225 	HSTATN(erip, oerrors, (ltcnt + excnt));
3226 }
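
/*
 * Editor's sketch (illustrative, not part of the driver):
 * eri_savecntrs() above uses the common read-then-clear idiom for
 * hardware event counters, accumulating each snapshot into a software
 * total.  Events that arrive between the read and the clear are lost;
 * that window is inherent to the idiom.
 */
#ifdef	ERI_EDITOR_SKETCH
static void
eri_example_sample_counter(volatile uint32_t *hwreg, uint64_t *total)
{
	uint32_t	v = *hwreg;	/* snapshot the hardware count */

	*hwreg = 0;			/* restart the hardware counter */
	*total += v;			/* accumulate into 64-bit total */
}
#endif	/* ERI_EDITOR_SKETCH */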
3227 
3228 mblk_t *
3229 eri_allocb_sp(size_t size)
3230 {
3231 	mblk_t  *mp;
3232 
3233 	size += 128;
3234 	if ((mp = allocb(size + 3 * ERI_BURSTSIZE, BPRI_HI)) == NULL) {
3235 		return (NULL);
3236 	}
3237 	mp->b_wptr += 128;
3238 	mp->b_wptr = (uint8_t *)ROUNDUP2(mp->b_wptr, ERI_BURSTSIZE);
3239 	mp->b_rptr = mp->b_wptr;
3240 
3241 	return (mp);
3242 }
3243 
3244 mblk_t *
3245 eri_allocb(size_t size)
3246 {
3247 	mblk_t  *mp;
3248 
3249 	if ((mp = allocb(size + 3 * ERI_BURSTSIZE, BPRI_HI)) == NULL) {
3250 		return (NULL);
3251 	}
3252 	mp->b_wptr = (uint8_t *)ROUNDUP2(mp->b_wptr, ERI_BURSTSIZE);
3253 	mp->b_rptr = mp->b_wptr;
3254 
3255 	return (mp);
3256 }
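
/*
 * Editor's sketch (illustrative, not part of the driver): eri_allocb()
 * and eri_allocb_sp() above over-allocate by 3 * ERI_BURSTSIZE and then
 * round the mblk pointers up to a burst boundary.  Assuming ROUNDUP2()
 * has the usual power-of-two definition, the arithmetic is:
 */
#ifdef	ERI_EDITOR_SKETCH
static uintptr_t
eri_example_roundup2(uintptr_t p, uintptr_t align)
{
	/* align must be a power of two, e.g. ERI_BURSTSIZE */
	return ((p + align - 1) & ~(align - 1));
}
#endif	/* ERI_EDITOR_SKETCH */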
3257 
3258 /*
3259  * Hardware Dependent Functions
3260  * New Section.
3261  */
3262 
3263 /* <<<<<<<<<<<<<<<< Fast Ethernet PHY Bit Bang Operations >>>>>>>>>>>>>>>>>> */
3264 
3265 static void
3266 send_bit(struct eri *erip, uint32_t x)
3267 {
3268 	PUT_MIFREG(mif_bbdata, x);
3269 	PUT_MIFREG(mif_bbclk, ERI_BBCLK_LOW);
3270 	PUT_MIFREG(mif_bbclk, ERI_BBCLK_HIGH);
3271 }
3272 
3273 /*
3274  * To read the MII register bits according to the IEEE Standard
3275  */
3276 static uint32_t
3277 get_bit_std(struct eri *erip)
3278 {
3279 	uint32_t	x;
3280 
3281 	PUT_MIFREG(mif_bbclk, ERI_BBCLK_LOW);
3282 	drv_usecwait(1);	/* wait >330 ns for stable data */
3283 	if (param_transceiver == INTERNAL_XCVR)
3284 		x = (GET_MIFREG(mif_cfg) & ERI_MIF_CFGM0) ? 1 : 0;
3285 	else
3286 		x = (GET_MIFREG(mif_cfg) & ERI_MIF_CFGM1) ? 1 : 0;
3287 	PUT_MIFREG(mif_bbclk, ERI_BBCLK_HIGH);
3288 	return (x);
3289 }
3290 
3291 #define	SEND_BIT(x)		send_bit(erip, x)
3292 #define	GET_BIT_STD(x)		x = get_bit_std(erip)
3293 
3294 
3295 static void
3296 eri_bb_mii_write(struct eri *erip, uint8_t regad, uint16_t data)
3297 {
3298 	uint8_t	phyad;
3299 	int		i;
3300 
3301 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
3302 	phyad = erip->phyad;
3303 	(void) eri_bb_force_idle(erip);
3304 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
3305 	SEND_BIT(0); SEND_BIT(1);	/* <OP> */
3306 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
3307 		SEND_BIT((phyad >> i) & 1);
3308 	}
3309 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
3310 		SEND_BIT((regad >> i) & 1);
3311 	}
3312 	SEND_BIT(1); SEND_BIT(0);	/* <TA> */
3313 	for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
3314 		SEND_BIT((data >> i) & 1);
3315 	}
3316 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
3317 }
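
/*
 * Editor's sketch (illustrative, not part of the driver): the bit-bang
 * routine above serializes a standard IEEE 802.3 clause-22 management
 * frame, MSB first.  For a write, the 32 bits following the preamble
 * can be pictured as one word:
 */
#ifdef	ERI_EDITOR_SKETCH
static uint32_t
eri_example_mii_write_frame(uint8_t phyad, uint8_t regad, uint16_t data)
{
	return ((0x1u << 30) |			/* <ST> = 01 */
	    (0x1u << 28) |			/* <OP> = 01 (write) */
	    ((uint32_t)(phyad & 0x1f) << 23) |	/* <AAAAA> */
	    ((uint32_t)(regad & 0x1f) << 18) |	/* <RRRRR> */
	    (0x2u << 16) |			/* <TA> = 10 */
	    data);				/* <DDDDDDDDDDDDDDDD> */
}
#endif	/* ERI_EDITOR_SKETCH */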
3318 
3319 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
3320 static uint32_t
3321 eri_bb_mii_read(struct eri *erip, uint8_t regad, uint16_t *datap)
3322 {
3323 	uint8_t	phyad;
3324 	int	i;
3325 	uint32_t	x;
3326 	uint32_t	y;
3327 
3328 	*datap = 0;
3329 
3330 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
3331 	phyad = erip->phyad;
3332 	(void) eri_bb_force_idle(erip);
3333 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
3334 	SEND_BIT(1); SEND_BIT(0);	/* <OP> */
3335 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
3336 		SEND_BIT((phyad >> i) & 1);
3337 	}
3338 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
3339 		SEND_BIT((regad >> i) & 1);
3340 	}
3341 
3342 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
3343 
3344 	GET_BIT_STD(x);
3345 	GET_BIT_STD(y);		/* <TA> */
3346 	for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
3347 		GET_BIT_STD(x);
3348 		*datap += (x << i);
3349 	}
3350 	/* Kludge to get the Transceiver out of hung mode */
3351 	/* XXX: Test if this is still needed */
3352 	GET_BIT_STD(x);
3353 	GET_BIT_STD(x);
3354 	GET_BIT_STD(x);
3355 
3356 	return (y);
3357 }
3358 
3359 static void
3360 eri_bb_force_idle(struct eri *erip)
3361 {
3362 	int		i;
3363 
3364 	for (i = 0; i < 33; i++) {
3365 		SEND_BIT(1);
3366 	}
3367 }
3368 
3369 /* <<<<<<<<<<<<<<<<<<<<End of Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
3370 
3371 
3372 /* <<<<<<<<<<<<< Frame Register used for MII operations >>>>>>>>>>>>>>>>>>>> */
3373 
3374 #ifdef ERI_FRM_DEBUG
3375 int frame_flag = 0;
3376 #endif
3377 
3378 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
3379 static uint32_t
3380 eri_mii_read(struct eri	*erip, uint8_t regad, uint16_t *datap)
3381 {
3382 	uint32_t frame;
3383 	uint8_t phyad;
3384 
3385 	if (param_transceiver == NO_XCVR)
3386 		return (1);	/* No xcvr present */
3387 
3388 	if (!erip->frame_enable)
3389 		return (eri_bb_mii_read(erip, regad, datap));
3390 
3391 	phyad = erip->phyad;
3392 #ifdef ERI_FRM_DEBUG
3393 	if (!frame_flag) {
3394 		ERI_DEBUG_MSG1(erip, FRM_MSG, "Frame Register used for MII");
3395 		frame_flag = 1;
3396 	}
3397 #endif
3398 	ERI_DEBUG_MSG3(erip, FRM_MSG,
3399 	    "Frame Reg :mii_read: phyad = %X reg = %X ", phyad, regad);
3400 
3401 	PUT_MIFREG(mif_frame, ERI_MIF_FRREAD |
3402 	    (phyad << ERI_MIF_FRPHYAD_SHIFT) |
3403 	    (regad << ERI_MIF_FRREGAD_SHIFT));
3404 	MIF_ERIDELAY(300,  phyad, regad);
3405 	frame = GET_MIFREG(mif_frame);
3406 	if ((frame & ERI_MIF_FRTA0) == 0) {
3407 		return (1);
3408 	} else {
3409 		*datap = (uint16_t)(frame & ERI_MIF_FRDATA);
3410 		return (0);
3411 	}
3412 
3413 }
3414 
3415 static void
3416 eri_mii_write(struct eri *erip, uint8_t regad, uint16_t data)
3417 {
3418 	uint8_t	phyad;
3419 
3420 	if (!erip->frame_enable) {
3421 		eri_bb_mii_write(erip, regad, data);
3422 		return;
3423 	}
3424 
3425 	phyad = erip->phyad;
3426 
3427 	PUT_MIFREG(mif_frame, (ERI_MIF_FRWRITE |
3428 	    (phyad << ERI_MIF_FRPHYAD_SHIFT) |
3429 	    (regad << ERI_MIF_FRREGAD_SHIFT) | data));
3430 	MIF_ERIDELAY(300,  phyad, regad);
3431 	(void) GET_MIFREG(mif_frame);
3432 }
3433 
3434 
3435 /* <<<<<<<<<<<<<<<<<	PACKET TRANSMIT FUNCTIONS	>>>>>>>>>>>>>>>>>>>> */
3436 
3437 #define	ERI_CROSS_PAGE_BOUNDRY(i, size, pagesize) \
3438 	((i & pagesize) != ((i + size) & pagesize))
3439 
3440 /*
3441  * Send a single mblk.  Returns B_TRUE if the packet is sent, or disposed of
3442  * by freemsg.  Returns B_FALSE if the packet was not sent or queued, and
3443  * should be retried later (due to tx resource exhaustion.)
3444  */
3445 static boolean_t
3446 eri_send_msg(struct eri *erip, mblk_t *mp)
3447 {
3448 	volatile struct	eri_tmd	*tmdp = NULL;
3449 	volatile struct	eri_tmd	*tbasep = NULL;
3450 	uint32_t	len_msg = 0;
3451 	uint32_t	i;
3452 	uint64_t	int_me = 0;
3453 	uint_t		tmdcsum = 0;
3454 	uint_t		start_offset = 0;
3455 	uint_t		stuff_offset = 0;
3456 	uint_t		flags = 0;
3457 	boolean_t	macupdate = B_FALSE;
3458 
3459 	caddr_t	ptr;
3460 	uint32_t	offset;
3461 	uint64_t	ctrl;
3462 	ddi_dma_cookie_t	c;
3463 
3464 	if (!param_linkup) {
3465 		freemsg(mp);
3466 		HSTAT(erip, tnocar);
3467 		HSTAT(erip, oerrors);
3468 		return (B_TRUE);
3469 	}
3470 
3471 #ifdef ERI_HWCSUM
3472 	hcksum_retrieve(mp, NULL, NULL, &start_offset, &stuff_offset,
3473 	    NULL, NULL, &flags);
3474 
3475 	if (flags & HCK_PARTIALCKSUM) {
3476 		if (get_ether_type(mp->b_rptr) == ETHERTYPE_VLAN) {
3477 			start_offset += ETHERHEADER_SIZE + 4;
3478 			stuff_offset += ETHERHEADER_SIZE + 4;
3479 		} else {
3480 			start_offset += ETHERHEADER_SIZE;
3481 			stuff_offset += ETHERHEADER_SIZE;
3482 		}
3483 		tmdcsum = ERI_TMD_CSENABL;
3484 	}
3485 #endif /* ERI_HWCSUM */
3486 
3487 	if ((len_msg = msgsize(mp)) > ERI_BUFSIZE) {
3488 		/*
3489 		 * This shouldn't ever occur, as GLD should not send us
3490 		 * packets that are too big.
3491 		 */
3492 		HSTAT(erip, oerrors);
3493 		freemsg(mp);
3494 		return (B_TRUE);
3495 	}
3496 
3497 	/*
3498 	 * update MIB II statistics
3499 	 */
3500 	BUMP_OutNUcast(erip, mp->b_rptr);
3501 
3502 	mutex_enter(&erip->xmitlock);
3503 
3504 	tbasep = erip->eri_tmdp;
3505 
3506 	/* Check if there are enough descriptors for this packet */
3507 	tmdp = erip->tnextp;
3508 
3509 	if (tmdp >=  erip->tcurp) /* check notmds */
3510 		i = tmdp - erip->tcurp;
3511 	else
3512 		i = tmdp + ERI_TPENDING - erip->tcurp;
3513 
3514 	if (i > (ERI_TPENDING - 4))
3515 		goto notmds;
3516 
3517 	if (i >= (ERI_TPENDING >> 1) && !(erip->starts & 0x7))
3518 		int_me = ERI_TMD_INTME;
3519 
3520 	i = tmdp - tbasep; /* index */
3521 
3522 	offset = (i * ERI_BUFSIZE);
3523 	ptr = erip->tbuf_kaddr + offset;
3524 
3525 	mcopymsg(mp, ptr);
3526 
3527 #ifdef	ERI_HDX_BUG_WORKAROUND
3528 	if ((param_mode) || (eri_hdx_pad_enable == 0)) {
3529 		if (len_msg < ETHERMIN) {
3530 			bzero((ptr + len_msg), (ETHERMIN - len_msg));
3531 			len_msg = ETHERMIN;
3532 		}
3533 	} else {
3534 		if (len_msg < 97) {
3535 			bzero((ptr + len_msg), (97 - len_msg));
3536 			len_msg = 97;
3537 		}
3538 	}
3539 #endif
3540 	c.dmac_address = erip->tbuf_ioaddr + offset;
3541 	(void) ddi_dma_sync(erip->tbuf_handle,
3542 	    (off_t)offset, len_msg, DDI_DMA_SYNC_FORDEV);
3543 
3544 	/* first and last (and only!) descr of packet */
3545 	ctrl = ERI_TMD_SOP | ERI_TMD_EOP | int_me | tmdcsum |
3546 	    (start_offset << ERI_TMD_CSSTART_SHIFT) |
3547 	    (stuff_offset << ERI_TMD_CSSTUFF_SHIFT);
3548 
3549 	PUT_TMD(tmdp, c, len_msg, ctrl);
3550 	ERI_SYNCIOPB(erip, tmdp, sizeof (struct eri_tmd),
3551 	    DDI_DMA_SYNC_FORDEV);
3552 
3553 	tmdp = NEXTTMD(erip, tmdp);
3554 	erip->tx_cur_cnt++;
3555 
3556 	erip->tx_kick = tmdp - tbasep;
3557 	PUT_ETXREG(tx_kick, erip->tx_kick);
3558 	erip->tnextp = tmdp;
3559 
3560 	erip->starts++;
3561 
3562 	if (erip->tx_cur_cnt >= tx_interrupt_rate) {
3563 		erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
3564 		    ETX_COMPLETION_MASK);
3565 		macupdate |= eri_reclaim(erip, erip->tx_completion);
3566 	}
3567 	mutex_exit(&erip->xmitlock);
3568 
3569 	if (macupdate)
3570 		mac_tx_update(erip->mh);
3571 
3572 	return (B_TRUE);
3573 
3574 notmds:
3575 	HSTAT(erip, notmds);
3576 	erip->wantw = B_TRUE;
3577 
3578 	if (!erip->tx_int_me) {
3579 		PUT_GLOBREG(intmask, GET_GLOBREG(intmask) &
3580 		    ~(ERI_G_MASK_TX_INT_ME));
3581 		erip->tx_int_me = 1;
3582 	}
3583 
3584 	if (erip->tx_cur_cnt >= tx_interrupt_rate) {
3585 		erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
3586 		    ETX_COMPLETION_MASK);
3587 		macupdate |= eri_reclaim(erip, erip->tx_completion);
3588 	}
3589 
3590 	mutex_exit(&erip->xmitlock);
3591 
3592 	if (macupdate)
3593 		mac_tx_update(erip->mh);
3594 
3595 	return (B_FALSE);
3596 }
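
/*
 * Editor's sketch (illustrative, not part of the driver): the
 * descriptor-availability test in eri_send_msg() above derives ring
 * occupancy from two walking pointers into an array of ERI_TPENDING
 * entries.  The same computation in index form:
 */
#ifdef	ERI_EDITOR_SKETCH
static int
eri_example_ring_used(int next, int cur, int nentries)
{
	/* entries in use from cur up to (but not including) next */
	return (next >= cur ? next - cur : next + nentries - cur);
}
#endif	/* ERI_EDITOR_SKETCH */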
3597 
3598 static mblk_t *
3599 eri_m_tx(void *arg, mblk_t *mp)
3600 {
3601 	struct eri *erip = arg;
3602 	mblk_t *next;
3603 
3604 	while (mp != NULL) {
3605 		next = mp->b_next;
3606 		mp->b_next = NULL;
3607 		if (!eri_send_msg(erip, mp)) {
3608 			mp->b_next = next;
3609 			break;
3610 		}
3611 		mp = next;
3612 	}
3613 
3614 	return (mp);
3615 }
3616 
3617 /*
3618  * Transmit completion reclaiming.
3619  */
3620 static boolean_t
3621 eri_reclaim(struct eri *erip, uint32_t tx_completion)
3622 {
3623 	volatile struct	eri_tmd	*tmdp;
3624 	struct	eri_tmd	*tcomp;
3625 	struct	eri_tmd	*tbasep;
3626 	struct	eri_tmd	*tlimp;
3627 	uint64_t	flags;
3628 	uint_t reclaimed = 0;
3629 
3630 	tbasep = erip->eri_tmdp;
3631 	tlimp = erip->eri_tmdlimp;
3632 
3633 	tmdp = erip->tcurp;
3634 	tcomp = tbasep + tx_completion; /* pointer to completion tmd */
3635 
3636 	/*
3637 	 * Loop through each TMD starting from tcurp and upto tcomp.
3638 	 */
3639 	while (tmdp != tcomp) {
3640 		flags = GET_TMD_FLAGS(tmdp);
3641 		if (flags & (ERI_TMD_SOP))
3642 			HSTAT(erip, opackets64);
3643 
3644 		HSTATN(erip, obytes64, (flags & ERI_TMD_BUFSIZE));
3645 
3646 		tmdp = NEXTTMDP(tbasep, tlimp, tmdp);
3647 		reclaimed++;
3648 	}
3649 
3650 	erip->tcurp = tmdp;
3651 	erip->tx_cur_cnt -= reclaimed;
3652 
3653 	return (erip->wantw && reclaimed ? B_TRUE : B_FALSE);
3654 }
3655 
3656 
3657 /* <<<<<<<<<<<<<<<<<<<	PACKET RECEIVE FUNCTIONS	>>>>>>>>>>>>>>>>>>> */
3658 static mblk_t *
3659 eri_read_dma(struct eri *erip, volatile struct rmd *rmdp,
3660 	int rmdi, uint64_t flags)
3661 {
3662 	mblk_t	*bp, *nbp;
3663 	int	len;
3664 	uint_t ccnt;
3665 	ddi_dma_cookie_t	c;
3666 #ifdef ERI_RCV_CKSUM
3667 	ushort_t sum;
3668 #endif /* ERI_RCV_CKSUM */
3669 	mblk_t *retmp = NULL;
3670 
3671 	bp = erip->rmblkp[rmdi];
3672 	len = (flags & ERI_RMD_BUFSIZE) >> ERI_RMD_BUFSIZE_SHIFT;
3673 #ifdef	ERI_DONT_STRIP_CRC
3674 	len -= 4;
3675 #endif
3676 	/*
3677 	 * In the event of RX FIFO overflow error, ERI REV 1.0 ASIC can
3678 	 * corrupt packets following the descriptor corresponding the
3679 	 * overflow. To detect the corrupted packets, we disable the
3680 	 * dropping of the "bad" packets at the MAC. The descriptor
3681 	 * then would have the "BAD" bit set. We drop the overflowing
3682 	 * packet and the packet following it. We could have done some sort
3683 	 * of checking to determine if the second packet was indeed bad
3684 	 * (using CRC or checksum) but it would be expensive in this
3685 	 * routine, since it is run in interrupt context.
3686 	 */
3687 	if ((flags & ERI_RMD_BAD) || (len  < ETHERMIN) || (len > ETHERMAX+4)) {
3688 
3689 		HSTAT(erip, rx_bad_pkts);
3690 		if ((flags & ERI_RMD_BAD) == 0)
3691 			HSTAT(erip, ierrors);
3692 		if (len < ETHERMIN) {
3693 			HSTAT(erip, rx_runt);
3694 		} else if (len > ETHERMAX+4) {
3695 			HSTAT(erip, rx_toolong_pkts);
3696 		}
3697 		HSTAT(erip, drop);
3698 		UPDATE_RMD(rmdp);
3699 
3700 		ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3701 		    DDI_DMA_SYNC_FORDEV);
3702 		return (NULL);
3703 	}
3704 #ifdef  ERI_DONT_STRIP_CRC
3705 	{
3706 		uint32_t hw_fcs, tail_fcs;
3707 		/*
3708 		 * since we don't let the hardware strip the CRC in hdx
3709 		 * then the driver needs to do it.
3710 		 * this is to workaround a hardware bug
3711 		 */
3712 		bp->b_wptr = bp->b_rptr + ERI_FSTBYTE_OFFSET + len;
3713 		/*
3714 		 * Get the Checksum calculated by the hardware.
3715 		 */
3716 		hw_fcs = flags & ERI_RMD_CKSUM;
3717 		/*
3718 		 * Catch the case when the CRC starts on an odd
3719 		 * boundary.
3720 		 */
3721 		tail_fcs = bp->b_wptr[0] << 8 | bp->b_wptr[1];
3722 		tail_fcs += bp->b_wptr[2] << 8 | bp->b_wptr[3];
3723 		tail_fcs = (tail_fcs & 0xffff) + (tail_fcs >> 16);
3724 		if ((uintptr_t)(bp->b_wptr) & 1) {
3725 			tail_fcs = (tail_fcs << 8) & 0xffff  | (tail_fcs >> 8);
3726 		}
3727 		hw_fcs += tail_fcs;
3728 		hw_fcs = (hw_fcs & 0xffff) + (hw_fcs >> 16);
3729 		hw_fcs &= 0xffff;
3730 		/*
3731 		 * Now we can replace what the hardware wrote, make believe
3732 		 * it got it right in the first place.
3733 		 */
3734 		flags = (flags & ~(uint64_t)ERI_RMD_CKSUM) | hw_fcs;
3735 	}
3736 #endif
3737 	/*
3738 	 * Packet Processing
3739 	 * Once we get a packet bp, we try to allocate a new mblk, nbp,
3740 	 * to replace this one. If we succeed, we map it to the current
3741 	 * dma handle and update the descriptor with the new cookie. We
3742 	 * then put bp in our read service queue, erip->ipq, if it exists,
3743 	 * or we just pass bp up to the stream expecting it.
3744 	 * If allocation of the new mblk fails, we implicitly drop the
3745 	 * current packet, i.e. do not pass up the mblk, and re-use it.
3746 	 * Re-mapping is not required.
3747 	 */
3748 
3749 	if (len < eri_rx_bcopy_max) {
3750 		if ((nbp = eri_allocb_sp(len + ERI_FSTBYTE_OFFSET))) {
3751 			(void) ddi_dma_sync(erip->ndmarh[rmdi], 0,
3752 			    len + ERI_FSTBYTE_OFFSET, DDI_DMA_SYNC_FORCPU);
3753 			DB_TYPE(nbp) = M_DATA;
3754 			bcopy(bp->b_rptr, nbp->b_rptr,
3755 			    len + ERI_FSTBYTE_OFFSET);
3756 			UPDATE_RMD(rmdp);
3757 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3758 			    DDI_DMA_SYNC_FORDEV);
3759 
3760 			/* Add the First Byte offset to the b_rptr */
3761 			nbp->b_rptr += ERI_FSTBYTE_OFFSET;
3762 			nbp->b_wptr = nbp->b_rptr + len;
3763 
3764 #ifdef ERI_RCV_CKSUM
3765 			sum = ~(uint16_t)(flags & ERI_RMD_CKSUM);
3766 			ERI_PROCESS_READ(erip, nbp, sum);
3767 #else
3768 			ERI_PROCESS_READ(erip, nbp);
3769 #endif
3770 			retmp = nbp;
3771 		} else {
3772 
3773 			/*
3774 			 * mblk allocation has failed. Re-use the old mblk for
3775 			 * the next packet. Re-mapping is not required since
3776 			 * the same mblk and dma cookie is to be used again.
3777 			 */
3778 			HSTAT(erip, ierrors);
3779 			HSTAT(erip, allocbfail);
3780 			HSTAT(erip, norcvbuf);
3781 
3782 			UPDATE_RMD(rmdp);
3783 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3784 			    DDI_DMA_SYNC_FORDEV);
3785 			ERI_DEBUG_MSG1(erip, RESOURCE_MSG, "allocb fail");
3786 		}
3787 	} else {
3788 		/* Use dma unmap/map */
3789 		if ((nbp = eri_allocb_sp(ERI_BUFSIZE))) {
3790 			/*
3791 			 * How do we harden this, especially if unbind
3792 			 * succeeds and then bind fails?
3793 			 * If unbind fails, we can leave without updating
3794 			 * the descriptor, but would it continue to work on
3795 			 * the next round?
3796 			 */
3797 			(void) ddi_dma_unbind_handle(erip->ndmarh[rmdi]);
3798 			(void) ddi_dma_addr_bind_handle(erip->ndmarh[rmdi],
3799 			    NULL, (caddr_t)nbp->b_rptr, ERI_BUFSIZE,
3800 			    DDI_DMA_READ | DDI_DMA_CONSISTENT,
3801 			    DDI_DMA_DONTWAIT, 0, &c, &ccnt);
3802 
3803 			erip->rmblkp[rmdi] = nbp;
3804 			PUT_RMD(rmdp, c);
3805 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3806 			    DDI_DMA_SYNC_FORDEV);
3807 
3808 			/* Add the First Byte offset to the b_rptr */
3809 
3810 			bp->b_rptr += ERI_FSTBYTE_OFFSET;
3811 			bp->b_wptr = bp->b_rptr + len;
3812 
3813 #ifdef ERI_RCV_CKSUM
3814 			sum = ~(uint16_t)(flags & ERI_RMD_CKSUM);
3815 			ERI_PROCESS_READ(erip, bp, sum);
3816 #else
3817 			ERI_PROCESS_READ(erip, bp);
3818 #endif
3819 			retmp = bp;
3820 		} else {
3821 
3822 			/*
3823 			 * mblk allocation has failed. Re-use the old mblk for
3824 			 * the next packet. Re-mapping is not required since
3825 			 * the same mblk and dma cookie is to be used again.
3826 			 */
3827 			HSTAT(erip, ierrors);
3828 			HSTAT(erip, allocbfail);
3829 			HSTAT(erip, norcvbuf);
3830 
3831 			UPDATE_RMD(rmdp);
3832 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3833 			    DDI_DMA_SYNC_FORDEV);
3834 			ERI_DEBUG_MSG1(erip, RESOURCE_MSG, "allocb fail");
3835 		}
3836 	}
3837 
3838 	return (retmp);
3839 }
3840 
3841 #define	LINK_STAT_DISPLAY_TIME	20
3842 
3843 static int
3844 eri_init_xfer_params(struct eri *erip)
3845 {
3846 	int	i;
3847 	dev_info_t *dip;
3848 
3849 	dip = erip->dip;
3850 
3851 	for (i = 0; i < A_CNT(param_arr); i++)
3852 		erip->param_arr[i] = param_arr[i];
3853 
3854 	erip->xmit_dma_mode = 0;
3855 	erip->rcv_dma_mode = 0;
3856 	erip->mifpoll_enable = mifpoll_enable;
3857 	erip->lance_mode_enable = lance_mode;
3858 	erip->frame_enable = 1;
3859 	erip->ngu_enable = ngu_enable;
3860 
3861 	if (!erip->g_nd && !eri_param_register(erip,
3862 	    erip->param_arr, A_CNT(param_arr))) {
3863 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
3864 		    param_reg_fail_msg);
3865 		return (-1);
3866 	}
3867 
3868 	/*
3869 	 * Set up the start-up values for user-configurable parameters
3870 	 * Get the values from the global variables first.
3871 	 * Use the MASK to limit the value to allowed maximum.
3872 	 */
3873 
3874 	param_transceiver = NO_XCVR;
3875 
3876 /*
3877  * The link speed may be forced to either 10 Mbps or 100 Mbps using the
3878  * property "transfer-speed". This may be done in OBP by using the command
3879  * "apply transfer-speed=<speed> <device>". The speed may be either 10 or 100.
3880  */
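/*
 * Note that forcing the speed this way advertises only the half-duplex
 * capability for the chosen speed; all the other capability bits are
 * cleared below.
 */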
3881 	i = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "transfer-speed", 0);
3882 	if (i != 0) {
3883 		param_autoneg = 0;	/* force speed */
3884 		param_anar_100T4 = 0;
3885 		param_anar_10fdx = 0;
3886 		param_anar_10hdx = 0;
3887 		param_anar_100fdx = 0;
3888 		param_anar_100hdx = 0;
3889 		param_anar_asm_dir = 0;
3890 		param_anar_pause = 0;
3891 
3892 		if (i == 10)
3893 			param_anar_10hdx = 1;
3894 		else if (i == 100)
3895 			param_anar_100hdx = 1;
3896 	}
3897 
3898 	/*
3899 	 * Get the parameter values configured in .conf file.
3900 	 */
3901 	param_ipg1 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg1", ipg1) &
3902 	    ERI_MASK_8BIT;
3903 
3904 	param_ipg2 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg2", ipg2) &
3905 	    ERI_MASK_8BIT;
3906 
3907 	param_use_intphy = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3908 	    "use_int_xcvr", use_int_xcvr) & ERI_MASK_1BIT;
3909 
	/* assumption: param_pace_size is the macro backing "pace_size" */
3910 	param_pace_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3911 	    "pace_size", pace_size) & ERI_MASK_8BIT;
3912 
3913 	param_autoneg = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3914 	    "adv_autoneg_cap", adv_autoneg_cap) & ERI_MASK_1BIT;
3915 
3919 	param_anar_100T4 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3920 	    "adv_100T4_cap", adv_100T4_cap) & ERI_MASK_1BIT;
3921 
3922 	param_anar_100fdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3923 	    "adv_100fdx_cap", adv_100fdx_cap) & ERI_MASK_1BIT;
3924 
3925 	param_anar_100hdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3926 	    "adv_100hdx_cap", adv_100hdx_cap) & ERI_MASK_1BIT;
3927 
3928 	param_anar_10fdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3929 	    "adv_10fdx_cap", adv_10fdx_cap) & ERI_MASK_1BIT;
3930 
3931 	param_anar_10hdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3932 	    "adv_10hdx_cap", adv_10hdx_cap) & ERI_MASK_1BIT;
3933 
3934 	param_ipg0 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg0", ipg0) &
3935 	    ERI_MASK_8BIT;
3936 
3937 	param_intr_blank_time = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3938 	    "intr_blank_time", intr_blank_time) & ERI_MASK_8BIT;
3939 
3940 	param_intr_blank_packets = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3941 	    "intr_blank_packets", intr_blank_packets) & ERI_MASK_8BIT;
3942 
3943 	param_lance_mode = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3944 	    "lance_mode", lance_mode) & ERI_MASK_1BIT;
3945 
3946 	param_select_link = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3947 	    "select_link", select_link) & ERI_MASK_1BIT;
3948 
3949 	param_default_link = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3950 	    "default_link", default_link) & ERI_MASK_1BIT;
3951 
3952 	param_anar_asm_dir = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3953 	    "adv_asm_dir_cap", adv_pauseTX_cap) & ERI_MASK_1BIT;
3954 
3955 	param_anar_pause = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3956 	    "adv_pause_cap", adv_pauseRX_cap) & ERI_MASK_1BIT;
3957 
3958 	if (link_pulse_disabled)
3959 		erip->link_pulse_disabled = 1;
3960 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, 0, "link-pulse-disabled"))
3961 		erip->link_pulse_disabled = 1;
3962 
3963 	eri_statinit(erip);
3964 	return (0);
3966 }
3967 
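/*
 * Process an ndd get/set request. For ERI_ND_GET, the ERI_NOTUSR bit is
 * masked off the advertised-capability parameters for the duration of the
 * call so the user-visible values read back cleanly, after which the
 * originals are restored. For ERI_ND_SET, selected parameters are primed
 * with the sentinel value 0xff; if eri_nd_getset() overwrites a sentinel,
 * that parameter was the target of the set and we can decide whether the
 * link needs to be renegotiated via eri_init().
 */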
3968 static void
3969 eri_process_ndd_ioctl(struct eri *erip, queue_t *wq, mblk_t *mp, int cmd)
3970 {
3971 
3972 	uint32_t old_ipg1, old_ipg2, old_use_int_xcvr, old_autoneg;
3973 	uint32_t old_100T4;
3974 	uint32_t old_100fdx, old_100hdx, old_10fdx, old_10hdx;
3975 	uint32_t old_ipg0, old_lance_mode;
3976 	uint32_t old_intr_blank_time, old_intr_blank_packets;
3977 	uint32_t old_asm_dir, old_pause;
3978 	uint32_t old_select_link, old_default_link;
3979 
3980 	switch (cmd) {
3981 	case ERI_ND_GET:
3982 
3983 		old_autoneg =	param_autoneg;
3984 		old_100T4 =	param_anar_100T4;
3985 		old_100fdx =	param_anar_100fdx;
3986 		old_100hdx =	param_anar_100hdx;
3987 		old_10fdx =	param_anar_10fdx;
3988 		old_10hdx =	param_anar_10hdx;
3989 		old_asm_dir =	param_anar_asm_dir;
3990 		old_pause =	param_anar_pause;
3991 
3992 		param_autoneg = old_autoneg & ~ERI_NOTUSR;
3993 		param_anar_100T4 = old_100T4 & ~ERI_NOTUSR;
3994 		param_anar_100fdx = old_100fdx & ~ERI_NOTUSR;
3995 		param_anar_100hdx = old_100hdx & ~ERI_NOTUSR;
3996 		param_anar_10fdx = old_10fdx & ~ERI_NOTUSR;
3997 		param_anar_10hdx = old_10hdx & ~ERI_NOTUSR;
3998 		param_anar_asm_dir = old_asm_dir & ~ERI_NOTUSR;
3999 		param_anar_pause = old_pause & ~ERI_NOTUSR;
4000 
4001 		if (!eri_nd_getset(wq, erip->g_nd, mp)) {
4002 			param_autoneg = old_autoneg;
4003 			param_anar_100T4 = old_100T4;
4004 			param_anar_100fdx = old_100fdx;
4005 			param_anar_100hdx = old_100hdx;
4006 			param_anar_10fdx = old_10fdx;
4007 			param_anar_10hdx = old_10hdx;
4008 			param_anar_asm_dir = old_asm_dir;
4009 			param_anar_pause = old_pause;
4010 			miocnak(wq, mp, 0, EINVAL);
4011 			return;
4012 		}
4013 		param_autoneg = old_autoneg;
4014 		param_anar_100T4 = old_100T4;
4015 		param_anar_100fdx = old_100fdx;
4016 		param_anar_100hdx = old_100hdx;
4017 		param_anar_10fdx = old_10fdx;
4018 		param_anar_10hdx = old_10hdx;
4019 		param_anar_asm_dir = old_asm_dir;
4020 		param_anar_pause = old_pause;
4021 
4022 		qreply(wq, mp);
4023 		break;
4024 
4025 	case ERI_ND_SET:
4026 		old_ipg0 = param_ipg0;
4027 		old_intr_blank_time = param_intr_blank_time;
4028 		old_intr_blank_packets = param_intr_blank_packets;
4029 		old_lance_mode = param_lance_mode;
4030 		old_ipg1 = param_ipg1;
4031 		old_ipg2 = param_ipg2;
4032 		old_use_int_xcvr = param_use_intphy;
4033 		old_autoneg = param_autoneg;
4034 		old_100T4 =	param_anar_100T4;
4035 		old_100fdx =	param_anar_100fdx;
4036 		old_100hdx =	param_anar_100hdx;
4037 		old_10fdx =	param_anar_10fdx;
4038 		old_10hdx =	param_anar_10hdx;
4039 		param_autoneg = 0xff;
4040 		old_asm_dir = param_anar_asm_dir;
4041 		param_anar_asm_dir = 0xff;
4042 		old_pause = param_anar_pause;
4043 		param_anar_pause = 0xff;
4044 		old_select_link = param_select_link;
4045 		old_default_link = param_default_link;
4046 
4047 		if (!eri_nd_getset(wq, erip->g_nd, mp)) {
4048 			param_autoneg = old_autoneg;
4049 			miocnak(wq, mp, 0, EINVAL);
4050 			return;
4051 		}
4052 
4053 		qreply(wq, mp);
4054 
4055 		if (param_autoneg != 0xff) {
4056 			ERI_DEBUG_MSG2(erip, NDD_MSG,
4057 			    "ndd_ioctl: new param_autoneg %d", param_autoneg);
4058 			param_linkup = 0;
4059 			erip->stats.link_up = LINK_STATE_DOWN;
4060 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4061 			(void) eri_init(erip);
4062 		} else {
4063 			param_autoneg = old_autoneg;
4064 			if ((old_use_int_xcvr != param_use_intphy) ||
4065 			    (old_default_link != param_default_link) ||
4066 			    (old_select_link != param_select_link)) {
4067 				param_linkup = 0;
4068 				erip->stats.link_up = LINK_STATE_DOWN;
4069 				erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4070 				(void) eri_init(erip);
4071 			} else if ((old_ipg1 != param_ipg1) ||
4072 			    (old_ipg2 != param_ipg2) ||
4073 			    (old_ipg0 != param_ipg0) ||
4074 			    (old_intr_blank_time != param_intr_blank_time) ||
4075 			    (old_intr_blank_packets !=
4076 			    param_intr_blank_packets) ||
4077 			    (old_lance_mode != param_lance_mode)) {
4078 				param_linkup = 0;
4079 				erip->stats.link_up = LINK_STATE_DOWN;
4080 				erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4081 				(void) eri_init(erip);
4082 			}
4083 		}
4084 		break;
4085 	}
4086 }
4087 
4088 
4089 static int
4090 eri_stat_kstat_update(kstat_t *ksp, int rw)
4091 {
4092 	struct eri *erip;
4093 	struct erikstat *erikp;
4094 	struct stats *esp;
4095 	boolean_t macupdate = B_FALSE;
4096 
4097 	erip = (struct eri *)ksp->ks_private;
4098 	erikp = (struct erikstat *)ksp->ks_data;
4099 
4100 	if (rw != KSTAT_READ)
4101 		return (EACCES);
4102 	/*
4103 	 * Update all the stats by reading all the counter registers.
4104 	 * Counter register stats are otherwise not updated until they
4105 	 * overflow and generate an interrupt.
4106 	 */
4107 
4108 	mutex_enter(&erip->xmitlock);
4109 	if ((erip->flags & ERI_RUNNING) && (erip->flags & ERI_TXINIT)) {
4110 		erip->tx_completion =
4111 		    GET_ETXREG(tx_completion) & ETX_COMPLETION_MASK;
4112 		macupdate |= eri_reclaim(erip, erip->tx_completion);
4113 	}
4114 	mutex_exit(&erip->xmitlock);
4115 	if (macupdate)
4116 		mac_tx_update(erip->mh);
4117 
4118 	eri_savecntrs(erip);
4119 
4120 	esp = &erip->stats;
4121 
4122 	erikp->erik_txmac_maxpkt_err.value.ul = esp->txmac_maxpkt_err;
4123 	erikp->erik_defer_timer_exp.value.ul = esp->defer_timer_exp;
4124 	erikp->erik_peak_attempt_cnt.value.ul = esp->peak_attempt_cnt;
4125 	erikp->erik_tx_hang.value.ul	= esp->tx_hang;
4126 
4127 	erikp->erik_no_free_rx_desc.value.ul	= esp->no_free_rx_desc;
4128 
4129 	erikp->erik_rx_hang.value.ul		= esp->rx_hang;
4130 	erikp->erik_rx_length_err.value.ul	= esp->rx_length_err;
4131 	erikp->erik_rx_code_viol_err.value.ul	= esp->rx_code_viol_err;
4132 	erikp->erik_pause_rxcount.value.ul	= esp->pause_rxcount;
4133 	erikp->erik_pause_oncount.value.ul	= esp->pause_oncount;
4134 	erikp->erik_pause_offcount.value.ul	= esp->pause_offcount;
4135 	erikp->erik_pause_time_count.value.ul	= esp->pause_time_count;
4136 
4137 	erikp->erik_inits.value.ul		= esp->inits;
4138 	erikp->erik_jab.value.ul		= esp->jab;
4139 	erikp->erik_notmds.value.ul		= esp->notmds;
4140 	erikp->erik_allocbfail.value.ul		= esp->allocbfail;
4141 	erikp->erik_drop.value.ul		= esp->drop;
4142 	erikp->erik_rx_bad_pkts.value.ul	= esp->rx_bad_pkts;
4143 	erikp->erik_rx_inits.value.ul		= esp->rx_inits;
4144 	erikp->erik_tx_inits.value.ul		= esp->tx_inits;
4145 	erikp->erik_rxtag_err.value.ul		= esp->rxtag_err;
4146 	erikp->erik_parity_error.value.ul	= esp->parity_error;
4147 	erikp->erik_pci_error_int.value.ul	= esp->pci_error_int;
4148 	erikp->erik_unknown_fatal.value.ul	= esp->unknown_fatal;
4149 	erikp->erik_pci_data_parity_err.value.ul = esp->pci_data_parity_err;
4150 	erikp->erik_pci_signal_target_abort.value.ul =
4151 	    esp->pci_signal_target_abort;
4152 	erikp->erik_pci_rcvd_target_abort.value.ul =
4153 	    esp->pci_rcvd_target_abort;
4154 	erikp->erik_pci_rcvd_master_abort.value.ul =
4155 	    esp->pci_rcvd_master_abort;
4156 	erikp->erik_pci_signal_system_err.value.ul =
4157 	    esp->pci_signal_system_err;
4158 	erikp->erik_pci_det_parity_err.value.ul = esp->pci_det_parity_err;
4159 
4160 	erikp->erik_pmcap.value.ul = esp->pmcap;
4161 
4162 	return (0);
4163 }
4164 
4165 static void
4166 eri_statinit(struct eri *erip)
4167 {
4168 	struct	kstat	*ksp;
4169 	struct	erikstat	*erikp;
4170 
4171 	if ((ksp = kstat_create("eri", erip->instance, "driver_info", "net",
4172 	    KSTAT_TYPE_NAMED,
4173 	    sizeof (struct erikstat) / sizeof (kstat_named_t), 0)) == NULL) {
4174 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
4175 		    kstat_create_fail_msg);
4176 		return;
4177 	}
4178 
4179 	erip->ksp = ksp;
4180 	erikp = (struct erikstat *)(ksp->ks_data);
4181 	/*
4182 	 * MIB II kstat variables
4183 	 */
4184 
4185 	kstat_named_init(&erikp->erik_inits, "inits", KSTAT_DATA_ULONG);
4186 
4187 	kstat_named_init(&erikp->erik_txmac_maxpkt_err,	"txmac_maxpkt_err",
4188 	    KSTAT_DATA_ULONG);
4189 	kstat_named_init(&erikp->erik_defer_timer_exp, "defer_timer_exp",
4190 	    KSTAT_DATA_ULONG);
4191 	kstat_named_init(&erikp->erik_peak_attempt_cnt,	"peak_attempt_cnt",
4192 	    KSTAT_DATA_ULONG);
4193 	kstat_named_init(&erikp->erik_tx_hang, "tx_hang", KSTAT_DATA_ULONG);
4194 
4195 	kstat_named_init(&erikp->erik_no_free_rx_desc, "no_free_rx_desc",
4196 	    KSTAT_DATA_ULONG);
4197 	kstat_named_init(&erikp->erik_rx_hang, "rx_hang", KSTAT_DATA_ULONG);
4198 	kstat_named_init(&erikp->erik_rx_length_err, "rx_length_err",
4199 	    KSTAT_DATA_ULONG);
4200 	kstat_named_init(&erikp->erik_rx_code_viol_err,	"rx_code_viol_err",
4201 	    KSTAT_DATA_ULONG);
4202 
4203 	kstat_named_init(&erikp->erik_pause_rxcount, "pause_rcv_cnt",
4204 	    KSTAT_DATA_ULONG);
4205 
4206 	kstat_named_init(&erikp->erik_pause_oncount, "pause_on_cnt",
4207 	    KSTAT_DATA_ULONG);
4208 
4209 	kstat_named_init(&erikp->erik_pause_offcount, "pause_off_cnt",
4210 	    KSTAT_DATA_ULONG);
4211 	kstat_named_init(&erikp->erik_pause_time_count,	"pause_time_cnt",
4212 	    KSTAT_DATA_ULONG);
4213 
4214 	kstat_named_init(&erikp->erik_jab, "jabber", KSTAT_DATA_ULONG);
4215 	kstat_named_init(&erikp->erik_notmds, "no_tmds", KSTAT_DATA_ULONG);
4216 	kstat_named_init(&erikp->erik_allocbfail, "allocbfail",
4217 	    KSTAT_DATA_ULONG);
4218 
4219 	kstat_named_init(&erikp->erik_drop, "drop", KSTAT_DATA_ULONG);
4220 
4221 	kstat_named_init(&erikp->erik_rx_bad_pkts, "bad_pkts",
4222 	    KSTAT_DATA_ULONG);
4223 
4224 	kstat_named_init(&erikp->erik_rx_inits, "rx_inits", KSTAT_DATA_ULONG);
4225 
4226 	kstat_named_init(&erikp->erik_tx_inits, "tx_inits", KSTAT_DATA_ULONG);
4227 
4228 	kstat_named_init(&erikp->erik_rxtag_err, "rxtag_error",
4229 	    KSTAT_DATA_ULONG);
4230 
4231 	kstat_named_init(&erikp->erik_parity_error, "parity_error",
4232 	    KSTAT_DATA_ULONG);
4233 
4234 	kstat_named_init(&erikp->erik_pci_error_int, "pci_error_interrupt",
4235 	    KSTAT_DATA_ULONG);
4236 	kstat_named_init(&erikp->erik_unknown_fatal, "unknown_fatal",
4237 	    KSTAT_DATA_ULONG);
4238 	kstat_named_init(&erikp->erik_pci_data_parity_err,
4239 	    "pci_data_parity_err", KSTAT_DATA_ULONG);
4240 	kstat_named_init(&erikp->erik_pci_signal_target_abort,
4241 	    "pci_signal_target_abort", KSTAT_DATA_ULONG);
4242 	kstat_named_init(&erikp->erik_pci_rcvd_target_abort,
4243 	    "pci_rcvd_target_abort", KSTAT_DATA_ULONG);
4244 	kstat_named_init(&erikp->erik_pci_rcvd_master_abort,
4245 	    "pci_rcvd_master_abort", KSTAT_DATA_ULONG);
4246 	kstat_named_init(&erikp->erik_pci_signal_system_err,
4247 	    "pci_signal_system_err", KSTAT_DATA_ULONG);
4248 	kstat_named_init(&erikp->erik_pci_det_parity_err,
4249 	    "pci_det_parity_err", KSTAT_DATA_ULONG);
4250 
4251 	kstat_named_init(&erikp->erik_pmcap, "pmcap", KSTAT_DATA_ULONG);
4252 
4253 
4254 	ksp->ks_update = eri_stat_kstat_update;
4255 	ksp->ks_private = (void *) erip;
4256 	kstat_install(ksp);
4257 }
4258 
4259 
4260 /* <<<<<<<<<<<<<<<<<<<<<<< NDD SUPPORT FUNCTIONS	>>>>>>>>>>>>>>>>>>> */
4261 /*
4262  * ndd support functions to get/set parameters
4263  */
4264 /* Free the Named Dispatch Table by calling eri_nd_free */
4265 static void
4266 eri_param_cleanup(struct eri *erip)
4267 {
4268 	if (erip->g_nd)
4269 		(void) eri_nd_free(&erip->g_nd);
4270 }
4271 
4272 /*
4273  * Extracts the value from the eri parameter array and prints the
4274  * parameter value. cp points to the required parameter.
4275  */
4276 /* ARGSUSED */
4277 static int
4278 eri_param_get(queue_t *q, mblk_t *mp, caddr_t cp)
4279 {
4280 	param_t		*eripa = (void *)cp;
4281 	int		param_len = 1;
4282 	uint32_t	param_val;
4283 	mblk_t		*nmp;
4284 	int		ok;
4285 
4286 	param_val = eripa->param_val;
4287 	/*
4288 	 * Calculate space required in mblk.
4289 	 * Remember to include NULL terminator.
4290 	 */
4291 	do {
4292 		param_len++;
4293 		param_val /= 10;
4294 	} while (param_val);
4295 
4296 	ok = eri_mk_mblk_tail_space(mp, &nmp, param_len);
4297 	if (ok == 0) {
4298 		(void) sprintf((char *)nmp->b_wptr, "%d", eripa->param_val);
4299 		nmp->b_wptr += param_len;
4300 	}
4301 
4302 	return (ok);
4303 }
4304 
4305 /*
4306  * Check if there is space for p_val at the end of the mblk.
4307  * If not, allocate a new 1k mblk.
4308  */
4309 static int
4310 eri_mk_mblk_tail_space(mblk_t *mp, mblk_t **nmp, size_t sz)
4311 {
4312 	mblk_t *tmp = mp;
4313 
4314 	while (tmp->b_cont)
4315 		tmp = tmp->b_cont;
4316 
4317 	if (MBLKTAIL(tmp) < sz) {
4318 		if ((tmp->b_cont = allocb(1024, BPRI_HI)) == NULL)
4319 			return (ENOMEM);
4320 		tmp = tmp->b_cont;
4321 	}
4322 	*nmp = tmp;
4323 	return (0);
4324 }
4325 
4326 /*
4327  * Register each element of the parameter array with the
4328  * named dispatch handler. Each element is loaded using
4329  * eri_nd_load()
4330  */
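/*
 * The first character of each parameter name encodes its access mode:
 * '+' is read-write, '-' is read-only, and '!' or '%' entries are not
 * registered at all. The prefix itself is skipped when the name is
 * loaded (param_name + 1).
 */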
4331 static int
4332 eri_param_register(struct eri *erip, param_t *eripa, int cnt)
4333 {
4334 	/* cnt gives the count of the number of */
4335 	/* elements present in the parameter array */
4336 
4337 	int i;
4338 
4339 	for (i = 0; i < cnt; i++, eripa++) {
4340 		pfi_t	setter = (pfi_t)eri_param_set;
4341 
4342 		switch (eripa->param_name[0]) {
4343 		case '+':	/* read-write */
4344 			setter = (pfi_t)eri_param_set;
4345 			break;
4346 
4347 		case '-':	/* read-only */
4348 			setter = NULL;
4349 			break;
4350 
4351 		case '!':	/* read-only, not displayed */
4352 		case '%':	/* read-write, not displayed */
4353 			continue;
4354 		}
4355 
4356 		if (!eri_nd_load(&erip->g_nd, eripa->param_name + 1,
4357 		    (pfi_t)eri_param_get, setter, (caddr_t)eripa)) {
4358 			(void) eri_nd_free(&erip->g_nd);
4359 			return (B_FALSE);
4360 		}
4361 	}
4362 
4363 	return (B_TRUE);
4364 }
4365 
4366 /*
4367  * Validates the new value and sets the eri parameter pointed to
4368  * by cp. Registered as the set routine via eri_nd_load().
4369  */
4370 /* ARGSUSED */
4371 static int
4372 eri_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp)
4373 {
4374 	char *end;
4375 	long new_value;
4376 	param_t	*eripa = (void *)cp;
4377 
4378 	if (ddi_strtol(value, &end, 10, &new_value) != 0)
4379 		return (EINVAL);
4380 	if (end == value || new_value < eripa->param_min ||
4381 	    new_value > eripa->param_max) {
4382 		return (EINVAL);
4383 	}
4384 	eripa->param_val = (uint32_t)new_value;
4385 	return (0);
4387 }
4388 
4389 /* Free the table pointed to by 'nd_pparam' */
4390 static void
4391 eri_nd_free(caddr_t *nd_pparam)
4392 {
4393 	ND	*nd;
4394 
4395 	if ((nd = (void *)(*nd_pparam)) != NULL) {
4396 		if (nd->nd_tbl)
4397 			kmem_free(nd->nd_tbl, nd->nd_size);
4398 		kmem_free(nd, sizeof (ND));
4399 		*nd_pparam = NULL;
4400 	}
4401 }
4402 
4403 static int
4404 eri_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp)
4405 {
4406 	int	err;
4407 	IOCP	iocp;
4408 	MBLKP	mp1;
4409 	ND	*nd;
4410 	NDE	*nde;
4411 	char	*valp;
4412 	size_t	avail;
4413 	mblk_t	*nmp;
4414 
4415 	if (!nd_param)
4416 		return (B_FALSE);
4417 
4418 	nd = (void *)nd_param;
4419 	iocp = (void *)mp->b_rptr;
4420 	if ((iocp->ioc_count == 0) || !(mp1 = mp->b_cont)) {
4421 		mp->b_datap->db_type = M_IOCACK;
4422 		iocp->ioc_count = 0;
4423 		iocp->ioc_error = EINVAL;
4424 		return (B_TRUE);
4425 	}
4426 	/*
4427 	 * NOTE - logic throughout nd_xxx assumes single data block for ioctl.
4428 	 *	However, existing code sends in some big buffers.
4429 	 */
4430 	avail = iocp->ioc_count;
4431 	if (mp1->b_cont) {
4432 		freemsg(mp1->b_cont);
4433 		mp1->b_cont = NULL;
4434 	}
4435 
4436 	mp1->b_datap->db_lim[-1] = '\0';	/* Force null termination */
4437 	valp = (char *)mp1->b_rptr;
4438 
4439 	for (nde = nd->nd_tbl; /* */; nde++) {
4440 		if (!nde->nde_name)
4441 			return (B_FALSE);
4442 		if (strcmp(nde->nde_name, valp) == 0)
4443 			break;
4444 	}
4445 	err = EINVAL;
4446 
4447 	while (*valp++)
4448 		;
4449 
4450 	if (!*valp || valp >= (char *)mp1->b_wptr)
4451 		valp = NULL;
4452 
4453 	switch (iocp->ioc_cmd) {
4454 	case ND_GET:
4455 	/*
4456 	 * (XXX) hack: "*valp" is size of user buffer for copyout. If result
4457 	 * of action routine is too big, free excess and return ioc_rval as buf
4458 	 * size needed.  Return as many mblocks as will fit, free the rest.  For
4459 	 * backward compatibility, assume size of orig ioctl buffer if "*valp"
4460 	 * bad or not given.
4461 	 */
4462 		if (valp)
4463 			(void) ddi_strtol(valp, NULL, 10, (long *)&avail);
4464 		/* We overwrite the name/value with the reply data */
4465 		{
4466 			mblk_t *mp2 = mp1;
4467 
4468 			while (mp2) {
4469 				mp2->b_wptr = mp2->b_rptr;
4470 				mp2 = mp2->b_cont;
4471 			}
4472 		}
4473 		err = (*nde->nde_get_pfi)(q, mp1, nde->nde_data, iocp->ioc_cr);
4474 		if (!err) {
4475 			size_t	size_out;
4476 			ssize_t	excess;
4477 
4478 			iocp->ioc_rval = 0;
4479 
4480 			/* Tack on the null */
4481 			err = eri_mk_mblk_tail_space(mp1, &nmp, 1);
4482 			if (!err) {
4483 				*nmp->b_wptr++ = '\0';
4484 				size_out = msgdsize(mp1);
4485 				excess = size_out - avail;
4486 				if (excess > 0) {
4487 					iocp->ioc_rval = (unsigned)size_out;
4488 					size_out -= excess;
4489 					(void) adjmsg(mp1, -(excess + 1));
4490 					err = eri_mk_mblk_tail_space(mp1,
4491 					    &nmp, 1);
4492 					if (!err)
4493 						*nmp->b_wptr++ = '\0';
4494 					else
4495 						size_out = 0;
4496 				}
4497 
4498 			} else
4499 				size_out = 0;
4500 
4501 			iocp->ioc_count = size_out;
4502 		}
4503 		break;
4504 
4505 	case ND_SET:
4506 		if (valp) {
4507 			err = (*nde->nde_set_pfi)(q, mp1, valp,
4508 			    nde->nde_data, iocp->ioc_cr);
4509 			iocp->ioc_count = 0;
4510 			freemsg(mp1);
4511 			mp->b_cont = NULL;
4512 		}
4513 		break;
4514 	}
4515 
4516 	iocp->ioc_error = err;
4517 	mp->b_datap->db_type = M_IOCACK;
4518 	return (B_TRUE);
4519 }
4520 
4521 /*
4522  * Load 'name' into the named dispatch table pointed to by 'nd_pparam'.
4523  * 'nd_pparam' should be the address of a char pointer cell.  If the table
4524  * does not exist (*nd_pparam == 0), a new table is allocated and 'nd_pparam'
4525  * is stuffed.  If there is not enough space in the table for a new
4526  * entry, more space is allocated.
4527  */
4528 static boolean_t
4529 eri_nd_load(caddr_t *nd_pparam, char *name, pfi_t get_pfi,
4530     pfi_t set_pfi, caddr_t data)
4531 {
4532 	ND	*nd;
4533 	NDE	*nde;
4534 
4535 	if (!nd_pparam)
4536 		return (B_FALSE);
4537 
4538 	if ((nd = (void *)(*nd_pparam)) == NULL) {
4539 		if ((nd = (ND *)kmem_zalloc(sizeof (ND), KM_NOSLEEP))
4540 		    == NULL)
4541 			return (B_FALSE);
4542 		*nd_pparam = (caddr_t)nd;
4543 	}
4544 	if (nd->nd_tbl) {
4545 		for (nde = nd->nd_tbl; nde->nde_name; nde++) {
4546 			if (strcmp(name, nde->nde_name) == 0)
4547 				goto fill_it;
4548 		}
4549 	}
4550 	if (nd->nd_free_count <= 1) {
4551 		if ((nde = (NDE *)kmem_zalloc(nd->nd_size +
4552 		    NDE_ALLOC_SIZE, KM_NOSLEEP)) == NULL)
4553 			return (B_FALSE);
4554 
4555 		nd->nd_free_count += NDE_ALLOC_COUNT;
4556 		if (nd->nd_tbl) {
4557 			bcopy((char *)nd->nd_tbl, (char *)nde, nd->nd_size);
4558 			kmem_free((char *)nd->nd_tbl, nd->nd_size);
4559 		} else {
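			/*
			 * First allocation: seed the table with a "?"
			 * entry whose get routine (nd_get_names) lists
			 * every registered parameter name.
			 */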
4560 			nd->nd_free_count--;
4561 			nde->nde_name = "?";
4562 			nde->nde_get_pfi = nd_get_names;
4563 			nde->nde_set_pfi = nd_set_default;
4564 		}
4565 		nde->nde_data = (caddr_t)nd;
4566 		nd->nd_tbl = nde;
4567 		nd->nd_size += NDE_ALLOC_SIZE;
4568 	}
4569 	for (nde = nd->nd_tbl; nde->nde_name; nde++)
4570 		;
4571 	nd->nd_free_count--;
4572 fill_it:
4573 	nde->nde_name = name;
4574 	nde->nde_get_pfi = get_pfi ? get_pfi : nd_get_default;
4575 	nde->nde_set_pfi = set_pfi ? set_pfi : nd_set_default;
4576 	nde->nde_data = data;
4577 	return (B_TRUE);
4578 }
4579 
4580 /*
4581  * Hardening Functions
4582  * New Section
4583  */
4584 #ifdef  DEBUG
4585 /*PRINTFLIKE5*/
4586 static void
4587 eri_debug_msg(const char *file, int line, struct eri *erip,
4588     debug_msg_t type, const char *fmt, ...)
4589 {
4590 	char	msg_buffer[255];
4591 	va_list ap;
4592 
4593 	va_start(ap, fmt);
4594 	(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
4595 	va_end(ap);
4596 
4597 	if (eri_msg_out & ERI_CON_MSG) {
4598 		if (((type <= eri_debug_level) && eri_debug_all) ||
4599 		    ((type == eri_debug_level) && !eri_debug_all)) {
4600 			if (erip)
4601 				cmn_err(CE_CONT, "D: %s %s%d:(%s%d) %s\n",
4602 				    debug_msg_string[type], file, line,
4603 				    ddi_driver_name(erip->dip), erip->instance,
4604 				    msg_buffer);
4605 			else
4606 				cmn_err(CE_CONT, "D: %s %s(%d): %s\n",
4607 				    debug_msg_string[type], file,
4608 				    line, msg_buffer);
4609 		}
4610 	}
4611 }
4612 #endif
4613 
4614 
4615 /*PRINTFLIKE4*/
4616 static void
4617 eri_fault_msg(struct eri *erip, uint_t severity, msg_t type,
4618 	const char *fmt, ...)
4619 {
4620 	char	msg_buffer[255];
4621 	va_list	ap;
4622 
4623 	va_start(ap, fmt);
4624 	(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
4625 	va_end(ap);
4626 
4627 	if (erip == NULL) {
4628 		cmn_err(CE_NOTE, "eri : %s", msg_buffer);
4629 		return;
4630 	}
4631 
4632 	if (severity == SEVERITY_HIGH) {
4633 		cmn_err(CE_WARN, "%s%d : %s", ddi_driver_name(erip->dip),
4634 		    erip->instance, msg_buffer);
4635 	} else switch (type) {
4636 	case ERI_VERB_MSG:
4637 		cmn_err(CE_CONT, "?%s%d : %s", ddi_driver_name(erip->dip),
4638 		    erip->instance, msg_buffer);
4639 		break;
4640 	case ERI_LOG_MSG:
4641 		cmn_err(CE_NOTE, "^%s%d : %s", ddi_driver_name(erip->dip),
4642 		    erip->instance, msg_buffer);
4643 		break;
4644 	case ERI_BUF_MSG:
4645 		cmn_err(CE_NOTE, "!%s%d : %s", ddi_driver_name(erip->dip),
4646 		    erip->instance, msg_buffer);
4647 		break;
4648 	case ERI_CON_MSG:
4649 		cmn_err(CE_CONT, "%s%d : %s", ddi_driver_name(erip->dip),
4650 		    erip->instance, msg_buffer);
		break;
4651 	default:
4652 		break;
4653 	}
4654 }
4655 
4656 /*
4657  * Transceiver (xcvr) Functions
4658  * New Section
4659  */
4660 /*
4661  * The eri_stop_timer function is called before doing any link-related
4662  * processing. It locks the "linklock" to protect the link-related data
4663  * structures. This lock will be subsequently released in eri_start_timer().
4664  */
4665 static void
4666 eri_stop_timer(struct eri *erip)
4667 {
4668 	timeout_id_t id;
4669 	mutex_enter(&erip->linklock);
4670 	if (erip->timerid) {
4671 		erip->flags |= ERI_NOTIMEOUTS; /* prevent multiple timeouts */
4672 		id = erip->timerid;
4673 		erip->timerid = 0; /* prevent other threads calling untimeout */
4674 		mutex_exit(&erip->linklock); /* no mutex across untimeout() */
4675 
4676 		(void) untimeout(id);
4677 		mutex_enter(&erip->linklock); /* acquire mutex again */
4678 		erip->flags &= ~ERI_NOTIMEOUTS;
4679 	}
4680 }
4681 
4682 /*
4683  * If msec parameter is zero, just release "linklock".
4684  */
4685 static void
4686 eri_start_timer(struct eri *erip, fptrv_t func, clock_t msec)
4687 {
4688 	if (msec) {
4689 		if (!(erip->flags & ERI_NOTIMEOUTS) &&
4690 		    (erip->flags & ERI_RUNNING)) {
4691 			erip->timerid = timeout(func, (caddr_t)erip,
4692 			    drv_usectohz(1000*msec));
4693 		}
4694 	}
4695 
4696 	mutex_exit(&erip->linklock);
4697 }
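/*
 * Canonical usage of the timer pair (see eri_check_link_noind()):
 *
 *	eri_stop_timer(erip);				acquires linklock
 *	... link-related processing ...
 *	eri_start_timer(erip, eri_check_link, msec);	releases linklock
 *
 * linklock is held across the processing, so every exit path must go
 * through eri_start_timer(), even if only to release the lock.
 */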
4698 
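/*
 * Probe the MIF configuration register for an external (CFGM1) or an
 * internal (CFGM0) PHY, select one (external wins unless use_int_xcvr
 * is set), and program the MIF and MAC accordingly. Returns B_TRUE when
 * the selected transceiver differs from the previous one, so the caller
 * knows a PHY reset is required.
 */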
4699 static int
4700 eri_new_xcvr(struct eri *erip)
4701 {
4702 	int		status;
4703 	uint32_t 	cfg;
4704 	int		old_transceiver;
4705 
4706 	if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4707 	    PCI_PM_IDLESPEED_NONE) == DDI_SUCCESS)
4708 		erip->stats.pmcap = ERI_PMCAP_NONE;
4709 
4710 	status = B_FALSE;			/* no change */
4711 	cfg = GET_MIFREG(mif_cfg);
4712 	ERI_DEBUG_MSG2(erip, MIF_MSG, "cfg value = %X", cfg);
4713 	old_transceiver = param_transceiver;
4714 
4715 	if ((cfg & ERI_MIF_CFGM1) && !use_int_xcvr) {
4716 		ERI_DEBUG_MSG1(erip, PHY_MSG, "Found External XCVR");
4717 		/*
4718 		 * An external transceiver was found and it takes priority
4719 		 * over an internal one, given that the use_int_xcvr flag
4720 		 * is false.
4721 		 */
4722 		if (old_transceiver != EXTERNAL_XCVR) {
4723 			/*
4724 			 * External transceiver has just been plugged
4725 			 * in. Isolate the internal Transceiver.
4726 			 */
4727 			if (old_transceiver == INTERNAL_XCVR) {
4728 				eri_mii_write(erip, ERI_PHY_BMCR,
4729 				    (PHY_BMCR_ISOLATE | PHY_BMCR_PWRDN |
4730 				    PHY_BMCR_LPBK));
4731 			}
4732 			status = B_TRUE;
4733 		}
4734 		/*
4735 		 * Select the external Transceiver.
4736 		 */
4737 		erip->phyad = ERI_EXTERNAL_PHYAD;
4738 		param_transceiver = EXTERNAL_XCVR;
4739 		erip->mif_config &= ~ERI_MIF_CFGPD;
4740 		erip->mif_config |= (erip->phyad << ERI_MIF_CFGPD_SHIFT);
4741 		erip->mif_config |= ERI_MIF_CFGPS;
4742 		PUT_MIFREG(mif_cfg, erip->mif_config);
4743 
4744 		PUT_MACREG(xifc, GET_MACREG(xifc) | BMAC_XIFC_MIIBUF_OE);
4745 		drv_usecwait(ERI_MIF_POLL_DELAY);
4746 	} else if (cfg & ERI_MIF_CFGM0) {
4747 		ERI_DEBUG_MSG1(erip, PHY_MSG, "Found Internal XCVR");
4748 		/*
4749 		 * An Internal Transceiver was found or the
4750 		 * use_int_xcvr flag is true.
4751 		 */
4752 		if (old_transceiver != INTERNAL_XCVR) {
4753 			/*
4754 			 * The external transceiver has just been
4755 			 * disconnected or we're moving from a no
4756 			 * transceiver state.
4757 			 */
4758 			if ((old_transceiver == EXTERNAL_XCVR) &&
4759 			    (cfg & ERI_MIF_CFGM0)) {
4760 				eri_mii_write(erip, ERI_PHY_BMCR,
4761 				    (PHY_BMCR_ISOLATE | PHY_BMCR_PWRDN |
4762 				    PHY_BMCR_LPBK));
4763 			}
4764 			status = B_TRUE;
4765 		}
4766 		/*
4767 		 * Select the internal transceiver.
4768 		 */
4769 		erip->phyad = ERI_INTERNAL_PHYAD;
4770 		param_transceiver = INTERNAL_XCVR;
4771 		erip->mif_config &= ~ERI_MIF_CFGPD;
4772 		erip->mif_config |= (erip->phyad << ERI_MIF_CFGPD_SHIFT);
4773 		erip->mif_config &= ~ERI_MIF_CFGPS;
4774 		PUT_MIFREG(mif_cfg, erip->mif_config);
4775 
4776 		PUT_MACREG(xifc, GET_MACREG(xifc) & ~BMAC_XIFC_MIIBUF_OE);
4777 		drv_usecwait(ERI_MIF_POLL_DELAY);
4778 	} else {
4779 		/*
4780 		 * Did not find a valid xcvr.
4781 		 */
4782 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
4783 		    "Eri_new_xcvr : Select None");
4784 		param_transceiver = NO_XCVR;
4785 		erip->xcvr_status = PHY_LINK_DOWN;
4786 	}
4787 
4788 	if (erip->stats.pmcap == ERI_PMCAP_NONE) {
4789 		if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4790 		    (void *)4000) == DDI_SUCCESS)
4791 			erip->stats.pmcap = ERI_PMCAP_4MHZ;
4792 	}
4793 
4794 	return (status);
4795 }
4796 
4797 /*
4798  * This function is used for timers.  No locks are held on timer expiry.
4799  */
4800 static void
4801 eri_check_link(struct eri *erip)
4802 {
4803 	link_state_t	linkupdate = eri_check_link_noind(erip);
4804 
4805 	if (linkupdate != LINK_STATE_UNKNOWN)
4806 		mac_link_update(erip->mh, linkupdate);
4807 }
4808 
4809 /*
4810  * Compare our xcvr in our structure to the xcvr that we get from
4811  * eri_check_mii_xcvr(). If they are different then mark the
4812  * link down, reset xcvr, and return.
4813  *
4814  * Note that without the MII connector, conditions cannot change such
4815  * that an external phy would be used, so this code has been simplified
4816  * to avoid calling that function or changing the xcvr at all.
4817  */
4818 static uint32_t
4819 eri_check_link_noind(struct eri *erip)
4820 {
4821 	uint16_t stat, control, mif_ints;
4822 	uint32_t link_timeout	= ERI_LINKCHECK_TIMER;
4823 	uint32_t linkupdate = 0;
4824 
4825 	eri_stop_timer(erip);	/* acquire linklock */
4826 
4827 	mutex_enter(&erip->xmitlock);
4828 	mutex_enter(&erip->xcvrlock);
4829 	eri_mif_poll(erip, MIF_POLL_STOP);
4830 
4831 	(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
4832 	mif_ints = erip->mii_status ^ stat;
4833 
4834 	if (erip->openloop_autoneg) {
4835 		(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
4836 		ERI_DEBUG_MSG3(erip, XCVR_MSG,
4837 		    "eri_check_link:openloop stat %X mii_status %X",
4838 		    stat, erip->mii_status);
4839 		(void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
4840 		if (!(stat & PHY_BMSR_LNKSTS) &&
4841 		    (erip->openloop_autoneg < 2)) {
4842 			if (param_speed) {
4843 				control &= ~PHY_BMCR_100M;
4844 				param_anlpar_100hdx = 0;
4845 				param_anlpar_10hdx = 1;
4846 				param_speed = 0;
4847 				erip->stats.ifspeed = 10;
4848 
4849 			} else {
4850 				control |= PHY_BMCR_100M;
4851 				param_anlpar_100hdx = 1;
4852 				param_anlpar_10hdx = 0;
4853 				param_speed = 1;
4854 				erip->stats.ifspeed = 100;
4855 			}
4856 			ERI_DEBUG_MSG3(erip, XCVR_MSG,
4857 			    "eri_check_link: trying speed %X stat %X",
4858 			    param_speed, stat);
4859 
4860 			erip->openloop_autoneg++;
4861 			eri_mii_write(erip, ERI_PHY_BMCR, control);
4862 			link_timeout = ERI_P_FAULT_TIMER;
4863 		} else {
4864 			erip->openloop_autoneg = 0;
4865 			linkupdate = eri_mif_check(erip, stat, stat);
4866 			if (erip->openloop_autoneg)
4867 				link_timeout = ERI_P_FAULT_TIMER;
4868 		}
4869 		eri_mif_poll(erip, MIF_POLL_START);
4870 		mutex_exit(&erip->xcvrlock);
4871 		mutex_exit(&erip->xmitlock);
4872 
4873 		eri_start_timer(erip, eri_check_link, link_timeout);
4874 		return (linkupdate);
4875 	}
4876 
4877 	linkupdate = eri_mif_check(erip, mif_ints, stat);
4878 	eri_mif_poll(erip, MIF_POLL_START);
4879 	mutex_exit(&erip->xcvrlock);
4880 	mutex_exit(&erip->xmitlock);
4881 
4882 #ifdef ERI_RMAC_HANG_WORKAROUND
4883 	/*
4884 	 * Check if rx hung.
4885 	 */
4886 	if ((erip->flags & ERI_RUNNING) && param_linkup) {
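		/*
		 * Two-pass heuristic: the first pass snapshots the RX
		 * FIFO read/write pointers, and the next timer tick
		 * compares them. If the MAC state machine reports
		 * overflow and the pointers have not moved, the RMAC is
		 * assumed hung and the chip is reinitialized.
		 */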
4887 		if (erip->check_rmac_hang) {
4888 			ERI_DEBUG_MSG5(erip,
4889 			    NONFATAL_MSG,
4890 			    "check1 %d: macsm:%8x wr:%2x rd:%2x",
4891 			    erip->check_rmac_hang,
4892 			    GET_MACREG(macsm),
4893 			    GET_ERXREG(rxfifo_wr_ptr),
4894 			    GET_ERXREG(rxfifo_rd_ptr));
4895 
4896 			erip->check_rmac_hang = 0;
4897 			erip->check2_rmac_hang++;
4898 
4899 			erip->rxfifo_wr_ptr_c = GET_ERXREG(rxfifo_wr_ptr);
4900 			erip->rxfifo_rd_ptr_c = GET_ERXREG(rxfifo_rd_ptr);
4901 
4902 			eri_start_timer(erip, eri_check_link,
4903 			    ERI_CHECK_HANG_TIMER);
4904 			return (linkupdate);
4905 		}
4906 
4907 		if (erip->check2_rmac_hang) {
4908 			ERI_DEBUG_MSG5(erip,
4909 			    NONFATAL_MSG,
4910 			    "check2 %d: macsm:%8x wr:%2x rd:%2x",
4911 			    erip->check2_rmac_hang,
4912 			    GET_MACREG(macsm),
4913 			    GET_ERXREG(rxfifo_wr_ptr),
4914 			    GET_ERXREG(rxfifo_rd_ptr));
4915 
4916 			erip->check2_rmac_hang = 0;
4917 
4918 			erip->rxfifo_wr_ptr = GET_ERXREG(rxfifo_wr_ptr);
4919 			erip->rxfifo_rd_ptr = GET_ERXREG(rxfifo_rd_ptr);
4920 
4921 			if (((GET_MACREG(macsm) & BMAC_OVERFLOW_STATE) ==
4922 			    BMAC_OVERFLOW_STATE) &&
4923 			    ((erip->rxfifo_wr_ptr_c == erip->rxfifo_rd_ptr_c) ||
4924 			    ((erip->rxfifo_rd_ptr == erip->rxfifo_rd_ptr_c) &&
4925 			    (erip->rxfifo_wr_ptr == erip->rxfifo_wr_ptr_c)))) {
4926 				ERI_DEBUG_MSG1(erip,
4927 				    NONFATAL_MSG,
4928 				    "RX hang: Reset mac");
4929 
4930 				HSTAT(erip, rx_hang);
4931 				erip->linkcheck = 1;
4932 
4933 				eri_start_timer(erip, eri_check_link,
4934 				    ERI_LINKCHECK_TIMER);
4935 				(void) eri_init(erip);
4936 				return (linkupdate);
4937 			}
4938 		}
4939 	}
4940 #endif
4941 
4942 	/*
4943 	 * Check if tx hung.
4944 	 */
4945 #ifdef	ERI_TX_HUNG
4946 	if ((erip->flags & ERI_RUNNING) && param_linkup &&
4947 	    (eri_check_txhung(erip))) {
4948 		HSTAT(erip, tx_hang);
4949 		eri_reinit_txhung++;
4950 		erip->linkcheck = 1;
4951 		eri_start_timer(erip, eri_check_link, ERI_CHECK_HANG_TIMER);
4952 		(void) eri_init(erip);
4953 		return (linkupdate);
4954 	}
4955 #endif
4956 
4957 #ifdef ERI_PM_WORKAROUND
4958 	if (erip->stats.pmcap == ERI_PMCAP_NONE) {
4959 		if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4960 		    (void *)4000) == DDI_SUCCESS)
4961 			erip->stats.pmcap = ERI_PMCAP_4MHZ;
4962 
4963 		ERI_DEBUG_MSG2(erip, NONFATAL_MSG,
4964 		    "eri_check_link: PMCAP %d", erip->stats.pmcap);
4965 	}
4966 #endif
4967 	if ((!param_mode) && (param_transceiver != NO_XCVR))
4968 		eri_start_timer(erip, eri_check_link, ERI_CHECK_HANG_TIMER);
4969 	else
4970 		eri_start_timer(erip, eri_check_link, ERI_LINKCHECK_TIMER);
4971 	return (linkupdate);
4972 }
4973 
4974 static link_state_t
4975 eri_mif_check(struct eri *erip, uint16_t mif_ints, uint16_t mif_data)
4976 {
4977 	uint16_t control, aner, anlpar, anar, an_common;
4978 	uint16_t old_mintrans;
4979 	int restart_autoneg = 0;
4980 	link_state_t retv;
4981 
4982 	ERI_DEBUG_MSG4(erip, XCVR_MSG, "eri_mif_check: mif_mask: %X, %X, %X",
4983 	    erip->mif_mask, mif_ints, mif_data);
4984 
4985 	mif_ints &= ~erip->mif_mask;
4986 	erip->mii_status = mif_data;
4987 	/*
4988 	 * Now check if someone has pulled the xcvr or
4989 	 * a new xcvr has shown up
4990 	 * If so try to find out what the new xcvr setup is.
4991 	 */
4992 	if (((mif_ints & PHY_BMSR_RES1) && (mif_data == 0xFFFF)) ||
4993 	    (param_transceiver == NO_XCVR)) {
4994 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
4995 		    "No status - transceiver gone");
4996 		if (eri_new_xcvr(erip)) {
4997 			if (param_transceiver != NO_XCVR) {
4998 				/*
4999 				 * Reset the new PHY and bring up the link
5000 				 */
5001 				(void) eri_reset_xcvr(erip);
5002 			}
5003 		}
5004 		return (LINK_STATE_UNKNOWN);
5005 	}
5006 
5007 	if (param_autoneg && (mif_ints & PHY_BMSR_LNKSTS) &&
5008 	    (mif_data & PHY_BMSR_LNKSTS) && (mif_data & PHY_BMSR_ANC)) {
5009 		mif_ints |= PHY_BMSR_ANC;
5010 		ERI_DEBUG_MSG3(erip, PHY_MSG,
5011 		    "eri_mif_check: Set ANC bit mif_data %X mif_ints %X",
5012 		    mif_data, mif_ints);
5013 	}
5014 
5015 	if ((mif_ints & PHY_BMSR_ANC) && (mif_data & PHY_BMSR_ANC)) {
5016 		ERI_DEBUG_MSG1(erip, PHY_MSG, "Auto-negotiation interrupt.");
5017 
5018 		/*
5019 		 * Switch off Auto-negotiation interrupts and switch on
5020 		 * Link status interrupts.
5021 		 */
5022 		erip->mif_mask |= PHY_BMSR_ANC;
5023 		erip->mif_mask &= ~PHY_BMSR_LNKSTS;
5024 		(void) eri_mii_read(erip, ERI_PHY_ANER, &aner);
5025 		param_aner_lpancap = 1 && (aner & PHY_ANER_LPNW);
5026 		if ((aner & PHY_ANER_MLF) || (eri_force_mlf)) {
5027 			ERI_DEBUG_MSG1(erip, XCVR_MSG,
5028 			    "parallel detection fault");
5029 			/*
5030 			 * Consider doing open loop auto-negotiation.
5031 			 */
5032 			ERI_DEBUG_MSG1(erip, XCVR_MSG,
5033 			    "Going into Open loop Auto-neg");
5034 			(void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5035 
5036 			control &= ~(PHY_BMCR_ANE | PHY_BMCR_RAN |
5037 			    PHY_BMCR_FDX);
5038 			if (param_anar_100fdx || param_anar_100hdx) {
5039 				control |= PHY_BMCR_100M;
5040 				param_anlpar_100hdx = 1;
5041 				param_anlpar_10hdx = 0;
5042 				param_speed = 1;
5043 				erip->stats.ifspeed = 100;
5044 
5045 			} else if (param_anar_10fdx || param_anar_10hdx) {
5046 				control &= ~PHY_BMCR_100M;
5047 				param_anlpar_100hdx = 0;
5048 				param_anlpar_10hdx = 1;
5049 				param_speed = 0;
5050 				erip->stats.ifspeed = 10;
5051 			} else {
5052 				ERI_FAULT_MSG1(erip, SEVERITY_NONE,
5053 				    ERI_VERB_MSG,
5054 				    "Transceiver speed set incorrectly.");
5055 				return (0);
5056 			}
5057 
5058 			(void) eri_mii_write(erip, ERI_PHY_BMCR, control);
5059 			param_anlpar_100fdx = 0;
5060 			param_anlpar_10fdx = 0;
5061 			param_mode = 0;
5062 			erip->openloop_autoneg = 1;
5063 			return (0);
5064 		}
5065 		(void) eri_mii_read(erip, ERI_PHY_ANLPAR, &anlpar);
5066 		(void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5067 		an_common = anar & anlpar;
5068 
5069 		ERI_DEBUG_MSG2(erip, XCVR_MSG, "an_common = 0x%X", an_common);
5070 
5071 		if (an_common & (PHY_ANLPAR_TXFDX | PHY_ANLPAR_TX)) {
5072 			param_speed = 1;
5073 			erip->stats.ifspeed = 100;
5074 			param_mode = 1 && (an_common & PHY_ANLPAR_TXFDX);
5075 
5076 		} else if (an_common & (PHY_ANLPAR_10FDX | PHY_ANLPAR_10)) {
5077 			param_speed = 0;
5078 			erip->stats.ifspeed = 10;
5079 			param_mode = 1 && (an_common & PHY_ANLPAR_10FDX);
5080 
5081 		} else an_common = 0x0;
5082 
5083 		if (!an_common) {
5084 			ERI_FAULT_MSG1(erip, SEVERITY_MID, ERI_VERB_MSG,
5085 			    "Transceiver: anar not set with speed selection");
5086 		}
5087 		param_anlpar_100T4 = 1 && (anlpar & PHY_ANLPAR_T4);
5088 		param_anlpar_100fdx = 1 && (anlpar & PHY_ANLPAR_TXFDX);
5089 		param_anlpar_100hdx = 1 && (anlpar & PHY_ANLPAR_TX);
5090 		param_anlpar_10fdx = 1 && (anlpar & PHY_ANLPAR_10FDX);
5091 		param_anlpar_10hdx = 1 && (anlpar & PHY_ANLPAR_10);
5092 
5093 		ERI_DEBUG_MSG2(erip, PHY_MSG,
5094 		    "Link duplex = 0x%X", param_mode);
5095 		ERI_DEBUG_MSG2(erip, PHY_MSG,
5096 		    "Link speed = 0x%X", param_speed);
5097 	/*	mif_ints |= PHY_BMSR_LNKSTS; prevent double msg */
5098 	/*	mif_data |= PHY_BMSR_LNKSTS; prevent double msg */
5099 	}
5100 	retv = LINK_STATE_UNKNOWN;
5101 	if (mif_ints & PHY_BMSR_LNKSTS) {
5102 		if (mif_data & PHY_BMSR_LNKSTS) {
5103 			ERI_DEBUG_MSG1(erip, PHY_MSG, "Link Up");
5104 			/*
5105 			 * Program Lu3X31T for minimum transition
5106 			 */
5107 			if (eri_phy_mintrans) {
5108 				eri_mii_write(erip, 31, 0x8000);
5109 				(void) eri_mii_read(erip, 0, &old_mintrans);
5110 				eri_mii_write(erip, 0, 0x00F1);
5111 				eri_mii_write(erip, 31, 0x0000);
5112 			}
5113 			/*
5114 			 * The link is up.
5115 			 */
5116 			eri_init_txmac(erip);
5117 			param_linkup = 1;
5118 			erip->stats.link_up = LINK_STATE_UP;
5119 			if (param_mode)
5120 				erip->stats.link_duplex = LINK_DUPLEX_FULL;
5121 			else
5122 				erip->stats.link_duplex = LINK_DUPLEX_HALF;
5123 
5124 			retv = LINK_STATE_UP;
5125 		} else {
5126 			ERI_DEBUG_MSG1(erip, PHY_MSG, "Link down.");
5127 			param_linkup = 0;
5128 			erip->stats.link_up = LINK_STATE_DOWN;
5129 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
5130 			retv = LINK_STATE_DOWN;
5131 			if (param_autoneg) {
5132 				restart_autoneg = 1;
5133 			}
5134 		}
5135 	} else {
5136 		if (mif_data & PHY_BMSR_LNKSTS) {
5137 			if (!param_linkup) {
5138 				ERI_DEBUG_MSG1(erip, PHY_MSG,
5139 				    "eri_mif_check: MIF data link up");
5140 				/*
5141 				 * Program Lu3X31T for minimum transition
5142 				 */
5143 				if (eri_phy_mintrans) {
5144 					eri_mii_write(erip, 31, 0x8000);
5145 					(void) eri_mii_read(erip, 0,
5146 					    &old_mintrans);
5147 					eri_mii_write(erip, 0, 0x00F1);
5148 					eri_mii_write(erip, 31, 0x0000);
5149 				}
5150 				/*
5151 				 * The link is up.
5152 				 */
5153 				eri_init_txmac(erip);
5154 
5155 				param_linkup = 1;
5156 				erip->stats.link_up = LINK_STATE_UP;
5157 				if (param_mode)
5158 					erip->stats.link_duplex =
5159 					    LINK_DUPLEX_FULL;
5160 				else
5161 					erip->stats.link_duplex =
5162 					    LINK_DUPLEX_HALF;
5163 
5164 				retv = LINK_STATE_UP;
5165 			}
5166 		} else if (param_linkup) {
5167 			/*
5168 			 * The link is down now.
5169 			 */
5170 			ERI_DEBUG_MSG1(erip, PHY_MSG,
5171 			    "eri_mif_check:Link was up and went down");
5172 			param_linkup = 0;
5173 			erip->stats.link_up = LINK_STATE_DOWN;
5174 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
5175 			retv = LINK_STATE_DOWN;
5176 			if (param_autoneg)
5177 				restart_autoneg = 1;
5178 		}
5179 	}
5180 	if (restart_autoneg) {
5181 		/*
5182 		 * Restart normal auto-negotiation.
5183 		 */
5184 		ERI_DEBUG_MSG1(erip, PHY_MSG,
5185 		    "eri_mif_check: Restart Auto Negotiation");
5186 		erip->openloop_autoneg = 0;
5187 		param_mode = 0;
5188 		param_speed = 0;
5189 		param_anlpar_100T4 = 0;
5190 		param_anlpar_100fdx = 0;
5191 		param_anlpar_100hdx = 0;
5192 		param_anlpar_10fdx = 0;
5193 		param_anlpar_10hdx = 0;
5194 		param_aner_lpancap = 0;
5195 		(void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5196 		control |= (PHY_BMCR_ANE | PHY_BMCR_RAN);
5197 		eri_mii_write(erip, ERI_PHY_BMCR, control);
5198 	}
5199 	if (mif_ints & PHY_BMSR_JABDET) {
5200 		if (mif_data & PHY_BMSR_JABDET) {
5201 			ERI_DEBUG_MSG1(erip, PHY_MSG, "Jabber detected.");
5202 			HSTAT(erip, jab);
5203 			/*
5204 			 * Reset the new PHY and bring up the link
5205 			 * (Check for failure?)
5206 			 */
5207 			(void) eri_reset_xcvr(erip);
5208 		}
5209 	}
5210 	return (retv);
5211 }
5212 
5213 #define	PHYRST_PERIOD 500
5214 static int
5215 eri_reset_xcvr(struct eri *erip)
5216 {
5217 	uint16_t	stat;
5218 	uint16_t	anar;
5219 	uint16_t	control;
5220 	uint16_t	idr1;
5221 	uint16_t	idr2;
5222 	uint16_t	nicr;
5223 	uint32_t	speed_100;
5224 	uint32_t	speed_10;
5225 	int n;
5226 
5227 #ifdef	ERI_10_10_FORCE_SPEED_WORKAROUND
5228 	erip->ifspeed_old = erip->stats.ifspeed;
5229 #endif
5230 	/*
5231 	 * Reset open-loop auto-negotiation. This means normal
5232 	 * auto-negotiation is tried first, until a Multiple Link Fault
5233 	 * occurs, at which point 100M half duplex and then 10M half
5234 	 * duplex are forced until a link comes up.
5235 	 */
5236 	erip->openloop_autoneg = 0;
5237 
5238 	/*
5239 	 * Reset the xcvr.
5240 	 */
5241 	eri_mii_write(erip, ERI_PHY_BMCR, PHY_BMCR_RESET);
5242 
5243 	/* Check for transceiver reset completion */
5244 
5245 	n = 1000;
5246 	while (--n > 0) {
5247 		drv_usecwait((clock_t)PHYRST_PERIOD);
5248 		if (eri_mii_read(erip, ERI_PHY_BMCR, &control) == 1) {
5249 			/* Transceiver does not talk MII */
5250 			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
5251 			    "eri_reset_xcvr: no mii");
5252 		}
5253 		if ((control & PHY_BMCR_RESET) == 0)
5254 			goto reset_done;
5255 	}
5256 	ERI_FAULT_MSG2(erip, SEVERITY_NONE, ERI_VERB_MSG,
5257 	    "eri_reset_xcvr:reset_failed n == 0, control %x", control);
5258 	goto eri_reset_xcvr_failed;
5259 
5260 reset_done:
5261 
5262 	ERI_DEBUG_MSG2(erip, AUTOCONFIG_MSG,
5263 	    "eri_reset_xcvr: reset complete in %d us",
5264 	    (1000 - n) * PHYRST_PERIOD);
5265 
5266 	(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5267 	(void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5268 	(void) eri_mii_read(erip, ERI_PHY_IDR1, &idr1);
5269 	(void) eri_mii_read(erip, ERI_PHY_IDR2, &idr2);
5270 
5271 	ERI_DEBUG_MSG4(erip, XCVR_MSG,
5272 	    "eri_reset_xcvr: control %x stat %x anar %x", control, stat, anar);
5273 
5274 	/*
5275 	 * Initialize the read only transceiver ndd information
5276 	 * the values are either 0 or 1.
5277 	 */
5278 	param_bmsr_ancap = 1 && (stat & PHY_BMSR_ACFG);
5279 	param_bmsr_100T4 = 1 && (stat & PHY_BMSR_100T4);
5280 	param_bmsr_100fdx = 1 && (stat & PHY_BMSR_100FDX);
5281 	param_bmsr_100hdx = 1 && (stat & PHY_BMSR_100HDX);
5282 	param_bmsr_10fdx = 1 && (stat & PHY_BMSR_10FDX);
5283 	param_bmsr_10hdx = 1 && (stat & PHY_BMSR_10HDX);
5284 
5285 	/*
5286 	 * Match up the ndd capabilities with the transceiver.
5287 	 */
5288 	param_autoneg &= param_bmsr_ancap;
5289 	param_anar_100fdx &= param_bmsr_100fdx;
5290 	param_anar_100hdx &= param_bmsr_100hdx;
5291 	param_anar_10fdx &= param_bmsr_10fdx;
5292 	param_anar_10hdx &= param_bmsr_10hdx;
5293 
5294 	/*
5295 	 * Select the operation mode of the transceiver.
5296 	 */
5297 	if (param_autoneg) {
5298 		/*
5299 		 * Initialize our auto-negotiation capabilities.
5300 		 */
5301 		anar = PHY_SELECTOR;
5302 		if (param_anar_100T4)
5303 			anar |= PHY_ANAR_T4;
5304 		if (param_anar_100fdx)
5305 			anar |= PHY_ANAR_TXFDX;
5306 		if (param_anar_100hdx)
5307 			anar |= PHY_ANAR_TX;
5308 		if (param_anar_10fdx)
5309 			anar |= PHY_ANAR_10FDX;
5310 		if (param_anar_10hdx)
5311 			anar |= PHY_ANAR_10;
5312 		ERI_DEBUG_MSG2(erip, XCVR_MSG, "anar = %x", anar);
5313 		eri_mii_write(erip, ERI_PHY_ANAR, anar);
5314 	}
5315 
5316 	/* Place the Transceiver in normal operation mode */
5317 	if ((control & PHY_BMCR_ISOLATE) || (control & PHY_BMCR_LPBK)) {
5318 		control &= ~(PHY_BMCR_ISOLATE | PHY_BMCR_LPBK);
5319 		eri_mii_write(erip, ERI_PHY_BMCR,
5320 		    (control & ~PHY_BMCR_ISOLATE));
5321 	}
5322 
5323 	/*
5324 	 * If Lu3X31T then allow nonzero eri_phy_mintrans
5325 	 */
5326 	if (eri_phy_mintrans &&
5327 	    (idr1 != 0x43 || (idr2 & 0xFFF0) != 0x7420)) {
5328 		eri_phy_mintrans = 0;
5329 	}
5330 	/*
5331 	 * Initialize the mif interrupt mask.
5332 	 */
5333 	erip->mif_mask = (uint16_t)(~PHY_BMSR_RES1);
5334 
5335 	/*
5336 	 * Establish link speeds and do necessary special stuff based
5337 	 * in the speed.
5338 	 */
5339 	speed_100 = param_anar_100fdx | param_anar_100hdx;
5340 	speed_10 = param_anar_10fdx | param_anar_10hdx;
5341 
5342 	ERI_DEBUG_MSG5(erip, XCVR_MSG, "eri_reset_xcvr: %d %d %d %d",
5343 	    param_anar_100fdx, param_anar_100hdx, param_anar_10fdx,
5344 	    param_anar_10hdx);
5345 
5346 	ERI_DEBUG_MSG3(erip, XCVR_MSG,
5347 	    "eri_reset_xcvr: speed_100 %d speed_10 %d", speed_100, speed_10);
5348 
5349 	if ((!speed_100) && (speed_10)) {
5350 		erip->mif_mask &= ~PHY_BMSR_JABDET;
5351 		if (!(param_anar_10fdx) &&
5352 		    (param_anar_10hdx) &&
5353 		    (erip->link_pulse_disabled)) {
5354 			param_speed = 0;
5355 			param_mode = 0;
5356 			(void) eri_mii_read(erip, ERI_PHY_NICR, &nicr);
5357 			nicr &= ~PHY_NICR_LD;
5358 			eri_mii_write(erip, ERI_PHY_NICR, nicr);
5359 			param_linkup = 1;
5360 			erip->stats.link_up = LINK_STATE_UP;
5361 			if (param_mode)
5362 				erip->stats.link_duplex = LINK_DUPLEX_FULL;
5363 			else
5364 				erip->stats.link_duplex = LINK_DUPLEX_HALF;
5365 		}
5366 	}
5367 
5368 	/*
5369 	 * Clear the auto-negotiation before re-starting.
5370 	 */
5371 	control = PHY_BMCR_100M | PHY_BMCR_FDX;
5372 /*	eri_mii_write(erip, ERI_PHY_BMCR, control); */
5373 	if (param_autoneg) {
5374 		/*
5375 		 * Setup the transceiver for autonegotiation.
5376 		 */
5377 		erip->mif_mask &= ~PHY_BMSR_ANC;
5378 
5379 		/*
5380 		 * Clear the Auto-negotiation before re-starting
5381 		 */
5382 		eri_mii_write(erip, ERI_PHY_BMCR, control & ~PHY_BMCR_ANE);
5383 
5384 		/*
5385 		 * Switch on auto-negotiation.
5386 		 */
5387 		control |= (PHY_BMCR_ANE | PHY_BMCR_RAN);
5388 
5389 		eri_mii_write(erip, ERI_PHY_BMCR, control);
5390 	} else {
5391 		/*
5392 		 * Force the transceiver.
5393 		 */
5394 		erip->mif_mask &= ~PHY_BMSR_LNKSTS;
5395 
5396 		/*
5397 		 * Switch off auto-negotiation.
5398 		 */
5399 		control &= ~(PHY_BMCR_FDX | PHY_BMCR_ANE | PHY_BMCR_RAN);
5400 
5401 		if (speed_100) {
5402 			control |= PHY_BMCR_100M;
5403 			param_aner_lpancap = 0; /* Clear LP nway */
5404 			param_anlpar_10fdx = 0;
5405 			param_anlpar_10hdx = 0;
5406 			param_anlpar_100T4 = param_anar_100T4;
5407 			param_anlpar_100fdx = param_anar_100fdx;
5408 			param_anlpar_100hdx = param_anar_100hdx;
5409 			param_speed = 1;
5410 			erip->stats.ifspeed = 100;
5411 			param_mode = param_anar_100fdx;
5412 			if (param_mode) {
5413 				param_anlpar_100hdx = 0;
5414 				erip->stats.link_duplex = LINK_DUPLEX_FULL;
5415 			} else {
5416 				erip->stats.link_duplex = LINK_DUPLEX_HALF;
5417 			}
5418 		} else if (speed_10) {
5419 			control &= ~PHY_BMCR_100M;
5420 			param_aner_lpancap = 0; /* Clear LP nway */
5421 			param_anlpar_100fdx = 0;
5422 			param_anlpar_100hdx = 0;
5423 			param_anlpar_100T4 = 0;
5424 			param_anlpar_10fdx = param_anar_10fdx;
5425 			param_anlpar_10hdx = param_anar_10hdx;
5426 			param_speed = 0;
5427 			erip->stats.ifspeed = 10;
5428 			param_mode = param_anar_10fdx;
5429 			if (param_mode) {
5430 				param_anlpar_10hdx = 0;
5431 				erip->stats.link_duplex = LINK_DUPLEX_FULL;
5432 			} else {
5433 				erip->stats.link_duplex = LINK_DUPLEX_HALF;
5434 			}
5435 		} else {
5436 			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
5437 			    "Transceiver speed set incorrectly.");
5438 		}
5439 
5440 		if (param_mode) {
5441 			control |= PHY_BMCR_FDX;
5442 		}
5443 
5444 		ERI_DEBUG_MSG4(erip, PHY_MSG,
5445 		    "control = %x status = %x param_mode %d",
5446 		    control, stat, param_mode);
5447 
5448 		eri_mii_write(erip, ERI_PHY_BMCR, control);
5449 /*
5450  *		if (param_mode) {
5451  *			control |= PHY_BMCR_FDX;
5452  *		}
5453  *		control &= ~(PHY_BMCR_FDX | PHY_BMCR_ANE | PHY_BMCR_RAN);
5454  *		eri_mii_write(erip, ERI_PHY_BMCR, control);
5455  */
5456 	}
5457 
5458 #ifdef DEBUG
5459 	(void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5460 	(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5461 	(void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5462 #endif
5463 	ERI_DEBUG_MSG4(erip, PHY_MSG,
5464 	    "control %X status %X anar %X", control, stat, anar);
5465 
5467 	return (0);
5468 
5469 eri_reset_xcvr_failed:
5470 	return (1);
5471 }
5472 
5473 #ifdef	ERI_10_10_FORCE_SPEED_WORKAROUND
5474 
5475 static void
5476 eri_xcvr_force_mode(struct eri *erip, uint32_t *link_timeout)
5477 {
5478 
5479 	if (!param_autoneg && !param_linkup && (erip->stats.ifspeed == 10) &&
5480 	    (param_anar_10fdx | param_anar_10hdx)) {
5481 		*link_timeout = SECOND(1);
5482 		return;
5483 	}
5484 
5485 	if (!param_autoneg && !param_linkup && (erip->ifspeed_old == 10) &&
5486 	    (param_anar_100fdx | param_anar_100hdx)) {
5487 		/*
5488 		 * May have to set link partner's speed and mode.
5489 		 */
5490 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_LOG_MSG,
5491 	    "May have to set link partner's speed and duplex mode.");
5492 	}
5493 }
5494 #endif
5495 
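/*
 * Start or stop hardware MIF polling of the PHY status register. While
 * polling is enabled (ERI_MIF_CFGPE set), the MIF raises an interrupt
 * on status changes, so the global MIF interrupt is unmasked; when
 * polling stops, the global mask is restored and the PHY is instead
 * read explicitly from the link timer.
 */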
5496 static void
5497 eri_mif_poll(struct eri *erip, soft_mif_enable_t enable)
5498 {
5499 	if (enable == MIF_POLL_START) {
5500 		if (erip->mifpoll_enable && !erip->openloop_autoneg) {
5501 			erip->mif_config |= ERI_MIF_CFGPE;
5502 			PUT_MIFREG(mif_cfg, erip->mif_config);
5503 			drv_usecwait(ERI_MIF_POLL_DELAY);
5504 			PUT_GLOBREG(intmask, GET_GLOBREG(intmask) &
5505 			    ~ERI_G_MASK_MIF_INT);
5506 			PUT_MIFREG(mif_imask, erip->mif_mask);
5507 		}
5508 	} else if (enable == MIF_POLL_STOP) {
5509 		erip->mif_config &= ~ERI_MIF_CFGPE;
5510 		PUT_MIFREG(mif_cfg, erip->mif_config);
5511 		drv_usecwait(ERI_MIF_POLL_DELAY);
5512 		PUT_GLOBREG(intmask, GET_GLOBREG(intmask) |
5513 		    ERI_G_MASK_MIF_INT);
5514 		PUT_MIFREG(mif_imask, ERI_MIF_INTMASK);
5515 	}
5516 	ERI_DEBUG_MSG2(erip, XCVR_MSG, "MIF Config = 0x%X",
5517 	    GET_MIFREG(mif_cfg));
5518 	ERI_DEBUG_MSG2(erip, XCVR_MSG, "MIF imask = 0x%X",
5519 	    GET_MIFREG(mif_imask));
5520 	ERI_DEBUG_MSG2(erip, XCVR_MSG, "INT imask = 0x%X",
5521 	    GET_GLOBREG(intmask));
5522 	ERI_DEBUG_MSG1(erip, XCVR_MSG, "<== mif_poll");
5523 }
5524 
5525 /* Decide if transmitter went dead and reinitialize everything */
5526 #ifdef	ERI_TX_HUNG
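/*
 * eri_txhung_limit is the number of consecutive link-timer samples with
 * TX work pending but no forward progress (no new completions and no
 * new collisions) before the transmitter is declared hung.
 */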
5527 static int eri_txhung_limit = 2;
5528 static int
5529 eri_check_txhung(struct eri *erip)
5530 {
5531 	boolean_t	macupdate = B_FALSE;
5532 
5533 	mutex_enter(&erip->xmitlock);
5534 	if (erip->flags & ERI_RUNNING) {
5535 		erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
5536 		    ETX_COMPLETION_MASK);
5537 		macupdate |= eri_reclaim(erip, erip->tx_completion);
	}
5538 
5539 	/* Something needs to be sent out but it is not going out */
5540 	if ((erip->tcurp != erip->tnextp) &&
5541 	    (erip->stats.opackets64 == erip->erisave.reclaim_opackets) &&
5542 	    (erip->stats.collisions == erip->erisave.starts))
5543 		erip->txhung++;
5544 	else
5545 		erip->txhung = 0;
5546 
5547 	erip->erisave.reclaim_opackets = erip->stats.opackets64;
5548 	erip->erisave.starts = erip->stats.collisions;
5549 	mutex_exit(&erip->xmitlock);
5550 
5551 	if (macupdate)
5552 		mac_tx_update(erip->mh);
5553 
5554 	return (erip->txhung >= eri_txhung_limit);
5555 }
5556 #endif
5557