xref: /titanic_50/usr/src/uts/sun/io/eri/eri.c (revision 79d2f1e1d69dd8621f962e48584f1e2434b3c837)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * SunOS MT STREAMS ERI(PCI) 10/100 Mb Ethernet Device Driver
30  */
31 
32 #include	<sys/types.h>
33 #include	<sys/debug.h>
34 #include	<sys/stropts.h>
35 #include	<sys/stream.h>
36 #include	<sys/strsubr.h>
37 #include	<sys/kmem.h>
38 #include	<sys/crc32.h>
39 #include	<sys/ddi.h>
40 #include	<sys/sunddi.h>
41 #include	<sys/strsun.h>
42 #include	<sys/stat.h>
43 #include	<sys/cpu.h>
44 #include	<sys/kstat.h>
45 #include	<inet/common.h>
46 #include	<sys/pattr.h>
47 #include	<inet/mi.h>
48 #include	<inet/nd.h>
49 #include	<sys/ethernet.h>
50 #include	<sys/vlan.h>
51 #include	<sys/policy.h>
52 #include	<sys/mac.h>
53 #include	<sys/mac_ether.h>
54 #include	<sys/dlpi.h>
55 
56 #include	<sys/pci.h>
57 
58 #include	"eri_phy.h"
59 #include	"eri_mac.h"
60 #include	"eri.h"
61 #include	"eri_common.h"
62 
63 #include	"eri_msg.h"
64 
65 /*
66  *  **** Function Prototypes *****
67  */
68 /*
69  * Entry points (man9e)
70  */
71 static	int	eri_attach(dev_info_t *, ddi_attach_cmd_t);
72 static	int	eri_detach(dev_info_t *, ddi_detach_cmd_t);
73 static	uint_t	eri_intr(caddr_t);
74 
75 /*
76  * I/O Functions
77  */
78 static	boolean_t	eri_send_msg(struct eri *, mblk_t *);
79 static  mblk_t		*eri_read_dma(struct eri *, volatile struct rmd *,
80 			    volatile int, uint64_t flags);
81 
82 /*
83  * Initialization Functions
84  */
85 static  boolean_t	eri_init(struct eri *);
86 static	int	eri_allocthings(struct eri *);
87 static  int	eri_init_xfer_params(struct eri *);
88 static  void	eri_statinit(struct eri *);
89 static	int	eri_burstsize(struct eri *);
90 
91 static	void	eri_setup_mac_address(struct eri *, dev_info_t *);
92 
93 static	uint32_t eri_init_rx_channel(struct eri *);
94 static	void	eri_init_rx(struct eri *);
95 static	void	eri_init_txmac(struct eri *);
96 
97 /*
98  * Un-init Functions
99  */
100 static	uint32_t eri_txmac_disable(struct eri *);
101 static	uint32_t eri_rxmac_disable(struct eri *);
102 static	int	eri_stop(struct eri *);
103 static	void	eri_uninit(struct eri *erip);
104 static	int	eri_freebufs(struct eri *);
105 static	boolean_t	eri_reclaim(struct eri *, uint32_t);
106 
107 /*
108  * Transceiver (xcvr) Functions
109  */
110 static	int	eri_new_xcvr(struct eri *); /* Initializes & detects xcvrs */
111 static	int	eri_reset_xcvr(struct eri *);
112 
113 #ifdef	ERI_10_10_FORCE_SPEED_WORKAROUND
114 static	void	eri_xcvr_force_mode(struct eri *, uint32_t *);
115 #endif
116 
117 static	void	eri_mif_poll(struct eri *, soft_mif_enable_t);
118 static	void	eri_check_link(struct eri *);
119 static	uint32_t eri_check_link_noind(struct eri *);
120 static	link_state_t eri_mif_check(struct eri *, uint16_t, uint16_t);
121 static	void    eri_mii_write(struct eri *, uint8_t, uint16_t);
122 static	uint32_t eri_mii_read(struct eri *, uint8_t, uint16_t *);
123 
124 /*
125  * Reset Functions
126  */
127 static	uint32_t eri_etx_reset(struct eri *);
128 static	uint32_t eri_erx_reset(struct eri *);
129 
130 /*
131  * Error Functions
132  */
133 static	void eri_fatal_err(struct eri *, uint32_t);
134 static	void eri_nonfatal_err(struct eri *, uint32_t);
135 
136 #ifdef	ERI_TX_HUNG
137 static	int eri_check_txhung(struct eri *);
138 #endif
139 
140 /*
141  * Hardening Functions
142  */
143 static void eri_fault_msg(struct eri *, uint_t, msg_t, const char *, ...);
144 
145 /*
146  * Misc Functions
147  */
148 static void	eri_savecntrs(struct eri *);
149 
150 static	void	eri_stop_timer(struct eri *erip);
151 static	void	eri_start_timer(struct eri *erip, fptrv_t func, clock_t msec);
152 
153 static	void eri_bb_force_idle(struct eri *);
154 
155 /*
156  * Utility Functions
157  */
158 static	mblk_t *eri_allocb(size_t size);
159 static	mblk_t *eri_allocb_sp(size_t size);
160 static	int	eri_param_get(queue_t *q, mblk_t *mp, caddr_t cp);
161 static	int	eri_param_set(queue_t *, mblk_t *, char *, caddr_t);
162 
163 /*
164  * Functions to support ndd
165  */
166 static	void	eri_nd_free(caddr_t *nd_pparam);
167 
168 static	boolean_t	eri_nd_load(caddr_t *nd_pparam, char *name,
169 				pfi_t get_pfi, pfi_t set_pfi, caddr_t data);
170 
171 static	int	eri_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp);
172 static	void	eri_param_cleanup(struct eri *);
173 static	int	eri_param_register(struct eri *, param_t *, int);
174 static	void	eri_process_ndd_ioctl(struct eri *, queue_t *, mblk_t *, int);
175 static	int	eri_mk_mblk_tail_space(mblk_t *, mblk_t **, size_t);
176 
177 
178 static	void eri_loopback(struct eri *, queue_t *, mblk_t *);
179 
180 static uint32_t	eri_ladrf_bit(const uint8_t *);
181 
182 
183 /*
184  * Nemo (GLDv3) Functions.
185  */
186 static	int		eri_m_stat(void *, uint_t, uint64_t *);
187 static	int		eri_m_start(void *);
188 static	void		eri_m_stop(void *);
189 static	int		eri_m_promisc(void *, boolean_t);
190 static	int		eri_m_multicst(void *, boolean_t, const uint8_t *);
191 static	int		eri_m_unicst(void *, const uint8_t *);
192 static	void		eri_m_ioctl(void *, queue_t *, mblk_t *);
193 static	boolean_t	eri_m_getcapab(void *, mac_capab_t, void *);
194 static	mblk_t		*eri_m_tx(void *, mblk_t *);
195 
196 static mac_callbacks_t eri_m_callbacks = {
197 	MC_IOCTL | MC_GETCAPAB,	/* mc_callbacks */
198 	eri_m_stat,		/* mc_getstat */
199 	eri_m_start,		/* mc_start */
200 	eri_m_stop,		/* mc_stop */
201 	eri_m_promisc,		/* mc_setpromisc */
202 	eri_m_multicst,		/* mc_multicst */
203 	eri_m_unicst,		/* mc_unicst */
204 	eri_m_tx,		/* mc_tx */
205 	NULL,			/* mc_resources */
206 	eri_m_ioctl,		/* mc_ioctl */
207 	eri_m_getcapab		/* mc_getcapab */
208 };
209 
210 /*
211  * Define PHY vendors, matched against the IEEE
212  * Organizationally Unique Identifier (OUI)
213  */
214 /*
215  * The first two are supported as Internal XCVRs
216  */
217 #define	PHY_VENDOR_LUCENT	0x601d
218 
219 #define	PHY_LINK_NONE		0	/* Not attempted yet or retry */
220 #define	PHY_LINK_DOWN		1	/* Not being used	*/
221 #define	PHY_LINK_UP		2	/* Not being used	*/
222 
223 #define	AUTO_SPEED		0
224 #define	FORCE_SPEED		1
225 
226 /*
227  * MIB II broadcast/multicast packets
228  */
229 
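/* The group (multicast) bit is the low-order bit of the first address octet. */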
230 #define	IS_BROADCAST(pkt) (bcmp(pkt, &etherbroadcastaddr, ETHERADDRL) == 0)
231 #define	IS_MULTICAST(pkt) ((pkt[0] & 01) == 1)
232 
233 #define	BUMP_InNUcast(erip, pkt) \
234 		if (IS_BROADCAST(pkt)) { \
235 			HSTAT(erip, brdcstrcv); \
236 		} else if (IS_MULTICAST(pkt)) { \
237 			HSTAT(erip, multircv); \
238 		}
239 
240 #define	BUMP_OutNUcast(erip, pkt) \
241 		if (IS_BROADCAST(pkt)) { \
242 			HSTAT(erip, brdcstxmt); \
243 		} else if (IS_MULTICAST(pkt)) { \
244 			HSTAT(erip, multixmt); \
245 		}
246 
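/*
 * Advance a transmit descriptor pointer, wrapping from the ring limit
 * (tmdlimp) back to the ring base (tbasep).
 */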
247 #define	NEXTTMDP(tbasep, tmdlimp, tmdp)	(((tmdp) + 1) == tmdlimp	\
248 	? tbasep : ((tmdp) + 1))
249 
250 #define	ETHERHEADER_SIZE (sizeof (struct ether_header))
251 
252 #ifdef	ERI_RCV_CKSUM
253 #define	ERI_PROCESS_READ(erip, bp, sum)				\
254 {								\
255 	t_uscalar_t	type;					\
256 	uint_t	start_offset, end_offset;			\
257 								\
258 	*(bp->b_wptr) = 0;	/* pad byte */			\
259 								\
260 	/*							\
261 	 * update MIB II statistics				\
262 	 */							\
263 	HSTAT(erip, ipackets64);				\
264 	HSTATN(erip, rbytes64, len);				\
265 	BUMP_InNUcast(erip, bp->b_rptr);			\
266 	type = get_ether_type(bp->b_rptr);			\
267 	if (type == ETHERTYPE_IP || type == ETHERTYPE_IPV6) {	\
268 		start_offset = 0;				\
269 		end_offset = bp->b_wptr - 			\
270 			(bp->b_rptr + ETHERHEADER_SIZE);	\
271 		(void) hcksum_assoc(bp, NULL, NULL,		\
272 			start_offset, 0, end_offset, sum, 	\
273 			HCK_PARTIALCKSUM, 0);			\
274 	} else {						\
275 		/*						\
276 		 * Strip the PADS for 802.3			\
277 		 */						\
278 		if (type <= ETHERMTU)				\
279 			bp->b_wptr = bp->b_rptr +		\
280 				ETHERHEADER_SIZE + type;	\
281 	}							\
282 }
283 #else
284 
285 #define	ERI_PROCESS_READ(erip, bp)				\
286 {								\
287 	t_uscalar_t	type;					\
288 	type = get_ether_type(bp->b_rptr);			\
289 								\
290 	/*							\
291 	 * update MIB II statistics				\
292 	 */							\
293 	HSTAT(erip, ipackets64);				\
294 	HSTATN(erip, rbytes64, len);				\
295 	BUMP_InNUcast(erip, bp->b_rptr);			\
296 	/*							\
297 	 * Strip the PADS for 802.3				\
298 	 */							\
299 	if (type <= ETHERMTU)					\
300 		bp->b_wptr = bp->b_rptr + ETHERHEADER_SIZE + 	\
301 			type;					\
302 }
303 #endif  /* ERI_RCV_CKSUM */
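
#ifdef	ERI_EXAMPLES	/* never defined; the block below is a sketch only */
/*
 * A minimal sketch of the ones-complement fold that an upper layer
 * applies to the 16-bit partial sum handed up via hcksum_assoc() above.
 * This hypothetical helper is for illustration; the real completion is
 * done by the IP stack, not by this driver.
 */
static uint16_t
eri_example_cksum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries once */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold any new carry */
	return ((uint16_t)~sum);
}
#endif	/* ERI_EXAMPLES */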
304 
305 /*
306  * TX Interrupt Rate
307  */
308 static	int	tx_interrupt_rate = 16;
309 
310 /*
311  * Ethernet broadcast address definition.
312  */
313 static uint8_t	etherbroadcastaddr[] = {
314 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
315 };
316 
317 /*
318  * The following flags track which DMA resources have been allocated.
319  */
320 #define	ERI_DESC_HANDLE_ALLOC	0x0001
321 #define	ERI_DESC_MEM_ALLOC	0x0002
322 #define	ERI_DESC_MEM_MAP	0x0004
323 #define	ERI_XMIT_HANDLE_ALLOC	0x0008
324 #define	ERI_XMIT_HANDLE_BIND	0x0010
325 #define	ERI_RCV_HANDLE_ALLOC	0x0020
326 #define	ERI_RCV_HANDLE_BIND	0x0040
327 #define	ERI_XMIT_DVMA_ALLOC	0x0100
328 #define	ERI_RCV_DVMA_ALLOC	0x0200
329 #define	ERI_XBUFS_HANDLE_ALLOC  0x0400
330 #define	ERI_XBUFS_KMEM_ALLOC    0x0800
331 #define	ERI_XBUFS_KMEM_DMABIND  0x1000
332 
333 
334 #define	ERI_DONT_STRIP_CRC
335 /*
336  * Translate a kernel virtual address to i/o address.
337  */
338 #define	ERI_IOPBIOADDR(erip, a) \
339 	((erip)->iopbiobase + ((uintptr_t)a - (erip)->iopbkbase))
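/*
 * Example: a buffer at kernel virtual address (iopbkbase + 0x40) maps to
 * i/o address (iopbiobase + 0x40); only the offset within the IOPB area
 * is carried over.
 */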
340 
341 /*
342  * ERI Configuration Register Value
343  * Used to configure parameters that define DMA burst
344  * and internal arbitration behavior.
345  * for equal TX and RX bursts, set the following in global
346  * configuration register.
347  * static	int	global_config = 0x42;
348  */
349 
350 /*
351  * ERI ERX Interrupt Blanking Time
352  * Each count is about 16 us (2048 clocks) for 66 MHz PCI.
353  */
354 static	int	intr_blank_time = 6;	/* for about 96 us */
355 static	int	intr_blank_packets = 8;	/* for 8 packets */
356 
357 /*
358  * ERX PAUSE Threshold Register value
359  * The following value is for an OFF Threshold of about 15.5 Kbytes
360  * and an ON Threshold of 4K bytes.
361  */
362 static	int rx_pause_threshold = 0xf8 | (0x40 << 12);
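/*
 * Worked arithmetic for the value above, assuming thresholds are in
 * units of 64 bytes (per the comment): OFF = 0xf8 = 248 * 64 = 15872
 * bytes (~15.5 KB); ON = 0x40 = 64 * 64 = 4096 bytes (4 KB), placed in
 * bits 12 and up of the register value.
 */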
363 static	int eri_reinit_fatal = 0;
364 #ifdef	DEBUG
365 static	int noteri = 0;
366 #endif
367 
368 #ifdef	ERI_TX_HUNG
369 static	int eri_reinit_txhung = 0;
370 #endif
371 
372 #ifdef ERI_HDX_BUG_WORKAROUND
373 /*
374  * By default, enable padding of frames to 97 bytes in hdx mode.
375  * To disable it, add the following to /etc/system:
376  * set eri:eri_hdx_pad_enable=0
377  */
378 static	uchar_t eri_hdx_pad_enable = 1;
379 #endif
380 
381 /*
382  * Default values to initialize the cache line size and latency timer
383  * registers in the PCI configuration space.
384  * ERI_G_CACHE_LINE_SIZE_16 is defined as 16 since RIO expects the value
385  * in units of 4 bytes.
386  */
387 #ifdef ERI_PM_WORKAROUND_PCI
388 static int eri_pci_cache_line = ERI_G_CACHE_LINE_SIZE_32; /* 128 bytes */
389 static int eri_pci_latency_timer = 0xff;		/* 255 PCI cycles */
390 #else
391 static int eri_pci_cache_line = ERI_G_CACHE_LINE_SIZE_16; /* 64 bytes */
392 static int eri_pci_latency_timer = 0x40;		/* 64 PCI cycles */
393 #endif
394 #define	ERI_CACHE_LINE_SIZE	(eri_pci_cache_line << ERI_G_CACHE_BIT)
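/*
 * Worked example: eri_pci_cache_line = 16 units * 4 bytes = a 64-byte
 * cache line; ERI_CACHE_LINE_SIZE shifts that count into the cache-line
 * field (ERI_G_CACHE_BIT) of the software reset register value.
 */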
395 
396 /*
397  * Claim the device is ultra-capable of burst in the beginning.  Use
398  * the value returned by ddi_dma_burstsizes() to actually set the ERI
399  * global configuration register later.
400  *
401  * PCI_ERI supports Infinite burst or 64-byte-multiple bursts.
402  */
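
#ifdef	ERI_EXAMPLES	/* never defined; the block below is a sketch only */
/*
 * A minimal sketch of how the burst sizes actually granted by the parent
 * nexus can be read back once a DMA handle exists.  ddi_dma_burstsizes()
 * returns a bitmap in which bit N set means 2^N-byte bursts are allowed.
 * This hypothetical helper is not the driver's eri_burstsize().
 */
static int
eri_example_burstsize(ddi_dma_handle_t handle)
{
	int burstsizes = ddi_dma_burstsizes(handle);

	/* Prefer 64-byte bursts when granted; else use what was offered. */
	return ((burstsizes & 0x40) ? 0x40 : burstsizes);
}
#endif	/* ERI_EXAMPLES */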
403 #define	ERI_LIMADDRLO	((uint64_t)0x00000000)
404 #define	ERI_LIMADDRHI	((uint64_t)0xffffffff)
405 
406 static ddi_dma_attr_t dma_attr = {
407 	DMA_ATTR_V0,		/* version number. */
408 	(uint64_t)ERI_LIMADDRLO, /* low address */
409 	(uint64_t)ERI_LIMADDRHI, /* high address */
410 	(uint64_t)0x00ffffff,	/* address counter max */
411 	(uint64_t)1,		/* alignment */
412 	(uint_t)0xe000e0,	/* dlim_burstsizes for 32 and 64 bit xfers */
413 	(uint32_t)0x1,		/* minimum transfer size */
414 	(uint64_t)0x7fffffff,	/* maximum transfer size */
415 	(uint64_t)0x00ffffff,	/* maximum segment size */
416 	1,			/* scatter/gather list length */
417 	(uint32_t)1,		/* granularity */
418 	(uint_t)0		/* attribute flags */
419 };
420 
421 static ddi_dma_attr_t desc_dma_attr = {
422 	DMA_ATTR_V0,		/* version number. */
423 	(uint64_t)ERI_LIMADDRLO, /* low address */
424 	(uint64_t)ERI_LIMADDRHI, /* high address */
425 	(uint64_t)0x00ffffff,	/* address counter max */
426 	(uint64_t)8,		/* alignment */
427 	(uint_t)0xe000e0,	/* dlim_burstsizes for 32 and 64 bit xfers */
428 	(uint32_t)0x1,		/* minimum transfer size */
429 	(uint64_t)0x7fffffff,	/* maximum transfer size */
430 	(uint64_t)0x00ffffff,	/* maximum segment size */
431 	1,			/* scatter/gather list length */
432 	16,			/* granularity */
433 	0			/* attribute flags */
434 };
435 
436 ddi_dma_lim_t eri_dma_limits = {
437 	(uint64_t)ERI_LIMADDRLO, /* dlim_addr_lo */
438 	(uint64_t)ERI_LIMADDRHI, /* dlim_addr_hi */
439 	(uint64_t)ERI_LIMADDRHI, /* dlim_cntr_max */
440 	(uint_t)0x00e000e0,	/* dlim_burstsizes for 32 and 64 bit xfers */
441 	(uint32_t)0x1,		/* dlim_minxfer */
442 	1024			/* dlim_speed */
443 };
444 
445 /*
446  * Link Configuration variables
447  *
448  * On Motherboard implementations, 10/100 Mbps speeds may be supported
449  * by using both the Serial Link and the MII (non-serial-link) interface.
450  * When both links are present, the driver automatically tries to bring up
451  * both. If both are up, the Gigabit Serial Link is selected for use, by
452  * default. The following configuration variable is used to force the selection
453  * of one of the links when both are up.
454  * To change the default selection to the MII link when both the Serial
455  * Link and the MII link are up, change eri_default_link to 1.
456  *
457  * Once a link is in use, the driver will continue to use that link until it
458  * goes down. When it goes down, the driver will look at the status of both the
459  * links again for link selection.
460  *
461  * Currently the standard is not stable w.r.t. gigabit link configuration
462  * using auto-negotiation procedures. Meanwhile, the link may be configured
463  * in "forced" mode using the "autonegotiation enable" bit (bit-12) in the
464  * PCS MII Command Register. In this mode the PCS sends "idles" until sees
465  * "idles" as initialization instead of the Link Configuration protocol
466  * where a Config register is exchanged. In this mode, the ERI is programmed
467  * for full-duplex operation with both pauseTX and pauseRX (for flow control)
468  * enabled.
469  */
470 
471 static	int	select_link = 0; /* automatic selection */
472 static	int	default_link = 0; /* Select Serial link if both are up */
473 
474 /*
475  * The following variables are used for configuring link-operation
476  * for all the "eri" interfaces in the system.
477  * Later these parameters may be changed per interface using the "ndd" command.
478  * These parameters may also be specified as properties using the .conf
479  * file mechanism for each interface.
480  */
481 
482 /*
483  * The following variable value will be overridden by "link-pulse-disabled"
484  * property which may be created by OBP or eri.conf file. This property is
485  * applicable only for 10 Mbps links.
486  */
487 static	int	link_pulse_disabled = 0;	/* link pulse disabled */
488 
489 /* For MII-based FastEthernet links */
490 static	int	adv_autoneg_cap = 1;
491 static	int	adv_100T4_cap = 0;
492 static	int	adv_100fdx_cap = 1;
493 static	int	adv_100hdx_cap = 1;
494 static	int	adv_10fdx_cap = 1;
495 static	int	adv_10hdx_cap = 1;
496 static	int	adv_pauseTX_cap =  0;
497 static	int	adv_pauseRX_cap =  0;
498 
499 /*
500  * The following gap parameters are in terms of byte times.
501  */
502 static	int	ipg0 = 8;
503 static	int	ipg1 = 8;
504 static	int	ipg2 = 4;
505 
506 static	int	lance_mode = 1;		/* to enable LANCE mode */
507 static	int	mifpoll_enable = 0;	/* to enable mif poll */
508 static	int	ngu_enable = 0;		/* to enable Never Give Up mode */
509 
510 static	int	eri_force_mlf = 0; 	/* to enable mif poll */
511 static	int	eri_phy_mintrans = 1;	/* Lu3X31T mintrans algorithm */
512 /*
513  * For the MII interface, the External Transceiver is selected when present.
514  * The following variable is used to select the Internal Transceiver even
515  * when the External Transceiver is present.
516  */
517 static	int	use_int_xcvr = 0;
518 static	int	pace_size = 0;	/* Do not use pacing for now */
519 
520 static	int	eri_use_dvma_rx = 0;	/* =1:use dvma */
521 static	int	eri_use_dvma_tx = 1;	/* =1:use dvma */
522 static	int	eri_rx_bcopy_max = RX_BCOPY_MAX; /* bcopy() size threshold */
523 static	int	eri_tx_bcopy_max = TX_BCOPY_MAX; /* bcopy() size threshold */
524 static	int	eri_overflow_reset = 1;	/* global reset if rx_fifo_overflow */
525 static	int	eri_tx_ring_size = 2048; /* number of entries in tx ring */
526 static	int	eri_rx_ring_size = 1024; /* number of entries in rx ring */
527 /*
528  * The following parameters may be configured by the user. If they are not
529  * configured by the user, the values will be based on the capabilities of
530  * the transceiver.
531  * The value "ERI_NOTUSR" is ORed with the parameter value to indicate values
532  * which are NOT configured by the user.
533  */
534 
535 #define	ERI_NOTUSR	0x0f000000
536 #define	ERI_MASK_1BIT	0x1
537 #define	ERI_MASK_2BIT	0x3
538 #define	ERI_MASK_8BIT	0xff
539 
540 
541 /*
542  * Note:
543  * ERI has all of the above capabilities.
544  * Only when an External Transceiver is selected for MII-based FastEthernet
545  * link operation, the capabilities depend upon the capabilities of the
546  * External Transceiver.
547  */
548 
549 /* ------------------------------------------------------------------------- */
550 
551 static  param_t	param_arr[] = {
552 	/* min		max		value	r/w/hidden+name */
553 	{  0,		2,		2,	"-transceiver_inuse"},
554 	{  0,		1,		0,	"-link_status"},
555 	{  0,		1,		0,	"-link_speed"},
556 	{  0,		1,		0,	"-link_mode"},
557 	{  0,		255,		8,	"+ipg1"},
558 	{  0,		255,		4,	"+ipg2"},
559 	{  0,		1,		0,	"+use_int_xcvr"},
560 	{  0,		255,		0,	"+pace_size"},
561 	{  0,		1,		1,	"+adv_autoneg_cap"},
562 	{  0,		1,		1,	"+adv_100T4_cap"},
563 	{  0,		1,		1,	"+adv_100fdx_cap"},
564 	{  0,		1,		1,	"+adv_100hdx_cap"},
565 	{  0,		1,		1,	"+adv_10fdx_cap"},
566 	{  0,		1,		1,	"+adv_10hdx_cap"},
567 	{  0,		1,		1,	"-autoneg_cap"},
568 	{  0,		1,		1,	"-100T4_cap"},
569 	{  0,		1,		1,	"-100fdx_cap"},
570 	{  0,		1,		1,	"-100hdx_cap"},
571 	{  0,		1,		1,	"-10fdx_cap"},
572 	{  0,		1,		1,	"-10hdx_cap"},
573 	{  0,		1,		0,	"-lp_autoneg_cap"},
574 	{  0,		1,		0,	"-lp_100T4_cap"},
575 	{  0,		1,		0,	"-lp_100fdx_cap"},
576 	{  0,		1,		0,	"-lp_100hdx_cap"},
577 	{  0,		1,		0,	"-lp_10fdx_cap"},
578 	{  0,		1,		0,	"-lp_10hdx_cap"},
579 	{  0,		1,		1,	"+lance_mode"},
580 	{  0,		31,		8,	"+ipg0"},
581 	{  0,		127,		6,	"+intr_blank_time"},
582 	{  0,		255,		8,	"+intr_blank_packets"},
583 	{  0,		1,		1,	"!serial-link"},
584 	{  0,		2,		1,	"!non-serial-link"},
585 	{  0,		1,		0,	"%select-link"},
586 	{  0,		1,		0,	"%default-link"},
587 	{  0,		2,		0,	"!link-in-use"},
588 	{  0,		1,		1,	"%adv_asm_dir_cap"},
589 	{  0,		1,		1,	"%adv_pause_cap"},
590 	{  0,		1,		0,	"!asm_dir_cap"},
591 	{  0,		1,		0,	"!pause_cap"},
592 	{  0,		1,		0,	"!lp_asm_dir_cap"},
593 	{  0,		1,		0,	"!lp_pause_cap"},
594 };
595 
596 DDI_DEFINE_STREAM_OPS(eri_dev_ops, nulldev, nulldev, eri_attach, eri_detach,
597 	nodev, NULL, D_MP, NULL);
598 
599 /*
600  * This is the loadable module wrapper.
601  */
602 #include <sys/modctl.h>
603 
604 /*
605  * Module linkage information for the kernel.
606  */
607 static struct modldrv modldrv = {
608 	&mod_driverops,	/* Type of module.  This one is a driver */
609 	"Sun RIO 10/100 Mb Ethernet",
610 	&eri_dev_ops,	/* driver ops */
611 };
612 
613 static struct modlinkage modlinkage = {
614 	MODREV_1, &modldrv, NULL
615 };
616 
617 /*
618  * Hardware Independent Functions
620  */
621 
622 int
623 _init(void)
624 {
625 	int	status;
626 
627 	mac_init_ops(&eri_dev_ops, "eri");
628 	if ((status = mod_install(&modlinkage)) != 0) {
629 		mac_fini_ops(&eri_dev_ops);
630 	}
631 	return (status);
632 }
633 
634 int
635 _fini(void)
636 {
637 	int status;
638 
639 	status = mod_remove(&modlinkage);
640 	if (status == 0) {
641 		mac_fini_ops(&eri_dev_ops);
642 	}
643 	return (status);
644 }
645 
646 int
647 _info(struct modinfo *modinfop)
648 {
649 	return (mod_info(&modlinkage, modinfop));
650 }
651 
652 
653 /*
654  * Interface exists: make available by filling in network interface
655  * record.  System will initialize the interface when it is ready
656  * to accept packets.
657  */
658 static int
659 eri_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
660 {
661 	struct eri *erip = NULL;
662 	mac_register_t *macp = NULL;
663 	int	regno;
664 	boolean_t	doinit;
665 	boolean_t	mutex_inited = B_FALSE;
666 	boolean_t	intr_add = B_FALSE;
667 
668 	switch (cmd) {
669 	case DDI_ATTACH:
670 		break;
671 
672 	case DDI_RESUME:
673 		if ((erip = ddi_get_driver_private(dip)) == NULL)
674 			return (DDI_FAILURE);
675 
676 		mutex_enter(&erip->intrlock);
677 		erip->flags &= ~ERI_SUSPENDED;
678 		erip->init_macregs = 1;
679 		param_linkup = 0;
680 		erip->stats.link_up = LINK_STATE_DOWN;
681 		erip->linkcheck = 0;
682 
683 		doinit =  (erip->flags & ERI_STARTED) ? B_TRUE : B_FALSE;
684 		mutex_exit(&erip->intrlock);
685 
686 		if (doinit && !eri_init(erip)) {
687 			return (DDI_FAILURE);
688 		}
689 		return (DDI_SUCCESS);
690 
691 	default:
692 		return (DDI_FAILURE);
693 	}
694 
695 	/*
696 	 * Allocate soft device data structure
697 	 */
698 	erip = kmem_zalloc(sizeof (struct eri), KM_SLEEP);
699 
700 	/*
701 	 * Initialize as many elements as possible.
702 	 */
703 	ddi_set_driver_private(dip, erip);
704 	erip->dip = dip;			/* dip	*/
705 	erip->instance = ddi_get_instance(dip);	/* instance */
706 	erip->flags = 0;
707 	erip->multi_refcnt = 0;
708 	erip->promisc = B_FALSE;
709 
710 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
711 		ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
712 		    "mac_alloc failed");
713 		goto attach_fail;
714 	}
715 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
716 	macp->m_driver = erip;
717 	macp->m_dip = dip;
718 	macp->m_src_addr = erip->ouraddr;
719 	macp->m_callbacks = &eri_m_callbacks;
720 	macp->m_min_sdu = 0;
721 	macp->m_max_sdu = ETHERMTU;
722 	macp->m_margin = VLAN_TAGSZ;
723 
724 	/*
725 	 * Map in the device registers.
726 	 * Separate pointers will be set up for the following
727 	 * register groups within the GEM Register Space:
728 	 * 	Global register set
729 	 * 	ETX register set
730 	 * 	ERX register set
731 	 * 	BigMAC register set.
732 	 * 	MIF register set
733 	 */
734 
735 	if (ddi_dev_nregs(dip, &regno) != (DDI_SUCCESS)) {
736 		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
737 		    "ddi_dev_nregs failed, returned %d", regno);
738 		goto attach_fail;
739 	}
740 
741 	/*
742 	 * Map the PCI config space
743 	 */
744 	if (pci_config_setup(dip, &erip->pci_config_handle) != DDI_SUCCESS) {
745 		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
746 		    "%s pci_config_setup()", config_space_fatal_msg);
747 		goto attach_fail;
748 	}
749 
750 	/*
751 	 * Initialize device attributes structure
752 	 */
753 	erip->dev_attr.devacc_attr_version =	DDI_DEVICE_ATTR_V0;
754 	erip->dev_attr.devacc_attr_dataorder =	DDI_STRICTORDER_ACC;
755 	erip->dev_attr.devacc_attr_endian_flags =	DDI_STRUCTURE_LE_ACC;
756 
757 	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&(erip->globregp), 0, 0,
758 	    &erip->dev_attr, &erip->globregh)) {
759 		goto attach_fail;
760 	}
761 	erip->etxregh =		erip->globregh;
762 	erip->erxregh =		erip->globregh;
763 	erip->bmacregh =	erip->globregh;
764 	erip->mifregh =		erip->globregh;
765 
766 	erip->etxregp =  (void *)(((caddr_t)erip->globregp) + 0x2000);
767 	erip->erxregp =  (void *)(((caddr_t)erip->globregp) + 0x4000);
768 	erip->bmacregp = (void *)(((caddr_t)erip->globregp) + 0x6000);
769 	erip->mifregp =  (void *)(((caddr_t)erip->globregp) + 0x6200);
770 
771 	/*
772 	 * Map the software reset register.
773 	 */
774 	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&(erip->sw_reset_reg),
775 	    0x1010, 4, &erip->dev_attr, &erip->sw_reset_regh)) {
776 		ERI_FAULT_MSG1(erip, SEVERITY_MID, ERI_VERB_MSG,
777 		    mregs_4soft_reset_fail_msg);
778 		goto attach_fail;
779 	}
780 
781 	/*
782 	 * Try to stop the device.
783 	 * This is done before we are prepared to handle interrupts.
784 	 */
785 	if (eri_stop(erip))
786 		goto attach_fail;
787 
788 	/*
789 	 * set PCI latency timer register.
790 	 */
791 	pci_config_put8(erip->pci_config_handle, PCI_CONF_LATENCY_TIMER,
792 	    (uchar_t)eri_pci_latency_timer);
793 
794 	if (ddi_intr_hilevel(dip, 0)) {
795 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
796 		    " high-level interrupts are not supported");
797 		goto attach_fail;
798 	}
799 
800 	/*
801 	 * Get the interrupt cookie so the mutexes can be
802 	 * initialized.
803 	 */
804 	if (ddi_get_iblock_cookie(dip, 0, &erip->cookie) != DDI_SUCCESS)
805 		goto attach_fail;
806 
807 	/*
808 	 * Initialize mutexes for this device.
809 	 */
810 	mutex_init(&erip->xmitlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
811 	mutex_init(&erip->intrlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
812 	mutex_init(&erip->linklock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
813 	mutex_init(&erip->xcvrlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
814 
815 	mutex_inited = B_TRUE;
816 
817 	/*
818 	 * Add interrupt to system
819 	 */
820 	if (ddi_add_intr(dip, 0, &erip->cookie, 0, eri_intr, (caddr_t)erip) ==
821 	    DDI_SUCCESS)
822 		intr_add = B_TRUE;
823 	else {
824 		goto attach_fail;
825 	}
826 
827 	/*
828 	 * Set up the ethernet mac address.
829 	 */
830 	(void) eri_setup_mac_address(erip, dip);
831 
832 	if (eri_init_xfer_params(erip))
833 		goto attach_fail;
834 
835 	if (eri_burstsize(erip) == DDI_FAILURE) {
836 		goto attach_fail;
837 	}
838 
839 	/*
840 	 * Set up the receive and transmit ring sizes.
841 	 */
842 	ERI_RPENDING = eri_rx_ring_size;
843 	ERI_TPENDING = eri_tx_ring_size;
844 
845 	erip->rpending_mask = ERI_RPENDING - 1;
846 	erip->rmdmax_mask = ERI_RPENDING - 1;
847 	erip->mif_config = (ERI_PHY_BMSR << ERI_MIF_CFGPR_SHIFT);
848 
849 	erip->stats.pmcap = ERI_PMCAP_NONE;
850 	if (pci_report_pmcap(dip, PCI_PM_IDLESPEED, (void *)4000) ==
851 	    DDI_SUCCESS)
852 		erip->stats.pmcap = ERI_PMCAP_4MHZ;
853 
854 	if (mac_register(macp, &erip->mh) != 0)
855 		goto attach_fail;
856 
857 	mac_free(macp);
858 
859 	return (DDI_SUCCESS);
860 
861 attach_fail:
862 	if (erip->pci_config_handle)
863 		(void) pci_config_teardown(&erip->pci_config_handle);
864 
865 	if (mutex_inited) {
866 		mutex_destroy(&erip->xmitlock);
867 		mutex_destroy(&erip->intrlock);
868 		mutex_destroy(&erip->linklock);
869 		mutex_destroy(&erip->xcvrlock);
870 	}
871 
872 	ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, attach_fail_msg);
873 
874 	if (intr_add)
875 		ddi_remove_intr(dip, 0, erip->cookie);
876 
877 	if (erip->globregh)
878 		ddi_regs_map_free(&erip->globregh);
879 
880 	if (macp != NULL)
881 		mac_free(macp);
882 	if (erip != NULL)
883 		kmem_free(erip, sizeof (*erip));
884 
885 	return (DDI_FAILURE);
886 }
887 
888 static int
889 eri_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
890 {
891 	struct eri 	*erip;
892 	int i;
893 
894 	if ((erip = ddi_get_driver_private(dip)) == NULL) {
895 		/*
896 		 * No resources allocated.
897 		 */
898 		return (DDI_FAILURE);
899 	}
900 
901 	switch (cmd) {
902 	case DDI_DETACH:
903 		break;
904 
905 	case DDI_SUSPEND:
906 		erip->flags |= ERI_SUSPENDED;
907 		eri_uninit(erip);
908 		return (DDI_SUCCESS);
909 
910 	default:
911 		return (DDI_FAILURE);
912 	}
913 
914 	if (erip->flags & (ERI_RUNNING | ERI_SUSPENDED)) {
915 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, busy_msg);
916 		return (DDI_FAILURE);
917 	}
918 
919 	if (mac_unregister(erip->mh) != 0) {
920 		return (DDI_FAILURE);
921 	}
922 
923 	/*
924 	 * Make the device quiescent
925 	 */
926 	(void) eri_stop(erip);
927 
928 	/*
929 	 * Remove instance of the intr
930 	 */
931 	ddi_remove_intr(dip, 0, erip->cookie);
932 
933 	if (erip->pci_config_handle)
934 		(void) pci_config_teardown(&erip->pci_config_handle);
935 
936 	/*
937 	 * Destroy all mutexes and data structures allocated during
938 	 * attach time.
939 	 */
940 
941 	if (erip->globregh)
942 		ddi_regs_map_free(&erip->globregh);
943 
944 	erip->etxregh =		NULL;
945 	erip->erxregh =		NULL;
946 	erip->bmacregh =	NULL;
947 	erip->mifregh =		NULL;
948 	erip->globregh =	NULL;
949 
950 	if (erip->sw_reset_regh)
951 		ddi_regs_map_free(&erip->sw_reset_regh);
952 
953 	if (erip->ksp)
954 		kstat_delete(erip->ksp);
955 
956 	eri_stop_timer(erip); /* acquire linklock */
957 	eri_start_timer(erip, eri_check_link, 0);
958 	mutex_destroy(&erip->xmitlock);
959 	mutex_destroy(&erip->intrlock);
960 	mutex_destroy(&erip->linklock);
961 	mutex_destroy(&erip->xcvrlock);
962 
963 	if (erip->md_h) {
964 		if (ddi_dma_unbind_handle(erip->md_h) ==
965 		    DDI_FAILURE)
966 			return (DDI_FAILURE);
967 		ddi_dma_mem_free(&erip->mdm_h);
968 		ddi_dma_free_handle(&erip->md_h);
969 	}
970 
971 	if (eri_freebufs(erip))
972 		return (DDI_FAILURE);
973 
974 	/* dvma handle case */
975 	if (erip->eri_dvmaxh) {
976 		(void) dvma_release(erip->eri_dvmaxh);
977 		erip->eri_dvmaxh = NULL;
978 	}
979 
980 	if (erip->eri_dvmarh) {
981 		(void) dvma_release(erip->eri_dvmarh);
982 		erip->eri_dvmarh = NULL;
983 	}
984 /*
985  *	xmit_dma_mode, erip->ndmaxh[i]=NULL for dvma
986  */
987 	else {
988 		for (i = 0; i < ERI_TPENDING; i++)
989 			if (erip->ndmaxh[i])
990 				ddi_dma_free_handle(&erip->ndmaxh[i]);
991 		for (i = 0; i < ERI_RPENDING; i++)
992 			if (erip->ndmarh[i])
993 				ddi_dma_free_handle(&erip->ndmarh[i]);
994 	}
995 /*
996  *	Release tiny TX buffers
997  */
998 	if (erip->tbuf_ioaddr != 0) {
999 		(void) ddi_dma_unbind_handle(erip->tbuf_handle);
1000 		erip->tbuf_ioaddr = 0;
1001 	}
1002 	if (erip->tbuf_kaddr != NULL) {
1003 		kmem_free(erip->tbuf_kaddr, ERI_TPENDING * eri_tx_bcopy_max);
1004 		erip->tbuf_kaddr = NULL;
1005 	}
1006 	if (erip->tbuf_handle != NULL) {
1007 		ddi_dma_free_handle(&erip->tbuf_handle);
1008 		erip->tbuf_handle = NULL;
1009 	}
1010 
1011 	eri_param_cleanup(erip);
1012 
1013 	ddi_set_driver_private(dip, NULL);
1014 	kmem_free((caddr_t)erip, sizeof (struct eri));
1015 
1016 	return (DDI_SUCCESS);
1017 }
1018 
1019 /*
1020  * To set up the mac address for the network interface:
1021  * The adapter card may support a local mac address which is published
1022  * in a device node property "local-mac-address". This mac address is
1023  * treated as the factory-installed mac address for the DLPI interface.
1024  * If the adapter firmware has used the device for diskless boot
1025  * operation it publishes a property called "mac-address" for use by
1026  * inetboot and the device driver.
1027  * If "mac-address" is not found, the system options property
1028  * "local-mac-address" is used to select the mac-address. If this option
1029  * is set to "true", and "local-mac-address" has been found, then
1030  * local-mac-address is used; otherwise the system mac address is used
1031  * by calling the "localetheraddr()" function.
1032  */
1033 
1034 static void
1035 eri_setup_mac_address(struct eri *erip, dev_info_t *dip)
1036 {
1037 	uchar_t			*prop;
1038 	char			*uselocal;
1039 	unsigned		prop_len;
1040 	uint32_t		addrflags = 0;
1041 	struct ether_addr	factaddr;
1042 
1043 	/*
1044 	 * Check if it is an adapter with its own local mac address
1045 	 * If it is present, save it as the "factory-address"
1046 	 * for this adapter.
1047 	 */
1048 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1049 	    "local-mac-address", &prop, &prop_len) == DDI_PROP_SUCCESS) {
1050 		if (prop_len == ETHERADDRL) {
1051 			addrflags = ERI_FACTADDR_PRESENT;
1052 			bcopy(prop, &factaddr, ETHERADDRL);
1053 			ERI_FAULT_MSG2(erip, SEVERITY_NONE, ERI_VERB_MSG,
1054 			    lether_addr_msg, ether_sprintf(&factaddr));
1055 		}
1056 		ddi_prop_free(prop);
1057 	}
1058 	/*
1059 	 * Check if the adapter has published "mac-address" property.
1060 	 * If it is present, use it as the mac address for this device.
1061 	 */
1062 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1063 	    "mac-address", &prop, &prop_len) == DDI_PROP_SUCCESS) {
1064 		if (prop_len >= ETHERADDRL) {
1065 			bcopy(prop, erip->ouraddr, ETHERADDRL);
1066 			ddi_prop_free(prop);
1067 			return;
1068 		}
1069 		ddi_prop_free(prop);
1070 	}
1071 
1072 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, "local-mac-address?",
1073 	    &uselocal) == DDI_PROP_SUCCESS) {
1074 		if ((strcmp("true", uselocal) == 0) &&
1075 		    (addrflags & ERI_FACTADDR_PRESENT)) {
1076 			addrflags |= ERI_FACTADDR_USE;
1077 			bcopy(&factaddr, erip->ouraddr, ETHERADDRL);
1078 			ddi_prop_free(uselocal);
1079 			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
1080 			    lmac_addr_msg);
1081 			return;
1082 		}
1083 		ddi_prop_free(uselocal);
1084 	}
1085 
1086 	/*
1087 	 * Get the system ethernet address.
1088 	 */
1089 	(void) localetheraddr(NULL, &factaddr);
1090 	bcopy(&factaddr, erip->ouraddr, ETHERADDRL);
1091 }
1092 
1093 
1094 /*
1095  * Calculate the bit in the multicast address filter that selects the given
1096  * address.
1097  * Note: For ERI, the last 8 bits of the CRC are used.
1098  */
1099 
1100 static uint32_t
1101 eri_ladrf_bit(const uint8_t *addr)
1102 {
1103 	uint32_t crc;
1104 
1105 	CRC32(crc, addr, ETHERADDRL, -1U, crc32_table);
1106 
1107 	/*
1108 	 * Just want the 8 most significant bits.
1109 	 */
1110 	return ((~crc) >> 24);
1111 }
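
#ifdef	ERI_EXAMPLES	/* never defined; the block below is a sketch only */
/*
 * Sketch of how the 8-bit index returned above selects one bit of the
 * 256-bit filter held in the 16-bit ladrf[] words.  This hypothetical
 * helper mirrors what the driver's LADRF_SET()/LADRF_CLR() macros do.
 */
static void
eri_example_ladrf_set(uint16_t *ladrf, uint32_t bit)
{
	ladrf[bit >> 4] |= (uint16_t)(1 << (bit & 0xf));
}
#endif	/* ERI_EXAMPLES */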
1112 
1113 static void
1114 eri_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1115 {
1116 	struct	eri	*erip = arg;
1117 	struct	iocblk	*iocp = (void *)mp->b_rptr;
1118 	int	err;
1119 
1120 	ASSERT(erip != NULL);
1121 
1122 	/*
1123 	 * Privilege checks.
1124 	 */
1125 	switch (iocp->ioc_cmd) {
1126 	case ERI_SET_LOOP_MODE:
1127 	case ERI_ND_SET:
1128 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1129 		if (err != 0) {
1130 			miocnak(wq, mp, 0, err);
1131 			return;
1132 		}
1133 		break;
1134 	default:
1135 		break;
1136 	}
1137 
1138 	switch (iocp->ioc_cmd) {
1139 	case ERI_ND_GET:
1140 	case ERI_ND_SET:
1141 		eri_process_ndd_ioctl(erip, wq, mp, iocp->ioc_cmd);
1142 		break;
1143 
1144 	case ERI_SET_LOOP_MODE:
1145 	case ERI_GET_LOOP_MODE:
1146 		/*
1147 		 * XXX: Consider updating this to the new netlb ioctls.
1148 		 */
1149 		eri_loopback(erip, wq, mp);
1150 		break;
1151 
1152 	default:
1153 		miocnak(wq, mp, 0, EINVAL);
1154 		break;
1155 	}
1156 
1157 	ASSERT(!MUTEX_HELD(&erip->linklock));
1158 }
1159 
1160 static void
1161 eri_loopback(struct eri *erip, queue_t *wq, mblk_t *mp)
1162 {
1163 	struct	iocblk	*iocp = (void *)mp->b_rptr;
1164 	loopback_t	*al;
1165 
1166 	/*LINTED E_PTRDIFF_OVERFLOW*/
1167 	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < sizeof (loopback_t)) {
1168 		miocnak(wq, mp, 0, EINVAL);
1169 		return;
1170 	}
1171 
1172 	al = (void *)mp->b_cont->b_rptr;
1173 
1174 	switch (iocp->ioc_cmd) {
1175 	case ERI_SET_LOOP_MODE:
1176 		switch (al->loopback) {
1177 		case ERI_LOOPBACK_OFF:
1178 			erip->flags &= (~ERI_MACLOOPBACK & ~ERI_SERLOOPBACK);
1179 			/* force link status to go down */
1180 			param_linkup = 0;
1181 			erip->stats.link_up = LINK_STATE_DOWN;
1182 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1183 			(void) eri_init(erip);
1184 			break;
1185 
1186 		case ERI_MAC_LOOPBACK_ON:
1187 			erip->flags |= ERI_MACLOOPBACK;
1188 			erip->flags &= ~ERI_SERLOOPBACK;
1189 			param_linkup = 0;
1190 			erip->stats.link_up = LINK_STATE_DOWN;
1191 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1192 			(void) eri_init(erip);
1193 			break;
1194 
1195 		case ERI_PCS_LOOPBACK_ON:
1196 			break;
1197 
1198 		case ERI_SER_LOOPBACK_ON:
1199 			erip->flags |= ERI_SERLOOPBACK;
1200 			erip->flags &= ~ERI_MACLOOPBACK;
1201 			/* force link status to go down */
1202 			param_linkup = 0;
1203 			erip->stats.link_up = LINK_STATE_DOWN;
1204 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1205 			(void) eri_init(erip);
1206 			break;
1207 
1208 		default:
1209 			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
1210 			    loopback_val_default);
1211 			miocnak(wq, mp, 0, EINVAL);
1212 			return;
1213 		}
1214 		miocnak(wq, mp, 0, 0);
1215 		break;
1216 
1217 	case ERI_GET_LOOP_MODE:
1218 		al->loopback =	ERI_MAC_LOOPBACK_ON | ERI_PCS_LOOPBACK_ON |
1219 		    ERI_SER_LOOPBACK_ON;
1220 		miocack(wq, mp, sizeof (loopback_t), 0);
1221 		break;
1222 
1223 	default:
1224 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1225 		    loopback_cmd_default);
1226 	}
1227 }
1228 
1229 static int
1230 eri_m_promisc(void *arg, boolean_t on)
1231 {
1232 	struct	eri	*erip = arg;
1233 
1234 	mutex_enter(&erip->intrlock);
1235 	erip->promisc = on;
1236 	eri_init_rx(erip);
1237 	mutex_exit(&erip->intrlock);
1238 	return (0);
1239 }
1240 
1241 /*
1242  * This is to support an unlimited number of multicast members by
1243  * reference-counting each bin of the hash filter.
1244  */
1245 static int
1246 eri_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
1247 {
1248 	struct eri		*erip = arg;
1249 	uint32_t 		ladrf_bit;
1250 
1251 	/*
1252 	 * Update this address's bit in the local address filter, and
1253 	 * re-initialize the hardware on the first add or last remove.
1254 	 */
1255 	ladrf_bit = eri_ladrf_bit(mca);
1256 
1257 	mutex_enter(&erip->intrlock);
1258 	if (add) {
1259 		erip->ladrf_refcnt[ladrf_bit]++;
1260 		if (erip->ladrf_refcnt[ladrf_bit] == 1) {
1261 			LADRF_SET(erip, ladrf_bit);
1262 			erip->multi_refcnt++;
1263 			eri_init_rx(erip);
1264 		}
1265 	} else {
1266 		erip->ladrf_refcnt[ladrf_bit]--;
1267 		if (erip->ladrf_refcnt[ladrf_bit] == 0) {
1268 			LADRF_CLR(erip, ladrf_bit);
1269 			erip->multi_refcnt--;
1270 			eri_init_rx(erip);
1271 		}
1272 	}
1273 	mutex_exit(&erip->intrlock);
1274 	return (0);
1275 }
1276 
1277 static int
1278 eri_m_unicst(void *arg, const uint8_t *macaddr)
1279 {
1280 	struct	eri	*erip = arg;
1281 
1282 	/*
1283 	 * Set new interface local address and re-init device.
1284 	 * This is destructive to any other streams attached
1285 	 * to this device.
1286 	 */
1287 	mutex_enter(&erip->intrlock);
1288 	bcopy(macaddr, &erip->ouraddr, ETHERADDRL);
1289 	eri_init_rx(erip);
1290 	mutex_exit(&erip->intrlock);
1291 	return (0);
1292 }
1293 
1294 /*ARGSUSED*/
1295 static boolean_t
1296 eri_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1297 {
1298 	switch (cap) {
1299 	case MAC_CAPAB_HCKSUM: {
1300 		uint32_t *hcksum_txflags = cap_data;
1301 		*hcksum_txflags = HCKSUM_INET_PARTIAL;
1302 		return (B_TRUE);
1303 	}
1304 	case MAC_CAPAB_POLL:
1305 	default:
1306 		return (B_FALSE);
1307 	}
1308 }
1309 
1310 static int
1311 eri_m_start(void *arg)
1312 {
1313 	struct eri	*erip = arg;
1314 
1315 	mutex_enter(&erip->intrlock);
1316 	erip->flags |= ERI_STARTED;
1317 	mutex_exit(&erip->intrlock);
1318 
1319 	if (!eri_init(erip)) {
1320 		mutex_enter(&erip->intrlock);
1321 		erip->flags &= ~ERI_STARTED;
1322 		mutex_exit(&erip->intrlock);
1323 		return (EIO);
1324 	}
1325 	return (0);
1326 }
1327 
1328 static void
1329 eri_m_stop(void *arg)
1330 {
1331 	struct eri	*erip = arg;
1332 
1333 	mutex_enter(&erip->intrlock);
1334 	erip->flags &= ~ERI_STARTED;
1335 	mutex_exit(&erip->intrlock);
1336 	eri_uninit(erip);
1337 }
1338 
1339 static int
1340 eri_m_stat(void *arg, uint_t stat, uint64_t *val)
1341 {
1342 	struct eri	*erip = arg;
1343 	struct stats	*esp;
1344 	boolean_t	macupdate = B_FALSE;
1345 
1346 	esp = &erip->stats;
1347 
1348 	mutex_enter(&erip->xmitlock);
1349 	if ((erip->flags & ERI_RUNNING) && (erip->flags & ERI_TXINIT)) {
1350 		erip->tx_completion =
1351 		    GET_ETXREG(tx_completion) & ETX_COMPLETION_MASK;
1352 		macupdate |= eri_reclaim(erip, erip->tx_completion);
1353 	}
1354 	mutex_exit(&erip->xmitlock);
1355 	if (macupdate)
1356 		mac_tx_update(erip->mh);
1357 
1358 	eri_savecntrs(erip);
1359 
1360 	switch (stat) {
1361 	case MAC_STAT_IFSPEED:
1362 		*val = esp->ifspeed * 1000000ULL;
1363 		break;
1364 	case MAC_STAT_MULTIRCV:
1365 		*val = esp->multircv;
1366 		break;
1367 	case MAC_STAT_BRDCSTRCV:
1368 		*val = esp->brdcstrcv;
1369 		break;
1370 	case MAC_STAT_IPACKETS:
1371 		*val = esp->ipackets64;
1372 		break;
1373 	case MAC_STAT_RBYTES:
1374 		*val = esp->rbytes64;
1375 		break;
1376 	case MAC_STAT_OBYTES:
1377 		*val = esp->obytes64;
1378 		break;
1379 	case MAC_STAT_OPACKETS:
1380 		*val = esp->opackets64;
1381 		break;
1382 	case MAC_STAT_IERRORS:
1383 		*val = esp->ierrors;
1384 		break;
1385 	case MAC_STAT_OERRORS:
1386 		*val = esp->oerrors;
1387 		break;
1388 	case MAC_STAT_MULTIXMT:
1389 		*val = esp->multixmt;
1390 		break;
1391 	case MAC_STAT_BRDCSTXMT:
1392 		*val = esp->brdcstxmt;
1393 		break;
1394 	case MAC_STAT_NORCVBUF:
1395 		*val = esp->norcvbuf;
1396 		break;
1397 	case MAC_STAT_NOXMTBUF:
1398 		*val = esp->noxmtbuf;
1399 		break;
1400 	case MAC_STAT_UNDERFLOWS:
1401 		*val = esp->txmac_urun;
1402 		break;
1403 	case MAC_STAT_OVERFLOWS:
1404 		*val = esp->rx_overflow;
1405 		break;
1406 	case MAC_STAT_COLLISIONS:
1407 		*val = esp->collisions;
1408 		break;
1409 	case ETHER_STAT_ALIGN_ERRORS:
1410 		*val = esp->rx_align_err;
1411 		break;
1412 	case ETHER_STAT_FCS_ERRORS:
1413 		*val = esp->rx_crc_err;
1414 		break;
1415 	case ETHER_STAT_EX_COLLISIONS:
1416 		*val = esp->excessive_coll;
1417 		break;
1418 	case ETHER_STAT_TX_LATE_COLLISIONS:
1419 		*val = esp->late_coll;
1420 		break;
1421 	case ETHER_STAT_FIRST_COLLISIONS:
1422 		*val = esp->first_coll;
1423 		break;
1424 	case ETHER_STAT_LINK_DUPLEX:
1425 		*val = esp->link_duplex;
1426 		break;
1427 	case ETHER_STAT_TOOLONG_ERRORS:
1428 		*val = esp->rx_toolong_pkts;
1429 		break;
1430 	case ETHER_STAT_TOOSHORT_ERRORS:
1431 		*val = esp->rx_runt;
1432 		break;
1433 
1434 	case ETHER_STAT_XCVR_ADDR:
1435 		*val = erip->phyad;
1436 		break;
1437 
1438 	case ETHER_STAT_XCVR_INUSE:
1439 		*val = XCVR_100X;	/* should always be 100X for now */
1440 		break;
1441 
1442 	case ETHER_STAT_CAP_100FDX:
1443 		*val = param_bmsr_100fdx;
1444 		break;
1445 	case ETHER_STAT_CAP_100HDX:
1446 		*val = param_bmsr_100hdx;
1447 		break;
1448 	case ETHER_STAT_CAP_10FDX:
1449 		*val = param_bmsr_10fdx;
1450 		break;
1451 	case ETHER_STAT_CAP_10HDX:
1452 		*val = param_bmsr_10hdx;
1453 		break;
1454 	case ETHER_STAT_CAP_AUTONEG:
1455 		*val = param_bmsr_ancap;
1456 		break;
1457 	case ETHER_STAT_CAP_ASMPAUSE:
1458 		*val = param_bmsr_asm_dir;
1459 		break;
1460 	case ETHER_STAT_CAP_PAUSE:
1461 		*val = param_bmsr_pause;
1462 		break;
1463 	case ETHER_STAT_ADV_CAP_100FDX:
1464 		*val = param_anar_100fdx;
1465 		break;
1466 	case ETHER_STAT_ADV_CAP_100HDX:
1467 		*val = param_anar_100hdx;
1468 		break;
1469 	case ETHER_STAT_ADV_CAP_10FDX:
1470 		*val = param_anar_10fdx;
1471 		break;
1472 	case ETHER_STAT_ADV_CAP_10HDX:
1473 		*val = param_anar_10hdx;
1474 		break;
1475 	case ETHER_STAT_ADV_CAP_AUTONEG:
1476 		*val = param_autoneg;
1477 		break;
1478 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
1479 		*val = param_anar_asm_dir;
1480 		break;
1481 	case ETHER_STAT_ADV_CAP_PAUSE:
1482 		*val = param_anar_pause;
1483 		break;
1484 	case ETHER_STAT_LP_CAP_100FDX:
1485 		*val = param_anlpar_100fdx;
1486 		break;
1487 	case ETHER_STAT_LP_CAP_100HDX:
1488 		*val = param_anlpar_100hdx;
1489 		break;
1490 	case ETHER_STAT_LP_CAP_10FDX:
1491 		*val = param_anlpar_10fdx;
1492 		break;
1493 	case ETHER_STAT_LP_CAP_10HDX:
1494 		*val = param_anlpar_10hdx;
1495 		break;
1496 	case ETHER_STAT_LP_CAP_AUTONEG:
1497 		*val = param_aner_lpancap;
1498 		break;
1499 	case ETHER_STAT_LP_CAP_ASMPAUSE:
1500 		*val = param_anlpar_pauseTX;
1501 		break;
1502 	case ETHER_STAT_LP_CAP_PAUSE:
1503 		*val = param_anlpar_pauseRX;
1504 		break;
1505 	case ETHER_STAT_LINK_PAUSE:
1506 		*val = esp->pausing;
1507 		break;
1508 	case ETHER_STAT_LINK_ASMPAUSE:
1509 		*val = param_anar_asm_dir &&
1510 		    param_anlpar_pauseTX &&
1511 		    (param_anar_pause != param_anlpar_pauseRX);
1512 		break;
1513 	case ETHER_STAT_LINK_AUTONEG:
1514 		*val = param_autoneg && param_aner_lpancap;
1515 		break;
1516 	}
1517 	return (0);
1518 }
1519 
1520 /*
1521  * Hardware Functions
1522  * New Section
1523  */
1524 
1525 /*
1526  * Initialize the MAC registers. Some of the MAC registers are initialized
1527  * just once since Global Reset or MAC reset doesn't clear them. Others (like
1528  * Host MAC Address Registers) are cleared on every reset and have to be
1529  * reinitialized.
1530  */
1531 static void
1532 eri_init_macregs_generic(struct eri *erip)
1533 {
1534 	/*
1535 	 * set up the MAC parameter registers once
1536 	 * after power cycle. SUSPEND/RESUME also requires
1537 	 * setting these registers.
1538 	 */
1539 	if ((erip->stats.inits == 1) || (erip->init_macregs)) {
1540 		erip->init_macregs = 0;
1541 		PUT_MACREG(ipg0, param_ipg0);
1542 		PUT_MACREG(ipg1, param_ipg1);
1543 		PUT_MACREG(ipg2, param_ipg2);
1544 		PUT_MACREG(macmin, BMAC_MIN_FRAME_SIZE);
1545 #ifdef	ERI_RX_TAG_ERROR_WORKAROUND
1546 		PUT_MACREG(macmax, BMAC_MAX_FRAME_SIZE_TAG | BMAC_MAX_BURST);
1547 #else
1548 		PUT_MACREG(macmax, BMAC_MAX_FRAME_SIZE | BMAC_MAX_BURST);
1549 #endif
1550 		PUT_MACREG(palen, BMAC_PREAMBLE_SIZE);
1551 		PUT_MACREG(jam, BMAC_JAM_SIZE);
1552 		PUT_MACREG(alimit, BMAC_ATTEMPT_LIMIT);
1553 		PUT_MACREG(macctl_type, BMAC_CONTROL_TYPE);
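		/*
		 * Seed the backoff random number generator from bits of
		 * the station address.
		 */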
1554 		PUT_MACREG(rseed,
1555 		    ((erip->ouraddr[0] & 0x3) << 8) | erip->ouraddr[1]);
1556 
1557 		PUT_MACREG(madd3, BMAC_ADDRESS_3);
1558 		PUT_MACREG(madd4, BMAC_ADDRESS_4);
1559 		PUT_MACREG(madd5, BMAC_ADDRESS_5);
1560 
1561 		/* Program MAC Control address */
1562 		PUT_MACREG(madd6, BMAC_ADDRESS_6);
1563 		PUT_MACREG(madd7, BMAC_ADDRESS_7);
1564 		PUT_MACREG(madd8, BMAC_ADDRESS_8);
1565 
1566 		PUT_MACREG(afr0, BMAC_AF_0);
1567 		PUT_MACREG(afr1, BMAC_AF_1);
1568 		PUT_MACREG(afr2, BMAC_AF_2);
1569 		PUT_MACREG(afmr1_2, BMAC_AF21_MASK);
1570 		PUT_MACREG(afmr0, BMAC_AF0_MASK);
1571 	}
1572 
1573 	/* The counters need to be zeroed */
1574 	PUT_MACREG(nccnt, 0);
1575 	PUT_MACREG(fccnt, 0);
1576 	PUT_MACREG(excnt, 0);
1577 	PUT_MACREG(ltcnt, 0);
1578 	PUT_MACREG(dcnt,  0);
1579 	PUT_MACREG(frcnt, 0);
1580 	PUT_MACREG(lecnt, 0);
1581 	PUT_MACREG(aecnt, 0);
1582 	PUT_MACREG(fecnt, 0);
1583 	PUT_MACREG(rxcv,  0);
1584 
1585 	if (erip->pauseTX)
1586 		PUT_MACREG(spcmd, BMAC_SEND_PAUSE_CMD);
1587 	else
1588 		PUT_MACREG(spcmd, 0);
1589 
1590 	/*
1591 	 * Program BigMAC with local individual ethernet address.
1592 	 */
1593 
1594 	PUT_MACREG(madd0, (erip->ouraddr[4] << 8) | erip->ouraddr[5]);
1595 	PUT_MACREG(madd1, (erip->ouraddr[2] << 8) | erip->ouraddr[3]);
1596 	PUT_MACREG(madd2, (erip->ouraddr[0] << 8) | erip->ouraddr[1]);
1597 
1598 	/*
1599 	 * Install multicast address filter.
1600 	 */
1601 
1602 	PUT_MACREG(hash0, erip->ladrf[0]);
1603 	PUT_MACREG(hash1, erip->ladrf[1]);
1604 	PUT_MACREG(hash2, erip->ladrf[2]);
1605 	PUT_MACREG(hash3, erip->ladrf[3]);
1606 	PUT_MACREG(hash4, erip->ladrf[4]);
1607 	PUT_MACREG(hash5, erip->ladrf[5]);
1608 	PUT_MACREG(hash6, erip->ladrf[6]);
1609 	PUT_MACREG(hash7, erip->ladrf[7]);
1610 	PUT_MACREG(hash8, erip->ladrf[8]);
1611 	PUT_MACREG(hash9, erip->ladrf[9]);
1612 	PUT_MACREG(hash10, erip->ladrf[10]);
1613 	PUT_MACREG(hash11, erip->ladrf[11]);
1614 	PUT_MACREG(hash12, erip->ladrf[12]);
1615 	PUT_MACREG(hash13, erip->ladrf[13]);
1616 	PUT_MACREG(hash14, erip->ladrf[14]);
	PUT_MACREG(hash15, erip->ladrf[15]);
1617 }
1618 
1619 static int
1620 eri_flush_txbufs(struct eri *erip)
1621 {
1622 	uint_t	i;
1623 	int	status = 0;
1624 	/*
1625 	 * Free and dvma_unload pending xmit  buffers.
1626 	 * Maintaining the 1-to-1 ordered sequence of
1627 	 * dvma_load() followed by dvma_unload() is critical.
1628 	 * Always unload anything before loading it again.
1629 	 * Never unload anything twice.  Always unload
1630 	 * before freeing the buffer.  We satisfy these
1631 	 * requirements by unloading only those descriptors
1632 	 * which currently have an mblk associated with them.
1633 	 */
1634 	for (i = 0; i < ERI_TPENDING; i++) {
1635 		if (erip->tmblkp[i]) {
1636 			if (erip->eri_dvmaxh)
1637 				dvma_unload(erip->eri_dvmaxh, 2*i, DONT_FLUSH);
1638 			else if ((ddi_dma_unbind_handle(erip->ndmaxh[i]) ==
1639 			    DDI_FAILURE))
1640 				status = -1;
1641 			freeb(erip->tmblkp[i]);
1642 			erip->tmblkp[i] = NULL;
1643 		}
1644 	}
1645 	return (status);
1646 }
1647 
1648 static int
1649 eri_flush_rxbufs(struct eri *erip)
1650 {
1651 	uint_t	i;
1652 	int	status = 0;
1653 	/*
1654 	 * Free and dvma_unload pending recv buffers.
1655 	 * Maintaining the 1-to-1 ordered sequence of
1656 	 * dvma_load() followed by dvma_unload() is critical.
1657 	 * Always unload anything before loading it again.
1658 	 * Never unload anything twice.  Always unload
1659 	 * before freeing the buffer.  We satisfy these
1660 	 * requirements by unloading only those descriptors
1661 	 * which currently have an mblk associated with them.
1662 	 */
1663 	for (i = 0; i < ERI_RPENDING; i++) {
1664 		if (erip->rmblkp[i]) {
1665 			if (erip->eri_dvmarh)
1666 				dvma_unload(erip->eri_dvmarh, 2 * i,
1667 				    DDI_DMA_SYNC_FORCPU);
1668 			else if ((ddi_dma_unbind_handle(erip->ndmarh[i]) ==
1669 			    DDI_FAILURE))
1670 				status = -1;
1671 			freeb(erip->rmblkp[i]);
1672 			erip->rmblkp[i] = NULL;
1673 		}
1674 	}
1675 	return (status);
1676 }
1677 
1678 static void
1679 eri_init_txbufs(struct eri *erip)
1680 {
1681 	/*
1682 	 * Clear TX descriptors.
1683 	 */
1684 	bzero((caddr_t)erip->eri_tmdp, ERI_TPENDING * sizeof (struct eri_tmd));
1685 
1686 	/*
1687 	 * sync TXDMA descriptors.
1688 	 */
1689 	ERI_SYNCIOPB(erip, erip->eri_tmdp,
1690 	    (ERI_TPENDING * sizeof (struct eri_tmd)), DDI_DMA_SYNC_FORDEV);
1691 	/*
1692 	 * Reset TMD 'walking' pointers.
1693 	 */
1694 	erip->tcurp = erip->eri_tmdp;
1695 	erip->tnextp = erip->eri_tmdp;
1696 	erip->tx_cur_cnt = 0;
1697 	erip->tx_kick = 0;
1698 	erip->tx_completion = 0;
1699 }
1700 
1701 static int
1702 eri_init_rxbufs(struct eri *erip)
1703 {
1704 
1705 	ddi_dma_cookie_t	dma_cookie;
1706 	mblk_t			*bp;
1707 	int			i, status = 0;
1708 	uint32_t		ccnt;
1709 
1710 	/*
1711 	 * clear rcv descriptors
1712 	 */
1713 	bzero((caddr_t)erip->rmdp, ERI_RPENDING * sizeof (struct rmd));
1714 
1715 	for (i = 0; i < ERI_RPENDING; i++) {
1716 		if ((bp = eri_allocb(ERI_BUFSIZE)) == NULL) {
1717 			status = -1;
1718 			continue;
1719 		}
1720 		/* Load data buffer to DVMA space */
1721 		if (erip->eri_dvmarh)
1722 			dvma_kaddr_load(erip->eri_dvmarh,
1723 			    (caddr_t)bp->b_rptr, ERI_BUFSIZE,
1724 			    2 * i, &dma_cookie);
1725 /*
1726  *		Bind data buffer to DMA handle
1727  */
1728 		else if (ddi_dma_addr_bind_handle(erip->ndmarh[i], NULL,
1729 		    (caddr_t)bp->b_rptr, ERI_BUFSIZE,
1730 		    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1731 		    &dma_cookie, &ccnt) != DDI_DMA_MAPPED)
1732 			status = -1;
1733 
1734 		PUT_RMD((&erip->rmdp[i]), dma_cookie);
1735 		erip->rmblkp[i] = bp;	/* save for later use */
1736 	}
1737 
1738 	/*
1739 	 * sync RXDMA descriptors.
1740 	 */
1741 	ERI_SYNCIOPB(erip, erip->rmdp, (ERI_RPENDING * sizeof (struct rmd)),
1742 	    DDI_DMA_SYNC_FORDEV);
1743 	/*
1744 	 * Reset RMD 'walking' pointers.
1745 	 */
1746 	erip->rnextp = erip->rmdp;
1747 	erip->rx_completion = 0;
1748 	erip->rx_kick = ERI_RPENDING - 4;
1749 	return (status);
1750 }
1751 
1752 static uint32_t
1753 eri_txmac_disable(struct eri *erip)
1754 {
1755 	int	n;
1756 
1757 	PUT_MACREG(txcfg, GET_MACREG(txcfg) & ~BMAC_TXCFG_ENAB);
1758 	n = (BMACTXRSTDELAY * 10) / ERI_WAITPERIOD;
1759 
1760 	while (--n > 0) {
1761 		drv_usecwait(ERI_WAITPERIOD);
1762 		if ((GET_MACREG(txcfg) & 1) == 0)
1763 			return (0);
1764 	}
1765 	return (1);
1766 }
1767 
1768 static uint32_t
1769 eri_rxmac_disable(struct eri *erip)
1770 {
1771 	int	n;
1772 	PUT_MACREG(rxcfg, GET_MACREG(rxcfg) & ~BMAC_RXCFG_ENAB);
1773 	n = BMACRXRSTDELAY / ERI_WAITPERIOD;
1774 
1775 	while (--n > 0) {
1776 		drv_usecwait(ERI_WAITPERIOD);
1777 		if ((GET_MACREG(rxcfg) & 1) == 0)
1778 			return (0);
1779 	}
1780 	return (1);
1781 }
1782 
1783 /*
1784  * Return 0 upon success, 1 on failure.
1785  */
1786 static int
1787 eri_stop(struct eri *erip)
1788 {
1789 	(void) eri_erx_reset(erip);
1790 	(void) eri_etx_reset(erip);
1791 
1792 	/*
1793 	 * Set the cache line size to 16 (64 bytes) for the PCI burst size.
1794 	 */
1795 	PUT_SWRSTREG(reset, ERI_G_RESET_GLOBAL | ERI_CACHE_LINE_SIZE);
1796 
1797 	if (erip->linkcheck) {
1798 		erip->linkcheck = 0;
1799 		erip->global_reset_issued = 2;
1800 	} else {
1801 		param_linkup = 0;
1802 		erip->stats.link_up = LINK_STATE_DOWN;
1803 		erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1804 		erip->global_reset_issued = -1;
1805 	}
1806 
1807 	ERI_DELAY((GET_SWRSTREG(reset) == ERI_CACHE_LINE_SIZE),
1808 	    ERI_MAX_RST_DELAY);
1809 	erip->rx_reset_issued = -1;
1810 	erip->tx_reset_issued = -1;
1811 
1812 	/*
1813 	 * workaround for RIO not resetting the interrupt mask
1814 	 * register to default value 0xffffffff.
1815 	 */
1816 	PUT_GLOBREG(intmask, ERI_G_MASK_ALL);
1817 
1818 	if (GET_SWRSTREG(reset) == ERI_CACHE_LINE_SIZE) {
1819 		return (0);
1820 	} else {
1821 		return (1);
1822 	}
1823 }
1824 
1825 /*
1826  * Reset Just the RX Portion
1827  * Return 0 upon success, 1 on failure.
1828  *
1829  * Resetting the rxdma while there is an rx dma transaction going on the
1830  * bus will cause a bus hang or parity errors. To avoid this, we first
1831  * disable the rxdma by clearing the ENABLE bit (bit 0). To make sure it is
1832  * disabled, we poll it until it really clears. Furthermore, to ensure any
1833  * RX DMA activity has subsided, we delay for 5 msec.
1834  */
1835 static uint32_t
1836 eri_erx_reset(struct eri *erip)
1837 {
1838 	(void) eri_rxmac_disable(erip); /* Disable the RX MAC */
1839 
1840 	/* Disable the RX DMA */
1841 	PUT_ERXREG(config, GET_ERXREG(config) & ~GET_CONFIG_RXDMA_EN);
1842 	ERI_DELAY(((GET_ERXREG(config) &  1) == 0), ERI_MAX_RST_DELAY);
1843 	if ((GET_ERXREG(config) & 1) != 0)
1844 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1845 		    disable_erx_msg);
1846 
1847 	drv_usecwait(5000); /* Delay to ensure no RX DMA activity */
1848 
1849 	PUT_SWRSTREG(reset, ERI_G_RESET_ERX | ERI_CACHE_LINE_SIZE);
1850 	/*
1851 	 * Wait until the reset is completed, which is indicated by
1852 	 * the reset bit clearing, or until we time out.
1853 	 */
1854 	ERI_DELAY(((GET_SWRSTREG(reset) & (ERI_G_RESET_ERX)) ==
1855 	    ERI_CACHE_LINE_SIZE), ERI_MAX_RST_DELAY);
1856 	erip->rx_reset_issued = -1;
1857 
1858 	return ((GET_SWRSTREG(reset) & (ERI_G_RESET_ERX)) ? 1 : 0);
1859 }
1860 
1861 /*
1862  * Reset Just the TX Portion
1863  * Return 0 upon success, 1 on failure.
1864  * Resetting the txdma while there is a tx dma transaction on the bus may
1865  * cause a bus hang or parity errors. To avoid this we first disable the
1866  * txdma by clearing the ENABLE bit (bit 0). To make sure it is disabled,
1867  * we poll it until it really clears. Furthermore, to ensure any TX DMA
1868  * activity has subsided, we delay for 5 msec.
1869  */
1870 static uint32_t
1871 eri_etx_reset(struct eri *erip)
1872 {
1873 	(void) eri_txmac_disable(erip);
1874 
1875 	/* Disable the TX DMA */
1876 	PUT_ETXREG(config, GET_ETXREG(config) & ~GET_CONFIG_TXDMA_EN);
1877 #ifdef ORIG
1878 	ERI_DELAY(((GET_ETXREG(config) &  1) == 0), ERI_MAX_RST_DELAY);
1879 	if ((GET_ETXREG(config) &  1) != 0)
1880 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1881 		    disable_etx_msg);
1882 	drv_usecwait(5000); /* Delay  to ensure DMA completed (if any). */
1883 #endif
1884 	drv_usecwait(5000); /* Delay to ensure DMA completed (if any). */
1885 	ERI_DELAY(((GET_ETXREG(config) &  1) == 0), ERI_MAX_RST_DELAY);
1886 	if ((GET_ETXREG(config) &  1) != 0)
1887 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1888 		    disable_etx_msg);
1889 
1890 	PUT_SWRSTREG(reset, ERI_G_RESET_ETX | ERI_CACHE_LINE_SIZE);
1891 
1892 	/*
1893 	 * Wait until the reset completes, indicated by the reset bit
1894 	 * clearing, or until we time out.
1895 	 */
1896 	ERI_DELAY(((GET_SWRSTREG(reset) & (ERI_G_RESET_ETX)) ==
1897 	    ERI_CACHE_LINE_SIZE), ERI_MAX_RST_DELAY);
1898 	erip->tx_reset_issued = -1;
1899 
1900 	if (GET_SWRSTREG(reset) &  (ERI_G_RESET_ETX)) {
1901 		return (1);
1902 	} else
1903 		return (0);
1904 }
1905 
1906 
1907 /*
1908  * Initialize the TX DMA registers and Enable the TX DMA.
1909  */
1910 static uint32_t
1911 eri_init_txregs(struct eri *erip)
1912 {
1913 
1914 	uint32_t	i;
1915 	uint64_t	tx_ring;
1916 
1917 	/*
1918 	 * Initialize ETX Registers:
1919 	 * config, txring_lo, txring_hi
1920 	 */
1921 	tx_ring = ERI_IOPBIOADDR(erip, erip->eri_tmdp);
1922 	PUT_ETXREG(txring_lo, (uint32_t)(tx_ring));
1923 	PUT_ETXREG(txring_hi, (uint32_t)(tx_ring >> 32));
1924 
1925 	/*
1926 	 * Get TX Ring Size Masks.
1927 	 * The ring size ERI_TPENDING is defined in eri_mac.h.
1928 	 */
1929 	switch (ERI_TPENDING) {
1930 	case 32: i = ETX_RINGSZ_32;
1931 		break;
1932 	case 64: i = ETX_RINGSZ_64;
1933 		break;
1934 	case 128: i = ETX_RINGSZ_128;
1935 		break;
1936 	case 256: i = ETX_RINGSZ_256;
1937 		break;
1938 	case 512: i = ETX_RINGSZ_512;
1939 		break;
1940 	case 1024: i = ETX_RINGSZ_1024;
1941 		break;
1942 	case 2048: i = ETX_RINGSZ_2048;
1943 		break;
1944 	case 4096: i = ETX_RINGSZ_4096;
1945 		break;
1946 	default:
1947 		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
1948 		    unk_tx_descr_sze_msg, ERI_TPENDING);
1949 		return (1);
1950 	}
1951 
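	/*
	 * Merge the ring-size bits with the TX FIFO threshold, then
	 * enable the TX DMA channel and the MAC.
	 */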
1952 	i <<= ERI_TX_RINGSZ_SHIFT;
1953 	PUT_ETXREG(config, ETX_CONFIG_THRESHOLD | i);
1954 	ENABLE_TXDMA(erip);
1955 	ENABLE_MAC(erip);
1956 	return (0);
1957 }
1958 
1959 
1960 /*
1961  * Initialize the RX DMA registers and Enable the RX DMA.
1962  */
1963 static uint32_t
1964 eri_init_rxregs(struct eri *erip)
1965 {
1966 	int i;
1967 	uint64_t	rx_ring;
1968 
1969 	/*
1970 	 * Initialize ERX Registers:
1971 	 * rxring_lo, rxring_hi, config, rx_blanking, rx_pause_threshold.
1972 	 * Also, rx_kick
1973 	 * Read and save rxfifo_size.
1974 	 * XXX: Use this to properly configure PAUSE threshold values.
1975 	 */
1976 	rx_ring = ERI_IOPBIOADDR(erip, erip->rmdp);
1977 	PUT_ERXREG(rxring_lo, (uint32_t)(rx_ring));
1978 	PUT_ERXREG(rxring_hi, (uint32_t)(rx_ring >> 32));
1979 	PUT_ERXREG(rx_kick, erip->rx_kick);
1980 
1981 	/*
1982 	 * The maximum ring size, ERI_RMDMAX, is defined in eri_mac.h.
1983 	 * A larger ERI_RPENDING provides better performance but requires
1984 	 * more system DVMA memory.
1985 	 * eri_rx_ring_size can be used to tune this value from /etc/system.
1986 	 * eri_rx_ring_size cannot be made NDD-tunable because of
1987 	 * non-recoverable errors which cannot be detected from NDD operations.
1988 	 */
1989 
1990 	/*
1991 	 * get the rxring size bits
1992 	 */
1993 	switch (ERI_RPENDING) {
1994 	case 32: i = ERX_RINGSZ_32;
1995 		break;
1996 	case 64: i = ERX_RINGSZ_64;
1997 		break;
1998 	case 128: i = ERX_RINGSZ_128;
1999 		break;
2000 	case 256: i = ERX_RINGSZ_256;
2001 		break;
2002 	case 512: i = ERX_RINGSZ_512;
2003 		break;
2004 	case 1024: i = ERX_RINGSZ_1024;
2005 		break;
2006 	case 2048: i = ERX_RINGSZ_2048;
2007 		break;
2008 	case 4096: i = ERX_RINGSZ_4096;
2009 		break;
2010 	default:
2011 		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
2012 		    unk_rx_descr_sze_msg, ERI_RPENDING);
2013 		return (1);
2014 	}
2015 
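	/*
	 * Combine the ring-size bits with the first-byte offset, the
	 * checksum-start offset (the Ethernet header is skipped), and
	 * the RX FIFO threshold before programming the ERX config.
	 */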
2016 	i <<= ERI_RX_RINGSZ_SHIFT;
2017 	i |=  (ERI_FSTBYTE_OFFSET << ERI_RX_CONFIG_FBO_SHIFT) |
2018 	    (ETHERHEADER_SIZE << ERI_RX_CONFIG_RX_CSSTART_SHIFT) |
2019 	    (ERI_RX_FIFOTH_1024 << ERI_RX_CONFIG_RXFIFOTH_SHIFT);
2020 
2021 	PUT_ERXREG(config, i);
2022 	PUT_ERXREG(rx_blanking,
2023 	    (param_intr_blank_time << ERI_RX_BLNK_INTR_TIME_SHIFT) |
2024 	    param_intr_blank_packets);
2025 
2026 	PUT_ERXREG(rx_pause_threshold, rx_pause_threshold);
2027 	erip->rxfifo_size = GET_ERXREG(rxfifo_size);
2028 	ENABLE_RXDMA(erip);
2029 	return (0);
2030 }
2031 
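/*
 * Flush both the RX and TX buffers; a nonzero return means a flush failed.
 */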
2032 static int
2033 eri_freebufs(struct eri *erip)
2034 {
2035 	int status = 0;
2036 
2037 	status = eri_flush_rxbufs(erip) | eri_flush_txbufs(erip);
2038 	return (status);
2039 }
2040 
2041 static void
2042 eri_update_rxbufs(struct eri *erip)
2043 {
2044 	int		i;
2045 	volatile struct rmd  *rmdp, *rmdpbase;
2046 
2047 	/*
2048 	 * Hang out receive buffers.
2049 	 */
2050 	rmdpbase = erip->rmdp;
2051 	for (i = 0; i < ERI_RPENDING; i++) {
2052 		rmdp = rmdpbase + i;
2053 		UPDATE_RMD(rmdp);
2054 	}
2055 
2056 	/*
2057 	 * sync RXDMA descriptors.
2058 	 */
2059 	ERI_SYNCIOPB(erip, erip->rmdp, (ERI_RPENDING * sizeof (struct rmd)),
2060 	    DDI_DMA_SYNC_FORDEV);
2061 	/*
2062 	 * Reset RMD 'walking' pointers.
2063 	 */
2064 	erip->rnextp =	erip->rmdp;
2065 	erip->rx_completion = 0;
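	/* Hand the hardware all but the last group of four descriptors. */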
2066 	erip->rx_kick =	ERI_RPENDING - 4;
2067 }
2068 
2069 /*
2070  * This routine resets the RX DMA only. In the case of RX failures such
2071  * as an RX Tag Error or RX hang, we don't want to do a global reset,
2072  * which takes down the link and clears the FIFOs. By doing an RX-only
2073  * reset, we leave the TX side and the link intact.
2074  */
2075 static uint32_t
2076 eri_init_rx_channel(struct eri *erip)
2077 {
2078 	erip->flags &= ~ERI_RXINIT;
2079 	(void) eri_erx_reset(erip);
2080 	eri_update_rxbufs(erip);
2081 	if (eri_init_rxregs(erip))
2082 		return (1);
2083 	PUT_MACREG(rxmask, BMAC_RXINTR_MASK);
2084 	PUT_MACREG(rxcfg, GET_MACREG(rxcfg) | BMAC_RXCFG_ENAB);
2085 	erip->rx_reset_issued = 0;
2086 	HSTAT(erip, rx_inits);
2087 	erip->flags |= ERI_RXINIT;
2088 	return (0);
2089 }
2090 
2091 static void
2092 eri_init_rx(struct eri *erip)
2093 {
2094 	uint16_t	*ladrf;
2095 
2096 	/*
2097 	 * First of all make sure the Receive MAC is stopped.
2098 	 */
2099 	(void) eri_rxmac_disable(erip); /* Disable the RX MAC */
2100 
2101 	/*
2102 	 * Program BigMAC with local individual ethernet address.
2103 	 */
2104 
2105 	PUT_MACREG(madd0, (erip->ouraddr[4] << 8) | erip->ouraddr[5]);
2106 	PUT_MACREG(madd1, (erip->ouraddr[2] << 8) | erip->ouraddr[3]);
2107 	PUT_MACREG(madd2, (erip->ouraddr[0] << 8) | erip->ouraddr[1]);
2108 
2109 	/*
2110 	 * Set up the multicast address filter by passing all multicast
2111 	 * addresses through a CRC generator, and then using the
2112 	 * low-order 8 bits as an index into the 256-bit logical
2113 	 * address filter. The high-order four bits select the word,
2114 	 * while the rest of the bits select the bit within the word.
2115 	 */
2116 
2117 	ladrf = erip->ladrf;
2118 
2119 	PUT_MACREG(hash0, ladrf[0]);
2120 	PUT_MACREG(hash1, ladrf[1]);
2121 	PUT_MACREG(hash2, ladrf[2]);
2122 	PUT_MACREG(hash3, ladrf[3]);
2123 	PUT_MACREG(hash4, ladrf[4]);
2124 	PUT_MACREG(hash5, ladrf[5]);
2125 	PUT_MACREG(hash6, ladrf[6]);
2126 	PUT_MACREG(hash7, ladrf[7]);
2127 	PUT_MACREG(hash8, ladrf[8]);
2128 	PUT_MACREG(hash9, ladrf[9]);
2129 	PUT_MACREG(hash10, ladrf[10]);
2130 	PUT_MACREG(hash11, ladrf[11]);
2131 	PUT_MACREG(hash12, ladrf[12]);
2132 	PUT_MACREG(hash13, ladrf[13]);
2133 	PUT_MACREG(hash14, ladrf[14]);
2134 	PUT_MACREG(hash15, ladrf[15]);
2135 
2136 #ifdef ERI_DONT_STRIP_CRC
2137 	PUT_MACREG(rxcfg,
2138 	    ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2139 	    (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2140 	    BMAC_RXCFG_ENAB));
2141 #else
2142 	PUT_MACREG(rxcfg,
2143 	    ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2144 	    (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2145 	    BMAC_RXCFG_ENAB | BMAC_RXCFG_STRIP_CRC));
2146 #endif
2147 	/* wait after setting Hash Enable bit */
2148 	/* drv_usecwait(10); */
2149 
2150 	HSTAT(erip, rx_inits);
2151 }
2152 
2153 /*
2154  * This routine is used to init the TX MAC only.
2155  *	&erip->xmitlock is held before calling this routine.
2156  */
2157 void
2158 eri_init_txmac(struct eri *erip)
2159 {
2160 	uint32_t carrier_ext = 0;
2161 
2162 	erip->flags &= ~ERI_TXINIT;
2163 	/*
2164 	 * Stop the Transmit MAC.
2165 	 */
2166 	(void) eri_txmac_disable(erip);
2167 
2168 	/*
2169 	 * Must be Internal Transceiver
2170 	 */
2171 	if (param_mode)
2172 		PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2173 		    BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE));
2174 	else
2175 		PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2176 		    BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE |
2177 		    BMAC_XIFC_DIS_ECHO));
2178 
2179 	/*
2180 	 * Initialize the interpacket gap registers
2181 	 */
2182 	PUT_MACREG(ipg1, param_ipg1);
2183 	PUT_MACREG(ipg2, param_ipg2);
2184 
2185 	if (erip->ngu_enable)
2186 		PUT_MACREG(txcfg, ((param_mode ? BMAC_TXCFG_FDX: 0) |
2187 		    ((param_lance_mode && (erip->lance_mode_enable)) ?
2188 		    BMAC_TXCFG_ENIPG0 : 0) |
2189 		    (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0) |
2190 		    BMAC_TXCFG_NGU));
2191 	else
2192 		PUT_MACREG(txcfg, ((param_mode ? BMAC_TXCFG_FDX: 0) |
2193 		    ((param_lance_mode && (erip->lance_mode_enable)) ?
2194 		    BMAC_TXCFG_ENIPG0 : 0) |
2195 		    (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0)));
2196 
2197 	ENABLE_TXDMA(erip);
2198 	ENABLE_TXMAC(erip);
2199 
2200 	HSTAT(erip, tx_inits);
2201 	erip->flags |= ERI_TXINIT;
2202 }
2203 
2204 static void
2205 eri_unallocthings(struct eri *erip)
2206 {
2207 	uint32_t	flag;
2208 	uint32_t	i;
2209 
2210 	flag = erip->alloc_flag;
2211 
2212 	if (flag & ERI_DESC_MEM_MAP)
2213 		(void) ddi_dma_unbind_handle(erip->md_h);
2214 
2215 	if (flag & ERI_DESC_MEM_ALLOC) {
2216 		ddi_dma_mem_free(&erip->mdm_h);
2217 		erip->rmdp = NULL;
2218 		erip->eri_tmdp = NULL;
2219 	}
2220 
2221 	if (flag & ERI_DESC_HANDLE_ALLOC)
2222 		ddi_dma_free_handle(&erip->md_h);
2223 
2224 	(void) eri_freebufs(erip);
2225 
2226 	if (flag & ERI_XMIT_HANDLE_ALLOC)
2227 		for (i = 0; i < erip->xmit_handle_cnt; i++)
2228 			ddi_dma_free_handle(&erip->ndmaxh[i]);
2229 
2230 	if (flag & ERI_XMIT_DVMA_ALLOC) {
2231 		(void) dvma_release(erip->eri_dvmaxh);
2232 		erip->eri_dvmaxh = NULL;
2233 	}
2234 
2235 	if (flag & ERI_RCV_HANDLE_ALLOC)
2236 		for (i = 0; i < erip->rcv_handle_cnt; i++)
2237 			ddi_dma_free_handle(&erip->ndmarh[i]);
2238 
2239 	if (flag & ERI_RCV_DVMA_ALLOC) {
2240 		(void) dvma_release(erip->eri_dvmarh);
2241 		erip->eri_dvmarh = NULL;
2242 	}
2243 
2244 	if (flag & ERI_XBUFS_KMEM_DMABIND) {
2245 		(void) ddi_dma_unbind_handle(erip->tbuf_handle);
2246 		erip->tbuf_ioaddr = 0;
2247 	}
2248 
2249 	if (flag & ERI_XBUFS_KMEM_ALLOC) {
2250 		kmem_free(erip->tbuf_kaddr, ERI_TPENDING * eri_tx_bcopy_max);
2251 		erip->tbuf_kaddr = NULL;
2252 	}
2253 
2254 	if (flag & ERI_XBUFS_HANDLE_ALLOC) {
2255 		ddi_dma_free_handle(&erip->tbuf_handle);
2256 		erip->tbuf_handle = NULL;
2257 	}
2258 
2259 }
2260 
2261 /*
2262  * Initialize channel.
2263  * Return true on success, false on error.
2264  *
2265  * The recommended sequence for initialization is:
2266  * 1. Issue a Global Reset command to the Ethernet Channel.
2267  * 2. Poll the Global_Reset bits until the execution of the reset has been
2268  *    completed.
2269  * 2(a). Use the MIF Frame/Output register to reset the transceiver.
2270  *	 Poll Register 0 until the Reset bit is 0.
2271  * 2(b). Use the MIF Frame/Output register to set the PHY in Normal-Op,
2272  *	 100Mbps and Non-Isolated mode. The main point here is to bring the
2273  *	 PHY out of Isolate mode so that it can generate the rx_clk and tx_clk
2274  *	 to the MII interface so that the Bigmac core can correctly reset
2275  *	 upon a software reset.
2276  * 2(c).  Issue another Global Reset command to the Ethernet Channel and poll
2277  *	  the Global_Reset bits till completion.
2278  * 3. Set up all the data structures in the host memory.
2279  * 4. Program the TX_MAC registers/counters (excluding the TX_MAC Configuration
2280  *    Register).
2281  * 5. Program the RX_MAC registers/counters (excluding the RX_MAC Configuration
2282  *    Register).
2283  * 6. Program the Transmit Descriptor Ring Base Address in the ETX.
2284  * 7. Program the Receive Descriptor Ring Base Address in the ERX.
2285  * 8. Program the Global Configuration and the Global Interrupt Mask Registers.
2286  * 9. Program the ETX Configuration register (enable the Transmit DMA channel).
2287  * 10. Program the ERX Configuration register (enable the Receive DMA channel).
2288  * 11. Program the XIF Configuration Register (enable the XIF).
2289  * 12. Program the RX_MAC Configuration Register (Enable the RX_MAC).
2290  * 13. Program the TX_MAC Configuration Register (Enable the TX_MAC).
2291  */
2292 /*
2293  * lock order:
2294  *	intrlock->linklock->xmitlock->xcvrlock
2295  */
2296 static boolean_t
2297 eri_init(struct eri *erip)
2298 {
2299 	uint32_t	init_stat = 0;
2300 	uint32_t	partial_init = 0;
2301 	uint32_t	carrier_ext = 0;
2302 	uint32_t	mac_ctl = 0;
2303 	boolean_t	ret;
2304 	uint32_t 	link_timeout = ERI_LINKCHECK_TIMER;
2305 	link_state_t	linkupdate = LINK_STATE_UNKNOWN;
2306 
2307 	/*
2308 	 * Just return successfully if device is suspended.
2309 	 * eri_init() will be called again from resume.
2310 	 */
2311 	ASSERT(erip != NULL);
2312 
2313 	if (erip->flags & ERI_SUSPENDED) {
2314 		ret = B_TRUE;
2315 		goto init_exit;
2316 	}
2317 
2318 	mutex_enter(&erip->intrlock);
2319 	eri_stop_timer(erip);	/* acquire linklock */
2320 	mutex_enter(&erip->xmitlock);
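	/* Preserve only the DLPI-linkup and started flags; clear the rest. */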
2321 	erip->flags &= (ERI_DLPI_LINKUP | ERI_STARTED);
2322 	erip->wantw = B_FALSE;
2323 	HSTAT(erip, inits);
2324 	erip->txhung = 0;
2325 
2326 	if ((erip->stats.inits > 1) && (erip->init_macregs == 0))
2327 		eri_savecntrs(erip);
2328 
2329 	mutex_enter(&erip->xcvrlock);
2330 	if (!param_linkup || erip->linkcheck) {
2331 		if (!erip->linkcheck)
2332 			linkupdate = LINK_STATE_DOWN;
2333 		(void) eri_stop(erip);
2334 	}
2335 	if (!(erip->flags & ERI_DLPI_LINKUP) || !param_linkup) {
2336 		erip->flags |= ERI_DLPI_LINKUP;
2337 		eri_mif_poll(erip, MIF_POLL_STOP);
2338 		(void) eri_new_xcvr(erip);
2339 		ERI_DEBUG_MSG1(erip, XCVR_MSG, "New transceiver detected.");
2340 		if (param_transceiver != NO_XCVR) {
2341 			/*
2342 			 * Reset the new PHY and bring up the
2343 			 * link
2344 			 */
2345 			if (eri_reset_xcvr(erip)) {
2346 				ERI_FAULT_MSG1(erip, SEVERITY_NONE,
2347 				    ERI_VERB_MSG, "In Init after reset");
2348 				mutex_exit(&erip->xcvrlock);
2349 				link_timeout = 0;
2350 				goto done;
2351 			}
2352 			if (erip->stats.link_up == LINK_STATE_UP)
2353 				linkupdate = LINK_STATE_UP;
2354 		} else {
2355 			erip->flags |= (ERI_RUNNING | ERI_INITIALIZED);
2356 			param_linkup = 0;
2357 			erip->stats.link_up = LINK_STATE_DOWN;
2358 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2359 			linkupdate = LINK_STATE_DOWN;
2360 			 * Still go on and complete the MAC initialization,
2361 			 * as the xcvr might show up later.
2363 			 * you must return to their mutex ordering.
2364 			 */
2365 		}
2366 		eri_mif_poll(erip, MIF_POLL_START);
2367 	}
2368 
2369 	mutex_exit(&erip->xcvrlock);
2370 
2371 	/*
2372 	 * Allocate data structures.
2373 	 */
2374 	if (erip->global_reset_issued) {
2375 		if (erip->global_reset_issued == 2) { /* fast path */
2376 			if (eri_flush_txbufs(erip))
2377 				goto done;
2378 			/*
2379 			 * Hang out/Initialize descriptors and buffers.
2380 			 */
2381 			eri_init_txbufs(erip);
2382 
2383 			eri_update_rxbufs(erip);
2384 		} else {
2385 			init_stat = eri_allocthings(erip);
2386 			if (init_stat)
2387 				goto done;
2388 
2389 			if (eri_freebufs(erip))
2390 				goto done;
2391 			/*
2392 			 * Hang out/Initialize descriptors and buffers.
2393 			 */
2394 			eri_init_txbufs(erip);
2395 			if (eri_init_rxbufs(erip))
2396 				goto done;
2397 		}
2398 	}
2399 
2400 	/*
2401 	 * BigMAC requires that we confirm that tx, rx and hash are in a
2402 	 * quiescent state.
2403 	 * MAC will not reset successfully if the transceiver is not reset and
2404 	 * brought out of Isolate mode correctly. TXMAC reset may fail if the
2405 	 * ext. transceiver is just disconnected. If it fails, try again by
2406 	 * checking the transceiver.
2407 	 */
2408 	if (eri_txmac_disable(erip)) {
2409 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
2410 		    disable_txmac_msg);
2411 		param_linkup = 0;	/* force init again */
2412 		erip->stats.link_up = LINK_STATE_DOWN;
2413 		erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2414 		linkupdate = LINK_STATE_DOWN;
2415 		goto done;
2416 	}
2417 
2418 	if (eri_rxmac_disable(erip)) {
2419 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
2420 		    disable_rxmac_msg);
2421 		param_linkup = 0;	/* force init again */
2422 		erip->stats.link_up = LINK_STATE_DOWN;
2423 		erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2424 		linkupdate = LINK_STATE_DOWN;
2425 		goto done;
2426 	}
2427 
2428 	eri_init_macregs_generic(erip);
2429 
2430 	/*
2431 	 * Initialize ERI Global registers :
2432 	 * config
2433 	 * For PCI :  err_mask, bif_cfg
2434 	 *
2435 	 * Use user-configurable parameter for enabling 64-bit transfers.
2436 	 * Note:For PCI, burst sizes are in multiples of 64-bytes.
2437 	 */
2438 
2439 	/*
2440 	 * Significant performance improvements can be achieved by
2441 	 * disabling the transmit interrupt. Thus TMDs are reclaimed
2442 	 * only very infrequently.
2443 	 * The PCS interrupt is masked here. It is enabled only when
2444 	 * a PCS link is brought up, because there is no second-level
2445 	 * mask for this interrupt.
2446 	 * Init GLOBAL, TXMAC, RXMAC and MACCTL interrupt masks here.
2447 	 */
2448 	if (! partial_init) {
2449 		PUT_GLOBREG(intmask, ERI_G_MASK_INTR);
2450 		erip->tx_int_me = 0;
2451 		PUT_MACREG(txmask, BMAC_TXINTR_MASK);
2452 		PUT_MACREG(rxmask, BMAC_RXINTR_MASK);
2453 		PUT_MACREG(macctl_mask, ERI_MACCTL_INTR_MASK);
2454 	}
2455 
2456 	if (erip->global_reset_issued) {
2457 		/*
2458 		 * Initialize ETX Registers:
2459 		 * config, txring_lo, txring_hi
2460 		 */
2461 		if (eri_init_txregs(erip))
2462 			goto done;
2463 		/*
2464 		 * Initialize ERX Registers:
2465 		 * rxring_lo, rxring_hi, config, rx_blanking,
2466 		 * rx_pause_threshold.  Also, rx_kick
2467 		 * Read and save rxfifo_size.
2468 		 */
2469 		if (eri_init_rxregs(erip))
2470 			goto done;
2471 	}
2472 
2473 	PUT_MACREG(macctl_mask, ERI_MACCTL_INTR_MASK);
2474 
2475 	/*
2476 	 * Set up the slottime, rxconfig, and txconfig, without enabling
2477 	 * the latter two at this time.
2478 	 */
2479 	PUT_MACREG(slot, BMAC_SLOT_TIME);
2480 	carrier_ext = 0;
2481 
2482 #ifdef ERI_DONT_STRIP_CRC
2483 	PUT_MACREG(rxcfg,
2484 	    ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2485 	    (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2486 	    (carrier_ext ? BMAC_RXCFG_CARR_EXT : 0)));
2487 #else
2488 	PUT_MACREG(rxcfg,
2489 	    ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2490 	    (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2491 	    BMAC_RXCFG_STRIP_CRC |
2492 	    (carrier_ext ? BMAC_RXCFG_CARR_EXT : 0)));
2493 #endif
2494 	drv_usecwait(10);	/* wait after setting Hash Enable bit */
2495 
2496 	if (erip->ngu_enable)
2497 		PUT_MACREG(txcfg,
2498 		    ((param_mode ? BMAC_TXCFG_FDX: 0) |
2499 		    ((param_lance_mode && (erip->lance_mode_enable)) ?
2500 		    BMAC_TXCFG_ENIPG0 : 0) |
2501 		    (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0) |
2502 		    BMAC_TXCFG_NGU));
2503 	else
2504 		PUT_MACREG(txcfg,
2505 		    ((param_mode ? BMAC_TXCFG_FDX: 0) |
2506 		    ((param_lance_mode && (erip->lance_mode_enable)) ?
2507 		    BMAC_TXCFG_ENIPG0 : 0) |
2508 		    (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0)));
2509 
2510 	if (erip->pauseRX)
2511 		mac_ctl = ERI_MCTLCFG_RXPAUSE;
2512 	if (erip->pauseTX)
2513 		mac_ctl |= ERI_MCTLCFG_TXPAUSE;
2514 
2515 	PUT_MACREG(macctl_cfg, mac_ctl);
2516 
2517 	/*
2518 	 * Must be Internal Transceiver
2519 	 */
2520 	if (param_mode)
2521 		PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2522 		    BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE));
2523 	else {
2524 		PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2525 		    BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE |
2526 		    BMAC_XIFC_DIS_ECHO));
2527 
2528 		link_timeout = ERI_CHECK_HANG_TIMER;
2529 	}
2530 
2531 	/*
2532 	 * if MAC int loopback flag is set, put xifc reg in mii loopback
2533 	 * mode {DIAG}
2534 	 */
2535 	if (erip->flags & ERI_MACLOOPBACK) {
2536 		PUT_MACREG(xifc, GET_MACREG(xifc) | BMAC_XIFC_MIILPBK);
2537 	}
2538 
2539 	/*
2540 	 * Enable TX and RX MACs.
2541 	 */
2542 	ENABLE_MAC(erip);
2543 	erip->flags |= (ERI_RUNNING | ERI_INITIALIZED |
2544 	    ERI_TXINIT | ERI_RXINIT);
2545 	mac_tx_update(erip->mh);
2546 	erip->global_reset_issued = 0;
2547 
2548 #ifdef	ERI_10_10_FORCE_SPEED_WORKAROUND
2549 	eri_xcvr_force_mode(erip, &link_timeout);
2550 #endif
2551 
2552 done:
2553 	if (init_stat)
2554 		eri_unallocthings(erip);
2555 
2556 	mutex_exit(&erip->xmitlock);
2557 	eri_start_timer(erip, eri_check_link, link_timeout);
2558 	mutex_exit(&erip->intrlock);
2559 
2560 	if (linkupdate != LINK_STATE_UNKNOWN)
2561 		mac_link_update(erip->mh, linkupdate);
2562 
2563 	ret = (erip->flags & ERI_RUNNING) ? B_TRUE : B_FALSE;
2564 	if (!ret) {
2565 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
2566 		    "eri_init failed");
2567 	}
2568 
2569 init_exit:
2570 	ASSERT(!MUTEX_HELD(&erip->linklock));
2571 	return (ret);
2572 }
2573 
2574 /*
2575  * Determine the DMA burst sizes; a burstsizes value of 0 signifies
 * no burst size and is treated as failure.
2576  */
2577 static int
2578 eri_burstsize(struct eri *erip)
2579 {
2580 	ddi_dma_handle_t handle;
2581 
2582 	if (ddi_dma_alloc_handle(erip->dip, &dma_attr, DDI_DMA_DONTWAIT,
2583 	    NULL, &handle))
2584 		return (DDI_FAILURE);
2585 
2586 	erip->burstsizes = ddi_dma_burstsizes(handle);
2587 	ddi_dma_free_handle(&handle);
2588 
2589 	if (erip->burstsizes)
2590 		return (DDI_SUCCESS);
2591 
2592 	return (DDI_FAILURE);
2593 }
2594 
2595 /*
2596  * Un-initialize (STOP) ERI channel.
2597  */
2598 static void
2599 eri_uninit(struct eri *erip)
2600 {
2601 	boolean_t needind;
2602 
2603 	/*
2604 	 * Allow up to 'ERI_DRAINTIME' for pending xmits to complete.
2605 	 */
2606 	ERI_DELAY((erip->tcurp == erip->tnextp), ERI_DRAINTIME);
2607 
2608 	mutex_enter(&erip->intrlock);
2609 	eri_stop_timer(erip);   /* acquire linklock */
2610 	mutex_enter(&erip->xmitlock);
2611 	mutex_enter(&erip->xcvrlock);
2612 	eri_mif_poll(erip, MIF_POLL_STOP);
2613 	erip->flags &= ~ERI_DLPI_LINKUP;
2614 	mutex_exit(&erip->xcvrlock);
2615 
2616 	needind = !erip->linkcheck;
2617 	(void) eri_stop(erip);
2618 	erip->flags &= ~ERI_RUNNING;
2619 
2620 	mutex_exit(&erip->xmitlock);
2621 	eri_start_timer(erip, eri_check_link, 0);
2622 	mutex_exit(&erip->intrlock);
2623 
2624 	if (needind)
2625 		mac_link_update(erip->mh, LINK_STATE_DOWN);
2626 }
2627 
2628 /*
2629  * Allocate CONSISTENT memory for rmds and tmds with appropriate alignment and
2630  * map it in IO space.
2631  *
2632  * The driver allocates STREAMS buffers which will be mapped in DVMA
2633  * space using DDI DMA resources.
2634  *
2635  */
2636 static int
2637 eri_allocthings(struct eri *erip)
2638 {
2639 
2640 	uintptr_t	a;
2641 	int		size;
2642 	uint32_t	rval;
2643 	int		i;
2644 	size_t		real_len;
2645 	uint32_t	cookiec;
2646 	int		alloc_stat = 0;
2647 	ddi_dma_cookie_t dma_cookie;
2648 
2649 	/*
2650 	 * Return if resources are already allocated.
2651 	 */
2652 	if (erip->rmdp)
2653 		return (alloc_stat);
2654 
2655 	erip->alloc_flag = 0;
2656 
2657 	/*
2658 	 * Allocate the TMD and RMD descriptors and extra for alignments.
2659 	 */
2660 	size = (ERI_RPENDING * sizeof (struct rmd) +
2661 	    ERI_TPENDING * sizeof (struct eri_tmd)) + ERI_GMDALIGN;
2662 
2663 	rval = ddi_dma_alloc_handle(erip->dip, &desc_dma_attr,
2664 	    DDI_DMA_DONTWAIT, 0, &erip->md_h);
2665 	if (rval != DDI_SUCCESS) {
2666 		return (++alloc_stat);
2667 	}
2668 	erip->alloc_flag |= ERI_DESC_HANDLE_ALLOC;
2669 
2670 	rval = ddi_dma_mem_alloc(erip->md_h, size, &erip->dev_attr,
2671 	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
2672 	    (caddr_t *)&erip->iopbkbase, &real_len, &erip->mdm_h);
2673 	if (rval != DDI_SUCCESS) {
2674 		return (++alloc_stat);
2675 	}
2676 	erip->alloc_flag |= ERI_DESC_MEM_ALLOC;
2677 
2678 	rval = ddi_dma_addr_bind_handle(erip->md_h, NULL,
2679 	    (caddr_t)erip->iopbkbase, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2680 	    DDI_DMA_DONTWAIT, 0, &erip->md_c, &cookiec);
2681 
2682 	if (rval != DDI_DMA_MAPPED)
2683 		return (++alloc_stat);
2684 
2685 	erip->alloc_flag |= ERI_DESC_MEM_MAP;
2686 
2687 	if (cookiec != 1)
2688 		return (++alloc_stat);
2689 
2690 	erip->iopbiobase = erip->md_c.dmac_address;
2691 
2692 	a = erip->iopbkbase;
2693 	a = ROUNDUP(a, ERI_GMDALIGN);
2694 	erip->rmdp = (struct rmd *)a;
2695 	a += ERI_RPENDING * sizeof (struct rmd);
2696 	erip->eri_tmdp = (struct eri_tmd *)a;
2697 /*
2698  *	Specifically we reserve 2 * (ERI_TPENDING + ERI_RPENDING)
2699  *	pagetable entries, i.e. 2 ptes for each descriptor. Since the
2700  *	ethernet buffers are 1518 bytes, they can use at most 2 ptes.
2701  * 	Will do a ddi_dma_addr_setup for each buffer.
2703  */
2704 	/*
2705 	 * In the current implementation, we use the ddi compliant
2706 	 * dma interface. We allocate ERI_TPENDING dma handles for
2707 	 * Transmit  activity and ERI_RPENDING dma handles for receive
2708 	 * activity. The actual dma mapping is done in the io functions
2709 	 * eri_start() and eri_read_dma(),
2710 	 * by calling the ddi_dma_addr_bind_handle.
2711 	 * Dma resources are deallocated by calling ddi_dma_unbind_handle
2712 	 * in eri_reclaim() for transmit and eri_read_dma(), for receive io.
2713 	 */
2714 
2715 	if (eri_use_dvma_tx &&
2716 	    (dvma_reserve(erip->dip, &eri_dma_limits, (ERI_TPENDING * 2),
2717 	    &erip->eri_dvmaxh)) == DDI_SUCCESS) {
2718 		erip->alloc_flag |= ERI_XMIT_DVMA_ALLOC;
2719 	} else {
2720 		erip->eri_dvmaxh = NULL;
2721 		for (i = 0; i < ERI_TPENDING; i++) {
2722 			rval = ddi_dma_alloc_handle(erip->dip,
2723 			    &dma_attr, DDI_DMA_DONTWAIT, 0,
2724 			    &erip->ndmaxh[i]);
2725 
2726 			if (rval != DDI_SUCCESS) {
2727 				ERI_FAULT_MSG1(erip, SEVERITY_HIGH,
2728 				    ERI_VERB_MSG, alloc_tx_dmah_msg);
2729 				alloc_stat++;
2730 				break;
2731 			}
2732 		}
2733 
2734 		erip->xmit_handle_cnt = i;
2735 
2736 		if (i)
2737 			erip->alloc_flag |= ERI_XMIT_HANDLE_ALLOC;
2738 
2739 		if (alloc_stat)
2740 			return (alloc_stat);
2741 	}
2742 
2743 	if (eri_use_dvma_rx &&
2744 	    (dvma_reserve(erip->dip, &eri_dma_limits, (ERI_RPENDING * 2),
2745 	    &erip->eri_dvmarh)) == DDI_SUCCESS) {
2746 		erip->alloc_flag |= ERI_RCV_DVMA_ALLOC;
2747 	} else {
2748 		erip->eri_dvmarh = NULL;
2749 
2750 		for (i = 0; i < ERI_RPENDING; i++) {
2751 			rval = ddi_dma_alloc_handle(erip->dip,
2752 			    &dma_attr, DDI_DMA_DONTWAIT,
2753 			    0, &erip->ndmarh[i]);
2754 
2755 			if (rval != DDI_SUCCESS) {
2756 				ERI_FAULT_MSG1(erip, SEVERITY_HIGH,
2757 				    ERI_VERB_MSG, alloc_rx_dmah_msg);
2758 				alloc_stat++;
2759 				break;
2760 			}
2761 		}
2762 
2763 		erip->rcv_handle_cnt = i;
2764 
2765 		if (i)
2766 			erip->alloc_flag |= ERI_RCV_HANDLE_ALLOC;
2767 
2768 		if (alloc_stat)
2769 			return (alloc_stat);
2770 
2771 	}
2772 
2773 /*
2774  *	Allocate tiny TX buffers
2775  *	Note: tinybufs must always be allocated in the native
2776  *	ordering of the CPU (always big-endian for Sparc).
2777  *	ddi_dma_mem_alloc returns memory in the native ordering
2778  *	of the bus (big endian for SBus, little endian for PCI).
2779  *	So we cannot use ddi_dma_mem_alloc(, &erip->ge_dev_attr)
2780  *	because we'll get little endian memory on PCI.
2781  */
2782 	if (ddi_dma_alloc_handle(erip->dip, &desc_dma_attr, DDI_DMA_DONTWAIT,
2783 	    0, &erip->tbuf_handle) != DDI_SUCCESS) {
2784 		ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
2785 		    alloc_tx_dmah_msg);
2786 		return (++alloc_stat);
2787 	}
2788 	erip->alloc_flag |= ERI_XBUFS_HANDLE_ALLOC;
2789 	size = ERI_TPENDING * eri_tx_bcopy_max;
2790 	erip->tbuf_kaddr = (caddr_t)kmem_alloc(size, KM_NOSLEEP);
2791 	if (erip->tbuf_kaddr == NULL) {
2792 		ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
2793 		    alloc_tx_dmah_msg);
2794 		return (++alloc_stat);
2795 	}
2796 	erip->alloc_flag |= ERI_XBUFS_KMEM_ALLOC;
2797 	if (ddi_dma_addr_bind_handle(erip->tbuf_handle, NULL,
2798 	    erip->tbuf_kaddr, size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2799 	    DDI_DMA_DONTWAIT, 0, &dma_cookie, &cookiec) != DDI_DMA_MAPPED) {
2800 		return (++alloc_stat);
2801 	}
2802 	erip->tbuf_ioaddr = dma_cookie.dmac_address;
2803 	erip->alloc_flag |= ERI_XBUFS_KMEM_DMABIND;
2804 	if (cookiec != 1)
2805 		return (++alloc_stat);
2806 
2807 	/*
2808 	 * Keep handy limit values for RMD, TMD, and Buffers.
2809 	 */
2810 	erip->rmdlimp = &((erip->rmdp)[ERI_RPENDING]);
2811 	erip->eri_tmdlimp = &((erip->eri_tmdp)[ERI_TPENDING]);
2812 
2813 	/*
2814 	 * Zero out xmit and RCV holders.
2815 	 */
2816 	bzero((caddr_t)erip->tmblkp, sizeof (erip->tmblkp));
2817 	bzero((caddr_t)erip->rmblkp, sizeof (erip->rmblkp));
2818 	return (alloc_stat);
2819 }
2820 
2821 /* <<<<<<<<<<<<<<<<<	INTERRUPT HANDLING FUNCTION	>>>>>>>>>>>>>>>>>>>> */
2822 /*
2823  *	First check to see if it is our device interrupting.
2824  */
2825 static uint_t
2826 eri_intr(caddr_t arg)
2827 {
2828 	struct eri *erip = (void *)arg;
2829 	uint32_t erisbits;
2830 	uint32_t mif_status;
2831 	uint32_t serviced = DDI_INTR_UNCLAIMED;
2832 	link_state_t linkupdate = LINK_STATE_UNKNOWN;
2833 	boolean_t macupdate = B_FALSE;
2834 	mblk_t *mp;
2835 	mblk_t *head;
2836 	mblk_t **tail;
2837 
2838 	head = NULL;
2839 	tail = &head;
2840 
2841 	mutex_enter(&erip->intrlock);
2842 
2843 	erisbits = GET_GLOBREG(status);
2844 
2845 	/*
2846 	 * Check if it is only the RX_DONE interrupt, which is
2847 	 * the most frequent one.
2848 	 */
2849 	if (((erisbits & ERI_G_STATUS_RX_INT) == ERI_G_STATUS_RX_DONE) &&
2850 	    (erip->flags & ERI_RUNNING)) {
2851 		serviced = DDI_INTR_CLAIMED;
2852 		goto rx_done_int;
2853 	}
2854 
2855 	/* Claim the first interrupt after initialization */
2856 	if (erip->flags & ERI_INITIALIZED) {
2857 		erip->flags &= ~ERI_INITIALIZED;
2858 		serviced = DDI_INTR_CLAIMED;
2859 	}
2860 
2861 	/* Check for interesting events */
2862 	if ((erisbits & ERI_G_STATUS_INTR) == 0) {
2863 #ifdef	ESTAR_WORKAROUND
2864 		uint32_t linkupdate;
2865 #endif
2866 
2867 		ERI_DEBUG_MSG2(erip, DIAG_MSG,
2868 		    "eri_intr: Interrupt Not Claimed gsbits  %X", erisbits);
2869 #ifdef	DEBUG
2870 		noteri++;
2871 #endif
2872 		ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:MIF Config = 0x%X",
2873 		    GET_MIFREG(mif_cfg));
2874 		ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:MIF imask = 0x%X",
2875 		    GET_MIFREG(mif_imask));
2876 		ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:INT imask = 0x%X",
2877 		    GET_GLOBREG(intmask));
2878 		ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:alias %X",
2879 		    GET_GLOBREG(status_alias));
2880 #ifdef	ESTAR_WORKAROUND
2881 		linkupdate = eri_check_link_noind(erip);
2882 #endif
2883 		mutex_exit(&erip->intrlock);
2884 #ifdef	ESTAR_WORKAROUND
2885 		if (linkupdate != LINK_STATE_UNKNOWN)
2886 			mac_link_update(erip->mh, linkupdate);
2887 #endif
2888 		return (serviced);
2889 	}
2890 	serviced = DDI_INTR_CLAIMED;
2891 
2892 	if (!(erip->flags & ERI_RUNNING)) {
2893 		mutex_exit(&erip->intrlock);
2894 		eri_uninit(erip);
2895 		return (serviced);
2896 	}
2897 
2898 	if (erisbits & ERI_G_STATUS_FATAL_ERR) {
2899 		ERI_DEBUG_MSG2(erip, INTR_MSG,
2900 		    "eri_intr: fatal error: erisbits = %X", erisbits);
2901 		(void) eri_fatal_err(erip, erisbits);
2902 		eri_reinit_fatal++;
2903 
2904 		if (erip->rx_reset_issued) {
2905 			erip->rx_reset_issued = 0;
2906 			(void) eri_init_rx_channel(erip);
2907 			mutex_exit(&erip->intrlock);
2908 		} else {
2909 			param_linkup = 0;
2910 			erip->stats.link_up = LINK_STATE_DOWN;
2911 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2912 			DISABLE_MAC(erip);
2913 			mutex_exit(&erip->intrlock);
2914 			(void) eri_init(erip);
2915 		}
2916 		return (serviced);
2917 	}
2918 
2919 	if (erisbits & ERI_G_STATUS_NONFATAL_ERR) {
2920 		ERI_DEBUG_MSG2(erip, INTR_MSG,
2921 		    "eri_intr: non-fatal error: erisbits = %X", erisbits);
2922 		(void) eri_nonfatal_err(erip, erisbits);
2923 		if (erip->linkcheck) {
2924 			mutex_exit(&erip->intrlock);
2925 			(void) eri_init(erip);
2926 			return (serviced);
2927 		}
2928 	}
2929 
2930 	if (erisbits & ERI_G_STATUS_MIF_INT) {
2931 		uint16_t stat;
2932 		ERI_DEBUG_MSG2(erip, XCVR_MSG,
2933 		    "eri_intr:MIF Interrupt:mii_status %X", erip->mii_status);
2934 		eri_stop_timer(erip);   /* acquire linklock */
2935 
2936 		mutex_enter(&erip->xmitlock);
2937 		mutex_enter(&erip->xcvrlock);
2938 #ifdef	ERI_MIF_POLL_STATUS_WORKAROUND
2939 		mif_status = GET_MIFREG(mif_bsts);
2940 		eri_mif_poll(erip, MIF_POLL_STOP);
2941 		ERI_DEBUG_MSG3(erip, XCVR_MSG,
2942 		    "eri_intr: new MIF interrupt status %X XCVR status %X",
2943 		    mif_status, erip->mii_status);
2944 		(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
2945 		linkupdate = eri_mif_check(erip, stat, stat);
2946 
2947 #else
2948 		mif_status = GET_MIFREG(mif_bsts);
2949 		eri_mif_poll(erip, MIF_POLL_STOP);
2950 		linkupdate = eri_mif_check(erip, (uint16_t)mif_status,
2951 		    (uint16_t)(mif_status >> 16));
2952 #endif
2953 		eri_mif_poll(erip, MIF_POLL_START);
2954 		mutex_exit(&erip->xcvrlock);
2955 		mutex_exit(&erip->xmitlock);
2956 
2957 		if (!erip->openloop_autoneg)
2958 			eri_start_timer(erip, eri_check_link,
2959 			    ERI_LINKCHECK_TIMER);
2960 		else
2961 			eri_start_timer(erip, eri_check_link,
2962 			    ERI_P_FAULT_TIMER);
2963 	}
2964 
2965 	ERI_DEBUG_MSG2(erip, INTR_MSG,
2966 	    "eri_intr:May have Read Interrupt status:status %X", erisbits);
2967 
2968 rx_done_int:
2969 	if ((erisbits & (ERI_G_STATUS_TX_INT_ME)) ||
2970 	    (erip->tx_cur_cnt >= tx_interrupt_rate)) {
2971 		mutex_enter(&erip->xmitlock);
2972 		erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
2973 		    ETX_COMPLETION_MASK);
2974 
2975 		macupdate |= eri_reclaim(erip, erip->tx_completion);
2976 		mutex_exit(&erip->xmitlock);
2977 	}
2978 
2979 	if (erisbits & ERI_G_STATUS_RX_DONE) {
2980 		volatile struct	rmd	*rmdp, *rmdpbase;
2981 		volatile uint32_t rmdi;
2982 		uint8_t loop_limit = 0x20;
2983 		uint64_t flags;
2984 		uint32_t rmdmax_mask = erip->rmdmax_mask;
2985 
2986 		rmdpbase = erip->rmdp;
2987 		rmdi = erip->rx_completion;
2988 		rmdp = rmdpbase + rmdi;
2989 
2990 		/*
2991 		 * Sync RMD before looking at it.
2992 		 */
2993 		ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
2994 		    DDI_DMA_SYNC_FORCPU);
2995 		/*
2996 		 * Loop through each RMD.
2997 		 */
2998 
2999 		flags = GET_RMD_FLAGS(rmdp);
3000 		while (((flags & ERI_RMD_OWN) == 0) && (loop_limit)) {
3001 			/* process one packet */
3002 			mp = eri_read_dma(erip, rmdp, rmdi, flags);
3003 			rmdi =  (rmdi + 1) & rmdmax_mask;
3004 			rmdp = rmdpbase + rmdi;
3005 
3006 			if (mp != NULL) {
3007 				*tail = mp;
3008 				tail = &mp->b_next;
3009 			}
3010 
3011 			/*
3012 			 * The ERI RCV DMA fetches or updates four descriptors
3013 			 * at a time. Also, we don't want to update the
3014 			 * descriptor batch we just received a packet on. So we
3015 			 * update descriptors once every 4 packets, and we
3016 			 * update the group of 4 after the current batch.
3017 			 */
3018 
3019 			if (!(rmdi % 4)) {
3020 				if (eri_overflow_reset &&
3021 				    (GET_GLOBREG(status_alias) &
3022 				    ERI_G_STATUS_NONFATAL_ERR)) {
3023 					loop_limit = 1;
3024 				} else {
3025 					erip->rx_kick =
3026 					    (rmdi + ERI_RPENDING - 4) &
3027 					    rmdmax_mask;
3028 					PUT_ERXREG(rx_kick, erip->rx_kick);
3029 				}
3030 			}
3031 
3032 			/*
3033 			 * Sync the next RMD before looking at it.
3034 			 */
3035 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3036 			    DDI_DMA_SYNC_FORCPU);
3037 			flags = GET_RMD_FLAGS(rmdp);
3038 			loop_limit--;
3039 		}
3040 		erip->rx_completion = rmdi;
3041 	}
3042 
3043 	erip->wantw = B_FALSE;
3044 
3045 	mutex_exit(&erip->intrlock);
3046 
3047 	if (head)
3048 		mac_rx(erip->mh, NULL, head);
3049 
3050 	if (macupdate)
3051 		mac_tx_update(erip->mh);
3052 
3053 	if (linkupdate != LINK_STATE_UNKNOWN)
3054 		mac_link_update(erip->mh, linkupdate);
3055 
3056 	return (serviced);
3057 }
3058 
3059 /*
3060  * Handle interrupts for fatal errors
3061  * Need reinitialization.
3062  */
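/* Error bits in the PCI config-space Status register. */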
3063 #define	PCI_DATA_PARITY_REP	(1 << 8)
3064 #define	PCI_SING_TARGET_ABORT	(1 << 11)
3065 #define	PCI_RCV_TARGET_ABORT	(1 << 12)
3066 #define	PCI_RCV_MASTER_ABORT	(1 << 13)
3067 #define	PCI_SING_SYSTEM_ERR	(1 << 14)
3068 #define	PCI_DATA_PARITY_ERR	(1 << 15)
3069 
3070 /* called with intrlock held */
3071 static void
3072 eri_fatal_err(struct eri *erip, uint32_t erisbits)
3073 {
3074 	uint16_t	pci_status;
3075 	uint32_t	pci_error_int = 0;
3076 
3077 	if (erisbits & ERI_G_STATUS_RX_TAG_ERR) {
3078 		erip->rx_reset_issued = 1;
3079 		HSTAT(erip, rxtag_err);
3080 	} else {
3081 		erip->global_reset_issued = 1;
3082 		if (erisbits & ERI_G_STATUS_BUS_ERR_INT) {
3083 			pci_error_int = 1;
3084 			HSTAT(erip, pci_error_int);
3085 		} else if (erisbits & ERI_G_STATUS_PERR_INT) {
3086 			HSTAT(erip, parity_error);
3087 		} else {
3088 			HSTAT(erip, unknown_fatal);
3089 		}
3090 	}
3091 
3092 	/*
3093 	 * PCI bus error
3094 	 */
3095 	if (pci_error_int && erip->pci_config_handle) {
3096 		pci_status = pci_config_get16(erip->pci_config_handle,
3097 		    PCI_CONF_STAT);
3098 		ERI_DEBUG_MSG2(erip, FATAL_ERR_MSG, "Bus Error Status %x",
3099 		    pci_status);
3100 		if (pci_status & PCI_DATA_PARITY_REP)
3101 			HSTAT(erip, pci_data_parity_err);
3102 		if (pci_status & PCI_SING_TARGET_ABORT)
3103 			HSTAT(erip, pci_signal_target_abort);
3104 		if (pci_status & PCI_RCV_TARGET_ABORT)
3105 			HSTAT(erip, pci_rcvd_target_abort);
3106 		if (pci_status & PCI_RCV_MASTER_ABORT)
3107 			HSTAT(erip, pci_rcvd_master_abort);
3108 		if (pci_status & PCI_SING_SYSTEM_ERR)
3109 			HSTAT(erip, pci_signal_system_err);
3110 		if (pci_status & PCI_DATA_PARITY_ERR)
3111 			HSTAT(erip, pci_signal_system_err);
3112 		/*
3113 		 * clear it by writing the value that was read back.
3114 		 */
3115 		pci_config_put16(erip->pci_config_handle, PCI_CONF_STAT,
3116 		    pci_status);
3117 	}
3118 }
3119 
3120 /*
3121  * Handle interrupts regarding non-fatal events.
3122  * TXMAC, RXMAC and MACCTL events
3123  */
3124 static void
3125 eri_nonfatal_err(struct eri *erip, uint32_t erisbits)
3126 {
3127 
3128 	uint32_t	txmac_sts, rxmac_sts, macctl_sts, pause_time;
3129 
3130 #ifdef ERI_PM_WORKAROUND
3131 	if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
3132 	    PCI_PM_IDLESPEED_NONE) == DDI_SUCCESS)
3133 		erip->stats.pmcap = ERI_PMCAP_NONE;
3134 #endif
3135 
3136 	if (erisbits & ERI_G_STATUS_TX_MAC_INT) {
3137 		txmac_sts = GET_MACREG(txsts);
3138 		if (txmac_sts & BMAC_TXSTS_TX_URUN) {
3139 			erip->linkcheck = 1;
3140 			HSTAT(erip, txmac_urun);
3141 			HSTAT(erip, oerrors);
3142 		}
3143 
3144 		if (txmac_sts & BMAC_TXSTS_MAXPKT_ERR) {
3145 			erip->linkcheck = 1;
3146 			HSTAT(erip, txmac_maxpkt_err);
3147 			HSTAT(erip, oerrors);
3148 		}
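		/*
		 * An *_EXP status bit means a 16-bit hardware counter
		 * wrapped, so credit a full 2^16.
		 */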
3149 		if (txmac_sts & BMAC_TXSTS_NCC_EXP) {
3150 			erip->stats.collisions += 0x10000;
3151 		}
3152 
3153 		if (txmac_sts & BMAC_TXSTS_ECC_EXP) {
3154 			erip->stats.excessive_coll += 0x10000;
3155 		}
3156 
3157 		if (txmac_sts & BMAC_TXSTS_LCC_EXP) {
3158 			erip->stats.late_coll += 0x10000;
3159 		}
3160 
3161 		if (txmac_sts & BMAC_TXSTS_FCC_EXP) {
3162 			erip->stats.first_coll += 0x10000;
3163 		}
3164 
3165 		if (txmac_sts & BMAC_TXSTS_DEFER_EXP) {
3166 			HSTAT(erip, defer_timer_exp);
3167 		}
3168 
3169 		if (txmac_sts & BMAC_TXSTS_PEAK_EXP) {
3170 			erip->stats.peak_attempt_cnt += 0x100;
3171 		}
3172 	}
3173 
3174 	if (erisbits & ERI_G_STATUS_RX_NO_BUF) {
3175 		ERI_DEBUG_MSG1(erip, NONFATAL_MSG, "rx dropped/no free desc");
3176 
3177 		if (eri_overflow_reset)
3178 			erip->linkcheck = 1;
3179 
3180 		HSTAT(erip, no_free_rx_desc);
3181 		HSTAT(erip, ierrors);
3182 	}
3183 	if (erisbits & ERI_G_STATUS_RX_MAC_INT) {
3184 		rxmac_sts = GET_MACREG(rxsts);
3185 		if (rxmac_sts & BMAC_RXSTS_RX_OVF) {
3186 #ifndef ERI_RMAC_HANG_WORKAROUND
3187 			eri_stop_timer(erip);   /* acquire linklock */
3188 			erip->check_rmac_hang++;
3189 			erip->check2_rmac_hang = 0;
3190 			erip->rxfifo_wr_ptr = GET_ERXREG(rxfifo_wr_ptr);
3191 			erip->rxfifo_rd_ptr = GET_ERXREG(rxfifo_rd_ptr);
3192 
3193 			ERI_DEBUG_MSG5(erip, NONFATAL_MSG,
3194 			    "overflow intr %d: %8x wr:%2x rd:%2x",
3195 			    erip->check_rmac_hang,
3196 			    GET_MACREG(macsm),
3197 			    GET_ERXREG(rxfifo_wr_ptr),
3198 			    GET_ERXREG(rxfifo_rd_ptr));
3199 
3200 			eri_start_timer(erip, eri_check_link,
3201 			    ERI_CHECK_HANG_TIMER);
3202 #endif
3203 			if (eri_overflow_reset)
3204 				erip->linkcheck = 1;
3205 
3206 			HSTAT(erip, rx_overflow);
3207 			HSTAT(erip, ierrors);
3208 		}
3209 
3210 		if (rxmac_sts & BMAC_RXSTS_ALE_EXP) {
3211 			erip->stats.rx_align_err += 0x10000;
3212 			erip->stats.ierrors += 0x10000;
3213 		}
3214 
3215 		if (rxmac_sts & BMAC_RXSTS_CRC_EXP) {
3216 			erip->stats.rx_crc_err += 0x10000;
3217 			erip->stats.ierrors += 0x10000;
3218 		}
3219 
3220 		if (rxmac_sts & BMAC_RXSTS_LEN_EXP) {
3221 			erip->stats.rx_length_err += 0x10000;
3222 			erip->stats.ierrors += 0x10000;
3223 		}
3224 
3225 		if (rxmac_sts & BMAC_RXSTS_CVI_EXP) {
3226 			erip->stats.rx_code_viol_err += 0x10000;
3227 			erip->stats.ierrors += 0x10000;
3228 		}
3229 	}
3230 
3231 	if (erisbits & ERI_G_STATUS_MAC_CTRL_INT) {
3232 
3233 		macctl_sts = GET_MACREG(macctl_sts);
3234 		if (macctl_sts & ERI_MCTLSTS_PAUSE_RCVD) {
3235 			pause_time = ((macctl_sts &
3236 			    ERI_MCTLSTS_PAUSE_TIME) >> 16);
3237 			ERI_DEBUG_MSG2(erip, NONFATAL_MSG,
3238 			    "PAUSE Received. pause time = %X slot_times",
3239 			    pause_time);
3240 			HSTAT(erip, pause_rxcount);
3241 			erip->stats.pause_time_count += pause_time;
3242 		}
3243 
3244 		if (macctl_sts & ERI_MCTLSTS_PAUSE_STATE) {
3245 			HSTAT(erip, pause_oncount);
3246 			erip->stats.pausing = 1;
3247 		}
3248 
3249 		if (macctl_sts & ERI_MCTLSTS_NONPAUSE) {
3250 			HSTAT(erip, pause_offcount);
3251 			erip->stats.pausing = 0;
3252 		}
3253 	}
3254 
3255 }
3256 
3257 /*
3258  * If this is the first init, do not bother to save the
3259  * counters.
3260  */
3261 static void
3262 eri_savecntrs(struct eri *erip)
3263 {
3264 	uint32_t	fecnt, aecnt, lecnt, rxcv;
3265 	uint32_t	ltcnt, excnt, fccnt;
3266 
3267 	/* XXX What all gets added in ierrors and oerrors? */
3268 	fecnt = GET_MACREG(fecnt);
3269 	HSTATN(erip, rx_crc_err, fecnt);
3270 	PUT_MACREG(fecnt, 0);
3271 
3272 	aecnt = GET_MACREG(aecnt);
3273 	HSTATN(erip, rx_align_err, aecnt);
3274 	PUT_MACREG(aecnt, 0);
3275 
3276 	lecnt = GET_MACREG(lecnt);
3277 	HSTATN(erip, rx_length_err, lecnt);
3278 	PUT_MACREG(lecnt, 0);
3279 
3280 	rxcv = GET_MACREG(rxcv);
3281 	HSTATN(erip, rx_code_viol_err, rxcv);
3282 	PUT_MACREG(rxcv, 0);
3283 
3284 	ltcnt = GET_MACREG(ltcnt);
3285 	HSTATN(erip, late_coll, ltcnt);
3286 	PUT_MACREG(ltcnt, 0);
3287 
3288 	erip->stats.collisions += (GET_MACREG(nccnt) + ltcnt);
3289 	PUT_MACREG(nccnt, 0);
3290 
3291 	excnt = GET_MACREG(excnt);
3292 	HSTATN(erip, excessive_coll, excnt);
3293 	PUT_MACREG(excnt, 0);
3294 
3295 	fccnt = GET_MACREG(fccnt);
3296 	HSTATN(erip, first_coll, fccnt);
3297 	PUT_MACREG(fccnt, 0);
3298 
3299 	/*
3300 	 * Do not add code violations to input errors.
3301 	 * They are already counted in CRC errors
3302 	 */
3303 	HSTATN(erip, ierrors, (fecnt + aecnt + lecnt));
3304 	HSTATN(erip, oerrors, (ltcnt + excnt));
3305 }
3306 
3307 mblk_t *
3308 eri_allocb_sp(size_t size)
3309 {
3310 	mblk_t  *mp;
3311 
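	/*
	 * Reserve 128 bytes of headroom, then align the data start to a
	 * burst-size boundary.
	 */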
3312 	size += 128;
3313 	if ((mp = allocb(size + 3 * ERI_BURSTSIZE, BPRI_HI)) == NULL) {
3314 		return (NULL);
3315 	}
3316 	mp->b_wptr += 128;
3317 	mp->b_wptr = (uint8_t *)ROUNDUP2(mp->b_wptr, ERI_BURSTSIZE);
3318 	mp->b_rptr = mp->b_wptr;
3319 
3320 	return (mp);
3321 }
3322 
3323 mblk_t *
3324 eri_allocb(size_t size)
3325 {
3326 	mblk_t  *mp;
3327 
3328 	if ((mp = allocb(size + 3 * ERI_BURSTSIZE, BPRI_HI)) == NULL) {
3329 		return (NULL);
3330 	}
3331 	mp->b_wptr = (uint8_t *)ROUNDUP2(mp->b_wptr, ERI_BURSTSIZE);
3332 	mp->b_rptr = mp->b_wptr;
3333 
3334 	return (mp);
3335 }
3336 
3337 /*
3338  * Hardware Dependent Functions
3339  * New Section.
3340  */
3341 
3342 /* <<<<<<<<<<<<<<<< Fast Ethernet PHY Bit Bang Operations >>>>>>>>>>>>>>>>>> */
3343 
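/*
 * Clock one bit out on the bit-bang MIF interface: present the data bit,
 * then pulse the management clock from low to high.
 */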
3344 static void
3345 send_bit(struct eri *erip, uint32_t x)
3346 {
3347 	PUT_MIFREG(mif_bbdata, x);
3348 	PUT_MIFREG(mif_bbclk, ERI_BBCLK_LOW);
3349 	PUT_MIFREG(mif_bbclk, ERI_BBCLK_HIGH);
3350 }
3351 
3352 /*
3353  * To read the MII register bits according to the IEEE Standard
3354  */
3355 static uint32_t
3356 get_bit_std(struct eri *erip)
3357 {
3358 	uint32_t	x;
3359 
3360 	PUT_MIFREG(mif_bbclk, ERI_BBCLK_LOW);
3361 	drv_usecwait(1);	/* wait >330 ns for stable data */
3362 	if (param_transceiver == INTERNAL_XCVR)
3363 		x = (GET_MIFREG(mif_cfg) & ERI_MIF_CFGM0) ? 1 : 0;
3364 	else
3365 		x = (GET_MIFREG(mif_cfg) & ERI_MIF_CFGM1) ? 1 : 0;
3366 	PUT_MIFREG(mif_bbclk, ERI_BBCLK_HIGH);
3367 	return (x);
3368 }
3369 
3370 #define	SEND_BIT(x)		send_bit(erip, x)
3371 #define	GET_BIT_STD(x)		x = get_bit_std(erip)
3372 
3373 
3374 static void
3375 eri_bb_mii_write(struct eri *erip, uint8_t regad, uint16_t data)
3376 {
3377 	uint8_t	phyad;
3378 	int		i;
3379 
3380 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
3381 	phyad = erip->phyad;
3382 	(void) eri_bb_force_idle(erip);
3383 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
3384 	SEND_BIT(0); SEND_BIT(1);	/* <OP> */
3385 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
3386 		SEND_BIT((phyad >> i) & 1);
3387 	}
3388 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
3389 		SEND_BIT((regad >> i) & 1);
3390 	}
3391 	SEND_BIT(1); SEND_BIT(0);	/* <TA> */
3392 	for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
3393 		SEND_BIT((data >> i) & 1);
3394 	}
3395 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
3396 }
3397 
3398 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
3399 static uint32_t
3400 eri_bb_mii_read(struct eri *erip, uint8_t regad, uint16_t *datap)
3401 {
3402 	uint8_t	phyad;
3403 	int	i;
3404 	uint32_t	x;
3405 	uint32_t	y;
3406 
3407 	*datap = 0;
3408 
3409 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
3410 	phyad = erip->phyad;
3411 	(void) eri_bb_force_idle(erip);
3412 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
3413 	SEND_BIT(1); SEND_BIT(0);	/* <OP> */
3414 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
3415 		SEND_BIT((phyad >> i) & 1);
3416 	}
3417 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
3418 		SEND_BIT((regad >> i) & 1);
3419 	}
3420 
3421 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
3422 
3423 	GET_BIT_STD(x);
3424 	GET_BIT_STD(y);		/* <TA> */
3425 	for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
3426 		GET_BIT_STD(x);
3427 		*datap += (x << i);
3428 	}
3429 	/* Kludge to get the Transceiver out of hung mode */
3430 	/* XXX: Test if this is still needed */
3431 	GET_BIT_STD(x);
3432 	GET_BIT_STD(x);
3433 	GET_BIT_STD(x);
3434 
3435 	return (y);
3436 }
3437 
3438 static void
3439 eri_bb_force_idle(struct eri *erip)
3440 {
3441 	int		i;
3442 
3443 	for (i = 0; i < 33; i++) {
3444 		SEND_BIT(1);
3445 	}
3446 }
3447 
3448 /* <<<<<<<<<<<<<<<<<<<<End of Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
3449 
3450 
3451 /* <<<<<<<<<<<<< Frame Register used for MII operations >>>>>>>>>>>>>>>>>>>> */
3452 
3453 #ifdef ERI_FRM_DEBUG
3454 int frame_flag = 0;
3455 #endif
3456 
3457 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
3458 static uint32_t
3459 eri_mii_read(struct eri	*erip, uint8_t regad, uint16_t *datap)
3460 {
3461 	uint32_t frame;
3462 	uint8_t phyad;
3463 
3464 	if (param_transceiver == NO_XCVR)
3465 		return (1);	/* No xcvr present */
3466 
3467 	if (!erip->frame_enable)
3468 		return (eri_bb_mii_read(erip, regad, datap));
3469 
3470 	phyad = erip->phyad;
3471 #ifdef ERI_FRM_DEBUG
3472 	if (!frame_flag) {
3473 		eri_errror(erip->dip, "Frame Register used for MII");
3474 		frame_flag = 1;
3475 	}
3476 #endif
3477 	ERI_DEBUG_MSG3(erip, FRM_MSG,
3478 	    "Frame Reg :mii_read: phyad = %X reg = %X ", phyad, regad);
3479 
3480 	PUT_MIFREG(mif_frame, ERI_MIF_FRREAD |
3481 	    (phyad << ERI_MIF_FRPHYAD_SHIFT) |
3482 	    (regad << ERI_MIF_FRREGAD_SHIFT));
3483 	MIF_ERIDELAY(300,  phyad, regad);
3484 	frame = GET_MIFREG(mif_frame);
3485 	if ((frame & ERI_MIF_FRTA0) == 0) {
3486 		return (1);
3487 	} else {
3488 		*datap = (uint16_t)(frame & ERI_MIF_FRDATA);
3489 		return (0);
3490 	}
3491 
3492 }
3493 
3494 static void
3495 eri_mii_write(struct eri *erip, uint8_t regad, uint16_t data)
3496 {
3497 	uint8_t	phyad;
3498 
3499 	if (!erip->frame_enable) {
3500 		eri_bb_mii_write(erip, regad, data);
3501 		return;
3502 	}
3503 
3504 	phyad = erip->phyad;
3505 
3506 	PUT_MIFREG(mif_frame, (ERI_MIF_FRWRITE |
3507 	    (phyad << ERI_MIF_FRPHYAD_SHIFT) |
3508 	    (regad << ERI_MIF_FRREGAD_SHIFT) | data));
3509 	MIF_ERIDELAY(300,  phyad, regad);
3510 	(void) GET_MIFREG(mif_frame);
3511 }
3512 
3513 
3514 /* <<<<<<<<<<<<<<<<<	PACKET TRANSMIT FUNCTIONS	>>>>>>>>>>>>>>>>>>>> */
3515 
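/*
 * True when a buffer of 'size' bytes at offset 'i' crosses a
 * 'pagesize' boundary.
 */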
3516 #define	ERI_CROSS_PAGE_BOUNDRY(i, size, pagesize) \
3517 	(((i) & (pagesize)) != (((i) + (size)) & (pagesize)))
3518 
3519 /*
3520  * Send a single mblk.  Returns B_TRUE if the packet is sent, or disposed of
3521  * by freemsg.  Returns B_FALSE if the packet was not sent or queued, and
3522  * should be retried later (due to tx resource exhaustion.)
3523  */
3524 static boolean_t
3525 eri_send_msg(struct eri *erip, mblk_t *mp)
3526 {
3527 	volatile struct	eri_tmd	*tmdp = NULL;
3528 	volatile struct	eri_tmd	*tbasep = NULL;
3529 	mblk_t		*nmp;
3530 	uint32_t	len = 0, len_msg = 0, xover_len = TX_STREAM_MIN;
3531 	uint32_t	nmblks = 0;
3532 	uint32_t	i, j;
3533 	uint64_t	int_me = 0;
3534 	uint_t		tmdcsum = 0;
3535 	uint_t		start_offset = 0;
3536 	uint_t		stuff_offset = 0;
3537 	uint_t		flags = 0;
3538 	boolean_t	macupdate = B_FALSE;
3539 
3540 	caddr_t	ptr;
3541 	uint32_t	offset;
3542 	uint64_t	ctrl;
3543 	uint32_t	count;
3544 	uint32_t	flag_dma;
3545 	ddi_dma_cookie_t	c;
3546 
3547 	if (!param_linkup) {
3548 		freemsg(mp);
3549 		HSTAT(erip, tnocar);
3550 		HSTAT(erip, oerrors);
3551 		return (B_TRUE);
3552 	}
3553 
3554 	nmp = mp;
3555 
3556 #ifdef ERI_HWCSUM
3557 	hcksum_retrieve(mp, NULL, NULL, &start_offset, &stuff_offset,
3558 	    NULL, NULL, &flags);
3559 
3560 	if (flags & HCK_PARTIALCKSUM) {
3561 		if (get_ether_type(mp->b_rptr) == ETHERTYPE_VLAN) {
3562 			start_offset += ETHERHEADER_SIZE + 4;
3563 			stuff_offset += ETHERHEADER_SIZE + 4;
3564 		} else {
3565 			start_offset += ETHERHEADER_SIZE;
3566 			stuff_offset += ETHERHEADER_SIZE;
3567 		}
3568 		tmdcsum = ERI_TMD_CSENABL;
3569 	}
3570 #endif /* ERI_HWCSUM */
3571 	while (nmp != NULL) {
3572 		ASSERT(nmp->b_wptr >= nmp->b_rptr);
3573 		nmblks++; /* # of mbs */
3574 		nmp = nmp->b_cont;
3575 	}
3576 	len_msg = msgsize(mp);
3577 
3578 	/*
3579 	 * update MIB II statistics
3580 	 */
3581 	BUMP_OutNUcast(erip, mp->b_rptr);
3582 
3583 /*
3584  * 	----------------------------------------------------------------------
3585  *	Here we deal with 3 cases:
3586  * 	1. pkt has exactly one mblk
3587  * 	2. pkt has exactly two mblks
3588  * 	3. pkt has more than 2 mblks. Since this almost
3589  *	   never happens, we copy all of them into
3590  *	   a msg with one mblk.
3591  * 	For each mblk in the message, we allocate a tmd and
3592  * 	figure out the tmd index and tmblkp index.
3593  * 	----------------------------------------------------------------------
3594  */
3595 	if (nmblks > 2) { /* more than 2 mbs */
3596 		if ((nmp = eri_allocb(len_msg)) == NULL) {
3597 			HSTAT(erip, allocbfail);
3598 			HSTAT(erip, noxmtbuf);
3599 			freemsg(mp);
3600 			return (B_TRUE); /* bad case */
3601 		}
3602 		mcopymsg(mp, nmp->b_rptr);
3603 		nmp->b_wptr = nmp->b_rptr + len_msg;
3604 		mp = nmp;
3605 		nmblks = 1; /* make it one mb */
3606 	} else
3607 		nmp = mp;
3608 
3609 	mutex_enter(&erip->xmitlock);
3610 
3611 	tbasep = erip->eri_tmdp;
3612 
3613 	/* Check if there are enough descriptors for this packet */
3614 	tmdp = erip->tnextp;
3615 
3616 	if (tmdp >=  erip->tcurp) /* check notmds */
3617 		i = tmdp - erip->tcurp;
3618 	else
3619 		i = tmdp + ERI_TPENDING - erip->tcurp;
3620 
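	/* Bail to the no-TMDs path unless at least four descriptors are free. */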
3621 	if (i > (ERI_TPENDING - 4))
3622 		goto notmds;
3623 
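	/*
	 * When the ring is at least half full, request a completion
	 * interrupt on every 8th send so that TMDs get reclaimed.
	 */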
3624 	if (i >= (ERI_TPENDING >> 1) && !(erip->starts & 0x7))
3625 		int_me = ERI_TMD_INTME;
3626 
3627 	for (j = 0; j < nmblks; j++) { /* for one or two mb cases */
3628 
3629 		/*LINTED E_PTRDIFF_OVERFLOW*/
3630 		len = nmp->b_wptr - nmp->b_rptr;
3631 		i = tmdp - tbasep; /* index */
3632 
3633 		if (len_msg < eri_tx_bcopy_max) { /* tb-all mb */
3634 
3635 			offset = (i * eri_tx_bcopy_max);
3636 			ptr = erip->tbuf_kaddr + offset;
3637 
3638 			mcopymsg(mp, ptr);
3639 
3640 #ifdef	ERI_HDX_BUG_WORKAROUND
3641 			if ((param_mode) || (eri_hdx_pad_enable == 0)) {
3642 				if (len_msg < ETHERMIN) {
3643 					bzero((ptr + len_msg),
3644 					    (ETHERMIN - len_msg));
3645 					len_msg = ETHERMIN;
3646 				}
3647 			} else {
3648 				if (len_msg < 97) {
3649 					bzero((ptr + len_msg), (97 - len_msg));
3650 					len_msg = 97;
3651 				}
3652 			}
3653 #endif
3654 			len = len_msg;
3655 			c.dmac_address = erip->tbuf_ioaddr + offset;
3656 			(void) ddi_dma_sync(erip->tbuf_handle,
3657 			    (off_t)offset, len_msg, DDI_DMA_SYNC_FORDEV);
3658 			nmblks = 1; /* exit this for loop */
3659 		} else if ((!j) && (len < eri_tx_bcopy_max)) { /* tb-1st mb */
3660 
3661 			offset = (i * eri_tx_bcopy_max);
3662 			ptr = erip->tbuf_kaddr + offset;
3663 
3664 			bcopy(mp->b_rptr, ptr, len);
3665 
3666 			c.dmac_address = erip->tbuf_ioaddr + offset;
3667 			(void) ddi_dma_sync(erip->tbuf_handle,
3668 			    (off_t)offset, len, DDI_DMA_SYNC_FORDEV);
3669 			nmp = mp->b_cont;
3670 			mp->b_cont = NULL;
3671 			freeb(mp);
3672 		} else if (erip->eri_dvmaxh != NULL) { /* fast DVMA */
3673 
3674 			dvma_kaddr_load(erip->eri_dvmaxh,
3675 			    (caddr_t)nmp->b_rptr, len, 2 * i, &c);
3676 			dvma_sync(erip->eri_dvmaxh,
3677 			    2 * i, DDI_DMA_SYNC_FORDEV);
3678 
3679 			erip->tmblkp[i] = nmp;
3680 			if (!j) {
3681 				nmp = mp->b_cont;
3682 				mp->b_cont = NULL;
3683 			}
3684 
3685 		} else { /* DDI DMA */
3686 			if (len < xover_len)
3687 				flag_dma = DDI_DMA_WRITE | DDI_DMA_CONSISTENT;
3688 			else
3689 				flag_dma = DDI_DMA_WRITE | DDI_DMA_STREAMING;
3690 
3691 			if (ddi_dma_addr_bind_handle(erip->ndmaxh[i],
3692 			    NULL, (caddr_t)nmp->b_rptr, len,
3693 			    flag_dma, DDI_DMA_DONTWAIT, NULL, &c,
3694 			    &count) != DDI_DMA_MAPPED) {
3695 				if (j) { /* free previous DMA resources */
3696 					i = erip->tnextp - tbasep;
3697 					if (erip->ndmaxh[i]) { /* DDI DMA */
3698 						(void) ddi_dma_unbind_handle(
3699 						    erip->ndmaxh[i]);
3700 						erip->ndmaxh[i] = NULL;
3701 						freeb(mp);
3702 					}
3703 					freeb(nmp);
3704 				} else {
3705 					freemsg(mp);
3706 				}
3707 
3708 				mutex_exit(&erip->xmitlock);
3709 				HSTAT(erip, noxmtbuf);
3710 
3711 				return (B_TRUE); /* bad case */
3712 			}
3713 
3714 			erip->tmblkp[i] = nmp;
3715 			if (!j) {
3716 				nmp = mp->b_cont;
3717 				mp->b_cont = NULL;
3718 			}
3719 		}
3720 
3721 		ctrl = 0;
3722 		/* first descr of packet */
3723 		if (!j) {
3724 			ctrl = ERI_TMD_SOP| int_me | tmdcsum |
3725 			    (start_offset << ERI_TMD_CSSTART_SHIFT) |
3726 			    (stuff_offset << ERI_TMD_CSSTUFF_SHIFT);
3727 		}
3728 
3729 		/* last descr of packet */
3730 		if ((j + 1) == nmblks) {
3731 			ctrl |= ERI_TMD_EOP;
3732 		}
3733 
3734 		PUT_TMD(tmdp, c, len, ctrl);
3735 		ERI_SYNCIOPB(erip, tmdp, sizeof (struct eri_tmd),
3736 		    DDI_DMA_SYNC_FORDEV);
3737 
3738 		tmdp = NEXTTMD(erip, tmdp);
3739 		erip->tx_cur_cnt++;
3740 	} /* for each nmp */
3741 
3742 	erip->tx_kick = tmdp - tbasep;
3743 	PUT_ETXREG(tx_kick, erip->tx_kick);
3744 	erip->tnextp = tmdp;
3745 
3746 	erip->starts++;
3747 
3748 	if (erip->tx_cur_cnt >= tx_interrupt_rate) {
3749 		erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
3750 		    ETX_COMPLETION_MASK);
3751 		macupdate |= eri_reclaim(erip, erip->tx_completion);
3752 	}
3753 	mutex_exit(&erip->xmitlock);
3754 
3755 	if (macupdate)
3756 		mac_tx_update(erip->mh);
3757 
3758 	return (B_TRUE);
3759 
3760 notmds:
3761 	HSTAT(erip, notmds);
3762 	erip->wantw = B_TRUE;
3763 
3764 	if (!erip->tx_int_me) {
3765 		PUT_GLOBREG(intmask, GET_GLOBREG(intmask) &
3766 		    ~(ERI_G_MASK_TX_INT_ME));
3767 		erip->tx_int_me = 1;
3768 	}
3769 
3770 	if (erip->tx_cur_cnt >= tx_interrupt_rate) {
3771 		erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
3772 		    ETX_COMPLETION_MASK);
3773 		macupdate |= eri_reclaim(erip, erip->tx_completion);
3774 	}
3775 
3776 	mutex_exit(&erip->xmitlock);
3777 
3778 	if (macupdate)
3779 		mac_tx_update(erip->mh);
3780 
3781 	return (B_FALSE);
3782 }
3783 
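/*
 * GLDv3 transmit entry point.  Packets arrive chained through b_next;
 * each message is unlinked and handed to eri_send_msg().  When that
 * fails (no descriptors), the unsent remainder of the chain is returned
 * to the MAC layer, which holds further transmits until mac_tx_update()
 * is called from the reclaim path.
 */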
3784 static mblk_t *
3785 eri_m_tx(void *arg, mblk_t *mp)
3786 {
3787 	struct eri *erip = arg;
3788 	mblk_t *next;
3789 
3790 	while (mp != NULL) {
3791 		next = mp->b_next;
3792 		mp->b_next = NULL;
3793 		if (!eri_send_msg(erip, mp)) {
3794 			mp->b_next = next;
3795 			break;
3796 		}
3797 		mp = next;
3798 	}
3799 
3800 	return (mp);
3801 }
3802 
3803 /*
3804  * Transmit completion reclaiming.
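 *
 * Walks the TMD ring from tcurp up to (but not including) the index the
 * hardware reports in tx_completion, releasing the DVMA or DDI DMA
 * mapping of each slot and freeing its mblk.  Returns B_TRUE when the
 * transmitter was blocked (wantw) and at least one descriptor was
 * reclaimed, i.e. when the caller should invoke mac_tx_update().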
3805  */
3806 static boolean_t
3807 eri_reclaim(struct eri *erip, uint32_t tx_completion)
3808 {
3809 	volatile struct	eri_tmd	*tmdp;
3810 	struct	eri_tmd	*tcomp;
3811 	struct	eri_tmd	*tbasep;
3812 	struct	eri_tmd	*tlimp;
3813 	mblk_t *bp;
3814 	int	i;
3815 	uint64_t	flags;
3816 	uint_t reclaimed = 0;
3817 
3818 	tbasep = erip->eri_tmdp;
3819 	tlimp = erip->eri_tmdlimp;
3820 
3821 	tmdp = erip->tcurp;
3822 	tcomp = tbasep + tx_completion; /* pointer to completion tmd */
3823 
3824 	/*
3825 	 * Loop through each TMD starting from tcurp and up to tcomp.
3826 	 */
3827 	while (tmdp != tcomp) {
3828 		flags = GET_TMD_FLAGS(tmdp);
3829 		if (flags & (ERI_TMD_SOP))
3830 			HSTAT(erip, opackets64);
3831 
3832 		HSTATN(erip, obytes64, (flags & ERI_TMD_BUFSIZE));
3833 
3834 		i = tmdp - tbasep;
3835 		bp = erip->tmblkp[i];
3836 
3837 		/* dvma handle case */
3838 
3839 		if (bp) {
3840 			if (erip->eri_dvmaxh) {
3841 				dvma_unload(erip->eri_dvmaxh, 2 * i,
3842 				    (uint_t)DONT_FLUSH);
3843 			} else {
3844 				/* dma handle case. */
3845 				(void) ddi_dma_unbind_handle(erip->ndmaxh[i]);
3846 			}
3847 
3848 			freeb(bp);
3849 			erip->tmblkp[i] = NULL;
3850 
3851 		}
3852 		tmdp = NEXTTMDP(tbasep, tlimp, tmdp);
3853 		reclaimed++;
3854 	}
3855 
3856 	erip->tcurp = tmdp;
3857 	erip->tx_cur_cnt -= reclaimed;
3858 
3859 	return (erip->wantw && reclaimed ? B_TRUE : B_FALSE);
3860 }
3861 
3862 
3863 /* <<<<<<<<<<<<<<<<<<<	PACKET RECEIVE FUNCTIONS	>>>>>>>>>>>>>>>>>>> */
3864 static mblk_t *
3865 eri_read_dma(struct eri *erip, volatile struct rmd *rmdp,
3866 	int rmdi, uint64_t flags)
3867 {
3868 	mblk_t	*bp, *nbp;
3869 	int	len;
3870 	uint_t ccnt;
3871 	ddi_dma_cookie_t	c;
3872 #ifdef ERI_RCV_CKSUM
3873 	ushort_t sum;
3874 #endif /* ERI_RCV_CKSUM */
3875 	mblk_t *retmp = NULL;
3876 
3877 	bp = erip->rmblkp[rmdi];
3878 	len = (flags & ERI_RMD_BUFSIZE) >> ERI_RMD_BUFSIZE_SHIFT;
3879 #ifdef	ERI_DONT_STRIP_CRC
3880 	len -= 4;
3881 #endif
3882 	/*
3883 	 * In the event of RX FIFO overflow error, ERI REV 1.0 ASIC can
3884 	 * corrupt packets following the descriptor corresponding to the
3885 	 * overflow. To detect the corrupted packets, we disable the
3886 	 * dropping of the "bad" packets at the MAC. The descriptor
3887 	 * then would have the "BAD" bit set. We drop the overflowing
3888 	 * packet and the packet following it. We could have done some sort
3889 	 * of checking to determine if the second packet was indeed bad
3890 	 * (using CRC or checksum) but it would be expensive in this
3891 	 * routine, since it is run in interrupt context.
3892 	 */
3893 	if ((flags & ERI_RMD_BAD) || (len < ETHERMIN) || (len > ETHERMAX+4)) {
3894 
3895 		HSTAT(erip, rx_bad_pkts);
3896 		if ((flags & ERI_RMD_BAD) == 0)
3897 			HSTAT(erip, ierrors);
3898 		if (len < ETHERMIN) {
3899 			HSTAT(erip, rx_runt);
3900 		} else if (len > ETHERMAX+4) {
3901 			HSTAT(erip, rx_toolong_pkts);
3902 		}
3903 		HSTAT(erip, drop);
3904 		UPDATE_RMD(rmdp);
3905 
3906 		ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3907 		    DDI_DMA_SYNC_FORDEV);
3908 		return (NULL);
3909 	}
3910 #ifdef  ERI_DONT_STRIP_CRC
3911 	{
3912 		uint32_t hw_fcs, tail_fcs;
3913 		/*
3914 		 * Since we don't let the hardware strip the CRC in
3915 		 * half-duplex mode, the driver needs to do it here;
3916 		 * this works around a hardware bug.
3917 		 */
3918 		bp->b_wptr = bp->b_rptr + ERI_FSTBYTE_OFFSET + len;
3919 		/*
3920 		 * Get the Checksum calculated by the hardware.
3921 		 */
3922 		hw_fcs = flags & ERI_RMD_CKSUM;
3923 		/*
3924 		 * Catch the case when the CRC starts on an odd
3925 		 * boundary.
3926 		 */
3927 		tail_fcs = bp->b_wptr[0] << 8 | bp->b_wptr[1];
3928 		tail_fcs += bp->b_wptr[2] << 8 | bp->b_wptr[3];
3929 		tail_fcs = (tail_fcs & 0xffff) + (tail_fcs >> 16);
3930 		if ((uintptr_t)(bp->b_wptr) & 1) {
3931 			tail_fcs = ((tail_fcs << 8) & 0xffff) | (tail_fcs >> 8);
3932 		}
3933 		hw_fcs += tail_fcs;
3934 		hw_fcs = (hw_fcs & 0xffff) + (hw_fcs >> 16);
3935 		hw_fcs &= 0xffff;
3936 		/*
3937 		 * Now we can replace what the hardware wrote, make believe
3938 		 * it got it right in the first place.
3939 		 */
3940 		flags = (flags & ~(uint64_t)ERI_RMD_CKSUM) | hw_fcs;
3941 	}
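	/*
	 * A worked example of the folding above, assuming b_wptr is
	 * even-aligned and the four trailing FCS bytes are de ad be ef:
	 * tail_fcs = 0xdead + 0xbeef = 0x19d9c, folded to 0x9d9d; this
	 * is then summed into hw_fcs and folded again, so the stored
	 * checksum looks as if the hardware had never seen the CRC bytes.
	 */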
3942 #endif
3943 	/*
3944 	 * Packet Processing
3945 	 * Once we get a packet bp, we try to allocate a new mblk, nbp,
3946 	 * to replace this one. If we succeed, we map it to the current
3947 	 * dma handle and update the descriptor with the new cookie. We
3948 	 * then put bp in our read service queue erip->ipq, if it exists,
3949 	 * or we just pass bp up to the streams expecting it.
3950 	 * If allocation of the new mblk fails, we implicitly drop the
3951 	 * current packet, i.e. we do not pass up the mblk and re-use it.
3952 	 * Re-mapping is not required.
3953 	 */
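	/*
	 * Two strategies follow: for small frames (len < eri_rx_bcopy_max)
	 * it is cheaper to bcopy the data into a fresh mblk and leave the
	 * existing DMA mapping in place; for larger frames we swap in a
	 * new ERI_BUFSIZE mblk and rebind the DMA handle instead, avoiding
	 * the copy.
	 */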
3954 
3955 	if (len < eri_rx_bcopy_max) {
3956 		if ((nbp = eri_allocb_sp(len + ERI_FSTBYTE_OFFSET))) {
3957 			(void) ddi_dma_sync(erip->ndmarh[rmdi], 0,
3958 			    len + ERI_FSTBYTE_OFFSET, DDI_DMA_SYNC_FORCPU);
3959 			DB_TYPE(nbp) = M_DATA;
3960 			bcopy(bp->b_rptr, nbp->b_rptr,
3961 			    len + ERI_FSTBYTE_OFFSET);
3962 			UPDATE_RMD(rmdp);
3963 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3964 			    DDI_DMA_SYNC_FORDEV);
3965 
3966 			/* Add the First Byte offset to the b_rptr */
3967 			nbp->b_rptr += ERI_FSTBYTE_OFFSET;
3968 			nbp->b_wptr = nbp->b_rptr + len;
3969 
3970 #ifdef ERI_RCV_CKSUM
3971 			sum = ~(uint16_t)(flags & ERI_RMD_CKSUM);
3972 			/*LINTED E_PTRDIFF_OVERFLOW*/
3973 			ERI_PROCESS_READ(erip, nbp, sum);
3974 #else
3975 			/*LINTED E_PTRDIFF_OVERFLOW*/
3976 			ERI_PROCESS_READ(erip, nbp);
3977 #endif
3978 			retmp = nbp;
3979 		} else {
3980 
3981 			/*
3982 			 * mblk allocation has failed. Re-use the old mblk for
3983 			 * the next packet. Re-mapping is not required since
3984 			 * the same mblk and dma cookie is to be used again.
3985 			 */
3986 			HSTAT(erip, ierrors);
3987 			HSTAT(erip, allocbfail);
3988 			HSTAT(erip, norcvbuf);
3989 
3990 			UPDATE_RMD(rmdp);
3991 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3992 			    DDI_DMA_SYNC_FORDEV);
3993 			ERI_DEBUG_MSG1(erip, RESOURCE_MSG, "allocb fail");
3994 		}
3995 	} else {
3996 		/* Use dma unmap/map */
3997 		if ((nbp = eri_allocb_sp(ERI_BUFSIZE))) {
3998 			/*
3999 			 * How do we harden this, especially if unbind
4000 			 * succeeds and then bind fails?
4001 			 * If unbind fails, we can leave without updating
4002 			 * the descriptor, but would it continue to work on
4003 			 * the next round?
4004 			 */
4005 			(void) ddi_dma_unbind_handle(erip->ndmarh[rmdi]);
4006 			(void) ddi_dma_addr_bind_handle(erip->ndmarh[rmdi],
4007 			    NULL, (caddr_t)nbp->b_rptr, ERI_BUFSIZE,
4008 			    DDI_DMA_READ | DDI_DMA_CONSISTENT,
4009 			    DDI_DMA_DONTWAIT, 0, &c, &ccnt);
4010 
4011 			erip->rmblkp[rmdi] = nbp;
4012 			PUT_RMD(rmdp, c);
4013 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
4014 			    DDI_DMA_SYNC_FORDEV);
4015 
4016 			/* Add the First Byte offset to the b_rptr */
4017 
4018 			bp->b_rptr += ERI_FSTBYTE_OFFSET;
4019 			bp->b_wptr = bp->b_rptr + len;
4020 
4021 #ifdef ERI_RCV_CKSUM
4022 			sum = ~(uint16_t)(flags & ERI_RMD_CKSUM);
4023 			/*LINTED E_PTRDIFF_OVERFLOW*/
4024 			ERI_PROCESS_READ(erip, bp, sum);
4025 #else
4026 			/*LINTED E_PTRDIFF_OVERFLOW*/
4027 			ERI_PROCESS_READ(erip, bp);
4028 #endif
4029 			retmp = bp;
4030 		} else {
4031 
4032 			/*
4033 			 * mblk allocation has failed. Re-use the old mblk for
4034 			 * the next packet. Re-mapping is not required since
4035 			 * the same mblk and dma cookie is to be used again.
4036 			 */
4037 			HSTAT(erip, ierrors);
4038 			HSTAT(erip, allocbfail);
4039 			HSTAT(erip, norcvbuf);
4040 
4041 			UPDATE_RMD(rmdp);
4042 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
4043 			    DDI_DMA_SYNC_FORDEV);
4044 			ERI_DEBUG_MSG1(erip, RESOURCE_MSG, "allocb fail");
4045 		}
4046 	}
4047 
4048 	return (retmp);
4049 }
4050 
4051 #define	LINK_STAT_DISPLAY_TIME	20
4052 
4053 static int
4054 eri_init_xfer_params(struct eri *erip)
4055 {
4056 	int	i;
4057 	dev_info_t *dip;
4058 
4059 	dip = erip->dip;
4060 
4061 	for (i = 0; i < A_CNT(param_arr); i++)
4062 		erip->param_arr[i] = param_arr[i];
4063 
4064 	erip->xmit_dma_mode = 0;
4065 	erip->rcv_dma_mode = 0;
4066 	erip->mifpoll_enable = mifpoll_enable;
4067 	erip->lance_mode_enable = lance_mode;
4068 	erip->frame_enable = 1;
4069 	erip->ngu_enable = ngu_enable;
4070 
4071 	if (!erip->g_nd && !eri_param_register(erip,
4072 	    erip->param_arr, A_CNT(param_arr))) {
4073 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
4074 		    param_reg_fail_msg);
4075 		return (-1);
4076 	}
4077 
4078 	/*
4079 	 * Set up the start-up values for user-configurable parameters
4080 	 * Get the values from the global variables first.
4081 	 * Use the MASK to limit the value to allowed maximum.
4082 	 */
4083 
4084 	param_transceiver = NO_XCVR;
4085 
4086 /*
4087  * The link speed may be forced to either 10 Mbps or 100 Mbps using the
4088  * property "transfer-speed". This may be done in OBP by using the command
4089  * "apply transfer-speed=<speed> <device>". The speed may be either 10 or 100.
4090  */
4091 	i = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "transfer-speed", 0);
4092 	if (i != 0) {
4093 		param_autoneg = 0;	/* force speed */
4094 		param_anar_100T4 = 0;
4095 		param_anar_10fdx = 0;
4096 		param_anar_10hdx = 0;
4097 		param_anar_100fdx = 0;
4098 		param_anar_100hdx = 0;
4099 		param_anar_asm_dir = 0;
4100 		param_anar_pause = 0;
4101 
4102 		if (i == 10)
4103 			param_anar_10hdx = 1;
4104 		else if (i == 100)
4105 			param_anar_100hdx = 1;
4106 	}
4107 
4108 	/*
4109 	 * Get the parameter values configured in .conf file.
4110 	 */
4111 	param_ipg1 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg1", ipg1) &
4112 	    ERI_MASK_8BIT;
4113 
4114 	param_ipg2 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg2", ipg2) &
4115 	    ERI_MASK_8BIT;
4116 
4117 	param_use_intphy = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4118 	    "use_int_xcvr", use_int_xcvr) & ERI_MASK_1BIT;
4119 
	/* fix: was mis-assigned to param_use_intphy; param_pace_size assumed */
4120 	param_pace_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4121 	    "pace_size", pace_size) & ERI_MASK_8BIT;
4122 
4123 	param_autoneg = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4124 	    "adv_autoneg_cap", adv_autoneg_cap) & ERI_MASK_1BIT;
4125 
4126 	param_autoneg = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4130 	    "adv_100T4_cap", adv_100T4_cap) & ERI_MASK_1BIT;
4131 
4132 	param_anar_100fdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4133 	    "adv_100fdx_cap", adv_100fdx_cap) & ERI_MASK_1BIT;
4134 
4135 	param_anar_100hdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4136 	    "adv_100hdx_cap", adv_100hdx_cap) & ERI_MASK_1BIT;
4137 
4138 	param_anar_10fdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4139 	    "adv_10fdx_cap", adv_10fdx_cap) & ERI_MASK_1BIT;
4140 
4141 	param_anar_10hdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4142 	    "adv_10hdx_cap", adv_10hdx_cap) & ERI_MASK_1BIT;
4143 
4144 	param_ipg0 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg0", ipg0) &
4145 	    ERI_MASK_8BIT;
4146 
4147 	param_intr_blank_time = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4148 	    "intr_blank_time", intr_blank_time) & ERI_MASK_8BIT;
4149 
4150 	param_intr_blank_packets = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4151 	    "intr_blank_packets", intr_blank_packets) & ERI_MASK_8BIT;
4152 
4153 	param_lance_mode = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4154 	    "lance_mode", lance_mode) & ERI_MASK_1BIT;
4155 
4156 	param_select_link = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4157 	    "select_link", select_link) & ERI_MASK_1BIT;
4158 
4159 	param_default_link = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4160 	    "default_link", default_link) & ERI_MASK_1BIT;
4161 
4162 	param_anar_asm_dir = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4163 	    "adv_asm_dir_cap", adv_pauseTX_cap) & ERI_MASK_1BIT;
4164 
4165 	param_anar_pause = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4166 	    "adv_pause_cap", adv_pauseRX_cap) & ERI_MASK_1BIT;
4167 
4168 	if (link_pulse_disabled)
4169 		erip->link_pulse_disabled = 1;
4170 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, 0, "link-pulse-disabled"))
4171 		erip->link_pulse_disabled = 1;
4172 
4173 	eri_statinit(erip);
4174 	return (0);
4175 
4176 }
4177 
4178 static void
4179 eri_process_ndd_ioctl(struct eri *erip, queue_t *wq, mblk_t *mp, int cmd)
4180 {
4181 
4182 	uint32_t old_ipg1, old_ipg2, old_use_int_xcvr, old_autoneg;
4183 	uint32_t old_100T4;
4184 	uint32_t old_100fdx, old_100hdx, old_10fdx, old_10hdx;
4185 	uint32_t old_ipg0, old_lance_mode;
4186 	uint32_t old_intr_blank_time, old_intr_blank_packets;
4187 	uint32_t old_asm_dir, old_pause;
4188 	uint32_t old_select_link, old_default_link;
4189 
4190 	switch (cmd) {
4191 	case ERI_ND_GET:
4192 
4193 		old_autoneg =	param_autoneg;
4194 		old_100T4 =	param_anar_100T4;
4195 		old_100fdx =	param_anar_100fdx;
4196 		old_100hdx =	param_anar_100hdx;
4197 		old_10fdx =	param_anar_10fdx;
4198 		old_10hdx =	param_anar_10hdx;
4199 		old_asm_dir =	param_anar_asm_dir;
4200 		old_pause =	param_anar_pause;
4201 
4202 		param_autoneg = old_autoneg & ~ERI_NOTUSR;
4203 		param_anar_100T4 = old_100T4 & ~ERI_NOTUSR;
4204 		param_anar_100fdx = old_100fdx & ~ERI_NOTUSR;
4205 		param_anar_100hdx = old_100hdx & ~ERI_NOTUSR;
4206 		param_anar_10fdx = old_10fdx & ~ERI_NOTUSR;
4207 		param_anar_10hdx = old_10hdx & ~ERI_NOTUSR;
4208 		param_anar_asm_dir = old_asm_dir & ~ERI_NOTUSR;
4209 		param_anar_pause = old_pause & ~ERI_NOTUSR;
4210 
4211 		if (!eri_nd_getset(wq, erip->g_nd, mp)) {
4212 			param_autoneg = old_autoneg;
4213 			param_anar_100T4 = old_100T4;
4214 			param_anar_100fdx = old_100fdx;
4215 			param_anar_100hdx = old_100hdx;
4216 			param_anar_10fdx = old_10fdx;
4217 			param_anar_10hdx = old_10hdx;
4218 			param_anar_asm_dir = old_asm_dir;
4219 			param_anar_pause = old_pause;
4220 			miocnak(wq, mp, 0, EINVAL);
4221 			return;
4222 		}
4223 		param_autoneg = old_autoneg;
4224 		param_anar_100T4 = old_100T4;
4225 		param_anar_100fdx = old_100fdx;
4226 		param_anar_100hdx = old_100hdx;
4227 		param_anar_10fdx = old_10fdx;
4228 		param_anar_10hdx = old_10hdx;
4229 		param_anar_asm_dir = old_asm_dir;
4230 		param_anar_pause = old_pause;
4231 
4232 		qreply(wq, mp);
4233 		break;
4234 
4235 	case ERI_ND_SET:
4236 		old_ipg0 = param_ipg0;
4237 		old_intr_blank_time = param_intr_blank_time;
4238 		old_intr_blank_packets = param_intr_blank_packets;
4239 		old_lance_mode = param_lance_mode;
4240 		old_ipg1 = param_ipg1;
4241 		old_ipg2 = param_ipg2;
4242 		old_use_int_xcvr = param_use_intphy;
4243 		old_autoneg = param_autoneg;
4244 		old_100T4 =	param_anar_100T4;
4245 		old_100fdx =	param_anar_100fdx;
4246 		old_100hdx =	param_anar_100hdx;
4247 		old_10fdx =	param_anar_10fdx;
4248 		old_10hdx =	param_anar_10hdx;
4249 		param_autoneg = 0xff;
4250 		old_asm_dir = param_anar_asm_dir;
4251 		param_anar_asm_dir = 0xff;
4252 		old_pause = param_anar_pause;
4253 		param_anar_pause = 0xff;
4254 		old_select_link = param_select_link;
4255 		old_default_link = param_default_link;
4256 
4257 		if (!eri_nd_getset(wq, erip->g_nd, mp)) {
4258 			param_autoneg = old_autoneg;
4259 			miocnak(wq, mp, 0, EINVAL);
4260 			return;
4261 		}
4262 
4263 		qreply(wq, mp);
4264 
4265 		if (param_autoneg != 0xff) {
4266 			ERI_DEBUG_MSG2(erip, NDD_MSG,
4267 			    "ndd_ioctl: new param_autoneg %d", param_autoneg);
4268 			param_linkup = 0;
4269 			erip->stats.link_up = LINK_STATE_DOWN;
4270 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4271 			(void) eri_init(erip);
4272 		} else {
4273 			param_autoneg = old_autoneg;
4274 			if ((old_use_int_xcvr != param_use_intphy) ||
4275 			    (old_default_link != param_default_link) ||
4276 			    (old_select_link != param_select_link)) {
4277 				param_linkup = 0;
4278 				erip->stats.link_up = LINK_STATE_DOWN;
4279 				erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4280 				(void) eri_init(erip);
4281 			} else if ((old_ipg1 != param_ipg1) ||
4282 			    (old_ipg2 != param_ipg2) ||
4283 			    (old_ipg0 != param_ipg0) ||
4284 			    (old_intr_blank_time != param_intr_blank_time) ||
4285 			    (old_intr_blank_packets !=
4286 			    param_intr_blank_packets) ||
4287 			    (old_lance_mode != param_lance_mode)) {
4288 				param_linkup = 0;
4289 				erip->stats.link_up = LINK_STATE_DOWN;
4290 				erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4291 				(void) eri_init(erip);
4292 			}
4293 		}
4294 		break;
4295 	}
4296 }
4297 
4298 
4299 static int
4300 eri_stat_kstat_update(kstat_t *ksp, int rw)
4301 {
4302 	struct eri *erip;
4303 	struct erikstat *erikp;
4304 	struct stats *esp;
4305 	boolean_t macupdate = B_FALSE;
4306 
4307 	erip = (struct eri *)ksp->ks_private;
4308 	erikp = (struct erikstat *)ksp->ks_data;
4309 
4310 	if (rw != KSTAT_READ)
4311 		return (EACCES);
4312 	/*
4313 	 * Update all the stats by reading all the counter registers.
4314 	 * Counter register stats are not updated till they overflow
4315 	 * and interrupt.
4316 	 */
4317 
4318 	mutex_enter(&erip->xmitlock);
4319 	if ((erip->flags & ERI_RUNNING) && (erip->flags & ERI_TXINIT)) {
4320 		erip->tx_completion =
4321 		    GET_ETXREG(tx_completion) & ETX_COMPLETION_MASK;
4322 		macupdate |= eri_reclaim(erip, erip->tx_completion);
4323 	}
4324 	mutex_exit(&erip->xmitlock);
4325 	if (macupdate)
4326 		mac_tx_update(erip->mh);
4327 
4328 	eri_savecntrs(erip);
4329 
4330 	esp = &erip->stats;
4331 
4332 	erikp->erik_txmac_maxpkt_err.value.ul = esp->txmac_maxpkt_err;
4333 	erikp->erik_defer_timer_exp.value.ul = esp->defer_timer_exp;
4334 	erikp->erik_peak_attempt_cnt.value.ul = esp->peak_attempt_cnt;
4335 	erikp->erik_tx_hang.value.ul	= esp->tx_hang;
4336 
4337 	erikp->erik_no_free_rx_desc.value.ul	= esp->no_free_rx_desc;
4338 
4339 	erikp->erik_rx_hang.value.ul		= esp->rx_hang;
4340 	erikp->erik_rx_length_err.value.ul	= esp->rx_length_err;
4341 	erikp->erik_rx_code_viol_err.value.ul	= esp->rx_code_viol_err;
4342 	erikp->erik_pause_rxcount.value.ul	= esp->pause_rxcount;
4343 	erikp->erik_pause_oncount.value.ul	= esp->pause_oncount;
4344 	erikp->erik_pause_offcount.value.ul	= esp->pause_offcount;
4345 	erikp->erik_pause_time_count.value.ul	= esp->pause_time_count;
4346 
4347 	erikp->erik_inits.value.ul		= esp->inits;
4348 	erikp->erik_jab.value.ul		= esp->jab;
4349 	erikp->erik_notmds.value.ul		= esp->notmds;
4350 	erikp->erik_allocbfail.value.ul		= esp->allocbfail;
4351 	erikp->erik_drop.value.ul		= esp->drop;
4352 	erikp->erik_rx_bad_pkts.value.ul	= esp->rx_bad_pkts;
4353 	erikp->erik_rx_inits.value.ul		= esp->rx_inits;
4354 	erikp->erik_tx_inits.value.ul		= esp->tx_inits;
4355 	erikp->erik_rxtag_err.value.ul		= esp->rxtag_err;
4356 	erikp->erik_parity_error.value.ul	= esp->parity_error;
4357 	erikp->erik_pci_error_int.value.ul	= esp->pci_error_int;
4358 	erikp->erik_unknown_fatal.value.ul	= esp->unknown_fatal;
4359 	erikp->erik_pci_data_parity_err.value.ul = esp->pci_data_parity_err;
4360 	erikp->erik_pci_signal_target_abort.value.ul =
4361 	    esp->pci_signal_target_abort;
4362 	erikp->erik_pci_rcvd_target_abort.value.ul =
4363 	    esp->pci_rcvd_target_abort;
4364 	erikp->erik_pci_rcvd_master_abort.value.ul =
4365 	    esp->pci_rcvd_master_abort;
4366 	erikp->erik_pci_signal_system_err.value.ul =
4367 	    esp->pci_signal_system_err;
4368 	erikp->erik_pci_det_parity_err.value.ul = esp->pci_det_parity_err;
4369 
4370 	erikp->erik_pmcap.value.ul = esp->pmcap;
4371 
4372 	return (0);
4373 }
4374 
4375 static void
4376 eri_statinit(struct eri *erip)
4377 {
4378 	struct	kstat	*ksp;
4379 	struct	erikstat	*erikp;
4380 
4381 	if ((ksp = kstat_create("eri", erip->instance, "driver_info", "net",
4382 	    KSTAT_TYPE_NAMED,
4383 	    sizeof (struct erikstat) / sizeof (kstat_named_t), 0)) == NULL) {
4384 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
4385 		    kstat_create_fail_msg);
4386 		return;
4387 	}
4388 
4389 	erip->ksp = ksp;
4390 	erikp = (struct erikstat *)(ksp->ks_data);
4391 	/*
4392 	 * Driver-private kstat variables.
4393 	 */
4394 
4395 	kstat_named_init(&erikp->erik_inits, "inits", KSTAT_DATA_ULONG);
4396 
4397 	kstat_named_init(&erikp->erik_txmac_maxpkt_err,	"txmac_maxpkt_err",
4398 	    KSTAT_DATA_ULONG);
4399 	kstat_named_init(&erikp->erik_defer_timer_exp, "defer_timer_exp",
4400 	    KSTAT_DATA_ULONG);
4401 	kstat_named_init(&erikp->erik_peak_attempt_cnt,	"peak_attempt_cnt",
4402 	    KSTAT_DATA_ULONG);
4403 	kstat_named_init(&erikp->erik_tx_hang, "tx_hang", KSTAT_DATA_ULONG);
4404 
4405 	kstat_named_init(&erikp->erik_no_free_rx_desc, "no_free_rx_desc",
4406 	    KSTAT_DATA_ULONG);
4407 	kstat_named_init(&erikp->erik_rx_hang, "rx_hang", KSTAT_DATA_ULONG);
4408 	kstat_named_init(&erikp->erik_rx_length_err, "rx_length_err",
4409 	    KSTAT_DATA_ULONG);
4410 	kstat_named_init(&erikp->erik_rx_code_viol_err,	"rx_code_viol_err",
4411 	    KSTAT_DATA_ULONG);
4412 
4413 	kstat_named_init(&erikp->erik_pause_rxcount, "pause_rcv_cnt",
4414 	    KSTAT_DATA_ULONG);
4415 
4416 	kstat_named_init(&erikp->erik_pause_oncount, "pause_on_cnt",
4417 	    KSTAT_DATA_ULONG);
4418 
4419 	kstat_named_init(&erikp->erik_pause_offcount, "pause_off_cnt",
4420 	    KSTAT_DATA_ULONG);
4421 	kstat_named_init(&erikp->erik_pause_time_count,	"pause_time_cnt",
4422 	    KSTAT_DATA_ULONG);
4423 
4424 	kstat_named_init(&erikp->erik_jab, "jabber", KSTAT_DATA_ULONG);
4425 	kstat_named_init(&erikp->erik_notmds, "no_tmds", KSTAT_DATA_ULONG);
4426 	kstat_named_init(&erikp->erik_allocbfail, "allocbfail",
4427 	    KSTAT_DATA_ULONG);
4428 
4429 	kstat_named_init(&erikp->erik_drop, "drop", KSTAT_DATA_ULONG);
4430 
4431 	kstat_named_init(&erikp->erik_rx_bad_pkts, "bad_pkts",
4432 	    KSTAT_DATA_ULONG);
4433 
4434 	kstat_named_init(&erikp->erik_rx_inits, "rx_inits", KSTAT_DATA_ULONG);
4435 
4436 	kstat_named_init(&erikp->erik_tx_inits, "tx_inits", KSTAT_DATA_ULONG);
4437 
4438 	kstat_named_init(&erikp->erik_rxtag_err, "rxtag_error",
4439 	    KSTAT_DATA_ULONG);
4440 
4441 	kstat_named_init(&erikp->erik_parity_error, "parity_error",
4442 	    KSTAT_DATA_ULONG);
4443 
4444 	kstat_named_init(&erikp->erik_pci_error_int, "pci_error_interrupt",
4445 	    KSTAT_DATA_ULONG);
4446 	kstat_named_init(&erikp->erik_unknown_fatal, "unknown_fatal",
4447 	    KSTAT_DATA_ULONG);
4448 	kstat_named_init(&erikp->erik_pci_data_parity_err,
4449 	    "pci_data_parity_err", KSTAT_DATA_ULONG);
4450 	kstat_named_init(&erikp->erik_pci_signal_target_abort,
4451 	    "pci_signal_target_abort", KSTAT_DATA_ULONG);
4452 	kstat_named_init(&erikp->erik_pci_rcvd_target_abort,
4453 	    "pci_rcvd_target_abort", KSTAT_DATA_ULONG);
4454 	kstat_named_init(&erikp->erik_pci_rcvd_master_abort,
4455 	    "pci_rcvd_master_abort", KSTAT_DATA_ULONG);
4456 	kstat_named_init(&erikp->erik_pci_signal_system_err,
4457 	    "pci_signal_system_err", KSTAT_DATA_ULONG);
4458 	kstat_named_init(&erikp->erik_pci_det_parity_err,
4459 	    "pci_det_parity_err", KSTAT_DATA_ULONG);
4460 
4461 	kstat_named_init(&erikp->erik_pmcap, "pmcap", KSTAT_DATA_ULONG);
4462 
4463 
4464 	ksp->ks_update = eri_stat_kstat_update;
4465 	ksp->ks_private = (void *) erip;
4466 	kstat_install(ksp);
4467 }
4468 
4469 
4470 /* <<<<<<<<<<<<<<<<<<<<<<< NDD SUPPORT FUNCTIONS	>>>>>>>>>>>>>>>>>>> */
4471 /*
4472  * ndd support functions to get/set parameters
4473  */
4474 /* Free the Named Dispatch Table by calling eri_nd_free */
4475 static void
4476 eri_param_cleanup(struct eri *erip)
4477 {
4478 	if (erip->g_nd)
4479 		(void) eri_nd_free(&erip->g_nd);
4480 }
4481 
4482 /*
4483  * Extracts the value from the eri parameter array and prints the
4484  * parameter value. cp points to the required parameter.
4485  */
4486 /* ARGSUSED */
4487 static int
4488 eri_param_get(queue_t *q, mblk_t *mp, caddr_t cp)
4489 {
4490 	param_t		*eripa = (void *)cp;
4491 	int		param_len = 1;
4492 	uint32_t	param_val;
4493 	mblk_t		*nmp;
4494 	int		ok;
4495 
4496 	param_val = eripa->param_val;
4497 	/*
4498 	 * Calculate space required in mblk.
4499 	 * Remember to include NULL terminator.
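	 * e.g. param_val == 100 iterates three times (100 -> 10 -> 1 -> 0),
	 * leaving param_len == 4: three digits plus the terminator.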
4500 	 */
4501 	do {
4502 		param_len++;
4503 		param_val /= 10;
4504 	} while (param_val);
4505 
4506 	ok = eri_mk_mblk_tail_space(mp, &nmp, param_len);
4507 	if (ok == 0) {
4508 		(void) sprintf((char *)nmp->b_wptr, "%d", eripa->param_val);
4509 		nmp->b_wptr += param_len;
4510 	}
4511 
4512 	return (ok);
4513 }
4514 
4515 /*
4516  * Check if there is space for p_val at the end of the mblk.
4517  * If not, allocate new 1k mblk.
4518  */
4519 static int
4520 eri_mk_mblk_tail_space(mblk_t *mp, mblk_t **nmp, size_t sz)
4521 {
4522 	mblk_t *tmp = mp;
4523 
4524 	while (tmp->b_cont)
4525 		tmp = tmp->b_cont;
4526 
4527 	/*LINTED E_BAD_PTR_CAST_ALIGN*/
4528 	if (MBLKTAIL(tmp) < sz) {
4529 		if ((tmp->b_cont = allocb(1024, BPRI_HI)) == NULL)
4530 			return (ENOMEM);
4531 		tmp = tmp->b_cont;
4532 	}
4533 	*nmp = tmp;
4534 	return (0);
4535 }
4536 
4537 /*
4538  * Register each element of the parameter array with the
4539  * named dispatch handler. Each element is loaded using
4540  * eri_nd_load()
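 *
 * The leading character of param_name encodes access, per the switch
 * below: '+' read-write, '-' read-only, '!' and '%' hidden from ndd.
 * A hypothetical param_arr entry might look like
 *
 *	{ 0, 1, 1, "+adv_autoneg_cap" }
 *
 * (min, max, current value, name - field order assumed here).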
4541  */
4542 static int
4543 eri_param_register(struct eri *erip, param_t *eripa, int cnt)
4544 {
4545 	/* cnt gives the number of elements */
4546 	/* present in the parameter array */
4547 
4548 	int i;
4549 
4550 	for (i = 0; i < cnt; i++, eripa++) {
4551 		pfi_t	setter = (pfi_t)eri_param_set;
4552 
4553 		switch (eripa->param_name[0]) {
4554 		case '+':	/* read-write */
4555 			setter = (pfi_t)eri_param_set;
4556 			break;
4557 
4558 		case '-':	/* read-only */
4559 			setter = NULL;
4560 			break;
4561 
4562 		case '!':	/* read-only, not displayed */
4563 		case '%':	/* read-write, not displayed */
4564 			continue;
4565 		}
4566 
4567 		if (!eri_nd_load(&erip->g_nd, eripa->param_name + 1,
4568 		    (pfi_t)eri_param_get, setter, (caddr_t)eripa)) {
4569 			(void) eri_nd_free(&erip->g_nd);
4570 			return (B_FALSE);
4571 		}
4572 	}
4573 
4574 	return (B_TRUE);
4575 }
4576 
4577 /*
4578  * Sets the eri parameter to the value in the param_register using
4579  * eri_nd_load().
4580  */
4581 /* ARGSUSED */
4582 static int
4583 eri_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp)
4584 {
4585 	char *end;
4586 	long new_value;
4587 	param_t	*eripa = (void *)cp;
4588 
4589 	if (ddi_strtol(value, &end, 10, &new_value) != 0)
4590 		return (EINVAL);
4591 	if (end == value || new_value < eripa->param_min ||
4592 	    new_value > eripa->param_max) {
4593 		return (EINVAL);
4594 	}
4595 	eripa->param_val = (uint32_t)new_value;
4596 	return (0);
4597 
4598 }
4599 
4600 /* Free the table pointed to by 'nd_pparam' */
4601 static void
4602 eri_nd_free(caddr_t *nd_pparam)
4603 {
4604 	ND	*nd;
4605 
4606 	if ((nd = (void *)(*nd_pparam)) != NULL) {
4607 		if (nd->nd_tbl)
4608 			kmem_free(nd->nd_tbl, nd->nd_size);
4609 		kmem_free(nd, sizeof (ND));
4610 		*nd_pparam = NULL;
4611 	}
4612 }
4613 
4614 static int
4615 eri_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp)
4616 {
4617 	int	err;
4618 	IOCP	iocp;
4619 	MBLKP	mp1;
4620 	ND	*nd;
4621 	NDE	*nde;
4622 	char	*valp;
4623 	size_t	avail;
4624 	mblk_t	*nmp;
4625 
4626 	if (!nd_param)
4627 		return (B_FALSE);
4628 
4629 	nd = (void *)nd_param;
4630 	iocp = (void *)mp->b_rptr;
4631 	if ((iocp->ioc_count == 0) || !(mp1 = mp->b_cont)) {
4632 		mp->b_datap->db_type = M_IOCACK;
4633 		iocp->ioc_count = 0;
4634 		iocp->ioc_error = EINVAL;
4635 		return (B_TRUE);
4636 	}
4637 	/*
4638 	 * NOTE - logic throughout nd_xxx assumes single data block for ioctl.
4639 	 *	However, existing code sends in some big buffers.
4640 	 */
4641 	avail = iocp->ioc_count;
4642 	if (mp1->b_cont) {
4643 		freemsg(mp1->b_cont);
4644 		mp1->b_cont = NULL;
4645 	}
4646 
4647 	mp1->b_datap->db_lim[-1] = '\0';	/* Force null termination */
4648 	valp = (char *)mp1->b_rptr;
4649 
4650 	for (nde = nd->nd_tbl; /* */; nde++) {
4651 		if (!nde->nde_name)
4652 			return (B_FALSE);
4653 		if (strcmp(nde->nde_name, valp) == 0)
4654 			break;
4655 	}
4656 	err = EINVAL;
4657 
4658 	while (*valp++)
4659 		;
4660 
4661 	if (!*valp || valp >= (char *)mp1->b_wptr)
4662 		valp = NULL;
4663 
4664 	switch (iocp->ioc_cmd) {
4665 	case ND_GET:
4666 	/*
4667 	 * (XXX) hack: "*valp" is size of user buffer for copyout. If result
4668 	 * of action routine is too big, free excess and return ioc_rval as buf
4669 	 * size needed.  Return as many mblocks as will fit, free the rest.  For
4670 	 * backward compatibility, assume size of orig ioctl buffer if "*valp"
4671 	 * bad or not given.
4672 	 */
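	/*
	 * Example with hypothetical numbers: if the user buffer holds
	 * avail = 10 bytes but the reply is size_out = 25 bytes, the
	 * code below sets ioc_rval = 25 so the caller learns the size
	 * needed, trims the message to fit, and re-terminates it.
	 */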
4673 		if (valp)
4674 			(void) ddi_strtol(valp, NULL, 10, (long *)&avail);
4675 		/* We overwrite the name/value with the reply data */
4676 		{
4677 			mblk_t *mp2 = mp1;
4678 
4679 			while (mp2) {
4680 				mp2->b_wptr = mp2->b_rptr;
4681 				mp2 = mp2->b_cont;
4682 			}
4683 		}
4684 		err = (*nde->nde_get_pfi)(q, mp1, nde->nde_data, iocp->ioc_cr);
4685 		if (!err) {
4686 			size_t	size_out;
4687 			ssize_t	excess;
4688 
4689 			iocp->ioc_rval = 0;
4690 
4691 			/* Tack on the null */
4692 			err = eri_mk_mblk_tail_space(mp1, &nmp, 1);
4693 			if (!err) {
4694 				*nmp->b_wptr++ = '\0';
4695 				size_out = msgdsize(mp1);
4696 				excess = size_out - avail;
4697 				if (excess > 0) {
4698 					iocp->ioc_rval = (unsigned)size_out;
4699 					size_out -= excess;
4700 					(void) adjmsg(mp1, -(excess + 1));
4701 					err = eri_mk_mblk_tail_space(mp1,
4702 					    &nmp, 1);
4703 					if (!err)
4704 						*nmp->b_wptr++ = '\0';
4705 					else
4706 						size_out = 0;
4707 				}
4708 
4709 			} else
4710 				size_out = 0;
4711 
4712 			iocp->ioc_count = size_out;
4713 		}
4714 		break;
4715 
4716 	case ND_SET:
4717 		if (valp) {
4718 			err = (*nde->nde_set_pfi)(q, mp1, valp,
4719 			    nde->nde_data, iocp->ioc_cr);
4720 			iocp->ioc_count = 0;
4721 			freemsg(mp1);
4722 			mp->b_cont = NULL;
4723 		}
4724 		break;
4725 	}
4726 
4727 	iocp->ioc_error = err;
4728 	mp->b_datap->db_type = M_IOCACK;
4729 	return (B_TRUE);
4730 }
4731 
4732 /*
4733  * Load 'name' into the named dispatch table pointed to by 'ndp'.
4734  * 'ndp' should be the address of a char pointer cell.  If the table
4735  * does not exist (*ndp == 0), a new table is allocated and 'ndp'
4736  * is stuffed.  If there is not enough space in the table for a new
4737  * entry, more space is allocated.
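 *
 * Typical use, as in eri_param_register() above:
 *
 *	if (!eri_nd_load(&erip->g_nd, eripa->param_name + 1,
 *	    (pfi_t)eri_param_get, setter, (caddr_t)eripa))
 *		(void) eri_nd_free(&erip->g_nd);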
4738  */
4739 static boolean_t
4740 eri_nd_load(caddr_t *nd_pparam, char *name, pfi_t get_pfi,
4741     pfi_t set_pfi, caddr_t data)
4742 {
4743 	ND	*nd;
4744 	NDE	*nde;
4745 
4746 	if (!nd_pparam)
4747 		return (B_FALSE);
4748 
4749 	if ((nd = (void *)(*nd_pparam)) == NULL) {
4750 		if ((nd = (ND *)kmem_zalloc(sizeof (ND), KM_NOSLEEP))
4751 		    == NULL)
4752 			return (B_FALSE);
4753 		*nd_pparam = (caddr_t)nd;
4754 	}
4755 	if (nd->nd_tbl) {
4756 		for (nde = nd->nd_tbl; nde->nde_name; nde++) {
4757 			if (strcmp(name, nde->nde_name) == 0)
4758 				goto fill_it;
4759 		}
4760 	}
4761 	if (nd->nd_free_count <= 1) {
4762 		if ((nde = (NDE *)kmem_zalloc(nd->nd_size +
4763 		    NDE_ALLOC_SIZE, KM_NOSLEEP)) == NULL)
4764 			return (B_FALSE);
4765 
4766 		nd->nd_free_count += NDE_ALLOC_COUNT;
4767 		if (nd->nd_tbl) {
4768 			bcopy((char *)nd->nd_tbl, (char *)nde, nd->nd_size);
4769 			kmem_free((char *)nd->nd_tbl, nd->nd_size);
4770 		} else {
4771 			nd->nd_free_count--;
4772 			nde->nde_name = "?";
4773 			nde->nde_get_pfi = nd_get_names;
4774 			nde->nde_set_pfi = nd_set_default;
4775 		}
4776 		nde->nde_data = (caddr_t)nd;
4777 		nd->nd_tbl = nde;
4778 		nd->nd_size += NDE_ALLOC_SIZE;
4779 	}
4780 	for (nde = nd->nd_tbl; nde->nde_name; nde++)
4781 		;
4782 	nd->nd_free_count--;
4783 fill_it:
4784 	nde->nde_name = name;
4785 	nde->nde_get_pfi = get_pfi ? get_pfi : nd_get_default;
4786 	nde->nde_set_pfi = set_pfi ? set_pfi : nd_set_default;
4787 	nde->nde_data = data;
4788 	return (B_TRUE);
4789 }
4790 
4791 /*
4792  * Hardening Functions
4793  * New Section
4794  */
4795 #ifdef  DEBUG
4796 /*PRINTFLIKE5*/
4797 static void
4798 eri_debug_msg(const char *file, int line, struct eri *erip,
4799     debug_msg_t type, const char *fmt, ...)
4800 {
4801 	char	msg_buffer[255];
4802 	va_list ap;
4803 
4804 	va_start(ap, fmt);
4805 	(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
4806 	va_end(ap);
4807 
4808 	if (eri_msg_out & ERI_CON_MSG) {
4809 		if (((type <= eri_debug_level) && eri_debug_all) ||
4810 		    ((type == eri_debug_level) && !eri_debug_all)) {
4811 			if (erip)
4812 				cmn_err(CE_CONT, "D: %s %s%d:(%s%d) %s\n",
4813 				    debug_msg_string[type], file, line,
4814 				    ddi_driver_name(erip->dip), erip->instance,
4815 				    msg_buffer);
4816 			else
4817 				cmn_err(CE_CONT, "D: %s %s(%d): %s\n",
4818 				    debug_msg_string[type], file,
4819 				    line, msg_buffer);
4820 		}
4821 	}
4822 }
4823 #endif
4824 
4825 
4826 /*PRINTFLIKE4*/
4827 static void
4828 eri_fault_msg(struct eri *erip, uint_t severity, msg_t type,
4829 	const char *fmt, ...)
4830 {
4831 	char	msg_buffer[255];
4832 	va_list	ap;
4833 
4834 	va_start(ap, fmt);
4835 	(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
4836 	va_end(ap);
4837 
4838 	if (erip == NULL) {
4839 		cmn_err(CE_NOTE, "eri : %s", msg_buffer);
4840 		return;
4841 	}
4842 
4843 	if (severity == SEVERITY_HIGH) {
4844 		cmn_err(CE_WARN, "%s%d : %s", ddi_driver_name(erip->dip),
4845 		    erip->instance, msg_buffer);
4846 	} else switch (type) {
4847 	case ERI_VERB_MSG:
4848 		cmn_err(CE_CONT, "?%s%d : %s", ddi_driver_name(erip->dip),
4849 		    erip->instance, msg_buffer);
4850 		break;
4851 	case ERI_LOG_MSG:
4852 		cmn_err(CE_NOTE, "^%s%d : %s", ddi_driver_name(erip->dip),
4853 		    erip->instance, msg_buffer);
4854 		break;
4855 	case ERI_BUF_MSG:
4856 		cmn_err(CE_NOTE, "!%s%d : %s", ddi_driver_name(erip->dip),
4857 		    erip->instance, msg_buffer);
4858 		break;
4859 	case ERI_CON_MSG:
4860 		cmn_err(CE_CONT, "%s%d : %s", ddi_driver_name(erip->dip),
4861 		    erip->instance, msg_buffer);
4862 	default:
4863 		break;
4864 	}
4865 }
4866 
4867 /*
4868  * Transceiver (xcvr) Functions
4869  * New Section
4870  */
4871 /*
4872  * eri_stop_timer function is used by a function before doing link-related
4873  * processing. It locks the "linklock" to protect the link-related data
4874  * structures. This lock will be subsequently released in eri_start_timer().
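 *
 * The two calls therefore bracket a link-state critical section, as in
 * eri_check_link_noind():
 *
 *	eri_stop_timer(erip);		(takes linklock)
 *	... examine and update link state ...
 *	eri_start_timer(erip, eri_check_link, ERI_LINKCHECK_TIMER);
 *					(rearms timer, drops linklock)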
4875  */
4876 static void
4877 eri_stop_timer(struct eri *erip)
4878 {
4879 	timeout_id_t id;
4880 	mutex_enter(&erip->linklock);
4881 	if (erip->timerid) {
4882 		erip->flags |= ERI_NOTIMEOUTS; /* prevent multiple timeout */
4883 		id = erip->timerid;
4884 		erip->timerid = 0; /* prevent other thread do untimeout */
4885 		mutex_exit(&erip->linklock); /* no mutex across untimeout() */
4886 
4887 		(void) untimeout(id);
4888 		mutex_enter(&erip->linklock); /* acquire mutex again */
4889 		erip->flags &= ~ERI_NOTIMEOUTS;
4890 	}
4891 }
4892 
4893 /*
4894  * If msec parameter is zero, just release "linklock".
4895  */
4896 static void
4897 eri_start_timer(struct eri *erip, fptrv_t func, clock_t msec)
4898 {
4899 	if (msec) {
4900 		if (!(erip->flags & ERI_NOTIMEOUTS) &&
4901 		    (erip->flags & ERI_RUNNING)) {
4902 			erip->timerid = timeout(func, (caddr_t)erip,
4903 			    drv_usectohz(1000*msec));
4904 		}
4905 	}
4906 
4907 	mutex_exit(&erip->linklock);
4908 }
4909 
4910 static int
4911 eri_new_xcvr(struct eri *erip)
4912 {
4913 	int		status;
4914 	uint32_t 	cfg;
4915 	int		old_transceiver;
4916 
4917 	if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4918 	    PCI_PM_IDLESPEED_NONE) == DDI_SUCCESS)
4919 		erip->stats.pmcap = ERI_PMCAP_NONE;
4920 
4921 	status = B_FALSE;			/* no change */
4922 	cfg = GET_MIFREG(mif_cfg);
4923 	ERI_DEBUG_MSG2(erip, MIF_MSG, "cfg value = %X", cfg);
4924 	old_transceiver = param_transceiver;
4925 
4926 	if ((cfg & ERI_MIF_CFGM1) && !use_int_xcvr) {
4927 		ERI_DEBUG_MSG1(erip, PHY_MSG, "Found External XCVR");
4928 		/*
4929 		 * An External Transceiver was found and it takes priority
4930 		 * over an internal, given the use_int_xcvr flag
4931 		 * is false.
4932 		 */
4933 		if (old_transceiver != EXTERNAL_XCVR) {
4934 			/*
4935 			 * External transceiver has just been plugged
4936 			 * in. Isolate the internal Transceiver.
4937 			 */
4938 			if (old_transceiver == INTERNAL_XCVR) {
4939 				eri_mii_write(erip, ERI_PHY_BMCR,
4940 				    (PHY_BMCR_ISOLATE | PHY_BMCR_PWRDN |
4941 				    PHY_BMCR_LPBK));
4942 			}
4943 			status = B_TRUE;
4944 		}
4945 		/*
4946 		 * Select the external Transceiver.
4947 		 */
4948 		erip->phyad = ERI_EXTERNAL_PHYAD;
4949 		param_transceiver = EXTERNAL_XCVR;
4950 		erip->mif_config &= ~ERI_MIF_CFGPD;
4951 		erip->mif_config |= (erip->phyad << ERI_MIF_CFGPD_SHIFT);
4952 		erip->mif_config |= ERI_MIF_CFGPS;
4953 		PUT_MIFREG(mif_cfg, erip->mif_config);
4954 
4955 		PUT_MACREG(xifc, GET_MACREG(xifc) | BMAC_XIFC_MIIBUF_OE);
4956 		drv_usecwait(ERI_MIF_POLL_DELAY);
4957 	} else if (cfg & ERI_MIF_CFGM0) {
4958 		ERI_DEBUG_MSG1(erip, PHY_MSG, "Found Internal XCVR");
4959 		/*
4960 		 * An Internal Transceiver was found or the
4961 		 * use_int_xcvr flag is true.
4962 		 */
4963 		if (old_transceiver != INTERNAL_XCVR) {
4964 			/*
4965 			 * The external transceiver has just been
4966 			 * disconnected or we're moving from a no
4967 			 * transceiver state.
4968 			 */
4969 			if ((old_transceiver == EXTERNAL_XCVR) &&
4970 			    (cfg & ERI_MIF_CFGM0)) {
4971 				eri_mii_write(erip, ERI_PHY_BMCR,
4972 				    (PHY_BMCR_ISOLATE | PHY_BMCR_PWRDN |
4973 				    PHY_BMCR_LPBK));
4974 			}
4975 			status = B_TRUE;
4976 		}
4977 		/*
4978 		 * Select the internal transceiver.
4979 		 */
4980 		erip->phyad = ERI_INTERNAL_PHYAD;
4981 		param_transceiver = INTERNAL_XCVR;
4982 		erip->mif_config &= ~ERI_MIF_CFGPD;
4983 		erip->mif_config |= (erip->phyad << ERI_MIF_CFGPD_SHIFT);
4984 		erip->mif_config &= ~ERI_MIF_CFGPS;
4985 		PUT_MIFREG(mif_cfg, erip->mif_config);
4986 
4987 		PUT_MACREG(xifc, GET_MACREG(xifc) & ~ BMAC_XIFC_MIIBUF_OE);
4988 		drv_usecwait(ERI_MIF_POLL_DELAY);
4989 	} else {
4990 		/*
4991 		 * Did not find a valid xcvr.
4992 		 */
4993 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
4994 		    "Eri_new_xcvr : Select None");
4995 		param_transceiver = NO_XCVR;
4996 		erip->xcvr_status = PHY_LINK_DOWN;
4997 	}
4998 
4999 	if (erip->stats.pmcap == ERI_PMCAP_NONE) {
5000 		if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
5001 		    (void *)4000) == DDI_SUCCESS)
5002 			erip->stats.pmcap = ERI_PMCAP_4MHZ;
5003 	}
5004 
5005 	return (status);
5006 }
5007 
5008 /*
5009  * This function is used for timers.  No locks are held on timer expiry.
5010  */
5011 static void
5012 eri_check_link(struct eri *erip)
5013 {
5014 	link_state_t	linkupdate = eri_check_link_noind(erip);
5015 
5016 	if (linkupdate != LINK_STATE_UNKNOWN)
5017 		mac_link_update(erip->mh, linkupdate);
5018 }
5019 
5020 /*
5021  * Compare our xcvr in our structure to the xcvr that we get from
5022  * eri_check_mii_xcvr(). If they are different then mark the
5023  * link down, reset xcvr, and return.
5024  *
5025  * Note that without the MII connector, conditions cannot change such
5026  * that an external phy would then be used; thus this code has been
5027  * cleaned up to no longer call the function or change the xcvr.
5028  */
5029 static uint32_t
5030 eri_check_link_noind(struct eri *erip)
5031 {
5032 	uint16_t stat, control, mif_ints;
5033 	uint32_t link_timeout	= ERI_LINKCHECK_TIMER;
5034 	uint32_t linkupdate = 0;
5035 
5036 	eri_stop_timer(erip);	/* acquire linklock */
5037 
5038 	mutex_enter(&erip->xmitlock);
5039 	mutex_enter(&erip->xcvrlock);
5040 	eri_mif_poll(erip, MIF_POLL_STOP);
5041 
5042 	(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5043 	mif_ints = erip->mii_status ^ stat;
5044 
5045 	if (erip->openloop_autoneg) {
5046 		(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5047 		ERI_DEBUG_MSG3(erip, XCVR_MSG,
5048 		    "eri_check_link:openloop stat %X mii_status %X",
5049 		    stat, erip->mii_status);
5050 		(void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5051 		if (!(stat & PHY_BMSR_LNKSTS) &&
5052 		    (erip->openloop_autoneg < 2)) {
5053 			if (param_speed) {
5054 				control &= ~PHY_BMCR_100M;
5055 				param_anlpar_100hdx = 0;
5056 				param_anlpar_10hdx = 1;
5057 				param_speed = 0;
5058 				erip->stats.ifspeed = 10;
5059 
5060 			} else {
5061 				control |= PHY_BMCR_100M;
5062 				param_anlpar_100hdx = 1;
5063 				param_anlpar_10hdx = 0;
5064 				param_speed = 1;
5065 				erip->stats.ifspeed = 100;
5066 			}
5067 			ERI_DEBUG_MSG3(erip, XCVR_MSG,
5068 			    "eri_check_link: trying speed %X stat %X",
5069 			    param_speed, stat);
5070 
5071 			erip->openloop_autoneg++;
5072 			eri_mii_write(erip, ERI_PHY_BMCR, control);
5073 			link_timeout = ERI_P_FAULT_TIMER;
5074 		} else {
5075 			erip->openloop_autoneg = 0;
5076 			linkupdate = eri_mif_check(erip, stat, stat);
5077 			if (erip->openloop_autoneg)
5078 				link_timeout = ERI_P_FAULT_TIMER;
5079 		}
5080 		eri_mif_poll(erip, MIF_POLL_START);
5081 		mutex_exit(&erip->xcvrlock);
5082 		mutex_exit(&erip->xmitlock);
5083 
5084 		eri_start_timer(erip, eri_check_link, link_timeout);
5085 		return (linkupdate);
5086 	}
5087 
5088 	linkupdate = eri_mif_check(erip, mif_ints, stat);
5089 	eri_mif_poll(erip, MIF_POLL_START);
5090 	mutex_exit(&erip->xcvrlock);
5091 	mutex_exit(&erip->xmitlock);
5092 
5093 #ifdef ERI_RMAC_HANG_WORKAROUND
5094 	/*
5095 	 * Check if rx hung.
5096 	 */
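	/*
	 * The check is two-phase: on one timer tick we snapshot the RX
	 * FIFO read/write pointers; on the next we declare a hang only
	 * if the MAC state machine still shows an overflow state and the
	 * pointers have not moved, in which case eri_init() resets the
	 * MAC.
	 */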
5097 	if ((erip->flags & ERI_RUNNING) && param_linkup) {
5098 		if (erip->check_rmac_hang) {
5099 			ERI_DEBUG_MSG5(erip,
5100 			    NONFATAL_MSG,
5101 			    "check1 %d: macsm:%8x wr:%2x rd:%2x",
5102 			    erip->check_rmac_hang,
5103 			    GET_MACREG(macsm),
5104 			    GET_ERXREG(rxfifo_wr_ptr),
5105 			    GET_ERXREG(rxfifo_rd_ptr));
5106 
5107 			erip->check_rmac_hang = 0;
5108 			erip->check2_rmac_hang++;
5109 
5110 			erip->rxfifo_wr_ptr_c = GET_ERXREG(rxfifo_wr_ptr);
5111 			erip->rxfifo_rd_ptr_c = GET_ERXREG(rxfifo_rd_ptr);
5112 
5113 			eri_start_timer(erip, eri_check_link,
5114 			    ERI_CHECK_HANG_TIMER);
5115 			return (linkupdate);
5116 		}
5117 
5118 		if (erip->check2_rmac_hang) {
5119 			ERI_DEBUG_MSG5(erip,
5120 			    NONFATAL_MSG,
5121 			    "check2 %d: macsm:%8x wr:%2x rd:%2x",
5122 			    erip->check2_rmac_hang,
5123 			    GET_MACREG(macsm),
5124 			    GET_ERXREG(rxfifo_wr_ptr),
5125 			    GET_ERXREG(rxfifo_rd_ptr));
5126 
5127 			erip->check2_rmac_hang = 0;
5128 
5129 			erip->rxfifo_wr_ptr = GET_ERXREG(rxfifo_wr_ptr);
5130 			erip->rxfifo_rd_ptr = GET_ERXREG(rxfifo_rd_ptr);
5131 
5132 			if (((GET_MACREG(macsm) & BMAC_OVERFLOW_STATE) ==
5133 			    BMAC_OVERFLOW_STATE) &&
5134 			    ((erip->rxfifo_wr_ptr_c == erip->rxfifo_rd_ptr_c) ||
5135 			    ((erip->rxfifo_rd_ptr == erip->rxfifo_rd_ptr_c) &&
5136 			    (erip->rxfifo_wr_ptr == erip->rxfifo_wr_ptr_c)))) {
5137 				ERI_DEBUG_MSG1(erip,
5138 				    NONFATAL_MSG,
5139 				    "RX hang: Reset mac");
5140 
5141 				HSTAT(erip, rx_hang);
5142 				erip->linkcheck = 1;
5143 
5144 				eri_start_timer(erip, eri_check_link,
5145 				    ERI_LINKCHECK_TIMER);
5146 				(void) eri_init(erip);
5147 				return (linkupdate);
5148 			}
5149 		}
5150 	}
5151 #endif
5152 
5153 	/*
5154 	 * Check if tx hung.
5155 	 */
5156 #ifdef	ERI_TX_HUNG
5157 	if ((erip->flags & ERI_RUNNING) && param_linkup &&
5158 	    (eri_check_txhung(erip))) {
5159 		HSTAT(erip, tx_hang);
5160 		eri_reinit_txhung++;
5161 		erip->linkcheck = 1;
5162 		eri_start_timer(erip, eri_check_link, ERI_CHECK_HANG_TIMER);
5163 		(void) eri_init(erip);
5164 		return (linkupdate);
5165 	}
5166 #endif
5167 
5168 #ifdef ERI_PM_WORKAROUND
5169 	if (erip->stats.pmcap == ERI_PMCAP_NONE) {
5170 		if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
5171 		    (void *)4000) == DDI_SUCCESS)
5172 			erip->stats.pmcap = ERI_PMCAP_4MHZ;
5173 
5174 		ERI_DEBUG_MSG2(erip, NONFATAL_MSG,
5175 		    "eri_check_link: PMCAP %d", erip->stats.pmcap);
5176 	}
5177 #endif
5178 	if ((!param_mode) && (param_transceiver != NO_XCVR))
5179 		eri_start_timer(erip, eri_check_link, ERI_CHECK_HANG_TIMER);
5180 	else
5181 		eri_start_timer(erip, eri_check_link, ERI_LINKCHECK_TIMER);
5182 	return (linkupdate);
5183 }
5184 
5185 static link_state_t
5186 eri_mif_check(struct eri *erip, uint16_t mif_ints, uint16_t mif_data)
5187 {
5188 	uint16_t control, aner, anlpar, anar, an_common;
5189 	uint16_t old_mintrans;
5190 	int restart_autoneg = 0;
5191 	link_state_t retv;
5192 
5193 	ERI_DEBUG_MSG4(erip, XCVR_MSG, "eri_mif_check: mif_mask: %X, %X, %X",
5194 	    erip->mif_mask, mif_ints, mif_data);
5195 
5196 	mif_ints &= ~erip->mif_mask;
5197 	erip->mii_status = mif_data;
5198 	/*
5199 	 * Now check if someone has pulled the xcvr or
5200 	 * a new xcvr has shown up.
5201 	 * If so, try to find out what the new xcvr setup is.
5202 	 */
5203 	if (((mif_ints & PHY_BMSR_RES1) && (mif_data == 0xFFFF)) ||
5204 	    (param_transceiver == NO_XCVR)) {
5205 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
5206 		    "No status; transceiver gone");
5207 		if (eri_new_xcvr(erip)) {
5208 			if (param_transceiver != NO_XCVR) {
5209 				/*
5210 				 * Reset the new PHY and bring up the link
5211 				 */
5212 				(void) eri_reset_xcvr(erip);
5213 			}
5214 		}
5215 		return (LINK_STATE_UNKNOWN);
5216 	}
5217 
5218 	if (param_autoneg && (mif_ints & PHY_BMSR_LNKSTS) &&
5219 	    (mif_data & PHY_BMSR_LNKSTS) && (mif_data & PHY_BMSR_ANC)) {
5220 		mif_ints |= PHY_BMSR_ANC;
5221 		ERI_DEBUG_MSG3(erip, PHY_MSG,
5222 		    "eri_mif_check: Set ANC bit mif_data %X mif_ints %X",
5223 		    mif_data, mif_ints);
5224 	}
5225 
5226 	if ((mif_ints & PHY_BMSR_ANC) && (mif_data & PHY_BMSR_ANC)) {
5227 		ERI_DEBUG_MSG1(erip, PHY_MSG, "Auto-negotiation interrupt.");
5228 
5229 		/*
5230 		 * Switch off Auto-negotiation interrupts and switch on
5231 		 * Link status interrupts.
5232 		 */
5233 		erip->mif_mask |= PHY_BMSR_ANC;
5234 		erip->mif_mask &= ~PHY_BMSR_LNKSTS;
5235 		(void) eri_mii_read(erip, ERI_PHY_ANER, &aner);
5236 		param_aner_lpancap = 1 && (aner & PHY_ANER_LPNW);
5237 		if ((aner & PHY_ANER_MLF) || (eri_force_mlf)) {
5238 			ERI_DEBUG_MSG1(erip, XCVR_MSG,
5239 			    "parallel detection fault");
5240 			/*
5241 			 * Consider doing open loop auto-negotiation.
5242 			 */
5243 			ERI_DEBUG_MSG1(erip, XCVR_MSG,
5244 			    "Going into Open loop Auto-neg");
5245 			(void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5246 
5247 			control &= ~(PHY_BMCR_ANE | PHY_BMCR_RAN |
5248 			    PHY_BMCR_FDX);
5249 			if (param_anar_100fdx || param_anar_100hdx) {
5250 				control |= PHY_BMCR_100M;
5251 				param_anlpar_100hdx = 1;
5252 				param_anlpar_10hdx = 0;
5253 				param_speed = 1;
5254 				erip->stats.ifspeed = 100;
5255 
5256 			} else if (param_anar_10fdx || param_anar_10hdx) {
5257 				control &= ~PHY_BMCR_100M;
5258 				param_anlpar_100hdx = 0;
5259 				param_anlpar_10hdx = 1;
5260 				param_speed = 0;
5261 				erip->stats.ifspeed = 10;
5262 			} else {
5263 				ERI_FAULT_MSG1(erip, SEVERITY_NONE,
5264 				    ERI_VERB_MSG,
5265 				    "Transceiver speed set incorrectly.");
5266 				return (0);
5267 			}
5268 
5269 			(void) eri_mii_write(erip, ERI_PHY_BMCR, control);
5270 			param_anlpar_100fdx = 0;
5271 			param_anlpar_10fdx = 0;
5272 			param_mode = 0;
5273 			erip->openloop_autoneg = 1;
5274 			return (0);
5275 		}
5276 		(void) eri_mii_read(erip, ERI_PHY_ANLPAR, &anlpar);
5277 		(void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5278 		an_common = anar & anlpar;
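		/*
		 * an_common now holds the abilities both ends advertised;
		 * the tests below pick the best common mode.  For example,
		 * if we advertise 100fdx, 100hdx and 10hdx but the link
		 * partner only 100hdx and 10hdx, the result is 100 Mbps
		 * half duplex (param_speed = 1, param_mode = 0).
		 */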
5279 
5280 		ERI_DEBUG_MSG2(erip, XCVR_MSG, "an_common = 0x%X", an_common);
5281 
5282 		if (an_common & (PHY_ANLPAR_TXFDX | PHY_ANLPAR_TX)) {
5283 			param_speed = 1;
5284 			erip->stats.ifspeed = 100;
5285 			param_mode = 1 && (an_common & PHY_ANLPAR_TXFDX);
5286 
5287 		} else if (an_common & (PHY_ANLPAR_10FDX | PHY_ANLPAR_10)) {
5288 			param_speed = 0;
5289 			erip->stats.ifspeed = 10;
5290 			param_mode = 1 && (an_common & PHY_ANLPAR_10FDX);
5291 
5292 		} else an_common = 0x0;
5293 
5294 		if (!an_common) {
5295 			ERI_FAULT_MSG1(erip, SEVERITY_MID, ERI_VERB_MSG,
5296 			    "Transceiver: anar not set with speed selection");
5297 		}
5298 		param_anlpar_100T4 = 1 && (anlpar & PHY_ANLPAR_T4);
5299 		param_anlpar_100fdx = 1 && (anlpar & PHY_ANLPAR_TXFDX);
5300 		param_anlpar_100hdx = 1 && (anlpar & PHY_ANLPAR_TX);
5301 		param_anlpar_10fdx = 1 && (anlpar & PHY_ANLPAR_10FDX);
5302 		param_anlpar_10hdx = 1 && (anlpar & PHY_ANLPAR_10);
5303 
5304 		ERI_DEBUG_MSG2(erip, PHY_MSG,
5305 		    "Link duplex = 0x%X", param_mode);
5306 		ERI_DEBUG_MSG2(erip, PHY_MSG,
5307 		    "Link speed = 0x%X", param_speed);
5308 	/*	mif_ints |= PHY_BMSR_LNKSTS; prevent double msg */
5309 	/*	mif_data |= PHY_BMSR_LNKSTS; prevent double msg */
5310 	}
5311 	retv = LINK_STATE_UNKNOWN;
5312 	if (mif_ints & PHY_BMSR_LNKSTS) {
5313 		if (mif_data & PHY_BMSR_LNKSTS) {
5314 			ERI_DEBUG_MSG1(erip, PHY_MSG, "Link Up");
5315 			/*
5316 			 * Program Lu3X31T for minimum transition
5317 			 */
5318 			if (eri_phy_mintrans) {
5319 				eri_mii_write(erip, 31, 0x8000);
5320 				(void) eri_mii_read(erip, 0, &old_mintrans);
5321 				eri_mii_write(erip, 0, 0x00F1);
5322 				eri_mii_write(erip, 31, 0x0000);
5323 			}
5324 			/*
5325 			 * The link is up.
5326 			 */
5327 			eri_init_txmac(erip);
5328 			param_linkup = 1;
5329 			erip->stats.link_up = LINK_STATE_UP;
5330 			if (param_mode)
5331 				erip->stats.link_duplex = LINK_DUPLEX_FULL;
5332 			else
5333 				erip->stats.link_duplex = LINK_DUPLEX_HALF;
5334 
5335 			retv = LINK_STATE_UP;
5336 		} else {
5337 			ERI_DEBUG_MSG1(erip, PHY_MSG, "Link down.");
5338 			param_linkup = 0;
5339 			erip->stats.link_up = LINK_STATE_DOWN;
5340 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
5341 			retv = LINK_STATE_DOWN;
5342 			if (param_autoneg) {
5343 				restart_autoneg = 1;
5344 			}
5345 		}
5346 	} else {
5347 		if (mif_data & PHY_BMSR_LNKSTS) {
5348 			if (!param_linkup) {
5349 				ERI_DEBUG_MSG1(erip, PHY_MSG,
5350 				    "eri_mif_check: MIF data link up");
5351 				/*
5352 				 * Program Lu3X31T for minimum transition
5353 				 */
5354 				if (eri_phy_mintrans) {
5355 					eri_mii_write(erip, 31, 0x8000);
5356 					(void) eri_mii_read(erip, 0,
5357 					    &old_mintrans);
5358 					eri_mii_write(erip, 0, 0x00F1);
5359 					eri_mii_write(erip, 31, 0x0000);
5360 				}
5361 				/*
5362 				 * The link is up.
5363 				 */
5364 				eri_init_txmac(erip);
5365 
5366 				param_linkup = 1;
5367 				erip->stats.link_up = LINK_STATE_UP;
5368 				if (param_mode)
5369 					erip->stats.link_duplex =
5370 					    LINK_DUPLEX_FULL;
5371 				else
5372 					erip->stats.link_duplex =
5373 					    LINK_DUPLEX_HALF;
5374 
5375 				retv = LINK_STATE_UP;
5376 			}
5377 		} else if (param_linkup) {
5378 			/*
5379 			 * The link is down now.
5380 			 */
5381 			ERI_DEBUG_MSG1(erip, PHY_MSG,
5382 			    "eri_mif_check:Link was up and went down");
5383 			param_linkup = 0;
5384 			erip->stats.link_up = LINK_STATE_DOWN;
5385 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
5386 			retv = LINK_STATE_DOWN;
5387 			if (param_autoneg)
5388 				restart_autoneg = 1;
5389 		}
5390 	}
5391 	if (restart_autoneg) {
5392 		/*
5393 		 * Restart normal auto-negotiation.
5394 		 */
5395 		ERI_DEBUG_MSG1(erip, PHY_MSG,
5396 		    "eri_mif_check: Restart Auto-Negotiation");
5397 		erip->openloop_autoneg = 0;
5398 		param_mode = 0;
5399 		param_speed = 0;
5400 		param_anlpar_100T4 = 0;
5401 		param_anlpar_100fdx = 0;
5402 		param_anlpar_100hdx = 0;
5403 		param_anlpar_10fdx = 0;
5404 		param_anlpar_10hdx = 0;
5405 		param_aner_lpancap = 0;
5406 		(void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5407 		control |= (PHY_BMCR_ANE | PHY_BMCR_RAN);
5408 		eri_mii_write(erip, ERI_PHY_BMCR, control);
5409 	}
5410 	if (mif_ints & PHY_BMSR_JABDET) {
5411 		if (mif_data & PHY_BMSR_JABDET) {
5412 			ERI_DEBUG_MSG1(erip, PHY_MSG, "Jabber detected.");
5413 			HSTAT(erip, jab);
5414 			/*
5415 			 * Reset the new PHY and bring up the link
5416 			 * (Check for failure?)
5417 			 */
5418 			(void) eri_reset_xcvr(erip);
5419 		}
5420 	}
5421 	return (retv);
5422 }
5423 
5424 #define	PHYRST_PERIOD 500
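/*
 * The reset loop in eri_reset_xcvr() polls BMCR up to 1000 times at
 * PHYRST_PERIOD (500) microsecond intervals, allowing the PHY at most
 * about half a second to clear its reset bit before we give up.
 */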
5425 static int
5426 eri_reset_xcvr(struct eri *erip)
5427 {
5428 	uint16_t	stat;
5429 	uint16_t	anar;
5430 	uint16_t	control;
5431 	uint16_t	idr1;
5432 	uint16_t	idr2;
5433 	uint16_t	nicr;
5434 	uint32_t	speed_100;
5435 	uint32_t	speed_10;
5436 	int n;
5437 
5438 #ifdef	ERI_10_10_FORCE_SPEED_WORKAROUND
5439 	erip->ifspeed_old = erip->stats.ifspeed;
5440 #endif
5441 	/*
5442 	 * Reset open-loop auto-negotiation.  This means normal
5443 	 * auto-negotiation is tried first; on a Multiple Link Fault
5444 	 * we fall back to trying 100M half duplex, then 10M half duplex,
5445 	 * until the link comes up.
5446 	 */
5447 	erip->openloop_autoneg = 0;
5448 
5449 	/*
5450 	 * Reset the xcvr.
5451 	 */
5452 	eri_mii_write(erip, ERI_PHY_BMCR, PHY_BMCR_RESET);
5453 
5454 	/* Check for transceiver reset completion */
5455 
5456 	n = 1000;
5457 	while (--n > 0) {
5458 		drv_usecwait((clock_t)PHYRST_PERIOD);
5459 		if (eri_mii_read(erip, ERI_PHY_BMCR, &control) == 1) {
5460 			/* Transceiver does not talk MII */
5461 			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
5462 			    "eri_reset_xcvr: no mii");
5463 		}
5464 		if ((control & PHY_BMCR_RESET) == 0)
5465 			goto reset_done;
5466 	}
5467 	ERI_FAULT_MSG2(erip, SEVERITY_NONE, ERI_VERB_MSG,
5468 	    "eri_reset_xcvr:reset_failed n == 0, control %x", control);
5469 	goto eri_reset_xcvr_failed;
5470 
5471 reset_done:
5472 
5473 	ERI_DEBUG_MSG2(erip, AUTOCONFIG_MSG,
5474 	    "eri_reset_xcvr: reset complete in %d us",
5475 	    (1000 - n) * PHYRST_PERIOD);
5476 
5477 	(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5478 	(void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5479 	(void) eri_mii_read(erip, ERI_PHY_IDR1, &idr1);
5480 	(void) eri_mii_read(erip, ERI_PHY_IDR2, &idr2);
5481 
5482 	ERI_DEBUG_MSG4(erip, XCVR_MSG,
5483 	    "eri_reset_xcvr: control %x stat %x anar %x", control, stat, anar);
5484 
5485 	/*
5486 	 * Initialize the read only transceiver ndd information
5487 	 * the values are either 0 or 1.
5488 	 */
5489 	param_bmsr_ancap = 1 && (stat & PHY_BMSR_ACFG);
5490 	param_bmsr_100T4 = 1 && (stat & PHY_BMSR_100T4);
5491 	param_bmsr_100fdx = 1 && (stat & PHY_BMSR_100FDX);
5492 	param_bmsr_100hdx = 1 && (stat & PHY_BMSR_100HDX);
5493 	param_bmsr_10fdx = 1 && (stat & PHY_BMSR_10FDX);
5494 	param_bmsr_10hdx = 1 && (stat & PHY_BMSR_10HDX);
5495 
5496 	/*
5497 	 * Match up the ndd capabilities with the transceiver.
5498 	 */
5499 	param_autoneg &= param_bmsr_ancap;
5500 	param_anar_100fdx &= param_bmsr_100fdx;
5501 	param_anar_100hdx &= param_bmsr_100hdx;
5502 	param_anar_10fdx &= param_bmsr_10fdx;
5503 	param_anar_10hdx &= param_bmsr_10hdx;
5504 
5505 	/*
5506 	 * Select the operation mode of the transceiver.
5507 	 */
5508 	if (param_autoneg) {
5509 		/*
5510 		 * Initialize our auto-negotiation capabilities.
5511 		 */
5512 		anar = PHY_SELECTOR;
5513 		if (param_anar_100T4)
5514 			anar |= PHY_ANAR_T4;
5515 		if (param_anar_100fdx)
5516 			anar |= PHY_ANAR_TXFDX;
5517 		if (param_anar_100hdx)
5518 			anar |= PHY_ANAR_TX;
5519 		if (param_anar_10fdx)
5520 			anar |= PHY_ANAR_10FDX;
5521 		if (param_anar_10hdx)
5522 			anar |= PHY_ANAR_10;
5523 		ERI_DEBUG_MSG2(erip, XCVR_MSG, "anar = %x", anar);
5524 		eri_mii_write(erip, ERI_PHY_ANAR, anar);
5525 	}
5526 
5527 	/* Place the Transceiver in normal operation mode */
5528 	if ((control & PHY_BMCR_ISOLATE) || (control & PHY_BMCR_LPBK)) {
5529 		control &= ~(PHY_BMCR_ISOLATE | PHY_BMCR_LPBK);
5530 		eri_mii_write(erip, ERI_PHY_BMCR,
5531 		    (control & ~PHY_BMCR_ISOLATE));
5532 	}
5533 
5534 	/*
5535 	 * If Lu3X31T then allow nonzero eri_phy_mintrans
5536 	 */
5537 	if (eri_phy_mintrans &&
5538 	    (idr1 != 0x43 || (idr2 & 0xFFF0) != 0x7420)) {
5539 		eri_phy_mintrans = 0;
5540 	}
5541 	/*
5542 	 * Initialize the mif interrupt mask.
5543 	 */
5544 	erip->mif_mask = (uint16_t)(~PHY_BMSR_RES1);
5545 
5546 	/*
5547 	 * Establish link speeds and do necessary special stuff based
5548 	 * on the speed.
5549 	 */
5550 	speed_100 = param_anar_100fdx | param_anar_100hdx;
5551 	speed_10 = param_anar_10fdx | param_anar_10hdx;
5552 
5553 	ERI_DEBUG_MSG5(erip, XCVR_MSG, "eri_reset_xcvr: %d %d %d %d",
5554 	    param_anar_100fdx, param_anar_100hdx, param_anar_10fdx,
5555 	    param_anar_10hdx);
5556 
5557 	ERI_DEBUG_MSG3(erip, XCVR_MSG,
5558 	    "eri_reset_xcvr: speed_100 %d speed_10 %d", speed_100, speed_10);
5559 
	if ((!speed_100) && (speed_10)) {
		erip->mif_mask &= ~PHY_BMSR_JABDET;
		if (!(param_anar_10fdx) &&
		    (param_anar_10hdx) &&
		    (erip->link_pulse_disabled)) {
			param_speed = 0;
			param_mode = 0;
			(void) eri_mii_read(erip, ERI_PHY_NICR, &nicr);
			nicr &= ~PHY_NICR_LD;
			eri_mii_write(erip, ERI_PHY_NICR, nicr);
			param_linkup = 1;
			erip->stats.link_up = LINK_STATE_UP;
			/* param_mode was just cleared, so half duplex */
			erip->stats.link_duplex = LINK_DUPLEX_HALF;
		}
	}
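	/*
	 * In the case above (forced 10 Mbps half-duplex with link
	 * pulses disabled) there is no link-integrity handshake to
	 * wait for, so the link is declared up as soon as the NICR
	 * has been updated.
	 */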
5578 
	/*
	 * Build the default BMCR value; the autonegotiation bits are
	 * cleared and re-set (or switched off) below before this value
	 * is written to the transceiver.
	 */
	control = PHY_BMCR_100M | PHY_BMCR_FDX;
5584 	if (param_autoneg) {
5585 		/*
5586 		 * Setup the transceiver for autonegotiation.
5587 		 */
5588 		erip->mif_mask &= ~PHY_BMSR_ANC;
5589 
5590 		/*
5591 		 * Clear the Auto-negotiation before re-starting
5592 		 */
5593 		eri_mii_write(erip, ERI_PHY_BMCR, control & ~PHY_BMCR_ANE);
5594 
5595 		/*
5596 		 * Switch on auto-negotiation.
5597 		 */
5598 		control |= (PHY_BMCR_ANE | PHY_BMCR_RAN);
5599 
5600 		eri_mii_write(erip, ERI_PHY_BMCR, control);
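		/*
		 * PHY_BMCR_RAN (restart autonegotiation) is
		 * self-clearing; setting it together with PHY_BMCR_ANE
		 * starts a new negotiation using the ANAR contents
		 * programmed above.
		 */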
5601 	} else {
5602 		/*
5603 		 * Force the transceiver.
5604 		 */
5605 		erip->mif_mask &= ~PHY_BMSR_LNKSTS;
5606 
5607 		/*
5608 		 * Switch off auto-negotiation.
5609 		 */
5610 		control &= ~(PHY_BMCR_FDX | PHY_BMCR_ANE | PHY_BMCR_RAN);
5611 
5612 		if (speed_100) {
5613 			control |= PHY_BMCR_100M;
5614 			param_aner_lpancap = 0; /* Clear LP nway */
5615 			param_anlpar_10fdx = 0;
5616 			param_anlpar_10hdx = 0;
5617 			param_anlpar_100T4 = param_anar_100T4;
5618 			param_anlpar_100fdx = param_anar_100fdx;
5619 			param_anlpar_100hdx = param_anar_100hdx;
5620 			param_speed = 1;
5621 			erip->stats.ifspeed = 100;
5622 			param_mode = param_anar_100fdx;
5623 			if (param_mode) {
5624 				param_anlpar_100hdx = 0;
5625 				erip->stats.link_duplex = LINK_DUPLEX_FULL;
5626 			} else {
5627 				erip->stats.link_duplex = LINK_DUPLEX_HALF;
5628 			}
5629 		} else if (speed_10) {
5630 			control &= ~PHY_BMCR_100M;
5631 			param_aner_lpancap = 0; /* Clear LP nway */
5632 			param_anlpar_100fdx = 0;
5633 			param_anlpar_100hdx = 0;
5634 			param_anlpar_100T4 = 0;
5635 			param_anlpar_10fdx = param_anar_10fdx;
5636 			param_anlpar_10hdx = param_anar_10hdx;
5637 			param_speed = 0;
5638 			erip->stats.ifspeed = 10;
5639 			param_mode = param_anar_10fdx;
5640 			if (param_mode) {
5641 				param_anlpar_10hdx = 0;
5642 				erip->stats.link_duplex = LINK_DUPLEX_FULL;
5643 			} else {
5644 				erip->stats.link_duplex = LINK_DUPLEX_HALF;
5645 			}
5646 		} else {
5647 			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
5648 			    "Transceiver speed set incorrectly.");
5649 		}
5650 
5651 		if (param_mode) {
5652 			control |= PHY_BMCR_FDX;
5653 		}
5654 
5655 		ERI_DEBUG_MSG4(erip, PHY_MSG,
5656 		    "control = %x status = %x param_mode %d",
5657 		    control, stat, param_mode);
5658 
5659 		eri_mii_write(erip, ERI_PHY_BMCR, control);
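		/*
		 * With autonegotiation disabled, the BMCR speed and
		 * duplex bits take effect directly; the link partner
		 * must be configured to match, since parallel detection
		 * can sense the partner's speed but not its duplex
		 * setting.
		 */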
5667 	}
5668 
5669 #ifdef DEBUG
5670 	(void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5671 	(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5672 	(void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5673 #endif
5674 	ERI_DEBUG_MSG4(erip, PHY_MSG,
5675 	    "control %X status %X anar %X", control, stat, anar);
5676 
5677 eri_reset_xcvr_exit:
5678 	return (0);
5679 
5680 eri_reset_xcvr_failed:
5681 	return (1);
5682 }
5683 
5684 #ifdef	ERI_10_10_FORCE_SPEED_WORKAROUND
5685 
5686 static void
5687 eri_xcvr_force_mode(struct eri *erip, uint32_t *link_timeout)
5688 {
5689 
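	/*
	 * Forced-speed workaround: if the link is down while forced to
	 * 10 Mbps (or was last up at 10 Mbps but is now forced to 100),
	 * shorten the link timer to one second, or log a hint that the
	 * link partner may need matching speed/duplex settings.
	 */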
5690 	if (!param_autoneg && !param_linkup && (erip->stats.ifspeed == 10) &&
5691 	    (param_anar_10fdx | param_anar_10hdx)) {
5692 		*link_timeout = SECOND(1);
5693 		return;
5694 	}
5695 
5696 	if (!param_autoneg && !param_linkup && (erip->ifspeed_old == 10) &&
5697 	    (param_anar_100fdx | param_anar_100hdx)) {
5698 		/*
5699 		 * May have to set link partner's speed and mode.
5700 		 */
		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_LOG_MSG,
		    "May have to set link partner's speed and duplex mode.");
5703 	}
5704 }
5705 #endif
5706 
5707 static void
5708 eri_mif_poll(struct eri *erip, soft_mif_enable_t enable)
5709 {
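	/*
	 * MIF_POLL_START programs the MIF to poll the transceiver on
	 * its own (ERI_MIF_CFGPE), unmasks the MIF interrupt in the
	 * global interrupt mask and applies the per-bit MIF mask
	 * computed in eri_reset_xcvr(); MIF_POLL_STOP reverses this
	 * and restores the default MIF interrupt mask.
	 */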
5710 	if (enable == MIF_POLL_START) {
5711 		if (erip->mifpoll_enable && !erip->openloop_autoneg) {
5712 			erip->mif_config |= ERI_MIF_CFGPE;
5713 			PUT_MIFREG(mif_cfg, erip->mif_config);
5714 			drv_usecwait(ERI_MIF_POLL_DELAY);
5715 			PUT_GLOBREG(intmask, GET_GLOBREG(intmask) &
5716 			    ~ERI_G_MASK_MIF_INT);
5717 			PUT_MIFREG(mif_imask, erip->mif_mask);
5718 		}
	} else if (enable == MIF_POLL_STOP) {
		erip->mif_config &= ~ERI_MIF_CFGPE;
		PUT_MIFREG(mif_cfg, erip->mif_config);
		drv_usecwait(ERI_MIF_POLL_DELAY);
		PUT_GLOBREG(intmask, GET_GLOBREG(intmask) |
		    ERI_G_MASK_MIF_INT);
		PUT_MIFREG(mif_imask, ERI_MIF_INTMASK);
	}
5727 	ERI_DEBUG_MSG2(erip, XCVR_MSG, "MIF Config = 0x%X",
5728 	    GET_MIFREG(mif_cfg));
5729 	ERI_DEBUG_MSG2(erip, XCVR_MSG, "MIF imask = 0x%X",
5730 	    GET_MIFREG(mif_imask));
5731 	ERI_DEBUG_MSG2(erip, XCVR_MSG, "INT imask = 0x%X",
5732 	    GET_GLOBREG(intmask));
5733 	ERI_DEBUG_MSG1(erip, XCVR_MSG, "<== mif_poll");
5734 }
5735 
5736 /* Decide if transmitter went dead and reinitialize everything */
5737 #ifdef	ERI_TX_HUNG
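/*
 * Number of consecutive eri_check_txhung() calls that must observe no
 * transmit progress before the transmitter is declared hung.
 */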
5738 static int eri_txhung_limit = 2;
5739 static int
5740 eri_check_txhung(struct eri *erip)
5741 {
5742 	boolean_t	macupdate = B_FALSE;
5743 
5744 	mutex_enter(&erip->xmitlock);
	if (erip->flags & ERI_RUNNING) {
		erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
		    ETX_COMPLETION_MASK);
		macupdate |= eri_reclaim(erip, erip->tx_completion);
	}
5749 
	/*
	 * Descriptors are queued (tcurp != tnextp) but neither output
	 * packets nor collisions have advanced since the last check;
	 * the transmitter may be hung.
	 */
	if ((erip->tcurp != erip->tnextp) &&
	    (erip->stats.opackets64 == erip->erisave.reclaim_opackets) &&
	    (erip->stats.collisions == erip->erisave.starts))
		erip->txhung++;
	else
		erip->txhung = 0;
5757 
5758 	erip->erisave.reclaim_opackets = erip->stats.opackets64;
5759 	erip->erisave.starts = erip->stats.collisions;
5760 	mutex_exit(&erip->xmitlock);
5761 
5762 	if (macupdate)
5763 		mac_tx_update(erip->mh);
5764 
5765 	return (erip->txhung >= eri_txhung_limit);
5766 }
5767 #endif
5768