1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * SunOS MT STREAMS ERI(PCI) 10/100 Mb Ethernet Device Driver
30  */
31 
32 #include	<sys/types.h>
33 #include	<sys/debug.h>
34 #include	<sys/stropts.h>
35 #include	<sys/stream.h>
36 #include	<sys/strsubr.h>
37 #include	<sys/kmem.h>
38 #include	<sys/crc32.h>
39 #include	<sys/ddi.h>
40 #include	<sys/sunddi.h>
41 #include	<sys/strsun.h>
42 #include	<sys/stat.h>
43 #include	<sys/cpu.h>
44 #include	<sys/kstat.h>
45 #include	<inet/common.h>
46 #include	<sys/pattr.h>
47 #include	<inet/mi.h>
48 #include	<inet/nd.h>
49 #include	<sys/ethernet.h>
50 #include	<sys/vlan.h>
51 #include	<sys/policy.h>
52 #include	<sys/mac.h>
53 #include	<sys/mac_ether.h>
54 #include	<sys/dlpi.h>
55 
56 #include	<sys/pci.h>
57 
58 #include	"eri_phy.h"
59 #include	"eri_mac.h"
60 #include	"eri.h"
61 #include	"eri_common.h"
62 
63 #include	"eri_msg.h"
64 
65 /*
66  *  **** Function Prototypes *****
67  */
68 /*
69  * Entry points (man9e)
70  */
71 static	int	eri_attach(dev_info_t *, ddi_attach_cmd_t);
72 static	int	eri_detach(dev_info_t *, ddi_detach_cmd_t);
73 static	uint_t	eri_intr(caddr_t);
74 
75 /*
76  * I/O (Input/Output) Functions
77  */
78 static	boolean_t	eri_send_msg(struct eri *, mblk_t *);
79 static  mblk_t		*eri_read_dma(struct eri *, volatile struct rmd *,
80 			    volatile int, uint64_t flags);
81 
82 /*
83  * Initialization Functions
84  */
85 static  boolean_t	eri_init(struct eri *);
86 static	int	eri_allocthings(struct eri *);
87 static  int	eri_init_xfer_params(struct eri *);
88 static  void	eri_statinit(struct eri *);
89 static	int	eri_burstsize(struct eri *);
90 
91 static	void	eri_setup_mac_address(struct eri *, dev_info_t *);
92 
93 static	uint32_t eri_init_rx_channel(struct eri *);
94 static	void	eri_init_rx(struct eri *);
95 static	void	eri_init_txmac(struct eri *);
96 
97 /*
98  * Un-init Functions
99  */
100 static	uint32_t eri_txmac_disable(struct eri *);
101 static	uint32_t eri_rxmac_disable(struct eri *);
102 static	int	eri_stop(struct eri *);
103 static	void	eri_uninit(struct eri *erip);
104 static	int	eri_freebufs(struct eri *);
105 static	boolean_t	eri_reclaim(struct eri *, uint32_t);
106 
107 /*
108  * Transceiver (xcvr) Functions
109  */
110 static	int	eri_new_xcvr(struct eri *); /* Initializes & detects xcvrs */
111 static	int	eri_reset_xcvr(struct eri *);
112 
113 #ifdef	ERI_10_10_FORCE_SPEED_WORKAROUND
114 static	void	eri_xcvr_force_mode(struct eri *, uint32_t *);
115 #endif
116 
117 static	void	eri_mif_poll(struct eri *, soft_mif_enable_t);
118 static	void	eri_check_link(struct eri *);
119 static	uint32_t eri_check_link_noind(struct eri *);
120 static	link_state_t eri_mif_check(struct eri *, uint16_t, uint16_t);
121 static	void    eri_mii_write(struct eri *, uint8_t, uint16_t);
122 static	uint32_t eri_mii_read(struct eri *, uint8_t, uint16_t *);
123 
124 /*
125  * Reset Functions
126  */
127 static	uint32_t eri_etx_reset(struct eri *);
128 static	uint32_t eri_erx_reset(struct eri *);
129 
130 /*
131  * Error Functions
132  */
133 static	void eri_fatal_err(struct eri *, uint32_t);
134 static	void eri_nonfatal_err(struct eri *, uint32_t);
135 
136 #ifdef	ERI_TX_HUNG
137 static	int eri_check_txhung(struct eri *);
138 #endif
139 
140 /*
141  * Hardening Functions
142  */
143 static void eri_fault_msg(struct eri *, uint_t, msg_t, const char *, ...);
144 
145 /*
146  * Misc Functions
147  */
148 static void	eri_savecntrs(struct eri *);
149 
150 static	void	eri_stop_timer(struct eri *erip);
151 static	void	eri_start_timer(struct eri *erip, fptrv_t func, clock_t msec);
152 
153 static	void eri_bb_force_idle(struct eri *);
154 
155 /*
156  * Utility Functions
157  */
158 static	mblk_t *eri_allocb(size_t size);
159 static	mblk_t *eri_allocb_sp(size_t size);
160 static	int	eri_param_get(queue_t *q, mblk_t *mp, caddr_t cp);
161 static	int	eri_param_set(queue_t *, mblk_t *, char *, caddr_t);
162 
163 /*
164  * Functions to support ndd
165  */
166 static	void	eri_nd_free(caddr_t *nd_pparam);
167 
168 static	boolean_t	eri_nd_load(caddr_t *nd_pparam, char *name,
169 				pfi_t get_pfi, pfi_t set_pfi, caddr_t data);
170 
171 static	int	eri_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp);
172 static	void	eri_param_cleanup(struct eri *);
173 static	int	eri_param_register(struct eri *, param_t *, int);
174 static	void	eri_process_ndd_ioctl(struct eri *, queue_t *, mblk_t *, int);
175 static	int	eri_mk_mblk_tail_space(mblk_t *, mblk_t **, size_t);
176 
177 
178 static	void eri_loopback(struct eri *, queue_t *, mblk_t *);
179 
180 static uint32_t	eri_ladrf_bit(const uint8_t *);
181 
182 
183 /*
184  * Nemo (GLDv3) Functions.
185  */
186 static	int		eri_m_stat(void *, uint_t, uint64_t *);
187 static	int		eri_m_start(void *);
188 static	void		eri_m_stop(void *);
189 static	int		eri_m_promisc(void *, boolean_t);
190 static	int		eri_m_multicst(void *, boolean_t, const uint8_t *);
191 static	int		eri_m_unicst(void *, const uint8_t *);
192 static	void		eri_m_ioctl(void *, queue_t *, mblk_t *);
193 static	boolean_t	eri_m_getcapab(void *, mac_capab_t, void *);
194 static	mblk_t		*eri_m_tx(void *, mblk_t *);
195 
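/*
 * GLDv3 callback table. The NULL entry is the optional resources
 * callback, which this driver does not provide.
 */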
196 static mac_callbacks_t eri_m_callbacks = {
197 	MC_IOCTL | MC_GETCAPAB,
198 	eri_m_stat,
199 	eri_m_start,
200 	eri_m_stop,
201 	eri_m_promisc,
202 	eri_m_multicst,
203 	eri_m_unicst,
204 	eri_m_tx,
205 	NULL,
206 	eri_m_ioctl,
207 	eri_m_getcapab
208 };
209 
210 /*
211  * Define PHY Vendors: Matches to IEEE
212  * Organizationally Unique Identifier (OUI)
213  */
214 /*
215  * The following vendor is supported as an Internal XCVR
216  */
217 #define	PHY_VENDOR_LUCENT	0x601d
218 
219 #define	PHY_LINK_NONE		0	/* Not attempted yet or retry */
220 #define	PHY_LINK_DOWN		1	/* Not being used	*/
221 #define	PHY_LINK_UP		2	/* Not being used	*/
222 
223 #define	AUTO_SPEED		0
224 #define	FORCE_SPEED		1
225 
226 /*
227  * MIB II broadcast/multicast packets
228  */
229 
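/*
 * The multicast (group) bit is the least significant bit of the first
 * address octet, which is what IS_MULTICAST tests.
 */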
230 #define	IS_BROADCAST(pkt) (bcmp(pkt, &etherbroadcastaddr, ETHERADDRL) == 0)
231 #define	IS_MULTICAST(pkt) ((pkt[0] & 01) == 1)
232 
233 #define	BUMP_InNUcast(erip, pkt) \
234 		if (IS_BROADCAST(pkt)) { \
235 			HSTAT(erip, brdcstrcv); \
236 		} else if (IS_MULTICAST(pkt)) { \
237 			HSTAT(erip, multircv); \
238 		}
239 
240 #define	BUMP_OutNUcast(erip, pkt) \
241 		if (IS_BROADCAST(pkt)) { \
242 			HSTAT(erip, brdcstxmt); \
243 		} else if (IS_MULTICAST(pkt)) { \
244 			HSTAT(erip, multixmt); \
245 		}
246 
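/*
 * Advance a transmit descriptor pointer, wrapping back to the base of
 * the ring when the limit is reached.
 */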
247 #define	NEXTTMDP(tbasep, tmdlimp, tmdp)	(((tmdp) + 1) == tmdlimp	\
248 	? tbasep : ((tmdp) + 1))
249 
250 #define	ETHERHEADER_SIZE (sizeof (struct ether_header))
251 
252 #ifdef	ERI_RCV_CKSUM
253 #define	ERI_PROCESS_READ(erip, bp, sum)				\
254 {								\
255 	t_uscalar_t	type;					\
256 	uint_t	start_offset, end_offset;			\
257 								\
258 	*(bp->b_wptr) = 0;	/* pad byte */			\
259 								\
260 	/*							\
261 	 * update MIB II statistics				\
262 	 */							\
263 	HSTAT(erip, ipackets64);				\
264 	HSTATN(erip, rbytes64, len);				\
265 	BUMP_InNUcast(erip, bp->b_rptr);			\
266 	type = get_ether_type(bp->b_rptr);			\
267 	if (type == ETHERTYPE_IP || type == ETHERTYPE_IPV6) {	\
268 		start_offset = 0;				\
269 		end_offset = MBLKL(bp) - ETHERHEADER_SIZE;	\
270 		(void) hcksum_assoc(bp, NULL, NULL,		\
271 			start_offset, 0, end_offset, sum, 	\
272 			HCK_PARTIALCKSUM, 0);			\
273 	} else {						\
274 		/*						\
275 		 * Strip the PADS for 802.3			\
276 		 */						\
277 		if (type <= ETHERMTU)				\
278 			bp->b_wptr = bp->b_rptr +		\
279 				ETHERHEADER_SIZE + type;	\
280 	}							\
281 }
282 #else
283 
284 #define	ERI_PROCESS_READ(erip, bp)				\
285 {								\
286 	t_uscalar_t	type;					\
287 	type = get_ether_type(bp->b_rptr);			\
288 								\
289 	/*							\
290 	 * update MIB II statistics				\
291 	 */							\
292 	HSTAT(erip, ipackets64);				\
293 	HSTATN(erip, rbytes64, len);				\
294 	BUMP_InNUcast(erip, bp->b_rptr);			\
295 	/*							\
296 	 * Strip the PADS for 802.3				\
297 	 */							\
298 	if (type <= ETHERMTU)					\
299 		bp->b_wptr = bp->b_rptr + ETHERHEADER_SIZE + 	\
300 			type;					\
301 }
302 #endif  /* ERI_RCV_CKSUM */
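/*
 * When ERI_RCV_CKSUM is defined, the RX descriptor supplies a 16-bit
 * partial checksum computed by the hardware; ERI_PROCESS_READ hands it
 * to the stack via hcksum_assoc() with HCK_PARTIALCKSUM so IP can
 * complete the verification.
 */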
303 
304 /*
305  * TX Interrupt Rate
306  */
307 static	int	tx_interrupt_rate = 16;
308 
309 /*
310  * Ethernet broadcast address definition.
311  */
312 static uint8_t	etherbroadcastaddr[] = {
313 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
314 };
315 
316 /*
317  * Flags to record which resources have been allocated, for cleanup on failure
318  */
319 #define	ERI_DESC_HANDLE_ALLOC	0x0001
320 #define	ERI_DESC_MEM_ALLOC	0x0002
321 #define	ERI_DESC_MEM_MAP	0x0004
322 #define	ERI_XMIT_HANDLE_ALLOC	0x0008
323 #define	ERI_XMIT_HANDLE_BIND	0x0010
324 #define	ERI_RCV_HANDLE_ALLOC	0x0020
325 #define	ERI_RCV_HANDLE_BIND	0x0040
326 #define	ERI_XMIT_DVMA_ALLOC	0x0100
327 #define	ERI_RCV_DVMA_ALLOC	0x0200
328 #define	ERI_XBUFS_HANDLE_ALLOC  0x0400
329 #define	ERI_XBUFS_KMEM_ALLOC    0x0800
330 #define	ERI_XBUFS_KMEM_DMABIND  0x1000
331 
332 
333 #define	ERI_DONT_STRIP_CRC
334 /*
335  * Translate a kernel virtual address to i/o address.
336  */
337 #define	ERI_IOPBIOADDR(erip, a) \
338 	((erip)->iopbiobase + ((uintptr_t)a - (erip)->iopbkbase))
339 
340 /*
341  * ERI Configuration Register Value
342  * Used to configure parameters that define DMA burst
343  * and internal arbitration behavior.
344  * For equal TX and RX bursts, set the following in the global
345  * configuration register:
346  * static	int	global_config = 0x42;
347  */
348 
349 /*
350  * ERI ERX Interrupt Blanking Time
351  * Each count is about 16 us (2048 clocks) for 66 MHz PCI.
352  */
353 static	int	intr_blank_time = 6;	/* for about 96 us */
354 static	int	intr_blank_packets = 8;	/* interrupt after 8 packets */
355 
356 /*
357  * ERX PAUSE Threshold Register value
358  * The following value is for an OFF Threshold of about 15.5 Kbytes
359  * and an ON Threshold of 4K bytes.
360  */
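/*
 * The thresholds are in 64-byte units: OFF = 0xf8 (248 * 64 = 15872
 * bytes) in the low field, ON = 0x40 (64 * 64 = 4096 bytes) in the
 * field at bit 12.
 */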
361 static	int rx_pause_threshold = 0xf8 | (0x40 << 12);
362 static	int eri_reinit_fatal = 0;
363 #ifdef	DEBUG
364 static	int noteri = 0;
365 #endif
366 
367 #ifdef	ERI_TX_HUNG
368 static	int eri_reinit_txhung = 0;
369 #endif
370 
371 #ifdef ERI_HDX_BUG_WORKAROUND
372 /*
373  * By default, pad packets in half-duplex mode to 97 bytes.
374  * To disable this, add the following to /etc/system:
375  * set eri:eri_hdx_pad_enable=0
376  */
377 static	uchar_t eri_hdx_pad_enable = 1;
378 #endif
379 
380 /*
381  * Default values to initialize the cache line size and latency timer
382  * registers in the PCI configuration space.
383  * ERI_G_CACHE_LINE_SIZE_16 is defined as 16 since RIO expects the value
384  * in units of 4 bytes.
385  */
386 #ifdef ERI_PM_WORKAROUND_PCI
387 static int eri_pci_cache_line = ERI_G_CACHE_LINE_SIZE_32; /* 128 bytes */
388 static int eri_pci_latency_timer = 0xff;		/* 255 PCI cycles */
389 #else
390 static int eri_pci_cache_line = ERI_G_CACHE_LINE_SIZE_16; /* 64 bytes */
391 static int eri_pci_latency_timer = 0x40;		/* 64 PCI cycles */
392 #endif
393 #define	ERI_CACHE_LINE_SIZE	(eri_pci_cache_line << ERI_G_CACHE_BIT)
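/*
 * eri_stop() and the ERX/ETX reset routines write this value into the
 * software reset register together with the reset bits, then poll for
 * the register to read back as just the cache line size.
 */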
394 
395 /*
396  * Claim the device is ultra-capable of burst in the beginning.  Use
397  * the value returned by ddi_dma_burstsizes() to actually set the ERI
398  * global configuration register later.
399  *
400  * PCI_ERI supports Infinite burst or 64-byte-multiple bursts.
401  */
402 #define	ERI_LIMADDRLO	((uint64_t)0x00000000)
403 #define	ERI_LIMADDRHI	((uint64_t)0xffffffff)
404 
405 static ddi_dma_attr_t dma_attr = {
406 	DMA_ATTR_V0,		/* version number. */
407 	(uint64_t)ERI_LIMADDRLO, /* low address */
408 	(uint64_t)ERI_LIMADDRHI, /* high address */
409 	(uint64_t)0x00ffffff,	/* address counter max */
410 	(uint64_t)1,		/* alignment */
411 	(uint_t)0xe000e0,	/* dlim_burstsizes for 32 and 64 bit xfers */
412 	(uint32_t)0x1,		/* minimum transfer size */
413 	(uint64_t)0x7fffffff,	/* maximum transfer size */
414 	(uint64_t)0x00ffffff,	/* maximum segment size */
415 	1,			/* scatter/gather list length */
416 	(uint32_t)1,		/* granularity */
417 	(uint_t)0		/* attribute flags */
418 };
419 
420 static ddi_dma_attr_t desc_dma_attr = {
421 	DMA_ATTR_V0,		/* version number. */
422 	(uint64_t)ERI_LIMADDRLO, /* low address */
423 	(uint64_t)ERI_LIMADDRHI, /* high address */
424 	(uint64_t)0x00ffffff,	/* address counter max */
425 	(uint64_t)8,		/* alignment */
426 	(uint_t)0xe000e0,	/* dlim_burstsizes for 32 and 64 bit xfers */
427 	(uint32_t)0x1,		/* minimum transfer size */
428 	(uint64_t)0x7fffffff,	/* maximum transfer size */
429 	(uint64_t)0x00ffffff,	/* maximum segment size */
430 	1,			/* scatter/gather list length */
431 	16,			/* granularity */
432 	0			/* attribute flags */
433 };
434 
435 ddi_dma_lim_t eri_dma_limits = {
436 	(uint64_t)ERI_LIMADDRLO, /* dlim_addr_lo */
437 	(uint64_t)ERI_LIMADDRHI, /* dlim_addr_hi */
438 	(uint64_t)ERI_LIMADDRHI, /* dlim_cntr_max */
439 	(uint_t)0x00e000e0,	/* dlim_burstsizes for 32 and 64 bit xfers */
440 	(uint32_t)0x1,		/* dlim_minxfer */
441 	1024			/* dlim_speed */
442 };
443 
444 /*
445  * Link Configuration variables
446  *
447  * On Motherboard implementations, 10/100 Mbps speeds may be supported
448  * by using both the Serial Link and the MII on Non-serial-link interface.
449  * When both links are present, the driver automatically tries to bring up
450  * both. If both are up, the Gigabit Serial Link is selected for use, by
451  * default. The following configuration variable is used to force the selection
452  * of one of the links when both are up.
453  * To change the default selection to the MII link when both the Serial
454  * Link and the MII link are up, change default_link to 1.
455  *
456  * Once a link is in use, the driver will continue to use that link until it
457  * goes down. When it goes down, the driver will look at the status of both the
458  * links again for link selection.
459  *
460  * Currently the standard is not stable w.r.t. gigabit link configuration
461  * using auto-negotiation procedures. Meanwhile, the link may be configured
462  * in "forced" mode using the "autonegotiation enable" bit (bit-12) in the
463  * PCS MII Command Register. In this mode the PCS sends "idles" until it
464  * sees "idles" as initialization, instead of the Link Configuration protocol
465  * where a Config register is exchanged. In this mode, the ERI is programmed
466  * for full-duplex operation with both pauseTX and pauseRX (for flow control)
467  * enabled.
468  */
469 
470 static	int	select_link = 0; /* automatic selection */
471 static	int	default_link = 0; /* Select Serial link if both are up */
472 
473 /*
474  * The following variables are used for configuring link-operation
475  * for all the "eri" interfaces in the system.
476  * Later these parameters may be changed per interface using the "ndd" command
477  * These parameters may also be specified as properties using the .conf
478  * file mechanism for each interface.
479  */
480 
481 /*
482  * The following variable value will be overridden by "link-pulse-disabled"
483  * property which may be created by OBP or eri.conf file. This property is
484  * applicable only for 10 Mbps links.
485  */
486 static	int	link_pulse_disabled = 0;	/* link pulse disabled */
487 
488 /* For MII-based FastEthernet links */
489 static	int	adv_autoneg_cap = 1;
490 static	int	adv_100T4_cap = 0;
491 static	int	adv_100fdx_cap = 1;
492 static	int	adv_100hdx_cap = 1;
493 static	int	adv_10fdx_cap = 1;
494 static	int	adv_10hdx_cap = 1;
495 static	int	adv_pauseTX_cap =  0;
496 static	int	adv_pauseRX_cap =  0;
497 
498 /*
499  * The following gap parameters are in terms of byte times.
500  */
501 static	int	ipg0 = 8;
502 static	int	ipg1 = 8;
503 static	int	ipg2 = 4;
504 
505 static	int	lance_mode = 1;		/* to enable LANCE mode */
506 static	int	mifpoll_enable = 0;	/* to enable mif poll */
507 static	int	ngu_enable = 0;		/* to enable Never Give Up mode */
508 
509 static	int	eri_force_mlf = 0; 	/* to enable mif poll */
510 static	int	eri_phy_mintrans = 1;	/* Lu3X31T mintrans algorithm */
511 /*
512  * For the MII interface, the External Transceiver is selected when present.
513  * The following variable is used to select the Internal Transceiver even
514  * when the External Transceiver is present.
515  */
516 static	int	use_int_xcvr = 0;
517 static	int	pace_size = 0;	/* Do not use pacing for now */
518 
519 static	int	eri_use_dvma_rx = 0;	/* =1:use dvma */
520 static	int	eri_use_dvma_tx = 1;	/* =1:use dvma */
521 static	int	eri_rx_bcopy_max = RX_BCOPY_MAX;	/* use bcopy() below this size */
522 static	int	eri_tx_bcopy_max = TX_BCOPY_MAX;	/* use bcopy() below this size */
523 static	int	eri_overflow_reset = 1;	/* global reset if rx_fifo_overflow */
524 static	int	eri_tx_ring_size = 2048; /* number of entries in tx ring */
525 static	int	eri_rx_ring_size = 1024; /* number of entries in rx ring */
526 /*
527  * The following parameters may be configured by the user. If they are not
528  * configured by the user, the values will be based on the capabilities of
529  * the transceiver.
530  * The value "ERI_NOTUSR" is ORed with the parameter value to indicate values
531  * which are NOT configured by the user.
532  */
533 
534 #define	ERI_NOTUSR	0x0f000000
535 #define	ERI_MASK_1BIT	0x1
536 #define	ERI_MASK_2BIT	0x3
537 #define	ERI_MASK_8BIT	0xff
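/*
 * For example, a parameter left at its driver default may be stored as
 * (ERI_NOTUSR | 1); masking with ~ERI_NOTUSR recovers the value, while
 * testing the ERI_NOTUSR bits shows whether the user ever set it.
 * (Illustrative; see eri_init_xfer_params() for the actual handling.)
 */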
538 
539 
540 /*
541  * Note:
542  * The ERI hardware supports all of the link capabilities listed above.
543  * Only when an External Transceiver is selected for MII-based FastEthernet
544  * link operation, the capabilities depend upon the capabilities of the
545  * External Transceiver.
546  */
547 
548 /* ------------------------------------------------------------------------- */
549 
550 static  param_t	param_arr[] = {
551 	/* min		max		value	r/w/hidden+name */
552 	{  0,		2,		2,	"-transceiver_inuse"},
553 	{  0,		1,		0,	"-link_status"},
554 	{  0,		1,		0,	"-link_speed"},
555 	{  0,		1,		0,	"-link_mode"},
556 	{  0,		255,		8,	"+ipg1"},
557 	{  0,		255,		4,	"+ipg2"},
558 	{  0,		1,		0,	"+use_int_xcvr"},
559 	{  0,		255,		0,	"+pace_size"},
560 	{  0,		1,		1,	"+adv_autoneg_cap"},
561 	{  0,		1,		1,	"+adv_100T4_cap"},
562 	{  0,		1,		1,	"+adv_100fdx_cap"},
563 	{  0,		1,		1,	"+adv_100hdx_cap"},
564 	{  0,		1,		1,	"+adv_10fdx_cap"},
565 	{  0,		1,		1,	"+adv_10hdx_cap"},
566 	{  0,		1,		1,	"-autoneg_cap"},
567 	{  0,		1,		1,	"-100T4_cap"},
568 	{  0,		1,		1,	"-100fdx_cap"},
569 	{  0,		1,		1,	"-100hdx_cap"},
570 	{  0,		1,		1,	"-10fdx_cap"},
571 	{  0,		1,		1,	"-10hdx_cap"},
572 	{  0,		1,		0,	"-lp_autoneg_cap"},
573 	{  0,		1,		0,	"-lp_100T4_cap"},
574 	{  0,		1,		0,	"-lp_100fdx_cap"},
575 	{  0,		1,		0,	"-lp_100hdx_cap"},
576 	{  0,		1,		0,	"-lp_10fdx_cap"},
577 	{  0,		1,		0,	"-lp_10hdx_cap"},
578 	{  0,		1,		1,	"+lance_mode"},
579 	{  0,		31,		8,	"+ipg0"},
580 	{  0,		127,		6,	"+intr_blank_time"},
581 	{  0,		255,		8,	"+intr_blank_packets"},
582 	{  0,		1,		1,	"!serial-link"},
583 	{  0,		2,		1,	"!non-serial-link"},
584 	{  0,		1,		0,	"%select-link"},
585 	{  0,		1,		0,	"%default-link"},
586 	{  0,		2,		0,	"!link-in-use"},
587 	{  0,		1,		1,	"%adv_asm_dir_cap"},
588 	{  0,		1,		1,	"%adv_pause_cap"},
589 	{  0,		1,		0,	"!asm_dir_cap"},
590 	{  0,		1,		0,	"!pause_cap"},
591 	{  0,		1,		0,	"!lp_asm_dir_cap"},
592 	{  0,		1,		0,	"!lp_pause_cap"},
593 };
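/*
 * The leading character of each name encodes ndd access ("r/w/hidden"
 * above): '+' parameters are user-settable, '-' are read-only, and the
 * '!' and '%' entries appear to be hidden link-configuration knobs.
 */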
594 
595 DDI_DEFINE_STREAM_OPS(eri_dev_ops, nulldev, nulldev, eri_attach, eri_detach,
596 	nodev, NULL, D_MP, NULL);
597 
598 /*
599  * This is the loadable module wrapper.
600  */
601 #include <sys/modctl.h>
602 
603 /*
604  * Module linkage information for the kernel.
605  */
606 static struct modldrv modldrv = {
607 	&mod_driverops,	/* Type of module.  This one is a driver */
608 	"Sun RIO 10/100 Mb Ethernet",
609 	&eri_dev_ops,	/* driver ops */
610 };
611 
612 static struct modlinkage modlinkage = {
613 	MODREV_1, &modldrv, NULL
614 };
615 
616 /*
617  * Hardware Independent Functions
618  * New Section
619  */
620 
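/*
 * mac_init_ops() must prepare eri_dev_ops before mod_install() runs,
 * and every path that unloads the module (or fails to install it)
 * must undo it with mac_fini_ops().
 */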
621 int
622 _init(void)
623 {
624 	int	status;
625 
626 	mac_init_ops(&eri_dev_ops, "eri");
627 	if ((status = mod_install(&modlinkage)) != 0) {
628 		mac_fini_ops(&eri_dev_ops);
629 	}
630 	return (status);
631 }
632 
633 int
634 _fini(void)
635 {
636 	int status;
637 
638 	status = mod_remove(&modlinkage);
639 	if (status == 0) {
640 		mac_fini_ops(&eri_dev_ops);
641 	}
642 	return (status);
643 }
644 
645 int
646 _info(struct modinfo *modinfop)
647 {
648 	return (mod_info(&modlinkage, modinfop));
649 }
650 
651 
652 /*
653  * Interface exists: make available by filling in network interface
654  * record.  System will initialize the interface when it is ready
655  * to accept packets.
656  */
657 static int
658 eri_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
659 {
660 	struct eri *erip = NULL;
661 	mac_register_t *macp = NULL;
662 	int	regno;
663 	boolean_t	doinit;
664 	boolean_t	mutex_inited = B_FALSE;
665 	boolean_t	intr_add = B_FALSE;
666 
667 	switch (cmd) {
668 	case DDI_ATTACH:
669 		break;
670 
671 	case DDI_RESUME:
672 		if ((erip = ddi_get_driver_private(dip)) == NULL)
673 			return (DDI_FAILURE);
674 
675 		mutex_enter(&erip->intrlock);
676 		erip->flags &= ~ERI_SUSPENDED;
677 		erip->init_macregs = 1;
678 		param_linkup = 0;
679 		erip->stats.link_up = LINK_STATE_DOWN;
680 		erip->linkcheck = 0;
681 
682 		doinit =  (erip->flags & ERI_STARTED) ? B_TRUE : B_FALSE;
683 		mutex_exit(&erip->intrlock);
684 
685 		if (doinit && !eri_init(erip)) {
686 			return (DDI_FAILURE);
687 		}
688 		return (DDI_SUCCESS);
689 
690 	default:
691 		return (DDI_FAILURE);
692 	}
693 
694 	/*
695 	 * Allocate soft device data structure
696 	 */
697 	erip = kmem_zalloc(sizeof (struct eri), KM_SLEEP);
698 
699 	/*
700 	 * Initialize as many elements as possible.
701 	 */
702 	ddi_set_driver_private(dip, erip);
703 	erip->dip = dip;			/* dip	*/
704 	erip->instance = ddi_get_instance(dip);	/* instance */
705 	erip->flags = 0;
706 	erip->multi_refcnt = 0;
707 	erip->promisc = B_FALSE;
708 
709 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
710 		ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
711 		    "mac_alloc failed");
712 		goto attach_fail;
713 	}
714 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
715 	macp->m_driver = erip;
716 	macp->m_dip = dip;
717 	macp->m_src_addr = erip->ouraddr;
718 	macp->m_callbacks = &eri_m_callbacks;
719 	macp->m_min_sdu = 0;
720 	macp->m_max_sdu = ETHERMTU;
721 	macp->m_margin = VLAN_TAGSZ;
722 
723 	/*
724 	 * Map in the device registers.
725 	 * Separate pointers will be set up for the following
726 	 * register groups within the GEM Register Space:
727 	 * 	Global register set
728 	 * 	ETX register set
729 	 * 	ERX register set
730 	 * 	BigMAC register set.
731 	 * 	MIF register set
732 	 */
733 
734 	if (ddi_dev_nregs(dip, &regno) != (DDI_SUCCESS)) {
735 		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
736 		    "ddi_dev_nregs failed, returned %d", regno);
737 		goto attach_fail;
738 	}
739 
740 	/*
741 	 * Map the PCI config space
742 	 */
743 	if (pci_config_setup(dip, &erip->pci_config_handle) != DDI_SUCCESS) {
744 		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
745 		    "%s pci_config_setup()", config_space_fatal_msg);
746 		goto attach_fail;
747 	}
748 
749 	/*
750 	 * Initialize device attributes structure
751 	 */
752 	erip->dev_attr.devacc_attr_version =	DDI_DEVICE_ATTR_V0;
753 	erip->dev_attr.devacc_attr_dataorder =	DDI_STRICTORDER_ACC;
754 	erip->dev_attr.devacc_attr_endian_flags =	DDI_STRUCTURE_LE_ACC;
755 
756 	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&(erip->globregp), 0, 0,
757 	    &erip->dev_attr, &erip->globregh)) {
758 		goto attach_fail;
759 	}
760 	erip->etxregh =		erip->globregh;
761 	erip->erxregh =		erip->globregh;
762 	erip->bmacregh =	erip->globregh;
763 	erip->mifregh =		erip->globregh;
764 
765 	erip->etxregp =  (void *)(((caddr_t)erip->globregp) + 0x2000);
766 	erip->erxregp =  (void *)(((caddr_t)erip->globregp) + 0x4000);
767 	erip->bmacregp = (void *)(((caddr_t)erip->globregp) + 0x6000);
768 	erip->mifregp =  (void *)(((caddr_t)erip->globregp) + 0x6200);
769 
770 	/*
771 	 * Map the software reset register.
772 	 */
773 	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&(erip->sw_reset_reg),
774 	    0x1010, 4, &erip->dev_attr, &erip->sw_reset_regh)) {
775 		ERI_FAULT_MSG1(erip, SEVERITY_MID, ERI_VERB_MSG,
776 		    mregs_4soft_reset_fail_msg);
777 		goto attach_fail;
778 	}
779 
780 	/*
781 	 * Try and stop the device.
782 	 * This keeps it quiet until we are ready to handle interrupts.
783 	 */
784 	if (eri_stop(erip))
785 		goto attach_fail;
786 
787 	/*
788 	 * set PCI latency timer register.
789 	 */
790 	pci_config_put8(erip->pci_config_handle, PCI_CONF_LATENCY_TIMER,
791 	    (uchar_t)eri_pci_latency_timer);
792 
793 	if (ddi_intr_hilevel(dip, 0)) {
794 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
795 		    " high-level interrupts are not supported");
796 		goto attach_fail;
797 	}
798 
799 	/*
800 	 * Get the interrupt cookie so the mutexes can be
801 	 * initialized.
802 	 */
803 	if (ddi_get_iblock_cookie(dip, 0, &erip->cookie) != DDI_SUCCESS)
804 		goto attach_fail;
805 
806 	/*
807 	 * Initialize mutexes for this device.
808 	 */
809 	mutex_init(&erip->xmitlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
810 	mutex_init(&erip->intrlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
811 	mutex_init(&erip->linklock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
812 	mutex_init(&erip->xcvrlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
813 
814 	mutex_inited = B_TRUE;
815 
816 	/*
817 	 * Add interrupt to system
818 	 */
819 	if (ddi_add_intr(dip, 0, &erip->cookie, 0, eri_intr, (caddr_t)erip) ==
820 	    DDI_SUCCESS)
821 		intr_add = B_TRUE;
822 	else {
823 		goto attach_fail;
824 	}
825 
826 	/*
827 	 * Set up the ethernet mac address.
828 	 */
829 	(void) eri_setup_mac_address(erip, dip);
830 
831 	if (eri_init_xfer_params(erip))
832 		goto attach_fail;
833 
834 	if (eri_burstsize(erip) == DDI_FAILURE) {
835 		goto attach_fail;
836 	}
837 
838 	/*
839 	 * Set up the receive and transmit ring sizes (powers of two).
840 	 */
841 	ERI_RPENDING = eri_rx_ring_size;
842 	ERI_TPENDING = eri_tx_ring_size;
843 
844 	erip->rpending_mask = ERI_RPENDING - 1;
845 	erip->rmdmax_mask = ERI_RPENDING - 1;
846 	erip->mif_config = (ERI_PHY_BMSR << ERI_MIF_CFGPR_SHIFT);
847 
848 	erip->stats.pmcap = ERI_PMCAP_NONE;
849 	if (pci_report_pmcap(dip, PCI_PM_IDLESPEED, (void *)4000) ==
850 	    DDI_SUCCESS)
851 		erip->stats.pmcap = ERI_PMCAP_4MHZ;
852 
853 	if (mac_register(macp, &erip->mh) != 0)
854 		goto attach_fail;
855 
856 	mac_free(macp);
857 
858 	return (DDI_SUCCESS);
859 
860 attach_fail:
861 	if (erip->pci_config_handle)
862 		(void) pci_config_teardown(&erip->pci_config_handle);
863 
864 	if (mutex_inited) {
865 		mutex_destroy(&erip->xmitlock);
866 		mutex_destroy(&erip->intrlock);
867 		mutex_destroy(&erip->linklock);
868 		mutex_destroy(&erip->xcvrlock);
869 	}
870 
871 	ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, attach_fail_msg);
872 
873 	if (intr_add)
874 		ddi_remove_intr(dip, 0, erip->cookie);
875 
876 	if (erip->globregh)
877 		ddi_regs_map_free(&erip->globregh);
878 
879 	if (macp != NULL)
880 		mac_free(macp);
881 	if (erip != NULL)
882 		kmem_free(erip, sizeof (*erip));
883 
884 	return (DDI_FAILURE);
885 }
886 
887 static int
888 eri_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
889 {
890 	struct eri 	*erip;
891 	int i;
892 
893 	if ((erip = ddi_get_driver_private(dip)) == NULL) {
894 		/*
895 		 * No resources allocated.
896 		 */
897 		return (DDI_FAILURE);
898 	}
899 
900 	switch (cmd) {
901 	case DDI_DETACH:
902 		break;
903 
904 	case DDI_SUSPEND:
905 		erip->flags |= ERI_SUSPENDED;
906 		eri_uninit(erip);
907 		return (DDI_SUCCESS);
908 
909 	default:
910 		return (DDI_FAILURE);
911 	}
912 
913 	if (erip->flags & (ERI_RUNNING | ERI_SUSPENDED)) {
914 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, busy_msg);
915 		return (DDI_FAILURE);
916 	}
917 
918 	if (mac_unregister(erip->mh) != 0) {
919 		return (DDI_FAILURE);
920 	}
921 
922 	/*
923 	 * Make the device quiescent
924 	 */
925 	(void) eri_stop(erip);
926 
927 	/*
928 	 * Remove instance of the intr
929 	 */
930 	ddi_remove_intr(dip, 0, erip->cookie);
931 
932 	if (erip->pci_config_handle)
933 		(void) pci_config_teardown(&erip->pci_config_handle);
934 
935 	/*
936 	 * Destroy all mutexes and data structures allocated during
937 	 * attach time.
938 	 */
939 
940 	if (erip->globregh)
941 		ddi_regs_map_free(&erip->globregh);
942 
943 	erip->etxregh =		NULL;
944 	erip->erxregh =		NULL;
945 	erip->bmacregh =	NULL;
946 	erip->mifregh =		NULL;
947 	erip->globregh =	NULL;
948 
949 	if (erip->sw_reset_regh)
950 		ddi_regs_map_free(&erip->sw_reset_regh);
951 
952 	if (erip->ksp)
953 		kstat_delete(erip->ksp);
954 
955 	eri_stop_timer(erip); /* acquire linklock */
956 	eri_start_timer(erip, eri_check_link, 0);
957 	mutex_destroy(&erip->xmitlock);
958 	mutex_destroy(&erip->intrlock);
959 	mutex_destroy(&erip->linklock);
960 	mutex_destroy(&erip->xcvrlock);
961 
962 	if (erip->md_h) {
963 		if (ddi_dma_unbind_handle(erip->md_h) ==
964 		    DDI_FAILURE)
965 			return (DDI_FAILURE);
966 		ddi_dma_mem_free(&erip->mdm_h);
967 		ddi_dma_free_handle(&erip->md_h);
968 	}
969 
970 	if (eri_freebufs(erip))
971 		return (DDI_FAILURE);
972 
973 	/* dvma handle case */
974 	if (erip->eri_dvmaxh) {
975 		(void) dvma_release(erip->eri_dvmaxh);
976 		erip->eri_dvmaxh = NULL;
977 	}
978 
979 	if (erip->eri_dvmarh) {
980 		(void) dvma_release(erip->eri_dvmarh);
981 		erip->eri_dvmarh = NULL;
982 	} else {
983 		/*
984 		 * xmit_dma_mode: erip->ndmaxh[i] == NULL when dvma is in use
985 		 */
987 		for (i = 0; i < ERI_TPENDING; i++)
988 			if (erip->ndmaxh[i])
989 				ddi_dma_free_handle(&erip->ndmaxh[i]);
990 		for (i = 0; i < ERI_RPENDING; i++)
991 			if (erip->ndmarh[i])
992 				ddi_dma_free_handle(&erip->ndmarh[i]);
993 	}
994 	/*
995 	 * Release tiny TX buffers.
996 	 */
997 	if (erip->tbuf_ioaddr != 0) {
998 		(void) ddi_dma_unbind_handle(erip->tbuf_handle);
999 		erip->tbuf_ioaddr = 0;
1000 	}
1001 	if (erip->tbuf_kaddr != NULL) {
1002 		kmem_free(erip->tbuf_kaddr, ERI_TPENDING * eri_tx_bcopy_max);
1003 		erip->tbuf_kaddr = NULL;
1004 	}
1005 	if (erip->tbuf_handle != NULL) {
1006 		ddi_dma_free_handle(&erip->tbuf_handle);
1007 		erip->tbuf_handle = NULL;
1008 	}
1009 
1010 	eri_param_cleanup(erip);
1011 
1012 	ddi_set_driver_private(dip, NULL);
1013 	kmem_free((caddr_t)erip, sizeof (struct eri));
1014 
1015 	return (DDI_SUCCESS);
1016 }
1017 
1018 /*
1019  * To set up the mac address for the network interface:
1020  * The adapter card may support a local mac address which is published
1021  * in a device node property "local-mac-address". This mac address is
1022  * treated as the factory-installed mac address for the DLPI interface.
1023  * If the adapter firmware has used the device for diskless boot
1024  * operation it publishes a property called "mac-address" for use by
1025  * inetboot and the device driver.
1026  * If "mac-address" is not found, the system options property
1027  * "local-mac-address" is used to select the mac-address. If this option
1028  * is set to "true", and "local-mac-address" has been found, then
1029  * local-mac-address is used; otherwise the system mac address is used
1030  * by calling the "localetheraddr()" function.
1031  */
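/*
 * In short, the selection precedence is:
 *   1. the "mac-address" device property (set by firmware for boot),
 *   2. the "local-mac-address" device property, when the system option
 *      "local-mac-address?" is "true",
 *   3. the system-wide address returned by localetheraddr().
 */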
1032 
1033 static void
1034 eri_setup_mac_address(struct eri *erip, dev_info_t *dip)
1035 {
1036 	uchar_t			*prop;
1037 	char			*uselocal;
1038 	unsigned		prop_len;
1039 	uint32_t		addrflags = 0;
1040 	struct ether_addr	factaddr;
1041 
1042 	/*
1043 	 * Check if it is an adapter with its own local mac address
1044 	 * If it is present, save it as the "factory-address"
1045 	 * for this adapter.
1046 	 */
1047 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1048 	    "local-mac-address", &prop, &prop_len) == DDI_PROP_SUCCESS) {
1049 		if (prop_len == ETHERADDRL) {
1050 			addrflags = ERI_FACTADDR_PRESENT;
1051 			bcopy(prop, &factaddr, ETHERADDRL);
1052 			ERI_FAULT_MSG2(erip, SEVERITY_NONE, ERI_VERB_MSG,
1053 			    lether_addr_msg, ether_sprintf(&factaddr));
1054 		}
1055 		ddi_prop_free(prop);
1056 	}
1057 	/*
1058 	 * Check if the adapter has published "mac-address" property.
1059 	 * If it is present, use it as the mac address for this device.
1060 	 */
1061 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1062 	    "mac-address", &prop, &prop_len) == DDI_PROP_SUCCESS) {
1063 		if (prop_len >= ETHERADDRL) {
1064 			bcopy(prop, erip->ouraddr, ETHERADDRL);
1065 			ddi_prop_free(prop);
1066 			return;
1067 		}
1068 		ddi_prop_free(prop);
1069 	}
1070 
1071 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, "local-mac-address?",
1072 	    &uselocal) == DDI_PROP_SUCCESS) {
1073 		if ((strcmp("true", uselocal) == 0) &&
1074 		    (addrflags & ERI_FACTADDR_PRESENT)) {
1075 			addrflags |= ERI_FACTADDR_USE;
1076 			bcopy(&factaddr, erip->ouraddr, ETHERADDRL);
1077 			ddi_prop_free(uselocal);
1078 			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
1079 			    lmac_addr_msg);
1080 			return;
1081 		}
1082 		ddi_prop_free(uselocal);
1083 	}
1084 
1085 	/*
1086 	 * Get the system ethernet address.
1087 	 */
1088 	(void) localetheraddr(NULL, &factaddr);
1089 	bcopy(&factaddr, erip->ouraddr, ETHERADDRL);
1090 }
1091 
1092 
1093 /*
1094  * Calculate the bit in the multicast address filter that selects the given
1095  * address.
1096  * Note: For ERI, the last 8-bits are used.
1097  */
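/*
 * The 8-bit result selects one of 256 filter bits, which are spread
 * across the 16-bit "hash" registers programmed in
 * eri_init_macregs_generic().
 */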
1098 
1099 static uint32_t
1100 eri_ladrf_bit(const uint8_t *addr)
1101 {
1102 	uint32_t crc;
1103 
1104 	CRC32(crc, addr, ETHERADDRL, -1U, crc32_table);
1105 
1106 	/*
1107 	 * Just want the 8 most significant bits.
1108 	 */
1109 	return ((~crc) >> 24);
1110 }
1111 
1112 static void
1113 eri_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1114 {
1115 	struct	eri	*erip = arg;
1116 	struct	iocblk	*iocp = (void *)mp->b_rptr;
1117 	int	err;
1118 
1119 	ASSERT(erip != NULL);
1120 
1121 	/*
1122 	 * Privilege checks.
1123 	 */
1124 	switch (iocp->ioc_cmd) {
1125 	case ERI_SET_LOOP_MODE:
1126 	case ERI_ND_SET:
1127 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1128 		if (err != 0) {
1129 			miocnak(wq, mp, 0, err);
1130 			return;
1131 		}
1132 		break;
1133 	default:
1134 		break;
1135 	}
1136 
1137 	switch (iocp->ioc_cmd) {
1138 	case ERI_ND_GET:
1139 	case ERI_ND_SET:
1140 		eri_process_ndd_ioctl(erip, wq, mp, iocp->ioc_cmd);
1141 		break;
1142 
1143 	case ERI_SET_LOOP_MODE:
1144 	case ERI_GET_LOOP_MODE:
1145 		/*
1146 		 * XXX: Consider updating this to the new netlb ioctls.
1147 		 */
1148 		eri_loopback(erip, wq, mp);
1149 		break;
1150 
1151 	default:
1152 		miocnak(wq, mp, 0, EINVAL);
1153 		break;
1154 	}
1155 
1156 	ASSERT(!MUTEX_HELD(&erip->linklock));
1157 }
1158 
1159 static void
1160 eri_loopback(struct eri *erip, queue_t *wq, mblk_t *mp)
1161 {
1162 	struct	iocblk	*iocp = (void *)mp->b_rptr;
1163 	loopback_t	*al;
1164 
1165 	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < sizeof (loopback_t)) {
1166 		miocnak(wq, mp, 0, EINVAL);
1167 		return;
1168 	}
1169 
1170 	al = (void *)mp->b_cont->b_rptr;
1171 
1172 	switch (iocp->ioc_cmd) {
1173 	case ERI_SET_LOOP_MODE:
1174 		switch (al->loopback) {
1175 		case ERI_LOOPBACK_OFF:
1176 			erip->flags &= (~ERI_MACLOOPBACK & ~ERI_SERLOOPBACK);
1177 			/* force link status to go down */
1178 			param_linkup = 0;
1179 			erip->stats.link_up = LINK_STATE_DOWN;
1180 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1181 			(void) eri_init(erip);
1182 			break;
1183 
1184 		case ERI_MAC_LOOPBACK_ON:
1185 			erip->flags |= ERI_MACLOOPBACK;
1186 			erip->flags &= ~ERI_SERLOOPBACK;
1187 			param_linkup = 0;
1188 			erip->stats.link_up = LINK_STATE_DOWN;
1189 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1190 			(void) eri_init(erip);
1191 			break;
1192 
1193 		case ERI_PCS_LOOPBACK_ON:
1194 			break;
1195 
1196 		case ERI_SER_LOOPBACK_ON:
1197 			erip->flags |= ERI_SERLOOPBACK;
1198 			erip->flags &= ~ERI_MACLOOPBACK;
1199 			/* force link status to go down */
1200 			param_linkup = 0;
1201 			erip->stats.link_up = LINK_STATE_DOWN;
1202 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1203 			(void) eri_init(erip);
1204 			break;
1205 
1206 		default:
1207 			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
1208 			    loopback_val_default);
1209 			miocnak(wq, mp, 0, EINVAL);
1210 			return;
1211 		}
1212 		miocack(wq, mp, 0, 0);
1213 		break;
1214 
1215 	case ERI_GET_LOOP_MODE:
1216 		al->loopback =	ERI_MAC_LOOPBACK_ON | ERI_PCS_LOOPBACK_ON |
1217 		    ERI_SER_LOOPBACK_ON;
1218 		miocack(wq, mp, sizeof (loopback_t), 0);
1219 		break;
1220 
1221 	default:
1222 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1223 		    loopback_cmd_default);
1224 	}
1225 }
1226 
1227 static int
1228 eri_m_promisc(void *arg, boolean_t on)
1229 {
1230 	struct	eri	*erip = arg;
1231 
1232 	mutex_enter(&erip->intrlock);
1233 	erip->promisc = on;
1234 	eri_init_rx(erip);
1235 	mutex_exit(&erip->intrlock);
1236 	return (0);
1237 }
1238 
1239 /*
1240  * This is to support unlimited number of members
1241  * in Multicast.
1242  */
1243 static int
1244 eri_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
1245 {
1246 	struct eri		*erip = arg;
1247 	uint32_t 		ladrf_bit;
1248 
1249 	/*
1250 	 * If this address's bit was not already set in the local address
1251 	 * filter, add it and re-initialize the Hardware.
1252 	 */
1253 	ladrf_bit = eri_ladrf_bit(mca);
1254 
1255 	mutex_enter(&erip->intrlock);
1256 	if (add) {
1257 		erip->ladrf_refcnt[ladrf_bit]++;
1258 		if (erip->ladrf_refcnt[ladrf_bit] == 1) {
1259 			LADRF_SET(erip, ladrf_bit);
1260 			erip->multi_refcnt++;
1261 			eri_init_rx(erip);
1262 		}
1263 	} else {
1264 		erip->ladrf_refcnt[ladrf_bit]--;
1265 		if (erip->ladrf_refcnt[ladrf_bit] == 0) {
1266 			LADRF_CLR(erip, ladrf_bit);
1267 			erip->multi_refcnt--;
1268 			eri_init_rx(erip);
1269 		}
1270 	}
1271 	mutex_exit(&erip->intrlock);
1272 	return (0);
1273 }
1274 
1275 static int
1276 eri_m_unicst(void *arg, const uint8_t *macaddr)
1277 {
1278 	struct	eri	*erip = arg;
1279 
1280 	/*
1281 	 * Set new interface local address and re-init device.
1282 	 * This is destructive to any other streams attached
1283 	 * to this device.
1284 	 */
1285 	mutex_enter(&erip->intrlock);
1286 	bcopy(macaddr, &erip->ouraddr, ETHERADDRL);
1287 	eri_init_rx(erip);
1288 	mutex_exit(&erip->intrlock);
1289 	return (0);
1290 }
1291 
1292 /*ARGSUSED*/
1293 static boolean_t
1294 eri_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1295 {
1296 	switch (cap) {
1297 	case MAC_CAPAB_HCKSUM: {
1298 		uint32_t *hcksum_txflags = cap_data;
1299 		*hcksum_txflags = HCKSUM_INET_PARTIAL;
1300 		return (B_TRUE);
1301 	}
1302 	case MAC_CAPAB_POLL:
1303 	default:
1304 		return (B_FALSE);
1305 	}
1306 }
1307 
1308 static int
1309 eri_m_start(void *arg)
1310 {
1311 	struct eri	*erip = arg;
1312 
1313 	mutex_enter(&erip->intrlock);
1314 	erip->flags |= ERI_STARTED;
1315 	mutex_exit(&erip->intrlock);
1316 
1317 	if (!eri_init(erip)) {
1318 		mutex_enter(&erip->intrlock);
1319 		erip->flags &= ~ERI_STARTED;
1320 		mutex_exit(&erip->intrlock);
1321 		return (EIO);
1322 	}
1323 	return (0);
1324 }
1325 
1326 static void
1327 eri_m_stop(void *arg)
1328 {
1329 	struct eri	*erip = arg;
1330 
1331 	mutex_enter(&erip->intrlock);
1332 	erip->flags &= ~ERI_STARTED;
1333 	mutex_exit(&erip->intrlock);
1334 	eri_uninit(erip);
1335 }
1336 
1337 static int
1338 eri_m_stat(void *arg, uint_t stat, uint64_t *val)
1339 {
1340 	struct eri	*erip = arg;
1341 	struct stats	*esp;
1342 	boolean_t	macupdate = B_FALSE;
1343 
1344 	esp = &erip->stats;
1345 
1346 	mutex_enter(&erip->xmitlock);
1347 	if ((erip->flags & ERI_RUNNING) && (erip->flags & ERI_TXINIT)) {
1348 		erip->tx_completion =
1349 		    GET_ETXREG(tx_completion) & ETX_COMPLETION_MASK;
1350 		macupdate |= eri_reclaim(erip, erip->tx_completion);
1351 	}
1352 	mutex_exit(&erip->xmitlock);
1353 	if (macupdate)
1354 		mac_tx_update(erip->mh);
1355 
1356 	eri_savecntrs(erip);
1357 
1358 	switch (stat) {
1359 	case MAC_STAT_IFSPEED:
1360 		*val = esp->ifspeed * 1000000ULL;
1361 		break;
1362 	case MAC_STAT_MULTIRCV:
1363 		*val = esp->multircv;
1364 		break;
1365 	case MAC_STAT_BRDCSTRCV:
1366 		*val = esp->brdcstrcv;
1367 		break;
1368 	case MAC_STAT_IPACKETS:
1369 		*val = esp->ipackets64;
1370 		break;
1371 	case MAC_STAT_RBYTES:
1372 		*val = esp->rbytes64;
1373 		break;
1374 	case MAC_STAT_OBYTES:
1375 		*val = esp->obytes64;
1376 		break;
1377 	case MAC_STAT_OPACKETS:
1378 		*val = esp->opackets64;
1379 		break;
1380 	case MAC_STAT_IERRORS:
1381 		*val = esp->ierrors;
1382 		break;
1383 	case MAC_STAT_OERRORS:
1384 		*val = esp->oerrors;
1385 		break;
1386 	case MAC_STAT_MULTIXMT:
1387 		*val = esp->multixmt;
1388 		break;
1389 	case MAC_STAT_BRDCSTXMT:
1390 		*val = esp->brdcstxmt;
1391 		break;
1392 	case MAC_STAT_NORCVBUF:
1393 		*val = esp->norcvbuf;
1394 		break;
1395 	case MAC_STAT_NOXMTBUF:
1396 		*val = esp->noxmtbuf;
1397 		break;
1398 	case MAC_STAT_UNDERFLOWS:
1399 		*val = esp->txmac_urun;
1400 		break;
1401 	case MAC_STAT_OVERFLOWS:
1402 		*val = esp->rx_overflow;
1403 		break;
1404 	case MAC_STAT_COLLISIONS:
1405 		*val = esp->collisions;
1406 		break;
1407 	case ETHER_STAT_ALIGN_ERRORS:
1408 		*val = esp->rx_align_err;
1409 		break;
1410 	case ETHER_STAT_FCS_ERRORS:
1411 		*val = esp->rx_crc_err;
1412 		break;
1413 	case ETHER_STAT_EX_COLLISIONS:
1414 		*val = esp->excessive_coll;
1415 		break;
1416 	case ETHER_STAT_TX_LATE_COLLISIONS:
1417 		*val = esp->late_coll;
1418 		break;
1419 	case ETHER_STAT_FIRST_COLLISIONS:
1420 		*val = esp->first_coll;
1421 		break;
1422 	case ETHER_STAT_LINK_DUPLEX:
1423 		*val = esp->link_duplex;
1424 		break;
1425 	case ETHER_STAT_TOOLONG_ERRORS:
1426 		*val = esp->rx_toolong_pkts;
1427 		break;
1428 	case ETHER_STAT_TOOSHORT_ERRORS:
1429 		*val = esp->rx_runt;
1430 		break;
1431 
1432 	case ETHER_STAT_XCVR_ADDR:
1433 		*val = erip->phyad;
1434 		break;
1435 
1436 	case ETHER_STAT_XCVR_INUSE:
1437 		*val = XCVR_100X;	/* should always be 100X for now */
1438 		break;
1439 
1440 	case ETHER_STAT_CAP_100FDX:
1441 		*val = param_bmsr_100fdx;
1442 		break;
1443 	case ETHER_STAT_CAP_100HDX:
1444 		*val = param_bmsr_100hdx;
1445 		break;
1446 	case ETHER_STAT_CAP_10FDX:
1447 		*val = param_bmsr_10fdx;
1448 		break;
1449 	case ETHER_STAT_CAP_10HDX:
1450 		*val = param_bmsr_10hdx;
1451 		break;
1452 	case ETHER_STAT_CAP_AUTONEG:
1453 		*val = param_bmsr_ancap;
1454 		break;
1455 	case ETHER_STAT_CAP_ASMPAUSE:
1456 		*val = param_bmsr_asm_dir;
1457 		break;
1458 	case ETHER_STAT_CAP_PAUSE:
1459 		*val = param_bmsr_pause;
1460 		break;
1461 	case ETHER_STAT_ADV_CAP_100FDX:
1462 		*val = param_anar_100fdx;
1463 		break;
1464 	case ETHER_STAT_ADV_CAP_100HDX:
1465 		*val = param_anar_100hdx;
1466 		break;
1467 	case ETHER_STAT_ADV_CAP_10FDX:
1468 		*val = param_anar_10fdx;
1469 		break;
1470 	case ETHER_STAT_ADV_CAP_10HDX:
1471 		*val = param_anar_10hdx;
1472 		break;
1473 	case ETHER_STAT_ADV_CAP_AUTONEG:
1474 		*val = param_autoneg;
1475 		break;
1476 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
1477 		*val = param_anar_asm_dir;
1478 		break;
1479 	case ETHER_STAT_ADV_CAP_PAUSE:
1480 		*val = param_anar_pause;
1481 		break;
1482 	case ETHER_STAT_LP_CAP_100FDX:
1483 		*val = param_anlpar_100fdx;
1484 		break;
1485 	case ETHER_STAT_LP_CAP_100HDX:
1486 		*val = param_anlpar_100hdx;
1487 		break;
1488 	case ETHER_STAT_LP_CAP_10FDX:
1489 		*val = param_anlpar_10fdx;
1490 		break;
1491 	case ETHER_STAT_LP_CAP_10HDX:
1492 		*val = param_anlpar_10hdx;
1493 		break;
1494 	case ETHER_STAT_LP_CAP_AUTONEG:
1495 		*val = param_aner_lpancap;
1496 		break;
1497 	case ETHER_STAT_LP_CAP_ASMPAUSE:
1498 		*val = param_anlpar_pauseTX;
1499 		break;
1500 	case ETHER_STAT_LP_CAP_PAUSE:
1501 		*val = param_anlpar_pauseRX;
1502 		break;
1503 	case ETHER_STAT_LINK_PAUSE:
1504 		*val = esp->pausing;
1505 		break;
1506 	case ETHER_STAT_LINK_ASMPAUSE:
1507 		*val = param_anar_asm_dir &&
1508 		    param_anlpar_pauseTX &&
1509 		    (param_anar_pause != param_anlpar_pauseRX);
1510 		break;
1511 	case ETHER_STAT_LINK_AUTONEG:
1512 		*val = param_autoneg && param_aner_lpancap;
1513 		break;
1514 	}
1515 	return (0);
1516 }
1517 
1518 /*
1519  * Hardware Functions
1520  * New Section
1521  */
1522 
1523 /*
1524  * Initialize the MAC registers. Some of the MAC registers are initialized
1525  * just once since Global Reset or MAC reset doesn't clear them. Others (like
1526  * Host MAC Address Registers) are cleared on every reset and have to be
1527  * reinitialized.
1528  */
1529 static void
1530 eri_init_macregs_generic(struct eri *erip)
1531 {
1532 	/*
1533 	 * set up the MAC parameter registers once
1534 	 * after power cycle. SUSPEND/RESUME also requires
1535 	 * setting these registers.
1536 	 */
1537 	if ((erip->stats.inits == 1) || (erip->init_macregs)) {
1538 		erip->init_macregs = 0;
1539 		PUT_MACREG(ipg0, param_ipg0);
1540 		PUT_MACREG(ipg1, param_ipg1);
1541 		PUT_MACREG(ipg2, param_ipg2);
1542 		PUT_MACREG(macmin, BMAC_MIN_FRAME_SIZE);
1543 #ifdef	ERI_RX_TAG_ERROR_WORKAROUND
1544 		PUT_MACREG(macmax, BMAC_MAX_FRAME_SIZE_TAG | BMAC_MAX_BURST);
1545 #else
1546 		PUT_MACREG(macmax, BMAC_MAX_FRAME_SIZE | BMAC_MAX_BURST);
1547 #endif
1548 		PUT_MACREG(palen, BMAC_PREAMBLE_SIZE);
1549 		PUT_MACREG(jam, BMAC_JAM_SIZE);
1550 		PUT_MACREG(alimit, BMAC_ATTEMPT_LIMIT);
1551 		PUT_MACREG(macctl_type, BMAC_CONTROL_TYPE);
1552 		PUT_MACREG(rseed,
1553 		    ((erip->ouraddr[0] & 0x3) << 8) | erip->ouraddr[1]);
1554 
1555 		PUT_MACREG(madd3, BMAC_ADDRESS_3);
1556 		PUT_MACREG(madd4, BMAC_ADDRESS_4);
1557 		PUT_MACREG(madd5, BMAC_ADDRESS_5);
1558 
1559 		/* Program MAC Control address */
1560 		PUT_MACREG(madd6, BMAC_ADDRESS_6);
1561 		PUT_MACREG(madd7, BMAC_ADDRESS_7);
1562 		PUT_MACREG(madd8, BMAC_ADDRESS_8);
1563 
1564 		PUT_MACREG(afr0, BMAC_AF_0);
1565 		PUT_MACREG(afr1, BMAC_AF_1);
1566 		PUT_MACREG(afr2, BMAC_AF_2);
1567 		PUT_MACREG(afmr1_2, BMAC_AF21_MASK);
1568 		PUT_MACREG(afmr0, BMAC_AF0_MASK);
1569 	}
1570 
1571 	/* The counters need to be zeroed */
1572 	PUT_MACREG(nccnt, 0);
1573 	PUT_MACREG(fccnt, 0);
1574 	PUT_MACREG(excnt, 0);
1575 	PUT_MACREG(ltcnt, 0);
1576 	PUT_MACREG(dcnt,  0);
1577 	PUT_MACREG(frcnt, 0);
1578 	PUT_MACREG(lecnt, 0);
1579 	PUT_MACREG(aecnt, 0);
1580 	PUT_MACREG(fecnt, 0);
1581 	PUT_MACREG(rxcv,  0);
1582 
1583 	if (erip->pauseTX)
1584 		PUT_MACREG(spcmd, BMAC_SEND_PAUSE_CMD);
1585 	else
1586 		PUT_MACREG(spcmd, 0);
1587 
1588 	/*
1589 	 * Program BigMAC with local individual ethernet address.
1590 	 */
1591 
1592 	PUT_MACREG(madd0, (erip->ouraddr[4] << 8) | erip->ouraddr[5]);
1593 	PUT_MACREG(madd1, (erip->ouraddr[2] << 8) | erip->ouraddr[3]);
1594 	PUT_MACREG(madd2, (erip->ouraddr[0] << 8) | erip->ouraddr[1]);
1595 
1596 	/*
1597 	 * Install multicast address filter.
1598 	 */
1599 
1600 	PUT_MACREG(hash0, erip->ladrf[0]);
1601 	PUT_MACREG(hash1, erip->ladrf[1]);
1602 	PUT_MACREG(hash2, erip->ladrf[2]);
1603 	PUT_MACREG(hash3, erip->ladrf[3]);
1604 	PUT_MACREG(hash4, erip->ladrf[4]);
1605 	PUT_MACREG(hash5, erip->ladrf[5]);
1606 	PUT_MACREG(hash6, erip->ladrf[6]);
1607 	PUT_MACREG(hash7, erip->ladrf[7]);
1608 	PUT_MACREG(hash8, erip->ladrf[8]);
1609 	PUT_MACREG(hash9, erip->ladrf[9]);
1610 	PUT_MACREG(hash10, erip->ladrf[10]);
1611 	PUT_MACREG(hash11, erip->ladrf[11]);
1612 	PUT_MACREG(hash12, erip->ladrf[12]);
1613 	PUT_MACREG(hash13, erip->ladrf[13]);
1614 	PUT_MACREG(hash14, erip->ladrf[14]);
	PUT_MACREG(hash15, erip->ladrf[15]);
1615 }
1616 
1617 static int
1618 eri_flush_txbufs(struct eri *erip)
1619 {
1620 	uint_t	i;
1621 	int	status = 0;
1622 	/*
1623 	 * Free and dvma_unload pending xmit  buffers.
1624 	 * Maintaining the 1-to-1 ordered sequence of
1625 	 * dvma_load() followed by dvma_unload() is critical.
1626 	 * Always unload anything before loading it again.
1627 	 * Never unload anything twice.  Always unload
1628 	 * before freeing the buffer.  We satisfy these
1629 	 * requirements by unloading only those descriptors
1630 	 * which currently have an mblk associated with them.
1631 	 */
1632 	for (i = 0; i < ERI_TPENDING; i++) {
1633 		if (erip->tmblkp[i]) {
1634 			if (erip->eri_dvmaxh)
1635 				dvma_unload(erip->eri_dvmaxh, 2*i, DONT_FLUSH);
1636 			else if ((ddi_dma_unbind_handle(erip->ndmaxh[i]) ==
1637 			    DDI_FAILURE))
1638 				status = -1;
1639 			freeb(erip->tmblkp[i]);
1640 			erip->tmblkp[i] = NULL;
1641 		}
1642 	}
1643 	return (status);
1644 }
1645 
1646 static int
1647 eri_flush_rxbufs(struct eri *erip)
1648 {
1649 	uint_t	i;
1650 	int	status = 0;
1651 	/*
1652 	 * Free and dvma_unload pending recv buffers.
1653 	 * Maintaining the 1-to-1 ordered sequence of
1654 	 * dvma_load() followed by dvma_unload() is critical.
1655 	 * Always unload anything before loading it again.
1656 	 * Never unload anything twice.  Always unload
1657 	 * before freeing the buffer.  We satisfy these
1658 	 * requirements by unloading only those descriptors
1659 	 * which currently have an mblk associated with them.
1660 	 */
1661 	for (i = 0; i < ERI_RPENDING; i++) {
1662 		if (erip->rmblkp[i]) {
1663 			if (erip->eri_dvmarh)
1664 				dvma_unload(erip->eri_dvmarh, 2 * i,
1665 				    DDI_DMA_SYNC_FORCPU);
1666 			else if ((ddi_dma_unbind_handle(erip->ndmarh[i]) ==
1667 			    DDI_FAILURE))
1668 				status = -1;
1669 			freeb(erip->rmblkp[i]);
1670 			erip->rmblkp[i] = NULL;
1671 		}
1672 	}
1673 	return (status);
1674 }
1675 
1676 static void
1677 eri_init_txbufs(struct eri *erip)
1678 {
1679 	/*
1680 	 * Clear TX descriptors.
1681 	 */
1682 	bzero((caddr_t)erip->eri_tmdp, ERI_TPENDING * sizeof (struct eri_tmd));
1683 
1684 	/*
1685 	 * sync TXDMA descriptors.
1686 	 */
1687 	ERI_SYNCIOPB(erip, erip->eri_tmdp,
1688 	    (ERI_TPENDING * sizeof (struct eri_tmd)), DDI_DMA_SYNC_FORDEV);
1689 	/*
1690 	 * Reset TMD 'walking' pointers.
1691 	 */
1692 	erip->tcurp = erip->eri_tmdp;
1693 	erip->tnextp = erip->eri_tmdp;
1694 	erip->tx_cur_cnt = 0;
1695 	erip->tx_kick = 0;
1696 	erip->tx_completion = 0;
1697 }
1698 
1699 static int
1700 eri_init_rxbufs(struct eri *erip)
1701 {
1702 
1703 	ddi_dma_cookie_t	dma_cookie;
1704 	mblk_t			*bp;
1705 	int			i, status = 0;
1706 	uint32_t		ccnt;
1707 
1708 	/*
1709 	 * clear rcv descriptors
1710 	 */
1711 	bzero((caddr_t)erip->rmdp, ERI_RPENDING * sizeof (struct rmd));
1712 
1713 	for (i = 0; i < ERI_RPENDING; i++) {
1714 		if ((bp = eri_allocb(ERI_BUFSIZE)) == NULL) {
1715 			status = -1;
1716 			continue;
1717 		}
1718 		/* Load data buffer to DVMA space */
1719 		if (erip->eri_dvmarh)
1720 			dvma_kaddr_load(erip->eri_dvmarh,
1721 			    (caddr_t)bp->b_rptr, ERI_BUFSIZE,
1722 			    2 * i, &dma_cookie);
1723 		/*
1724 		 * Otherwise, bind the data buffer to a DMA handle.
1725 		 */
1726 		else if (ddi_dma_addr_bind_handle(erip->ndmarh[i], NULL,
1727 		    (caddr_t)bp->b_rptr, ERI_BUFSIZE,
1728 		    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1729 		    &dma_cookie, &ccnt) != DDI_DMA_MAPPED)
1730 			status = -1;
1731 
1732 		PUT_RMD((&erip->rmdp[i]), dma_cookie);
1733 		erip->rmblkp[i] = bp;	/* save for later use */
1734 	}
1735 
1736 	/*
1737 	 * sync RXDMA descriptors.
1738 	 */
1739 	ERI_SYNCIOPB(erip, erip->rmdp, (ERI_RPENDING * sizeof (struct rmd)),
1740 	    DDI_DMA_SYNC_FORDEV);
1741 	/*
1742 	 * Reset RMD 'walking' pointers.
1743 	 */
1744 	erip->rnextp = erip->rmdp;
1745 	erip->rx_completion = 0;
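	/*
	 * Leave a small gap between the kick and completion pointers; the
	 * kick register is believed to require updates in multiples of 4
	 * descriptors.
	 */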
1746 	erip->rx_kick = ERI_RPENDING - 4;
1747 	return (status);
1748 }
1749 
1750 static uint32_t
1751 eri_txmac_disable(struct eri *erip)
1752 {
1753 	int	n;
1754 
1755 	PUT_MACREG(txcfg, GET_MACREG(txcfg) & ~BMAC_TXCFG_ENAB);
1756 	n = (BMACTXRSTDELAY * 10) / ERI_WAITPERIOD;
1757 
1758 	while (--n > 0) {
1759 		drv_usecwait(ERI_WAITPERIOD);
1760 		if ((GET_MACREG(txcfg) & 1) == 0)
1761 			return (0);
1762 	}
1763 	return (1);
1764 }
1765 
1766 static uint32_t
1767 eri_rxmac_disable(struct eri *erip)
1768 {
1769 	int	n;
1770 	PUT_MACREG(rxcfg, GET_MACREG(rxcfg) & ~BMAC_RXCFG_ENAB);
1771 	n = BMACRXRSTDELAY / ERI_WAITPERIOD;
1772 
1773 	while (--n > 0) {
1774 		drv_usecwait(ERI_WAITPERIOD);
1775 		if ((GET_MACREG(rxcfg) & 1) == 0)
1776 			return (0);
1777 	}
1778 	return (1);
1779 }
1780 
1781 /*
1782  * Return 0 upon success, 1 on failure.
1783  */
1784 static int
1785 eri_stop(struct eri *erip)
1786 {
1787 	(void) eri_erx_reset(erip);
1788 	(void) eri_etx_reset(erip);
1789 
1790 	/*
1791 	 * set up cache line to 16 for 64 bytes of pci burst size
1792 	 */
1793 	PUT_SWRSTREG(reset, ERI_G_RESET_GLOBAL | ERI_CACHE_LINE_SIZE);
1794 
1795 	if (erip->linkcheck) {
1796 		erip->linkcheck = 0;
1797 		erip->global_reset_issued = 2;
1798 	} else {
1799 		param_linkup = 0;
1800 		erip->stats.link_up = LINK_STATE_DOWN;
1801 		erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1802 		erip->global_reset_issued = -1;
1803 	}
1804 
1805 	ERI_DELAY((GET_SWRSTREG(reset) == ERI_CACHE_LINE_SIZE),
1806 	    ERI_MAX_RST_DELAY);
1807 	erip->rx_reset_issued = -1;
1808 	erip->tx_reset_issued = -1;
1809 
1810 	/*
1811 	 * workaround for RIO not resetting the interrupt mask
1812 	 * register to default value 0xffffffff.
1813 	 */
1814 	PUT_GLOBREG(intmask, ERI_G_MASK_ALL);
1815 
1816 	if (GET_SWRSTREG(reset) == ERI_CACHE_LINE_SIZE) {
1817 		return (0);
1818 	} else {
1819 		return (1);
1820 	}
1821 }
1822 
1823 /*
1824  * Reset Just the RX Portion
1825  * Return 0 upon success, 1 on failure.
1826  *
1827  * Resetting the rxdma while there is an rx dma transaction going on the
1828  * bus will cause a bus hang or parity errors. To avoid this, we first
1829  * disable the rxdma by clearing the ENABLE bit (bit 0). To make sure it is
1830  * disabled, we poll it until it really clears. Furthermore, to ensure
1831  * any RX DMA activity has subsided, we delay for 5 msec.
1832  */
1833 static uint32_t
1834 eri_erx_reset(struct eri *erip)
1835 {
1836 	(void) eri_rxmac_disable(erip); /* Disable the RX MAC */
1837 
1838 	/* Disable the RX DMA */
1839 	PUT_ERXREG(config, GET_ERXREG(config) & ~GET_CONFIG_RXDMA_EN);
1840 	ERI_DELAY(((GET_ERXREG(config) &  1) == 0), ERI_MAX_RST_DELAY);
1841 	if ((GET_ERXREG(config) & 1) != 0)
1842 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1843 		    disable_erx_msg);
1844 
1845 	drv_usecwait(5000); /* Delay to ensure no RX DMA activity */
1846 
1847 	PUT_SWRSTREG(reset, ERI_G_RESET_ERX | ERI_CACHE_LINE_SIZE);
1848 	/*
1849 	 * Wait until the reset is completed, which is indicated by
1850 	 * the reset bit clearing, or until we time out.
1851 	 */
1852 	ERI_DELAY(((GET_SWRSTREG(reset) & (ERI_G_RESET_ERX)) ==
1853 	    ERI_CACHE_LINE_SIZE), ERI_MAX_RST_DELAY);
1854 	erip->rx_reset_issued = -1;
1855 
1856 	return ((GET_SWRSTREG(reset) & (ERI_G_RESET_ERX)) ? 1 : 0);
1857 }
1858 
1859 /*
1860  * Reset Just the TX Portion
1861  * Return 0 upon success, 1 on failure.
1862  * Resetting the txdma while there is a tx dma transaction on the bus may cause
1863  * a bus hang or parity errors. To avoid this we first disable the txdma by
1864  * clearing the ENABLE bit (bit 0). To make sure it is disabled, we poll it
1865  * until it really clears. Furthermore, to ensure any TX DMA activity has
1866  * subsided, we delay for 5 msec.
1867  */
1868 static uint32_t
1869 eri_etx_reset(struct eri *erip)
1870 {
1871 	(void) eri_txmac_disable(erip);
1872 
1873 	/* Disable the TX DMA */
1874 	PUT_ETXREG(config, GET_ETXREG(config) & ~GET_CONFIG_TXDMA_EN);
1875 #ifdef ORIG
1876 	ERI_DELAY(((GET_ETXREG(config) &  1) == 0), ERI_MAX_RST_DELAY);
1877 	if ((GET_ETXREG(config) &  1) != 0)
1878 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1879 		    disable_etx_msg);
1880 	drv_usecwait(5000); /* Delay  to ensure DMA completed (if any). */
1881 #endif
1882 	drv_usecwait(5000); /* Delay  to ensure DMA completed (if any). */
1883 	ERI_DELAY(((GET_ETXREG(config) &  1) == 0), ERI_MAX_RST_DELAY);
1884 	if ((GET_ETXREG(config) &  1) != 0)
1885 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1886 		    disable_etx_msg);
1887 
1888 	PUT_SWRSTREG(reset, ERI_G_RESET_ETX | ERI_CACHE_LINE_SIZE);
1889 
1890 	/*
1891 	 * Wait until the reset completes, which is indicated by the reset bit
1892 	 * clearing, or until we time out.
1893 	 */
1894 	ERI_DELAY(((GET_SWRSTREG(reset) & (ERI_G_RESET_ETX)) ==
1895 	    ERI_CACHE_LINE_SIZE), ERI_MAX_RST_DELAY);
1896 	erip->tx_reset_issued = -1;
1897 
1898 	if (GET_SWRSTREG(reset) & (ERI_G_RESET_ETX))
1899 		return (1);
1900 	else
1901 		return (0);
1902 }
1903 
1904 
1905 /*
1906  * Initialize the TX DMA registers and Enable the TX DMA.
1907  */
1908 static uint32_t
1909 eri_init_txregs(struct eri *erip)
1910 {
1911 
1912 	uint32_t	i;
1913 	uint64_t	tx_ring;
1914 
1915 	/*
1916 	 * Initialize ETX Registers:
1917 	 * config, txring_lo, txring_hi
1918 	 */
1919 	tx_ring = ERI_IOPBIOADDR(erip, erip->eri_tmdp);
1920 	PUT_ETXREG(txring_lo, (uint32_t)(tx_ring));
1921 	PUT_ETXREG(txring_hi, (uint32_t)(tx_ring >> 32));
1922 
1923 	/*
1924 	 * Get TX Ring Size Masks.
1925 	 * The ring size ERI_TPENDING is defined in eri_mac.h.
1926 	 */
1927 	switch (ERI_TPENDING) {
1928 	case 32: i = ETX_RINGSZ_32;
1929 		break;
1930 	case 64: i = ETX_RINGSZ_64;
1931 		break;
1932 	case 128: i = ETX_RINGSZ_128;
1933 		break;
1934 	case 256: i = ETX_RINGSZ_256;
1935 		break;
1936 	case 512: i = ETX_RINGSZ_512;
1937 		break;
1938 	case 1024: i = ETX_RINGSZ_1024;
1939 		break;
1940 	case 2048: i = ETX_RINGSZ_2048;
1941 		break;
1942 	case 4096: i = ETX_RINGSZ_4096;
1943 		break;
1944 	default:
1945 		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
1946 		    unk_tx_descr_sze_msg, ERI_TPENDING);
1947 		return (1);
1948 	}
1949 
1950 	i <<= ERI_TX_RINGSZ_SHIFT;
1951 	PUT_ETXREG(config, ETX_CONFIG_THRESHOLD | i);
1952 	ENABLE_TXDMA(erip);
1953 	ENABLE_MAC(erip);
1954 	return (0);
1955 }
1956 
1957 
1958 /*
1959  * Initialize the RX DMA registers and Enable the RX DMA.
1960  */
1961 static uint32_t
1962 eri_init_rxregs(struct eri *erip)
1963 {
1964 	int i;
1965 	uint64_t	rx_ring;
1966 
1967 	/*
1968 	 * Initialize ERX Registers:
1969 	 * rxring_lo, rxring_hi, config, rx_blanking, rx_pause_threshold.
1970 	 * Also, rx_kick
1971 	 * Read and save rxfifo_size.
1972 	 * XXX: Use this to properly configure PAUSE threshold values.
1973 	 */
1974 	rx_ring = ERI_IOPBIOADDR(erip, erip->rmdp);
1975 	PUT_ERXREG(rxring_lo, (uint32_t)(rx_ring));
1976 	PUT_ERXREG(rxring_hi, (uint32_t)(rx_ring >> 32));
1977 	PUT_ERXREG(rx_kick, erip->rx_kick);
1978 
1979 	/*
1980 	 * The Max ring size, ERI_RMDMAX is defined in eri_mac.h.
1981  * A larger ERI_RPENDING provides better performance but requires more
1982  * system DVMA memory.
1983  * eri_rx_ring_size can be used to tune this value from /etc/system.
1984  * eri_rx_ring_size cannot be made NDD-tunable because changing it can
1985  * cause non-recoverable errors which NDD operations cannot detect.
1986 	 */
1987 
1988 	/*
1989 	 * get the rxring size bits
1990 	 */
1991 	switch (ERI_RPENDING) {
1992 	case 32: i = ERX_RINGSZ_32;
1993 		break;
1994 	case 64: i = ERX_RINGSZ_64;
1995 		break;
1996 	case 128: i = ERX_RINGSZ_128;
1997 		break;
1998 	case 256: i = ERX_RINGSZ_256;
1999 		break;
2000 	case 512: i = ERX_RINGSZ_512;
2001 		break;
2002 	case 1024: i = ERX_RINGSZ_1024;
2003 		break;
2004 	case 2048: i = ERX_RINGSZ_2048;
2005 		break;
2006 	case 4096: i = ERX_RINGSZ_4096;
2007 		break;
2008 	default:
2009 		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
2010 		    unk_rx_descr_sze_msg, ERI_RPENDING);
2011 		return (1);
2012 	}
2013 
2014 	i <<= ERI_RX_RINGSZ_SHIFT;
2015 	i |=  (ERI_FSTBYTE_OFFSET << ERI_RX_CONFIG_FBO_SHIFT) |
2016 	    (ETHERHEADER_SIZE << ERI_RX_CONFIG_RX_CSSTART_SHIFT) |
2017 	    (ERI_RX_FIFOTH_1024 << ERI_RX_CONFIG_RXFIFOTH_SHIFT);
2018 
2019 	PUT_ERXREG(config, i);
2020 	PUT_ERXREG(rx_blanking,
2021 	    (param_intr_blank_time << ERI_RX_BLNK_INTR_TIME_SHIFT) |
2022 	    param_intr_blank_packets);
2023 
2024 	PUT_ERXREG(rx_pause_threshold, rx_pause_threshold);
2025 	erip->rxfifo_size = GET_ERXREG(rxfifo_size);
2026 	ENABLE_RXDMA(erip);
2027 	return (0);
2028 }
2029 
2030 static int
2031 eri_freebufs(struct eri *erip)
2032 {
2033 	int status = 0;
2034 
2035 	status = eri_flush_rxbufs(erip) | eri_flush_txbufs(erip);
2036 	return (status);
2037 }
2038 
2039 static void
2040 eri_update_rxbufs(struct eri *erip)
2041 {
2042 	int		i;
2043 	volatile struct rmd  *rmdp, *rmdpbase;
2044 
2045 	/*
2046 	 * Hang out receive buffers.
2047 	 */
2048 	rmdpbase = erip->rmdp;
2049 	for (i = 0; i < ERI_RPENDING; i++) {
2050 		rmdp = rmdpbase + i;
2051 		UPDATE_RMD(rmdp);
2052 	}
2053 
2054 	/*
2055 	 * sync RXDMA descriptors.
2056 	 */
2057 	ERI_SYNCIOPB(erip, erip->rmdp, (ERI_RPENDING * sizeof (struct rmd)),
2058 	    DDI_DMA_SYNC_FORDEV);
2059 	/*
2060 	 * Reset RMD 'walking' pointers.
2061 	 */
2062 	erip->rnextp =	erip->rmdp;
2063 	erip->rx_completion = 0;
2064 	erip->rx_kick =	ERI_RPENDING - 4;
2065 }
2066 
2067 /*
2068  * This routine is used to reset the RX DMA only. In the case of RX
2069  * failures such as RX Tag Error, RX hang etc... we don't want to
2070  * do a global reset, which takes down the link and clears the FIFOs.
2071  * By doing an RX-only reset, we leave the TX and the link intact.
2072  */
2073 static uint32_t
2074 eri_init_rx_channel(struct eri *erip)
2075 {
2076 	erip->flags &= ~ERI_RXINIT;
2077 	(void) eri_erx_reset(erip);
2078 	eri_update_rxbufs(erip);
2079 	if (eri_init_rxregs(erip))
2080 		return (1);
2081 	PUT_MACREG(rxmask, BMAC_RXINTR_MASK);
2082 	PUT_MACREG(rxcfg, GET_MACREG(rxcfg) | BMAC_RXCFG_ENAB);
2083 	erip->rx_reset_issued = 0;
2084 	HSTAT(erip, rx_inits);
2085 	erip->flags |= ERI_RXINIT;
2086 	return (0);
2087 }
2088 
2089 static void
2090 eri_init_rx(struct eri *erip)
2091 {
2092 	uint16_t	*ladrf;
2093 
2094 	/*
2095 	 * First of all, make sure the Receive MAC is stopped.
2096 	 */
2097 	(void) eri_rxmac_disable(erip); /* Disable the RX MAC */
2098 
2099 	/*
2100 	 * Program BigMAC with local individual ethernet address.
2101 	 */
2102 
2103 	PUT_MACREG(madd0, (erip->ouraddr[4] << 8) | erip->ouraddr[5]);
2104 	PUT_MACREG(madd1, (erip->ouraddr[2] << 8) | erip->ouraddr[3]);
2105 	PUT_MACREG(madd2, (erip->ouraddr[0] << 8) | erip->ouraddr[1]);
2106 
2107 	/*
2108 	 * Set up multicast address filter by passing all multicast
2109 	 * addresses through a crc generator, and then using the
2110 	 * low order 8 bits as an index into the 256-bit logical
2111 	 * address filter. The high order four bits select the word,
2112 	 * while the rest of the bits select the bit within the word.
2113 	 */
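	/*
	 * Illustrative sketch only (hypothetical names, never compiled):
	 * one way the filter bit for a single multicast address could be
	 * derived, assuming the CRC32() macro from <sys/crc32.h> and the
	 * low-order 8-bit indexing described above.
	 */
#if 0
	{
		uint32_t crc;
		uint8_t index;

		/* CRC over the 6-byte multicast address */
		CRC32(crc, mcaddr, ETHERADDRL, -1U, crc32_table);
		index = crc & 0xff;		/* low 8 bits: 0..255 */
		/* high 4 bits pick the 16-bit word, low 4 bits the bit */
		ladrf[index >> 4] |= 1 << (index & 0xf);
	}
#endif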
2114 
2115 	ladrf = erip->ladrf;
2116 
2117 	PUT_MACREG(hash0, ladrf[0]);
2118 	PUT_MACREG(hash1, ladrf[1]);
2119 	PUT_MACREG(hash2, ladrf[2]);
2120 	PUT_MACREG(hash3, ladrf[3]);
2121 	PUT_MACREG(hash4, ladrf[4]);
2122 	PUT_MACREG(hash5, ladrf[5]);
2123 	PUT_MACREG(hash6, ladrf[6]);
2124 	PUT_MACREG(hash7, ladrf[7]);
2125 	PUT_MACREG(hash8, ladrf[8]);
2126 	PUT_MACREG(hash9, ladrf[9]);
2127 	PUT_MACREG(hash10, ladrf[10]);
2128 	PUT_MACREG(hash11, ladrf[11]);
2129 	PUT_MACREG(hash12, ladrf[12]);
2130 	PUT_MACREG(hash13, ladrf[13]);
2131 	PUT_MACREG(hash14, ladrf[14]);
2132 	PUT_MACREG(hash15, ladrf[15]);
2133 
2134 #ifdef ERI_DONT_STRIP_CRC
2135 	PUT_MACREG(rxcfg,
2136 	    ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2137 	    (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2138 	    BMAC_RXCFG_ENAB));
2139 #else
2140 	PUT_MACREG(rxcfg,
2141 	    ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2142 	    (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2143 	    BMAC_RXCFG_ENAB | BMAC_RXCFG_STRIP_CRC));
2144 #endif
2145 	/* wait after setting Hash Enable bit */
2146 	/* drv_usecwait(10); */
2147 
2148 	HSTAT(erip, rx_inits);
2149 }
2150 
2151 /*
2152  * This routine is used to init the TX MAC only.
2153  *	&erip->xmitlock is held before calling this routine.
2154  */
2155 void
2156 eri_init_txmac(struct eri *erip)
2157 {
2158 	uint32_t carrier_ext = 0;
2159 
2160 	erip->flags &= ~ERI_TXINIT;
2161 	/*
2162 	 * Stop the Transmit MAC.
2163 	 */
2164 	(void) eri_txmac_disable(erip);
2165 
2166 	/*
2167 	 * Must be Internal Transceiver
2168 	 */
2169 	if (param_mode)
2170 		PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2171 		    BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE));
2172 	else
2173 		PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2174 		    BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE |
2175 		    BMAC_XIFC_DIS_ECHO));
2176 
2177 	/*
2178 	 * Initialize the interpacket gap registers
2179 	 */
2180 	PUT_MACREG(ipg1, param_ipg1);
2181 	PUT_MACREG(ipg2, param_ipg2);
2182 
2183 	if (erip->ngu_enable)
2184 		PUT_MACREG(txcfg, ((param_mode ? BMAC_TXCFG_FDX: 0) |
2185 		    ((param_lance_mode && (erip->lance_mode_enable)) ?
2186 		    BMAC_TXCFG_ENIPG0 : 0) |
2187 		    (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0) |
2188 		    BMAC_TXCFG_NGU));
2189 	else
2190 		PUT_MACREG(txcfg, ((param_mode ? BMAC_TXCFG_FDX: 0) |
2191 		    ((param_lance_mode && (erip->lance_mode_enable)) ?
2192 		    BMAC_TXCFG_ENIPG0 : 0) |
2193 		    (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0)));
2194 
2195 	ENABLE_TXDMA(erip);
2196 	ENABLE_TXMAC(erip);
2197 
2198 	HSTAT(erip, tx_inits);
2199 	erip->flags |= ERI_TXINIT;
2200 }
2201 
2202 static void
2203 eri_unallocthings(struct eri *erip)
2204 {
2205 	uint32_t	flag;
2206 	uint32_t	i;
2207 
2208 	flag = erip->alloc_flag;
2209 
2210 	if (flag & ERI_DESC_MEM_MAP)
2211 		(void) ddi_dma_unbind_handle(erip->md_h);
2212 
2213 	if (flag & ERI_DESC_MEM_ALLOC) {
2214 		ddi_dma_mem_free(&erip->mdm_h);
2215 		erip->rmdp = NULL;
2216 		erip->eri_tmdp = NULL;
2217 	}
2218 
2219 	if (flag & ERI_DESC_HANDLE_ALLOC)
2220 		ddi_dma_free_handle(&erip->md_h);
2221 
2222 	(void) eri_freebufs(erip);
2223 
2224 	if (flag & ERI_XMIT_HANDLE_ALLOC)
2225 		for (i = 0; i < erip->xmit_handle_cnt; i++)
2226 			ddi_dma_free_handle(&erip->ndmaxh[i]);
2227 
2228 	if (flag & ERI_XMIT_DVMA_ALLOC) {
2229 		(void) dvma_release(erip->eri_dvmaxh);
2230 		erip->eri_dvmaxh = NULL;
2231 	}
2232 
2233 	if (flag & ERI_RCV_HANDLE_ALLOC)
2234 		for (i = 0; i < erip->rcv_handle_cnt; i++)
2235 			ddi_dma_free_handle(&erip->ndmarh[i]);
2236 
2237 	if (flag & ERI_RCV_DVMA_ALLOC) {
2238 		(void) dvma_release(erip->eri_dvmarh);
2239 		erip->eri_dvmarh = NULL;
2240 	}
2241 
2242 	if (flag & ERI_XBUFS_KMEM_DMABIND) {
2243 		(void) ddi_dma_unbind_handle(erip->tbuf_handle);
2244 		erip->tbuf_ioaddr = 0;
2245 	}
2246 
2247 	if (flag & ERI_XBUFS_KMEM_ALLOC) {
2248 		kmem_free(erip->tbuf_kaddr, ERI_TPENDING * eri_tx_bcopy_max);
2249 		erip->tbuf_kaddr = NULL;
2250 	}
2251 
2252 	if (flag & ERI_XBUFS_HANDLE_ALLOC) {
2253 		ddi_dma_free_handle(&erip->tbuf_handle);
2254 		erip->tbuf_handle = NULL;
2255 	}
2256 
2257 }
2258 
2259 /*
2260  * Initialize channel.
2261  * Return true on success, false on error.
2262  *
2263  * The recommended sequence for initialization is:
2264  * 1. Issue a Global Reset command to the Ethernet Channel.
2265  * 2. Poll the Global_Reset bits until the execution of the reset has been
2266  *    completed.
2267  * 2(a). Use the MIF Frame/Output register to reset the transceiver.
2268  *	 Poll Register 0 until the Reset bit is 0.
2269  * 2(b). Use the MIF Frame/Output register to set the PHY in Normal-Op,
2270  *	 100Mbps and Non-Isolated mode. The main point here is to bring the
2271  *	 PHY out of Isolate mode so that it can generate the rx_clk and tx_clk
2272  *	 to the MII interface so that the Bigmac core can correctly reset
2273  *	 upon a software reset.
2274  * 2(c).  Issue another Global Reset command to the Ethernet Channel and poll
2275  *	  the Global_Reset bits till completion.
2276  * 3. Set up all the data structures in the host memory.
2277  * 4. Program the TX_MAC registers/counters (excluding the TX_MAC Configuration
2278  *    Register).
2279  * 5. Program the RX_MAC registers/counters (excluding the RX_MAC Configuration
2280  *    Register).
2281  * 6. Program the Transmit Descriptor Ring Base Address in the ETX.
2282  * 7. Program the Receive Descriptor Ring Base Address in the ERX.
2283  * 8. Program the Global Configuration and the Global Interrupt Mask Registers.
2284  * 9. Program the ETX Configuration register (enable the Transmit DMA channel).
2285  * 10. Program the ERX Configuration register (enable the Receive DMA channel).
2286  * 11. Program the XIF Configuration Register (enable the XIF).
2287  * 12. Program the RX_MAC Configuration Register (Enable the RX_MAC).
2288  * 13. Program the TX_MAC Configuration Register (Enable the TX_MAC).
2289  */
2290 /*
2291  * lock order:
2292  *	intrlock->linklock->xmitlock->xcvrlock
2293  */
2294 static boolean_t
2295 eri_init(struct eri *erip)
2296 {
2297 	uint32_t	init_stat = 0;
2298 	uint32_t	partial_init = 0;
2299 	uint32_t	carrier_ext = 0;
2300 	uint32_t	mac_ctl = 0;
2301 	boolean_t	ret;
2302 	uint32_t 	link_timeout = ERI_LINKCHECK_TIMER;
2303 	link_state_t	linkupdate = LINK_STATE_UNKNOWN;
2304 
2305 	/*
2306 	 * Just return successfully if device is suspended.
2307 	 * eri_init() will be called again from resume.
2308 	 */
2309 	ASSERT(erip != NULL);
2310 
2311 	if (erip->flags & ERI_SUSPENDED) {
2312 		ret = B_TRUE;
2313 		goto init_exit;
2314 	}
2315 
2316 	mutex_enter(&erip->intrlock);
2317 	eri_stop_timer(erip);	/* acquire linklock */
2318 	mutex_enter(&erip->xmitlock);
2319 	erip->flags &= (ERI_DLPI_LINKUP | ERI_STARTED);
2320 	erip->wantw = B_FALSE;
2321 	HSTAT(erip, inits);
2322 	erip->txhung = 0;
2323 
2324 	if ((erip->stats.inits > 1) && (erip->init_macregs == 0))
2325 		eri_savecntrs(erip);
2326 
2327 	mutex_enter(&erip->xcvrlock);
2328 	if (!param_linkup || erip->linkcheck) {
2329 		if (!erip->linkcheck)
2330 			linkupdate = LINK_STATE_DOWN;
2331 		(void) eri_stop(erip);
2332 	}
2333 	if (!(erip->flags & ERI_DLPI_LINKUP) || !param_linkup) {
2334 		erip->flags |= ERI_DLPI_LINKUP;
2335 		eri_mif_poll(erip, MIF_POLL_STOP);
2336 		(void) eri_new_xcvr(erip);
2337 		ERI_DEBUG_MSG1(erip, XCVR_MSG, "New transceiver detected.");
2338 		if (param_transceiver != NO_XCVR) {
2339 			/*
2340 			 * Reset the new PHY and bring up the
2341 			 * link
2342 			 */
2343 			if (eri_reset_xcvr(erip)) {
2344 				ERI_FAULT_MSG1(erip, SEVERITY_NONE,
2345 				    ERI_VERB_MSG, "In Init after reset");
2346 				mutex_exit(&erip->xcvrlock);
2347 				link_timeout = 0;
2348 				goto done;
2349 			}
2350 			if (erip->stats.link_up == LINK_STATE_UP)
2351 				linkupdate = LINK_STATE_UP;
2352 		} else {
2353 			erip->flags |= (ERI_RUNNING | ERI_INITIALIZED);
2354 			param_linkup = 0;
2355 			erip->stats.link_up = LINK_STATE_DOWN;
2356 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2357 			linkupdate = LINK_STATE_DOWN;
2358 			/*
2359 			 * Still go on and complete the MAC initialization as
2360 			 * the xcvr might show up later.  Any code added
2361 			 * here must preserve the existing mutex ordering.
2362 			 */
2363 		}
2364 		eri_mif_poll(erip, MIF_POLL_START);
2365 	}
2366 
2367 	mutex_exit(&erip->xcvrlock);
2368 
2369 	/*
2370 	 * Allocate data structures.
2371 	 */
2372 	if (erip->global_reset_issued) {
2373 		if (erip->global_reset_issued == 2) { /* fast path */
2374 			if (eri_flush_txbufs(erip))
2375 				goto done;
2376 			/*
2377 			 * Hang out/Initialize descriptors and buffers.
2378 			 */
2379 			eri_init_txbufs(erip);
2380 
2381 			eri_update_rxbufs(erip);
2382 		} else {
2383 			init_stat = eri_allocthings(erip);
2384 			if (init_stat)
2385 				goto done;
2386 
2387 			if (eri_freebufs(erip))
2388 				goto done;
2389 			/*
2390 			 * Hang out/Initialize descriptors and buffers.
2391 			 */
2392 			eri_init_txbufs(erip);
2393 			if (eri_init_rxbufs(erip))
2394 				goto done;
2395 		}
2396 	}
2397 
2398 	/*
2399 	 * BigMAC requires that we confirm that tx, rx and hash are in
2400  * a quiescent state.
2401 	 * MAC will not reset successfully if the transceiver is not reset and
2402 	 * brought out of Isolate mode correctly. TXMAC reset may fail if the
2403 	 * ext. transceiver is just disconnected. If it fails, try again by
2404 	 * checking the transceiver.
2405 	 */
2406 	if (eri_txmac_disable(erip)) {
2407 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
2408 		    disable_txmac_msg);
2409 		param_linkup = 0;	/* force init again */
2410 		erip->stats.link_up = LINK_STATE_DOWN;
2411 		erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2412 		linkupdate = LINK_STATE_DOWN;
2413 		goto done;
2414 	}
2415 
2416 	if (eri_rxmac_disable(erip)) {
2417 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
2418 		    disable_rxmac_msg);
2419 		param_linkup = 0;	/* force init again */
2420 		erip->stats.link_up = LINK_STATE_DOWN;
2421 		erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2422 		linkupdate = LINK_STATE_DOWN;
2423 		goto done;
2424 	}
2425 
2426 	eri_init_macregs_generic(erip);
2427 
2428 	/*
2429 	 * Initialize ERI Global registers :
2430 	 * config
2431 	 * For PCI :  err_mask, bif_cfg
2432 	 *
2433 	 * Use user-configurable parameter for enabling 64-bit transfers.
2434 	 * Note: For PCI, burst sizes are in multiples of 64 bytes.
2435 	 */
2436 
2437 	/*
2438 	 * Significant performance improvements can be achieved by
2439 	 * disabling the transmit interrupt, so TMDs are reclaimed
2440 	 * only very infrequently.
2441 	 * The PCS interrupt is masked here. It is enabled only when
2442 	 * a PCS link is brought up because there is no second-level
2443 	 * mask for this interrupt.
2444 	 * Init GLOBAL, TXMAC, RXMAC and MACCTL interrupt masks here.
2445 	 */
2446 	if (! partial_init) {
2447 		PUT_GLOBREG(intmask, ERI_G_MASK_INTR);
2448 		erip->tx_int_me = 0;
2449 		PUT_MACREG(txmask, BMAC_TXINTR_MASK);
2450 		PUT_MACREG(rxmask, BMAC_RXINTR_MASK);
2451 		PUT_MACREG(macctl_mask, ERI_MACCTL_INTR_MASK);
2452 	}
2453 
2454 	if (erip->global_reset_issued) {
2455 		/*
2456 		 * Initialize ETX Registers:
2457 		 * config, txring_lo, txring_hi
2458 		 */
2459 		if (eri_init_txregs(erip))
2460 			goto done;
2461 		/*
2462 		 * Initialize ERX Registers:
2463 		 * rxring_lo, rxring_hi, config, rx_blanking,
2464 		 * rx_pause_threshold.  Also, rx_kick
2465 		 * Read and save rxfifo_size.
2466 		 */
2467 		if (eri_init_rxregs(erip))
2468 			goto done;
2469 	}
2470 
2471 	PUT_MACREG(macctl_mask, ERI_MACCTL_INTR_MASK);
2472 
2473 	/*
2474 	 * Set up the slottime, rxconfig and txconfig without enabling
2475 	 * the latter two at this time.
2476 	 */
2477 	PUT_MACREG(slot, BMAC_SLOT_TIME);
2478 	carrier_ext = 0;
2479 
2480 #ifdef ERI_DONT_STRIP_CRC
2481 	PUT_MACREG(rxcfg,
2482 	    ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2483 	    (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2484 	    (carrier_ext ? BMAC_RXCFG_CARR_EXT : 0)));
2485 #else
2486 	PUT_MACREG(rxcfg,
2487 	    ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2488 	    (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2489 	    BMAC_RXCFG_STRIP_CRC |
2490 	    (carrier_ext ? BMAC_RXCFG_CARR_EXT : 0)));
2491 #endif
2492 	drv_usecwait(10);	/* wait after setting Hash Enable bit */
2493 
2494 	if (erip->ngu_enable)
2495 		PUT_MACREG(txcfg,
2496 		    ((param_mode ? BMAC_TXCFG_FDX: 0) |
2497 		    ((param_lance_mode && (erip->lance_mode_enable)) ?
2498 		    BMAC_TXCFG_ENIPG0 : 0) |
2499 		    (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0) |
2500 		    BMAC_TXCFG_NGU));
2501 	else
2502 		PUT_MACREG(txcfg,
2503 		    ((param_mode ? BMAC_TXCFG_FDX: 0) |
2504 		    ((param_lance_mode && (erip->lance_mode_enable)) ?
2505 		    BMAC_TXCFG_ENIPG0 : 0) |
2506 		    (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0)));
2507 
2508 	if (erip->pauseRX)
2509 		mac_ctl = ERI_MCTLCFG_RXPAUSE;
2510 	if (erip->pauseTX)
2511 		mac_ctl |= ERI_MCTLCFG_TXPAUSE;
2512 
2513 	PUT_MACREG(macctl_cfg, mac_ctl);
2514 
2515 	/*
2516 	 * Must be Internal Transceiver
2517 	 */
2518 	if (param_mode)
2519 		PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2520 		    BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE));
2521 	else {
2522 		PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2523 		    BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE |
2524 		    BMAC_XIFC_DIS_ECHO));
2525 
2526 		link_timeout = ERI_CHECK_HANG_TIMER;
2527 	}
2528 
2529 	/*
2530 	 * if MAC int loopback flag is set, put xifc reg in mii loopback
2531 	 * mode {DIAG}
2532 	 */
2533 	if (erip->flags & ERI_MACLOOPBACK) {
2534 		PUT_MACREG(xifc, GET_MACREG(xifc) | BMAC_XIFC_MIILPBK);
2535 	}
2536 
2537 	/*
2538 	 * Enable TX and RX MACs.
2539 	 */
2540 	ENABLE_MAC(erip);
2541 	erip->flags |= (ERI_RUNNING | ERI_INITIALIZED |
2542 	    ERI_TXINIT | ERI_RXINIT);
2543 	mac_tx_update(erip->mh);
2544 	erip->global_reset_issued = 0;
2545 
2546 #ifdef	ERI_10_10_FORCE_SPEED_WORKAROUND
2547 	eri_xcvr_force_mode(erip, &link_timeout);
2548 #endif
2549 
2550 done:
2551 	if (init_stat)
2552 		eri_unallocthings(erip);
2553 
2554 	mutex_exit(&erip->xmitlock);
2555 	eri_start_timer(erip, eri_check_link, link_timeout);
2556 	mutex_exit(&erip->intrlock);
2557 
2558 	if (linkupdate != LINK_STATE_UNKNOWN)
2559 		mac_link_update(erip->mh, linkupdate);
2560 
2561 	ret = (erip->flags & ERI_RUNNING) ? B_TRUE : B_FALSE;
2562 	if (!ret) {
2563 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
2564 		    "eri_init failed");
2565 	}
2566 
2567 init_exit:
2568 	ASSERT(!MUTEX_HELD(&erip->linklock));
2569 	return (ret);
2570 }
2571 
2572 /*
2573  * A burstsizes value of 0 signifies no burst size, so it is treated as failure.
2574  */
2575 static int
2576 eri_burstsize(struct eri *erip)
2577 {
2578 	ddi_dma_handle_t handle;
2579 
2580 	if (ddi_dma_alloc_handle(erip->dip, &dma_attr, DDI_DMA_DONTWAIT,
2581 	    NULL, &handle))
2582 		return (DDI_FAILURE);
2583 
2584 	erip->burstsizes = ddi_dma_burstsizes(handle);
2585 	ddi_dma_free_handle(&handle);
2586 
2587 	if (erip->burstsizes)
2588 		return (DDI_SUCCESS);
2589 
2590 	return (DDI_FAILURE);
2591 }
2592 
2593 /*
2594  * Un-initialize (STOP) ERI channel.
2595  */
2596 static void
2597 eri_uninit(struct eri *erip)
2598 {
2599 	boolean_t needind;
2600 
2601 	/*
2602 	 * Allow up to 'ERI_DRAINTIME' for pending xmit's to complete.
2603 	 */
2604 	ERI_DELAY((erip->tcurp == erip->tnextp), ERI_DRAINTIME);
2605 
2606 	mutex_enter(&erip->intrlock);
2607 	eri_stop_timer(erip);   /* acquire linklock */
2608 	mutex_enter(&erip->xmitlock);
2609 	mutex_enter(&erip->xcvrlock);
2610 	eri_mif_poll(erip, MIF_POLL_STOP);
2611 	erip->flags &= ~ERI_DLPI_LINKUP;
2612 	mutex_exit(&erip->xcvrlock);
2613 
2614 	needind = !erip->linkcheck;
2615 	(void) eri_stop(erip);
2616 	erip->flags &= ~ERI_RUNNING;
2617 
2618 	mutex_exit(&erip->xmitlock);
2619 	eri_start_timer(erip, eri_check_link, 0);
2620 	mutex_exit(&erip->intrlock);
2621 
2622 	if (needind)
2623 		mac_link_update(erip->mh, LINK_STATE_DOWN);
2624 }
2625 
2626 /*
2627  * Allocate CONSISTENT memory for rmds and tmds with appropriate alignment and
2628  * map it in IO space.
2629  *
2630  * The driver allocates STREAMS buffers which will be mapped in DVMA
2631  * space using DDI DMA resources.
2632  *
2633  */
2634 static int
2635 eri_allocthings(struct eri *erip)
2636 {
2637 
2638 	uintptr_t	a;
2639 	int		size;
2640 	uint32_t	rval;
2641 	int		i;
2642 	size_t		real_len;
2643 	uint32_t	cookiec;
2644 	int		alloc_stat = 0;
2645 	ddi_dma_cookie_t dma_cookie;
2646 
2647 	/*
2648 	 * Return if resources are already allocated.
2649 	 */
2650 	if (erip->rmdp)
2651 		return (alloc_stat);
2652 
2653 	erip->alloc_flag = 0;
2654 
2655 	/*
2656 	 * Allocate the TMD and RMD descriptors and extra for alignments.
2657 	 */
2658 	size = (ERI_RPENDING * sizeof (struct rmd) +
2659 	    ERI_TPENDING * sizeof (struct eri_tmd)) + ERI_GMDALIGN;
2660 
2661 	rval = ddi_dma_alloc_handle(erip->dip, &desc_dma_attr,
2662 	    DDI_DMA_DONTWAIT, 0, &erip->md_h);
2663 	if (rval != DDI_SUCCESS) {
2664 		return (++alloc_stat);
2665 	}
2666 	erip->alloc_flag |= ERI_DESC_HANDLE_ALLOC;
2667 
2668 	rval = ddi_dma_mem_alloc(erip->md_h, size, &erip->dev_attr,
2669 	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
2670 	    (caddr_t *)&erip->iopbkbase, &real_len, &erip->mdm_h);
2671 	if (rval != DDI_SUCCESS) {
2672 		return (++alloc_stat);
2673 	}
2674 	erip->alloc_flag |= ERI_DESC_MEM_ALLOC;
2675 
2676 	rval = ddi_dma_addr_bind_handle(erip->md_h, NULL,
2677 	    (caddr_t)erip->iopbkbase, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2678 	    DDI_DMA_DONTWAIT, 0, &erip->md_c, &cookiec);
2679 
2680 	if (rval != DDI_DMA_MAPPED)
2681 		return (++alloc_stat);
2682 
2683 	erip->alloc_flag |= ERI_DESC_MEM_MAP;
2684 
2685 	if (cookiec != 1)
2686 		return (++alloc_stat);
2687 
2688 	erip->iopbiobase = erip->md_c.dmac_address;
2689 
2690 	a = erip->iopbkbase;
2691 	a = ROUNDUP(a, ERI_GMDALIGN);
2692 	erip->rmdp = (struct rmd *)a;
2693 	a += ERI_RPENDING * sizeof (struct rmd);
2694 	erip->eri_tmdp = (struct eri_tmd *)a;
2695 /*
2696  *	Specifically we reserve 2 * (ERI_TPENDING + ERI_RPENDING)
2697  *	pagetable entries, i.e. 2 PTEs for each descriptor. Since
2698  *	the ethernet buffers are at most 1518 bytes, they can use
2699  *	at most 2 PTEs each.
2700  * 	We will do a ddi_dma_addr_setup for each buffer.
2701  */
2702 	/*
2703 	 * In the current implementation, we use the ddi compliant
2704 	 * dma interface. We allocate ERI_TPENDING dma handles for
2705 	 * Transmit  activity and ERI_RPENDING dma handles for receive
2706 	 * activity. The actual dma mapping is done in the io functions
2707 	 * eri_start() and eri_read_dma(),
2708 	 * by calling the ddi_dma_addr_bind_handle.
2709 	 * Dma resources are deallocated by calling ddi_dma_unbind_handle
2710 	 * in eri_reclaim() for transmit and eri_read_dma(), for receive io.
2711 	 */
2712 
2713 	if (eri_use_dvma_tx &&
2714 	    (dvma_reserve(erip->dip, &eri_dma_limits, (ERI_TPENDING * 2),
2715 	    &erip->eri_dvmaxh)) == DDI_SUCCESS) {
2716 		erip->alloc_flag |= ERI_XMIT_DVMA_ALLOC;
2717 	} else {
2718 		erip->eri_dvmaxh = NULL;
2719 		for (i = 0; i < ERI_TPENDING; i++) {
2720 			rval = ddi_dma_alloc_handle(erip->dip,
2721 			    &dma_attr, DDI_DMA_DONTWAIT, 0,
2722 			    &erip->ndmaxh[i]);
2723 
2724 			if (rval != DDI_SUCCESS) {
2725 				ERI_FAULT_MSG1(erip, SEVERITY_HIGH,
2726 				    ERI_VERB_MSG, alloc_tx_dmah_msg);
2727 				alloc_stat++;
2728 				break;
2729 			}
2730 		}
2731 
2732 		erip->xmit_handle_cnt = i;
2733 
2734 		if (i)
2735 			erip->alloc_flag |= ERI_XMIT_HANDLE_ALLOC;
2736 
2737 		if (alloc_stat)
2738 			return (alloc_stat);
2739 	}
2740 
2741 	if (eri_use_dvma_rx &&
2742 	    (dvma_reserve(erip->dip, &eri_dma_limits, (ERI_RPENDING * 2),
2743 	    &erip->eri_dvmarh)) == DDI_SUCCESS) {
2744 		erip->alloc_flag |= ERI_RCV_DVMA_ALLOC;
2745 	} else {
2746 		erip->eri_dvmarh = NULL;
2747 
2748 		for (i = 0; i < ERI_RPENDING; i++) {
2749 			rval = ddi_dma_alloc_handle(erip->dip,
2750 			    &dma_attr, DDI_DMA_DONTWAIT,
2751 			    0, &erip->ndmarh[i]);
2752 
2753 			if (rval != DDI_SUCCESS) {
2754 				ERI_FAULT_MSG1(erip, SEVERITY_HIGH,
2755 				    ERI_VERB_MSG, alloc_rx_dmah_msg);
2756 				alloc_stat++;
2757 				break;
2758 			}
2759 		}
2760 
2761 		erip->rcv_handle_cnt = i;
2762 
2763 		if (i)
2764 			erip->alloc_flag |= ERI_RCV_HANDLE_ALLOC;
2765 
2766 		if (alloc_stat)
2767 			return (alloc_stat);
2768 
2769 	}
2770 
2771 /*
2772  *	Allocate tiny TX buffers
2773  *	Note: tinybufs must always be allocated in the native
2774  *	ordering of the CPU (always big-endian for Sparc).
2775  *	ddi_dma_mem_alloc returns memory in the native ordering
2776  *	of the bus (big endian for SBus, little endian for PCI).
2777  *	So we cannot use ddi_dma_mem_alloc(..., &erip->ge_dev_attr, ...)
2778  *	because we'll get little-endian memory on PCI.
2779  */
2780 	if (ddi_dma_alloc_handle(erip->dip, &desc_dma_attr, DDI_DMA_DONTWAIT,
2781 	    0, &erip->tbuf_handle) != DDI_SUCCESS) {
2782 		ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
2783 		    alloc_tx_dmah_msg);
2784 		return (++alloc_stat);
2785 	}
2786 	erip->alloc_flag |= ERI_XBUFS_HANDLE_ALLOC;
2787 	size = ERI_TPENDING * eri_tx_bcopy_max;
2788 	erip->tbuf_kaddr = (caddr_t)kmem_alloc(size, KM_NOSLEEP);
2789 	if (erip->tbuf_kaddr == NULL) {
2790 		ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
2791 		    alloc_tx_dmah_msg);
2792 		return (++alloc_stat);
2793 	}
2794 	erip->alloc_flag |= ERI_XBUFS_KMEM_ALLOC;
2795 	if (ddi_dma_addr_bind_handle(erip->tbuf_handle, NULL,
2796 	    erip->tbuf_kaddr, size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2797 	    DDI_DMA_DONTWAIT, 0, &dma_cookie, &cookiec) != DDI_DMA_MAPPED) {
2798 		return (++alloc_stat);
2799 	}
2800 	erip->tbuf_ioaddr = dma_cookie.dmac_address;
2801 	erip->alloc_flag |= ERI_XBUFS_KMEM_DMABIND;
2802 	if (cookiec != 1)
2803 		return (++alloc_stat);
2804 
2805 	/*
2806 	 * Keep handy limit values for RMD, TMD, and Buffers.
2807 	 */
2808 	erip->rmdlimp = &((erip->rmdp)[ERI_RPENDING]);
2809 	erip->eri_tmdlimp = &((erip->eri_tmdp)[ERI_TPENDING]);
2810 
2811 	/*
2812 	 * Zero out xmit and RCV holders.
2813 	 */
2814 	bzero((caddr_t)erip->tmblkp, sizeof (erip->tmblkp));
2815 	bzero((caddr_t)erip->rmblkp, sizeof (erip->rmblkp));
2816 	return (alloc_stat);
2817 }
2818 
2819 /* <<<<<<<<<<<<<<<<<	INTERRUPT HANDLING FUNCTION	>>>>>>>>>>>>>>>>>>>> */
2820 /*
2821  *	First check to see if it is our device interrupting.
2822  */
2823 static uint_t
2824 eri_intr(caddr_t arg)
2825 {
2826 	struct eri *erip = (void *)arg;
2827 	uint32_t erisbits;
2828 	uint32_t mif_status;
2829 	uint32_t serviced = DDI_INTR_UNCLAIMED;
2830 	link_state_t linkupdate = LINK_STATE_UNKNOWN;
2831 	boolean_t macupdate = B_FALSE;
2832 	mblk_t *mp;
2833 	mblk_t *head;
2834 	mblk_t **tail;
2835 
2836 	head = NULL;
2837 	tail = &head;
2838 
2839 	mutex_enter(&erip->intrlock);
2840 
2841 	erisbits = GET_GLOBREG(status);
2842 
2843 	/*
2844 	 * Check if it is only the RX_DONE interrupt, which is
2845 	 * the most frequent one.
2846 	 */
2847 	if (((erisbits & ERI_G_STATUS_RX_INT) == ERI_G_STATUS_RX_DONE) &&
2848 	    (erip->flags & ERI_RUNNING)) {
2849 		serviced = DDI_INTR_CLAIMED;
2850 		goto rx_done_int;
2851 	}
2852 
2853 	/* Claim the first interrupt after initialization */
2854 	if (erip->flags & ERI_INITIALIZED) {
2855 		erip->flags &= ~ERI_INITIALIZED;
2856 		serviced = DDI_INTR_CLAIMED;
2857 	}
2858 
2859 	/* Check for interesting events */
2860 	if ((erisbits & ERI_G_STATUS_INTR) == 0) {
2861 #ifdef	ESTAR_WORKAROUND
2862 		uint32_t linkupdate;
2863 #endif
2864 
2865 		ERI_DEBUG_MSG2(erip, DIAG_MSG,
2866 		    "eri_intr: Interrupt Not Claimed gsbits  %X", erisbits);
2867 #ifdef	DEBUG
2868 		noteri++;
2869 #endif
2870 		ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:MIF Config = 0x%X",
2871 		    GET_MIFREG(mif_cfg));
2872 		ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:MIF imask = 0x%X",
2873 		    GET_MIFREG(mif_imask));
2874 		ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:INT imask = 0x%X",
2875 		    GET_GLOBREG(intmask));
2876 		ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:alias %X",
2877 		    GET_GLOBREG(status_alias));
2878 #ifdef	ESTAR_WORKAROUND
2879 		linkupdate = eri_check_link_noind(erip);
2880 #endif
2881 		mutex_exit(&erip->intrlock);
2882 #ifdef	ESTAR_WORKAROUND
2883 		if (linkupdate != LINK_STATE_UNKNOWN)
2884 			mac_link_update(erip->mh, linkupdate);
2885 #endif
2886 		return (serviced);
2887 	}
2888 	serviced = DDI_INTR_CLAIMED;
2889 
2890 	if (!(erip->flags & ERI_RUNNING)) {
2891 		mutex_exit(&erip->intrlock);
2892 		eri_uninit(erip);
2893 		return (serviced);
2894 	}
2895 
2896 	if (erisbits & ERI_G_STATUS_FATAL_ERR) {
2897 		ERI_DEBUG_MSG2(erip, INTR_MSG,
2898 		    "eri_intr: fatal error: erisbits = %X", erisbits);
2899 		(void) eri_fatal_err(erip, erisbits);
2900 		eri_reinit_fatal++;
2901 
2902 		if (erip->rx_reset_issued) {
2903 			erip->rx_reset_issued = 0;
2904 			(void) eri_init_rx_channel(erip);
2905 			mutex_exit(&erip->intrlock);
2906 		} else {
2907 			param_linkup = 0;
2908 			erip->stats.link_up = LINK_STATE_DOWN;
2909 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2910 			DISABLE_MAC(erip);
2911 			mutex_exit(&erip->intrlock);
2912 			(void) eri_init(erip);
2913 		}
2914 		return (serviced);
2915 	}
2916 
2917 	if (erisbits & ERI_G_STATUS_NONFATAL_ERR) {
2918 		ERI_DEBUG_MSG2(erip, INTR_MSG,
2919 		    "eri_intr: non-fatal error: erisbits = %X", erisbits);
2920 		(void) eri_nonfatal_err(erip, erisbits);
2921 		if (erip->linkcheck) {
2922 			mutex_exit(&erip->intrlock);
2923 			(void) eri_init(erip);
2924 			return (serviced);
2925 		}
2926 	}
2927 
2928 	if (erisbits & ERI_G_STATUS_MIF_INT) {
2929 		uint16_t stat;
2930 		ERI_DEBUG_MSG2(erip, XCVR_MSG,
2931 		    "eri_intr:MIF Interrupt:mii_status %X", erip->mii_status);
2932 		eri_stop_timer(erip);   /* acquire linklock */
2933 
2934 		mutex_enter(&erip->xmitlock);
2935 		mutex_enter(&erip->xcvrlock);
2936 #ifdef	ERI_MIF_POLL_STATUS_WORKAROUND
2937 		mif_status = GET_MIFREG(mif_bsts);
2938 		eri_mif_poll(erip, MIF_POLL_STOP);
2939 		ERI_DEBUG_MSG3(erip, XCVR_MSG,
2940 		    "eri_intr: new MIF interrupt status %X XCVR status %X",
2941 		    mif_status, erip->mii_status);
2942 		(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
2943 		linkupdate = eri_mif_check(erip, stat, stat);
2944 
2945 #else
2946 		mif_status = GET_MIFREG(mif_bsts);
2947 		eri_mif_poll(erip, MIF_POLL_STOP);
2948 		linkupdate = eri_mif_check(erip, (uint16_t)mif_status,
2949 		    (uint16_t)(mif_status >> 16));
2950 #endif
2951 		eri_mif_poll(erip, MIF_POLL_START);
2952 		mutex_exit(&erip->xcvrlock);
2953 		mutex_exit(&erip->xmitlock);
2954 
2955 		if (!erip->openloop_autoneg)
2956 			eri_start_timer(erip, eri_check_link,
2957 			    ERI_LINKCHECK_TIMER);
2958 		else
2959 			eri_start_timer(erip, eri_check_link,
2960 			    ERI_P_FAULT_TIMER);
2961 	}
2962 
2963 	ERI_DEBUG_MSG2(erip, INTR_MSG,
2964 	    "eri_intr:May have Read Interrupt status:status %X", erisbits);
2965 
2966 rx_done_int:
2967 	if ((erisbits & (ERI_G_STATUS_TX_INT_ME)) ||
2968 	    (erip->tx_cur_cnt >= tx_interrupt_rate)) {
2969 		mutex_enter(&erip->xmitlock);
2970 		erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
2971 		    ETX_COMPLETION_MASK);
2972 
2973 		macupdate |= eri_reclaim(erip, erip->tx_completion);
2974 		mutex_exit(&erip->xmitlock);
2975 	}
2976 
2977 	if (erisbits & ERI_G_STATUS_RX_DONE) {
2978 		volatile struct	rmd	*rmdp, *rmdpbase;
2979 		volatile uint32_t rmdi;
2980 		uint8_t loop_limit = 0x20;
2981 		uint64_t flags;
2982 		uint32_t rmdmax_mask = erip->rmdmax_mask;
2983 
2984 		rmdpbase = erip->rmdp;
2985 		rmdi = erip->rx_completion;
2986 		rmdp = rmdpbase + rmdi;
2987 
2988 		/*
2989 		 * Sync RMD before looking at it.
2990 		 */
2991 		ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
2992 		    DDI_DMA_SYNC_FORCPU);
2993 		/*
2994 		 * Loop through each RMD.
2995 		 */
2996 
2997 		flags = GET_RMD_FLAGS(rmdp);
2998 		while (((flags & ERI_RMD_OWN) == 0) && (loop_limit)) {
2999 			/* process one packet */
3000 			mp = eri_read_dma(erip, rmdp, rmdi, flags);
3001 			rmdi =  (rmdi + 1) & rmdmax_mask;
3002 			rmdp = rmdpbase + rmdi;
3003 
3004 			if (mp != NULL) {
3005 				*tail = mp;
3006 				tail = &mp->b_next;
3007 			}
3008 
3009 			/*
3010 			 * ERI RCV DMA fetches or updates four descriptors
3011 			 * at a time. Also, we don't want to update the
3012 			 * descriptor batch we just received a packet on.
3013 			 * So we update descriptors every 4 packets, and we
3014 			 * update the group of 4 behind the current batch.
3015 			 */
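			/*
			 * For example, assuming ERI_RPENDING is 128
			 * (rmdmax_mask 0x7f) and rmdi is 8, the kick below
			 * computes (8 + 128 - 4) & 0x7f = 4, i.e. the group
			 * of four descriptors just behind the current batch.
			 */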
3016 
3017 			if (!(rmdi % 4)) {
3018 				if (eri_overflow_reset &&
3019 				    (GET_GLOBREG(status_alias) &
3020 				    ERI_G_STATUS_NONFATAL_ERR)) {
3021 					loop_limit = 1;
3022 				} else {
3023 					erip->rx_kick =
3024 					    (rmdi + ERI_RPENDING - 4) &
3025 					    rmdmax_mask;
3026 					PUT_ERXREG(rx_kick, erip->rx_kick);
3027 				}
3028 			}
3029 
3030 			/*
3031 			 * Sync the next RMD before looking at it.
3032 			 */
3033 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3034 			    DDI_DMA_SYNC_FORCPU);
3035 			flags = GET_RMD_FLAGS(rmdp);
3036 			loop_limit--;
3037 		}
3038 		erip->rx_completion = rmdi;
3039 	}
3040 
3041 	erip->wantw = B_FALSE;
3042 
3043 	mutex_exit(&erip->intrlock);
3044 
3045 	if (head)
3046 		mac_rx(erip->mh, NULL, head);
3047 
3048 	if (macupdate)
3049 		mac_tx_update(erip->mh);
3050 
3051 	if (linkupdate != LINK_STATE_UNKNOWN)
3052 		mac_link_update(erip->mh, linkupdate);
3053 
3054 	return (serviced);
3055 }
3056 
3057 /*
3058  * Handle interrupts for fatal errors;
3059  * these need reinitialization.
3060  */
3061 #define	PCI_DATA_PARITY_REP	(1 << 8)
3062 #define	PCI_SING_TARGET_ABORT	(1 << 11)
3063 #define	PCI_RCV_TARGET_ABORT	(1 << 12)
3064 #define	PCI_RCV_MASTER_ABORT	(1 << 13)
3065 #define	PCI_SING_SYSTEM_ERR	(1 << 14)
3066 #define	PCI_DATA_PARITY_ERR	(1 << 15)
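
/*
 * These mirror the standard PCI configuration Status register bits
 * (PCI Local Bus Specification): bit 8 is Master Data Parity Error,
 * bit 11 Signaled Target Abort, bit 12 Received Target Abort, bit 13
 * Received Master Abort, bit 14 Signaled System Error and bit 15
 * Detected Parity Error.
 */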
3067 
3068 /* called with intrlock held */
3069 static void
3070 eri_fatal_err(struct eri *erip, uint32_t erisbits)
3071 {
3072 	uint16_t	pci_status;
3073 	uint32_t	pci_error_int = 0;
3074 
3075 	if (erisbits & ERI_G_STATUS_RX_TAG_ERR) {
3076 		erip->rx_reset_issued = 1;
3077 		HSTAT(erip, rxtag_err);
3078 	} else {
3079 		erip->global_reset_issued = 1;
3080 		if (erisbits & ERI_G_STATUS_BUS_ERR_INT) {
3081 			pci_error_int = 1;
3082 			HSTAT(erip, pci_error_int);
3083 		} else if (erisbits & ERI_G_STATUS_PERR_INT) {
3084 			HSTAT(erip, parity_error);
3085 		} else {
3086 			HSTAT(erip, unknown_fatal);
3087 		}
3088 	}
3089 
3090 	/*
3091 	 * PCI bus error
3092 	 */
3093 	if (pci_error_int && erip->pci_config_handle) {
3094 		pci_status = pci_config_get16(erip->pci_config_handle,
3095 		    PCI_CONF_STAT);
3096 		ERI_DEBUG_MSG2(erip, FATAL_ERR_MSG, "Bus Error Status %x",
3097 		    pci_status);
3098 		if (pci_status & PCI_DATA_PARITY_REP)
3099 			HSTAT(erip, pci_data_parity_err);
3100 		if (pci_status & PCI_SING_TARGET_ABORT)
3101 			HSTAT(erip, pci_signal_target_abort);
3102 		if (pci_status & PCI_RCV_TARGET_ABORT)
3103 			HSTAT(erip, pci_rcvd_target_abort);
3104 		if (pci_status & PCI_RCV_MASTER_ABORT)
3105 			HSTAT(erip, pci_rcvd_master_abort);
3106 		if (pci_status & PCI_SING_SYSTEM_ERR)
3107 			HSTAT(erip, pci_signal_system_err);
3108 		if (pci_status & PCI_DATA_PARITY_ERR)
3109 			HSTAT(erip, pci_signal_system_err);
3110 		/*
3111 		 * Clear the latched error bits by writing back the value just read.
3112 		 */
3113 		pci_config_put16(erip->pci_config_handle, PCI_CONF_STAT,
3114 		    pci_status);
3115 	}
3116 }
3117 
3118 /*
3119  * Handle interrupts regarding non-fatal events.
3120  * TXMAC, RXMAC and MACCTL events
3121  */
3122 static void
3123 eri_nonfatal_err(struct eri *erip, uint32_t erisbits)
3124 {
3125 
3126 	uint32_t	txmac_sts, rxmac_sts, macctl_sts, pause_time;
3127 
3128 #ifdef ERI_PM_WORKAROUND
3129 	if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
3130 	    PCI_PM_IDLESPEED_NONE) == DDI_SUCCESS)
3131 		erip->stats.pmcap = ERI_PMCAP_NONE;
3132 #endif
3133 
3134 	if (erisbits & ERI_G_STATUS_TX_MAC_INT) {
3135 		txmac_sts = GET_MACREG(txsts);
3136 		if (txmac_sts & BMAC_TXSTS_TX_URUN) {
3137 			erip->linkcheck = 1;
3138 			HSTAT(erip, txmac_urun);
3139 			HSTAT(erip, oerrors);
3140 		}
3141 
3142 		if (txmac_sts & BMAC_TXSTS_MAXPKT_ERR) {
3143 			erip->linkcheck = 1;
3144 			HSTAT(erip, txmac_maxpkt_err);
3145 			HSTAT(erip, oerrors);
3146 		}
3147 		if (txmac_sts & BMAC_TXSTS_NCC_EXP) {
3148 			erip->stats.collisions += 0x10000;
3149 		}
3150 
3151 		if (txmac_sts & BMAC_TXSTS_ECC_EXP) {
3152 			erip->stats.excessive_coll += 0x10000;
3153 		}
3154 
3155 		if (txmac_sts & BMAC_TXSTS_LCC_EXP) {
3156 			erip->stats.late_coll += 0x10000;
3157 		}
3158 
3159 		if (txmac_sts & BMAC_TXSTS_FCC_EXP) {
3160 			erip->stats.first_coll += 0x10000;
3161 		}
3162 
3163 		if (txmac_sts & BMAC_TXSTS_DEFER_EXP) {
3164 			HSTAT(erip, defer_timer_exp);
3165 		}
3166 
3167 		if (txmac_sts & BMAC_TXSTS_PEAK_EXP) {
3168 			erip->stats.peak_attempt_cnt += 0x100;
3169 		}
3170 	}
3171 
3172 	if (erisbits & ERI_G_STATUS_RX_NO_BUF) {
3173 		ERI_DEBUG_MSG1(erip, NONFATAL_MSG, "rx dropped/no free desc");
3174 
3175 		if (eri_overflow_reset)
3176 			erip->linkcheck = 1;
3177 
3178 		HSTAT(erip, no_free_rx_desc);
3179 		HSTAT(erip, ierrors);
3180 	}
3181 	if (erisbits & ERI_G_STATUS_RX_MAC_INT) {
3182 		rxmac_sts = GET_MACREG(rxsts);
3183 		if (rxmac_sts & BMAC_RXSTS_RX_OVF) {
3184 #ifndef ERI_RMAC_HANG_WORKAROUND
3185 			eri_stop_timer(erip);   /* acquire linklock */
3186 			erip->check_rmac_hang++;
3187 			erip->check2_rmac_hang = 0;
3188 			erip->rxfifo_wr_ptr = GET_ERXREG(rxfifo_wr_ptr);
3189 			erip->rxfifo_rd_ptr = GET_ERXREG(rxfifo_rd_ptr);
3190 
3191 			ERI_DEBUG_MSG5(erip, NONFATAL_MSG,
3192 			    "overflow intr %d: %8x wr:%2x rd:%2x",
3193 			    erip->check_rmac_hang,
3194 			    GET_MACREG(macsm),
3195 			    GET_ERXREG(rxfifo_wr_ptr),
3196 			    GET_ERXREG(rxfifo_rd_ptr));
3197 
3198 			eri_start_timer(erip, eri_check_link,
3199 			    ERI_CHECK_HANG_TIMER);
3200 #endif
3201 			if (eri_overflow_reset)
3202 				erip->linkcheck = 1;
3203 
3204 			HSTAT(erip, rx_overflow);
3205 			HSTAT(erip, ierrors);
3206 		}
3207 
3208 		if (rxmac_sts & BMAC_RXSTS_ALE_EXP) {
3209 			erip->stats.rx_align_err += 0x10000;
3210 			erip->stats.ierrors += 0x10000;
3211 		}
3212 
3213 		if (rxmac_sts & BMAC_RXSTS_CRC_EXP) {
3214 			erip->stats.rx_crc_err += 0x10000;
3215 			erip->stats.ierrors += 0x10000;
3216 		}
3217 
3218 		if (rxmac_sts & BMAC_RXSTS_LEN_EXP) {
3219 			erip->stats.rx_length_err += 0x10000;
3220 			erip->stats.ierrors += 0x10000;
3221 		}
3222 
3223 		if (rxmac_sts & BMAC_RXSTS_CVI_EXP) {
3224 			erip->stats.rx_code_viol_err += 0x10000;
3225 			erip->stats.ierrors += 0x10000;
3226 		}
3227 	}
3228 
3229 	if (erisbits & ERI_G_STATUS_MAC_CTRL_INT) {
3230 
3231 		macctl_sts = GET_MACREG(macctl_sts);
3232 		if (macctl_sts & ERI_MCTLSTS_PAUSE_RCVD) {
3233 			pause_time = ((macctl_sts &
3234 			    ERI_MCTLSTS_PAUSE_TIME) >> 16);
3235 			ERI_DEBUG_MSG2(erip, NONFATAL_MSG,
3236 			    "PAUSE Received. pause time = %X slot_times",
3237 			    pause_time);
3238 			HSTAT(erip, pause_rxcount);
3239 			erip->stats.pause_time_count += pause_time;
3240 		}
3241 
3242 		if (macctl_sts & ERI_MCTLSTS_PAUSE_STATE) {
3243 			HSTAT(erip, pause_oncount);
3244 			erip->stats.pausing = 1;
3245 		}
3246 
3247 		if (macctl_sts & ERI_MCTLSTS_NONPAUSE) {
3248 			HSTAT(erip, pause_offcount);
3249 			erip->stats.pausing = 0;
3250 		}
3251 	}
3252 
3253 }
3254 
3255 /*
3256  * if this is the first init do not bother to save the
3257  * counters.
3258  */
3259 static void
3260 eri_savecntrs(struct eri *erip)
3261 {
3262 	uint32_t	fecnt, aecnt, lecnt, rxcv;
3263 	uint32_t	ltcnt, excnt, fccnt;
3264 
3265 	/* XXX What all gets added in ierrors and oerrors? */
3266 	fecnt = GET_MACREG(fecnt);
3267 	HSTATN(erip, rx_crc_err, fecnt);
3268 	PUT_MACREG(fecnt, 0);
3269 
3270 	aecnt = GET_MACREG(aecnt);
3271 	HSTATN(erip, rx_align_err, aecnt);
3272 	PUT_MACREG(aecnt, 0);
3273 
3274 	lecnt = GET_MACREG(lecnt);
3275 	HSTATN(erip, rx_length_err, lecnt);
3276 	PUT_MACREG(lecnt, 0);
3277 
3278 	rxcv = GET_MACREG(rxcv);
3279 	HSTATN(erip, rx_code_viol_err, rxcv);
3280 	PUT_MACREG(rxcv, 0);
3281 
3282 	ltcnt = GET_MACREG(ltcnt);
3283 	HSTATN(erip, late_coll, ltcnt);
3284 	PUT_MACREG(ltcnt, 0);
3285 
3286 	erip->stats.collisions += (GET_MACREG(nccnt) + ltcnt);
3287 	PUT_MACREG(nccnt, 0);
3288 
3289 	excnt = GET_MACREG(excnt);
3290 	HSTATN(erip, excessive_coll, excnt);
3291 	PUT_MACREG(excnt, 0);
3292 
3293 	fccnt = GET_MACREG(fccnt);
3294 	HSTATN(erip, first_coll, fccnt);
3295 	PUT_MACREG(fccnt, 0);
3296 
3297 	/*
3298 	 * Do not add code violations to input errors.
3299 	 * They are already counted in CRC errors
3300 	 */
3301 	HSTATN(erip, ierrors, (fecnt + aecnt + lecnt));
3302 	HSTATN(erip, oerrors, (ltcnt + excnt));
3303 }
3304 
3305 mblk_t *
3306 eri_allocb_sp(size_t size)
3307 {
3308 	mblk_t  *mp;
3309 
3310 	size += 128;
3311 	if ((mp = allocb(size + 3 * ERI_BURSTSIZE, BPRI_HI)) == NULL) {
3312 		return (NULL);
3313 	}
3314 	mp->b_wptr += 128;
3315 	mp->b_wptr = (uint8_t *)ROUNDUP2(mp->b_wptr, ERI_BURSTSIZE);
3316 	mp->b_rptr = mp->b_wptr;
3317 
3318 	return (mp);
3319 }
3320 
3321 mblk_t *
3322 eri_allocb(size_t size)
3323 {
3324 	mblk_t  *mp;
3325 
3326 	if ((mp = allocb(size + 3 * ERI_BURSTSIZE, BPRI_HI)) == NULL) {
3327 		return (NULL);
3328 	}
3329 	mp->b_wptr = (uint8_t *)ROUNDUP2(mp->b_wptr, ERI_BURSTSIZE);
3330 	mp->b_rptr = mp->b_wptr;
3331 
3332 	return (mp);
3333 }
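
/*
 * In both allocators above, the 3 * ERI_BURSTSIZE of extra space
 * guarantees that b_wptr/b_rptr can be rounded up to a burst-aligned
 * address while leaving room for 'size' bytes.  For example, assuming
 * ERI_BURSTSIZE is 0x40, ROUNDUP2(p, 0x40) advances p by at most 0x3f
 * bytes to the next 64-byte boundary.
 */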
3334 
3335 /*
3336  * Hardware Dependent Functions
3337  * New Section.
3338  */
3339 
3340 /* <<<<<<<<<<<<<<<< Fast Ethernet PHY Bit Bang Operations >>>>>>>>>>>>>>>>>> */
3341 
3342 static void
3343 send_bit(struct eri *erip, uint32_t x)
3344 {
3345 	PUT_MIFREG(mif_bbdata, x);
3346 	PUT_MIFREG(mif_bbclk, ERI_BBCLK_LOW);
3347 	PUT_MIFREG(mif_bbclk, ERI_BBCLK_HIGH);
3348 }
3349 
3350 /*
3351  * To read the MII register bits according to the IEEE Standard
3352  */
3353 static uint32_t
3354 get_bit_std(struct eri *erip)
3355 {
3356 	uint32_t	x;
3357 
3358 	PUT_MIFREG(mif_bbclk, ERI_BBCLK_LOW);
3359 	drv_usecwait(1);	/* wait for  >330 ns for stable data */
3360 	if (param_transceiver == INTERNAL_XCVR)
3361 		x = (GET_MIFREG(mif_cfg) & ERI_MIF_CFGM0) ? 1 : 0;
3362 	else
3363 		x = (GET_MIFREG(mif_cfg) & ERI_MIF_CFGM1) ? 1 : 0;
3364 	PUT_MIFREG(mif_bbclk, ERI_BBCLK_HIGH);
3365 	return (x);
3366 }
3367 
3368 #define	SEND_BIT(x)		send_bit(erip, x)
3369 #define	GET_BIT_STD(x)		x = get_bit_std(erip)
3370 
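/*
 * The bit-bang routines below clock out an IEEE 802.3 clause 22 MII
 * management frame one bit at a time:
 *
 *	<IDLE: 32+ ones><ST=01><OP><AAAAA><RRRRR><TA><DDDDDDDDDDDDDDDD>
 *
 * where OP is 01 for a write and 10 for a read, AAAAA is the 5-bit PHY
 * address, RRRRR is the 5-bit register address, TA is the 2-bit
 * turnaround and D is the 16 data bits, MSB first.
 */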
3371 
3372 static void
3373 eri_bb_mii_write(struct eri *erip, uint8_t regad, uint16_t data)
3374 {
3375 	uint8_t	phyad;
3376 	int		i;
3377 
3378 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
3379 	phyad = erip->phyad;
3380 	(void) eri_bb_force_idle(erip);
3381 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
3382 	SEND_BIT(0); SEND_BIT(1);	/* <OP> */
3383 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
3384 		SEND_BIT((phyad >> i) & 1);
3385 	}
3386 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
3387 		SEND_BIT((regad >> i) & 1);
3388 	}
3389 	SEND_BIT(1); SEND_BIT(0);	/* <TA> */
3390 	for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
3391 		SEND_BIT((data >> i) & 1);
3392 	}
3393 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
3394 }
3395 
3396 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
3397 static uint32_t
3398 eri_bb_mii_read(struct eri *erip, uint8_t regad, uint16_t *datap)
3399 {
3400 	uint8_t	phyad;
3401 	int	i;
3402 	uint32_t	x;
3403 	uint32_t	y;
3404 
3405 	*datap = 0;
3406 
3407 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
3408 	phyad = erip->phyad;
3409 	(void) eri_bb_force_idle(erip);
3410 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
3411 	SEND_BIT(1); SEND_BIT(0);	/* <OP> */
3412 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
3413 		SEND_BIT((phyad >> i) & 1);
3414 	}
3415 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
3416 		SEND_BIT((regad >> i) & 1);
3417 	}
3418 
3419 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
3420 
3421 	GET_BIT_STD(x);
3422 	GET_BIT_STD(y);		/* <TA> */
3423 	for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
3424 		GET_BIT_STD(x);
3425 		*datap += (x << i);
3426 	}
3427 	/* Kludge to get the Transceiver out of hung mode */
3428 	/* XXX: Test if this is still needed */
3429 	GET_BIT_STD(x);
3430 	GET_BIT_STD(x);
3431 	GET_BIT_STD(x);
3432 
3433 	return (y);
3434 }
3435 
3436 static void
3437 eri_bb_force_idle(struct eri *erip)
3438 {
3439 	int		i;
3440 
3441 	for (i = 0; i < 33; i++) {
3442 		SEND_BIT(1);
3443 	}
3444 }
3445 
3446 /* <<<<<<<<<<<<<<<<<<<<End of Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
3447 
3448 
3449 /* <<<<<<<<<<<<< Frame Register used for MII operations >>>>>>>>>>>>>>>>>>>> */
3450 
3451 #ifdef ERI_FRM_DEBUG
3452 int frame_flag = 0;
3453 #endif
3454 
3455 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
3456 static uint32_t
3457 eri_mii_read(struct eri	*erip, uint8_t regad, uint16_t *datap)
3458 {
3459 	uint32_t frame;
3460 	uint8_t phyad;
3461 
3462 	if (param_transceiver == NO_XCVR)
3463 		return (1);	/* No xcvr present */
3464 
3465 	if (!erip->frame_enable)
3466 		return (eri_bb_mii_read(erip, regad, datap));
3467 
3468 	phyad = erip->phyad;
3469 #ifdef ERI_FRM_DEBUG
3470 	if (!frame_flag) {
3471 		eri_errror(erip->dip, "Frame Register used for MII");
3472 		frame_flag = 1;
3473 	}
3474 #endif
3475 	ERI_DEBUG_MSG3(erip, FRM_MSG,
3476 	    "Frame Reg :mii_read: phyad = %X reg = %X ", phyad, regad);
3477 
3478 	PUT_MIFREG(mif_frame, ERI_MIF_FRREAD |
3479 	    (phyad << ERI_MIF_FRPHYAD_SHIFT) |
3480 	    (regad << ERI_MIF_FRREGAD_SHIFT));
3481 	MIF_ERIDELAY(300,  phyad, regad);
3482 	frame = GET_MIFREG(mif_frame);
3483 	if ((frame & ERI_MIF_FRTA0) == 0) {
3484 		return (1);
3485 	} else {
3486 		*datap = (uint16_t)(frame & ERI_MIF_FRDATA);
3487 		return (0);
3488 	}
3490 }
3491 
3492 static void
3493 eri_mii_write(struct eri *erip, uint8_t regad, uint16_t data)
3494 {
3495 	uint8_t	phyad;
3496 
3497 	if (!erip->frame_enable) {
3498 		eri_bb_mii_write(erip, regad, data);
3499 		return;
3500 	}
3501 
3502 	phyad = erip->phyad;
3503 
3504 	PUT_MIFREG(mif_frame, (ERI_MIF_FRWRITE |
3505 	    (phyad << ERI_MIF_FRPHYAD_SHIFT) |
3506 	    (regad << ERI_MIF_FRREGAD_SHIFT) | data));
3507 	MIF_ERIDELAY(300,  phyad, regad);
3508 	(void) GET_MIFREG(mif_frame);
3509 }
3510 
3511 
3512 /* <<<<<<<<<<<<<<<<<	PACKET TRANSMIT FUNCTIONS	>>>>>>>>>>>>>>>>>>>> */
3513 
3514 #define	ERI_CROSS_PAGE_BOUNDRY(i, size, pagesize) \
3515 	((i & pagesize) != ((i + size) & pagesize))
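
/*
 * For example, assuming an 8K page (pagesize passed as 0x2000 so that
 * the macro compares the single page-number bit), i == 0x1ff0 and
 * size == 0x20: (0x1ff0 & 0x2000) is 0 while (0x2010 & 0x2000) is
 * 0x2000, so the buffer is flagged as crossing a page boundary.
 */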
3516 
3517 /*
3518  * Send a single mblk.  Returns B_TRUE if the packet is sent, or disposed of
3519  * by freemsg.  Returns B_FALSE if the packet was not sent or queued, and
3520  * should be retried later (due to tx resource exhaustion.)
3521  */
3522 static boolean_t
3523 eri_send_msg(struct eri *erip, mblk_t *mp)
3524 {
3525 	volatile struct	eri_tmd	*tmdp = NULL;
3526 	volatile struct	eri_tmd	*tbasep = NULL;
3527 	mblk_t		*nmp;
3528 	uint32_t	len = 0, len_msg = 0, xover_len = TX_STREAM_MIN;
3529 	uint32_t	nmblks = 0;
3530 	uint32_t	i, j;
3531 	uint64_t	int_me = 0;
3532 	uint_t		tmdcsum = 0;
3533 	uint_t		start_offset = 0;
3534 	uint_t		stuff_offset = 0;
3535 	uint_t		flags = 0;
3536 	boolean_t	macupdate = B_FALSE;
3537 
3538 	caddr_t	ptr;
3539 	uint32_t	offset;
3540 	uint64_t	ctrl;
3541 	uint32_t	count;
3542 	uint32_t	flag_dma;
3543 	ddi_dma_cookie_t	c;
3544 
3545 	if (!param_linkup) {
3546 		freemsg(mp);
3547 		HSTAT(erip, tnocar);
3548 		HSTAT(erip, oerrors);
3549 		return (B_TRUE);
3550 	}
3551 
3552 	nmp = mp;
3553 
3554 #ifdef ERI_HWCSUM
3555 	hcksum_retrieve(mp, NULL, NULL, &start_offset, &stuff_offset,
3556 	    NULL, NULL, &flags);
3557 
3558 	if (flags & HCK_PARTIALCKSUM) {
3559 		if (get_ether_type(mp->b_rptr) == ETHERTYPE_VLAN) {
3560 			start_offset += ETHERHEADER_SIZE + 4;
3561 			stuff_offset += ETHERHEADER_SIZE + 4;
3562 		} else {
3563 			start_offset += ETHERHEADER_SIZE;
3564 			stuff_offset += ETHERHEADER_SIZE;
3565 		}
3566 		tmdcsum = ERI_TMD_CSENABL;
3567 	}
3568 #endif /* ERI_HWCSUM */
3569 	while (nmp != NULL) {
3570 		ASSERT(nmp->b_wptr >= nmp->b_rptr);
3571 		nmblks++; /* # of mbs */
3572 		nmp = nmp->b_cont;
3573 	}
3574 	len_msg = msgsize(mp);
3575 
3576 	/*
3577 	 * update MIB II statistics
3578 	 */
3579 	BUMP_OutNUcast(erip, mp->b_rptr);
3580 
3581 /*
3582  * 	----------------------------------------------------------------------
3583  *	here we deal with 3 cases.
3584  * 	1. pkt has exactly one mblk
3585  * 	2. pkt has exactly two mblks
3586  * 	3. pkt has more than 2 mblks. Since this almost
3587  *	   never happens, we copy all of them into
3588  *	   a msg with one mblk.
3589  * 	for each mblk in the message, we allocate a tmd and
3590  * 	figure out the tmd index and tmblkp index.
3591  * 	----------------------------------------------------------------------
3592  */
3593 	if (nmblks > 2) { /* more than 2 mbs */
3594 		if ((nmp = eri_allocb(len_msg)) == NULL) {
3595 			HSTAT(erip, allocbfail);
3596 			HSTAT(erip, noxmtbuf);
3597 			freemsg(mp);
3598 			return (B_TRUE); /* bad case */
3599 		}
3600 		mcopymsg(mp, nmp->b_rptr);
3601 		nmp->b_wptr = nmp->b_rptr + len_msg;
3602 		mp = nmp;
3603 		nmblks = 1; /* make it one mb */
3604 	} else
3605 		nmp = mp;
3606 
3607 	mutex_enter(&erip->xmitlock);
3608 
3609 	tbasep = erip->eri_tmdp;
3610 
3611 	/* Check if there are enough descriptors for this packet */
3612 	tmdp = erip->tnextp;
3613 
3614 	if (tmdp >=  erip->tcurp) /* check notmds */
3615 		i = tmdp - erip->tcurp;
3616 	else
3617 		i = tmdp + ERI_TPENDING - erip->tcurp;
3618 
3619 	if (i > (ERI_TPENDING - 4))
3620 		goto notmds;
3621 
3622 	if (i >= (ERI_TPENDING >> 1) && !(erip->starts & 0x7))
3623 		int_me = ERI_TMD_INTME;
3624 
3625 	for (j = 0; j < nmblks; j++) { /* for one or two mb cases */
3626 
3627 		len = MBLKL(nmp);
3628 		i = tmdp - tbasep; /* index */
3629 
3630 		if (len_msg < eri_tx_bcopy_max) { /* tb-all mb */
3631 
3632 			offset = (i * eri_tx_bcopy_max);
3633 			ptr = erip->tbuf_kaddr + offset;
3634 
3635 			mcopymsg(mp, ptr);
3636 
3637 #ifdef	ERI_HDX_BUG_WORKAROUND
3638 			if ((param_mode) || (eri_hdx_pad_enable == 0)) {
3639 				if (len_msg < ETHERMIN) {
3640 					bzero((ptr + len_msg),
3641 					    (ETHERMIN - len_msg));
3642 					len_msg = ETHERMIN;
3643 				}
3644 			} else {
3645 				if (len_msg < 97) {
3646 					bzero((ptr + len_msg), (97 - len_msg));
3647 					len_msg = 97;
3648 				}
3649 			}
3650 #endif
3651 			len = len_msg;
3652 			c.dmac_address = erip->tbuf_ioaddr + offset;
3653 			(void) ddi_dma_sync(erip->tbuf_handle,
3654 			    (off_t)offset, len_msg, DDI_DMA_SYNC_FORDEV);
3655 			nmblks = 1; /* exit this for loop */
3656 		} else if ((!j) && (len < eri_tx_bcopy_max)) { /* tb-1st mb */
3657 
3658 			offset = (i * eri_tx_bcopy_max);
3659 			ptr = erip->tbuf_kaddr + offset;
3660 
3661 			bcopy(mp->b_rptr, ptr, len);
3662 
3663 			c.dmac_address = erip->tbuf_ioaddr + offset;
3664 			(void) ddi_dma_sync(erip->tbuf_handle,
3665 			    (off_t)offset, len, DDI_DMA_SYNC_FORDEV);
3666 			nmp = mp->b_cont;
3667 			mp->b_cont = NULL;
3668 			freeb(mp);
3669 		} else if (erip->eri_dvmaxh != NULL) { /* fast DVMA */
3670 
3671 			dvma_kaddr_load(erip->eri_dvmaxh,
3672 			    (caddr_t)nmp->b_rptr, len, 2 * i, &c);
3673 			dvma_sync(erip->eri_dvmaxh,
3674 			    2 * i, DDI_DMA_SYNC_FORDEV);
3675 
3676 			erip->tmblkp[i] = nmp;
3677 			if (!j) {
3678 				nmp = mp->b_cont;
3679 				mp->b_cont = NULL;
3680 			}
3681 
3682 		} else { /* DDI DMA */
3683 			if (len < xover_len)
3684 				flag_dma = DDI_DMA_WRITE | DDI_DMA_CONSISTENT;
3685 			else
3686 				flag_dma = DDI_DMA_WRITE | DDI_DMA_STREAMING;
3687 
3688 			if (ddi_dma_addr_bind_handle(erip->ndmaxh[i],
3689 			    NULL, (caddr_t)nmp->b_rptr, len,
3690 			    flag_dma, DDI_DMA_DONTWAIT, NULL, &c,
3691 			    &count) != DDI_DMA_MAPPED) {
3692 				if (j) { /* free previous DMA resources */
3693 					i = erip->tnextp - tbasep;
3694 					if (erip->ndmaxh[i]) { /* DDI DMA */
3695 						(void) ddi_dma_unbind_handle(
3696 						    erip->ndmaxh[i]);
3697 						erip->ndmaxh[i] = NULL;
3698 						freeb(mp);
3699 					}
3700 					freeb(nmp);
3701 				} else {
3702 					freemsg(mp);
3703 				}
3704 
3705 				mutex_exit(&erip->xmitlock);
3706 				HSTAT(erip, noxmtbuf);
3707 
3708 				return (B_TRUE); /* bad case */
3709 			}
3710 
3711 			erip->tmblkp[i] = nmp;
3712 			if (!j) {
3713 				nmp = mp->b_cont;
3714 				mp->b_cont = NULL;
3715 			}
3716 		}
3717 
3718 		ctrl = 0;
3719 		/* first descr of packet */
3720 		if (!j) {
3721 			ctrl = ERI_TMD_SOP| int_me | tmdcsum |
3722 			    (start_offset << ERI_TMD_CSSTART_SHIFT) |
3723 			    (stuff_offset << ERI_TMD_CSSTUFF_SHIFT);
3724 		}
3725 
3726 		/* last descr of packet */
3727 		if ((j + 1) == nmblks) {
3728 			ctrl |= ERI_TMD_EOP;
3729 		}
3730 
3731 		PUT_TMD(tmdp, c, len, ctrl);
3732 		ERI_SYNCIOPB(erip, tmdp, sizeof (struct eri_tmd),
3733 		    DDI_DMA_SYNC_FORDEV);
3734 
3735 		tmdp = NEXTTMD(erip, tmdp);
3736 		erip->tx_cur_cnt++;
3737 	} /* for each nmp */
3738 
3739 	erip->tx_kick = tmdp - tbasep;
3740 	PUT_ETXREG(tx_kick, erip->tx_kick);
3741 	erip->tnextp = tmdp;
3742 
3743 	erip->starts++;
3744 
3745 	if (erip->tx_cur_cnt >= tx_interrupt_rate) {
3746 		erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
3747 		    ETX_COMPLETION_MASK);
3748 		macupdate |= eri_reclaim(erip, erip->tx_completion);
3749 	}
3750 	mutex_exit(&erip->xmitlock);
3751 
3752 	if (macupdate)
3753 		mac_tx_update(erip->mh);
3754 
3755 	return (B_TRUE);
3756 
3757 notmds:
3758 	HSTAT(erip, notmds);
3759 	erip->wantw = B_TRUE;
3760 
3761 	if (!erip->tx_int_me) {
3762 		PUT_GLOBREG(intmask, GET_GLOBREG(intmask) &
3763 		    ~(ERI_G_MASK_TX_INT_ME));
3764 		erip->tx_int_me = 1;
3765 	}
3766 
3767 	if (erip->tx_cur_cnt >= tx_interrupt_rate) {
3768 		erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
3769 		    ETX_COMPLETION_MASK);
3770 		macupdate |= eri_reclaim(erip, erip->tx_completion);
3771 	}
3772 
3773 	mutex_exit(&erip->xmitlock);
3774 
3775 	if (macupdate)
3776 		mac_tx_update(erip->mh);
3777 
3778 	return (B_FALSE);
3779 }
3780 
3781 static mblk_t *
3782 eri_m_tx(void *arg, mblk_t *mp)
3783 {
3784 	struct eri *erip = arg;
3785 	mblk_t *next;
3786 
3787 	while (mp != NULL) {
3788 		next = mp->b_next;
3789 		mp->b_next = NULL;
3790 		if (!eri_send_msg(erip, mp)) {
3791 			mp->b_next = next;
3792 			break;
3793 		}
3794 		mp = next;
3795 	}
3796 
3797 	return (mp);
3798 }
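
/*
 * Usage note (GLDv3 contract, stated here for clarity): returning a
 * non-NULL chain from this mc_tx entry point tells the MAC layer those
 * mblks were not sent. The framework holds them and resumes transmission
 * only after the driver calls mac_tx_update(), which eri_send_msg() and
 * the reclaim path arrange once descriptors are freed.
 */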
3799 
3800 /*
3801  * Transmit completion reclaiming.
3802  */
3803 static boolean_t
3804 eri_reclaim(struct eri *erip, uint32_t tx_completion)
3805 {
3806 	volatile struct	eri_tmd	*tmdp;
3807 	struct	eri_tmd	*tcomp;
3808 	struct	eri_tmd	*tbasep;
3809 	struct	eri_tmd	*tlimp;
3810 	mblk_t *bp;
3811 	int	i;
3812 	uint64_t	flags;
3813 	uint_t reclaimed = 0;
3814 
3815 	tbasep = erip->eri_tmdp;
3816 	tlimp = erip->eri_tmdlimp;
3817 
3818 	tmdp = erip->tcurp;
3819 	tcomp = tbasep + tx_completion; /* pointer to completion tmd */
3820 
3821 	/*
3822 	 * Loop through each TMD starting from tcurp and upto tcomp.
3823 	 * Loop through each TMD starting from tcurp up to tcomp.
3824 	while (tmdp != tcomp) {
3825 		flags = GET_TMD_FLAGS(tmdp);
3826 		if (flags & (ERI_TMD_SOP))
3827 			HSTAT(erip, opackets64);
3828 
3829 		HSTATN(erip, obytes64, (flags & ERI_TMD_BUFSIZE));
3830 
3831 		i = tmdp - tbasep;
3832 		bp = erip->tmblkp[i];
3833 
3834 		/* dvma handle case */
3835 
3836 		if (bp) {
3837 			if (erip->eri_dvmaxh) {
3838 				dvma_unload(erip->eri_dvmaxh, 2 * i,
3839 				    (uint_t)DONT_FLUSH);
3840 			} else {
3841 				/* dma handle case. */
3842 				(void) ddi_dma_unbind_handle(erip->ndmaxh[i]);
3843 			}
3844 
3845 			freeb(bp);
3846 			erip->tmblkp[i] = NULL;
3847 
3848 		}
3849 		tmdp = NEXTTMDP(tbasep, tlimp, tmdp);
3850 		reclaimed++;
3851 	}
3852 
3853 	erip->tcurp = tmdp;
3854 	erip->tx_cur_cnt -= reclaimed;
3855 
3856 	return (erip->wantw && reclaimed ? B_TRUE : B_FALSE);
3857 }
3858 
3859 
3860 /* <<<<<<<<<<<<<<<<<<<	PACKET RECEIVE FUNCTIONS	>>>>>>>>>>>>>>>>>>> */
3861 static mblk_t *
3862 eri_read_dma(struct eri *erip, volatile struct rmd *rmdp,
3863 	int rmdi, uint64_t flags)
3864 {
3865 	mblk_t	*bp, *nbp;
3866 	int	len;
3867 	uint_t ccnt;
3868 	ddi_dma_cookie_t	c;
3869 #ifdef ERI_RCV_CKSUM
3870 	ushort_t sum;
3871 #endif /* ERI_RCV_CKSUM */
3872 	mblk_t *retmp = NULL;
3873 
3874 	bp = erip->rmblkp[rmdi];
3875 	len = (flags & ERI_RMD_BUFSIZE) >> ERI_RMD_BUFSIZE_SHIFT;
3876 #ifdef	ERI_DONT_STRIP_CRC
3877 	len -= 4;
3878 #endif
3879 	/*
3880 	 * In the event of RX FIFO overflow error, ERI REV 1.0 ASIC can
3881 	 * corrupt packets following the descriptor corresponding to the
3882 	 * overflow. To detect the corrupted packets, we disable the
3883 	 * dropping of the "bad" packets at the MAC. The descriptor
3884 	 * then would have the "BAD" bit set. We drop the overflowing
3885 	 * packet and the packet following it. We could have done some sort
3886 	 * of checking to determine if the second packet was indeed bad
3887 	 * (using CRC or checksum) but it would be expensive in this
3888 	 * routine, since it is run in interrupt context.
3889 	 */
3890 	if ((flags & ERI_RMD_BAD) || (len < ETHERMIN) || (len > ETHERMAX+4)) {
3891 
3892 		HSTAT(erip, rx_bad_pkts);
3893 		if ((flags & ERI_RMD_BAD) == 0)
3894 			HSTAT(erip, ierrors);
3895 		if (len < ETHERMIN) {
3896 			HSTAT(erip, rx_runt);
3897 		} else if (len > ETHERMAX+4) {
3898 			HSTAT(erip, rx_toolong_pkts);
3899 		}
3900 		HSTAT(erip, drop);
3901 		UPDATE_RMD(rmdp);
3902 
3903 		ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3904 		    DDI_DMA_SYNC_FORDEV);
3905 		return (NULL);
3906 	}
3907 #ifdef  ERI_DONT_STRIP_CRC
3908 	{
3909 		uint32_t hw_fcs, tail_fcs;
3910 		/*
3911 		 * Since we don't let the hardware strip the CRC in
3912 		 * half-duplex mode, the driver needs to do it here;
3913 		 * this works around a hardware bug.
3914 		 */
3915 		bp->b_wptr = bp->b_rptr + ERI_FSTBYTE_OFFSET + len;
3916 		/*
3917 		 * Get the Checksum calculated by the hardware.
3918 		 */
3919 		hw_fcs = flags & ERI_RMD_CKSUM;
3920 		/*
3921 		 * Catch the case when the CRC starts on an odd
3922 		 * boundary.
3923 		 */
3924 		tail_fcs = bp->b_wptr[0] << 8 | bp->b_wptr[1];
3925 		tail_fcs += bp->b_wptr[2] << 8 | bp->b_wptr[3];
3926 		tail_fcs = (tail_fcs & 0xffff) + (tail_fcs >> 16);
3927 		if ((uintptr_t)(bp->b_wptr) & 1) {
3928 			tail_fcs = ((tail_fcs << 8) & 0xffff) | (tail_fcs >> 8);
3929 		}
3930 		hw_fcs += tail_fcs;
3931 		hw_fcs = (hw_fcs & 0xffff) + (hw_fcs >> 16);
3932 		hw_fcs &= 0xffff;
3933 		/*
3934 		 * Now we can replace what the hardware wrote, make believe
3935 		 * it got it right in the first place.
3936 		 */
3937 		flags = (flags & ~(uint64_t)ERI_RMD_CKSUM) | hw_fcs;
3938 	}
3939 #endif
3940 	/*
3941 	 * Packet Processing
3942 	 * Once we get a packet bp, we try to allocate a new mblk, nbp,
3943 	 * to replace this one. If we succeed, we map it to the current
3944 	 * dma handle and update the descriptor with the new cookie. We
3945 	 * then put bp on our read service queue erip->ipq, if it exists,
3946 	 * or we just pass bp up to the stream expecting it.
3947 	 * If allocation of the new mblk fails, we implicitly drop the
3948 	 * current packet, i.e. we do not pass up the mblk but re-use it;
3949 	 * re-mapping is not required.
3950 	 */
3951 
3952 	if (len < eri_rx_bcopy_max) {
3953 		if ((nbp = eri_allocb_sp(len + ERI_FSTBYTE_OFFSET))) {
3954 			(void) ddi_dma_sync(erip->ndmarh[rmdi], 0,
3955 			    len + ERI_FSTBYTE_OFFSET, DDI_DMA_SYNC_FORCPU);
3956 			DB_TYPE(nbp) = M_DATA;
3957 			bcopy(bp->b_rptr, nbp->b_rptr,
3958 			    len + ERI_FSTBYTE_OFFSET);
3959 			UPDATE_RMD(rmdp);
3960 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3961 			    DDI_DMA_SYNC_FORDEV);
3962 
3963 			/* Add the First Byte offset to the b_rptr */
3964 			nbp->b_rptr += ERI_FSTBYTE_OFFSET;
3965 			nbp->b_wptr = nbp->b_rptr + len;
3966 
3967 #ifdef ERI_RCV_CKSUM
3968 			sum = ~(uint16_t)(flags & ERI_RMD_CKSUM);
3969 			ERI_PROCESS_READ(erip, nbp, sum);
3970 #else
3971 			ERI_PROCESS_READ(erip, nbp);
3972 #endif
3973 			retmp = nbp;
3974 		} else {
3975 
3976 			/*
3977 			 * mblk allocation has failed. Re-use the old mblk for
3978 			 * the next packet. Re-mapping is not required since
3979 			 * the same mblk and dma cookie is to be used again.
3980 			 */
3981 			HSTAT(erip, ierrors);
3982 			HSTAT(erip, allocbfail);
3983 			HSTAT(erip, norcvbuf);
3984 
3985 			UPDATE_RMD(rmdp);
3986 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3987 			    DDI_DMA_SYNC_FORDEV);
3988 			ERI_DEBUG_MSG1(erip, RESOURCE_MSG, "allocb fail");
3989 		}
3990 	} else {
3991 		/* Use dma unmap/map */
3992 		if ((nbp = eri_allocb_sp(ERI_BUFSIZE))) {
3993 			/*
3994 			 * How do we harden this, especially if unbind
3995 			 * succeeds and then bind fails?
3996 			 * If unbind fails, we can leave without updating
3997 			 * the descriptor, but would it continue to work on
3998 			 * the next round?
3999 			 */
4000 			(void) ddi_dma_unbind_handle(erip->ndmarh[rmdi]);
4001 			(void) ddi_dma_addr_bind_handle(erip->ndmarh[rmdi],
4002 			    NULL, (caddr_t)nbp->b_rptr, ERI_BUFSIZE,
4003 			    DDI_DMA_READ | DDI_DMA_CONSISTENT,
4004 			    DDI_DMA_DONTWAIT, 0, &c, &ccnt);
4005 
4006 			erip->rmblkp[rmdi] = nbp;
4007 			PUT_RMD(rmdp, c);
4008 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
4009 			    DDI_DMA_SYNC_FORDEV);
4010 
4011 			/* Add the First Byte offset to the b_rptr */
4012 
4013 			bp->b_rptr += ERI_FSTBYTE_OFFSET;
4014 			bp->b_wptr = bp->b_rptr + len;
4015 
4016 #ifdef ERI_RCV_CKSUM
4017 			sum = ~(uint16_t)(flags & ERI_RMD_CKSUM);
4018 			ERI_PROCESS_READ(erip, bp, sum);
4019 #else
4020 			ERI_PROCESS_READ(erip, bp);
4021 #endif
4022 			retmp = bp;
4023 		} else {
4024 
4025 			/*
4026 			 * mblk allocation has failed. Re-use the old mblk for
4027 			 * the next packet. Re-mapping is not required since
4028 			 * the same mblk and dma cookie is to be used again.
4029 			 */
4030 			HSTAT(erip, ierrors);
4031 			HSTAT(erip, allocbfail);
4032 			HSTAT(erip, norcvbuf);
4033 
4034 			UPDATE_RMD(rmdp);
4035 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
4036 			    DDI_DMA_SYNC_FORDEV);
4037 			ERI_DEBUG_MSG1(erip, RESOURCE_MSG, "allocb fail");
4038 		}
4039 	}
4040 
4041 	return (retmp);
4042 }
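
/*
 * Editor's sketch (not driver code): the ERI_DONT_STRIP_CRC block above
 * adjusts the hardware's 16-bit one's complement sum for the trailing
 * 4-byte FCS, byte-swapping when the FCS starts on an odd address. The
 * folding step it repeats is, in isolation:
 *
 *	static uint16_t
 *	ocsum_fold(uint32_t sum)
 *	{
 *		while (sum >> 16)		fold carries back in
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return ((uint16_t)sum);
 *	}
 *
 * e.g. 0x1fffe folds to 0xfffe + 0x1 = 0xffff in a single pass.
 */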
4043 
4044 #define	LINK_STAT_DISPLAY_TIME	20
4045 
4046 static int
4047 eri_init_xfer_params(struct eri *erip)
4048 {
4049 	int	i;
4050 	dev_info_t *dip;
4051 
4052 	dip = erip->dip;
4053 
4054 	for (i = 0; i < A_CNT(param_arr); i++)
4055 		erip->param_arr[i] = param_arr[i];
4056 
4057 	erip->xmit_dma_mode = 0;
4058 	erip->rcv_dma_mode = 0;
4059 	erip->mifpoll_enable = mifpoll_enable;
4060 	erip->lance_mode_enable = lance_mode;
4061 	erip->frame_enable = 1;
4062 	erip->ngu_enable = ngu_enable;
4063 
4064 	if (!erip->g_nd && !eri_param_register(erip,
4065 	    erip->param_arr, A_CNT(param_arr))) {
4066 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
4067 		    param_reg_fail_msg);
4068 		return (-1);
4069 	}
4070 
4071 	/*
4072 	 * Set up the start-up values for user-configurable parameters
4073 	 * Get the values from the global variables first.
4074 	 * Use the MASK to limit the value to allowed maximum.
4075 	 */
4076 
4077 	param_transceiver = NO_XCVR;
4078 
4079 /*
4080  * The link speed may be forced to either 10 Mbps or 100 Mbps using the
4081  * property "transfer-speed". This may be done in OBP by using the command
4082  * "apply transfer-speed=<speed> <device>". The speed may be either 10 or 100.
4083  */
4084 	i = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "transfer-speed", 0);
4085 	if (i != 0) {
4086 		param_autoneg = 0;	/* force speed */
4087 		param_anar_100T4 = 0;
4088 		param_anar_10fdx = 0;
4089 		param_anar_10hdx = 0;
4090 		param_anar_100fdx = 0;
4091 		param_anar_100hdx = 0;
4092 		param_anar_asm_dir = 0;
4093 		param_anar_pause = 0;
4094 
4095 		if (i == 10)
4096 			param_anar_10hdx = 1;
4097 		else if (i == 100)
4098 			param_anar_100hdx = 1;
4099 	}
4100 
4101 	/*
4102 	 * Get the parameter values configured in .conf file.
4103 	 */
4104 	param_ipg1 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg1", ipg1) &
4105 	    ERI_MASK_8BIT;
4106 
4107 	param_ipg2 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg2", ipg2) &
4108 	    ERI_MASK_8BIT;
4109 
4110 	param_use_intphy = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4111 	    "use_int_xcvr", use_int_xcvr) & ERI_MASK_1BIT;
4112 
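	/*
	 * Editor's note: the next assignment looks like a copy/paste bug;
	 * the "pace_size" property value overwrites param_use_intphy, which
	 * was just read from "use_int_xcvr" above.
	 */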
4113 	param_use_intphy = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4114 	    "pace_size", pace_size) & ERI_MASK_8BIT;
4115 
4116 	param_autoneg = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4117 	    "adv_autoneg_cap", adv_autoneg_cap) & ERI_MASK_1BIT;
4121 
4122 	param_anar_100T4 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4123 	    "adv_100T4_cap", adv_100T4_cap) & ERI_MASK_1BIT;
4124 
4125 	param_anar_100fdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4126 	    "adv_100fdx_cap", adv_100fdx_cap) & ERI_MASK_1BIT;
4127 
4128 	param_anar_100hdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4129 	    "adv_100hdx_cap", adv_100hdx_cap) & ERI_MASK_1BIT;
4130 
4131 	param_anar_10fdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4132 	    "adv_10fdx_cap", adv_10fdx_cap) & ERI_MASK_1BIT;
4133 
4134 	param_anar_10hdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4135 	    "adv_10hdx_cap", adv_10hdx_cap) & ERI_MASK_1BIT;
4136 
4137 	param_ipg0 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg0", ipg0) &
4138 	    ERI_MASK_8BIT;
4139 
4140 	param_intr_blank_time = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4141 	    "intr_blank_time", intr_blank_time) & ERI_MASK_8BIT;
4142 
4143 	param_intr_blank_packets = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4144 	    "intr_blank_packets", intr_blank_packets) & ERI_MASK_8BIT;
4145 
4146 	param_lance_mode = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4147 	    "lance_mode", lance_mode) & ERI_MASK_1BIT;
4148 
4149 	param_select_link = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4150 	    "select_link", select_link) & ERI_MASK_1BIT;
4151 
4152 	param_default_link = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4153 	    "default_link", default_link) & ERI_MASK_1BIT;
4154 
4155 	param_anar_asm_dir = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4156 	    "adv_asm_dir_cap", adv_pauseTX_cap) & ERI_MASK_1BIT;
4157 
4158 	param_anar_pause = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4159 	    "adv_pause_cap", adv_pauseRX_cap) & ERI_MASK_1BIT;
4160 
4161 	if (link_pulse_disabled)
4162 		erip->link_pulse_disabled = 1;
4163 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, 0, "link-pulse-disabled"))
4164 		erip->link_pulse_disabled = 1;
4165 
4166 	eri_statinit(erip);
4167 	return (0);
4169 }
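
/*
 * Configuration illustration (hypothetical values): the properties read
 * above may be supplied in the driver's .conf file, e.g. a fragment of
 * /kernel/drv/eri.conf such as
 *
 *	adv_autoneg_cap=0 adv_100fdx_cap=1 adv_100hdx_cap=0
 *	ipg1=8 ipg2=4;
 *
 * while the "transfer-speed" property is applied from OBP as described
 * in the comment above:
 *
 *	ok apply transfer-speed=100 <device>
 */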
4170 
4171 static void
4172 eri_process_ndd_ioctl(struct eri *erip, queue_t *wq, mblk_t *mp, int cmd)
4173 {
4174 
4175 	uint32_t old_ipg1, old_ipg2, old_use_int_xcvr, old_autoneg;
4176 	uint32_t old_100T4;
4177 	uint32_t old_100fdx, old_100hdx, old_10fdx, old_10hdx;
4178 	uint32_t old_ipg0, old_lance_mode;
4179 	uint32_t old_intr_blank_time, old_intr_blank_packets;
4180 	uint32_t old_asm_dir, old_pause;
4181 	uint32_t old_select_link, old_default_link;
4182 
4183 	switch (cmd) {
4184 	case ERI_ND_GET:
4185 
4186 		old_autoneg =	param_autoneg;
4187 		old_100T4 =	param_anar_100T4;
4188 		old_100fdx =	param_anar_100fdx;
4189 		old_100hdx =	param_anar_100hdx;
4190 		old_10fdx =	param_anar_10fdx;
4191 		old_10hdx =	param_anar_10hdx;
4192 		old_asm_dir =	param_anar_asm_dir;
4193 		old_pause =	param_anar_pause;
4194 
4195 		param_autoneg = old_autoneg & ~ERI_NOTUSR;
4196 		param_anar_100T4 = old_100T4 & ~ERI_NOTUSR;
4197 		param_anar_100fdx = old_100fdx & ~ERI_NOTUSR;
4198 		param_anar_100hdx = old_100hdx & ~ERI_NOTUSR;
4199 		param_anar_10fdx = old_10fdx & ~ERI_NOTUSR;
4200 		param_anar_10hdx = old_10hdx & ~ERI_NOTUSR;
4201 		param_anar_asm_dir = old_asm_dir & ~ERI_NOTUSR;
4202 		param_anar_pause = old_pause & ~ERI_NOTUSR;
4203 
4204 		if (!eri_nd_getset(wq, erip->g_nd, mp)) {
4205 			param_autoneg = old_autoneg;
4206 			param_anar_100T4 = old_100T4;
4207 			param_anar_100fdx = old_100fdx;
4208 			param_anar_100hdx = old_100hdx;
4209 			param_anar_10fdx = old_10fdx;
4210 			param_anar_10hdx = old_10hdx;
4211 			param_anar_asm_dir = old_asm_dir;
4212 			param_anar_pause = old_pause;
4213 			miocnak(wq, mp, 0, EINVAL);
4214 			return;
4215 		}
4216 		param_autoneg = old_autoneg;
4217 		param_anar_100T4 = old_100T4;
4218 		param_anar_100fdx = old_100fdx;
4219 		param_anar_100hdx = old_100hdx;
4220 		param_anar_10fdx = old_10fdx;
4221 		param_anar_10hdx = old_10hdx;
4222 		param_anar_asm_dir = old_asm_dir;
4223 		param_anar_pause = old_pause;
4224 
4225 		qreply(wq, mp);
4226 		break;
4227 
4228 	case ERI_ND_SET:
4229 		old_ipg0 = param_ipg0;
4230 		old_intr_blank_time = param_intr_blank_time;
4231 		old_intr_blank_packets = param_intr_blank_packets;
4232 		old_lance_mode = param_lance_mode;
4233 		old_ipg1 = param_ipg1;
4234 		old_ipg2 = param_ipg2;
4235 		old_use_int_xcvr = param_use_intphy;
4236 		old_autoneg = param_autoneg;
4237 		old_100T4 =	param_anar_100T4;
4238 		old_100fdx =	param_anar_100fdx;
4239 		old_100hdx =	param_anar_100hdx;
4240 		old_10fdx =	param_anar_10fdx;
4241 		old_10hdx =	param_anar_10hdx;
4242 		param_autoneg = 0xff;
4243 		old_asm_dir = param_anar_asm_dir;
4244 		param_anar_asm_dir = 0xff;
4245 		old_pause = param_anar_pause;
4246 		param_anar_pause = 0xff;
4247 		old_select_link = param_select_link;
4248 		old_default_link = param_default_link;
4249 
4250 		if (!eri_nd_getset(wq, erip->g_nd, mp)) {
4251 			param_autoneg = old_autoneg;
4252 			miocnak(wq, mp, 0, EINVAL);
4253 			return;
4254 		}
4255 
4256 		qreply(wq, mp);
4257 
4258 		if (param_autoneg != 0xff) {
4259 			ERI_DEBUG_MSG2(erip, NDD_MSG,
4260 			    "ndd_ioctl: new param_autoneg %d", param_autoneg);
4261 			param_linkup = 0;
4262 			erip->stats.link_up = LINK_STATE_DOWN;
4263 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4264 			(void) eri_init(erip);
4265 		} else {
4266 			param_autoneg = old_autoneg;
4267 			if ((old_use_int_xcvr != param_use_intphy) ||
4268 			    (old_default_link != param_default_link) ||
4269 			    (old_select_link != param_select_link)) {
4270 				param_linkup = 0;
4271 				erip->stats.link_up = LINK_STATE_DOWN;
4272 				erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4273 				(void) eri_init(erip);
4274 			} else if ((old_ipg1 != param_ipg1) ||
4275 			    (old_ipg2 != param_ipg2) ||
4276 			    (old_ipg0 != param_ipg0) ||
4277 			    (old_intr_blank_time != param_intr_blank_time) ||
4278 			    (old_intr_blank_packets !=
4279 			    param_intr_blank_packets) ||
4280 			    (old_lance_mode != param_lance_mode)) {
4281 				param_linkup = 0;
4282 				erip->stats.link_up = LINK_STATE_DOWN;
4283 				erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4284 				(void) eri_init(erip);
4285 			}
4286 		}
4287 		break;
4288 	}
4289 }
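
/*
 * Usage illustration (typical ndd(1M) invocations; the device node name
 * is the conventional one): the ERI_ND_GET/ERI_ND_SET cases above
 * service commands such as
 *
 *	# ndd /dev/eri adv_autoneg_cap
 *	# ndd -set /dev/eri adv_autoneg_cap 0
 *
 * where, as coded above, setting a link-related parameter marks the
 * link down and re-runs eri_init() to renegotiate.
 */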
4290 
4291 
4292 static int
4293 eri_stat_kstat_update(kstat_t *ksp, int rw)
4294 {
4295 	struct eri *erip;
4296 	struct erikstat *erikp;
4297 	struct stats *esp;
4298 	boolean_t macupdate = B_FALSE;
4299 
4300 	erip = (struct eri *)ksp->ks_private;
4301 	erikp = (struct erikstat *)ksp->ks_data;
4302 
4303 	if (rw != KSTAT_READ)
4304 		return (EACCES);
4305 	/*
4306 	 * Update all the stats by reading all the counter registers.
4307 	 * Counter register stats are not updated till they overflow
4308 	 * and interrupt.
4309 	 */
4310 
4311 	mutex_enter(&erip->xmitlock);
4312 	if ((erip->flags & ERI_RUNNING) && (erip->flags & ERI_TXINIT)) {
4313 		erip->tx_completion =
4314 		    GET_ETXREG(tx_completion) & ETX_COMPLETION_MASK;
4315 		macupdate |= eri_reclaim(erip, erip->tx_completion);
4316 	}
4317 	mutex_exit(&erip->xmitlock);
4318 	if (macupdate)
4319 		mac_tx_update(erip->mh);
4320 
4321 	eri_savecntrs(erip);
4322 
4323 	esp = &erip->stats;
4324 
4325 	erikp->erik_txmac_maxpkt_err.value.ul = esp->txmac_maxpkt_err;
4326 	erikp->erik_defer_timer_exp.value.ul = esp->defer_timer_exp;
4327 	erikp->erik_peak_attempt_cnt.value.ul = esp->peak_attempt_cnt;
4328 	erikp->erik_tx_hang.value.ul	= esp->tx_hang;
4329 
4330 	erikp->erik_no_free_rx_desc.value.ul	= esp->no_free_rx_desc;
4331 
4332 	erikp->erik_rx_hang.value.ul		= esp->rx_hang;
4333 	erikp->erik_rx_length_err.value.ul	= esp->rx_length_err;
4334 	erikp->erik_rx_code_viol_err.value.ul	= esp->rx_code_viol_err;
4335 	erikp->erik_pause_rxcount.value.ul	= esp->pause_rxcount;
4336 	erikp->erik_pause_oncount.value.ul	= esp->pause_oncount;
4337 	erikp->erik_pause_offcount.value.ul	= esp->pause_offcount;
4338 	erikp->erik_pause_time_count.value.ul	= esp->pause_time_count;
4339 
4340 	erikp->erik_inits.value.ul		= esp->inits;
4341 	erikp->erik_jab.value.ul		= esp->jab;
4342 	erikp->erik_notmds.value.ul		= esp->notmds;
4343 	erikp->erik_allocbfail.value.ul		= esp->allocbfail;
4344 	erikp->erik_drop.value.ul		= esp->drop;
4345 	erikp->erik_rx_bad_pkts.value.ul	= esp->rx_bad_pkts;
4346 	erikp->erik_rx_inits.value.ul		= esp->rx_inits;
4347 	erikp->erik_tx_inits.value.ul		= esp->tx_inits;
4348 	erikp->erik_rxtag_err.value.ul		= esp->rxtag_err;
4349 	erikp->erik_parity_error.value.ul	= esp->parity_error;
4350 	erikp->erik_pci_error_int.value.ul	= esp->pci_error_int;
4351 	erikp->erik_unknown_fatal.value.ul	= esp->unknown_fatal;
4352 	erikp->erik_pci_data_parity_err.value.ul = esp->pci_data_parity_err;
4353 	erikp->erik_pci_signal_target_abort.value.ul =
4354 	    esp->pci_signal_target_abort;
4355 	erikp->erik_pci_rcvd_target_abort.value.ul =
4356 	    esp->pci_rcvd_target_abort;
4357 	erikp->erik_pci_rcvd_master_abort.value.ul =
4358 	    esp->pci_rcvd_master_abort;
4359 	erikp->erik_pci_signal_system_err.value.ul =
4360 	    esp->pci_signal_system_err;
4361 	erikp->erik_pci_det_parity_err.value.ul = esp->pci_det_parity_err;
4362 
4363 	erikp->erik_pmcap.value.ul = esp->pmcap;
4364 
4365 	return (0);
4366 }
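
/*
 * Usage illustration: the counters updated here belong to the
 * "driver_info" kstat created in eri_statinit() below and can be read
 * with, e.g.
 *
 *	# kstat -m eri -i 0 -n driver_info
 */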
4367 
4368 static void
4369 eri_statinit(struct eri *erip)
4370 {
4371 	struct	kstat	*ksp;
4372 	struct	erikstat	*erikp;
4373 
4374 	if ((ksp = kstat_create("eri", erip->instance, "driver_info", "net",
4375 	    KSTAT_TYPE_NAMED,
4376 	    sizeof (struct erikstat) / sizeof (kstat_named_t), 0)) == NULL) {
4377 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
4378 		    kstat_create_fail_msg);
4379 		return;
4380 	}
4381 
4382 	erip->ksp = ksp;
4383 	erikp = (struct erikstat *)(ksp->ks_data);
4384 	/*
4385 	 * MIB II kstat variables
4386 	 */
4387 
4388 	kstat_named_init(&erikp->erik_inits, "inits", KSTAT_DATA_ULONG);
4389 
4390 	kstat_named_init(&erikp->erik_txmac_maxpkt_err,	"txmac_maxpkt_err",
4391 	    KSTAT_DATA_ULONG);
4392 	kstat_named_init(&erikp->erik_defer_timer_exp, "defer_timer_exp",
4393 	    KSTAT_DATA_ULONG);
4394 	kstat_named_init(&erikp->erik_peak_attempt_cnt,	"peak_attempt_cnt",
4395 	    KSTAT_DATA_ULONG);
4396 	kstat_named_init(&erikp->erik_tx_hang, "tx_hang", KSTAT_DATA_ULONG);
4397 
4398 	kstat_named_init(&erikp->erik_no_free_rx_desc, "no_free_rx_desc",
4399 	    KSTAT_DATA_ULONG);
4400 	kstat_named_init(&erikp->erik_rx_hang, "rx_hang", KSTAT_DATA_ULONG);
4401 	kstat_named_init(&erikp->erik_rx_length_err, "rx_length_err",
4402 	    KSTAT_DATA_ULONG);
4403 	kstat_named_init(&erikp->erik_rx_code_viol_err,	"rx_code_viol_err",
4404 	    KSTAT_DATA_ULONG);
4405 
4406 	kstat_named_init(&erikp->erik_pause_rxcount, "pause_rcv_cnt",
4407 	    KSTAT_DATA_ULONG);
4408 
4409 	kstat_named_init(&erikp->erik_pause_oncount, "pause_on_cnt",
4410 	    KSTAT_DATA_ULONG);
4411 
4412 	kstat_named_init(&erikp->erik_pause_offcount, "pause_off_cnt",
4413 	    KSTAT_DATA_ULONG);
4414 	kstat_named_init(&erikp->erik_pause_time_count,	"pause_time_cnt",
4415 	    KSTAT_DATA_ULONG);
4416 
4417 	kstat_named_init(&erikp->erik_jab, "jabber", KSTAT_DATA_ULONG);
4418 	kstat_named_init(&erikp->erik_notmds, "no_tmds", KSTAT_DATA_ULONG);
4419 	kstat_named_init(&erikp->erik_allocbfail, "allocbfail",
4420 	    KSTAT_DATA_ULONG);
4421 
4422 	kstat_named_init(&erikp->erik_drop, "drop", KSTAT_DATA_ULONG);
4423 
4424 	kstat_named_init(&erikp->erik_rx_bad_pkts, "bad_pkts",
4425 	    KSTAT_DATA_ULONG);
4426 
4427 	kstat_named_init(&erikp->erik_rx_inits, "rx_inits", KSTAT_DATA_ULONG);
4428 
4429 	kstat_named_init(&erikp->erik_tx_inits, "tx_inits", KSTAT_DATA_ULONG);
4430 
4431 	kstat_named_init(&erikp->erik_rxtag_err, "rxtag_error",
4432 	    KSTAT_DATA_ULONG);
4433 
4434 	kstat_named_init(&erikp->erik_parity_error, "parity_error",
4435 	    KSTAT_DATA_ULONG);
4436 
4437 	kstat_named_init(&erikp->erik_pci_error_int, "pci_error_interrupt",
4438 	    KSTAT_DATA_ULONG);
4439 	kstat_named_init(&erikp->erik_unknown_fatal, "unknown_fatal",
4440 	    KSTAT_DATA_ULONG);
4441 	kstat_named_init(&erikp->erik_pci_data_parity_err,
4442 	    "pci_data_parity_err", KSTAT_DATA_ULONG);
4443 	kstat_named_init(&erikp->erik_pci_signal_target_abort,
4444 	    "pci_signal_target_abort", KSTAT_DATA_ULONG);
4445 	kstat_named_init(&erikp->erik_pci_rcvd_target_abort,
4446 	    "pci_rcvd_target_abort", KSTAT_DATA_ULONG);
4447 	kstat_named_init(&erikp->erik_pci_rcvd_master_abort,
4448 	    "pci_rcvd_master_abort", KSTAT_DATA_ULONG);
4449 	kstat_named_init(&erikp->erik_pci_signal_system_err,
4450 	    "pci_signal_system_err", KSTAT_DATA_ULONG);
4451 	kstat_named_init(&erikp->erik_pci_det_parity_err,
4452 	    "pci_det_parity_err", KSTAT_DATA_ULONG);
4453 
4454 	kstat_named_init(&erikp->erik_pmcap, "pmcap", KSTAT_DATA_ULONG);
4455 
4457 	ksp->ks_update = eri_stat_kstat_update;
4458 	ksp->ks_private = (void *) erip;
4459 	kstat_install(ksp);
4460 }
4461 
4462 
4463 /* <<<<<<<<<<<<<<<<<<<<<<< NDD SUPPORT FUNCTIONS	>>>>>>>>>>>>>>>>>>> */
4464 /*
4465  * ndd support functions to get/set parameters
4466  */
4467 /* Free the Named Dispatch Table by calling eri_nd_free */
4468 static void
4469 eri_param_cleanup(struct eri *erip)
4470 {
4471 	if (erip->g_nd)
4472 		(void) eri_nd_free(&erip->g_nd);
4473 }
4474 
4475 /*
4476  * Extracts the value from the eri parameter array and prints the
4477  * parameter value. cp points to the required parameter.
4478  */
4479 /* ARGSUSED */
4480 static int
4481 eri_param_get(queue_t *q, mblk_t *mp, caddr_t cp)
4482 {
4483 	param_t		*eripa = (void *)cp;
4484 	int		param_len = 1;
4485 	uint32_t	param_val;
4486 	mblk_t		*nmp;
4487 	int		ok;
4488 
4489 	param_val = eripa->param_val;
4490 	/*
4491 	 * Calculate space required in mblk.
4492 	 * Remember to include NULL terminator.
4493 	 */
4494 	do {
4495 		param_len++;
4496 		param_val /= 10;
4497 	} while (param_val);
4498 
4499 	ok = eri_mk_mblk_tail_space(mp, &nmp, param_len);
4500 	if (ok == 0) {
4501 		(void) sprintf((char *)nmp->b_wptr, "%d", eripa->param_val);
4502 		nmp->b_wptr += param_len;
4503 	}
4504 
4505 	return (ok);
4506 }
4507 
4508 /*
4509  * Check if there is space for p_val at the end if mblk.
4510  * Check if there is space for p_val at the end of the mblk.
4511  */
4512 static int
4513 eri_mk_mblk_tail_space(mblk_t *mp, mblk_t **nmp, size_t sz)
4514 {
4515 	mblk_t *tmp = mp;
4516 
4517 	while (tmp->b_cont)
4518 		tmp = tmp->b_cont;
4519 
4520 	if (MBLKTAIL(tmp) < sz) {
4521 		if ((tmp->b_cont = allocb(1024, BPRI_HI)) == NULL)
4522 			return (ENOMEM);
4523 		tmp = tmp->b_cont;
4524 	}
4525 	*nmp = tmp;
4526 	return (0);
4527 }
4528 
4529 /*
4530  * Register each element of the parameter array with the
4531  * named dispatch handler. Each element is loaded using
4532  * eri_nd_load()
4533  */
4534 static int
4535 eri_param_register(struct eri *erip, param_t *eripa, int cnt)
4536 {
4537 	/* cnt gives the count of the number of */
4538 	/* elements present in the parameter array */
4539 
4540 	int i;
4541 
4542 	for (i = 0; i < cnt; i++, eripa++) {
4543 		pfi_t	setter = (pfi_t)eri_param_set;
4544 
4545 		switch (eripa->param_name[0]) {
4546 		case '+':	/* read-write */
4547 			setter = (pfi_t)eri_param_set;
4548 			break;
4549 
4550 		case '-':	/* read-only */
4551 			setter = NULL;
4552 			break;
4553 
4554 		case '!':	/* read-only, not displayed */
4555 		case '%':	/* read-write, not displayed */
4556 			continue;
4557 		}
4558 
4559 		if (!eri_nd_load(&erip->g_nd, eripa->param_name + 1,
4560 		    (pfi_t)eri_param_get, setter, (caddr_t)eripa)) {
4561 			(void) eri_nd_free(&erip->g_nd);
4562 			return (B_FALSE);
4563 		}
4564 	}
4565 
4566 	return (B_TRUE);
4567 }
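
/*
 * Illustration (hypothetical entries; the real table is param_arr and
 * the field order is assumed to be {min, max, value, name}): the leading
 * character of param_name selects ndd visibility,
 *
 *	{ 0, 1, 1, "+adv_autoneg_cap" },	read-write
 *	{ 0, 3, 0, "-transceiver_inuse" },	read-only
 *
 * while '!' and '%' entries are skipped above and never registered.
 */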
4568 
4569 /*
4570  * Validates and sets the eri parameter pointed to by cp; this is
4571  * the "set" routine registered through eri_nd_load().
4572  */
4573 /* ARGSUSED */
4574 static int
4575 eri_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp)
4576 {
4577 	char *end;
4578 	long new_value;
4579 	param_t	*eripa = (void *)cp;
4580 
4581 	if (ddi_strtol(value, &end, 10, &new_value) != 0)
4582 		return (EINVAL);
4583 	if (end == value || new_value < eripa->param_min ||
4584 	    new_value > eripa->param_max) {
4585 		return (EINVAL);
4586 	}
4587 	eripa->param_val = (uint32_t)new_value;
4588 	return (0);
4590 }
4591 
4592 /* Free the table pointed to by 'ndp' */
4593 static void
4594 eri_nd_free(caddr_t *nd_pparam)
4595 {
4596 	ND	*nd;
4597 
4598 	if ((nd = (void *)(*nd_pparam)) != NULL) {
4599 		if (nd->nd_tbl)
4600 			kmem_free(nd->nd_tbl, nd->nd_size);
4601 		kmem_free(nd, sizeof (ND));
4602 		*nd_pparam = NULL;
4603 	}
4604 }
4605 
4606 static int
4607 eri_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp)
4608 {
4609 	int	err;
4610 	IOCP	iocp;
4611 	MBLKP	mp1;
4612 	ND	*nd;
4613 	NDE	*nde;
4614 	char	*valp;
4615 	size_t	avail;
4616 	mblk_t	*nmp;
4617 
4618 	if (!nd_param)
4619 		return (B_FALSE);
4620 
4621 	nd = (void *)nd_param;
4622 	iocp = (void *)mp->b_rptr;
4623 	if ((iocp->ioc_count == 0) || !(mp1 = mp->b_cont)) {
4624 		mp->b_datap->db_type = M_IOCACK;
4625 		iocp->ioc_count = 0;
4626 		iocp->ioc_error = EINVAL;
4627 		return (B_TRUE);
4628 	}
4629 	/*
4630 	 * NOTE - logic throughout nd_xxx assumes single data block for ioctl.
4631 	 *	However, existing code sends in some big buffers.
4632 	 */
4633 	avail = iocp->ioc_count;
4634 	if (mp1->b_cont) {
4635 		freemsg(mp1->b_cont);
4636 		mp1->b_cont = NULL;
4637 	}
4638 
4639 	mp1->b_datap->db_lim[-1] = '\0';	/* Force null termination */
4640 	valp = (char *)mp1->b_rptr;
4641 
4642 	for (nde = nd->nd_tbl; /* */; nde++) {
4643 		if (!nde->nde_name)
4644 			return (B_FALSE);
4645 		if (strcmp(nde->nde_name, valp) == 0)
4646 			break;
4647 	}
4648 	err = EINVAL;
4649 
4650 	while (*valp++)
4651 		;
4652 
4653 	if (!*valp || valp >= (char *)mp1->b_wptr)
4654 		valp = NULL;
4655 
4656 	switch (iocp->ioc_cmd) {
4657 	case ND_GET:
4658 	/*
4659 	 * (XXX) hack: "*valp" is size of user buffer for copyout. If result
4660 	 * of action routine is too big, free excess and return ioc_rval as buf
4661 	 * size needed.  Return as many mblocks as will fit, free the rest.  For
4662 	 * backward compatibility, assume size of orig ioctl buffer if "*valp"
4663 	 * bad or not given.
4664 	 */
4665 		if (valp)
4666 			(void) ddi_strtol(valp, NULL, 10, (long *)&avail);
4667 		/* We overwrite the name/value with the reply data */
4668 		{
4669 			mblk_t *mp2 = mp1;
4670 
4671 			while (mp2) {
4672 				mp2->b_wptr = mp2->b_rptr;
4673 				mp2 = mp2->b_cont;
4674 			}
4675 		}
4676 		err = (*nde->nde_get_pfi)(q, mp1, nde->nde_data, iocp->ioc_cr);
4677 		if (!err) {
4678 			size_t	size_out;
4679 			ssize_t	excess;
4680 
4681 			iocp->ioc_rval = 0;
4682 
4683 			/* Tack on the null */
4684 			err = eri_mk_mblk_tail_space(mp1, &nmp, 1);
4685 			if (!err) {
4686 				*nmp->b_wptr++ = '\0';
4687 				size_out = msgdsize(mp1);
4688 				excess = size_out - avail;
4689 				if (excess > 0) {
4690 					iocp->ioc_rval = (unsigned)size_out;
4691 					size_out -= excess;
4692 					(void) adjmsg(mp1, -(excess + 1));
4693 					err = eri_mk_mblk_tail_space(mp1,
4694 					    &nmp, 1);
4695 					if (!err)
4696 						*nmp->b_wptr++ = '\0';
4697 					else
4698 						size_out = 0;
4699 				}
4700 
4701 			} else
4702 				size_out = 0;
4703 
4704 			iocp->ioc_count = size_out;
4705 		}
4706 		break;
4707 
4708 	case ND_SET:
4709 		if (valp) {
4710 			err = (*nde->nde_set_pfi)(q, mp1, valp,
4711 			    nde->nde_data, iocp->ioc_cr);
4712 			iocp->ioc_count = 0;
4713 			freemsg(mp1);
4714 			mp->b_cont = NULL;
4715 		}
4716 		break;
4717 	}
4718 
4719 	iocp->ioc_error = err;
4720 	mp->b_datap->db_type = M_IOCACK;
4721 	return (B_TRUE);
4722 }
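
/*
 * Layout note (as assumed by the parsing above): the ioctl data block
 * is a pair of NUL-terminated strings,
 *
 *	"parameter_name\0value\0"
 *
 * where for ND_GET "value" is the size of the user buffer available for
 * copyout and for ND_SET it is the new parameter value.
 */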
4723 
4724 /*
4725  * Load 'name' into the named dispatch table pointed to by 'ndp'.
4726  * 'ndp' should be the address of a char pointer cell.  If the table
4727  * does not exist (*ndp == 0), a new table is allocated and 'ndp'
4728  * is stuffed.  If there is not enough space in the table for a new
4729  * entry, more space is allocated.
4730  */
4731 static boolean_t
4732 eri_nd_load(caddr_t *nd_pparam, char *name, pfi_t get_pfi,
4733     pfi_t set_pfi, caddr_t data)
4734 {
4735 	ND	*nd;
4736 	NDE	*nde;
4737 
4738 	if (!nd_pparam)
4739 		return (B_FALSE);
4740 
4741 	if ((nd = (void *)(*nd_pparam)) == NULL) {
4742 		if ((nd = (ND *)kmem_zalloc(sizeof (ND), KM_NOSLEEP))
4743 		    == NULL)
4744 			return (B_FALSE);
4745 		*nd_pparam = (caddr_t)nd;
4746 	}
4747 	if (nd->nd_tbl) {
4748 		for (nde = nd->nd_tbl; nde->nde_name; nde++) {
4749 			if (strcmp(name, nde->nde_name) == 0)
4750 				goto fill_it;
4751 		}
4752 	}
4753 	if (nd->nd_free_count <= 1) {
4754 		if ((nde = (NDE *)kmem_zalloc(nd->nd_size +
4755 		    NDE_ALLOC_SIZE, KM_NOSLEEP)) == NULL)
4756 			return (B_FALSE);
4757 
4758 		nd->nd_free_count += NDE_ALLOC_COUNT;
4759 		if (nd->nd_tbl) {
4760 			bcopy((char *)nd->nd_tbl, (char *)nde, nd->nd_size);
4761 			kmem_free((char *)nd->nd_tbl, nd->nd_size);
4762 		} else {
4763 			nd->nd_free_count--;
4764 			nde->nde_name = "?";
4765 			nde->nde_get_pfi = nd_get_names;
4766 			nde->nde_set_pfi = nd_set_default;
4767 		}
4768 		nde->nde_data = (caddr_t)nd;
4769 		nd->nd_tbl = nde;
4770 		nd->nd_size += NDE_ALLOC_SIZE;
4771 	}
4772 	for (nde = nd->nd_tbl; nde->nde_name; nde++)
4773 		;
4774 	nd->nd_free_count--;
4775 fill_it:
4776 	nde->nde_name = name;
4777 	nde->nde_get_pfi = get_pfi ? get_pfi : nd_get_default;
4778 	nde->nde_set_pfi = set_pfi ? set_pfi : nd_set_default;
4779 	nde->nde_data = data;
4780 	return (B_TRUE);
4781 }
4782 
4783 /*
4784  * Hardening Functions
4785  * New Section
4786  */
4787 #ifdef  DEBUG
4788 /*PRINTFLIKE5*/
4789 static void
4790 eri_debug_msg(const char *file, int line, struct eri *erip,
4791     debug_msg_t type, const char *fmt, ...)
4792 {
4793 	char	msg_buffer[255];
4794 	va_list ap;
4795 
4796 	va_start(ap, fmt);
4797 	(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
4798 	va_end(ap);
4799 
4800 	if (eri_msg_out & ERI_CON_MSG) {
4801 		if (((type <= eri_debug_level) && eri_debug_all) ||
4802 		    ((type == eri_debug_level) && !eri_debug_all)) {
4803 			if (erip)
4804 				cmn_err(CE_CONT, "D: %s %s%d:(%s%d) %s\n",
4805 				    debug_msg_string[type], file, line,
4806 				    ddi_driver_name(erip->dip), erip->instance,
4807 				    msg_buffer);
4808 			else
4809 				cmn_err(CE_CONT, "D: %s %s(%d): %s\n",
4810 				    debug_msg_string[type], file,
4811 				    line, msg_buffer);
4812 		}
4813 	}
4814 }
4815 #endif
4816 
4817 
4818 /*PRINTFLIKE4*/
4819 static void
4820 eri_fault_msg(struct eri *erip, uint_t severity, msg_t type,
4821 	const char *fmt, ...)
4822 {
4823 	char	msg_buffer[255];
4824 	va_list	ap;
4825 
4826 	va_start(ap, fmt);
4827 	(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
4828 	va_end(ap);
4829 
4830 	if (erip == NULL) {
4831 		cmn_err(CE_NOTE, "eri : %s", msg_buffer);
4832 		return;
4833 	}
4834 
4835 	if (severity == SEVERITY_HIGH) {
4836 		cmn_err(CE_WARN, "%s%d : %s", ddi_driver_name(erip->dip),
4837 		    erip->instance, msg_buffer);
4838 	} else switch (type) {
4839 	case ERI_VERB_MSG:
4840 		cmn_err(CE_CONT, "?%s%d : %s", ddi_driver_name(erip->dip),
4841 		    erip->instance, msg_buffer);
4842 		break;
4843 	case ERI_LOG_MSG:
4844 		cmn_err(CE_NOTE, "^%s%d : %s", ddi_driver_name(erip->dip),
4845 		    erip->instance, msg_buffer);
4846 		break;
4847 	case ERI_BUF_MSG:
4848 		cmn_err(CE_NOTE, "!%s%d : %s", ddi_driver_name(erip->dip),
4849 		    erip->instance, msg_buffer);
4850 		break;
4851 	case ERI_CON_MSG:
4852 		cmn_err(CE_CONT, "%s%d : %s", ddi_driver_name(erip->dip),
4853 		    erip->instance, msg_buffer);
		break;
4854 	default:
4855 		break;
4856 	}
4857 }
4858 
4859 /*
4860  * Transceiver (xcvr) Functions
4861  * New Section
4862  */
4863 /*
4864  * The eri_stop_timer function is called before doing link-related
4865  * processing. It locks the "linklock" to protect the link-related data
4866  * structures. This lock will be subsequently released in eri_start_timer().
4867  */
4868 static void
4869 eri_stop_timer(struct eri *erip)
4870 {
4871 	timeout_id_t id;
4872 	mutex_enter(&erip->linklock);
4873 	if (erip->timerid) {
4874 		erip->flags |= ERI_NOTIMEOUTS; /* prevent multiple timeouts */
4875 		id = erip->timerid;
4876 		erip->timerid = 0; /* keep other threads from untimeout */
4877 		mutex_exit(&erip->linklock); /* no mutex across untimeout() */
4878 
4879 		(void) untimeout(id);
4880 		mutex_enter(&erip->linklock); /* acquire mutex again */
4881 		erip->flags &= ~ERI_NOTIMEOUTS;
4882 	}
4883 }
4884 
4885 /*
4886  * If msec parameter is zero, just release "linklock".
4887  */
4888 static void
4889 eri_start_timer(struct eri *erip, fptrv_t func, clock_t msec)
4890 {
4891 	if (msec) {
4892 		if (!(erip->flags & ERI_NOTIMEOUTS) &&
4893 		    (erip->flags & ERI_RUNNING)) {
4894 			erip->timerid = timeout(func, (caddr_t)erip,
4895 			    drv_usectohz(1000*msec));
4896 		}
4897 	}
4898 
4899 	mutex_exit(&erip->linklock);
4900 }
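
/*
 * Editor's note: link processing is bracketed by these two helpers,
 *
 *	eri_stop_timer(erip);			takes linklock, cancels
 *	... inspect and modify link state ...
 *	eri_start_timer(erip, eri_check_link, msec);	rearms, drops lock
 *
 * linklock is dropped around untimeout() because untimeout() waits for
 * a running callback, and the callback path itself acquires linklock;
 * holding the mutex across the call could deadlock. The ERI_NOTIMEOUTS
 * flag keeps another thread from rearming the timer in that window.
 */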
4901 
4902 static int
4903 eri_new_xcvr(struct eri *erip)
4904 {
4905 	int		status;
4906 	uint32_t 	cfg;
4907 	int		old_transceiver;
4908 
4909 	if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4910 	    PCI_PM_IDLESPEED_NONE) == DDI_SUCCESS)
4911 		erip->stats.pmcap = ERI_PMCAP_NONE;
4912 
4913 	status = B_FALSE;			/* no change */
4914 	cfg = GET_MIFREG(mif_cfg);
4915 	ERI_DEBUG_MSG2(erip, MIF_MSG, "cfg value = %X", cfg);
4916 	old_transceiver = param_transceiver;
4917 
4918 	if ((cfg & ERI_MIF_CFGM1) && !use_int_xcvr) {
4919 		ERI_DEBUG_MSG1(erip, PHY_MSG, "Found External XCVR");
4920 		/*
4921 		 * An External Transceiver was found and it takes priority
4922 		 * over the internal one, given that the use_int_xcvr flag
4923 		 * is false.
4924 		 */
4925 		if (old_transceiver != EXTERNAL_XCVR) {
4926 			/*
4927 			 * External transceiver has just been plugged
4928 			 * in. Isolate the internal Transceiver.
4929 			 */
4930 			if (old_transceiver == INTERNAL_XCVR) {
4931 				eri_mii_write(erip, ERI_PHY_BMCR,
4932 				    (PHY_BMCR_ISOLATE | PHY_BMCR_PWRDN |
4933 				    PHY_BMCR_LPBK));
4934 			}
4935 			status = B_TRUE;
4936 		}
4937 		/*
4938 		 * Select the external Transceiver.
4939 		 */
4940 		erip->phyad = ERI_EXTERNAL_PHYAD;
4941 		param_transceiver = EXTERNAL_XCVR;
4942 		erip->mif_config &= ~ERI_MIF_CFGPD;
4943 		erip->mif_config |= (erip->phyad << ERI_MIF_CFGPD_SHIFT);
4944 		erip->mif_config |= ERI_MIF_CFGPS;
4945 		PUT_MIFREG(mif_cfg, erip->mif_config);
4946 
4947 		PUT_MACREG(xifc, GET_MACREG(xifc) | BMAC_XIFC_MIIBUF_OE);
4948 		drv_usecwait(ERI_MIF_POLL_DELAY);
4949 	} else if (cfg & ERI_MIF_CFGM0) {
4950 		ERI_DEBUG_MSG1(erip, PHY_MSG, "Found Internal XCVR");
4951 		/*
4952 		 * An Internal Transceiver was found or the
4953 		 * use_int_xcvr flag is true.
4954 		 */
4955 		if (old_transceiver != INTERNAL_XCVR) {
4956 			/*
4957 			 * The external transceiver has just been
4958 			 * disconnected or we're moving from a no
4959 			 * transceiver state.
4960 			 */
4961 			if ((old_transceiver == EXTERNAL_XCVR) &&
4962 			    (cfg & ERI_MIF_CFGM0)) {
4963 				eri_mii_write(erip, ERI_PHY_BMCR,
4964 				    (PHY_BMCR_ISOLATE | PHY_BMCR_PWRDN |
4965 				    PHY_BMCR_LPBK));
4966 			}
4967 			status = B_TRUE;
4968 		}
4969 		/*
4970 		 * Select the internal transceiver.
4971 		 */
4972 		erip->phyad = ERI_INTERNAL_PHYAD;
4973 		param_transceiver = INTERNAL_XCVR;
4974 		erip->mif_config &= ~ERI_MIF_CFGPD;
4975 		erip->mif_config |= (erip->phyad << ERI_MIF_CFGPD_SHIFT);
4976 		erip->mif_config &= ~ERI_MIF_CFGPS;
4977 		PUT_MIFREG(mif_cfg, erip->mif_config);
4978 
4979 		PUT_MACREG(xifc, GET_MACREG(xifc) & ~BMAC_XIFC_MIIBUF_OE);
4980 		drv_usecwait(ERI_MIF_POLL_DELAY);
4981 	} else {
4982 		/*
4983 		 * Did not find a valid xcvr.
4984 		 */
4985 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
4986 		    "eri_new_xcvr: Select None");
4987 		param_transceiver = NO_XCVR;
4988 		erip->xcvr_status = PHY_LINK_DOWN;
4989 	}
4990 
4991 	if (erip->stats.pmcap == ERI_PMCAP_NONE) {
4992 		if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4993 		    (void *)4000) == DDI_SUCCESS)
4994 			erip->stats.pmcap = ERI_PMCAP_4MHZ;
4995 	}
4996 
4997 	return (status);
4998 }
4999 
5000 /*
5001  * This function is used for timers.  No locks are held on timer expiry.
5002  */
5003 static void
5004 eri_check_link(struct eri *erip)
5005 {
5006 	link_state_t	linkupdate = eri_check_link_noind(erip);
5007 
5008 	if (linkupdate != LINK_STATE_UNKNOWN)
5009 		mac_link_update(erip->mh, linkupdate);
5010 }
5011 
5012 /*
5013  * Compare our xcvr in our structure to the xcvr that we get from
5014  * eri_check_mii_xcvr(). If they are different then mark the
5015  * link down, reset xcvr, and return.
5016  *
5017  * Note that without the MII connector, conditions cannot change such
5018  * that an external phy would then be used; thus this code has been
5019  * simplified to not even call the function or change the xcvr.
5020  */
5021 static uint32_t
5022 eri_check_link_noind(struct eri *erip)
5023 {
5024 	uint16_t stat, control, mif_ints;
5025 	uint32_t link_timeout	= ERI_LINKCHECK_TIMER;
5026 	uint32_t linkupdate = 0;
5027 
5028 	eri_stop_timer(erip);	/* acquire linklock */
5029 
5030 	mutex_enter(&erip->xmitlock);
5031 	mutex_enter(&erip->xcvrlock);
5032 	eri_mif_poll(erip, MIF_POLL_STOP);
5033 
5034 	(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5035 	mif_ints = erip->mii_status ^ stat;
5036 
5037 	if (erip->openloop_autoneg) {
5038 		(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5039 		ERI_DEBUG_MSG3(erip, XCVR_MSG,
5040 		    "eri_check_link:openloop stat %X mii_status %X",
5041 		    stat, erip->mii_status);
5042 		(void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5043 		if (!(stat & PHY_BMSR_LNKSTS) &&
5044 		    (erip->openloop_autoneg < 2)) {
5045 			if (param_speed) {
5046 				control &= ~PHY_BMCR_100M;
5047 				param_anlpar_100hdx = 0;
5048 				param_anlpar_10hdx = 1;
5049 				param_speed = 0;
5050 				erip->stats.ifspeed = 10;
5051 
5052 			} else {
5053 				control |= PHY_BMCR_100M;
5054 				param_anlpar_100hdx = 1;
5055 				param_anlpar_10hdx = 0;
5056 				param_speed = 1;
5057 				erip->stats.ifspeed = 100;
5058 			}
5059 			ERI_DEBUG_MSG3(erip, XCVR_MSG,
5060 			    "eri_check_link: trying speed %X stat %X",
5061 			    param_speed, stat);
5062 
5063 			erip->openloop_autoneg++;
5064 			eri_mii_write(erip, ERI_PHY_BMCR, control);
5065 			link_timeout = ERI_P_FAULT_TIMER;
5066 		} else {
5067 			erip->openloop_autoneg = 0;
5068 			linkupdate = eri_mif_check(erip, stat, stat);
5069 			if (erip->openloop_autoneg)
5070 				link_timeout = ERI_P_FAULT_TIMER;
5071 		}
5072 		eri_mif_poll(erip, MIF_POLL_START);
5073 		mutex_exit(&erip->xcvrlock);
5074 		mutex_exit(&erip->xmitlock);
5075 
5076 		eri_start_timer(erip, eri_check_link, link_timeout);
5077 		return (linkupdate);
5078 	}
5079 
5080 	linkupdate = eri_mif_check(erip, mif_ints, stat);
5081 	eri_mif_poll(erip, MIF_POLL_START);
5082 	mutex_exit(&erip->xcvrlock);
5083 	mutex_exit(&erip->xmitlock);
5084 
5085 #ifdef ERI_RMAC_HANG_WORKAROUND
5086 	/*
5087 	 * Check if rx hung.
5088 	 */
5089 	if ((erip->flags & ERI_RUNNING) && param_linkup) {
5090 		if (erip->check_rmac_hang) {
5091 			ERI_DEBUG_MSG5(erip,
5092 			    NONFATAL_MSG,
5093 			    "check1 %d: macsm:%8x wr:%2x rd:%2x",
5094 			    erip->check_rmac_hang,
5095 			    GET_MACREG(macsm),
5096 			    GET_ERXREG(rxfifo_wr_ptr),
5097 			    GET_ERXREG(rxfifo_rd_ptr));
5098 
5099 			erip->check_rmac_hang = 0;
5100 			erip->check2_rmac_hang++;
5101 
5102 			erip->rxfifo_wr_ptr_c = GET_ERXREG(rxfifo_wr_ptr);
5103 			erip->rxfifo_rd_ptr_c = GET_ERXREG(rxfifo_rd_ptr);
5104 
5105 			eri_start_timer(erip, eri_check_link,
5106 			    ERI_CHECK_HANG_TIMER);
5107 			return (linkupdate);
5108 		}
5109 
5110 		if (erip->check2_rmac_hang) {
5111 			ERI_DEBUG_MSG5(erip,
5112 			    NONFATAL_MSG,
5113 			    "check2 %d: macsm:%8x wr:%2x rd:%2x",
5114 			    erip->check2_rmac_hang,
5115 			    GET_MACREG(macsm),
5116 			    GET_ERXREG(rxfifo_wr_ptr),
5117 			    GET_ERXREG(rxfifo_rd_ptr));
5118 
5119 			erip->check2_rmac_hang = 0;
5120 
5121 			erip->rxfifo_wr_ptr = GET_ERXREG(rxfifo_wr_ptr);
5122 			erip->rxfifo_rd_ptr = GET_ERXREG(rxfifo_rd_ptr);
5123 
5124 			if (((GET_MACREG(macsm) & BMAC_OVERFLOW_STATE) ==
5125 			    BMAC_OVERFLOW_STATE) &&
5126 			    ((erip->rxfifo_wr_ptr_c == erip->rxfifo_rd_ptr_c) ||
5127 			    ((erip->rxfifo_rd_ptr == erip->rxfifo_rd_ptr_c) &&
5128 			    (erip->rxfifo_wr_ptr == erip->rxfifo_wr_ptr_c)))) {
5129 				ERI_DEBUG_MSG1(erip,
5130 				    NONFATAL_MSG,
5131 				    "RX hang: Reset mac");
5132 
5133 				HSTAT(erip, rx_hang);
5134 				erip->linkcheck = 1;
5135 
5136 				eri_start_timer(erip, eri_check_link,
5137 				    ERI_LINKCHECK_TIMER);
5138 				(void) eri_init(erip);
5139 				return (linkupdate);
5140 			}
5141 		}
5142 	}
5143 #endif
5144 
5145 	/*
5146 	 * Check if tx hung.
5147 	 */
5148 #ifdef	ERI_TX_HUNG
5149 	if ((erip->flags & ERI_RUNNING) && param_linkup &&
5150 	    (eri_check_txhung(erip))) {
5151 		HSTAT(erip, tx_hang);
5152 		eri_reinit_txhung++;
5153 		erip->linkcheck = 1;
5154 		eri_start_timer(erip, eri_check_link, ERI_CHECK_HANG_TIMER);
5155 		(void) eri_init(erip);
5156 		return (linkupdate);
5157 	}
5158 #endif
5159 
5160 #ifdef ERI_PM_WORKAROUND
5161 	if (erip->stats.pmcap == ERI_PMCAP_NONE) {
5162 		if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
5163 		    (void *)4000) == DDI_SUCCESS)
5164 			erip->stats.pmcap = ERI_PMCAP_4MHZ;
5165 
5166 		ERI_DEBUG_MSG2(erip, NONFATAL_MSG,
5167 		    "eri_check_link: PMCAP %d", erip->stats.pmcap);
5168 	}
5169 #endif
5170 	if ((!param_mode) && (param_transceiver != NO_XCVR))
5171 		eri_start_timer(erip, eri_check_link, ERI_CHECK_HANG_TIMER);
5172 	else
5173 		eri_start_timer(erip, eri_check_link, ERI_LINKCHECK_TIMER);
5174 	return (linkupdate);
5175 }
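
/*
 * Editor's note on the RMAC hang workaround above (a hedged reading):
 * the check spans two timer ticks. On the first tick the RX FIFO
 * read/write pointers are sampled; on the second, if the MAC state
 * machine reports BMAC_OVERFLOW_STATE and the pointers are unchanged
 * since the sample (or were already equal), the receiver is declared
 * hung and eri_init() resets the MAC.
 */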
5176 
5177 static link_state_t
5178 eri_mif_check(struct eri *erip, uint16_t mif_ints, uint16_t mif_data)
5179 {
5180 	uint16_t control, aner, anlpar, anar, an_common;
5181 	uint16_t old_mintrans;
5182 	int restart_autoneg = 0;
5183 	link_state_t retv;
5184 
5185 	ERI_DEBUG_MSG4(erip, XCVR_MSG, "eri_mif_check: mif_mask: %X, %X, %X",
5186 	    erip->mif_mask, mif_ints, mif_data);
5187 
5188 	mif_ints &= ~erip->mif_mask;
5189 	erip->mii_status = mif_data;
5190 	/*
5191 	 * Now check if someone has pulled the xcvr or
5192 	 * a new xcvr has shown up.
5193 	 * If so, try to find out what the new xcvr setup is.
5194 	 */
5195 	if (((mif_ints & PHY_BMSR_RES1) && (mif_data == 0xFFFF)) ||
5196 	    (param_transceiver == NO_XCVR)) {
5197 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
5198 		    "No status: transceiver gone");
5199 		if (eri_new_xcvr(erip)) {
5200 			if (param_transceiver != NO_XCVR) {
5201 				/*
5202 				 * Reset the new PHY and bring up the link
5203 				 */
5204 				(void) eri_reset_xcvr(erip);
5205 			}
5206 		}
5207 		return (LINK_STATE_UNKNOWN);
5208 	}
5209 
5210 	if (param_autoneg && (mif_ints & PHY_BMSR_LNKSTS) &&
5211 	    (mif_data & PHY_BMSR_LNKSTS) && (mif_data & PHY_BMSR_ANC)) {
5212 		mif_ints |= PHY_BMSR_ANC;
5213 		ERI_DEBUG_MSG3(erip, PHY_MSG,
5214 		    "eri_mif_check: Set ANC bit mif_data %X mif_ints %X",
5215 		    mif_data, mif_ints);
5216 	}
5217 
5218 	if ((mif_ints & PHY_BMSR_ANC) && (mif_data & PHY_BMSR_ANC)) {
5219 		ERI_DEBUG_MSG1(erip, PHY_MSG, "Auto-negotiation interrupt.");
5220 
5221 		/*
5222 		 * Switch off Auto-negotiation interrupts and switch on
5223 		 * Link ststus interrupts.
5224 		 * Link status interrupts.
5225 		erip->mif_mask |= PHY_BMSR_ANC;
5226 		erip->mif_mask &= ~PHY_BMSR_LNKSTS;
5227 		(void) eri_mii_read(erip, ERI_PHY_ANER, &aner);
5228 		param_aner_lpancap = 1 && (aner & PHY_ANER_LPNW);
5229 		if ((aner & PHY_ANER_MLF) || (eri_force_mlf)) {
5230 			ERI_DEBUG_MSG1(erip, XCVR_MSG,
5231 			    "parallel detection fault");
5232 			/*
5233 			 * Consider doing open loop auto-negotiation.
5234 			 */
5235 			ERI_DEBUG_MSG1(erip, XCVR_MSG,
5236 			    "Going into Open loop Auto-neg");
5237 			(void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5238 
5239 			control &= ~(PHY_BMCR_ANE | PHY_BMCR_RAN |
5240 			    PHY_BMCR_FDX);
5241 			if (param_anar_100fdx || param_anar_100hdx) {
5242 				control |= PHY_BMCR_100M;
5243 				param_anlpar_100hdx = 1;
5244 				param_anlpar_10hdx = 0;
5245 				param_speed = 1;
5246 				erip->stats.ifspeed = 100;
5247 
5248 			} else if (param_anar_10fdx || param_anar_10hdx) {
5249 				control &= ~PHY_BMCR_100M;
5250 				param_anlpar_100hdx = 0;
5251 				param_anlpar_10hdx = 1;
5252 				param_speed = 0;
5253 				erip->stats.ifspeed = 10;
5254 			} else {
5255 				ERI_FAULT_MSG1(erip, SEVERITY_NONE,
5256 				    ERI_VERB_MSG,
5257 				    "Transceiver speed set incorrectly.");
5258 				return (0);
5259 			}
5260 
5261 			(void) eri_mii_write(erip, ERI_PHY_BMCR, control);
5262 			param_anlpar_100fdx = 0;
5263 			param_anlpar_10fdx = 0;
5264 			param_mode = 0;
5265 			erip->openloop_autoneg = 1;
5266 			return (0);
5267 		}
5268 		(void) eri_mii_read(erip, ERI_PHY_ANLPAR, &anlpar);
5269 		(void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5270 		an_common = anar & anlpar;
5271 
5272 		ERI_DEBUG_MSG2(erip, XCVR_MSG, "an_common = 0x%X", an_common);
5273 
5274 		if (an_common & (PHY_ANLPAR_TXFDX | PHY_ANLPAR_TX)) {
5275 			param_speed = 1;
5276 			erip->stats.ifspeed = 100;
5277 			param_mode = 1 && (an_common & PHY_ANLPAR_TXFDX);
5278 
5279 		} else if (an_common & (PHY_ANLPAR_10FDX | PHY_ANLPAR_10)) {
5280 			param_speed = 0;
5281 			erip->stats.ifspeed = 10;
5282 			param_mode = 1 && (an_common & PHY_ANLPAR_10FDX);
5283 
5284 		} else an_common = 0x0;
5285 
5286 		if (!an_common) {
5287 			ERI_FAULT_MSG1(erip, SEVERITY_MID, ERI_VERB_MSG,
5288 			    "Transceiver: anar not set with speed selection");
5289 		}
5290 		param_anlpar_100T4 = 1 && (anlpar & PHY_ANLPAR_T4);
5291 		param_anlpar_100fdx = 1 && (anlpar & PHY_ANLPAR_TXFDX);
5292 		param_anlpar_100hdx = 1 && (anlpar & PHY_ANLPAR_TX);
5293 		param_anlpar_10fdx = 1 && (anlpar & PHY_ANLPAR_10FDX);
5294 		param_anlpar_10hdx = 1 && (anlpar & PHY_ANLPAR_10);
5295 
5296 		ERI_DEBUG_MSG2(erip, PHY_MSG,
5297 		    "Link duplex = 0x%X", param_mode);
5298 		ERI_DEBUG_MSG2(erip, PHY_MSG,
5299 		    "Link speed = 0x%X", param_speed);
5300 	/*	mif_ints |= PHY_BMSR_LNKSTS; prevent double msg */
5301 	/*	mif_data |= PHY_BMSR_LNKSTS; prevent double msg */
5302 	}
5303 	retv = LINK_STATE_UNKNOWN;
5304 	if (mif_ints & PHY_BMSR_LNKSTS) {
5305 		if (mif_data & PHY_BMSR_LNKSTS) {
5306 			ERI_DEBUG_MSG1(erip, PHY_MSG, "Link Up");
5307 			/*
5308 			 * Program Lu3X31T for mininum transition
5309 			 * Program Lu3X31T for minimum transition
5310 			if (eri_phy_mintrans) {
5311 				eri_mii_write(erip, 31, 0x8000);
5312 				(void) eri_mii_read(erip, 0, &old_mintrans);
5313 				eri_mii_write(erip, 0, 0x00F1);
5314 				eri_mii_write(erip, 31, 0x0000);
5315 			}
5316 			/*
5317 			 * The link is up.
5318 			 */
5319 			eri_init_txmac(erip);
5320 			param_linkup = 1;
5321 			erip->stats.link_up = LINK_STATE_UP;
5322 			if (param_mode)
5323 				erip->stats.link_duplex = LINK_DUPLEX_FULL;
5324 			else
5325 				erip->stats.link_duplex = LINK_DUPLEX_HALF;
5326 
5327 			retv = LINK_STATE_UP;
5328 		} else {
5329 			ERI_DEBUG_MSG1(erip, PHY_MSG, "Link down.");
5330 			param_linkup = 0;
5331 			erip->stats.link_up = LINK_STATE_DOWN;
5332 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
5333 			retv = LINK_STATE_DOWN;
5334 			if (param_autoneg) {
5335 				restart_autoneg = 1;
5336 			}
5337 		}
5338 	} else {
5339 		if (mif_data & PHY_BMSR_LNKSTS) {
5340 			if (!param_linkup) {
5341 				ERI_DEBUG_MSG1(erip, PHY_MSG,
5342 				    "eri_mif_check: MIF data link up");
5343 				/*
5344 				 * Program Lu3X31T for minimum transition
5345 				 */
5346 				if (eri_phy_mintrans) {
5347 					eri_mii_write(erip, 31, 0x8000);
5348 					(void) eri_mii_read(erip, 0,
5349 					    &old_mintrans);
5350 					eri_mii_write(erip, 0, 0x00F1);
5351 					eri_mii_write(erip, 31, 0x0000);
5352 				}
5353 				/*
5354 				 * The link is up.
5355 				 */
5356 				eri_init_txmac(erip);
5357 
5358 				param_linkup = 1;
5359 				erip->stats.link_up = LINK_STATE_UP;
5360 				if (param_mode)
5361 					erip->stats.link_duplex =
5362 					    LINK_DUPLEX_FULL;
5363 				else
5364 					erip->stats.link_duplex =
5365 					    LINK_DUPLEX_HALF;
5366 
5367 				retv = LINK_STATE_UP;
5368 			}
5369 		} else if (param_linkup) {
5370 			/*
5371 			 * The link is down now.
5372 			 */
5373 			ERI_DEBUG_MSG1(erip, PHY_MSG,
5374 			    "eri_mif_check:Link was up and went down");
5375 			param_linkup = 0;
5376 			erip->stats.link_up = LINK_STATE_DOWN;
5377 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
5378 			retv = LINK_STATE_DOWN;
5379 			if (param_autoneg)
5380 				restart_autoneg = 1;
5381 		}
5382 	}
5383 	if (restart_autoneg) {
5384 		/*
5385 		 * Restart normal auto-negotiation.
5386 		 */
5387 		ERI_DEBUG_MSG1(erip, PHY_MSG,
5388 		    "eri_mif_check: Restart auto-negotiation");
5389 		erip->openloop_autoneg = 0;
5390 		param_mode = 0;
5391 		param_speed = 0;
5392 		param_anlpar_100T4 = 0;
5393 		param_anlpar_100fdx = 0;
5394 		param_anlpar_100hdx = 0;
5395 		param_anlpar_10fdx = 0;
5396 		param_anlpar_10hdx = 0;
5397 		param_aner_lpancap = 0;
5398 		(void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5399 		control |= (PHY_BMCR_ANE | PHY_BMCR_RAN);
5400 		eri_mii_write(erip, ERI_PHY_BMCR, control);
5401 	}
5402 	if (mif_ints & PHY_BMSR_JABDET) {
5403 		if (mif_data & PHY_BMSR_JABDET) {
5404 			ERI_DEBUG_MSG1(erip, PHY_MSG, "Jabber detected.");
5405 			HSTAT(erip, jab);
5406 			/*
5407 			 * Reset the new PHY and bring up the link
5408 			 * (Check for failure?)
5409 			 */
5410 			(void) eri_reset_xcvr(erip);
5411 		}
5412 	}
5413 	return (retv);
5414 }
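
/*
 * Editor's outline: the an_common resolution above follows the usual
 * 802.3 priority ordering for the abilities this MAC advertises:
 *
 *	an_common = anar & anlpar;
 *	if (an_common & (PHY_ANLPAR_TXFDX | PHY_ANLPAR_TX))
 *		-> 100 Mb/s, full duplex iff TXFDX is common
 *	else if (an_common & (PHY_ANLPAR_10FDX | PHY_ANLPAR_10))
 *		-> 10 Mb/s, full duplex iff 10FDX is common
 *	else
 *		-> no common ability; a fault message is logged
 */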
5415 
5416 #define	PHYRST_PERIOD 500
5417 static int
5418 eri_reset_xcvr(struct eri *erip)
5419 {
5420 	uint16_t	stat;
5421 	uint16_t	anar;
5422 	uint16_t	control;
5423 	uint16_t	idr1;
5424 	uint16_t	idr2;
5425 	uint16_t	nicr;
5426 	uint32_t	speed_100;
5427 	uint32_t	speed_10;
5428 	int n;
5429 
5430 #ifdef	ERI_10_10_FORCE_SPEED_WORKAROUND
5431 	erip->ifspeed_old = erip->stats.ifspeed;
5432 #endif
5433 	/*
5434 	 * Reset open loop auto-negotiation; this means normal
5435 	 * auto-negotiation is tried until a Multiple Link Fault occurs,
5436 	 * at which point 100M half duplex and then 10M half duplex are
5437 	 * tried until the link comes up.
5438 	 */
5439 	erip->openloop_autoneg = 0;
5440 
5441 	/*
5442 	 * Reset the xcvr.
5443 	 */
5444 	eri_mii_write(erip, ERI_PHY_BMCR, PHY_BMCR_RESET);
5445 
5446 	/* Check for transceiver reset completion */
5447 
5448 	n = 1000;
5449 	while (--n > 0) {
5450 		drv_usecwait((clock_t)PHYRST_PERIOD);
5451 		if (eri_mii_read(erip, ERI_PHY_BMCR, &control) == 1) {
5452 			/* Transceiver does not talk MII */
5453 			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
5454 			    "eri_reset_xcvr: no mii");
5455 		}
5456 		if ((control & PHY_BMCR_RESET) == 0)
5457 			goto reset_done;
5458 	}
5459 	ERI_FAULT_MSG2(erip, SEVERITY_NONE, ERI_VERB_MSG,
5460 	    "eri_reset_xcvr:reset_failed n == 0, control %x", control);
5461 	goto eri_reset_xcvr_failed;
5462 
5463 reset_done:
5464 
5465 	ERI_DEBUG_MSG2(erip, AUTOCONFIG_MSG,
5466 	    "eri_reset_xcvr: reset complete in %d us",
5467 	    (1000 - n) * PHYRST_PERIOD);
5468 
5469 	(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5470 	(void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5471 	(void) eri_mii_read(erip, ERI_PHY_IDR1, &idr1);
5472 	(void) eri_mii_read(erip, ERI_PHY_IDR2, &idr2);
5473 
5474 	ERI_DEBUG_MSG4(erip, XCVR_MSG,
5475 	    "eri_reset_xcvr: control %x stat %x anar %x", control, stat, anar);
5476 
5477 	/*
5478 	 * Initialize the read only transceiver ndd information
5479 	 * the values are either 0 or 1.
5480 	 */
5481 	param_bmsr_ancap = 1 && (stat & PHY_BMSR_ACFG);
5482 	param_bmsr_100T4 = 1 && (stat & PHY_BMSR_100T4);
5483 	param_bmsr_100fdx = 1 && (stat & PHY_BMSR_100FDX);
5484 	param_bmsr_100hdx = 1 && (stat & PHY_BMSR_100HDX);
5485 	param_bmsr_10fdx = 1 && (stat & PHY_BMSR_10FDX);
5486 	param_bmsr_10hdx = 1 && (stat & PHY_BMSR_10HDX);
5487 
5488 	/*
5489 	 * Match up the ndd capabilities with the transceiver.
5490 	 */
5491 	param_autoneg &= param_bmsr_ancap;
5492 	param_anar_100fdx &= param_bmsr_100fdx;
5493 	param_anar_100hdx &= param_bmsr_100hdx;
5494 	param_anar_10fdx &= param_bmsr_10fdx;
5495 	param_anar_10hdx &= param_bmsr_10hdx;
5496 
5497 	/*
5498 	 * Select the operation mode of the transceiver.
5499 	 */
5500 	if (param_autoneg) {
5501 		/*
5502 		 * Initialize our auto-negotiation capabilities.
5503 		 */
5504 		anar = PHY_SELECTOR;
5505 		if (param_anar_100T4)
5506 			anar |= PHY_ANAR_T4;
5507 		if (param_anar_100fdx)
5508 			anar |= PHY_ANAR_TXFDX;
5509 		if (param_anar_100hdx)
5510 			anar |= PHY_ANAR_TX;
5511 		if (param_anar_10fdx)
5512 			anar |= PHY_ANAR_10FDX;
5513 		if (param_anar_10hdx)
5514 			anar |= PHY_ANAR_10;
5515 		ERI_DEBUG_MSG2(erip, XCVR_MSG, "anar = %x", anar);
5516 		eri_mii_write(erip, ERI_PHY_ANAR, anar);
5517 	}
5518 
5519 	/* Place the Transceiver in normal operation mode */
5520 	if ((control & PHY_BMCR_ISOLATE) || (control & PHY_BMCR_LPBK)) {
5521 		control &= ~(PHY_BMCR_ISOLATE | PHY_BMCR_LPBK);
5522 		eri_mii_write(erip, ERI_PHY_BMCR,
5523 		    (control & ~PHY_BMCR_ISOLATE));
5524 	}
5525 
5526 	/*
5527 	 * If Lu3X31T then allow nonzero eri_phy_mintrans
5528 	 */
5529 	if (eri_phy_mintrans &&
5530 	    (idr1 != 0x43 || (idr2 & 0xFFF0) != 0x7420)) {
5531 		eri_phy_mintrans = 0;
5532 	}
5533 	/*
5534 	 * Initialize the mif interrupt mask.
5535 	 */
5536 	erip->mif_mask = (uint16_t)(~PHY_BMSR_RES1);
5537 
5538 	/*
5539 	 * Establish the link speeds and do whatever special handling
5540 	 * the selected speed requires.
5541 	 */
5542 	speed_100 = param_anar_100fdx | param_anar_100hdx;
5543 	speed_10 = param_anar_10fdx | param_anar_10hdx;
5544 
5545 	ERI_DEBUG_MSG5(erip, XCVR_MSG, "eri_reset_xcvr: %d %d %d %d",
5546 	    param_anar_100fdx, param_anar_100hdx, param_anar_10fdx,
5547 	    param_anar_10hdx);
5548 
5549 	ERI_DEBUG_MSG3(erip, XCVR_MSG,
5550 	    "eri_reset_xcvr: speed_100 %d speed_10 %d", speed_100, speed_10);
5551 
5552 	if ((!speed_100) && (speed_10)) {
5553 		erip->mif_mask &= ~PHY_BMSR_JABDET;
5554 		if (!(param_anar_10fdx) &&
5555 		    (param_anar_10hdx) &&
5556 		    (erip->link_pulse_disabled)) {
5557 			param_speed = 0;
5558 			param_mode = 0;
5559 			(void) eri_mii_read(erip, ERI_PHY_NICR, &nicr);
5560 			nicr &= ~PHY_NICR_LD;
5561 			eri_mii_write(erip, ERI_PHY_NICR, nicr);
5562 			param_linkup = 1;
5563 			erip->stats.link_up = LINK_STATE_UP;
			/* param_mode was forced to 0 above: half duplex */
			erip->stats.link_duplex = LINK_DUPLEX_HALF;
5568 		}
5569 	}
5570 
	/*
	 * Establish a baseline BMCR value before programming the
	 * operating mode below.
	 */
	control = PHY_BMCR_100M | PHY_BMCR_FDX;
5576 	if (param_autoneg) {
		/*
		 * Set up the transceiver for auto-negotiation and
		 * enable the autonegotiation-complete interrupt.
		 */
		erip->mif_mask &= ~PHY_BMSR_ANC;
5581 
		/*
		 * Clear auto-negotiation before restarting it.
		 */
5585 		eri_mii_write(erip, ERI_PHY_BMCR, control & ~PHY_BMCR_ANE);
5586 
		/*
		 * Switch on auto-negotiation and restart it; RAN
		 * forces a new negotiation even if one has already
		 * completed.
		 */
5590 		control |= (PHY_BMCR_ANE | PHY_BMCR_RAN);
5591 
5592 		eri_mii_write(erip, ERI_PHY_BMCR, control);
5593 	} else {
		/*
		 * Force the transceiver speed and duplex mode, and
		 * enable the link-status change interrupt instead.
		 */
		erip->mif_mask &= ~PHY_BMSR_LNKSTS;
5598 
5599 		/*
5600 		 * Switch off auto-negotiation.
5601 		 */
5602 		control &= ~(PHY_BMCR_FDX | PHY_BMCR_ANE | PHY_BMCR_RAN);
5603 
5604 		if (speed_100) {
5605 			control |= PHY_BMCR_100M;
5606 			param_aner_lpancap = 0; /* Clear LP nway */
5607 			param_anlpar_10fdx = 0;
5608 			param_anlpar_10hdx = 0;
5609 			param_anlpar_100T4 = param_anar_100T4;
5610 			param_anlpar_100fdx = param_anar_100fdx;
5611 			param_anlpar_100hdx = param_anar_100hdx;
5612 			param_speed = 1;
5613 			erip->stats.ifspeed = 100;
5614 			param_mode = param_anar_100fdx;
5615 			if (param_mode) {
5616 				param_anlpar_100hdx = 0;
5617 				erip->stats.link_duplex = LINK_DUPLEX_FULL;
5618 			} else {
5619 				erip->stats.link_duplex = LINK_DUPLEX_HALF;
5620 			}
5621 		} else if (speed_10) {
5622 			control &= ~PHY_BMCR_100M;
5623 			param_aner_lpancap = 0; /* Clear LP nway */
5624 			param_anlpar_100fdx = 0;
5625 			param_anlpar_100hdx = 0;
5626 			param_anlpar_100T4 = 0;
5627 			param_anlpar_10fdx = param_anar_10fdx;
5628 			param_anlpar_10hdx = param_anar_10hdx;
5629 			param_speed = 0;
5630 			erip->stats.ifspeed = 10;
5631 			param_mode = param_anar_10fdx;
5632 			if (param_mode) {
5633 				param_anlpar_10hdx = 0;
5634 				erip->stats.link_duplex = LINK_DUPLEX_FULL;
5635 			} else {
5636 				erip->stats.link_duplex = LINK_DUPLEX_HALF;
5637 			}
5638 		} else {
5639 			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
5640 			    "Transceiver speed set incorrectly.");
5641 		}
5642 
5643 		if (param_mode) {
5644 			control |= PHY_BMCR_FDX;
5645 		}
5646 
5647 		ERI_DEBUG_MSG4(erip, PHY_MSG,
5648 		    "control = %x status = %x param_mode %d",
5649 		    control, stat, param_mode);
5650 
5651 		eri_mii_write(erip, ERI_PHY_BMCR, control);
5659 	}
5660 
5661 #ifdef DEBUG
5662 	(void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5663 	(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5664 	(void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5665 #endif
5666 	ERI_DEBUG_MSG4(erip, PHY_MSG,
5667 	    "control %X status %X anar %X", control, stat, anar);
5668 
5669 eri_reset_xcvr_exit:
5670 	return (0);
5671 
5672 eri_reset_xcvr_failed:
5673 	return (1);
5674 }
5675 
5676 #ifdef	ERI_10_10_FORCE_SPEED_WORKAROUND
5677 
5678 static void
5679 eri_xcvr_force_mode(struct eri *erip, uint32_t *link_timeout)
{
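	/*
	 * Forced to 10 Mb/s but the link has not come up yet:
	 * re-arm the link timer to check again in one second.
	 */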
5682 	if (!param_autoneg && !param_linkup && (erip->stats.ifspeed == 10) &&
5683 	    (param_anar_10fdx | param_anar_10hdx)) {
5684 		*link_timeout = SECOND(1);
5685 		return;
5686 	}
5687 
5688 	if (!param_autoneg && !param_linkup && (erip->ifspeed_old == 10) &&
5689 	    (param_anar_100fdx | param_anar_100hdx)) {
5690 		/*
5691 		 * May have to set link partner's speed and mode.
5692 		 */
5693 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_LOG_MSG,
		    "May have to set link partner's speed and duplex mode.");
5695 	}
5696 }
5697 #endif
5698 
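/*
 * Start or stop MIF hardware polling of the transceiver registers.
 * Starting sets the poll-enable bit in the MIF configuration,
 * unmasks the MIF interrupt in the global interrupt mask and loads
 * the PHY-specific MIF interrupt mask; polling is only started when
 * mifpoll_enable is set and we are not in open-loop autonegotiation.
 * Stopping clears the poll-enable bit, masks the MIF interrupt again
 * and restores the default MIF interrupt mask.
 */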
5699 static void
5700 eri_mif_poll(struct eri *erip, soft_mif_enable_t enable)
5701 {
5702 	if (enable == MIF_POLL_START) {
5703 		if (erip->mifpoll_enable && !erip->openloop_autoneg) {
5704 			erip->mif_config |= ERI_MIF_CFGPE;
5705 			PUT_MIFREG(mif_cfg, erip->mif_config);
5706 			drv_usecwait(ERI_MIF_POLL_DELAY);
5707 			PUT_GLOBREG(intmask, GET_GLOBREG(intmask) &
5708 			    ~ERI_G_MASK_MIF_INT);
5709 			PUT_MIFREG(mif_imask, erip->mif_mask);
5710 		}
	} else if (enable == MIF_POLL_STOP) {
		erip->mif_config &= ~ERI_MIF_CFGPE;
		PUT_MIFREG(mif_cfg, erip->mif_config);
		drv_usecwait(ERI_MIF_POLL_DELAY);
		PUT_GLOBREG(intmask, GET_GLOBREG(intmask) |
		    ERI_G_MASK_MIF_INT);
		PUT_MIFREG(mif_imask, ERI_MIF_INTMASK);
	}
5719 	ERI_DEBUG_MSG2(erip, XCVR_MSG, "MIF Config = 0x%X",
5720 	    GET_MIFREG(mif_cfg));
5721 	ERI_DEBUG_MSG2(erip, XCVR_MSG, "MIF imask = 0x%X",
5722 	    GET_MIFREG(mif_imask));
5723 	ERI_DEBUG_MSG2(erip, XCVR_MSG, "INT imask = 0x%X",
5724 	    GET_GLOBREG(intmask));
5725 	ERI_DEBUG_MSG1(erip, XCVR_MSG, "<== mif_poll");
5726 }
5727 
5728 /* Decide if transmitter went dead and reinitialize everything */
5729 #ifdef	ERI_TX_HUNG
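/*
 * eri_check_txhung() is meant to be called periodically (presumably
 * from the driver's watchdog timer); a nonzero return tells the
 * caller to reinitialize the device.  eri_txhung_limit is the number
 * of consecutive no-progress checks required before the transmitter
 * is declared hung.
 */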
5730 static int eri_txhung_limit = 2;
5731 static int
5732 eri_check_txhung(struct eri *erip)
5733 {
5734 	boolean_t	macupdate = B_FALSE;
5735 
5736 	mutex_enter(&erip->xmitlock);
	if (erip->flags & ERI_RUNNING) {
		erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
		    ETX_COMPLETION_MASK);
		macupdate |= eri_reclaim(erip, erip->tx_completion);
	}
5741 
5742 	/* Something needs to be sent out but it is not going out */
5743 	if ((erip->tcurp != erip->tnextp) &&
5744 	    (erip->stats.opackets64 == erip->erisave.reclaim_opackets) &&
5745 	    (erip->stats.collisions == erip->erisave.starts))
5746 		erip->txhung++;
5747 	else
5748 		erip->txhung = 0;
5749 
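	/*
	 * Snapshot the counters for the next comparison; note that
	 * erisave.starts holds the collision count here, despite its
	 * name.
	 */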
5750 	erip->erisave.reclaim_opackets = erip->stats.opackets64;
5751 	erip->erisave.starts = erip->stats.collisions;
5752 	mutex_exit(&erip->xmitlock);
5753 
5754 	if (macupdate)
5755 		mac_tx_update(erip->mh);
5756 
5757 	return (erip->txhung >= eri_txhung_limit);
5758 }
5759 #endif
5760