xref: /titanic_41/usr/src/uts/common/io/rge/rge_chip.c (revision 5aefb6555731130ca4fd295960123d71f2d21fe8)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include "rge.h"
30 
/*
 * Accessor macros: map a register offset to a typed pointer into the
 * memory-mapped operating-register space (rgep->io_regs).
 */
#define	REG32(rgep, reg)	((uint32_t *)(rgep->io_regs+(reg)))
#define	REG16(rgep, reg)	((uint16_t *)(rgep->io_regs+(reg)))
#define	REG8(rgep, reg)		((uint8_t *)(rgep->io_regs+(reg)))
#define	PIO_ADDR(rgep, offset)	((void *)(rgep->io_regs+(offset)))

/*
 * Patchable globals:
 *
 *	rge_autorecover
 *		Enables/disables automatic recovery after fault detection
 */
static uint32_t rge_autorecover = 1;

/*
 * globals:
 */
#define	RGE_DBG		RGE_DBG_REGS	/* debug flag for this code	*/
static uint32_t rge_watchdog_count	= 1 << 16;
49 
50 /*
51  * Operating register get/set access routines
52  */
53 #if	RGE_DEBUGGING
54 
55 static void rge_pci_check(rge_t *rgep);
56 #pragma	no_inline(rge_pci_check)
57 
58 static void
59 rge_pci_check(rge_t *rgep)
60 {
61 	uint16_t pcistatus;
62 
63 	pcistatus = pci_config_get16(rgep->cfg_handle, PCI_CONF_STAT);
64 	if ((pcistatus & (PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB)) != 0)
65 		RGE_DEBUG(("rge_pci_check($%p): PCI status 0x%x",
66 			(void *)rgep, pcistatus));
67 }
68 
69 #endif	/* RGE_DEBUGGING */
70 
71 static uint32_t rge_reg_get32(rge_t *rgep, uintptr_t regno);
72 #pragma	inline(rge_reg_get32)
73 
74 static uint32_t
75 rge_reg_get32(rge_t *rgep, uintptr_t regno)
76 {
77 	RGE_TRACE(("rge_reg_get32($%p, 0x%lx)",
78 		(void *)rgep, regno));
79 
80 	return (ddi_get32(rgep->io_handle, REG32(rgep, regno)));
81 }
82 
83 static void rge_reg_put32(rge_t *rgep, uintptr_t regno, uint32_t data);
84 #pragma	inline(rge_reg_put32)
85 
86 static void
87 rge_reg_put32(rge_t *rgep, uintptr_t regno, uint32_t data)
88 {
89 	RGE_TRACE(("rge_reg_put32($%p, 0x%lx, 0x%x)",
90 		(void *)rgep, regno, data));
91 
92 	ddi_put32(rgep->io_handle, REG32(rgep, regno), data);
93 	RGE_PCICHK(rgep);
94 }
95 
96 static void rge_reg_set32(rge_t *rgep, uintptr_t regno, uint32_t bits);
97 #pragma	inline(rge_reg_set32)
98 
99 static void
100 rge_reg_set32(rge_t *rgep, uintptr_t regno, uint32_t bits)
101 {
102 	uint32_t regval;
103 
104 	RGE_TRACE(("rge_reg_set32($%p, 0x%lx, 0x%x)",
105 		(void *)rgep, regno, bits));
106 
107 	regval = rge_reg_get32(rgep, regno);
108 	regval |= bits;
109 	rge_reg_put32(rgep, regno, regval);
110 }
111 
112 static void rge_reg_clr32(rge_t *rgep, uintptr_t regno, uint32_t bits);
113 #pragma	inline(rge_reg_clr32)
114 
115 static void
116 rge_reg_clr32(rge_t *rgep, uintptr_t regno, uint32_t bits)
117 {
118 	uint32_t regval;
119 
120 	RGE_TRACE(("rge_reg_clr32($%p, 0x%lx, 0x%x)",
121 		(void *)rgep, regno, bits));
122 
123 	regval = rge_reg_get32(rgep, regno);
124 	regval &= ~bits;
125 	rge_reg_put32(rgep, regno, regval);
126 }
127 
128 static uint16_t rge_reg_get16(rge_t *rgep, uintptr_t regno);
129 #pragma	inline(rge_reg_get16)
130 
131 static uint16_t
132 rge_reg_get16(rge_t *rgep, uintptr_t regno)
133 {
134 	RGE_TRACE(("rge_reg_get16($%p, 0x%lx)",
135 		(void *)rgep, regno));
136 
137 	return (ddi_get16(rgep->io_handle, REG16(rgep, regno)));
138 }
139 
140 static void rge_reg_put16(rge_t *rgep, uintptr_t regno, uint16_t data);
141 #pragma	inline(rge_reg_put16)
142 
143 static void
144 rge_reg_put16(rge_t *rgep, uintptr_t regno, uint16_t data)
145 {
146 	RGE_TRACE(("rge_reg_put16($%p, 0x%lx, 0x%x)",
147 		(void *)rgep, regno, data));
148 
149 	ddi_put16(rgep->io_handle, REG16(rgep, regno), data);
150 	RGE_PCICHK(rgep);
151 }
152 
153 static void rge_reg_set16(rge_t *rgep, uintptr_t regno, uint16_t bits);
154 #pragma	inline(rge_reg_set16)
155 
156 static void
157 rge_reg_set16(rge_t *rgep, uintptr_t regno, uint16_t bits)
158 {
159 	uint16_t regval;
160 
161 	RGE_TRACE(("rge_reg_set16($%p, 0x%lx, 0x%x)",
162 		(void *)rgep, regno, bits));
163 
164 	regval = rge_reg_get16(rgep, regno);
165 	regval |= bits;
166 	rge_reg_put16(rgep, regno, regval);
167 }
168 
169 static void rge_reg_clr16(rge_t *rgep, uintptr_t regno, uint16_t bits);
170 #pragma	inline(rge_reg_clr16)
171 
172 static void
173 rge_reg_clr16(rge_t *rgep, uintptr_t regno, uint16_t bits)
174 {
175 	uint16_t regval;
176 
177 	RGE_TRACE(("rge_reg_clr16($%p, 0x%lx, 0x%x)",
178 		(void *)rgep, regno, bits));
179 
180 	regval = rge_reg_get16(rgep, regno);
181 	regval &= ~bits;
182 	rge_reg_put16(rgep, regno, regval);
183 }
184 
185 static uint8_t rge_reg_get8(rge_t *rgep, uintptr_t regno);
186 #pragma	inline(rge_reg_get8)
187 
188 static uint8_t
189 rge_reg_get8(rge_t *rgep, uintptr_t regno)
190 {
191 	RGE_TRACE(("rge_reg_get8($%p, 0x%lx)",
192 		(void *)rgep, regno));
193 
194 	return (ddi_get8(rgep->io_handle, REG8(rgep, regno)));
195 }
196 
197 static void rge_reg_put8(rge_t *rgep, uintptr_t regno, uint8_t data);
198 #pragma	inline(rge_reg_put8)
199 
200 static void
201 rge_reg_put8(rge_t *rgep, uintptr_t regno, uint8_t data)
202 {
203 	RGE_TRACE(("rge_reg_put8($%p, 0x%lx, 0x%x)",
204 		(void *)rgep, regno, data));
205 
206 	ddi_put8(rgep->io_handle, REG8(rgep, regno), data);
207 	RGE_PCICHK(rgep);
208 }
209 
210 static void rge_reg_set8(rge_t *rgep, uintptr_t regno, uint8_t bits);
211 #pragma	inline(rge_reg_set8)
212 
213 static void
214 rge_reg_set8(rge_t *rgep, uintptr_t regno, uint8_t bits)
215 {
216 	uint8_t regval;
217 
218 	RGE_TRACE(("rge_reg_set8($%p, 0x%lx, 0x%x)",
219 		(void *)rgep, regno, bits));
220 
221 	regval = rge_reg_get8(rgep, regno);
222 	regval |= bits;
223 	rge_reg_put8(rgep, regno, regval);
224 }
225 
226 static void rge_reg_clr8(rge_t *rgep, uintptr_t regno, uint8_t bits);
227 #pragma	inline(rge_reg_clr8)
228 
229 static void
230 rge_reg_clr8(rge_t *rgep, uintptr_t regno, uint8_t bits)
231 {
232 	uint8_t regval;
233 
234 	RGE_TRACE(("rge_reg_clr8($%p, 0x%lx, 0x%x)",
235 		(void *)rgep, regno, bits));
236 
237 	regval = rge_reg_get8(rgep, regno);
238 	regval &= ~bits;
239 	rge_reg_put8(rgep, regno, regval);
240 }
241 
242 uint16_t rge_mii_get16(rge_t *rgep, uintptr_t mii);
243 #pragma	no_inline(rge_mii_get16)
244 
245 uint16_t
246 rge_mii_get16(rge_t *rgep, uintptr_t mii)
247 {
248 	uint32_t regval;
249 	uint32_t val32;
250 	uint32_t i;
251 
252 	regval = (mii & PHY_REG_MASK) << PHY_REG_SHIFT;
253 	rge_reg_put32(rgep, PHY_ACCESS_REG, regval);
254 
255 	/*
256 	 * Waiting for PHY reading OK
257 	 */
258 	for (i = 0; i < PHY_RESET_LOOP; i++) {
259 		drv_usecwait(100);
260 		val32 = rge_reg_get32(rgep, PHY_ACCESS_REG);
261 		if (val32 & PHY_ACCESS_WR_FLAG)
262 			return (val32 & 0xffff);
263 	}
264 
265 	RGE_REPORT((rgep, "rge_mii_get16(0x%x) fail, val = %x", mii, val32));
266 	return ((uint16_t)~0u);
267 }
268 
269 void rge_mii_put16(rge_t *rgep, uintptr_t mii, uint16_t data);
270 #pragma	no_inline(rge_mii_put16)
271 
272 void
273 rge_mii_put16(rge_t *rgep, uintptr_t mii, uint16_t data)
274 {
275 	uint32_t regval;
276 	uint32_t val32;
277 	uint32_t i;
278 
279 	regval = (mii & PHY_REG_MASK) << PHY_REG_SHIFT;
280 	regval |= data & PHY_DATA_MASK;
281 	regval |= PHY_ACCESS_WR_FLAG;
282 	rge_reg_put32(rgep, PHY_ACCESS_REG, regval);
283 
284 	/*
285 	 * Waiting for PHY writing OK
286 	 */
287 	for (i = 0; i < PHY_RESET_LOOP; i++) {
288 		drv_usecwait(100);
289 		val32 = rge_reg_get32(rgep, PHY_ACCESS_REG);
290 		if (!(val32 & PHY_ACCESS_WR_FLAG))
291 			return;
292 	}
293 	RGE_REPORT((rgep, "rge_mii_put16(0x%lx, 0x%x) fail",
294 	    mii, data));
295 }
296 
297 /*
298  * Atomically shift a 32-bit word left, returning
299  * the value it had *before* the shift was applied
300  */
301 static uint32_t rge_atomic_shl32(uint32_t *sp, uint_t count);
302 #pragma	inline(rge_mii_put16)
303 
304 static uint32_t
305 rge_atomic_shl32(uint32_t *sp, uint_t count)
306 {
307 	uint32_t oldval;
308 	uint32_t newval;
309 
310 	/* ATOMICALLY */
311 	do {
312 		oldval = *sp;
313 		newval = oldval << count;
314 	} while (cas32(sp, oldval, newval) != oldval);
315 
316 	return (oldval);
317 }
318 
319 /*
320  * PHY operation routines
321  */
322 #if	RGE_DEBUGGING
323 
/*
 * Debug-only helper: snapshot all 32 MII registers of the PHY and
 * log them, eight per line.  Caller must hold <genlock>.
 */
static void
rge_phydump(rge_t *rgep)
{
	uint16_t regs[32];
	int i;

	ASSERT(mutex_owned(rgep->genlock));

	/* Read the whole register set first so the dump is a snapshot */
	for (i = 0; i < 32; ++i) {
		regs[i] = rge_mii_get16(rgep, i);
	}

	for (i = 0; i < 32; i += 8)
		RGE_DEBUG(("rge_phydump: "
				"0x%04x %04x %04x %04x %04x %04x %04x %04x",
			regs[i+0], regs[i+1], regs[i+2], regs[i+3],
			regs[i+4], regs[i+5], regs[i+6], regs[i+7]));
}
342 
343 #endif	/* RGE_DEBUGGING */
344 
345 /*
346  * Basic low-level function to probe for a PHY
347  *
348  * Returns TRUE if the PHY responds with valid data, FALSE otherwise
349  */
350 static boolean_t
351 rge_phy_probe(rge_t *rgep)
352 {
353 	uint16_t phy_status;
354 
355 	ASSERT(mutex_owned(rgep->genlock));
356 
357 	/*
358 	 * Read the MII_STATUS register twice, in
359 	 * order to clear any sticky bits (but they should
360 	 * have been cleared by the RESET, I think).
361 	 */
362 	phy_status = rge_mii_get16(rgep, MII_STATUS);
363 	phy_status = rge_mii_get16(rgep, MII_STATUS);
364 	RGE_DEBUG(("rge_phy_probe: status 0x%x", phy_status));
365 
366 	/*
367 	 * Now check the value read; it should have at least one bit set
368 	 * (for the device capabilities) and at least one clear (one of
369 	 * the error bits). So if we see all 0s or all 1s, there's a
370 	 * problem.  In particular, rge_mii_get16() returns all 1s if
371 	 * communications fails ...
372 	 */
373 	switch (phy_status) {
374 	case 0x0000:
375 	case 0xffff:
376 		return (B_FALSE);
377 
378 	default :
379 		return (B_TRUE);
380 	}
381 }
382 
/*
 * Work around the RTL8169S/8110S "PCS bug": while the link is down
 * and 1000Mb/s full-duplex is advertised, periodically reset the PHY
 * so it can (re-)establish link.
 */
static void
rge_phy_check(rge_t *rgep)
{
	uint16_t gig_ctl;

	if (rgep->param_link_up  == LINK_STATE_DOWN) {
		/*
		 * RTL8169S/8110S PHY has the "PCS bug".  Need to reset
		 * the PHY every 15 invocations (presumably one per
		 * second, i.e. every 15 seconds — TODO confirm caller
		 * cadence) when the link is down and 1000Mb/s
		 * full-duplex is being advertised.
		 */
		if (rgep->chipid.phy_ver == PHY_VER_S) {
			gig_ctl = rge_mii_get16(rgep, MII_1000BASE_T_CONTROL);
			if (gig_ctl & MII_1000BT_CTL_ADV_FDX) {
				rgep->link_down_count++;
				if (rgep->link_down_count > 15) {
					(void) rge_phy_reset(rgep);
					rgep->stats.phy_reset++;
					rgep->link_down_count = 0;
				}
			}
		}
	} else {
		/* Link is up: restart the count-down */
		rgep->link_down_count = 0;
	}
}
408 
409 /*
410  * Basic low-level function to reset the PHY.
411  * Doesn't incorporate any special-case workarounds.
412  *
413  * Returns TRUE on success, FALSE if the RESET bit doesn't clear
414  */
415 boolean_t
416 rge_phy_reset(rge_t *rgep)
417 {
418 	uint16_t control;
419 	uint_t count;
420 
421 	/*
422 	 * Set the PHY RESET bit, then wait up to 5 ms for it to self-clear
423 	 */
424 	control = rge_mii_get16(rgep, MII_CONTROL);
425 	rge_mii_put16(rgep, MII_CONTROL, control | MII_CONTROL_RESET);
426 	for (count = 0; ++count < 1000; ) {
427 		drv_usecwait(100);
428 		control = rge_mii_get16(rgep, MII_CONTROL);
429 		if (BIC(control, MII_CONTROL_RESET))
430 			return (B_TRUE);
431 	}
432 
433 	RGE_REPORT((rgep, "rge_phy_reset: FAILED, control now 0x%x", control));
434 	return (B_FALSE);
435 }
436 
437 /*
438  * Synchronise the PHY's speed/duplex/autonegotiation capabilities
439  * and advertisements with the required settings as specified by the various
440  * param_* variables that can be poked via the NDD interface.
441  *
442  * We always reset the PHY and reprogram *all* the relevant registers,
443  * not just those changed.  This should cause the link to go down, and then
444  * back up again once the link is stable and autonegotiation (if enabled)
445  * is complete.  We should get a link state change interrupt somewhere along
446  * the way ...
447  *
448  * NOTE: <genlock> must already be held by the caller
449  */
void
rge_phy_update(rge_t *rgep)
{
	boolean_t adv_autoneg;
	boolean_t adv_pause;
	boolean_t adv_asym_pause;
	boolean_t adv_1000fdx;
	boolean_t adv_1000hdx;
	boolean_t adv_100fdx;
	boolean_t adv_100hdx;
	boolean_t adv_10fdx;
	boolean_t adv_10hdx;

	uint16_t control;
	uint16_t gigctrl;
	uint16_t anar;

	ASSERT(mutex_owned(rgep->genlock));

	RGE_DEBUG(("rge_phy_update: autoneg %d "
			"pause %d asym_pause %d "
			"1000fdx %d 1000hdx %d "
			"100fdx %d 100hdx %d "
			"10fdx %d 10hdx %d ",
		rgep->param_adv_autoneg,
		rgep->param_adv_pause, rgep->param_adv_asym_pause,
		rgep->param_adv_1000fdx, rgep->param_adv_1000hdx,
		rgep->param_adv_100fdx, rgep->param_adv_100hdx,
		rgep->param_adv_10fdx, rgep->param_adv_10hdx));

	control = gigctrl = anar = 0;

	/*
	 * PHY settings are normally based on the param_* variables,
	 * but if any loopback mode is in effect, that takes precedence.
	 *
	 * RGE supports MAC-internal loopback, PHY-internal loopback,
	 * and External loopback at a variety of speeds (with a special
	 * cable).  In all cases, autoneg is turned OFF, full-duplex
	 * is turned ON, and the speed/mastership is forced.
	 */
	switch (rgep->param_loop_mode) {
	case RGE_LOOP_NONE:
	default:
		adv_autoneg = rgep->param_adv_autoneg;
		adv_pause = rgep->param_adv_pause;
		adv_asym_pause = rgep->param_adv_asym_pause;
		adv_1000fdx = rgep->param_adv_1000fdx;
		adv_1000hdx = rgep->param_adv_1000hdx;
		adv_100fdx = rgep->param_adv_100fdx;
		adv_100hdx = rgep->param_adv_100hdx;
		adv_10fdx = rgep->param_adv_10fdx;
		adv_10hdx = rgep->param_adv_10hdx;
		break;

	case RGE_LOOP_INTERNAL_PHY:
	case RGE_LOOP_INTERNAL_MAC:
		adv_autoneg = adv_pause = adv_asym_pause = B_FALSE;
		adv_1000fdx = adv_100fdx = adv_10fdx = B_FALSE;
		adv_1000hdx = adv_100hdx = adv_10hdx = B_FALSE;
		rgep->param_link_duplex = LINK_DUPLEX_FULL;

		/* Both internal loopback modes force 1000Mb/s full-duplex */
		switch (rgep->param_loop_mode) {
		case RGE_LOOP_INTERNAL_PHY:
			rgep->param_link_speed = 1000;
			adv_1000fdx = B_TRUE;
			control = MII_CONTROL_LOOPBACK;
			break;

		case RGE_LOOP_INTERNAL_MAC:
			rgep->param_link_speed = 1000;
			adv_1000fdx = B_TRUE;
			break;
		}
	}

	RGE_DEBUG(("rge_phy_update: autoneg %d "
			"pause %d asym_pause %d "
			"1000fdx %d 1000hdx %d "
			"100fdx %d 100hdx %d "
			"10fdx %d 10hdx %d ",
		adv_autoneg,
		adv_pause, adv_asym_pause,
		adv_1000fdx, adv_1000hdx,
		adv_100fdx, adv_100hdx,
		adv_10fdx, adv_10hdx));

	/*
	 * We should have at least one technology capability set;
	 * if not, we select a default of 1000Mb/s full-duplex
	 */
	if (!adv_1000fdx && !adv_100fdx && !adv_10fdx &&
	    !adv_1000hdx && !adv_100hdx && !adv_10hdx)
		adv_1000fdx = B_TRUE;

	/*
	 * Now transform the adv_* variables into the proper settings
	 * of the PHY registers ...
	 *
	 * If autonegotiation is (now) enabled, we want to trigger
	 * a new autonegotiation cycle once the PHY has been
	 * programmed with the capabilities to be advertised.
	 *
	 * RTL8169/8110 doesn't support 1000Mb/s half-duplex.
	 */
	if (adv_autoneg)
		control |= MII_CONTROL_ANE|MII_CONTROL_RSAN;

	if (adv_1000fdx)
		control |= MII_CONTROL_1000MB|MII_CONTROL_FDUPLEX;
	else if (adv_1000hdx)
		control |= MII_CONTROL_1000MB;
	else if (adv_100fdx)
		control |= MII_CONTROL_100MB|MII_CONTROL_FDUPLEX;
	else if (adv_100hdx)
		control |= MII_CONTROL_100MB;
	else if (adv_10fdx)
		control |= MII_CONTROL_FDUPLEX;
	else if (adv_10hdx)
		control |= 0;
	else
		{ _NOTE(EMPTY); }	/* Can't get here anyway ...	*/

	if (adv_1000fdx) {
		gigctrl |= MII_1000BT_CTL_ADV_FDX;
		/*
		 * Chipset limitation: need set other capabilities to true
		 */
		adv_100fdx = B_TRUE;
		adv_100hdx  = B_TRUE;
		adv_10fdx = B_TRUE;
		adv_10hdx = B_TRUE;
	}

	if (adv_1000hdx)
		gigctrl |= MII_1000BT_CTL_ADV_HDX;

	/* Fold the 10/100 capabilities into the autoneg advertisement */
	if (adv_100fdx)
		anar |= MII_ABILITY_100BASE_TX_FD;
	if (adv_100hdx)
		anar |= MII_ABILITY_100BASE_TX;
	if (adv_10fdx)
		anar |= MII_ABILITY_10BASE_T_FD;
	if (adv_10hdx)
		anar |= MII_ABILITY_10BASE_T;

	if (adv_pause)
		anar |= MII_ABILITY_PAUSE;
	if (adv_asym_pause)
		anar |= MII_ABILITY_ASYM_PAUSE;

	/*
	 * Munge in any other fixed bits we require ...
	 */
	anar |= MII_AN_SELECTOR_8023;

	/*
	 * Restart the PHY and write the new values.  Note the
	 * time, so that we can say whether subsequent link state
	 * changes can be attributed to our reprogramming the PHY
	 */
	rgep->phys_write_time = gethrtime();
	rge_phy_init(rgep);
	rge_mii_put16(rgep, MII_AN_ADVERT, anar);
	rge_mii_put16(rgep, MII_CONTROL, control);
	rge_mii_put16(rgep, MII_1000BASE_T_CONTROL, gigctrl);

	RGE_DEBUG(("rge_phy_update: anar <- 0x%x", anar));
	RGE_DEBUG(("rge_phy_update: control <- 0x%x", control));
	RGE_DEBUG(("rge_phy_update: gigctrl <- 0x%x", gigctrl));
}
621 
void rge_phy_init(rge_t *rgep);
#pragma	no_inline(rge_phy_init)

/*
 * Apply the vendor-recommended PHY initialisation sequence.
 *
 * The register/value pairs below are reproduced verbatim from the
 * Realtek Programming Guide, which gives no rationale for the
 * individual writes — do not reorder or "simplify" them.
 */
void
rge_phy_init(rge_t *rgep)
{
	uint16_t val16;

	rgep->phy_mii_addr = 1;

	/*
	 * Below phy config steps are copied from the Programming Guide
	 * (there's no detail comments for these steps.)
	 */
	if ((rgep->chipid.mac_ver == MAC_VER_SD ||
	    rgep->chipid.mac_ver == MAC_VER_SE) &&
	    (rgep->chipid.phy_ver == PHY_VER_S)) {
		rge_mii_put16(rgep, PHY_1F_REG, 1);
		rge_mii_put16(rgep, PHY_15_REG, 0x1000);
		rge_mii_put16(rgep, PHY_18_REG, 0x65c7);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 & (~ANAR_ASY_PAUSE));
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 & 0x0fff);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0x00a1);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0x0008);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x1020);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x1000);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 | ANAR_ASY_PAUSE);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 & (~ANAR_ASY_PAUSE));
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, (val16 & 0x0fff) | 0x7000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xff41);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xde60);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x0140);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x0077);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 | ANAR_ASY_PAUSE);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 & (~ANAR_ASY_PAUSE));
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, (val16 & 0x0fff) | 0xa000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xdf01);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xdf20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0xff95);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0xfa00);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 | ANAR_ASY_PAUSE);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 & (~ANAR_ASY_PAUSE));
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, (val16 & 0x0fff) | 0xb000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xff41);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xde20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x0140);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x00bb);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 | ANAR_ASY_PAUSE);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 & (~ANAR_ASY_PAUSE));
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, (val16 & 0x0fff) | 0xf000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xdf01);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xdf20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0xff95);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0xbf00);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 | ANAR_ASY_PAUSE);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 & (~ANAR_ASY_PAUSE));
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		rge_mii_put16(rgep, PHY_0B_REG, 0x0000);
	}

	/* Separate vendor-recommended sequence for the 8169SB variant */
	if (rgep->chipid.mac_ver == MAC_VER_SB) {
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_1B_REG, 0x841e);
		rge_mii_put16(rgep, PHY_0E_REG, 0x7bfb);
		rge_mii_put16(rgep, PHY_GBCR_REG, GBCR_DEFAULT);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0002);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x90D0);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
	}
}
708 
709 void rge_chip_ident(rge_t *rgep);
710 #pragma	no_inline(rge_chip_ident)
711 
712 void
713 rge_chip_ident(rge_t *rgep)
714 {
715 	chip_id_t *chip = &rgep->chipid;
716 	uint32_t val32;
717 	uint16_t val16;
718 
719 	val32 = rge_reg_get32(rgep, TX_CONFIG_REG);
720 	val32 &= HW_VERSION_ID_0 | HW_VERSION_ID_1;
721 	chip->mac_ver = val32;
722 
723 	val16 = rge_mii_get16(rgep, PHY_ID_REG_2);
724 	val16 &= PHY_VER_MASK;
725 	chip->phy_ver = val16;
726 
727 	if (rgep->param_default_mtu > ETHERMTU) {
728 		rgep->rxbuf_size = RGE_BUFF_SIZE_JUMBO;
729 		rgep->txbuf_size = RGE_BUFF_SIZE_JUMBO;
730 		rgep->ethmax_size = RGE_JUMBO_SIZE;
731 	} else {
732 		rgep->rxbuf_size = RGE_BUFF_SIZE_STD;
733 		rgep->txbuf_size = RGE_BUFF_SIZE_STD;
734 		rgep->ethmax_size = ETHERMAX;
735 	}
736 
737 	chip->rxconfig = RX_CONFIG_DEFAULT;
738 	chip->txconfig = TX_CONFIG_DEFAULT;
739 
740 	RGE_TRACE(("%s: MAC version = %x, PHY version = %x",
741 	    rgep->ifname, chip->mac_ver, chip->phy_ver));
742 
743 	/* set pci latency timer */
744 	if (chip->mac_ver == MAC_VER_NS || chip->mac_ver == MAC_VER_SD)
745 		pci_config_put8(rgep->cfg_handle, PCI_CONF_LATENCY_TIMER, 0x40);
746 }
747 
748 /*
749  * Perform first-stage chip (re-)initialisation, using only config-space
750  * accesses:
751  *
752  * + Read the vendor/device/revision/subsystem/cache-line-size registers,
753  *   returning the data in the structure pointed to by <idp>.
754  * + Enable Memory Space accesses.
755  * + Enable Bus Mastering according.
756  */
757 void rge_chip_cfg_init(rge_t *rgep, chip_id_t *cidp);
758 #pragma	no_inline(rge_chip_cfg_init)
759 
760 void
761 rge_chip_cfg_init(rge_t *rgep, chip_id_t *cidp)
762 {
763 	ddi_acc_handle_t handle;
764 	uint16_t commd;
765 
766 	handle = rgep->cfg_handle;
767 
768 	/*
769 	 * Save PCI cache line size and subsystem vendor ID
770 	 */
771 	cidp->command = pci_config_get16(handle, PCI_CONF_COMM);
772 	cidp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
773 	cidp->device = pci_config_get16(handle, PCI_CONF_DEVID);
774 	cidp->subven = pci_config_get16(handle, PCI_CONF_SUBVENID);
775 	cidp->subdev = pci_config_get16(handle, PCI_CONF_SUBSYSID);
776 	cidp->revision = pci_config_get8(handle, PCI_CONF_REVID);
777 	cidp->clsize = pci_config_get8(handle, PCI_CONF_CACHE_LINESZ);
778 	cidp->latency = pci_config_get8(handle, PCI_CONF_LATENCY_TIMER);
779 
780 	/*
781 	 * Turn on Master Enable (DMA) and IO Enable bits.
782 	 * Enable PCI Memory Space accesses
783 	 */
784 	commd = cidp->command;
785 	commd |= PCI_COMM_ME | PCI_COMM_MAE | PCI_COMM_IO;
786 	pci_config_put16(handle, PCI_CONF_COMM, commd);
787 
788 	RGE_DEBUG(("rge_chip_cfg_init: vendor 0x%x device 0x%x revision 0x%x",
789 		cidp->vendor, cidp->device, cidp->revision));
790 	RGE_DEBUG(("rge_chip_cfg_init: subven 0x%x subdev 0x%x",
791 		cidp->subven, cidp->subdev));
792 	RGE_DEBUG(("rge_chip_cfg_init: clsize %d latency %d command 0x%x",
793 		cidp->clsize, cidp->latency, cidp->command));
794 }
795 
796 int rge_chip_reset(rge_t *rgep);
797 #pragma	no_inline(rge_chip_reset)
798 
799 int
800 rge_chip_reset(rge_t *rgep)
801 {
802 	int i;
803 	uint8_t val8;
804 
805 	/*
806 	 * Chip should be in STOP state
807 	 */
808 	rge_reg_clr8(rgep, RT_COMMAND_REG,
809 	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
810 
811 	/*
812 	 * Disable interrupt
813 	 */
814 	rge_reg_clr16(rgep, INT_MASK_REG, INT_MASK_ALL);
815 	rgep->int_mask = INT_MASK_NONE;
816 
817 	/*
818 	 * Clear pended interrupt
819 	 */
820 	rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL);
821 
822 	/*
823 	 * Reset chip
824 	 */
825 	rge_reg_set8(rgep, RT_COMMAND_REG, RT_COMMAND_RESET);
826 
827 	/*
828 	 * Wait for reset success
829 	 */
830 	for (i = 0; i < CHIP_RESET_LOOP; i++) {
831 		drv_usecwait(10);
832 		val8 = rge_reg_get8(rgep, RT_COMMAND_REG);
833 		if (!(val8 & RT_COMMAND_RESET)) {
834 			rgep->rge_chip_state = RGE_CHIP_RESET;
835 			return (0);
836 		}
837 	}
838 	RGE_REPORT((rgep, "rge_chip_reset fail."));
839 	return (-1);
840 }
841 
void rge_chip_init(rge_t *rgep);
#pragma	no_inline(rge_chip_init)

/*
 * Second-stage chip initialisation: program the PHY, the C+ command
 * register, packet-size limits, Tx/Rx configuration and descriptor
 * ring base addresses, multicast filter, and quiesce interrupts.
 * The ordering of these steps follows the Realtek recommendations
 * and should not be changed casually.
 */
void
rge_chip_init(rge_t *rgep)
{
	uint32_t val32;

	/*
	 * Config MII register
	 */
	rgep->param_link_up = LINK_STATE_DOWN;
	rge_phy_update(rgep);

	/*
	 * Enable Rx checksum offload.
	 *  Then for vlan support, we must enable receive vlan de-tagging.
	 *  Otherwise, there'll be checksum error.
	 */
	rge_reg_set16(rgep, CPLUS_COMMAND_REG, RX_CKSM_OFFLOAD | RX_VLAN_DETAG);

	/*
	 * Suggested setting from Realtek
	 */
	if (rgep->chipid.mac_ver == MAC_VER_SD) {
		rge_reg_set16(rgep, CPLUS_COMMAND_REG,
		    CPLUS_BIT14 | MUL_PCI_RW_ENABLE);
		rge_reg_put8(rgep, RESV_82_REG, 0x01);
	}
	rge_reg_clr16(rgep, CPLUS_COMMAND_REG, 0x03);

	/*
	 * Start transmit/receive before set tx/rx configuration register
	 */
	rge_reg_set8(rgep, RT_COMMAND_REG,
	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);

	/*
	 * Set dump tally counter register: high 32 bits of the stats
	 * DMA address first, then the low 32 bits merged with the
	 * register's reserved bits.
	 */
	val32 = rgep->dma_area_stats.cookie.dmac_laddress >> 32;
	rge_reg_put32(rgep, DUMP_COUNTER_REG_1, val32);
	val32 = rge_reg_get32(rgep, DUMP_COUNTER_REG_0);
	val32 &= DUMP_COUNTER_REG_RESV;
	val32 |= rgep->dma_area_stats.cookie.dmac_laddress;
	rge_reg_put32(rgep, DUMP_COUNTER_REG_0, val32);

	/*
	 * Change to config register write enable mode
	 */
	rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);

	/*
	 * Set Tx/Rx maximum packet size
	 */
	if (rgep->param_default_mtu > ETHERMTU) {
		rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_JUMBO);
		rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_JUMBO);
	} else {
		rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_STD);
		rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_STD);
	}

	/*
	 * Set receive configuration register (preserving reserved bits;
	 * promiscuous mode accepts all packets)
	 */
	val32 = rge_reg_get32(rgep, RX_CONFIG_REG);
	val32 &= RX_CONFIG_REG_RESV;
	if (rgep->promisc)
		val32 |= RX_ACCEPT_ALL_PKT;
	rge_reg_put32(rgep, RX_CONFIG_REG, val32 | rgep->chipid.rxconfig);

	/*
	 * Set transmit configuration register
	 */
	val32 = rge_reg_get32(rgep, TX_CONFIG_REG);
	val32 &= TX_CONFIG_REG_RESV;
	rge_reg_put32(rgep, TX_CONFIG_REG, val32 | rgep->chipid.txconfig);

	/*
	 * Initialize PHY registers
	 */
	rge_phy_init(rgep);

	/*
	 * Set Tx/Rx descriptor ring base addresses (64-bit, split
	 * across LO/HI register pairs; the high-priority Tx ring
	 * is unused)
	 */
	val32 = rgep->tx_desc.cookie.dmac_laddress;
	rge_reg_put32(rgep, NORMAL_TX_RING_ADDR_LO_REG, val32);
	val32 = rgep->tx_desc.cookie.dmac_laddress >> 32;
	rge_reg_put32(rgep, NORMAL_TX_RING_ADDR_HI_REG, val32);
	rge_reg_put32(rgep, HIGH_TX_RING_ADDR_LO_REG, 0);
	rge_reg_put32(rgep, HIGH_TX_RING_ADDR_HI_REG, 0);
	val32 = rgep->rx_desc.cookie.dmac_laddress;
	rge_reg_put32(rgep, RX_RING_ADDR_LO_REG, val32);
	val32 = rgep->rx_desc.cookie.dmac_laddress >> 32;
	rge_reg_put32(rgep, RX_RING_ADDR_HI_REG, val32);

	/*
	 * Suggested setting from Realtek
	 */
	rge_reg_put16(rgep, RESV_E2_REG, 0x282a);

	/*
	 * Return to normal network/host communication mode
	 */
	rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
	drv_usecwait(20);

	/*
	 * Set multicast register
	 */
	rge_reg_put32(rgep, MULTICAST_0_REG, rgep->mcast_hash[0]);
	rge_reg_put32(rgep, MULTICAST_4_REG, rgep->mcast_hash[1]);

	/*
	 * Mask and clear all Interrupt
	 */
	rge_reg_put16(rgep, INT_MASK_REG, INT_MASK_NONE);
	rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL);

	/*
	 * Misc register setting:
	 *   -- Missed packet counter: clear it
	 *   -- TimerInt Register
	 *   -- Timer count register
	 */
	rge_reg_put32(rgep, RX_PKT_MISS_COUNT_REG, 0);
	rge_reg_put32(rgep, TIMER_INT_REG, TIMER_INT_NONE);
	rge_reg_put32(rgep, TIMER_COUNT_REG, 0);
}
973 
974 /*
975  * rge_chip_start() -- start the chip transmitting and/or receiving,
976  * including enabling interrupts
977  */
978 void rge_chip_start(rge_t *rgep);
979 #pragma	no_inline(rge_chip_start)
980 
981 void
982 rge_chip_start(rge_t *rgep)
983 {
984 	/*
985 	 * Clear statistics
986 	 */
987 	bzero(&rgep->stats, sizeof (rge_stats_t));
988 	DMA_ZERO(rgep->dma_area_stats);
989 
990 	/*
991 	 * Start transmit/receive
992 	 */
993 	rge_reg_set8(rgep, RT_COMMAND_REG,
994 	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
995 
996 	/*
997 	 * Enable interrupt
998 	 */
999 	rge_reg_set16(rgep, INT_MASK_REG, RGE_INT_MASK);
1000 	rgep->int_mask = RGE_INT_MASK;
1001 
1002 	/*
1003 	 * All done!
1004 	 */
1005 	rgep->rge_chip_state = RGE_CHIP_RUNNING;
1006 }
1007 
1008 /*
1009  * rge_chip_stop() -- stop board receiving
1010  */
1011 void rge_chip_stop(rge_t *rgep, boolean_t fault);
1012 #pragma	no_inline(rge_chip_stop)
1013 
1014 void
1015 rge_chip_stop(rge_t *rgep, boolean_t fault)
1016 {
1017 	/*
1018 	 * Disable interrupt
1019 	 */
1020 	rge_reg_put16(rgep, INT_MASK_REG, INT_MASK_NONE);
1021 	rgep->int_mask = INT_MASK_NONE;
1022 
1023 	/*
1024 	 * Clear pended interrupt
1025 	 */
1026 	rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL);
1027 
1028 	/*
1029 	 * Stop the board and disable transmit/receive
1030 	 */
1031 	rge_reg_clr8(rgep, RT_COMMAND_REG,
1032 	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
1033 
1034 	if (fault)
1035 		rgep->rge_chip_state = RGE_CHIP_FAULT;
1036 	else
1037 		rgep->rge_chip_state = RGE_CHIP_STOPPED;
1038 }
1039 
1040 /*
1041  * rge_get_mac_addr() -- get the MAC address on NIC
1042  */
1043 static void rge_get_mac_addr(rge_t *rgep);
1044 #pragma	inline(rge_get_mac_addr)
1045 
1046 static void
1047 rge_get_mac_addr(rge_t *rgep)
1048 {
1049 	uint8_t *macaddr = rgep->netaddr;
1050 	uint32_t val32;
1051 
1052 	/*
1053 	 * Read first 4-byte of mac address
1054 	 */
1055 	val32 = rge_reg_get32(rgep, ID_0_REG);
1056 	macaddr[0] = val32 & 0xff;
1057 	val32 = val32 >> 8;
1058 	macaddr[1] = val32 & 0xff;
1059 	val32 = val32 >> 8;
1060 	macaddr[2] = val32 & 0xff;
1061 	val32 = val32 >> 8;
1062 	macaddr[3] = val32 & 0xff;
1063 
1064 	/*
1065 	 * Read last 2-byte of mac address
1066 	 */
1067 	val32 = rge_reg_get32(rgep, ID_4_REG);
1068 	macaddr[4] = val32 & 0xff;
1069 	val32 = val32 >> 8;
1070 	macaddr[5] = val32 & 0xff;
1071 }
1072 
1073 static void rge_set_mac_addr(rge_t *rgep);
1074 #pragma	inline(rge_set_mac_addr)
1075 
1076 static void
1077 rge_set_mac_addr(rge_t *rgep)
1078 {
1079 	uint8_t *p = rgep->netaddr;
1080 	uint32_t val32;
1081 
1082 	/*
1083 	 * Change to config register write enable mode
1084 	 */
1085 	rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
1086 
1087 	/*
1088 	 * Get first 4 bytes of mac address
1089 	 */
1090 	val32 = p[3];
1091 	val32 = val32 << 8;
1092 	val32 |= p[2];
1093 	val32 = val32 << 8;
1094 	val32 |= p[1];
1095 	val32 = val32 << 8;
1096 	val32 |= p[0];
1097 
1098 	/*
1099 	 * Set first 4 bytes of mac address
1100 	 */
1101 	rge_reg_put32(rgep, ID_0_REG, val32);
1102 
1103 	/*
1104 	 * Get last 2 bytes of mac address
1105 	 */
1106 	val32 = p[5];
1107 	val32 = val32 << 8;
1108 	val32 |= p[4];
1109 
1110 	/*
1111 	 * Set last 2 bytes of mac address
1112 	 */
1113 	val32 |= rge_reg_get32(rgep, ID_4_REG) & ~0xffff;
1114 	rge_reg_put32(rgep, ID_4_REG, val32);
1115 
1116 	/*
1117 	 * Return to normal network/host communication mode
1118 	 */
1119 	rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
1120 }
1121 
static void rge_set_multi_addr(rge_t *rgep);
#pragma	inline(rge_set_multi_addr)

/*
 * rge_set_multi_addr() -- program the 64-bit multicast hash filter
 * from rgep->mcast_hash into the chip's MULTICAST registers, then
 * re-assert the Rx/Tx enables (as after every filter update here)
 */
static void
rge_set_multi_addr(rge_t *rgep)
{
	uint32_t *hashp;

	hashp = rgep->mcast_hash;
	rge_reg_put32(rgep, MULTICAST_0_REG, hashp[0]);
	rge_reg_put32(rgep, MULTICAST_4_REG, hashp[1]);
	rge_reg_set8(rgep, RT_COMMAND_REG,
	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
}
1136 
1137 static void rge_set_promisc(rge_t *rgep);
1138 #pragma	inline(rge_set_promisc)
1139 
1140 static void
1141 rge_set_promisc(rge_t *rgep)
1142 {
1143 	if (rgep->promisc)
1144 		rge_reg_set32(rgep, RX_CONFIG_REG, RX_ACCEPT_ALL_PKT);
1145 	else
1146 		rge_reg_clr32(rgep, RX_CONFIG_REG, RX_ACCEPT_ALL_PKT);
1147 
1148 	rge_reg_set8(rgep, RT_COMMAND_REG,
1149 	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
1150 }
1151 
1152 /*
1153  * rge_chip_sync() -- program the chip with the unicast MAC address,
1154  * the multicast hash table, the required level of promiscuity, and
1155  * the current loopback mode ...
1156  */
void rge_chip_sync(rge_t *rgep, enum rge_sync_op todo);
#pragma	no_inline(rge_chip_sync)

void
rge_chip_sync(rge_t *rgep, enum rge_sync_op todo)
{
	switch (todo) {
	case RGE_GET_MAC:
		/* Read the MAC address out of the chip ... */
		rge_get_mac_addr(rgep);
		break;
	case RGE_SET_MAC:
		/* Reprogram the unicast MAC address(es) ... */
		rge_set_mac_addr(rgep);
		break;
	case RGE_SET_MUL:
		/* Reprogram the hashed multicast address table ... */
		rge_set_multi_addr(rgep);
		break;
	case RGE_SET_PROMISC:
		/* Set or clear the PROMISCUOUS mode bit */
		rge_set_promisc(rgep);
		break;
	default:
		/* Unknown ops are silently ignored */
		break;
	}
}
1183 
void rge_chip_blank(void *arg, time_t ticks, uint_t count);
#pragma	no_inline(rge_chip_blank)

/*
 * rge_chip_blank() -- interrupt-blanking entry point; deliberately
 * a no-op for this chip (all arguments are ignored)
 */
void
rge_chip_blank(void *arg, time_t ticks, uint_t count)
{
	_NOTE(ARGUNUSED(arg, ticks, count));
}
1192 
void rge_tx_trigger(rge_t *rgep);
#pragma	no_inline(rge_tx_trigger)

/*
 * rge_tx_trigger() -- tell the chip to poll the normal-priority
 * Tx ring for newly queued descriptors
 */
void
rge_tx_trigger(rge_t *rgep)
{
	rge_reg_set8(rgep, TX_RINGS_POLL_REG, NORMAL_TX_RING_POLL);
}
1201 
1202 void rge_hw_stats_dump(rge_t *rgep);
1203 #pragma	no_inline(rge_tx_trigger)
1204 
1205 void
1206 rge_hw_stats_dump(rge_t *rgep)
1207 {
1208 	int i = 0;
1209 
1210 	while (rge_reg_get32(rgep, DUMP_COUNTER_REG_0) & DUMP_START) {
1211 		drv_usecwait(100);
1212 		if (++i > STATS_DUMP_LOOP) {
1213 			RGE_DEBUG(("rge h/w statistics dump fail!"));
1214 			rgep->rge_chip_state = RGE_CHIP_ERROR;
1215 			return;
1216 		}
1217 	}
1218 	DMA_SYNC(rgep->dma_area_stats, DDI_DMA_SYNC_FORKERNEL);
1219 
1220 	/*
1221 	 * Start H/W statistics dump for RTL8169 chip
1222 	 */
1223 	rge_reg_set32(rgep, DUMP_COUNTER_REG_0, DUMP_START);
1224 }
1225 
1226 /*
1227  * ========== Hardware interrupt handler ==========
1228  */
1229 
1230 #undef	RGE_DBG
1231 #define	RGE_DBG		RGE_DBG_INT	/* debug flag for this code	*/
1232 
static void rge_wake_factotum(rge_t *rgep);
#pragma	inline(rge_wake_factotum)

/*
 * rge_wake_factotum() -- schedule the factotum softint, unless a
 * wakeup is already pending (factotum_flag nonzero); the flag is
 * cleared again by rge_chip_factotum() when it runs
 */
static void
rge_wake_factotum(rge_t *rgep)
{
	if (rgep->factotum_flag == 0) {
		rgep->factotum_flag = 1;
		ddi_trigger_softintr(rgep->factotum_id);
	}
}
1244 
1245 /*
1246  *	rge_intr() -- handle chip interrupts
1247  */
uint_t rge_intr(caddr_t arg);
#pragma	no_inline(rge_intr)

uint_t
rge_intr(caddr_t arg)
{
	rge_t *rgep = (rge_t *)arg;
	uint16_t int_status;

	mutex_enter(rgep->genlock);

	/*
	 * Was this interrupt caused by our device...
	 * (only status bits we've enabled in int_mask count)
	 */
	int_status = rge_reg_get16(rgep, INT_STATUS_REG);
	if (!(int_status & rgep->int_mask)) {
		mutex_exit(rgep->genlock);
		return (DDI_INTR_UNCLAIMED);
				/* indicate it wasn't our interrupt */
	}

	rgep->stats.intr++;

	/*
	 * Clear interrupt by writing the latched status bits back
	 */
	rge_reg_put16(rgep, INT_STATUS_REG, int_status);

	/*
	 * Cable link change interrupt: run the cyclic handler, which
	 * wakes the factotum to process the change
	 */
	if (int_status & LINK_CHANGE_INT) {
		rge_chip_cyclic(rgep);
	}
	mutex_exit(rgep->genlock);

	/*
	 * Receive interrupt: count overflows, then drain the Rx ring.
	 * Note: rge_receive() runs without genlock held.
	 */
	if (int_status & RGE_RX_OVERFLOW_INT)
		rgep->stats.overflow++;
	if (rgep->rge_chip_state == RGE_CHIP_RUNNING)
		rge_receive(rgep);

	return (DDI_INTR_CLAIMED);	/* indicate it was our interrupt */
}
1294 
1295 /*
1296  * ========== Factotum, implemented as a softint handler ==========
1297  */
1298 
1299 #undef	RGE_DBG
1300 #define	RGE_DBG		RGE_DBG_FACT	/* debug flag for this code	*/
1301 
static boolean_t rge_factotum_link_check(rge_t *rgep);
#pragma	no_inline(rge_factotum_link_check)

/*
 * rge_factotum_link_check() -- read the PHY status register and, if
 * the link state has changed, record the new state/speed/duplex and
 * log a message.
 *
 * Returns B_TRUE iff the link state changed (so the caller can tell
 * the MAC layer), B_FALSE otherwise.
 */
static boolean_t
rge_factotum_link_check(rge_t *rgep)
{
	uint8_t media_status;
	int32_t link;
	void (*logfn)(rge_t *rgep, const char *fmt, ...);
	const char *msg;
	hrtime_t deltat;

	media_status = rge_reg_get8(rgep, PHY_STATUS_REG);
	link = (media_status & PHY_STATUS_LINK_UP) ?
	    LINK_STATE_UP : LINK_STATE_DOWN;
	if (rgep->param_link_up != link) {
		/*
		 * Link change. We have to decide whether to write a message
		 * on the console or only in the log.  If the PHY has
		 * been reprogrammed (at user request) "recently", then
		 * the message only goes in the log.  Otherwise it's an
		 * "unexpected" event, and it goes on the console as well.
		 */
		rgep->param_link_up = link;
		rgep->phys_event_time = gethrtime();
		deltat = rgep->phys_event_time - rgep->phys_write_time;
		if (deltat > RGE_LINK_SETTLE_TIME)
			msg = "";
		else if (link == LINK_STATE_UP)
			msg = rgep->link_up_msg;
		else
			msg = rgep->link_down_msg;
		/* empty message => unexpected event => console + log */
		logfn = (msg == NULL || *msg == '\0') ? rge_notice : rge_log;

		if (link == LINK_STATE_UP) {
			/* Derive speed/duplex from the PHY status bits */
			if (media_status & PHY_STATUS_1000MF) {
				rgep->param_link_speed = RGE_SPEED_1000M;
				rgep->param_link_duplex = LINK_DUPLEX_FULL;
			} else {
				rgep->param_link_speed =
				    (media_status & PHY_STATUS_100M) ?
				    RGE_SPEED_100M : RGE_SPEED_10M;
				rgep->param_link_duplex =
				    (media_status & PHY_STATUS_DUPLEX_FULL) ?
				    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
			}
			logfn(rgep,
			    "link up %sbps %s_Duplex%s",
			    (rgep->param_link_speed == RGE_SPEED_10M) ?
			    "10M" : (rgep->param_link_speed == RGE_SPEED_100M ?
			    "100M" : "1000M"),
			    (rgep->param_link_duplex == LINK_DUPLEX_FULL) ?
			    "Full" : "Half",
			    msg);
		} else {
			logfn(rgep, "link down%s", msg);
		}
		return (B_TRUE);
	}
	return (B_FALSE);
}
1363 
1364 /*
1365  * Factotum routine to check for Tx stall, using the 'watchdog' counter
1366  */
1367 static boolean_t rge_factotum_stall_check(rge_t *rgep);
1368 #pragma	no_inline(rge_factotum_stall_check)
1369 
1370 static boolean_t
1371 rge_factotum_stall_check(rge_t *rgep)
1372 {
1373 	uint32_t dogval;
1374 
1375 	ASSERT(mutex_owned(rgep->genlock));
1376 
1377 	/*
1378 	 * Specific check for Tx stall ...
1379 	 *
1380 	 * The 'watchdog' counter is incremented whenever a packet
1381 	 * is queued, reset to 1 when some (but not all) buffers
1382 	 * are reclaimed, reset to 0 (disabled) when all buffers
1383 	 * are reclaimed, and shifted left here.  If it exceeds the
1384 	 * threshold value, the chip is assumed to have stalled and
1385 	 * is put into the ERROR state.  The factotum will then reset
1386 	 * it on the next pass.
1387 	 *
1388 	 * All of which should ensure that we don't get into a state
1389 	 * where packets are left pending indefinitely!
1390 	 */
1391 	dogval = rge_atomic_shl32(&rgep->watchdog, 1);
1392 	if (dogval < rge_watchdog_count)
1393 		return (B_FALSE);
1394 
1395 	RGE_REPORT((rgep, "Tx stall detected, watchdog code 0x%x", dogval));
1396 	return (B_TRUE);
1397 
1398 }
1399 
1400 /*
1401  * The factotum is woken up when there's something to do that we'd rather
1402  * not do from inside a hardware interrupt handler or high-level cyclic.
1403  * Its two main tasks are:
1404  *	reset & restart the chip after an error
1405  *	check the link status whenever necessary
1406  */
uint_t rge_chip_factotum(caddr_t arg);
#pragma	no_inline(rge_chip_factotum)

uint_t
rge_chip_factotum(caddr_t arg)
{
	rge_t *rgep;
	uint_t result;
	boolean_t error;
	boolean_t linkchg;

	rgep = (rge_t *)arg;

	/*
	 * Only claim the softint if rge_wake_factotum() actually
	 * asked for a wakeup
	 */
	if (rgep->factotum_flag == 0)
		return (DDI_INTR_UNCLAIMED);

	rgep->factotum_flag = 0;
	result = DDI_INTR_CLAIMED;
	error = B_FALSE;
	linkchg = B_FALSE;

	mutex_enter(rgep->genlock);
	switch (rgep->rge_chip_state) {
	default:
		break;

	case RGE_CHIP_RUNNING:
		/* Normal case: check the link and for a Tx stall */
		linkchg = rge_factotum_link_check(rgep);
		error = rge_factotum_stall_check(rgep);
		break;

	case RGE_CHIP_ERROR:
		error = B_TRUE;
		break;

	case RGE_CHIP_FAULT:
		/*
		 * Fault detected, time to reset ...
		 */
		if (rge_autorecover) {
			RGE_REPORT((rgep, "automatic recovery activated"));
			rge_restart(rgep);
		}
		break;
	}

	/*
	 * If an error is detected, stop the chip now, marking it as
	 * faulty, so that it will be reset next time through ...
	 */
	if (error)
		rge_chip_stop(rgep, B_TRUE);
	mutex_exit(rgep->genlock);

	/*
	 * If the link state changed, tell the world about it.
	 * Note: can't do this while still holding the mutex.
	 */
	if (linkchg)
		mac_link_update(rgep->macp, rgep->param_link_up);

	return (result);
}
1470 
1471 /*
1472  * High-level cyclic handler
1473  *
1474  * This routine schedules a (low-level) softint callback to the
1475  * factotum, and prods the chip to update the status block (which
1476  * will cause a hardware interrupt when complete).
1477  */
void rge_chip_cyclic(void *arg);
#pragma	no_inline(rge_chip_cyclic)

void
rge_chip_cyclic(void *arg)
{
	rge_t *rgep;

	rgep = arg;

	switch (rgep->rge_chip_state) {
	default:
		/* Chip neither running nor faulted: nothing to do */
		return;

	case RGE_CHIP_RUNNING:
		/* Poll the PHY while the chip is up */
		rge_phy_check(rgep);
		break;

	case RGE_CHIP_FAULT:
	case RGE_CHIP_ERROR:
		/* Fall through to wake the factotum for recovery */
		break;
	}

	rge_wake_factotum(rgep);
}
1503 
1504 
1505 /*
1506  * ========== Ioctl subfunctions ==========
1507  */
1508 
1509 #undef	RGE_DBG
1510 #define	RGE_DBG		RGE_DBG_PPIO	/* debug flag for this code	*/
1511 
1512 #if	RGE_DEBUGGING || RGE_DO_PPIO
1513 
1514 static void rge_chip_peek_cfg(rge_t *rgep, rge_peekpoke_t *ppd);
1515 #pragma	no_inline(rge_chip_peek_cfg)
1516 
1517 static void
1518 rge_chip_peek_cfg(rge_t *rgep, rge_peekpoke_t *ppd)
1519 {
1520 	uint64_t regval;
1521 	uint64_t regno;
1522 
1523 	RGE_TRACE(("rge_chip_peek_cfg($%p, $%p)",
1524 		(void *)rgep, (void *)ppd));
1525 
1526 	regno = ppd->pp_acc_offset;
1527 
1528 	switch (ppd->pp_acc_size) {
1529 	case 1:
1530 		regval = pci_config_get8(rgep->cfg_handle, regno);
1531 		break;
1532 
1533 	case 2:
1534 		regval = pci_config_get16(rgep->cfg_handle, regno);
1535 		break;
1536 
1537 	case 4:
1538 		regval = pci_config_get32(rgep->cfg_handle, regno);
1539 		break;
1540 
1541 	case 8:
1542 		regval = pci_config_get64(rgep->cfg_handle, regno);
1543 		break;
1544 	}
1545 
1546 	ppd->pp_acc_data = regval;
1547 }
1548 
static void rge_chip_poke_cfg(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma	no_inline(rge_chip_poke_cfg)

/*
 * rge_chip_poke_cfg() -- write the 1/2/4/8-byte value in
 * ppd->pp_acc_data to PCI config space at ppd->pp_acc_offset
 */
static void
rge_chip_poke_cfg(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval;
	uint64_t regno;

	RGE_TRACE(("rge_chip_poke_cfg($%p, $%p)",
		(void *)rgep, (void *)ppd));

	regno = ppd->pp_acc_offset;
	regval = ppd->pp_acc_data;

	/* pp_acc_size was validated by rge_pp_ioctl() */
	switch (ppd->pp_acc_size) {
	case 1:
		pci_config_put8(rgep->cfg_handle, regno, regval);
		break;

	case 2:
		pci_config_put16(rgep->cfg_handle, regno, regval);
		break;

	case 4:
		pci_config_put32(rgep->cfg_handle, regno, regval);
		break;

	case 8:
		pci_config_put64(rgep->cfg_handle, regno, regval);
		break;
	}
}
1582 
1583 static void rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd);
1584 #pragma	no_inline(rge_chip_peek_reg)
1585 
1586 static void
1587 rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd)
1588 {
1589 	uint64_t regval;
1590 	void *regaddr;
1591 
1592 	RGE_TRACE(("rge_chip_peek_reg($%p, $%p)",
1593 		(void *)rgep, (void *)ppd));
1594 
1595 	regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);
1596 
1597 	switch (ppd->pp_acc_size) {
1598 	case 1:
1599 		regval = ddi_get8(rgep->io_handle, regaddr);
1600 		break;
1601 
1602 	case 2:
1603 		regval = ddi_get16(rgep->io_handle, regaddr);
1604 		break;
1605 
1606 	case 4:
1607 		regval = ddi_get32(rgep->io_handle, regaddr);
1608 		break;
1609 
1610 	case 8:
1611 		regval = ddi_get64(rgep->io_handle, regaddr);
1612 		break;
1613 	}
1614 
1615 	ppd->pp_acc_data = regval;
1616 }
1617 
1618 static void rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd);
1619 #pragma	no_inline(rge_chip_peek_reg)
1620 
1621 static void
1622 rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd)
1623 {
1624 	uint64_t regval;
1625 	void *regaddr;
1626 
1627 	RGE_TRACE(("rge_chip_poke_reg($%p, $%p)",
1628 		(void *)rgep, (void *)ppd));
1629 
1630 	regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);
1631 	regval = ppd->pp_acc_data;
1632 
1633 	switch (ppd->pp_acc_size) {
1634 	case 1:
1635 		ddi_put8(rgep->io_handle, regaddr, regval);
1636 		break;
1637 
1638 	case 2:
1639 		ddi_put16(rgep->io_handle, regaddr, regval);
1640 		break;
1641 
1642 	case 4:
1643 		ddi_put32(rgep->io_handle, regaddr, regval);
1644 		break;
1645 
1646 	case 8:
1647 		ddi_put64(rgep->io_handle, regaddr, regval);
1648 		break;
1649 	}
1650 	RGE_PCICHK(rgep);
1651 }
1652 
static void rge_chip_peek_mii(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma	no_inline(rge_chip_peek_mii)

/*
 * rge_chip_peek_mii() -- read one 16-bit PHY register; the offset
 * was scaled by 2 in rge_pp_ioctl(), so undo that scaling here
 */
static void
rge_chip_peek_mii(rge_t *rgep, rge_peekpoke_t *ppd)
{
	RGE_TRACE(("rge_chip_peek_mii($%p, $%p)",
		(void *)rgep, (void *)ppd));

	ppd->pp_acc_data = rge_mii_get16(rgep, ppd->pp_acc_offset/2);
}
1664 
static void rge_chip_poke_mii(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma	no_inline(rge_chip_poke_mii)

/*
 * rge_chip_poke_mii() -- write one 16-bit PHY register; the offset
 * was scaled by 2 in rge_pp_ioctl(), so undo that scaling here
 */
static void
rge_chip_poke_mii(rge_t *rgep, rge_peekpoke_t *ppd)
{
	RGE_TRACE(("rge_chip_poke_mii($%p, $%p)",
		(void *)rgep, (void *)ppd));

	rge_mii_put16(rgep, ppd->pp_acc_offset/2, ppd->pp_acc_data);
}
1676 
1677 static void rge_chip_peek_mem(rge_t *rgep, rge_peekpoke_t *ppd);
1678 #pragma	no_inline(rge_chip_peek_mem)
1679 
1680 static void
1681 rge_chip_peek_mem(rge_t *rgep, rge_peekpoke_t *ppd)
1682 {
1683 	uint64_t regval;
1684 	void *vaddr;
1685 
1686 	RGE_TRACE(("rge_chip_peek_rge($%p, $%p)",
1687 		(void *)rgep, (void *)ppd));
1688 
1689 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
1690 
1691 	switch (ppd->pp_acc_size) {
1692 	case 1:
1693 		regval = *(uint8_t *)vaddr;
1694 		break;
1695 
1696 	case 2:
1697 		regval = *(uint16_t *)vaddr;
1698 		break;
1699 
1700 	case 4:
1701 		regval = *(uint32_t *)vaddr;
1702 		break;
1703 
1704 	case 8:
1705 		regval = *(uint64_t *)vaddr;
1706 		break;
1707 	}
1708 
1709 	RGE_DEBUG(("rge_chip_peek_mem($%p, $%p) peeked 0x%llx from $%p",
1710 		(void *)rgep, (void *)ppd, regval, vaddr));
1711 
1712 	ppd->pp_acc_data = regval;
1713 }
1714 
static void rge_chip_poke_mem(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma	no_inline(rge_chip_poke_mem)

/*
 * rge_chip_poke_mem() -- write a 1/2/4/8-byte value directly into
 * kernel memory; ppd->pp_acc_offset holds a (pre-validated) virtual
 * address computed by rge_pp_ioctl()
 */
static void
rge_chip_poke_mem(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval;
	void *vaddr;

	RGE_TRACE(("rge_chip_poke_mem($%p, $%p)",
		(void *)rgep, (void *)ppd));

	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
	regval = ppd->pp_acc_data;

	RGE_DEBUG(("rge_chip_poke_mem($%p, $%p) poking 0x%llx at $%p",
		(void *)rgep, (void *)ppd, regval, vaddr));

	/* pp_acc_size was validated by rge_pp_ioctl() */
	switch (ppd->pp_acc_size) {
	case 1:
		*(uint8_t *)vaddr = (uint8_t)regval;
		break;

	case 2:
		*(uint16_t *)vaddr = (uint16_t)regval;
		break;

	case 4:
		*(uint32_t *)vaddr = (uint32_t)regval;
		break;

	case 8:
		*(uint64_t *)vaddr = (uint64_t)regval;
		break;
	}
}
1751 
static enum ioc_reply rge_pp_ioctl(rge_t *rgep, int cmd, mblk_t *mp,
					struct iocblk *iocp);
#pragma	no_inline(rge_pp_ioctl)

/*
 * rge_pp_ioctl() -- handle a RGE_PEEK/RGE_POKE ioctl: validate the
 * embedded rge_peekpoke_t, select the access routine for the chosen
 * address space, then perform the access.
 *
 * Returns IOC_REPLY (peek: data to send back), IOC_ACK (poke), or
 * IOC_INVAL on any validation failure.
 */
static enum ioc_reply
rge_pp_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	void (*ppfn)(rge_t *rgep, rge_peekpoke_t *ppd);
	rge_peekpoke_t *ppd;
	dma_area_t *areap;
	uint64_t sizemask;	/* valid pp_acc_size bits for the space */
	uint64_t mem_va;	/* base address added to pp_acc_offset */
	uint64_t maxoff;	/* pp_acc_offset must be below this */
	boolean_t peek;

	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_pp_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case RGE_PEEK:
		peek = B_TRUE;
		break;

	case RGE_POKE:
		peek = B_FALSE;
		break;
	}

	/*
	 * Validate format of ioctl
	 */
	if (iocp->ioc_count != sizeof (rge_peekpoke_t))
		return (IOC_INVAL);
	if (mp->b_cont == NULL)
		return (IOC_INVAL);
	ppd = (rge_peekpoke_t *)mp->b_cont->b_rptr;

	/*
	 * Validate request parameters
	 */
	switch (ppd->pp_acc_space) {
	default:
		return (IOC_INVAL);

	case RGE_PP_SPACE_CFG:
		/*
		 * Config space
		 */
		sizemask = 8|4|2|1;
		mem_va = 0;
		maxoff = PCI_CONF_HDR_SIZE;
		ppfn = peek ? rge_chip_peek_cfg : rge_chip_poke_cfg;
		break;

	case RGE_PP_SPACE_REG:
		/*
		 * Memory-mapped I/O space
		 */
		sizemask = 8|4|2|1;
		mem_va = 0;
		maxoff = RGE_REGISTER_MAX;
		ppfn = peek ? rge_chip_peek_reg : rge_chip_poke_reg;
		break;

	case RGE_PP_SPACE_MII:
		/*
		 * PHY's MII registers
		 * NB: all PHY registers are two bytes, but the
		 * addresses increment in ones (word addressing).
		 * So we scale the address here, then undo the
		 * transformation inside the peek/poke functions.
		 */
		ppd->pp_acc_offset *= 2;
		sizemask = 2;
		mem_va = 0;
		maxoff = (MII_MAXREG+1)*2;
		ppfn = peek ? rge_chip_peek_mii : rge_chip_poke_mii;
		break;

	case RGE_PP_SPACE_RGE:
		/*
		 * RGE data structure!
		 */
		sizemask = 8|4|2|1;
		mem_va = (uintptr_t)rgep;
		maxoff = sizeof (*rgep);
		ppfn = peek ? rge_chip_peek_mem : rge_chip_poke_mem;
		break;

	case RGE_PP_SPACE_STATISTICS:
	case RGE_PP_SPACE_TXDESC:
	case RGE_PP_SPACE_TXBUFF:
	case RGE_PP_SPACE_RXDESC:
	case RGE_PP_SPACE_RXBUFF:
		/*
		 * Various DMA_AREAs
		 */
		switch (ppd->pp_acc_space) {
		case RGE_PP_SPACE_TXDESC:
			areap = &rgep->dma_area_txdesc;
			break;
		case RGE_PP_SPACE_TXBUFF:
			areap = &rgep->dma_area_txbuf[0];
			break;
		case RGE_PP_SPACE_RXDESC:
			areap = &rgep->dma_area_rxdesc;
			break;
		case RGE_PP_SPACE_RXBUFF:
			areap = &rgep->dma_area_rxbuf[0];
			break;
		case RGE_PP_SPACE_STATISTICS:
			areap = &rgep->dma_area_stats;
			break;
		}

		sizemask = 8|4|2|1;
		mem_va = (uintptr_t)areap->mem_va;
		maxoff = areap->alength;
		ppfn = peek ? rge_chip_peek_mem : rge_chip_poke_mem;
		break;
	}

	/* Access size must be one the chosen space supports ... */
	switch (ppd->pp_acc_size) {
	default:
		return (IOC_INVAL);

	case 8:
	case 4:
	case 2:
	case 1:
		if ((ppd->pp_acc_size & sizemask) == 0)
			return (IOC_INVAL);
		break;
	}

	/* ... the offset naturally aligned for that size ... */
	if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
		return (IOC_INVAL);

	/* ... and the whole access within bounds */
	if (ppd->pp_acc_offset >= maxoff)
		return (IOC_INVAL);

	if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff)
		return (IOC_INVAL);

	/*
	 * All OK - go do it!
	 */
	ppd->pp_acc_offset += mem_va;
	(*ppfn)(rgep, ppd);
	return (peek ? IOC_REPLY : IOC_ACK);
}
1905 
static enum ioc_reply rge_diag_ioctl(rge_t *rgep, int cmd, mblk_t *mp,
					struct iocblk *iocp);
#pragma	no_inline(rge_diag_ioctl)

/*
 * rge_diag_ioctl() -- dispatch the diagnostic ioctls (only compiled
 * in when RGE_DEBUGGING or RGE_DO_PPIO is enabled); must be called
 * with genlock held
 */
static enum ioc_reply
rge_diag_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	ASSERT(mutex_owned(rgep->genlock));

	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_diag_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case RGE_DIAG:
		/*
		 * Currently a no-op
		 */
		return (IOC_ACK);

	case RGE_PEEK:
	case RGE_POKE:
		return (rge_pp_ioctl(rgep, cmd, mp, iocp));

	case RGE_PHY_RESET:
		return (IOC_RESTART_ACK);

	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		/*
		 * Reset and reinitialise the chip hardware
		 */
		rge_restart(rgep);
		return (IOC_ACK);
	}

	/* NOTREACHED */
}
1945 
1946 #endif	/* RGE_DEBUGGING || RGE_DO_PPIO */
1947 
static enum ioc_reply rge_mii_ioctl(rge_t *rgep, int cmd, mblk_t *mp,
				    struct iocblk *iocp);
#pragma	no_inline(rge_mii_ioctl)

/*
 * rge_mii_ioctl() -- handle RGE_MII_READ/RGE_MII_WRITE: validate the
 * embedded rge_mii_rw structure, then read or write the named PHY
 * register
 */
static enum ioc_reply
rge_mii_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	struct rge_mii_rw *miirwp;

	/*
	 * Validate format of ioctl
	 */
	if (iocp->ioc_count != sizeof (struct rge_mii_rw))
		return (IOC_INVAL);
	if (mp->b_cont == NULL)
		return (IOC_INVAL);
	miirwp = (struct rge_mii_rw *)mp->b_cont->b_rptr;

	/*
	 * Validate request parameters ...
	 */
	if (miirwp->mii_reg > MII_MAXREG)
		return (IOC_INVAL);

	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_mii_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case RGE_MII_READ:
		miirwp->mii_data = rge_mii_get16(rgep, miirwp->mii_reg);
		return (IOC_REPLY);

	case RGE_MII_WRITE:
		rge_mii_put16(rgep, miirwp->mii_reg, miirwp->mii_data);
		return (IOC_ACK);
	}

	/* NOTREACHED */
}
1989 
enum ioc_reply rge_chip_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp,
				struct iocblk *iocp);
#pragma	no_inline(rge_chip_ioctl)

/*
 * rge_chip_ioctl() -- top-level dispatcher for the driver-specific
 * ioctls; must be called with genlock held
 */
enum ioc_reply
rge_chip_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	RGE_TRACE(("rge_chip_ioctl($%p, $%p, $%p, $%p)",
		(void *)rgep, (void *)wq, (void *)mp, (void *)iocp));

	ASSERT(mutex_owned(rgep->genlock));

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_chip_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		/* Diagnostic ioctls: only available in debug builds */
#if	RGE_DEBUGGING || RGE_DO_PPIO
		return (rge_diag_ioctl(rgep, cmd, mp, iocp));
#else
		return (IOC_INVAL);
#endif	/* RGE_DEBUGGING || RGE_DO_PPIO */

	case RGE_MII_READ:
	case RGE_MII_WRITE:
		return (rge_mii_ioctl(rgep, cmd, mp, iocp));

	}

	/* NOTREACHED */
}
2031