xref: /titanic_51/usr/src/uts/common/io/rge/rge_chip.c (revision 5ec2209c9e5bae1d9b4218839a10cb209bb5d7ab)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include "rge.h"
27 
28 #define	REG32(rgep, reg)	((uint32_t *)(rgep->io_regs+(reg)))
29 #define	REG16(rgep, reg)	((uint16_t *)(rgep->io_regs+(reg)))
30 #define	REG8(rgep, reg)		((uint8_t *)(rgep->io_regs+(reg)))
31 #define	PIO_ADDR(rgep, offset)	((void *)(rgep->io_regs+(offset)))
32 
33 /*
34  * Patchable globals:
35  *
36  *	rge_autorecover
37  *		Enables/disables automatic recovery after fault detection
38  */
39 static uint32_t rge_autorecover = 1;
40 
41 /*
42  * globals:
43  */
44 #define	RGE_DBG		RGE_DBG_REGS	/* debug flag for this code	*/
45 static uint32_t rge_watchdog_count	= 1 << 5;
46 
47 /*
48  * Operating register get/set access routines
49  */
50 
static uint32_t rge_reg_get32(rge_t *rgep, uintptr_t regno);
#pragma	inline(rge_reg_get32)

/*
 * Read the 32-bit operating register at byte offset <regno>
 * in the chip's mapped register space.
 */
static uint32_t
rge_reg_get32(rge_t *rgep, uintptr_t regno)
{
	RGE_TRACE(("rge_reg_get32($%p, 0x%lx)",
	    (void *)rgep, regno));

	return (ddi_get32(rgep->io_handle, REG32(rgep, regno)));
}
62 
static void rge_reg_put32(rge_t *rgep, uintptr_t regno, uint32_t data);
#pragma	inline(rge_reg_put32)

/*
 * Write <data> to the 32-bit operating register at byte offset
 * <regno> in the chip's mapped register space.
 */
static void
rge_reg_put32(rge_t *rgep, uintptr_t regno, uint32_t data)
{
	RGE_TRACE(("rge_reg_put32($%p, 0x%lx, 0x%x)",
	    (void *)rgep, regno, data));

	ddi_put32(rgep->io_handle, REG32(rgep, regno), data);
}
74 
75 static void rge_reg_set32(rge_t *rgep, uintptr_t regno, uint32_t bits);
76 #pragma	inline(rge_reg_set32)
77 
78 static void
79 rge_reg_set32(rge_t *rgep, uintptr_t regno, uint32_t bits)
80 {
81 	uint32_t regval;
82 
83 	RGE_TRACE(("rge_reg_set32($%p, 0x%lx, 0x%x)",
84 	    (void *)rgep, regno, bits));
85 
86 	regval = rge_reg_get32(rgep, regno);
87 	regval |= bits;
88 	rge_reg_put32(rgep, regno, regval);
89 }
90 
91 static void rge_reg_clr32(rge_t *rgep, uintptr_t regno, uint32_t bits);
92 #pragma	inline(rge_reg_clr32)
93 
94 static void
95 rge_reg_clr32(rge_t *rgep, uintptr_t regno, uint32_t bits)
96 {
97 	uint32_t regval;
98 
99 	RGE_TRACE(("rge_reg_clr32($%p, 0x%lx, 0x%x)",
100 	    (void *)rgep, regno, bits));
101 
102 	regval = rge_reg_get32(rgep, regno);
103 	regval &= ~bits;
104 	rge_reg_put32(rgep, regno, regval);
105 }
106 
static uint16_t rge_reg_get16(rge_t *rgep, uintptr_t regno);
#pragma	inline(rge_reg_get16)

/*
 * Read the 16-bit operating register at byte offset <regno>
 * in the chip's mapped register space.
 */
static uint16_t
rge_reg_get16(rge_t *rgep, uintptr_t regno)
{
	RGE_TRACE(("rge_reg_get16($%p, 0x%lx)",
	    (void *)rgep, regno));

	return (ddi_get16(rgep->io_handle, REG16(rgep, regno)));
}
118 
static void rge_reg_put16(rge_t *rgep, uintptr_t regno, uint16_t data);
#pragma	inline(rge_reg_put16)

/*
 * Write <data> to the 16-bit operating register at byte offset
 * <regno> in the chip's mapped register space.
 */
static void
rge_reg_put16(rge_t *rgep, uintptr_t regno, uint16_t data)
{
	RGE_TRACE(("rge_reg_put16($%p, 0x%lx, 0x%x)",
	    (void *)rgep, regno, data));

	ddi_put16(rgep->io_handle, REG16(rgep, regno), data);
}
130 
static uint8_t rge_reg_get8(rge_t *rgep, uintptr_t regno);
#pragma	inline(rge_reg_get8)

/*
 * Read the 8-bit operating register at byte offset <regno>
 * in the chip's mapped register space.
 */
static uint8_t
rge_reg_get8(rge_t *rgep, uintptr_t regno)
{
	RGE_TRACE(("rge_reg_get8($%p, 0x%lx)",
	    (void *)rgep, regno));

	return (ddi_get8(rgep->io_handle, REG8(rgep, regno)));
}
142 
static void rge_reg_put8(rge_t *rgep, uintptr_t regno, uint8_t data);
#pragma	inline(rge_reg_put8)

/*
 * Write <data> to the 8-bit operating register at byte offset
 * <regno> in the chip's mapped register space.
 */
static void
rge_reg_put8(rge_t *rgep, uintptr_t regno, uint8_t data)
{
	RGE_TRACE(("rge_reg_put8($%p, 0x%lx, 0x%x)",
	    (void *)rgep, regno, data));

	ddi_put8(rgep->io_handle, REG8(rgep, regno), data);
}
154 
155 static void rge_reg_set8(rge_t *rgep, uintptr_t regno, uint8_t bits);
156 #pragma	inline(rge_reg_set8)
157 
158 static void
159 rge_reg_set8(rge_t *rgep, uintptr_t regno, uint8_t bits)
160 {
161 	uint8_t regval;
162 
163 	RGE_TRACE(("rge_reg_set8($%p, 0x%lx, 0x%x)",
164 	    (void *)rgep, regno, bits));
165 
166 	regval = rge_reg_get8(rgep, regno);
167 	regval |= bits;
168 	rge_reg_put8(rgep, regno, regval);
169 }
170 
171 static void rge_reg_clr8(rge_t *rgep, uintptr_t regno, uint8_t bits);
172 #pragma	inline(rge_reg_clr8)
173 
174 static void
175 rge_reg_clr8(rge_t *rgep, uintptr_t regno, uint8_t bits)
176 {
177 	uint8_t regval;
178 
179 	RGE_TRACE(("rge_reg_clr8($%p, 0x%lx, 0x%x)",
180 	    (void *)rgep, regno, bits));
181 
182 	regval = rge_reg_get8(rgep, regno);
183 	regval &= ~bits;
184 	rge_reg_put8(rgep, regno, regval);
185 }
186 
187 uint16_t rge_mii_get16(rge_t *rgep, uintptr_t mii);
188 #pragma	no_inline(rge_mii_get16)
189 
190 uint16_t
191 rge_mii_get16(rge_t *rgep, uintptr_t mii)
192 {
193 	uint32_t regval;
194 	uint32_t val32;
195 	uint32_t i;
196 
197 	regval = (mii & PHY_REG_MASK) << PHY_REG_SHIFT;
198 	rge_reg_put32(rgep, PHY_ACCESS_REG, regval);
199 
200 	/*
201 	 * Waiting for PHY reading OK
202 	 */
203 	for (i = 0; i < PHY_RESET_LOOP; i++) {
204 		drv_usecwait(1000);
205 		val32 = rge_reg_get32(rgep, PHY_ACCESS_REG);
206 		if (val32 & PHY_ACCESS_WR_FLAG)
207 			return ((uint16_t)(val32 & 0xffff));
208 	}
209 
210 	RGE_REPORT((rgep, "rge_mii_get16(0x%x) fail, val = %x", mii, val32));
211 	return ((uint16_t)~0u);
212 }
213 
void rge_mii_put16(rge_t *rgep, uintptr_t mii, uint16_t data);
#pragma	no_inline(rge_mii_put16)

/*
 * Write <data> to a 16-bit PHY (MII) register through the chip's
 * indirect PHY access register: post register number + data with
 * the WR flag set, then poll (up to PHY_RESET_LOOP times, 1ms
 * apart) for the chip to drop the WR flag, indicating completion.
 * A timeout is reported but not returned to the caller.
 */
void
rge_mii_put16(rge_t *rgep, uintptr_t mii, uint16_t data)
{
	uint32_t regval;
	uint32_t val32;
	uint32_t i;

	regval = (mii & PHY_REG_MASK) << PHY_REG_SHIFT;
	regval |= data & PHY_DATA_MASK;
	regval |= PHY_ACCESS_WR_FLAG;
	rge_reg_put32(rgep, PHY_ACCESS_REG, regval);

	/*
	 * Waiting for PHY writing OK
	 */
	for (i = 0; i < PHY_RESET_LOOP; i++) {
		drv_usecwait(1000);
		val32 = rge_reg_get32(rgep, PHY_ACCESS_REG);
		if (!(val32 & PHY_ACCESS_WR_FLAG))
			return;
	}
	RGE_REPORT((rgep, "rge_mii_put16(0x%lx, 0x%x) fail",
	    mii, data));
}
241 
void rge_ephy_put16(rge_t *rgep, uintptr_t emii, uint16_t data);
#pragma	no_inline(rge_ephy_put16)

/*
 * Write <data> to a 16-bit EPHY register through the chip's
 * indirect EPHY access register; same post-and-poll protocol as
 * rge_mii_put16(), but using the EPHY_* register/flag set.
 * A timeout is reported but not returned to the caller.
 */
void
rge_ephy_put16(rge_t *rgep, uintptr_t emii, uint16_t data)
{
	uint32_t regval;
	uint32_t val32;
	uint32_t i;

	regval = (emii & EPHY_REG_MASK) << EPHY_REG_SHIFT;
	regval |= data & EPHY_DATA_MASK;
	regval |= EPHY_ACCESS_WR_FLAG;
	rge_reg_put32(rgep, EPHY_ACCESS_REG, regval);

	/*
	 * Waiting for PHY writing OK
	 */
	for (i = 0; i < PHY_RESET_LOOP; i++) {
		drv_usecwait(1000);
		val32 = rge_reg_get32(rgep, EPHY_ACCESS_REG);
		if (!(val32 & EPHY_ACCESS_WR_FLAG))
			return;
	}
	RGE_REPORT((rgep, "rge_ephy_put16(0x%lx, 0x%x) fail",
	    emii, data));
}
269 
270 /*
271  * Atomically shift a 32-bit word left, returning
272  * the value it had *before* the shift was applied
273  */
274 static uint32_t rge_atomic_shl32(uint32_t *sp, uint_t count);
275 #pragma	inline(rge_mii_put16)
276 
277 static uint32_t
278 rge_atomic_shl32(uint32_t *sp, uint_t count)
279 {
280 	uint32_t oldval;
281 	uint32_t newval;
282 
283 	/* ATOMICALLY */
284 	do {
285 		oldval = *sp;
286 		newval = oldval << count;
287 	} while (cas32(sp, oldval, newval) != oldval);
288 
289 	return (oldval);
290 }
291 
292 /*
293  * PHY operation routines
294  */
295 #if	RGE_DEBUGGING
296 
/*
 * Debug helper: read all 32 MII registers of the PHY and log
 * them 8 per line.  Caller must hold <genlock>, since the
 * indirect MII access sequence is not itself serialised.
 */
void
rge_phydump(rge_t *rgep)
{
	uint16_t regs[32];
	int i;

	ASSERT(mutex_owned(rgep->genlock));

	for (i = 0; i < 32; ++i) {
		regs[i] = rge_mii_get16(rgep, i);
	}

	for (i = 0; i < 32; i += 8)
		RGE_DEBUG(("rge_phydump: "
		    "0x%04x %04x %04x %04x %04x %04x %04x %04x",
		    regs[i+0], regs[i+1], regs[i+2], regs[i+3],
		    regs[i+4], regs[i+5], regs[i+6], regs[i+7]));
}
315 
316 #endif	/* RGE_DEBUGGING */
317 
/*
 * Periodic PHY health check, called while the link is being
 * monitored.  Counts consecutive link-down observations and
 * resets the PHY when the known RTL8169S/8110S "PCS bug"
 * conditions are met; any link-up observation clears the count.
 */
static void
rge_phy_check(rge_t *rgep)
{
	uint16_t gig_ctl;

	if (rgep->param_link_up  == LINK_STATE_DOWN) {
		/*
		 * RTL8169S/8110S PHY has the "PCS bug".  Need to reset the
		 * PHY every 15 observations when the link is down and
		 * 1000Mb/s full-duplex is being advertised.
		 */
		if (rgep->chipid.phy_ver == PHY_VER_S) {
			gig_ctl = rge_mii_get16(rgep, MII_1000BASE_T_CONTROL);
			if (gig_ctl & MII_1000BT_CTL_ADV_FDX) {
				rgep->link_down_count++;
				if (rgep->link_down_count > 15) {
					(void) rge_phy_reset(rgep);
					rgep->stats.phy_reset++;
					rgep->link_down_count = 0;
				}
			}
		}
	} else {
		rgep->link_down_count = 0;
	}
}
343 
344 /*
345  * Basic low-level function to reset the PHY.
346  * Doesn't incorporate any special-case workarounds.
347  *
348  * Returns TRUE on success, FALSE if the RESET bit doesn't clear
349  */
boolean_t
rge_phy_reset(rge_t *rgep)
{
	uint16_t control;
	uint_t count;

	/*
	 * Set the PHY RESET bit, then poll for it to self-clear.
	 * NOTE(review): the explicit delay is 5 x 100us = 500us, not
	 * the 5ms the original comment claimed -- though each
	 * rge_mii_get16()/rge_mii_put16() call adds its own internal
	 * polling delay on top.  Confirm against the PHY's specified
	 * reset time before relying on either figure.
	 */
	control = rge_mii_get16(rgep, MII_CONTROL);
	rge_mii_put16(rgep, MII_CONTROL, control | MII_CONTROL_RESET);
	for (count = 0; count < 5; count++) {
		drv_usecwait(100);
		control = rge_mii_get16(rgep, MII_CONTROL);
		/* BIC: "bits clear" -- RESET has self-cleared, done */
		if (BIC(control, MII_CONTROL_RESET))
			return (B_TRUE);
	}

	RGE_REPORT((rgep, "rge_phy_reset: FAILED, control now 0x%x", control));
	return (B_FALSE);
}
371 
372 /*
373  * Synchronise the PHY's speed/duplex/autonegotiation capabilities
374  * and advertisements with the required settings as specified by the various
375  * param_* variables that can be poked via the NDD interface.
376  *
377  * We always reset the PHY and reprogram *all* the relevant registers,
378  * not just those changed.  This should cause the link to go down, and then
379  * back up again once the link is stable and autonegotiation (if enabled)
380  * is complete.  We should get a link state change interrupt somewhere along
381  * the way ...
382  *
383  * NOTE: <genlock> must already be held by the caller
384  */
385 void
386 rge_phy_update(rge_t *rgep)
387 {
388 	boolean_t adv_autoneg;
389 	boolean_t adv_pause;
390 	boolean_t adv_asym_pause;
391 	boolean_t adv_1000fdx;
392 	boolean_t adv_1000hdx;
393 	boolean_t adv_100fdx;
394 	boolean_t adv_100hdx;
395 	boolean_t adv_10fdx;
396 	boolean_t adv_10hdx;
397 
398 	uint16_t control;
399 	uint16_t gigctrl;
400 	uint16_t anar;
401 
402 	ASSERT(mutex_owned(rgep->genlock));
403 
404 	RGE_DEBUG(("rge_phy_update: autoneg %d "
405 	    "pause %d asym_pause %d "
406 	    "1000fdx %d 1000hdx %d "
407 	    "100fdx %d 100hdx %d "
408 	    "10fdx %d 10hdx %d ",
409 	    rgep->param_adv_autoneg,
410 	    rgep->param_adv_pause, rgep->param_adv_asym_pause,
411 	    rgep->param_adv_1000fdx, rgep->param_adv_1000hdx,
412 	    rgep->param_adv_100fdx, rgep->param_adv_100hdx,
413 	    rgep->param_adv_10fdx, rgep->param_adv_10hdx));
414 
415 	control = gigctrl = anar = 0;
416 
417 	/*
418 	 * PHY settings are normally based on the param_* variables,
419 	 * but if any loopback mode is in effect, that takes precedence.
420 	 *
421 	 * RGE supports MAC-internal loopback, PHY-internal loopback,
422 	 * and External loopback at a variety of speeds (with a special
423 	 * cable).  In all cases, autoneg is turned OFF, full-duplex
424 	 * is turned ON, and the speed/mastership is forced.
425 	 */
426 	switch (rgep->param_loop_mode) {
427 	case RGE_LOOP_NONE:
428 	default:
429 		adv_autoneg = rgep->param_adv_autoneg;
430 		adv_pause = rgep->param_adv_pause;
431 		adv_asym_pause = rgep->param_adv_asym_pause;
432 		adv_1000fdx = rgep->param_adv_1000fdx;
433 		adv_1000hdx = rgep->param_adv_1000hdx;
434 		adv_100fdx = rgep->param_adv_100fdx;
435 		adv_100hdx = rgep->param_adv_100hdx;
436 		adv_10fdx = rgep->param_adv_10fdx;
437 		adv_10hdx = rgep->param_adv_10hdx;
438 		break;
439 
440 	case RGE_LOOP_INTERNAL_PHY:
441 	case RGE_LOOP_INTERNAL_MAC:
442 		adv_autoneg = adv_pause = adv_asym_pause = B_FALSE;
443 		adv_1000fdx = adv_100fdx = adv_10fdx = B_FALSE;
444 		adv_1000hdx = adv_100hdx = adv_10hdx = B_FALSE;
445 		rgep->param_link_duplex = LINK_DUPLEX_FULL;
446 
447 		switch (rgep->param_loop_mode) {
448 		case RGE_LOOP_INTERNAL_PHY:
449 			if (rgep->chipid.mac_ver != MAC_VER_8101E) {
450 				rgep->param_link_speed = 1000;
451 				adv_1000fdx = B_TRUE;
452 			} else {
453 				rgep->param_link_speed = 100;
454 				adv_100fdx = B_TRUE;
455 			}
456 			control = MII_CONTROL_LOOPBACK;
457 			break;
458 
459 		case RGE_LOOP_INTERNAL_MAC:
460 			if (rgep->chipid.mac_ver != MAC_VER_8101E) {
461 				rgep->param_link_speed = 1000;
462 				adv_1000fdx = B_TRUE;
463 			} else {
464 				rgep->param_link_speed = 100;
465 				adv_100fdx = B_TRUE;
466 			break;
467 		}
468 	}
469 
470 	RGE_DEBUG(("rge_phy_update: autoneg %d "
471 	    "pause %d asym_pause %d "
472 	    "1000fdx %d 1000hdx %d "
473 	    "100fdx %d 100hdx %d "
474 	    "10fdx %d 10hdx %d ",
475 	    adv_autoneg,
476 	    adv_pause, adv_asym_pause,
477 	    adv_1000fdx, adv_1000hdx,
478 	    adv_100fdx, adv_100hdx,
479 	    adv_10fdx, adv_10hdx));
480 
481 	/*
482 	 * We should have at least one technology capability set;
483 	 * if not, we select a default of 1000Mb/s full-duplex
484 	 */
485 	if (!adv_1000fdx && !adv_100fdx && !adv_10fdx &&
486 	    !adv_1000hdx && !adv_100hdx && !adv_10hdx) {
487 		if (rgep->chipid.mac_ver != MAC_VER_8101E)
488 			adv_1000fdx = B_TRUE;
489 		} else {
490 			adv_1000fdx = B_FALSE;
491 			adv_100fdx = B_TRUE;
492 		}
493 	}
494 
495 	/*
496 	 * Now transform the adv_* variables into the proper settings
497 	 * of the PHY registers ...
498 	 *
499 	 * If autonegotiation is (now) enabled, we want to trigger
500 	 * a new autonegotiation cycle once the PHY has been
501 	 * programmed with the capabilities to be advertised.
502 	 *
503 	 * RTL8169/8110 doesn't support 1000Mb/s half-duplex.
504 	 */
505 	if (adv_autoneg)
506 		control |= MII_CONTROL_ANE|MII_CONTROL_RSAN;
507 
508 	if (adv_1000fdx)
509 		control |= MII_CONTROL_1GB|MII_CONTROL_FDUPLEX;
510 	else if (adv_1000hdx)
511 		control |= MII_CONTROL_1GB;
512 	else if (adv_100fdx)
513 		control |= MII_CONTROL_100MB|MII_CONTROL_FDUPLEX;
514 	else if (adv_100hdx)
515 		control |= MII_CONTROL_100MB;
516 	else if (adv_10fdx)
517 		control |= MII_CONTROL_FDUPLEX;
518 	else if (adv_10hdx)
519 		control |= 0;
520 	else
521 		{ _NOTE(EMPTY); }	/* Can't get here anyway ...	*/
522 
523 	if (adv_1000fdx) {
524 		gigctrl |= MII_1000BT_CTL_ADV_FDX;
525 		/*
526 		 * Chipset limitation: need set other capabilities to true
527 		 */
528 		if (rgep->chipid.is_pcie)
529 			adv_1000hdx = B_TRUE;
530 		adv_100fdx = B_TRUE;
531 		adv_100hdx  = B_TRUE;
532 		adv_10fdx = B_TRUE;
533 		adv_10hdx = B_TRUE;
534 	}
535 
536 	if (adv_1000hdx)
537 		gigctrl |= MII_1000BT_CTL_ADV_HDX;
538 
539 	if (adv_100fdx)
540 		anar |= MII_ABILITY_100BASE_TX_FD;
541 	if (adv_100hdx)
542 		anar |= MII_ABILITY_100BASE_TX;
543 	if (adv_10fdx)
544 		anar |= MII_ABILITY_10BASE_T_FD;
545 	if (adv_10hdx)
546 		anar |= MII_ABILITY_10BASE_T;
547 
548 	if (adv_pause)
549 		anar |= MII_ABILITY_PAUSE;
550 	if (adv_asym_pause)
551 		anar |= MII_ABILITY_ASMPAUSE;
552 
553 	/*
554 	 * Munge in any other fixed bits we require ...
555 	 */
556 	anar |= MII_AN_SELECTOR_8023;
557 
558 	/*
559 	 * Restart the PHY and write the new values.  Note the
560 	 * time, so that we can say whether subsequent link state
561 	 * changes can be attributed to our reprogramming the PHY
562 	 */
563 	rge_phy_init(rgep);
564 	if (rgep->chipid.mac_ver == MAC_VER_8168B_B ||
565 	    rgep->chipid.mac_ver == MAC_VER_8168B_C) {
566 		/* power up PHY for RTL8168B chipset */
567 		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
568 		rge_mii_put16(rgep, PHY_0E_REG, 0x0000);
569 		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
570 	}
571 	rge_mii_put16(rgep, MII_AN_ADVERT, anar);
572 	rge_mii_put16(rgep, MII_1000BASE_T_CONTROL, gigctrl);
573 	rge_mii_put16(rgep, MII_CONTROL, control);
574 
575 	RGE_DEBUG(("rge_phy_update: anar <- 0x%x", anar));
576 	RGE_DEBUG(("rge_phy_update: control <- 0x%x", control));
577 	RGE_DEBUG(("rge_phy_update: gigctrl <- 0x%x", gigctrl));
578 }
579 
void rge_phy_init(rge_t *rgep);
#pragma	no_inline(rge_phy_init)

/*
 * Apply the per-chip PHY initialisation sequence.
 *
 * The register/value pairs below are copied verbatim from the
 * Realtek Programming Guide, which gives no explanation for the
 * individual writes; do not reorder or "clean up" these sequences.
 * PHY_1F_REG selects the PHY register page, so each sequence is
 * bracketed by page-select writes and ends back on page 0.
 */
void
rge_phy_init(rge_t *rgep)
{
	rgep->phy_mii_addr = 1;

	/*
	 * Below phy config steps are copied from the Programming Guide
	 * (there's no detail comments for these steps.)
	 */
	switch (rgep->chipid.mac_ver) {
	case MAC_VER_8169S_D:
	case MAC_VER_8169S_E :
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_15_REG, 0x1000);
		rge_mii_put16(rgep, PHY_18_REG, 0x65c7);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0x00a1);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0x0008);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x1020);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x1000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x0800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x7000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xff41);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xde60);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x0140);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x0077);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x7800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x7000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xa000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xdf01);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xdf20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0xff95);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0xfa00);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xa800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xa000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xb000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xff41);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xde20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x0140);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x00bb);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xb800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xb000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xf000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xdf01);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xdf20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0xff95);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0xbf00);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xf800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xf000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		rge_mii_put16(rgep, PHY_0B_REG, 0x0000);
		break;

	case MAC_VER_8169SB:
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_1B_REG, 0xD41E);
		rge_mii_put16(rgep, PHY_0E_REG, 0x7bff);
		rge_mii_put16(rgep, PHY_GBCR_REG, GBCR_DEFAULT);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0002);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x90D0);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		break;

	case MAC_VER_8169SC:
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_ANER_REG, 0x0078);
		rge_mii_put16(rgep, PHY_ANNPRR_REG, 0x05dc);
		rge_mii_put16(rgep, PHY_GBCR_REG, 0x2672);
		rge_mii_put16(rgep, PHY_GBSR_REG, 0x6a14);
		rge_mii_put16(rgep, PHY_0B_REG, 0x7cb0);
		rge_mii_put16(rgep, PHY_0C_REG, 0xdb80);
		rge_mii_put16(rgep, PHY_1B_REG, 0xc414);
		rge_mii_put16(rgep, PHY_1C_REG, 0xef03);
		rge_mii_put16(rgep, PHY_1D_REG, 0x3dc8);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0003);
		rge_mii_put16(rgep, PHY_13_REG, 0x0600);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		break;

	case MAC_VER_8168:
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_ANER_REG, 0x00aa);
		rge_mii_put16(rgep, PHY_ANNPTR_REG, 0x3173);
		rge_mii_put16(rgep, PHY_ANNPRR_REG, 0x08fc);
		rge_mii_put16(rgep, PHY_GBCR_REG, 0xe2d0);
		rge_mii_put16(rgep, PHY_0B_REG, 0x941a);
		rge_mii_put16(rgep, PHY_18_REG, 0x65fe);
		rge_mii_put16(rgep, PHY_1C_REG, 0x1e02);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0002);
		rge_mii_put16(rgep, PHY_ANNPTR_REG, 0x103e);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		break;

	case MAC_VER_8168B_B:
	case MAC_VER_8168B_C:
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_0B_REG, 0x94b0);
		rge_mii_put16(rgep, PHY_1B_REG, 0xc416);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0003);
		rge_mii_put16(rgep, PHY_12_REG, 0x6096);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		break;
	}
}
689 
void rge_chip_ident(rge_t *rgep);
#pragma	no_inline(rge_chip_ident)

/*
 * Identify the chip: record MAC/PHY versions and PCIE-ness in
 * rgep->chipid, apply chip-specific PCI tweaks, and derive the
 * buffer sizes / MTU limits / tick interval that depend on them.
 */
void
rge_chip_ident(rge_t *rgep)
{
	chip_id_t *chip = &rgep->chipid;
	uint32_t val32;
	uint16_t val16;

	/*
	 * Read and record MAC version
	 */
	val32 = rge_reg_get32(rgep, TX_CONFIG_REG);
	val32 &= HW_VERSION_ID_0 | HW_VERSION_ID_1;
	chip->mac_ver = val32;
	/* PCIE if the PCI Express capability is present in config space */
	chip->is_pcie = pci_lcap_locate(rgep->cfg_handle,
	    PCI_CAP_ID_PCI_E, &val16) == DDI_SUCCESS;

	/*
	 * Read and record PHY version
	 */
	val16 = rge_mii_get16(rgep, PHY_ID_REG_2);
	val16 &= PHY_VER_MASK;
	chip->phy_ver = val16;

	/* set pci latency timer */
	if (chip->mac_ver == MAC_VER_8169 ||
	    chip->mac_ver == MAC_VER_8169S_D ||
	    chip->mac_ver == MAC_VER_8169SC)
		pci_config_put8(rgep->cfg_handle, PCI_CONF_LATENCY_TIMER, 0x40);

	if (chip->mac_ver == MAC_VER_8169SC) {
		/*
		 * NOTE(review): after masking with 0x0300, val16 can never
		 * equal 0x1, so the 66Mhz branch below is unreachable --
		 * possibly 0x0100 was intended.  Left as-is pending
		 * confirmation against the chip documentation.
		 */
		val16 = rge_reg_get16(rgep, RT_CONFIG_1_REG);
		val16 &= 0x0300;
		if (val16 == 0x1)	/* 66Mhz PCI */
			pci_config_put32(rgep->cfg_handle, 0x7c, 0x00ff00ff);
		else if (val16 == 0x0) /* 33Mhz PCI */
			pci_config_put32(rgep->cfg_handle, 0x7c, 0x00ffff00);
	}

	/*
	 * PCIE chipset require the Rx buffer start address must be
	 * 8-byte alignment and the Rx buffer size must be multiple of 8.
	 * We'll just use bcopy in receive procedure for the PCIE chipset.
	 */
	if (chip->is_pcie) {
		rgep->chip_flags |= CHIP_FLAG_FORCE_BCOPY;
		if (rgep->default_mtu > ETHERMTU) {
			rge_notice(rgep, "Jumbo packets not supported "
			    "for this PCIE chipset");
			rgep->default_mtu = ETHERMTU;
		}
	}
	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
		rgep->head_room = 0;
	else
		rgep->head_room = RGE_HEADROOM;

	/*
	 * Initialize other variables: clamp the MTU, then size the
	 * Tx/Rx buffers for jumbo or standard frames accordingly.
	 */
	if (rgep->default_mtu < ETHERMTU || rgep->default_mtu > RGE_JUMBO_MTU)
		rgep->default_mtu = ETHERMTU;
	if (rgep->default_mtu > ETHERMTU) {
		rgep->rxbuf_size = RGE_BUFF_SIZE_JUMBO;
		rgep->txbuf_size = RGE_BUFF_SIZE_JUMBO;
		rgep->ethmax_size = RGE_JUMBO_SIZE;
	} else {
		rgep->rxbuf_size = RGE_BUFF_SIZE_STD;
		rgep->txbuf_size = RGE_BUFF_SIZE_STD;
		rgep->ethmax_size = ETHERMAX;
	}
	chip->rxconfig = RX_CONFIG_DEFAULT;
	chip->txconfig = TX_CONFIG_DEFAULT;

	/* interval to update statistics for polling mode */
	rgep->tick_delta = drv_usectohz(1000*1000/CLK_TICK);

	/* ensure we are not in polling mode */
	rgep->curr_tick = ddi_get_lbolt() - 2*rgep->tick_delta;
	RGE_TRACE(("%s: MAC version = %x, PHY version = %x",
	    rgep->ifname, chip->mac_ver, chip->phy_ver));
}
774 
775 /*
776  * Perform first-stage chip (re-)initialisation, using only config-space
777  * accesses:
778  *
779  * + Read the vendor/device/revision/subsystem/cache-line-size registers,
780  *   returning the data in the structure pointed to by <idp>.
781  * + Enable Memory Space accesses.
782  * + Enable Bus Mastering according.
783  */
void rge_chip_cfg_init(rge_t *rgep, chip_id_t *cidp);
#pragma	no_inline(rge_chip_cfg_init)

void
rge_chip_cfg_init(rge_t *rgep, chip_id_t *cidp)
{
	ddi_acc_handle_t handle;
	uint16_t commd;

	handle = rgep->cfg_handle;

	/*
	 * Save the PCI identification and configuration registers:
	 * command, vendor/device, subsystem vendor/device, revision,
	 * cache line size and latency timer.
	 */
	cidp->command = pci_config_get16(handle, PCI_CONF_COMM);
	cidp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
	cidp->device = pci_config_get16(handle, PCI_CONF_DEVID);
	cidp->subven = pci_config_get16(handle, PCI_CONF_SUBVENID);
	cidp->subdev = pci_config_get16(handle, PCI_CONF_SUBSYSID);
	cidp->revision = pci_config_get8(handle, PCI_CONF_REVID);
	cidp->clsize = pci_config_get8(handle, PCI_CONF_CACHE_LINESZ);
	cidp->latency = pci_config_get8(handle, PCI_CONF_LATENCY_TIMER);

	/*
	 * Turn on Master Enable (DMA) and IO Enable bits.
	 * Enable PCI Memory Space accesses
	 */
	commd = cidp->command;
	commd |= PCI_COMM_ME | PCI_COMM_MAE | PCI_COMM_IO;
	pci_config_put16(handle, PCI_CONF_COMM, commd);

	RGE_DEBUG(("rge_chip_cfg_init: vendor 0x%x device 0x%x revision 0x%x",
	    cidp->vendor, cidp->device, cidp->revision));
	RGE_DEBUG(("rge_chip_cfg_init: subven 0x%x subdev 0x%x",
	    cidp->subven, cidp->subdev));
	RGE_DEBUG(("rge_chip_cfg_init: clsize %d latency %d command 0x%x",
	    cidp->clsize, cidp->latency, cidp->command));
}
822 
823 int rge_chip_reset(rge_t *rgep);
824 #pragma	no_inline(rge_chip_reset)
825 
826 int
827 rge_chip_reset(rge_t *rgep)
828 {
829 	int i;
830 	uint8_t val8;
831 
832 	/*
833 	 * Chip should be in STOP state
834 	 */
835 	rge_reg_clr8(rgep, RT_COMMAND_REG,
836 	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
837 
838 	/*
839 	 * Disable interrupt
840 	 */
841 	rgep->int_mask = INT_MASK_NONE;
842 	rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);
843 
844 	/*
845 	 * Clear pended interrupt
846 	 */
847 	rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL);
848 
849 	/*
850 	 * Reset chip
851 	 */
852 	rge_reg_set8(rgep, RT_COMMAND_REG, RT_COMMAND_RESET);
853 
854 	/*
855 	 * Wait for reset success
856 	 */
857 	for (i = 0; i < CHIP_RESET_LOOP; i++) {
858 		drv_usecwait(10);
859 		val8 = rge_reg_get8(rgep, RT_COMMAND_REG);
860 		if (!(val8 & RT_COMMAND_RESET)) {
861 			rgep->rge_chip_state = RGE_CHIP_RESET;
862 			return (0);
863 		}
864 	}
865 	RGE_REPORT((rgep, "rge_chip_reset fail."));
866 	return (-1);
867 }
868 
void rge_chip_init(rge_t *rgep);
#pragma	no_inline(rge_chip_init)

/*
 * Full programming of the chip after reset: EPHY/CSI tweaks for
 * specific MAC versions, PHY setup, checksum/vlan offload,
 * packet-size limits, Tx/Rx ring base addresses, multicast
 * filter and misc registers.  The sequence is order-sensitive
 * (e.g. non-PCIE parts must enable Tx/Rx before the config
 * registers are written) -- do not reorder.
 */
void
rge_chip_init(rge_t *rgep)
{
	uint32_t val32;
	uint32_t val16;	/* NOTE: 32-bit despite the name; holds 16-bit data */
	uint32_t *hashp;
	chip_id_t *chip = &rgep->chipid;

	/*
	 * Increase the threshold voltage of RX sensitivity
	 */
	if (chip->mac_ver == MAC_VER_8168B_B ||
	    chip->mac_ver == MAC_VER_8168B_C ||
	    chip->mac_ver == MAC_VER_8101E ||
	    chip->mac_ver == MAC_VER_8101E_C) {
		rge_ephy_put16(rgep, 0x01, 0x1bd3);
	}

	/*
	 * 8168/8168B_B: PHY status + CSI register sequence (values
	 * per vendor guidance; no public documentation).
	 */
	if (chip->mac_ver == MAC_VER_8168 ||
	    chip->mac_ver == MAC_VER_8168B_B) {
		val16 = rge_reg_get8(rgep, PHY_STATUS_REG);
		val16 = 0x12<<8 | val16;
		rge_reg_put16(rgep, PHY_STATUS_REG, val16);
		rge_reg_put32(rgep, RT_CSI_DATA_REG, 0x00021c01);
		rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f088);
		rge_reg_put32(rgep, RT_CSI_DATA_REG, 0x00004000);
		rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f0b0);
		rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x0000f068);
		val32 = rge_reg_get32(rgep, RT_CSI_DATA_REG);
		val32 |= 0x7000;
		val32 &= 0xffff5fff;
		rge_reg_put32(rgep, RT_CSI_DATA_REG, val32);
		rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f068);
	}

	/*
	 * Config MII register
	 */
	rgep->param_link_up = LINK_STATE_DOWN;
	rge_phy_update(rgep);

	/*
	 * Enable Rx checksum offload.
	 *  Then for vlan support, we must enable receive vlan de-tagging.
	 *  Otherwise, there'll be checksum error.
	 */
	val16 = rge_reg_get16(rgep, CPLUS_COMMAND_REG);
	val16 |= RX_CKSM_OFFLOAD | RX_VLAN_DETAG;
	if (chip->mac_ver == MAC_VER_8169S_D) {
		val16 |= CPLUS_BIT14 | MUL_PCI_RW_ENABLE;
		rge_reg_put8(rgep, RESV_82_REG, 0x01);
	}
	/* low two bits are always forced clear on write-back */
	rge_reg_put16(rgep, CPLUS_COMMAND_REG, val16 & (~0x03));

	/*
	 * Start transmit/receive before set tx/rx configuration register
	 */
	if (!chip->is_pcie)
		rge_reg_set8(rgep, RT_COMMAND_REG,
		    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);

	/*
	 * Set dump tally counter register: high 32 bits of the stats
	 * DMA address first, then the low 32 bits merged with the
	 * register's reserved bits.
	 */
	val32 = rgep->dma_area_stats.cookie.dmac_laddress >> 32;
	rge_reg_put32(rgep, DUMP_COUNTER_REG_1, val32);
	val32 = rge_reg_get32(rgep, DUMP_COUNTER_REG_0);
	val32 &= DUMP_COUNTER_REG_RESV;
	val32 |= rgep->dma_area_stats.cookie.dmac_laddress;
	rge_reg_put32(rgep, DUMP_COUNTER_REG_0, val32);

	/*
	 * Change to config register write enable mode
	 */
	rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);

	/*
	 * Set Tx/Rx maximum packet size: jumbo, standard, or the
	 * 8101E's smaller standard limits.
	 */
	if (rgep->default_mtu > ETHERMTU) {
		rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_JUMBO);
		rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_JUMBO);
	} else if (rgep->chipid.mac_ver != MAC_VER_8101E) {
		rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_STD);
		rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_STD);
	} else {
		rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_STD_8101E);
		rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_STD_8101E);
	}

	/*
	 * Set receive configuration register
	 */
	val32 = rge_reg_get32(rgep, RX_CONFIG_REG);
	val32 &= RX_CONFIG_REG_RESV;
	if (rgep->promisc)
		val32 |= RX_ACCEPT_ALL_PKT;
	rge_reg_put32(rgep, RX_CONFIG_REG, val32 | chip->rxconfig);

	/*
	 * Set transmit configuration register
	 */
	val32 = rge_reg_get32(rgep, TX_CONFIG_REG);
	val32 &= TX_CONFIG_REG_RESV;
	rge_reg_put32(rgep, TX_CONFIG_REG, val32 | chip->txconfig);

	/*
	 * Set Tx/Rx descriptor register (64-bit DMA addresses split
	 * into lo/hi halves; the high-priority Tx ring is unused).
	 */
	val32 = rgep->tx_desc.cookie.dmac_laddress;
	rge_reg_put32(rgep, NORMAL_TX_RING_ADDR_LO_REG, val32);
	val32 = rgep->tx_desc.cookie.dmac_laddress >> 32;
	rge_reg_put32(rgep, NORMAL_TX_RING_ADDR_HI_REG, val32);
	rge_reg_put32(rgep, HIGH_TX_RING_ADDR_LO_REG, 0);
	rge_reg_put32(rgep, HIGH_TX_RING_ADDR_HI_REG, 0);
	val32 = rgep->rx_desc.cookie.dmac_laddress;
	rge_reg_put32(rgep, RX_RING_ADDR_LO_REG, val32);
	val32 = rgep->rx_desc.cookie.dmac_laddress >> 32;
	rge_reg_put32(rgep, RX_RING_ADDR_HI_REG, val32);

	/*
	 * Suggested setting from Realtek
	 */
	if (rgep->chipid.mac_ver != MAC_VER_8101E)
		rge_reg_put16(rgep, RESV_E2_REG, 0x282a);
	else
		rge_reg_put16(rgep, RESV_E2_REG, 0x0000);

	/*
	 * Set multicast register: accept everything in promiscuous
	 * mode, otherwise load the 64-bit hash filter.
	 */
	hashp = (uint32_t *)rgep->mcast_hash;
	if (rgep->promisc) {
		rge_reg_put32(rgep, MULTICAST_0_REG, ~0U);
		rge_reg_put32(rgep, MULTICAST_4_REG, ~0U);
	} else {
		rge_reg_put32(rgep, MULTICAST_0_REG, RGE_BSWAP_32(hashp[0]));
		rge_reg_put32(rgep, MULTICAST_4_REG, RGE_BSWAP_32(hashp[1]));
	}

	/*
	 * Misc register setting:
	 *   -- Missed packet counter: clear it
	 *   -- TimerInt Register
	 *   -- Timer count register
	 */
	rge_reg_put32(rgep, RX_PKT_MISS_COUNT_REG, 0);
	rge_reg_put32(rgep, TIMER_INT_REG, TIMER_INT_NONE);
	rge_reg_put32(rgep, TIMER_COUNT_REG, 0);

	/*
	 * disable the Unicast Wakeup Frame capability
	 */
	rge_reg_clr8(rgep, RT_CONFIG_5_REG, RT_UNI_WAKE_FRAME);

	/*
	 * Return to normal network/host communication mode
	 */
	rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
	drv_usecwait(20);
}
1033 
1034 /*
1035  * rge_chip_start() -- start the chip transmitting and/or receiving,
1036  * including enabling interrupts
1037  */
1038 void rge_chip_start(rge_t *rgep);
1039 #pragma	no_inline(rge_chip_start)
1040 
1041 void
1042 rge_chip_start(rge_t *rgep)
1043 {
1044 	/*
1045 	 * Clear statistics
1046 	 */
1047 	bzero(&rgep->stats, sizeof (rge_stats_t));
1048 	DMA_ZERO(rgep->dma_area_stats);
1049 
1050 	/*
1051 	 * Start transmit/receive
1052 	 */
1053 	rge_reg_set8(rgep, RT_COMMAND_REG,
1054 	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
1055 
1056 	/*
1057 	 * Enable interrupt
1058 	 */
1059 	rgep->int_mask = RGE_INT_MASK;
1060 	if (rgep->chipid.is_pcie) {
1061 		rgep->int_mask |= NO_TXDESC_INT;
1062 	}
1063 	rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);
1064 
1065 	/*
1066 	 * All done!
1067 	 */
1068 	rgep->rge_chip_state = RGE_CHIP_RUNNING;
1069 }
1070 
1071 /*
1072  * rge_chip_stop() -- stop board receiving
1073  *
1074  * Since this function is also invoked by rge_quiesce(), it
1075  * must not block; also, no tracing or logging takes place
1076  * when invoked by rge_quiesce().
1077  */
1078 void rge_chip_stop(rge_t *rgep, boolean_t fault);
1079 #pragma	no_inline(rge_chip_stop)
1080 
1081 void
1082 rge_chip_stop(rge_t *rgep, boolean_t fault)
1083 {
1084 	/*
1085 	 * Disable interrupt
1086 	 */
1087 	rgep->int_mask = INT_MASK_NONE;
1088 	rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);
1089 
1090 	/*
1091 	 * Clear pended interrupt
1092 	 */
1093 	if (!rgep->suspended) {
1094 		rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL);
1095 	}
1096 
1097 	/*
1098 	 * Stop the board and disable transmit/receive
1099 	 */
1100 	rge_reg_clr8(rgep, RT_COMMAND_REG,
1101 	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
1102 
1103 	if (fault)
1104 		rgep->rge_chip_state = RGE_CHIP_FAULT;
1105 	else
1106 		rgep->rge_chip_state = RGE_CHIP_STOPPED;
1107 }
1108 
1109 /*
1110  * rge_get_mac_addr() -- get the MAC address on NIC
1111  */
1112 static void rge_get_mac_addr(rge_t *rgep);
1113 #pragma	inline(rge_get_mac_addr)
1114 
1115 static void
1116 rge_get_mac_addr(rge_t *rgep)
1117 {
1118 	uint8_t *macaddr = rgep->netaddr;
1119 	uint32_t val32;
1120 
1121 	/*
1122 	 * Read first 4-byte of mac address
1123 	 */
1124 	val32 = rge_reg_get32(rgep, ID_0_REG);
1125 	macaddr[0] = val32 & 0xff;
1126 	val32 = val32 >> 8;
1127 	macaddr[1] = val32 & 0xff;
1128 	val32 = val32 >> 8;
1129 	macaddr[2] = val32 & 0xff;
1130 	val32 = val32 >> 8;
1131 	macaddr[3] = val32 & 0xff;
1132 
1133 	/*
1134 	 * Read last 2-byte of mac address
1135 	 */
1136 	val32 = rge_reg_get32(rgep, ID_4_REG);
1137 	macaddr[4] = val32 & 0xff;
1138 	val32 = val32 >> 8;
1139 	macaddr[5] = val32 & 0xff;
1140 }
1141 
static void rge_set_mac_addr(rge_t *rgep);
#pragma	inline(rge_set_mac_addr)

/*
 * Program the station address from rgep->netaddr into the chip's
 * ID registers.  The ID registers are only writable while the chip
 * is in config-register-write-enable mode, so we enter that mode,
 * write both registers, then return to normal operating mode.
 */
static void
rge_set_mac_addr(rge_t *rgep)
{
	uint8_t *p = rgep->netaddr;
	uint32_t val32;

	/*
	 * Change to config register write enable mode
	 */
	rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);

	/*
	 * Assemble first 4 bytes of mac address, byte 0 in the
	 * low-order lane (matches the layout read by rge_get_mac_addr)
	 */
	val32 = p[3];
	val32 = val32 << 8;
	val32 |= p[2];
	val32 = val32 << 8;
	val32 |= p[1];
	val32 = val32 << 8;
	val32 |= p[0];

	/*
	 * Set first 4 bytes of mac address
	 */
	rge_reg_put32(rgep, ID_0_REG, val32);

	/*
	 * Assemble last 2 bytes of mac address
	 */
	val32 = p[5];
	val32 = val32 << 8;
	val32 |= p[4];

	/*
	 * Set last 2 bytes of mac address, preserving the upper
	 * half of ID_4_REG
	 */
	val32 |= rge_reg_get32(rgep, ID_4_REG) & ~0xffff;
	rge_reg_put32(rgep, ID_4_REG, val32);

	/*
	 * Return to normal network/host communication mode
	 */
	rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
}
1190 
static void rge_set_multi_addr(rge_t *rgep);
#pragma	inline(rge_set_multi_addr)

/*
 * Program the 64-bit multicast hash filter from rgep->mcast_hash,
 * or open the filter completely (all-ones) in promiscuous mode.
 * The 8169SC requires config-register-write-enable mode around the
 * multicast register writes; other chip versions do not.
 */
static void
rge_set_multi_addr(rge_t *rgep)
{
	uint32_t *hashp;

	hashp = (uint32_t *)rgep->mcast_hash;

	/*
	 * Change to config register write enable mode (8169SC only)
	 */
	if (rgep->chipid.mac_ver == MAC_VER_8169SC) {
		rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
	}
	if (rgep->promisc) {
		rge_reg_put32(rgep, MULTICAST_0_REG, ~0U);
		rge_reg_put32(rgep, MULTICAST_4_REG, ~0U);
	} else {
		/* hash words are kept in host order; chip wants them swapped */
		rge_reg_put32(rgep, MULTICAST_0_REG, RGE_BSWAP_32(hashp[0]));
		rge_reg_put32(rgep, MULTICAST_4_REG, RGE_BSWAP_32(hashp[1]));
	}

	/*
	 * Return to normal network/host communication mode
	 */
	if (rgep->chipid.mac_ver == MAC_VER_8169SC) {
		rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
	}
}
1222 
1223 static void rge_set_promisc(rge_t *rgep);
1224 #pragma	inline(rge_set_promisc)
1225 
1226 static void
1227 rge_set_promisc(rge_t *rgep)
1228 {
1229 	if (rgep->promisc)
1230 		rge_reg_set32(rgep, RX_CONFIG_REG, RX_ACCEPT_ALL_PKT);
1231 	else
1232 		rge_reg_clr32(rgep, RX_CONFIG_REG, RX_ACCEPT_ALL_PKT);
1233 }
1234 
1235 /*
1236  * rge_chip_sync() -- program the chip with the unicast MAC address,
1237  * the multicast hash table, the required level of promiscuity, and
1238  * the current loopback mode ...
1239  */
1240 void rge_chip_sync(rge_t *rgep, enum rge_sync_op todo);
1241 #pragma	no_inline(rge_chip_sync)
1242 
1243 void
1244 rge_chip_sync(rge_t *rgep, enum rge_sync_op todo)
1245 {
1246 	switch (todo) {
1247 	case RGE_GET_MAC:
1248 		rge_get_mac_addr(rgep);
1249 		break;
1250 	case RGE_SET_MAC:
1251 		/* Reprogram the unicast MAC address(es) ... */
1252 		rge_set_mac_addr(rgep);
1253 		break;
1254 	case RGE_SET_MUL:
1255 		/* Reprogram the hashed multicast address table ... */
1256 		rge_set_multi_addr(rgep);
1257 		break;
1258 	case RGE_SET_PROMISC:
1259 		/* Set or clear the PROMISCUOUS mode bit */
1260 		rge_set_multi_addr(rgep);
1261 		rge_set_promisc(rgep);
1262 		break;
1263 	default:
1264 		break;
1265 	}
1266 }
1267 
void rge_chip_blank(void *arg, time_t ticks, uint_t count, int flag);
#pragma	no_inline(rge_chip_blank)

/*
 * Interrupt-blanking callback: deliberately a no-op for this
 * driver (interrupt moderation is handled in rge_intr() via the
 * chip's timer-interrupt registers instead).
 */
/* ARGSUSED */
void
rge_chip_blank(void *arg, time_t ticks, uint_t count, int flag)
{
	_NOTE(ARGUNUSED(arg, ticks, count));
}
1277 
1278 void rge_tx_trigger(rge_t *rgep);
1279 #pragma	no_inline(rge_tx_trigger)
1280 
1281 void
1282 rge_tx_trigger(rge_t *rgep)
1283 {
1284 	rge_reg_put8(rgep, TX_RINGS_POLL_REG, NORMAL_TX_RING_POLL);
1285 }
1286 
1287 void rge_hw_stats_dump(rge_t *rgep);
1288 #pragma	no_inline(rge_tx_trigger)
1289 
1290 void
1291 rge_hw_stats_dump(rge_t *rgep)
1292 {
1293 	int i = 0;
1294 	uint32_t regval = 0;
1295 
1296 	if (rgep->rge_mac_state == RGE_MAC_STOPPED)
1297 		return;
1298 
1299 	regval = rge_reg_get32(rgep, DUMP_COUNTER_REG_0);
1300 	while (regval & DUMP_START) {
1301 		drv_usecwait(100);
1302 		if (++i > STATS_DUMP_LOOP) {
1303 			RGE_DEBUG(("rge h/w statistics dump fail!"));
1304 			rgep->rge_chip_state = RGE_CHIP_ERROR;
1305 			return;
1306 		}
1307 		regval = rge_reg_get32(rgep, DUMP_COUNTER_REG_0);
1308 	}
1309 	DMA_SYNC(rgep->dma_area_stats, DDI_DMA_SYNC_FORKERNEL);
1310 
1311 	/*
1312 	 * Start H/W statistics dump for RTL8169 chip
1313 	 */
1314 	rge_reg_set32(rgep, DUMP_COUNTER_REG_0, DUMP_START);
1315 }
1316 
1317 /*
1318  * ========== Hardware interrupt handler ==========
1319  */
1320 
1321 #undef	RGE_DBG
1322 #define	RGE_DBG		RGE_DBG_INT	/* debug flag for this code	*/
1323 
static void rge_wake_factotum(rge_t *rgep);
#pragma	inline(rge_wake_factotum)

/*
 * Schedule the factotum softint, unless one is already pending
 * (factotum_flag nonzero); the factotum clears the flag when it runs.
 */
static void
rge_wake_factotum(rge_t *rgep)
{
	if (rgep->factotum_flag == 0) {
		rgep->factotum_flag = 1;
		(void) ddi_intr_trigger_softint(rgep->factotum_hdl, NULL);
	}
}
1335 
1336 /*
1337  *	rge_intr() -- handle chip interrupts
1338  */
uint_t rge_intr(caddr_t arg1, caddr_t arg2);
#pragma	no_inline(rge_intr)

/*
 * rge_intr() -- main interrupt service routine.
 *
 * Claims and acknowledges the interrupt, dynamically tunes the
 * Tx/Rx interrupt-coalescing timer based on recent packet rates,
 * then dispatches to the receive path, the reschedule softint,
 * and/or error handling as indicated by the interrupt status bits.
 */
uint_t
rge_intr(caddr_t arg1, caddr_t arg2)
{
	rge_t *rgep = (rge_t *)arg1;
	uint16_t int_status;
	clock_t	now;
	uint32_t tx_pkts;
	uint32_t rx_pkts;
	uint32_t poll_rate;
	uint32_t opt_pkts;
	uint32_t opt_intrs;
	boolean_t update_int_mask = B_FALSE;
	uint32_t itimer;

	_NOTE(ARGUNUSED(arg2))

	mutex_enter(rgep->genlock);

	/* while suspended the hardware is not ours to touch */
	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Was this interrupt caused by our device...
	 */
	int_status = rge_reg_get16(rgep, INT_STATUS_REG);
	if (!(int_status & rgep->int_mask)) {
		mutex_exit(rgep->genlock);
		return (DDI_INTR_UNCLAIMED);
				/* indicate it wasn't our interrupt */
	}
	rgep->stats.intr++;

	/*
	 * Clear interrupt
	 *	For PCIE chipset, we need disable interrupt first.
	 */
	if (rgep->chipid.is_pcie) {
		rge_reg_put16(rgep, INT_MASK_REG, INT_MASK_NONE);
		update_int_mask = B_TRUE;
	}
	rge_reg_put16(rgep, INT_STATUS_REG, int_status);

	/*
	 * Calculate optimal polling interval.  Re-tuned at most once
	 * per tick_delta, and only at 100M/1000M link speeds.
	 */
	now = ddi_get_lbolt();
	if (now - rgep->curr_tick >= rgep->tick_delta &&
	    (rgep->param_link_speed == RGE_SPEED_1000M ||
	    rgep->param_link_speed == RGE_SPEED_100M)) {
		/* number of rx and tx packets in the last tick */
		tx_pkts = rgep->stats.opackets - rgep->last_opackets;
		rx_pkts = rgep->stats.rpackets - rgep->last_rpackets;

		rgep->last_opackets = rgep->stats.opackets;
		rgep->last_rpackets = rgep->stats.rpackets;

		/* restore interrupt mask */
		rgep->int_mask |= TX_OK_INT | RX_OK_INT;
		if (rgep->chipid.is_pcie) {
			rgep->int_mask |= NO_TXDESC_INT;
		}

		/* optimal number of packets in a tick */
		if (rgep->param_link_speed == RGE_SPEED_1000M) {
			opt_pkts = (1000*1000*1000/8)/ETHERMTU/CLK_TICK;
		} else {
			opt_pkts = (100*1000*1000/8)/ETHERMTU/CLK_TICK;
		}

		/*
		 * calculate polling interval based on rx and tx packets
		 * in the last tick.  When traffic exceeds the optimal
		 * per-tick interrupt count, the corresponding OK
		 * interrupts are masked off and the timer interrupt is
		 * used instead (polling mode).
		 */
		poll_rate = 0;
		if (now - rgep->curr_tick < 2*rgep->tick_delta) {
			opt_intrs = opt_pkts/TX_COALESC;
			if (tx_pkts > opt_intrs) {
				poll_rate = max(tx_pkts/TX_COALESC, opt_intrs);
				rgep->int_mask &= ~(TX_OK_INT | NO_TXDESC_INT);
			}

			opt_intrs = opt_pkts/RX_COALESC;
			if (rx_pkts > opt_intrs) {
				opt_intrs = max(rx_pkts/RX_COALESC, opt_intrs);
				poll_rate = max(opt_intrs, poll_rate);
				rgep->int_mask &= ~RX_OK_INT;
			}
			/* ensure poll_rate reasonable */
			poll_rate = min(poll_rate, opt_pkts*4);
		}

		if (poll_rate) {
			/* move to polling mode */
			if (rgep->chipid.is_pcie) {
				itimer = (TIMER_CLK_PCIE/CLK_TICK)/poll_rate;
			} else {
				itimer = (TIMER_CLK_PCI/CLK_TICK)/poll_rate;
			}
		} else {
			/* move to normal mode */
			itimer = 0;
		}
		RGE_DEBUG(("%s: poll: itimer:%d int_mask:0x%x",
		    __func__, itimer, rgep->int_mask));
		rge_reg_put32(rgep, TIMER_INT_REG, itimer);

		/* update timestamp for statistics */
		rgep->curr_tick = now;

		/* reset timer */
		int_status |= TIME_OUT_INT;

		update_int_mask = B_TRUE;
	}

	/* writing 0 restarts the interrupt-moderation timer */
	if (int_status & TIME_OUT_INT) {
		rge_reg_put32(rgep, TIMER_COUNT_REG, 0);
	}

	/* flush post writes */
	(void) rge_reg_get16(rgep, INT_STATUS_REG);

	/*
	 * Cable link change interrupt
	 */
	if (int_status & LINK_CHANGE_INT) {
		rge_chip_cyclic(rgep);
	}

	mutex_exit(rgep->genlock);

	/*
	 * Receive interrupt (note: genlock dropped; rge_receive takes
	 * its own locks)
	 */
	if (int_status & RGE_RX_INT)
		rge_receive(rgep);

	/*
	 * Transmit interrupt: on Tx error, mark the chip for reset by
	 * the factotum; otherwise wake the reschedule softint when
	 * descriptors may have become available or are running low
	 */
	if (int_status & TX_ERR_INT) {
		RGE_REPORT((rgep, "tx error happened, resetting the chip "));
		mutex_enter(rgep->genlock);
		rgep->rge_chip_state = RGE_CHIP_ERROR;
		mutex_exit(rgep->genlock);
	} else if ((rgep->chipid.is_pcie && (int_status & NO_TXDESC_INT)) ||
	    ((int_status & TX_OK_INT) && rgep->tx_free < RGE_SEND_SLOTS/8)) {
		(void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL);
	}

	/*
	 * System error interrupt
	 */
	if (int_status & SYS_ERR_INT) {
		RGE_REPORT((rgep, "sys error happened, resetting the chip "));
		mutex_enter(rgep->genlock);
		rgep->rge_chip_state = RGE_CHIP_ERROR;
		mutex_exit(rgep->genlock);
	}

	/*
	 * Re-enable interrupt for PCIE chipset or install new int_mask
	 */
	if (update_int_mask)
		rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);

	return (DDI_INTR_CLAIMED);	/* indicate it was our interrupt */
}
1512 
1513 /*
1514  * ========== Factotum, implemented as a softint handler ==========
1515  */
1516 
1517 #undef	RGE_DBG
1518 #define	RGE_DBG		RGE_DBG_FACT	/* debug flag for this code	*/
1519 
static boolean_t rge_factotum_link_check(rge_t *rgep);
#pragma	no_inline(rge_factotum_link_check)

/*
 * Check the PHY status register for a link-state change.  On a
 * change, record the new state (and, when the link comes up, the
 * negotiated speed/duplex) in the driver parameters and return
 * B_TRUE so the caller knows to call mac_link_update().
 */
static boolean_t
rge_factotum_link_check(rge_t *rgep)
{
	uint8_t media_status;
	int32_t link;

	media_status = rge_reg_get8(rgep, PHY_STATUS_REG);
	link = (media_status & PHY_STATUS_LINK_UP) ?
	    LINK_STATE_UP : LINK_STATE_DOWN;
	if (rgep->param_link_up != link) {
		/*
		 * Link change.
		 */
		rgep->param_link_up = link;

		if (link == LINK_STATE_UP) {
			if (media_status & PHY_STATUS_1000MF) {
				/* 1000M implies full duplex */
				rgep->param_link_speed = RGE_SPEED_1000M;
				rgep->param_link_duplex = LINK_DUPLEX_FULL;
			} else {
				/* 10M/100M: speed and duplex reported separately */
				rgep->param_link_speed =
				    (media_status & PHY_STATUS_100M) ?
				    RGE_SPEED_100M : RGE_SPEED_10M;
				rgep->param_link_duplex =
				    (media_status & PHY_STATUS_DUPLEX_FULL) ?
				    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
			}
		}
		return (B_TRUE);
	}
	return (B_FALSE);
}
1555 
1556 /*
1557  * Factotum routine to check for Tx stall, using the 'watchdog' counter
1558  */
static boolean_t rge_factotum_stall_check(rge_t *rgep);
#pragma	no_inline(rge_factotum_stall_check)

/*
 * Check for a Tx stall; returns B_TRUE if the watchdog counter has
 * exceeded the threshold (rge_watchdog_count), meaning the chip
 * should be treated as stalled.  Must be called with genlock held.
 */
static boolean_t
rge_factotum_stall_check(rge_t *rgep)
{
	uint32_t dogval;

	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Specific check for Tx stall ...
	 *
	 * The 'watchdog' counter is incremented whenever a packet
	 * is queued, reset to 1 when some (but not all) buffers
	 * are reclaimed, reset to 0 (disabled) when all buffers
	 * are reclaimed, and shifted left here.  If it exceeds the
	 * threshold value, the chip is assumed to have stalled and
	 * is put into the ERROR state.  The factotum will then reset
	 * it on the next pass.
	 *
	 * All of which should ensure that we don't get into a state
	 * where packets are left pending indefinitely!
	 */
	if (rgep->resched_needed)
		(void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL);
	dogval = rge_atomic_shl32(&rgep->watchdog, 1);
	if (dogval < rge_watchdog_count)
		return (B_FALSE);

	RGE_REPORT((rgep, "Tx stall detected, watchdog code 0x%x", dogval));
	return (B_TRUE);

}
1593 
1594 /*
1595  * The factotum is woken up when there's something to do that we'd rather
1596  * not do from inside a hardware interrupt handler or high-level cyclic.
1597  * Its two main tasks are:
1598  *	reset & restart the chip after an error
1599  *	check the link status whenever necessary
1600  */
uint_t rge_chip_factotum(caddr_t arg1, caddr_t arg2);
#pragma	no_inline(rge_chip_factotum)

/*
 * Factotum softint handler.  arg1 is the rge_t; arg2 is unused.
 * Returns DDI_INTR_CLAIMED if the wakeup flag was set, else
 * DDI_INTR_UNCLAIMED.
 */
uint_t
rge_chip_factotum(caddr_t arg1, caddr_t arg2)
{
	rge_t *rgep;
	uint_t result;
	boolean_t error;
	boolean_t linkchg;

	rgep = (rge_t *)arg1;
	_NOTE(ARGUNUSED(arg2))

	/* spurious trigger: rge_wake_factotum() didn't set the flag */
	if (rgep->factotum_flag == 0)
		return (DDI_INTR_UNCLAIMED);

	rgep->factotum_flag = 0;
	result = DDI_INTR_CLAIMED;
	error = B_FALSE;
	linkchg = B_FALSE;

	mutex_enter(rgep->genlock);
	switch (rgep->rge_chip_state) {
	default:
		break;

	case RGE_CHIP_RUNNING:
		linkchg = rge_factotum_link_check(rgep);
		error = rge_factotum_stall_check(rgep);
		break;

	case RGE_CHIP_ERROR:
		error = B_TRUE;
		break;

	case RGE_CHIP_FAULT:
		/*
		 * Fault detected, time to reset ...
		 */
		if (rge_autorecover) {
			RGE_REPORT((rgep, "automatic recovery activated"));
			rge_restart(rgep);
		}
		break;
	}

	/*
	 * If an error is detected, stop the chip now, marking it as
	 * faulty, so that it will be reset next time through ...
	 */
	if (error)
		rge_chip_stop(rgep, B_TRUE);
	mutex_exit(rgep->genlock);

	/*
	 * If the link state changed, tell the world about it.
	 * Note: can't do this while still holding the mutex.
	 */
	if (linkchg)
		mac_link_update(rgep->mh, rgep->param_link_up);

	return (result);
}
1665 
1666 /*
1667  * High-level cyclic handler
1668  *
1669  * This routine schedules a (low-level) softint callback to the
1670  * factotum, and prods the chip to update the status block (which
1671  * will cause a hardware interrupt when complete).
1672  */
void rge_chip_cyclic(void *arg);
#pragma	no_inline(rge_chip_cyclic)

/*
 * Periodic housekeeping: while the chip is RUNNING, check the PHY
 * and recycle any completed Tx descriptors, then (for RUNNING,
 * FAULT and ERROR states) wake the factotum to do the heavier work.
 * arg is the rge_t.
 */
void
rge_chip_cyclic(void *arg)
{
	rge_t *rgep;

	rgep = arg;

	switch (rgep->rge_chip_state) {
	default:
		/* chip not active: nothing to do, don't wake the factotum */
		return;

	case RGE_CHIP_RUNNING:
		rge_phy_check(rgep);
		if (rgep->tx_free < RGE_SEND_SLOTS)
			rge_send_recycle(rgep);
		break;

	case RGE_CHIP_FAULT:
	case RGE_CHIP_ERROR:
		break;
	}

	rge_wake_factotum(rgep);
}
1700 
1701 
1702 /*
1703  * ========== Ioctl subfunctions ==========
1704  */
1705 
1706 #undef	RGE_DBG
1707 #define	RGE_DBG		RGE_DBG_PPIO	/* debug flag for this code	*/
1708 
1709 #if	RGE_DEBUGGING || RGE_DO_PPIO
1710 
1711 static void rge_chip_peek_cfg(rge_t *rgep, rge_peekpoke_t *ppd);
1712 #pragma	no_inline(rge_chip_peek_cfg)
1713 
1714 static void
1715 rge_chip_peek_cfg(rge_t *rgep, rge_peekpoke_t *ppd)
1716 {
1717 	uint64_t regval;
1718 	uint64_t regno;
1719 
1720 	RGE_TRACE(("rge_chip_peek_cfg($%p, $%p)",
1721 	    (void *)rgep, (void *)ppd));
1722 
1723 	regno = ppd->pp_acc_offset;
1724 
1725 	switch (ppd->pp_acc_size) {
1726 	case 1:
1727 		regval = pci_config_get8(rgep->cfg_handle, regno);
1728 		break;
1729 
1730 	case 2:
1731 		regval = pci_config_get16(rgep->cfg_handle, regno);
1732 		break;
1733 
1734 	case 4:
1735 		regval = pci_config_get32(rgep->cfg_handle, regno);
1736 		break;
1737 
1738 	case 8:
1739 		regval = pci_config_get64(rgep->cfg_handle, regno);
1740 		break;
1741 	}
1742 
1743 	ppd->pp_acc_data = regval;
1744 }
1745 
1746 static void rge_chip_poke_cfg(rge_t *rgep, rge_peekpoke_t *ppd);
1747 #pragma	no_inline(rge_chip_poke_cfg)
1748 
1749 static void
1750 rge_chip_poke_cfg(rge_t *rgep, rge_peekpoke_t *ppd)
1751 {
1752 	uint64_t regval;
1753 	uint64_t regno;
1754 
1755 	RGE_TRACE(("rge_chip_poke_cfg($%p, $%p)",
1756 	    (void *)rgep, (void *)ppd));
1757 
1758 	regno = ppd->pp_acc_offset;
1759 	regval = ppd->pp_acc_data;
1760 
1761 	switch (ppd->pp_acc_size) {
1762 	case 1:
1763 		pci_config_put8(rgep->cfg_handle, regno, regval);
1764 		break;
1765 
1766 	case 2:
1767 		pci_config_put16(rgep->cfg_handle, regno, regval);
1768 		break;
1769 
1770 	case 4:
1771 		pci_config_put32(rgep->cfg_handle, regno, regval);
1772 		break;
1773 
1774 	case 8:
1775 		pci_config_put64(rgep->cfg_handle, regno, regval);
1776 		break;
1777 	}
1778 }
1779 
1780 static void rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd);
1781 #pragma	no_inline(rge_chip_peek_reg)
1782 
1783 static void
1784 rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd)
1785 {
1786 	uint64_t regval;
1787 	void *regaddr;
1788 
1789 	RGE_TRACE(("rge_chip_peek_reg($%p, $%p)",
1790 	    (void *)rgep, (void *)ppd));
1791 
1792 	regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);
1793 
1794 	switch (ppd->pp_acc_size) {
1795 	case 1:
1796 		regval = ddi_get8(rgep->io_handle, regaddr);
1797 		break;
1798 
1799 	case 2:
1800 		regval = ddi_get16(rgep->io_handle, regaddr);
1801 		break;
1802 
1803 	case 4:
1804 		regval = ddi_get32(rgep->io_handle, regaddr);
1805 		break;
1806 
1807 	case 8:
1808 		regval = ddi_get64(rgep->io_handle, regaddr);
1809 		break;
1810 	}
1811 
1812 	ppd->pp_acc_data = regval;
1813 }
1814 
1815 static void rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd);
1816 #pragma	no_inline(rge_chip_peek_reg)
1817 
1818 static void
1819 rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd)
1820 {
1821 	uint64_t regval;
1822 	void *regaddr;
1823 
1824 	RGE_TRACE(("rge_chip_poke_reg($%p, $%p)",
1825 	    (void *)rgep, (void *)ppd));
1826 
1827 	regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);
1828 	regval = ppd->pp_acc_data;
1829 
1830 	switch (ppd->pp_acc_size) {
1831 	case 1:
1832 		ddi_put8(rgep->io_handle, regaddr, regval);
1833 		break;
1834 
1835 	case 2:
1836 		ddi_put16(rgep->io_handle, regaddr, regval);
1837 		break;
1838 
1839 	case 4:
1840 		ddi_put32(rgep->io_handle, regaddr, regval);
1841 		break;
1842 
1843 	case 8:
1844 		ddi_put64(rgep->io_handle, regaddr, regval);
1845 		break;
1846 	}
1847 }
1848 
1849 static void rge_chip_peek_mii(rge_t *rgep, rge_peekpoke_t *ppd);
1850 #pragma	no_inline(rge_chip_peek_mii)
1851 
1852 static void
1853 rge_chip_peek_mii(rge_t *rgep, rge_peekpoke_t *ppd)
1854 {
1855 	RGE_TRACE(("rge_chip_peek_mii($%p, $%p)",
1856 	    (void *)rgep, (void *)ppd));
1857 
1858 	ppd->pp_acc_data = rge_mii_get16(rgep, ppd->pp_acc_offset/2);
1859 }
1860 
1861 static void rge_chip_poke_mii(rge_t *rgep, rge_peekpoke_t *ppd);
1862 #pragma	no_inline(rge_chip_poke_mii)
1863 
1864 static void
1865 rge_chip_poke_mii(rge_t *rgep, rge_peekpoke_t *ppd)
1866 {
1867 	RGE_TRACE(("rge_chip_poke_mii($%p, $%p)",
1868 	    (void *)rgep, (void *)ppd));
1869 
1870 	rge_mii_put16(rgep, ppd->pp_acc_offset/2, ppd->pp_acc_data);
1871 }
1872 
1873 static void rge_chip_peek_mem(rge_t *rgep, rge_peekpoke_t *ppd);
1874 #pragma	no_inline(rge_chip_peek_mem)
1875 
1876 static void
1877 rge_chip_peek_mem(rge_t *rgep, rge_peekpoke_t *ppd)
1878 {
1879 	uint64_t regval;
1880 	void *vaddr;
1881 
1882 	RGE_TRACE(("rge_chip_peek_rge($%p, $%p)",
1883 	    (void *)rgep, (void *)ppd));
1884 
1885 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
1886 
1887 	switch (ppd->pp_acc_size) {
1888 	case 1:
1889 		regval = *(uint8_t *)vaddr;
1890 		break;
1891 
1892 	case 2:
1893 		regval = *(uint16_t *)vaddr;
1894 		break;
1895 
1896 	case 4:
1897 		regval = *(uint32_t *)vaddr;
1898 		break;
1899 
1900 	case 8:
1901 		regval = *(uint64_t *)vaddr;
1902 		break;
1903 	}
1904 
1905 	RGE_DEBUG(("rge_chip_peek_mem($%p, $%p) peeked 0x%llx from $%p",
1906 	    (void *)rgep, (void *)ppd, regval, vaddr));
1907 
1908 	ppd->pp_acc_data = regval;
1909 }
1910 
1911 static void rge_chip_poke_mem(rge_t *rgep, rge_peekpoke_t *ppd);
1912 #pragma	no_inline(rge_chip_poke_mem)
1913 
1914 static void
1915 rge_chip_poke_mem(rge_t *rgep, rge_peekpoke_t *ppd)
1916 {
1917 	uint64_t regval;
1918 	void *vaddr;
1919 
1920 	RGE_TRACE(("rge_chip_poke_mem($%p, $%p)",
1921 	    (void *)rgep, (void *)ppd));
1922 
1923 	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
1924 	regval = ppd->pp_acc_data;
1925 
1926 	RGE_DEBUG(("rge_chip_poke_mem($%p, $%p) poking 0x%llx at $%p",
1927 	    (void *)rgep, (void *)ppd, regval, vaddr));
1928 
1929 	switch (ppd->pp_acc_size) {
1930 	case 1:
1931 		*(uint8_t *)vaddr = (uint8_t)regval;
1932 		break;
1933 
1934 	case 2:
1935 		*(uint16_t *)vaddr = (uint16_t)regval;
1936 		break;
1937 
1938 	case 4:
1939 		*(uint32_t *)vaddr = (uint32_t)regval;
1940 		break;
1941 
1942 	case 8:
1943 		*(uint64_t *)vaddr = (uint64_t)regval;
1944 		break;
1945 	}
1946 }
1947 
1948 static enum ioc_reply rge_pp_ioctl(rge_t *rgep, int cmd, mblk_t *mp,
1949 					struct iocblk *iocp);
1950 #pragma	no_inline(rge_pp_ioctl)
1951 
1952 static enum ioc_reply
1953 rge_pp_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
1954 {
1955 	void (*ppfn)(rge_t *rgep, rge_peekpoke_t *ppd);
1956 	rge_peekpoke_t *ppd;
1957 	dma_area_t *areap;
1958 	uint64_t sizemask;
1959 	uint64_t mem_va;
1960 	uint64_t maxoff;
1961 	boolean_t peek;
1962 
1963 	switch (cmd) {
1964 	default:
1965 		/* NOTREACHED */
1966 		rge_error(rgep, "rge_pp_ioctl: invalid cmd 0x%x", cmd);
1967 		return (IOC_INVAL);
1968 
1969 	case RGE_PEEK:
1970 		peek = B_TRUE;
1971 		break;
1972 
1973 	case RGE_POKE:
1974 		peek = B_FALSE;
1975 		break;
1976 	}
1977 
1978 	/*
1979 	 * Validate format of ioctl
1980 	 */
1981 	if (iocp->ioc_count != sizeof (rge_peekpoke_t))
1982 		return (IOC_INVAL);
1983 	if (mp->b_cont == NULL)
1984 		return (IOC_INVAL);
1985 	ppd = (rge_peekpoke_t *)mp->b_cont->b_rptr;
1986 
1987 	/*
1988 	 * Validate request parameters
1989 	 */
1990 	switch (ppd->pp_acc_space) {
1991 	default:
1992 		return (IOC_INVAL);
1993 
1994 	case RGE_PP_SPACE_CFG:
1995 		/*
1996 		 * Config space
1997 		 */
1998 		sizemask = 8|4|2|1;
1999 		mem_va = 0;
2000 		maxoff = PCI_CONF_HDR_SIZE;
2001 		ppfn = peek ? rge_chip_peek_cfg : rge_chip_poke_cfg;
2002 		break;
2003 
2004 	case RGE_PP_SPACE_REG:
2005 		/*
2006 		 * Memory-mapped I/O space
2007 		 */
2008 		sizemask = 8|4|2|1;
2009 		mem_va = 0;
2010 		maxoff = RGE_REGISTER_MAX;
2011 		ppfn = peek ? rge_chip_peek_reg : rge_chip_poke_reg;
2012 		break;
2013 
2014 	case RGE_PP_SPACE_MII:
2015 		/*
2016 		 * PHY's MII registers
2017 		 * NB: all PHY registers are two bytes, but the
2018 		 * addresses increment in ones (word addressing).
2019 		 * So we scale the address here, then undo the
2020 		 * transformation inside the peek/poke functions.
2021 		 */
2022 		ppd->pp_acc_offset *= 2;
2023 		sizemask = 2;
2024 		mem_va = 0;
2025 		maxoff = (MII_MAXREG+1)*2;
2026 		ppfn = peek ? rge_chip_peek_mii : rge_chip_poke_mii;
2027 		break;
2028 
2029 	case RGE_PP_SPACE_RGE:
2030 		/*
2031 		 * RGE data structure!
2032 		 */
2033 		sizemask = 8|4|2|1;
2034 		mem_va = (uintptr_t)rgep;
2035 		maxoff = sizeof (*rgep);
2036 		ppfn = peek ? rge_chip_peek_mem : rge_chip_poke_mem;
2037 		break;
2038 
2039 	case RGE_PP_SPACE_STATISTICS:
2040 	case RGE_PP_SPACE_TXDESC:
2041 	case RGE_PP_SPACE_TXBUFF:
2042 	case RGE_PP_SPACE_RXDESC:
2043 	case RGE_PP_SPACE_RXBUFF:
2044 		/*
2045 		 * Various DMA_AREAs
2046 		 */
2047 		switch (ppd->pp_acc_space) {
2048 		case RGE_PP_SPACE_TXDESC:
2049 			areap = &rgep->dma_area_txdesc;
2050 			break;
2051 		case RGE_PP_SPACE_RXDESC:
2052 			areap = &rgep->dma_area_rxdesc;
2053 			break;
2054 		case RGE_PP_SPACE_STATISTICS:
2055 			areap = &rgep->dma_area_stats;
2056 			break;
2057 		}
2058 
2059 		sizemask = 8|4|2|1;
2060 		mem_va = (uintptr_t)areap->mem_va;
2061 		maxoff = areap->alength;
2062 		ppfn = peek ? rge_chip_peek_mem : rge_chip_poke_mem;
2063 		break;
2064 	}
2065 
2066 	switch (ppd->pp_acc_size) {
2067 	default:
2068 		return (IOC_INVAL);
2069 
2070 	case 8:
2071 	case 4:
2072 	case 2:
2073 	case 1:
2074 		if ((ppd->pp_acc_size & sizemask) == 0)
2075 			return (IOC_INVAL);
2076 		break;
2077 	}
2078 
2079 	if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
2080 		return (IOC_INVAL);
2081 
2082 	if (ppd->pp_acc_offset >= maxoff)
2083 		return (IOC_INVAL);
2084 
2085 	if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff)
2086 		return (IOC_INVAL);
2087 
2088 	/*
2089 	 * All OK - go do it!
2090 	 */
2091 	ppd->pp_acc_offset += mem_va;
2092 	(*ppfn)(rgep, ppd);
2093 	return (peek ? IOC_REPLY : IOC_ACK);
2094 }
2095 
static enum ioc_reply rge_diag_ioctl(rge_t *rgep, int cmd, mblk_t *mp,
					struct iocblk *iocp);
#pragma	no_inline(rge_diag_ioctl)

/*
 * Dispatch the diagnostic subset of ioctl commands (peek/poke,
 * resets, PHY reset).  Must be called with genlock held.
 */
static enum ioc_reply
rge_diag_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	ASSERT(mutex_owned(rgep->genlock));

	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_diag_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case RGE_DIAG:
		/*
		 * Currently a no-op
		 */
		return (IOC_ACK);

	case RGE_PEEK:
	case RGE_POKE:
		return (rge_pp_ioctl(rgep, cmd, mp, iocp));

	case RGE_PHY_RESET:
		return (IOC_RESTART_ACK);

	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		/*
		 * Reset and reinitialise the chip
		 */
		rge_restart(rgep);
		return (IOC_ACK);
	}

	/* NOTREACHED */
}
2135 
2136 #endif	/* RGE_DEBUGGING || RGE_DO_PPIO */
2137 
static enum ioc_reply rge_mii_ioctl(rge_t *rgep, int cmd, mblk_t *mp,
				    struct iocblk *iocp);
#pragma	no_inline(rge_mii_ioctl)

/*
 * Handle RGE_MII_READ/RGE_MII_WRITE ioctls: validate the
 * rge_mii_rw request carried in the mblk, then read or write
 * the named PHY register.
 */
static enum ioc_reply
rge_mii_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	struct rge_mii_rw *miirwp;

	/*
	 * Validate format of ioctl
	 */
	if (iocp->ioc_count != sizeof (struct rge_mii_rw))
		return (IOC_INVAL);
	if (mp->b_cont == NULL)
		return (IOC_INVAL);
	miirwp = (struct rge_mii_rw *)mp->b_cont->b_rptr;

	/*
	 * Validate request parameters ...
	 */
	if (miirwp->mii_reg > MII_MAXREG)
		return (IOC_INVAL);

	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_mii_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case RGE_MII_READ:
		miirwp->mii_data = rge_mii_get16(rgep, miirwp->mii_reg);
		return (IOC_REPLY);

	case RGE_MII_WRITE:
		rge_mii_put16(rgep, miirwp->mii_reg, miirwp->mii_data);
		return (IOC_ACK);
	}

	/* NOTREACHED */
}
2179 
enum ioc_reply rge_chip_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp,
				struct iocblk *iocp);
#pragma	no_inline(rge_chip_ioctl)

/*
 * Top-level chip ioctl dispatcher: routes diagnostic commands to
 * rge_diag_ioctl() (when built with RGE_DEBUGGING/RGE_DO_PPIO) and
 * MII commands to rge_mii_ioctl().  Must be called with genlock held.
 */
enum ioc_reply
rge_chip_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	RGE_TRACE(("rge_chip_ioctl($%p, $%p, $%p, $%p)",
	    (void *)rgep, (void *)wq, (void *)mp, (void *)iocp));

	ASSERT(mutex_owned(rgep->genlock));

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_chip_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
#if	RGE_DEBUGGING || RGE_DO_PPIO
		return (rge_diag_ioctl(rgep, cmd, mp, iocp));
#else
		/* diagnostic support not compiled in */
		return (IOC_INVAL);
#endif	/* RGE_DEBUGGING || RGE_DO_PPIO */

	case RGE_MII_READ:
	case RGE_MII_WRITE:
		return (rge_mii_ioctl(rgep, cmd, mp, iocp));

	}

	/* NOTREACHED */
}
2221