1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 *
25 * Copyright 2025 Oxide Computer Company
26 */
27
28 #include "rge.h"
29
30 #define REG32(rgep, reg) ((uint32_t *)(rgep->io_regs+(reg)))
31 #define REG16(rgep, reg) ((uint16_t *)(rgep->io_regs+(reg)))
32 #define REG8(rgep, reg) ((uint8_t *)(rgep->io_regs+(reg)))
33 #define PIO_ADDR(rgep, offset) ((void *)(rgep->io_regs+(offset)))
34
/*
 * Patchable globals:
 *
 * rge_autorecover
 *	Enables/disables automatic recovery after fault detection
 */
static uint32_t rge_autorecover = 1;

/*
 * globals:
 */
#define	RGE_DBG		RGE_DBG_REGS	/* debug flag for this code	*/
/*
 * Watchdog thresholds; units are defined by the consumers of these
 * counters elsewhere in the driver -- TODO confirm against rge_main.c.
 */
static uint32_t rge_watchdog_count = 1 << 5;
static uint32_t rge_rx_watchdog_count = 1 << 3;
49
50 /*
51 * Operating register get/set access routines
52 */
53
54 static uint32_t
rge_reg_get32(rge_t * rgep,uintptr_t regno)55 rge_reg_get32(rge_t *rgep, uintptr_t regno)
56 {
57 RGE_TRACE(("rge_reg_get32($%p, 0x%lx)",
58 (void *)rgep, regno));
59
60 return (ddi_get32(rgep->io_handle, REG32(rgep, regno)));
61 }
62
63 static void
rge_reg_put32(rge_t * rgep,uintptr_t regno,uint32_t data)64 rge_reg_put32(rge_t *rgep, uintptr_t regno, uint32_t data)
65 {
66 RGE_TRACE(("rge_reg_put32($%p, 0x%lx, 0x%x)",
67 (void *)rgep, regno, data));
68
69 ddi_put32(rgep->io_handle, REG32(rgep, regno), data);
70 }
71
72 static void
rge_reg_set32(rge_t * rgep,uintptr_t regno,uint32_t bits)73 rge_reg_set32(rge_t *rgep, uintptr_t regno, uint32_t bits)
74 {
75 uint32_t regval;
76
77 RGE_TRACE(("rge_reg_set32($%p, 0x%lx, 0x%x)",
78 (void *)rgep, regno, bits));
79
80 regval = rge_reg_get32(rgep, regno);
81 regval |= bits;
82 rge_reg_put32(rgep, regno, regval);
83 }
84
85 static void
rge_reg_clr32(rge_t * rgep,uintptr_t regno,uint32_t bits)86 rge_reg_clr32(rge_t *rgep, uintptr_t regno, uint32_t bits)
87 {
88 uint32_t regval;
89
90 RGE_TRACE(("rge_reg_clr32($%p, 0x%lx, 0x%x)",
91 (void *)rgep, regno, bits));
92
93 regval = rge_reg_get32(rgep, regno);
94 regval &= ~bits;
95 rge_reg_put32(rgep, regno, regval);
96 }
97
98 static uint16_t
rge_reg_get16(rge_t * rgep,uintptr_t regno)99 rge_reg_get16(rge_t *rgep, uintptr_t regno)
100 {
101 RGE_TRACE(("rge_reg_get16($%p, 0x%lx)",
102 (void *)rgep, regno));
103
104 return (ddi_get16(rgep->io_handle, REG16(rgep, regno)));
105 }
106
107 static void
rge_reg_put16(rge_t * rgep,uintptr_t regno,uint16_t data)108 rge_reg_put16(rge_t *rgep, uintptr_t regno, uint16_t data)
109 {
110 RGE_TRACE(("rge_reg_put16($%p, 0x%lx, 0x%x)",
111 (void *)rgep, regno, data));
112
113 ddi_put16(rgep->io_handle, REG16(rgep, regno), data);
114 }
115
116 static uint8_t
rge_reg_get8(rge_t * rgep,uintptr_t regno)117 rge_reg_get8(rge_t *rgep, uintptr_t regno)
118 {
119 RGE_TRACE(("rge_reg_get8($%p, 0x%lx)",
120 (void *)rgep, regno));
121
122 return (ddi_get8(rgep->io_handle, REG8(rgep, regno)));
123 }
124
125 static void
rge_reg_put8(rge_t * rgep,uintptr_t regno,uint8_t data)126 rge_reg_put8(rge_t *rgep, uintptr_t regno, uint8_t data)
127 {
128 RGE_TRACE(("rge_reg_put8($%p, 0x%lx, 0x%x)",
129 (void *)rgep, regno, data));
130
131 ddi_put8(rgep->io_handle, REG8(rgep, regno), data);
132 }
133
134 static void
rge_reg_set8(rge_t * rgep,uintptr_t regno,uint8_t bits)135 rge_reg_set8(rge_t *rgep, uintptr_t regno, uint8_t bits)
136 {
137 uint8_t regval;
138
139 RGE_TRACE(("rge_reg_set8($%p, 0x%lx, 0x%x)",
140 (void *)rgep, regno, bits));
141
142 regval = rge_reg_get8(rgep, regno);
143 regval |= bits;
144 rge_reg_put8(rgep, regno, regval);
145 }
146
147 static void
rge_reg_clr8(rge_t * rgep,uintptr_t regno,uint8_t bits)148 rge_reg_clr8(rge_t *rgep, uintptr_t regno, uint8_t bits)
149 {
150 uint8_t regval;
151
152 RGE_TRACE(("rge_reg_clr8($%p, 0x%lx, 0x%x)",
153 (void *)rgep, regno, bits));
154
155 regval = rge_reg_get8(rgep, regno);
156 regval &= ~bits;
157 rge_reg_put8(rgep, regno, regval);
158 }
159
160 uint16_t
rge_mii_get16(rge_t * rgep,uintptr_t mii)161 rge_mii_get16(rge_t *rgep, uintptr_t mii)
162 {
163 uint32_t regval;
164 uint32_t val32;
165 uint32_t i;
166
167 regval = (mii & PHY_REG_MASK) << PHY_REG_SHIFT;
168 rge_reg_put32(rgep, PHY_ACCESS_REG, regval);
169
170 /*
171 * Waiting for PHY reading OK
172 */
173 for (i = 0; i < PHY_RESET_LOOP; i++) {
174 drv_usecwait(1000);
175 val32 = rge_reg_get32(rgep, PHY_ACCESS_REG);
176 if (val32 & PHY_ACCESS_WR_FLAG)
177 return ((uint16_t)(val32 & 0xffff));
178 }
179
180 RGE_REPORT((rgep, "rge_mii_get16(0x%x) fail, val = %x", mii, val32));
181 return ((uint16_t)~0u);
182 }
183
184 void
rge_mii_put16(rge_t * rgep,uintptr_t mii,uint16_t data)185 rge_mii_put16(rge_t *rgep, uintptr_t mii, uint16_t data)
186 {
187 uint32_t regval;
188 uint32_t val32;
189 uint32_t i;
190
191 regval = (mii & PHY_REG_MASK) << PHY_REG_SHIFT;
192 regval |= data & PHY_DATA_MASK;
193 regval |= PHY_ACCESS_WR_FLAG;
194 rge_reg_put32(rgep, PHY_ACCESS_REG, regval);
195
196 /*
197 * Waiting for PHY writing OK
198 */
199 for (i = 0; i < PHY_RESET_LOOP; i++) {
200 drv_usecwait(1000);
201 val32 = rge_reg_get32(rgep, PHY_ACCESS_REG);
202 if (!(val32 & PHY_ACCESS_WR_FLAG))
203 return;
204 }
205 RGE_REPORT((rgep, "rge_mii_put16(0x%lx, 0x%x) fail",
206 mii, data));
207 }
208
209 void
rge_ephy_put16(rge_t * rgep,uintptr_t emii,uint16_t data)210 rge_ephy_put16(rge_t *rgep, uintptr_t emii, uint16_t data)
211 {
212 uint32_t regval;
213 uint32_t val32;
214 uint32_t i;
215
216 regval = (emii & EPHY_REG_MASK) << EPHY_REG_SHIFT;
217 regval |= data & EPHY_DATA_MASK;
218 regval |= EPHY_ACCESS_WR_FLAG;
219 rge_reg_put32(rgep, EPHY_ACCESS_REG, regval);
220
221 /*
222 * Waiting for PHY writing OK
223 */
224 for (i = 0; i < PHY_RESET_LOOP; i++) {
225 drv_usecwait(1000);
226 val32 = rge_reg_get32(rgep, EPHY_ACCESS_REG);
227 if (!(val32 & EPHY_ACCESS_WR_FLAG))
228 return;
229 }
230 RGE_REPORT((rgep, "rge_ephy_put16(0x%lx, 0x%x) fail",
231 emii, data));
232 }
233
234 /*
235 * Atomically shift a 32-bit word left, returning
236 * the value it had *before* the shift was applied
237 */
238 static uint32_t
rge_atomic_shl32(uint32_t * sp,uint_t count)239 rge_atomic_shl32(uint32_t *sp, uint_t count)
240 {
241 uint32_t oldval;
242 uint32_t newval;
243
244 /* ATOMICALLY */
245 do {
246 oldval = *sp;
247 newval = oldval << count;
248 } while (atomic_cas_32(sp, oldval, newval) != oldval);
249
250 return (oldval);
251 }
252
253 /*
254 * PHY operation routines
255 */
256 #if RGE_DEBUGGING
257
258 void
rge_phydump(rge_t * rgep)259 rge_phydump(rge_t *rgep)
260 {
261 uint16_t regs[32];
262 int i;
263
264 ASSERT(mutex_owned(rgep->genlock));
265
266 for (i = 0; i < 32; ++i) {
267 regs[i] = rge_mii_get16(rgep, i);
268 }
269
270 for (i = 0; i < 32; i += 8)
271 RGE_DEBUG(("rge_phydump: "
272 "0x%04x %04x %04x %04x %04x %04x %04x %04x",
273 regs[i+0], regs[i+1], regs[i+2], regs[i+3],
274 regs[i+4], regs[i+5], regs[i+6], regs[i+7]));
275 }
276
277 #endif /* RGE_DEBUGGING */
278
/*
 * Periodic PHY health check: works around the RTL8169S/8110S "PCS bug"
 * by forcibly resetting the PHY after the link has been down for a
 * while with 1000Mb/s full-duplex advertised.
 *
 * NOTE(review): link_down_count is incremented once per invocation;
 * the "15 seconds" in the comment below assumes this is called about
 * once a second -- confirm against the caller's cyclic/timer cadence.
 */
static void
rge_phy_check(rge_t *rgep)
{
	uint16_t gig_ctl;

	if (rgep->param_link_up == LINK_STATE_DOWN) {
		/*
		 * RTL8169S/8110S PHY has the "PCS bug".  Need to reset the
		 * PHY every 15 seconds when link is down & advertise is 1000.
		 */
		if (rgep->chipid.phy_ver == PHY_VER_S) {
			gig_ctl = rge_mii_get16(rgep, MII_1000BASE_T_CONTROL);
			if (gig_ctl & MII_1000BT_CTL_ADV_FDX) {
				rgep->link_down_count++;
				if (rgep->link_down_count > 15) {
					/* reset result deliberately ignored */
					(void) rge_phy_reset(rgep);
					rgep->stats.phy_reset++;
					rgep->link_down_count = 0;
				}
			}
		}
	} else {
		rgep->link_down_count = 0;
	}
}
304
305 /*
306 * Basic low-level function to reset the PHY.
307 * Doesn't incorporate any special-case workarounds.
308 *
309 * Returns TRUE on success, FALSE if the RESET bit doesn't clear
310 */
311 boolean_t
rge_phy_reset(rge_t * rgep)312 rge_phy_reset(rge_t *rgep)
313 {
314 uint16_t control;
315 uint_t count;
316
317 /*
318 * Set the PHY RESET bit, then wait up to 5 ms for it to self-clear
319 */
320 control = rge_mii_get16(rgep, MII_CONTROL);
321 rge_mii_put16(rgep, MII_CONTROL, control | MII_CONTROL_RESET);
322 for (count = 0; count < 5; count++) {
323 drv_usecwait(100);
324 control = rge_mii_get16(rgep, MII_CONTROL);
325 if (BIC(control, MII_CONTROL_RESET))
326 return (B_TRUE);
327 }
328
329 RGE_REPORT((rgep, "rge_phy_reset: FAILED, control now 0x%x", control));
330 return (B_FALSE);
331 }
332
/*
 * Synchronise the PHY's speed/duplex/autonegotiation capabilities
 * and advertisements with the required settings as specified by the various
 * param_* variables that can be poked via the NDD interface.
 *
 * We always reset the PHY and reprogram *all* the relevant registers,
 * not just those changed. This should cause the link to go down, and then
 * back up again once the link is stable and autonegotiation (if enabled)
 * is complete. We should get a link state change interrupt somewhere along
 * the way ...
 *
 * NOTE: <genlock> must already be held by the caller
 */
void
rge_phy_update(rge_t *rgep)
{
	boolean_t adv_autoneg;
	boolean_t adv_pause;
	boolean_t adv_asym_pause;
	boolean_t adv_1000fdx;
	boolean_t adv_1000hdx;
	boolean_t adv_100fdx;
	boolean_t adv_100hdx;
	boolean_t adv_10fdx;
	boolean_t adv_10hdx;

	uint16_t control;	/* image of MII_CONTROL (BMCR)		*/
	uint16_t gigctrl;	/* image of MII_1000BASE_T_CONTROL	*/
	uint16_t anar;		/* image of MII_AN_ADVERT (ANAR)	*/

	ASSERT(mutex_owned(rgep->genlock));

	RGE_DEBUG(("rge_phy_update: autoneg %d "
	    "pause %d asym_pause %d "
	    "1000fdx %d 1000hdx %d "
	    "100fdx %d 100hdx %d "
	    "10fdx %d 10hdx %d ",
	    rgep->param_adv_autoneg,
	    rgep->param_adv_pause, rgep->param_adv_asym_pause,
	    rgep->param_adv_1000fdx, rgep->param_adv_1000hdx,
	    rgep->param_adv_100fdx, rgep->param_adv_100hdx,
	    rgep->param_adv_10fdx, rgep->param_adv_10hdx));

	control = gigctrl = anar = 0;

	/*
	 * PHY settings are normally based on the param_* variables,
	 * but if any loopback mode is in effect, that takes precedence.
	 *
	 * RGE supports MAC-internal loopback, PHY-internal loopback,
	 * and External loopback at a variety of speeds (with a special
	 * cable). In all cases, autoneg is turned OFF, full-duplex
	 * is turned ON, and the speed/mastership is forced.
	 */
	switch (rgep->param_loop_mode) {
	case RGE_LOOP_NONE:
	default:
		/* normal operation: advertise whatever NDD asked for */
		adv_autoneg = rgep->param_adv_autoneg;
		adv_pause = rgep->param_adv_pause;
		adv_asym_pause = rgep->param_adv_asym_pause;
		adv_1000fdx = rgep->param_adv_1000fdx;
		adv_1000hdx = rgep->param_adv_1000hdx;
		adv_100fdx = rgep->param_adv_100fdx;
		adv_100hdx = rgep->param_adv_100hdx;
		adv_10fdx = rgep->param_adv_10fdx;
		adv_10hdx = rgep->param_adv_10hdx;
		break;

	case RGE_LOOP_INTERNAL_PHY:
	case RGE_LOOP_INTERNAL_MAC:
		/* loopback: autoneg off, full-duplex, forced speed */
		adv_autoneg = adv_pause = adv_asym_pause = B_FALSE;
		adv_1000fdx = adv_100fdx = adv_10fdx = B_FALSE;
		adv_1000hdx = adv_100hdx = adv_10hdx = B_FALSE;
		rgep->param_link_duplex = LINK_DUPLEX_FULL;

		switch (rgep->param_loop_mode) {
		case RGE_LOOP_INTERNAL_PHY:
			/* 8101E is a 100Mb/s part; others loop at 1Gb/s */
			if (rgep->chipid.mac_ver != MAC_VER_8101E) {
				rgep->param_link_speed = 1000;
				adv_1000fdx = B_TRUE;
			} else {
				rgep->param_link_speed = 100;
				adv_100fdx = B_TRUE;
			}
			control = MII_CONTROL_LOOPBACK;
			break;

		case RGE_LOOP_INTERNAL_MAC:
			if (rgep->chipid.mac_ver != MAC_VER_8101E) {
				rgep->param_link_speed = 1000;
				adv_1000fdx = B_TRUE;
			} else {
				rgep->param_link_speed = 100;
				adv_100fdx = B_TRUE;
			}
			break;
		}
	}

	RGE_DEBUG(("rge_phy_update: autoneg %d "
	    "pause %d asym_pause %d "
	    "1000fdx %d 1000hdx %d "
	    "100fdx %d 100hdx %d "
	    "10fdx %d 10hdx %d ",
	    adv_autoneg,
	    adv_pause, adv_asym_pause,
	    adv_1000fdx, adv_1000hdx,
	    adv_100fdx, adv_100hdx,
	    adv_10fdx, adv_10hdx));

	/*
	 * We should have at least one technology capability set;
	 * if not, we select a default of 1000Mb/s full-duplex
	 * (100Mb/s full-duplex on the 8101E)
	 */
	if (!adv_1000fdx && !adv_100fdx && !adv_10fdx &&
	    !adv_1000hdx && !adv_100hdx && !adv_10hdx) {
		if (rgep->chipid.mac_ver != MAC_VER_8101E) {
			adv_1000fdx = B_TRUE;
		} else {
			adv_100fdx = B_TRUE;
		}
	}

	/*
	 * Now transform the adv_* variables into the proper settings
	 * of the PHY registers ...
	 *
	 * If autonegotiation is (now) enabled, we want to trigger
	 * a new autonegotiation cycle once the PHY has been
	 * programmed with the capabilities to be advertised.
	 *
	 * RTL8169/8110 doesn't support 1000Mb/s half-duplex.
	 */
	if (adv_autoneg)
		control |= MII_CONTROL_ANE|MII_CONTROL_RSAN;

	/* forced-speed bits (only meaningful when autoneg is off) */
	if (adv_1000fdx)
		control |= MII_CONTROL_1GB|MII_CONTROL_FDUPLEX;
	else if (adv_1000hdx)
		control |= MII_CONTROL_1GB;
	else if (adv_100fdx)
		control |= MII_CONTROL_100MB|MII_CONTROL_FDUPLEX;
	else if (adv_100hdx)
		control |= MII_CONTROL_100MB;
	else if (adv_10fdx)
		control |= MII_CONTROL_FDUPLEX;
	else if (adv_10hdx)
		control |= 0;
	else
		{ _NOTE(EMPTY); }	/* Can't get here anyway ...	*/

	if (adv_1000fdx) {
		gigctrl |= MII_1000BT_CTL_ADV_FDX;
		/*
		 * Chipset limitation: need set other capabilities to true
		 */
		if (rgep->chipid.is_pcie)
			adv_1000hdx = B_TRUE;
		adv_100fdx = B_TRUE;
		adv_100hdx = B_TRUE;
		adv_10fdx = B_TRUE;
		adv_10hdx = B_TRUE;
	}

	if (adv_1000hdx)
		gigctrl |= MII_1000BT_CTL_ADV_HDX;

	/* 10/100 capabilities go in the autoneg advertisement register */
	if (adv_100fdx)
		anar |= MII_ABILITY_100BASE_TX_FD;
	if (adv_100hdx)
		anar |= MII_ABILITY_100BASE_TX;
	if (adv_10fdx)
		anar |= MII_ABILITY_10BASE_T_FD;
	if (adv_10hdx)
		anar |= MII_ABILITY_10BASE_T;

	if (adv_pause)
		anar |= MII_ABILITY_PAUSE;
	if (adv_asym_pause)
		anar |= MII_ABILITY_ASMPAUSE;

	/*
	 * Munge in any other fixed bits we require ...
	 */
	anar |= MII_AN_SELECTOR_8023;

	/*
	 * Restart the PHY and write the new values. Note the
	 * time, so that we can say whether subsequent link state
	 * changes can be attributed to our reprogramming the PHY
	 */
	rge_phy_init(rgep);
	if (rgep->chipid.mac_ver == MAC_VER_8168B_B ||
	    rgep->chipid.mac_ver == MAC_VER_8168B_C) {
		/* power up PHY for RTL8168B chipset */
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		rge_mii_put16(rgep, PHY_0E_REG, 0x0000);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
	}
	/* write order matters: advertisement before control/restart */
	rge_mii_put16(rgep, MII_AN_ADVERT, anar);
	rge_mii_put16(rgep, MII_1000BASE_T_CONTROL, gigctrl);
	rge_mii_put16(rgep, MII_CONTROL, control);

	RGE_DEBUG(("rge_phy_update: anar <- 0x%x", anar));
	RGE_DEBUG(("rge_phy_update: control <- 0x%x", control));
	RGE_DEBUG(("rge_phy_update: gigctrl <- 0x%x", gigctrl));
}
540
/*
 * Apply the chip-specific PHY initialisation sequence.
 *
 * The register/value pairs below are opaque vendor-recommended magic
 * copied from the Realtek Programming Guide; the guide gives no detailed
 * explanation of the individual steps.  Do not reorder or "clean up"
 * these writes.  MAC versions not listed in the switch intentionally
 * receive no fixup -- TODO confirm that is the Programming Guide's
 * intent for the remaining variants.
 */
void
rge_phy_init(rge_t *rgep)
{
	rgep->phy_mii_addr = 1;

	/*
	 * Below phy config steps are copied from the Programming Guide
	 * (there's no detail comments for these steps.)
	 */
	switch (rgep->chipid.mac_ver) {
	case MAC_VER_8169S_D:
	case MAC_VER_8169S_E:
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_15_REG, 0x1000);
		rge_mii_put16(rgep, PHY_18_REG, 0x65c7);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0x00a1);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0x0008);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x1020);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x1000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x0800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x7000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xff41);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xde60);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x0140);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x0077);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x7800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x7000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xa000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xdf01);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xdf20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0xff95);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0xfa00);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xa800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xa000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xb000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xff41);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xde20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x0140);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x00bb);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xb800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xb000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xf000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xdf01);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xdf20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0xff95);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0xbf00);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xf800);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0xf000);
		rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		rge_mii_put16(rgep, PHY_0B_REG, 0x0000);
		break;

	case MAC_VER_8169SB:
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_1B_REG, 0xD41E);
		rge_mii_put16(rgep, PHY_0E_REG, 0x7bff);
		rge_mii_put16(rgep, PHY_GBCR_REG, GBCR_DEFAULT);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0002);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x90D0);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		break;

	case MAC_VER_8169SC:
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_ANER_REG, 0x0078);
		rge_mii_put16(rgep, PHY_ANNPRR_REG, 0x05dc);
		rge_mii_put16(rgep, PHY_GBCR_REG, 0x2672);
		rge_mii_put16(rgep, PHY_GBSR_REG, 0x6a14);
		rge_mii_put16(rgep, PHY_0B_REG, 0x7cb0);
		rge_mii_put16(rgep, PHY_0C_REG, 0xdb80);
		rge_mii_put16(rgep, PHY_1B_REG, 0xc414);
		rge_mii_put16(rgep, PHY_1C_REG, 0xef03);
		rge_mii_put16(rgep, PHY_1D_REG, 0x3dc8);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0003);
		rge_mii_put16(rgep, PHY_13_REG, 0x0600);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		break;

	case MAC_VER_8168:
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_ANER_REG, 0x00aa);
		rge_mii_put16(rgep, PHY_ANNPTR_REG, 0x3173);
		rge_mii_put16(rgep, PHY_ANNPRR_REG, 0x08fc);
		rge_mii_put16(rgep, PHY_GBCR_REG, 0xe2d0);
		rge_mii_put16(rgep, PHY_0B_REG, 0x941a);
		rge_mii_put16(rgep, PHY_18_REG, 0x65fe);
		rge_mii_put16(rgep, PHY_1C_REG, 0x1e02);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0002);
		rge_mii_put16(rgep, PHY_ANNPTR_REG, 0x103e);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		break;

	case MAC_VER_8168B_B:
	case MAC_VER_8168B_C:
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_0B_REG, 0x94b0);
		rge_mii_put16(rgep, PHY_1B_REG, 0xc416);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0003);
		rge_mii_put16(rgep, PHY_12_REG, 0x6096);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		break;
	}
}
647
/*
 * Identify the chip (MAC and PHY versions, PCI vs PCIe) and derive the
 * buffer sizes, MTU limits and configuration defaults that depend on it.
 */
void
rge_chip_ident(rge_t *rgep)
{
	chip_id_t *chip = &rgep->chipid;
	uint32_t val32;
	uint16_t val16;

	/*
	 * Read and record MAC version
	 */
	val32 = rge_reg_get32(rgep, TX_CONFIG_REG);
	val32 &= HW_VERSION_ID_0 | HW_VERSION_ID_1;
	chip->mac_ver = val32;
	/* PCIe detection: presence of a PCI Express capability */
	chip->is_pcie = pci_lcap_locate(rgep->cfg_handle,
	    PCI_CAP_ID_PCI_E, &val16) == DDI_SUCCESS;

	/*
	 * Workaround for 8101E_C: treat it as non-PCIe even though it
	 * carries the PCIe capability
	 */
	chip->enable_mac_first = !chip->is_pcie;
	if (chip->mac_ver == MAC_VER_8101E_C) {
		chip->is_pcie = B_FALSE;
	}

	/*
	 * Read and record PHY version
	 */
	val16 = rge_mii_get16(rgep, PHY_ID_REG_2);
	val16 &= PHY_VER_MASK;
	chip->phy_ver = val16;

	/* set pci latency timer */
	if (chip->mac_ver == MAC_VER_8169 ||
	    chip->mac_ver == MAC_VER_8169S_D ||
	    chip->mac_ver == MAC_VER_8169S_E ||
	    chip->mac_ver == MAC_VER_8169SC)
		pci_config_put8(rgep->cfg_handle, PCI_CONF_LATENCY_TIMER, 0x40);

	if (chip->mac_ver == MAC_VER_8169SC) {
		val16 = rge_reg_get16(rgep, RT_CONFIG_1_REG);
		val16 &= 0x0300;
		/*
		 * NOTE(review): after masking with 0x0300 the only possible
		 * values are 0x0000/0x0100/0x0200/0x0300, so the 0x1
		 * comparison below can never match and the 66Mhz branch is
		 * dead code -- presumably 0x0100 was intended; confirm
		 * against the Realtek datasheet before changing.
		 */
		if (val16 == 0x1)	/* 66Mhz PCI */
			rge_reg_put32(rgep, 0x7c, 0x000700ff);
		else if (val16 == 0x0)	/* 33Mhz PCI */
			rge_reg_put32(rgep, 0x7c, 0x0007ff00);
	}

	/*
	 * PCIE chipset require the Rx buffer start address must be
	 * 8-byte alignment and the Rx buffer size must be multiple of 8.
	 * We'll just use bcopy in receive procedure for the PCIE chipset.
	 */
	if (chip->is_pcie) {
		rgep->chip_flags |= CHIP_FLAG_FORCE_BCOPY;
		if (rgep->default_mtu > ETHERMTU) {
			rge_notice(rgep, "Jumbo packets not supported "
			    "for this PCIE chipset");
			rgep->default_mtu = ETHERMTU;
		}
	}
	/* no headroom needed when we always bcopy received frames */
	if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
		rgep->head_room = 0;
	else
		rgep->head_room = RGE_HEADROOM;

	/*
	 * Initialize other variables: clamp the MTU into the supported
	 * range, then size the Tx/Rx buffers accordingly.
	 */
	if (rgep->default_mtu < ETHERMTU || rgep->default_mtu > RGE_JUMBO_MTU)
		rgep->default_mtu = ETHERMTU;
	if (rgep->default_mtu > ETHERMTU) {
		rgep->rxbuf_size = RGE_BUFF_SIZE_JUMBO;
		rgep->txbuf_size = RGE_BUFF_SIZE_JUMBO;
		rgep->ethmax_size = RGE_JUMBO_SIZE;
	} else {
		rgep->rxbuf_size = RGE_BUFF_SIZE_STD;
		rgep->txbuf_size = RGE_BUFF_SIZE_STD;
		rgep->ethmax_size = ETHERMAX;
	}
	chip->rxconfig = RX_CONFIG_DEFAULT;
	chip->txconfig = TX_CONFIG_DEFAULT;

	/* interval to update statistics for polling mode */
	rgep->tick_delta = drv_usectohz(1000*1000/CLK_TICK);

	/* ensure we are not in polling mode */
	rgep->curr_tick = ddi_get_lbolt() - 2*rgep->tick_delta;
	RGE_TRACE(("%s: MAC version = %x, PHY version = %x",
	    rgep->ifname, chip->mac_ver, chip->phy_ver));
}
738
/*
 * Perform first-stage chip (re-)initialisation, using only config-space
 * accesses:
 *
 * + Read the vendor/device/revision/subsystem/cache-line-size registers,
 *   returning the data in the structure pointed to by <cidp>.
 * + Enable Memory Space and I/O Space accesses.
 * + Enable Bus Mastering.
 */
748 void
rge_chip_cfg_init(rge_t * rgep,chip_id_t * cidp)749 rge_chip_cfg_init(rge_t *rgep, chip_id_t *cidp)
750 {
751 ddi_acc_handle_t handle;
752 uint16_t commd;
753
754 handle = rgep->cfg_handle;
755
756 /*
757 * Save PCI cache line size and subsystem vendor ID
758 */
759 cidp->command = pci_config_get16(handle, PCI_CONF_COMM);
760 cidp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
761 cidp->device = pci_config_get16(handle, PCI_CONF_DEVID);
762 cidp->subven = pci_config_get16(handle, PCI_CONF_SUBVENID);
763 cidp->subdev = pci_config_get16(handle, PCI_CONF_SUBSYSID);
764 cidp->revision = pci_config_get8(handle, PCI_CONF_REVID);
765 cidp->clsize = pci_config_get8(handle, PCI_CONF_CACHE_LINESZ);
766 cidp->latency = pci_config_get8(handle, PCI_CONF_LATENCY_TIMER);
767
768 /*
769 * Turn on Master Enable (DMA) and IO Enable bits.
770 * Enable PCI Memory Space accesses
771 */
772 commd = cidp->command;
773 commd |= PCI_COMM_ME | PCI_COMM_MAE | PCI_COMM_IO;
774 pci_config_put16(handle, PCI_CONF_COMM, commd);
775
776 RGE_DEBUG(("rge_chip_cfg_init: vendor 0x%x device 0x%x revision 0x%x",
777 cidp->vendor, cidp->device, cidp->revision));
778 RGE_DEBUG(("rge_chip_cfg_init: subven 0x%x subdev 0x%x",
779 cidp->subven, cidp->subdev));
780 RGE_DEBUG(("rge_chip_cfg_init: clsize %d latency %d command 0x%x",
781 cidp->clsize, cidp->latency, cidp->command));
782 }
783
784 int
rge_chip_reset(rge_t * rgep)785 rge_chip_reset(rge_t *rgep)
786 {
787 int i;
788 uint8_t val8;
789
790 /*
791 * Chip should be in STOP state
792 */
793 rge_reg_clr8(rgep, RT_COMMAND_REG,
794 RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
795
796 /*
797 * Disable interrupt
798 */
799 rgep->int_mask = INT_MASK_NONE;
800 rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);
801
802 /*
803 * Clear pended interrupt
804 */
805 rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL);
806
807 /*
808 * Reset chip
809 */
810 rge_reg_set8(rgep, RT_COMMAND_REG, RT_COMMAND_RESET);
811
812 /*
813 * Wait for reset success
814 */
815 for (i = 0; i < CHIP_RESET_LOOP; i++) {
816 drv_usecwait(10);
817 val8 = rge_reg_get8(rgep, RT_COMMAND_REG);
818 if (!(val8 & RT_COMMAND_RESET)) {
819 rgep->rge_chip_state = RGE_CHIP_RESET;
820 return (0);
821 }
822 }
823 RGE_REPORT((rgep, "rge_chip_reset fail."));
824 return (-1);
825 }
826
/*
 * Bring the (already reset) chip up to an operational configuration:
 * vendor fixups, PHY programming, checksum/vlan offload, packet sizes,
 * Tx/Rx configuration, descriptor ring base addresses, multicast filter
 * and miscellaneous counters/timers.  The write ordering below follows
 * the vendor's programming sequence; do not reorder.
 */
void
rge_chip_init(rge_t *rgep)
{
	uint32_t val32;
	uint32_t val16;	/* holds 16-bit register images; 32-bit on purpose */
	uint32_t *hashp;
	chip_id_t *chip = &rgep->chipid;

	/*
	 * Increase the threshold voltage of RX sensitivity
	 * (vendor-recommended EPHY fixup for these MAC versions)
	 */
	if (chip->mac_ver == MAC_VER_8168B_B ||
	    chip->mac_ver == MAC_VER_8168B_C ||
	    chip->mac_ver == MAC_VER_8101E) {
		rge_ephy_put16(rgep, 0x01, 0x1bd3);
	}

	/*
	 * Opaque vendor-recommended CSI register sequence for
	 * 8168/8168B_B -- values from the Realtek programming guide
	 */
	if (chip->mac_ver == MAC_VER_8168 ||
	    chip->mac_ver == MAC_VER_8168B_B) {
		val16 = rge_reg_get8(rgep, PHY_STATUS_REG);
		val16 = 0x12<<8 | val16;
		rge_reg_put16(rgep, PHY_STATUS_REG, val16);
		rge_reg_put32(rgep, RT_CSI_DATA_REG, 0x00021c01);
		rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f088);
		rge_reg_put32(rgep, RT_CSI_DATA_REG, 0x00004000);
		rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f0b0);
		rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x0000f068);
		val32 = rge_reg_get32(rgep, RT_CSI_DATA_REG);
		val32 |= 0x7000;
		val32 &= 0xffff5fff;
		rge_reg_put32(rgep, RT_CSI_DATA_REG, val32);
		rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f068);
	}

	/*
	 * Config MII register
	 */
	rgep->param_link_up = LINK_STATE_DOWN;
	rge_phy_update(rgep);

	/*
	 * Enable Rx checksum offload.
	 * Then for vlan support, we must enable receive vlan de-tagging.
	 * Otherwise, there'll be checksum error.
	 */
	val16 = rge_reg_get16(rgep, CPLUS_COMMAND_REG);
	val16 |= RX_CKSM_OFFLOAD | RX_VLAN_DETAG;
	if (chip->mac_ver == MAC_VER_8169S_D) {
		val16 |= CPLUS_BIT14 | MUL_PCI_RW_ENABLE;
		rge_reg_put8(rgep, RESV_82_REG, 0x01);
	}
	if (chip->mac_ver == MAC_VER_8169S_E ||
	    chip->mac_ver == MAC_VER_8169SC) {
		val16 |= MUL_PCI_RW_ENABLE;
	}
	/* low two bits cleared -- presumably reserved; confirm vs datasheet */
	rge_reg_put16(rgep, CPLUS_COMMAND_REG, val16 & (~0x03));

	/*
	 * Start transmit/receive before set tx/rx configuration register
	 * (required on the non-PCIe variants -- see enable_mac_first)
	 */
	if (chip->enable_mac_first)
		rge_reg_set8(rgep, RT_COMMAND_REG,
		    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);

	/*
	 * Change to config register write enable mode
	 */
	rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);

	/*
	 * Set Tx/Rx maximum packet size
	 */
	if (rgep->default_mtu > ETHERMTU) {
		rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_JUMBO);
		rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_JUMBO);
	} else if (rgep->chipid.mac_ver != MAC_VER_8101E) {
		rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_STD);
		rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_STD);
	} else {
		rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_STD_8101E);
		rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_STD_8101E);
	}

	/*
	 * Set receive configuration register: keep only the reserved
	 * bits, then merge in our defaults (and promiscuous mode if set)
	 */
	val32 = rge_reg_get32(rgep, RX_CONFIG_REG);
	val32 &= RX_CONFIG_REG_RESV;
	if (rgep->promisc)
		val32 |= RX_ACCEPT_ALL_PKT;
	rge_reg_put32(rgep, RX_CONFIG_REG, val32 | chip->rxconfig);

	/*
	 * Set transmit configuration register
	 */
	val32 = rge_reg_get32(rgep, TX_CONFIG_REG);
	val32 &= TX_CONFIG_REG_RESV;
	rge_reg_put32(rgep, TX_CONFIG_REG, val32 | chip->txconfig);

	/*
	 * Set Tx/Rx descriptor ring base addresses (64-bit DMA addresses
	 * split across lo/hi registers; the high-priority Tx ring is unused)
	 */
	val32 = rgep->tx_desc.cookie.dmac_laddress;
	rge_reg_put32(rgep, NORMAL_TX_RING_ADDR_LO_REG, val32);
	val32 = rgep->tx_desc.cookie.dmac_laddress >> 32;
	rge_reg_put32(rgep, NORMAL_TX_RING_ADDR_HI_REG, val32);
	rge_reg_put32(rgep, HIGH_TX_RING_ADDR_LO_REG, 0);
	rge_reg_put32(rgep, HIGH_TX_RING_ADDR_HI_REG, 0);
	val32 = rgep->rx_desc.cookie.dmac_laddress;
	rge_reg_put32(rgep, RX_RING_ADDR_LO_REG, val32);
	val32 = rgep->rx_desc.cookie.dmac_laddress >> 32;
	rge_reg_put32(rgep, RX_RING_ADDR_HI_REG, val32);

	/*
	 * Suggested setting from Realtek
	 */
	if (rgep->chipid.mac_ver != MAC_VER_8101E)
		rge_reg_put16(rgep, RESV_E2_REG, 0x282a);
	else
		rge_reg_put16(rgep, RESV_E2_REG, 0x0000);

	/*
	 * Set multicast register: all-ones in promiscuous mode,
	 * otherwise the byte-swapped 64-bit hash filter
	 */
	hashp = (uint32_t *)rgep->mcast_hash;
	if (rgep->promisc) {
		rge_reg_put32(rgep, MULTICAST_0_REG, ~0U);
		rge_reg_put32(rgep, MULTICAST_4_REG, ~0U);
	} else {
		rge_reg_put32(rgep, MULTICAST_0_REG, RGE_BSWAP_32(hashp[0]));
		rge_reg_put32(rgep, MULTICAST_4_REG, RGE_BSWAP_32(hashp[1]));
	}

	/*
	 * Misc register setting:
	 *	-- Missed packet counter: clear it
	 *	-- TimerInt Register
	 *	-- Timer count register
	 */
	rge_reg_put32(rgep, RX_PKT_MISS_COUNT_REG, 0);
	rge_reg_put32(rgep, TIMER_INT_REG, TIMER_INT_NONE);
	rge_reg_put32(rgep, TIMER_COUNT_REG, 0);

	/*
	 * disable the Unicast Wakeup Frame capability
	 */
	rge_reg_clr8(rgep, RT_CONFIG_5_REG, RT_UNI_WAKE_FRAME);

	/*
	 * Return to normal network/host communication mode
	 */
	rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
	drv_usecwait(20);
}
981
982 /*
983 * rge_chip_start() -- start the chip transmitting and/or receiving,
984 * including enabling interrupts
985 */
986 void
rge_chip_start(rge_t * rgep)987 rge_chip_start(rge_t *rgep)
988 {
989 /*
990 * Clear statistics
991 */
992 bzero(&rgep->stats, sizeof (rge_stats_t));
993 DMA_ZERO(rgep->dma_area_stats);
994
995 /*
996 * Start transmit/receive
997 */
998 rge_reg_set8(rgep, RT_COMMAND_REG,
999 RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
1000
1001 /*
1002 * Enable interrupt
1003 */
1004 rgep->int_mask = RGE_INT_MASK;
1005 if (rgep->chipid.is_pcie) {
1006 rgep->int_mask |= NO_TXDESC_INT;
1007 }
1008 rgep->rx_fifo_ovf = 0;
1009 rgep->int_mask |= RX_FIFO_OVERFLOW_INT;
1010 rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);
1011
1012 /*
1013 * All done!
1014 */
1015 rgep->rge_chip_state = RGE_CHIP_RUNNING;
1016 }
1017
1018 /*
1019 * rge_chip_stop() -- stop board receiving
1020 *
1021 * Since this function is also invoked by rge_quiesce(), it
1022 * must not block; also, no tracing or logging takes place
1023 * when invoked by rge_quiesce().
1024 */
1025 void
rge_chip_stop(rge_t * rgep,boolean_t fault)1026 rge_chip_stop(rge_t *rgep, boolean_t fault)
1027 {
1028 /*
1029 * Disable interrupt
1030 */
1031 rgep->int_mask = INT_MASK_NONE;
1032 rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);
1033
1034 /*
1035 * Clear pended interrupt
1036 */
1037 if (!rgep->suspended) {
1038 rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL);
1039 }
1040
1041 /*
1042 * Stop the board and disable transmit/receive
1043 */
1044 rge_reg_clr8(rgep, RT_COMMAND_REG,
1045 RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
1046
1047 if (fault)
1048 rgep->rge_chip_state = RGE_CHIP_FAULT;
1049 else
1050 rgep->rge_chip_state = RGE_CHIP_STOPPED;
1051 }
1052
1053 /*
1054 * rge_get_mac_addr() -- get the MAC address on NIC
1055 */
1056 static void
rge_get_mac_addr(rge_t * rgep)1057 rge_get_mac_addr(rge_t *rgep)
1058 {
1059 uint8_t *macaddr = rgep->netaddr;
1060 uint32_t val32;
1061
1062 /*
1063 * Read first 4-byte of mac address
1064 */
1065 val32 = rge_reg_get32(rgep, ID_0_REG);
1066 macaddr[0] = val32 & 0xff;
1067 val32 = val32 >> 8;
1068 macaddr[1] = val32 & 0xff;
1069 val32 = val32 >> 8;
1070 macaddr[2] = val32 & 0xff;
1071 val32 = val32 >> 8;
1072 macaddr[3] = val32 & 0xff;
1073
1074 /*
1075 * Read last 2-byte of mac address
1076 */
1077 val32 = rge_reg_get32(rgep, ID_4_REG);
1078 macaddr[4] = val32 & 0xff;
1079 val32 = val32 >> 8;
1080 macaddr[5] = val32 & 0xff;
1081 }
1082
1083 static void
rge_set_mac_addr(rge_t * rgep)1084 rge_set_mac_addr(rge_t *rgep)
1085 {
1086 uint8_t *p = rgep->netaddr;
1087 uint32_t val32;
1088
1089 /*
1090 * Change to config register write enable mode
1091 */
1092 rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
1093
1094 /*
1095 * Get first 4 bytes of mac address
1096 */
1097 val32 = p[3];
1098 val32 = val32 << 8;
1099 val32 |= p[2];
1100 val32 = val32 << 8;
1101 val32 |= p[1];
1102 val32 = val32 << 8;
1103 val32 |= p[0];
1104
1105 /*
1106 * Set first 4 bytes of mac address
1107 */
1108 rge_reg_put32(rgep, ID_0_REG, val32);
1109
1110 /*
1111 * Get last 2 bytes of mac address
1112 */
1113 val32 = p[5];
1114 val32 = val32 << 8;
1115 val32 |= p[4];
1116
1117 /*
1118 * Set last 2 bytes of mac address
1119 */
1120 val32 |= rge_reg_get32(rgep, ID_4_REG) & ~0xffff;
1121 rge_reg_put32(rgep, ID_4_REG, val32);
1122
1123 /*
1124 * Return to normal network/host communication mode
1125 */
1126 rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
1127 }
1128
1129 static void
rge_set_multi_addr(rge_t * rgep)1130 rge_set_multi_addr(rge_t *rgep)
1131 {
1132 uint32_t *hashp;
1133
1134 hashp = (uint32_t *)rgep->mcast_hash;
1135
1136 /*
1137 * Change to config register write enable mode
1138 */
1139 if (rgep->chipid.mac_ver == MAC_VER_8169SC) {
1140 rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
1141 }
1142 if (rgep->promisc) {
1143 rge_reg_put32(rgep, MULTICAST_0_REG, ~0U);
1144 rge_reg_put32(rgep, MULTICAST_4_REG, ~0U);
1145 } else {
1146 rge_reg_put32(rgep, MULTICAST_0_REG, RGE_BSWAP_32(hashp[0]));
1147 rge_reg_put32(rgep, MULTICAST_4_REG, RGE_BSWAP_32(hashp[1]));
1148 }
1149
1150 /*
1151 * Return to normal network/host communication mode
1152 */
1153 if (rgep->chipid.mac_ver == MAC_VER_8169SC) {
1154 rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
1155 }
1156 }
1157
1158 static void
rge_set_promisc(rge_t * rgep)1159 rge_set_promisc(rge_t *rgep)
1160 {
1161 if (rgep->promisc)
1162 rge_reg_set32(rgep, RX_CONFIG_REG, RX_ACCEPT_ALL_PKT);
1163 else
1164 rge_reg_clr32(rgep, RX_CONFIG_REG, RX_ACCEPT_ALL_PKT);
1165 }
1166
1167 /*
1168 * rge_chip_sync() -- program the chip with the unicast MAC address,
1169 * the multicast hash table, the required level of promiscuity, and
1170 * the current loopback mode ...
1171 */
1172 void
rge_chip_sync(rge_t * rgep,enum rge_sync_op todo)1173 rge_chip_sync(rge_t *rgep, enum rge_sync_op todo)
1174 {
1175 switch (todo) {
1176 case RGE_GET_MAC:
1177 rge_get_mac_addr(rgep);
1178 break;
1179 case RGE_SET_MAC:
1180 /* Reprogram the unicast MAC address(es) ... */
1181 rge_set_mac_addr(rgep);
1182 break;
1183 case RGE_SET_MUL:
1184 /* Reprogram the hashed multicast address table ... */
1185 rge_set_multi_addr(rgep);
1186 break;
1187 case RGE_SET_PROMISC:
1188 /* Set or clear the PROMISCUOUS mode bit */
1189 rge_set_multi_addr(rgep);
1190 rge_set_promisc(rgep);
1191 break;
1192 default:
1193 break;
1194 }
1195 }
1196
/* ARGSUSED */
void
rge_chip_blank(void *arg, time_t ticks, uint_t count, int flag)
{
	/*
	 * Deliberate no-op: interrupt blanking is not implemented for
	 * this device, but the entry point is kept for its callers.
	 */
	_NOTE(ARGUNUSED(arg, ticks, count));
}
1203
void
rge_tx_trigger(rge_t *rgep)
{
	/*
	 * Poke the chip to start polling the normal-priority
	 * transmit ring for newly queued descriptors.
	 */
	rge_reg_put8(rgep, TX_RINGS_POLL_REG, NORMAL_TX_RING_POLL);
}
1209
void
rge_hw_stats_dump(rge_t *rgep)
{
	uint32_t regval = 0;

	/* nothing to do (and nothing safe to poke) if the MAC is stopped */
	if (rgep->rge_mac_state == RGE_MAC_STOPPED)
		return;

	/*
	 * Set the stats counter dump address. First, set the high part of the
	 * address and read it back to ensure it's flushed out.
	 */
	rge_reg_put32(rgep, DUMP_COUNTER_REG_1,
	    rgep->dma_area_stats.cookie.dmac_laddress >> 32);
	(void) rge_reg_get32(rgep, DUMP_COUNTER_REG_1);

	/*
	 * Then set the low part of the address, preserving the reserved bits.
	 * (The 64->32 bit truncation of dmac_laddress here is intentional:
	 * the high half was programmed into DUMP_COUNTER_REG_1 above.)
	 */
	regval = rge_reg_get32(rgep, DUMP_COUNTER_REG_0);
	regval &= DUMP_COUNTER_REG_RESV;
	regval |= rgep->dma_area_stats.cookie.dmac_laddress;
	rge_reg_put32(rgep, DUMP_COUNTER_REG_0, regval);

	/*
	 * Set the command bit to start dumping statistics:
	 */
	regval = rge_reg_get32(rgep, DUMP_COUNTER_REG_0);
	rge_reg_put32(rgep, DUMP_COUNTER_REG_0, regval | DUMP_START);

	/* busy-wait up to STATS_DUMP_LOOP * 100us for completion */
	for (uint_t i = 0; i < STATS_DUMP_LOOP; i++) {
		drv_usecwait(100);

		/*
		 * The chip clears DUMP_START when the dump has completed;
		 * then sync the DMA area so the kernel sees fresh counters.
		 */
		regval = rge_reg_get32(rgep, DUMP_COUNTER_REG_0);
		if ((regval & DUMP_START) == 0) {
			DMA_SYNC(rgep->dma_area_stats, DDI_DMA_SYNC_FORKERNEL);
			return;
		}
	}

	/* timed out: mark the chip for reset by the factotum */
	RGE_DEBUG(("rge h/w statistics dump fail!"));
	rgep->rge_chip_state = RGE_CHIP_ERROR;
}
1256
1257 /*
1258 * ========== Hardware interrupt handler ==========
1259 */
1260
1261 #undef RGE_DBG
1262 #define RGE_DBG RGE_DBG_INT /* debug flag for this code */
1263
1264 static void
rge_wake_factotum(rge_t * rgep)1265 rge_wake_factotum(rge_t *rgep)
1266 {
1267 if (rgep->factotum_flag == 0) {
1268 rgep->factotum_flag = 1;
1269 (void) ddi_intr_trigger_softint(rgep->factotum_hdl, NULL);
1270 }
1271 }
1272
1273 /*
1274 * rge_intr() -- handle chip interrupts
1275 */
uint_t
rge_intr(caddr_t arg1, caddr_t arg2)
{
	rge_t *rgep = (rge_t *)arg1;
	uint16_t int_status;
	clock_t now;
	uint32_t tx_pkts;
	uint32_t rx_pkts;
	uint32_t poll_rate;
	uint32_t opt_pkts;
	uint32_t opt_intrs;
	boolean_t update_int_mask = B_FALSE;
	uint32_t itimer;

	_NOTE(ARGUNUSED(arg2))

	mutex_enter(rgep->genlock);

	/* the hardware must not be touched while suspended */
	if (rgep->suspended) {
		mutex_exit(rgep->genlock);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Was this interrupt caused by our device...
	 */
	int_status = rge_reg_get16(rgep, INT_STATUS_REG);
	if (!(int_status & rgep->int_mask)) {
		mutex_exit(rgep->genlock);
		return (DDI_INTR_UNCLAIMED);
		/* indicate it wasn't our interrupt */
	}
	rgep->stats.intr++;

	/*
	 * Clear interrupt
	 * For PCIE chipset, we need disable interrupt first.
	 */
	if (rgep->chipid.is_pcie) {
		rge_reg_put16(rgep, INT_MASK_REG, INT_MASK_NONE);
		update_int_mask = B_TRUE;
	}
	rge_reg_put16(rgep, INT_STATUS_REG, int_status);

	/*
	 * Calculate optimal polling interval: once per tick_delta (and
	 * only at 100M/1000M link speed), compare last-tick traffic to
	 * the link's theoretical packet rate and decide whether to
	 * coalesce TX/RX interrupts via the chip's timer interrupt.
	 */
	now = ddi_get_lbolt();
	if (now - rgep->curr_tick >= rgep->tick_delta &&
	    (rgep->param_link_speed == RGE_SPEED_1000M ||
	    rgep->param_link_speed == RGE_SPEED_100M)) {
		/* number of rx and tx packets in the last tick */
		tx_pkts = rgep->stats.opackets - rgep->last_opackets;
		rx_pkts = rgep->stats.rpackets - rgep->last_rpackets;

		rgep->last_opackets = rgep->stats.opackets;
		rgep->last_rpackets = rgep->stats.rpackets;

		/* restore interrupt mask */
		rgep->int_mask |= TX_OK_INT | RX_OK_INT;
		if (rgep->chipid.is_pcie) {
			rgep->int_mask |= NO_TXDESC_INT;
		}

		/* optimal number of packets in a tick */
		if (rgep->param_link_speed == RGE_SPEED_1000M) {
			opt_pkts = (1000*1000*1000/8)/ETHERMTU/CLK_TICK;
		} else {
			opt_pkts = (100*1000*1000/8)/ETHERMTU/CLK_TICK;
		}

		/*
		 * calculate polling interval based on rx and tx packets
		 * in the last tick
		 */
		poll_rate = 0;
		if (now - rgep->curr_tick < 2*rgep->tick_delta) {
			opt_intrs = opt_pkts/TX_COALESC;
			if (tx_pkts > opt_intrs) {
				poll_rate = max(tx_pkts/TX_COALESC, opt_intrs);
				rgep->int_mask &= ~(TX_OK_INT | NO_TXDESC_INT);
			}

			opt_intrs = opt_pkts/RX_COALESC;
			if (rx_pkts > opt_intrs) {
				opt_intrs = max(rx_pkts/RX_COALESC, opt_intrs);
				poll_rate = max(opt_intrs, poll_rate);
				rgep->int_mask &= ~RX_OK_INT;
			}
			/* ensure poll_rate reasonable */
			poll_rate = min(poll_rate, opt_pkts*4);
		}

		if (poll_rate) {
			/* move to polling mode */
			if (rgep->chipid.is_pcie) {
				itimer = (TIMER_CLK_PCIE/CLK_TICK)/poll_rate;
			} else {
				itimer = (TIMER_CLK_PCI/CLK_TICK)/poll_rate;
			}
		} else {
			/* move to normal mode */
			itimer = 0;
		}
		RGE_DEBUG(("%s: poll: itimer:%d int_mask:0x%x",
		    __func__, itimer, rgep->int_mask));
		rge_reg_put32(rgep, TIMER_INT_REG, itimer);

		/* update timestamp for statistics */
		rgep->curr_tick = now;

		/* reset timer */
		int_status |= TIME_OUT_INT;

		update_int_mask = B_TRUE;
	}

	if (int_status & TIME_OUT_INT) {
		rge_reg_put32(rgep, TIMER_COUNT_REG, 0);
	}

	/* flush post writes */
	(void) rge_reg_get16(rgep, INT_STATUS_REG);

	/*
	 * Cable link change interrupt
	 */
	if (int_status & LINK_CHANGE_INT) {
		rge_chip_cyclic(rgep);
	}

	/*
	 * RX FIFO overflow: stop masking the condition so the stall
	 * checker can see it; a later normal receive re-arms the mask.
	 */
	if (int_status & RX_FIFO_OVERFLOW_INT) {
		/* start rx watchdog timeout detection */
		rgep->rx_fifo_ovf = 1;
		if (rgep->int_mask & RX_FIFO_OVERFLOW_INT) {
			rgep->int_mask &= ~RX_FIFO_OVERFLOW_INT;
			update_int_mask = B_TRUE;
		}
	} else if (int_status & RGE_RX_INT) {
		/* stop rx watchdog timeout detection */
		rgep->rx_fifo_ovf = 0;
		if ((rgep->int_mask & RX_FIFO_OVERFLOW_INT) == 0) {
			rgep->int_mask |= RX_FIFO_OVERFLOW_INT;
			update_int_mask = B_TRUE;
		}
	}

	/* rge_receive()/softints below must run without genlock held */
	mutex_exit(rgep->genlock);

	/*
	 * Receive interrupt
	 */
	if (int_status & RGE_RX_INT)
		rge_receive(rgep);

	/*
	 * Transmit interrupt
	 */
	if (int_status & TX_ERR_INT) {
		RGE_REPORT((rgep, "tx error happened, resetting the chip "));
		mutex_enter(rgep->genlock);
		rgep->rge_chip_state = RGE_CHIP_ERROR;
		mutex_exit(rgep->genlock);
	} else if ((rgep->chipid.is_pcie && (int_status & NO_TXDESC_INT)) ||
	    ((int_status & TX_OK_INT) && rgep->tx_free < RGE_SEND_SLOTS/8)) {
		/* tx resources are getting low: kick the reschedule softint */
		(void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL);
	}

	/*
	 * System error interrupt
	 */
	if (int_status & SYS_ERR_INT) {
		RGE_REPORT((rgep, "sys error happened, resetting the chip "));
		mutex_enter(rgep->genlock);
		rgep->rge_chip_state = RGE_CHIP_ERROR;
		mutex_exit(rgep->genlock);
	}

	/*
	 * Re-enable interrupt for PCIE chipset or install new int_mask
	 */
	if (update_int_mask)
		rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);

	return (DDI_INTR_CLAIMED);	/* indicate it was our interrupt */
}
1462
1463 /*
1464 * ========== Factotum, implemented as a softint handler ==========
1465 */
1466
1467 #undef RGE_DBG
1468 #define RGE_DBG RGE_DBG_FACT /* debug flag for this code */
1469
1470 static boolean_t
rge_factotum_link_check(rge_t * rgep)1471 rge_factotum_link_check(rge_t *rgep)
1472 {
1473 uint8_t media_status;
1474 int32_t link;
1475
1476 media_status = rge_reg_get8(rgep, PHY_STATUS_REG);
1477 link = (media_status & PHY_STATUS_LINK_UP) ?
1478 LINK_STATE_UP : LINK_STATE_DOWN;
1479 if (rgep->param_link_up != link) {
1480 /*
1481 * Link change.
1482 */
1483 rgep->param_link_up = link;
1484
1485 if (link == LINK_STATE_UP) {
1486 if (media_status & PHY_STATUS_1000MF) {
1487 rgep->param_link_speed = RGE_SPEED_1000M;
1488 rgep->param_link_duplex = LINK_DUPLEX_FULL;
1489 } else {
1490 rgep->param_link_speed =
1491 (media_status & PHY_STATUS_100M) ?
1492 RGE_SPEED_100M : RGE_SPEED_10M;
1493 rgep->param_link_duplex =
1494 (media_status & PHY_STATUS_DUPLEX_FULL) ?
1495 LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
1496 }
1497 }
1498 return (B_TRUE);
1499 }
1500 return (B_FALSE);
1501 }
1502
1503 /*
1504 * Factotum routine to check for Tx stall, using the 'watchdog' counter
1505 */
static boolean_t
rge_factotum_stall_check(rge_t *rgep)
{
	uint32_t dogval;

	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Specific check for RX stall ...
	 *
	 * rx_fifo_ovf is set to 1 by rge_intr() on RX FIFO overflow and
	 * cleared again on a normal receive; shifting it left on every
	 * factotum pass means a *persistent* overflow grows until it
	 * crosses rge_rx_watchdog_count, at which point we declare a hang.
	 */
	rgep->rx_fifo_ovf <<= 1;
	if (rgep->rx_fifo_ovf > rge_rx_watchdog_count) {
		RGE_REPORT((rgep, "rx_hang detected"));
		return (B_TRUE);
	}

	/*
	 * Specific check for Tx stall ...
	 *
	 * The 'watchdog' counter is incremented whenever a packet
	 * is queued, reset to 1 when some (but not all) buffers
	 * are reclaimed, reset to 0 (disabled) when all buffers
	 * are reclaimed, and shifted left here. If it exceeds the
	 * threshold value, the chip is assumed to have stalled and
	 * is put into the ERROR state. The factotum will then reset
	 * it on the next pass.
	 *
	 * All of which should ensure that we don't get into a state
	 * where packets are left pending indefinitely!
	 */
	if (rgep->resched_needed)
		(void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL);
	dogval = rge_atomic_shl32(&rgep->watchdog, 1);
	if (dogval < rge_watchdog_count)
		return (B_FALSE);

	RGE_REPORT((rgep, "Tx stall detected, watchdog code 0x%x", dogval));
	return (B_TRUE);

}
1546
1547 /*
1548 * The factotum is woken up when there's something to do that we'd rather
1549 * not do from inside a hardware interrupt handler or high-level cyclic.
1550 * Its two main tasks are:
1551 * reset & restart the chip after an error
1552 * check the link status whenever necessary
1553 */
uint_t
rge_chip_factotum(caddr_t arg1, caddr_t arg2)
{
	rge_t *rgep;
	uint_t result;
	boolean_t error;
	boolean_t linkchg;

	rgep = (rge_t *)arg1;
	_NOTE(ARGUNUSED(arg2))

	/* spurious trigger: factotum_flag is set by rge_wake_factotum() */
	if (rgep->factotum_flag == 0)
		return (DDI_INTR_UNCLAIMED);

	rgep->factotum_flag = 0;
	result = DDI_INTR_CLAIMED;
	error = B_FALSE;
	linkchg = B_FALSE;

	mutex_enter(rgep->genlock);
	switch (rgep->rge_chip_state) {
	default:
		break;

	case RGE_CHIP_RUNNING:
		/* normal case: watch the link and the tx/rx watchdogs */
		linkchg = rge_factotum_link_check(rgep);
		error = rge_factotum_stall_check(rgep);
		break;

	case RGE_CHIP_ERROR:
		/* stop the chip below; it will be reset on the next pass */
		error = B_TRUE;
		break;

	case RGE_CHIP_FAULT:
		/*
		 * Fault detected, time to reset ...
		 */
		if (rge_autorecover) {
			RGE_REPORT((rgep, "automatic recovery activated"));
			rge_restart(rgep);
		}
		break;
	}

	/*
	 * If an error is detected, stop the chip now, marking it as
	 * faulty, so that it will be reset next time through ...
	 */
	if (error)
		rge_chip_stop(rgep, B_TRUE);
	mutex_exit(rgep->genlock);

	/*
	 * If the link state changed, tell the world about it.
	 * Note: can't do this while still holding the mutex.
	 */
	if (linkchg)
		mac_link_update(rgep->mh, rgep->param_link_up);

	return (result);
}
1615
1616 /*
1617 * High-level cyclic handler
1618 *
1619 * This routine schedules a (low-level) softint callback to the
1620 * factotum, and prods the chip to update the status block (which
1621 * will cause a hardware interrupt when complete).
1622 */
1623 void
rge_chip_cyclic(void * arg)1624 rge_chip_cyclic(void *arg)
1625 {
1626 rge_t *rgep;
1627
1628 rgep = arg;
1629
1630 switch (rgep->rge_chip_state) {
1631 default:
1632 return;
1633
1634 case RGE_CHIP_RUNNING:
1635 rge_phy_check(rgep);
1636 if (rgep->tx_free < RGE_SEND_SLOTS)
1637 rge_send_recycle(rgep);
1638 break;
1639
1640 case RGE_CHIP_FAULT:
1641 case RGE_CHIP_ERROR:
1642 break;
1643 }
1644
1645 rge_wake_factotum(rgep);
1646 }
1647
1648
1649 /*
1650 * ========== Ioctl subfunctions ==========
1651 */
1652
1653 #undef RGE_DBG
1654 #define RGE_DBG RGE_DBG_PPIO /* debug flag for this code */
1655
1656 #if RGE_DEBUGGING || RGE_DO_PPIO
1657
1658 static void
rge_chip_peek_cfg(rge_t * rgep,rge_peekpoke_t * ppd)1659 rge_chip_peek_cfg(rge_t *rgep, rge_peekpoke_t *ppd)
1660 {
1661 uint64_t regval;
1662 uint64_t regno;
1663
1664 RGE_TRACE(("rge_chip_peek_cfg($%p, $%p)",
1665 (void *)rgep, (void *)ppd));
1666
1667 regno = ppd->pp_acc_offset;
1668
1669 switch (ppd->pp_acc_size) {
1670 case 1:
1671 regval = pci_config_get8(rgep->cfg_handle, regno);
1672 break;
1673
1674 case 2:
1675 regval = pci_config_get16(rgep->cfg_handle, regno);
1676 break;
1677
1678 case 4:
1679 regval = pci_config_get32(rgep->cfg_handle, regno);
1680 break;
1681
1682 case 8:
1683 regval = pci_config_get64(rgep->cfg_handle, regno);
1684 break;
1685 }
1686
1687 ppd->pp_acc_data = regval;
1688 }
1689
1690 static void
rge_chip_poke_cfg(rge_t * rgep,rge_peekpoke_t * ppd)1691 rge_chip_poke_cfg(rge_t *rgep, rge_peekpoke_t *ppd)
1692 {
1693 uint64_t regval;
1694 uint64_t regno;
1695
1696 RGE_TRACE(("rge_chip_poke_cfg($%p, $%p)",
1697 (void *)rgep, (void *)ppd));
1698
1699 regno = ppd->pp_acc_offset;
1700 regval = ppd->pp_acc_data;
1701
1702 switch (ppd->pp_acc_size) {
1703 case 1:
1704 pci_config_put8(rgep->cfg_handle, regno, regval);
1705 break;
1706
1707 case 2:
1708 pci_config_put16(rgep->cfg_handle, regno, regval);
1709 break;
1710
1711 case 4:
1712 pci_config_put32(rgep->cfg_handle, regno, regval);
1713 break;
1714
1715 case 8:
1716 pci_config_put64(rgep->cfg_handle, regno, regval);
1717 break;
1718 }
1719 }
1720
1721 static void
rge_chip_peek_reg(rge_t * rgep,rge_peekpoke_t * ppd)1722 rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd)
1723 {
1724 uint64_t regval;
1725 void *regaddr;
1726
1727 RGE_TRACE(("rge_chip_peek_reg($%p, $%p)",
1728 (void *)rgep, (void *)ppd));
1729
1730 regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);
1731
1732 switch (ppd->pp_acc_size) {
1733 case 1:
1734 regval = ddi_get8(rgep->io_handle, regaddr);
1735 break;
1736
1737 case 2:
1738 regval = ddi_get16(rgep->io_handle, regaddr);
1739 break;
1740
1741 case 4:
1742 regval = ddi_get32(rgep->io_handle, regaddr);
1743 break;
1744
1745 case 8:
1746 regval = ddi_get64(rgep->io_handle, regaddr);
1747 break;
1748 }
1749
1750 ppd->pp_acc_data = regval;
1751 }
1752
1753 static void
rge_chip_poke_reg(rge_t * rgep,rge_peekpoke_t * ppd)1754 rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd)
1755 {
1756 uint64_t regval;
1757 void *regaddr;
1758
1759 RGE_TRACE(("rge_chip_poke_reg($%p, $%p)",
1760 (void *)rgep, (void *)ppd));
1761
1762 regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);
1763 regval = ppd->pp_acc_data;
1764
1765 switch (ppd->pp_acc_size) {
1766 case 1:
1767 ddi_put8(rgep->io_handle, regaddr, regval);
1768 break;
1769
1770 case 2:
1771 ddi_put16(rgep->io_handle, regaddr, regval);
1772 break;
1773
1774 case 4:
1775 ddi_put32(rgep->io_handle, regaddr, regval);
1776 break;
1777
1778 case 8:
1779 ddi_put64(rgep->io_handle, regaddr, regval);
1780 break;
1781 }
1782 }
1783
static void
rge_chip_peek_mii(rge_t *rgep, rge_peekpoke_t *ppd)
{
	RGE_TRACE(("rge_chip_peek_mii($%p, $%p)",
	    (void *)rgep, (void *)ppd));

	/*
	 * pp_acc_offset was pre-scaled by 2 in rge_pp_ioctl();
	 * dividing here recovers the word-addressed MII register number.
	 */
	ppd->pp_acc_data = rge_mii_get16(rgep, ppd->pp_acc_offset/2);
}
1792
static void
rge_chip_poke_mii(rge_t *rgep, rge_peekpoke_t *ppd)
{
	RGE_TRACE(("rge_chip_poke_mii($%p, $%p)",
	    (void *)rgep, (void *)ppd));

	/*
	 * pp_acc_offset was pre-scaled by 2 in rge_pp_ioctl();
	 * dividing here recovers the word-addressed MII register number.
	 */
	rge_mii_put16(rgep, ppd->pp_acc_offset/2, ppd->pp_acc_data);
}
1801
1802 static void
rge_chip_peek_mem(rge_t * rgep,rge_peekpoke_t * ppd)1803 rge_chip_peek_mem(rge_t *rgep, rge_peekpoke_t *ppd)
1804 {
1805 uint64_t regval;
1806 void *vaddr;
1807
1808 RGE_TRACE(("rge_chip_peek_rge($%p, $%p)",
1809 (void *)rgep, (void *)ppd));
1810
1811 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
1812
1813 switch (ppd->pp_acc_size) {
1814 case 1:
1815 regval = *(uint8_t *)vaddr;
1816 break;
1817
1818 case 2:
1819 regval = *(uint16_t *)vaddr;
1820 break;
1821
1822 case 4:
1823 regval = *(uint32_t *)vaddr;
1824 break;
1825
1826 case 8:
1827 regval = *(uint64_t *)vaddr;
1828 break;
1829 }
1830
1831 RGE_DEBUG(("rge_chip_peek_mem($%p, $%p) peeked 0x%llx from $%p",
1832 (void *)rgep, (void *)ppd, regval, vaddr));
1833
1834 ppd->pp_acc_data = regval;
1835 }
1836
1837 static void
rge_chip_poke_mem(rge_t * rgep,rge_peekpoke_t * ppd)1838 rge_chip_poke_mem(rge_t *rgep, rge_peekpoke_t *ppd)
1839 {
1840 uint64_t regval;
1841 void *vaddr;
1842
1843 RGE_TRACE(("rge_chip_poke_mem($%p, $%p)",
1844 (void *)rgep, (void *)ppd));
1845
1846 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
1847 regval = ppd->pp_acc_data;
1848
1849 RGE_DEBUG(("rge_chip_poke_mem($%p, $%p) poking 0x%llx at $%p",
1850 (void *)rgep, (void *)ppd, regval, vaddr));
1851
1852 switch (ppd->pp_acc_size) {
1853 case 1:
1854 *(uint8_t *)vaddr = (uint8_t)regval;
1855 break;
1856
1857 case 2:
1858 *(uint16_t *)vaddr = (uint16_t)regval;
1859 break;
1860
1861 case 4:
1862 *(uint32_t *)vaddr = (uint32_t)regval;
1863 break;
1864
1865 case 8:
1866 *(uint64_t *)vaddr = (uint64_t)regval;
1867 break;
1868 }
1869 }
1870
1871 static enum ioc_reply
rge_pp_ioctl(rge_t * rgep,int cmd,mblk_t * mp,struct iocblk * iocp)1872 rge_pp_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
1873 {
1874 void (*ppfn)(rge_t *rgep, rge_peekpoke_t *ppd);
1875 rge_peekpoke_t *ppd;
1876 dma_area_t *areap;
1877 uint64_t sizemask;
1878 uint64_t mem_va;
1879 uint64_t maxoff;
1880 boolean_t peek;
1881
1882 switch (cmd) {
1883 default:
1884 /* NOTREACHED */
1885 rge_error(rgep, "rge_pp_ioctl: invalid cmd 0x%x", cmd);
1886 return (IOC_INVAL);
1887
1888 case RGE_PEEK:
1889 peek = B_TRUE;
1890 break;
1891
1892 case RGE_POKE:
1893 peek = B_FALSE;
1894 break;
1895 }
1896
1897 /*
1898 * Validate format of ioctl
1899 */
1900 if (iocp->ioc_count != sizeof (rge_peekpoke_t))
1901 return (IOC_INVAL);
1902 if (mp->b_cont == NULL)
1903 return (IOC_INVAL);
1904 ppd = (rge_peekpoke_t *)mp->b_cont->b_rptr;
1905
1906 /*
1907 * Validate request parameters
1908 */
1909 switch (ppd->pp_acc_space) {
1910 default:
1911 return (IOC_INVAL);
1912
1913 case RGE_PP_SPACE_CFG:
1914 /*
1915 * Config space
1916 */
1917 sizemask = 8|4|2|1;
1918 mem_va = 0;
1919 maxoff = PCI_CONF_HDR_SIZE;
1920 ppfn = peek ? rge_chip_peek_cfg : rge_chip_poke_cfg;
1921 break;
1922
1923 case RGE_PP_SPACE_REG:
1924 /*
1925 * Memory-mapped I/O space
1926 */
1927 sizemask = 8|4|2|1;
1928 mem_va = 0;
1929 maxoff = RGE_REGISTER_MAX;
1930 ppfn = peek ? rge_chip_peek_reg : rge_chip_poke_reg;
1931 break;
1932
1933 case RGE_PP_SPACE_MII:
1934 /*
1935 * PHY's MII registers
1936 * NB: all PHY registers are two bytes, but the
1937 * addresses increment in ones (word addressing).
1938 * So we scale the address here, then undo the
1939 * transformation inside the peek/poke functions.
1940 */
1941 ppd->pp_acc_offset *= 2;
1942 sizemask = 2;
1943 mem_va = 0;
1944 maxoff = (MII_MAXREG+1)*2;
1945 ppfn = peek ? rge_chip_peek_mii : rge_chip_poke_mii;
1946 break;
1947
1948 case RGE_PP_SPACE_RGE:
1949 /*
1950 * RGE data structure!
1951 */
1952 sizemask = 8|4|2|1;
1953 mem_va = (uintptr_t)rgep;
1954 maxoff = sizeof (*rgep);
1955 ppfn = peek ? rge_chip_peek_mem : rge_chip_poke_mem;
1956 break;
1957
1958 case RGE_PP_SPACE_STATISTICS:
1959 case RGE_PP_SPACE_TXDESC:
1960 case RGE_PP_SPACE_TXBUFF:
1961 case RGE_PP_SPACE_RXDESC:
1962 case RGE_PP_SPACE_RXBUFF:
1963 /*
1964 * Various DMA_AREAs
1965 */
1966 switch (ppd->pp_acc_space) {
1967 case RGE_PP_SPACE_TXDESC:
1968 areap = &rgep->dma_area_txdesc;
1969 break;
1970 case RGE_PP_SPACE_RXDESC:
1971 areap = &rgep->dma_area_rxdesc;
1972 break;
1973 case RGE_PP_SPACE_STATISTICS:
1974 areap = &rgep->dma_area_stats;
1975 break;
1976 }
1977
1978 sizemask = 8|4|2|1;
1979 mem_va = (uintptr_t)areap->mem_va;
1980 maxoff = areap->alength;
1981 ppfn = peek ? rge_chip_peek_mem : rge_chip_poke_mem;
1982 break;
1983 }
1984
1985 switch (ppd->pp_acc_size) {
1986 default:
1987 return (IOC_INVAL);
1988
1989 case 8:
1990 case 4:
1991 case 2:
1992 case 1:
1993 if ((ppd->pp_acc_size & sizemask) == 0)
1994 return (IOC_INVAL);
1995 break;
1996 }
1997
1998 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
1999 return (IOC_INVAL);
2000
2001 if (ppd->pp_acc_offset >= maxoff)
2002 return (IOC_INVAL);
2003
2004 if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff)
2005 return (IOC_INVAL);
2006
2007 /*
2008 * All OK - go do it!
2009 */
2010 ppd->pp_acc_offset += mem_va;
2011 (*ppfn)(rgep, ppd);
2012 return (peek ? IOC_REPLY : IOC_ACK);
2013 }
2014
static enum ioc_reply
rge_diag_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	ASSERT(mutex_owned(rgep->genlock));

	switch (cmd) {
	default:
		/* NOTREACHED -- rge_chip_ioctl() screens the commands */
		rge_error(rgep, "rge_diag_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case RGE_DIAG:
		/*
		 * Currently a no-op
		 */
		return (IOC_ACK);

	case RGE_PEEK:
	case RGE_POKE:
		/* raw register/memory access -- see rge_pp_ioctl() */
		return (rge_pp_ioctl(rgep, cmd, mp, iocp));

	case RGE_PHY_RESET:
		/* no PHY work done here; caller restarts via IOC_RESTART */
		return (IOC_RESTART_ACK);

	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		/*
		 * Reset and reinitialise the 570x hardware
		 */
		rge_restart(rgep);
		return (IOC_ACK);
	}

	/* NOTREACHED */
}
2050
2051 #endif /* RGE_DEBUGGING || RGE_DO_PPIO */
2052
2053 static enum ioc_reply
rge_mii_ioctl(rge_t * rgep,int cmd,mblk_t * mp,struct iocblk * iocp)2054 rge_mii_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
2055 {
2056 struct rge_mii_rw *miirwp;
2057
2058 /*
2059 * Validate format of ioctl
2060 */
2061 if (iocp->ioc_count != sizeof (struct rge_mii_rw))
2062 return (IOC_INVAL);
2063 if (mp->b_cont == NULL)
2064 return (IOC_INVAL);
2065 miirwp = (struct rge_mii_rw *)mp->b_cont->b_rptr;
2066
2067 /*
2068 * Validate request parameters ...
2069 */
2070 if (miirwp->mii_reg > MII_MAXREG)
2071 return (IOC_INVAL);
2072
2073 switch (cmd) {
2074 default:
2075 /* NOTREACHED */
2076 rge_error(rgep, "rge_mii_ioctl: invalid cmd 0x%x", cmd);
2077 return (IOC_INVAL);
2078
2079 case RGE_MII_READ:
2080 miirwp->mii_data = rge_mii_get16(rgep, miirwp->mii_reg);
2081 return (IOC_REPLY);
2082
2083 case RGE_MII_WRITE:
2084 rge_mii_put16(rgep, miirwp->mii_reg, miirwp->mii_data);
2085 return (IOC_ACK);
2086 }
2087
2088 /* NOTREACHED */
2089 }
2090
/*
 * Top-level dispatcher for driver-private ioctls: diagnostic
 * commands go to rge_diag_ioctl() (only when built with
 * RGE_DEBUGGING/RGE_DO_PPIO), MII access to rge_mii_ioctl().
 * Must be called with genlock held.
 */
enum ioc_reply
rge_chip_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	RGE_TRACE(("rge_chip_ioctl($%p, $%p, $%p, $%p)",
	    (void *)rgep, (void *)wq, (void *)mp, (void *)iocp));

	ASSERT(mutex_owned(rgep->genlock));

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED -- callers only pass the commands below */
		rge_error(rgep, "rge_chip_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
#if RGE_DEBUGGING || RGE_DO_PPIO
		return (rge_diag_ioctl(rgep, cmd, mp, iocp));
#else
		return (IOC_INVAL);
#endif	/* RGE_DEBUGGING || RGE_DO_PPIO */

	case RGE_MII_READ:
	case RGE_MII_WRITE:
		return (rge_mii_ioctl(rgep, cmd, mp, iocp));

	}

	/* NOTREACHED */
}
2128