/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include "rge.h"

#define	REG32(rgep, reg)	((uint32_t *)(rgep->io_regs+(reg)))
#define	REG16(rgep, reg)	((uint16_t *)(rgep->io_regs+(reg)))
#define	REG8(rgep, reg)		((uint8_t *)(rgep->io_regs+(reg)))
#define	PIO_ADDR(rgep, offset)	((void *)(rgep->io_regs+(offset)))

/*
 * Patchable globals:
 *
 *	rge_autorecover
 *		Enables/disables automatic recovery after fault detection
 */
static uint32_t rge_autorecover = 1;

/*
 * globals:
 */
#define	RGE_DBG		RGE_DBG_REGS	/* debug flag for this code	*/
static uint32_t rge_watchdog_count = 1 << 16;

/*
 * Operating register get/set access routines
 */
#if	RGE_DEBUGGING

static void rge_pci_check(rge_t *rgep);
#pragma	no_inline(rge_pci_check)

static void
rge_pci_check(rge_t *rgep)
{
	uint16_t pcistatus;

	pcistatus = pci_config_get16(rgep->cfg_handle, PCI_CONF_STAT);
	if ((pcistatus & (PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB)) != 0)
		RGE_DEBUG(("rge_pci_check($%p): PCI status 0x%x",
			(void *)rgep, pcistatus));
}

#endif	/* RGE_DEBUGGING */

static uint32_t rge_reg_get32(rge_t *rgep, uintptr_t regno);
#pragma	inline(rge_reg_get32)

static uint32_t
rge_reg_get32(rge_t *rgep, uintptr_t regno)
{
	RGE_TRACE(("rge_reg_get32($%p, 0x%lx)",
		(void *)rgep, regno));

	return (ddi_get32(rgep->io_handle, REG32(rgep, regno)));
}

static void rge_reg_put32(rge_t *rgep, uintptr_t regno, uint32_t data);
#pragma	inline(rge_reg_put32)

static void
rge_reg_put32(rge_t *rgep, uintptr_t regno, uint32_t data)
{
	RGE_TRACE(("rge_reg_put32($%p, 0x%lx, 0x%x)",
		(void *)rgep, regno, data));

	ddi_put32(rgep->io_handle, REG32(rgep, regno), data);
	RGE_PCICHK(rgep);
}

static void rge_reg_set32(rge_t *rgep, uintptr_t regno, uint32_t bits);
#pragma	inline(rge_reg_set32)

static void
rge_reg_set32(rge_t *rgep, uintptr_t regno, uint32_t bits)
{
	uint32_t regval;

	RGE_TRACE(("rge_reg_set32($%p, 0x%lx, 0x%x)",
		(void *)rgep, regno, bits));

	regval = rge_reg_get32(rgep, regno);
	regval |= bits;
	rge_reg_put32(rgep, regno, regval);
}

static void rge_reg_clr32(rge_t *rgep, uintptr_t regno, uint32_t bits);
#pragma	inline(rge_reg_clr32)

static void
rge_reg_clr32(rge_t *rgep, uintptr_t regno, uint32_t bits)
{
	uint32_t regval;

	RGE_TRACE(("rge_reg_clr32($%p, 0x%lx, 0x%x)",
		(void *)rgep, regno, bits));

	regval = rge_reg_get32(rgep, regno);
	regval &= ~bits;
	rge_reg_put32(rgep, regno, regval);
}

static uint16_t rge_reg_get16(rge_t *rgep, uintptr_t regno);
#pragma	inline(rge_reg_get16)

static uint16_t
rge_reg_get16(rge_t *rgep, uintptr_t regno)
{
	RGE_TRACE(("rge_reg_get16($%p, 0x%lx)",
		(void *)rgep, regno));

	return (ddi_get16(rgep->io_handle, REG16(rgep, regno)));
}

static void rge_reg_put16(rge_t *rgep, uintptr_t regno, uint16_t data);
#pragma	inline(rge_reg_put16)

static void
rge_reg_put16(rge_t *rgep, uintptr_t regno, uint16_t data)
{
	RGE_TRACE(("rge_reg_put16($%p, 0x%lx, 0x%x)",
		(void *)rgep, regno, data));

	ddi_put16(rgep->io_handle, REG16(rgep, regno), data);
	RGE_PCICHK(rgep);
}

static void rge_reg_set16(rge_t *rgep, uintptr_t regno, uint16_t bits);
#pragma	inline(rge_reg_set16)

static void
rge_reg_set16(rge_t *rgep, uintptr_t regno, uint16_t bits)
{
	uint16_t regval;

	RGE_TRACE(("rge_reg_set16($%p, 0x%lx, 0x%x)",
		(void *)rgep, regno, bits));

	regval = rge_reg_get16(rgep, regno);
	regval |= bits;
	rge_reg_put16(rgep, regno, regval);
}

static void rge_reg_clr16(rge_t *rgep, uintptr_t regno, uint16_t bits);
#pragma	inline(rge_reg_clr16)

static void
rge_reg_clr16(rge_t *rgep, uintptr_t regno, uint16_t bits)
{
	uint16_t regval;

	RGE_TRACE(("rge_reg_clr16($%p, 0x%lx, 0x%x)",
		(void *)rgep, regno, bits));

	regval = rge_reg_get16(rgep, regno);
	regval &= ~bits;
	rge_reg_put16(rgep, regno, regval);
}

static uint8_t rge_reg_get8(rge_t *rgep, uintptr_t regno);
#pragma	inline(rge_reg_get8)

static uint8_t
rge_reg_get8(rge_t *rgep, uintptr_t regno)
{
	RGE_TRACE(("rge_reg_get8($%p, 0x%lx)",
		(void *)rgep, regno));

	return (ddi_get8(rgep->io_handle, REG8(rgep, regno)));
}

static void rge_reg_put8(rge_t *rgep, uintptr_t regno, uint8_t data);
#pragma	inline(rge_reg_put8)

static void
rge_reg_put8(rge_t *rgep, uintptr_t regno, uint8_t data)
{
	RGE_TRACE(("rge_reg_put8($%p, 0x%lx, 0x%x)",
		(void *)rgep, regno, data));

	ddi_put8(rgep->io_handle, REG8(rgep, regno), data);
	RGE_PCICHK(rgep);
}

static void rge_reg_set8(rge_t *rgep, uintptr_t regno, uint8_t bits);
#pragma	inline(rge_reg_set8)

static void
rge_reg_set8(rge_t *rgep, uintptr_t regno, uint8_t bits)
{
	uint8_t regval;

	RGE_TRACE(("rge_reg_set8($%p, 0x%lx, 0x%x)",
		(void *)rgep, regno, bits));

	regval = rge_reg_get8(rgep, regno);
	regval |= bits;
	rge_reg_put8(rgep, regno, regval);
}

static void rge_reg_clr8(rge_t *rgep, uintptr_t regno, uint8_t bits);
#pragma	inline(rge_reg_clr8)

static void
rge_reg_clr8(rge_t *rgep, uintptr_t regno, uint8_t bits)
{
	uint8_t regval;

	RGE_TRACE(("rge_reg_clr8($%p, 0x%lx, 0x%x)",
		(void *)rgep, regno, bits));

	regval = rge_reg_get8(rgep, regno);
	regval &= ~bits;
	rge_reg_put8(rgep, regno, regval);
}

uint16_t rge_mii_get16(rge_t *rgep, uintptr_t mii);
#pragma	no_inline(rge_mii_get16)

uint16_t
rge_mii_get16(rge_t *rgep, uintptr_t mii)
{
	uint32_t regval;
	uint32_t val32;
	uint32_t i;

	regval = (mii & PHY_REG_MASK) << PHY_REG_SHIFT;
	rge_reg_put32(rgep, PHY_ACCESS_REG, regval);

	/*
	 * Wait for the PHY read to complete
	 */
	for (i = 0; i < PHY_RESET_LOOP; i++) {
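		/*
		 * The chip sets PHY_ACCESS_WR_FLAG once the read data in
		 * the low 16 bits of PHY_ACCESS_REG is valid (checked below).
		 */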
		drv_usecwait(100);
		val32 = rge_reg_get32(rgep, PHY_ACCESS_REG);
		if (val32 & PHY_ACCESS_WR_FLAG)
			return (val32 & 0xffff);
	}

	RGE_REPORT((rgep, "rge_mii_get16(0x%lx) fail, val = %x", mii, val32));
	return ((uint16_t)~0u);
}

void rge_mii_put16(rge_t *rgep, uintptr_t mii, uint16_t data);
#pragma	no_inline(rge_mii_put16)

void
rge_mii_put16(rge_t *rgep, uintptr_t mii, uint16_t data)
{
	uint32_t regval;
	uint32_t val32;
	uint32_t i;

	regval = (mii & PHY_REG_MASK) << PHY_REG_SHIFT;
	regval |= data & PHY_DATA_MASK;
	regval |= PHY_ACCESS_WR_FLAG;
	rge_reg_put32(rgep, PHY_ACCESS_REG, regval);

	/*
	 * Wait for the PHY write to complete
	 */
	for (i = 0; i < PHY_RESET_LOOP; i++) {
		drv_usecwait(100);
		val32 = rge_reg_get32(rgep, PHY_ACCESS_REG);
		if (!(val32 & PHY_ACCESS_WR_FLAG))
			return;
	}
	RGE_REPORT((rgep, "rge_mii_put16(0x%lx, 0x%x) fail",
		mii, data));
}

/*
 * Atomically shift a 32-bit word left, returning
 * the value it had *before* the shift was applied
 */
static uint32_t rge_atomic_shl32(uint32_t *sp, uint_t count);
#pragma	inline(rge_atomic_shl32)

static uint32_t
rge_atomic_shl32(uint32_t *sp, uint_t count)
{
	uint32_t oldval;
	uint32_t newval;

	/* ATOMICALLY */
	do {
		oldval = *sp;
		newval = oldval << count;
	} while (cas32(sp, oldval, newval) != oldval);

	return (oldval);
}

/*
 * PHY operation routines
 */
#if	RGE_DEBUGGING

static void
rge_phydump(rge_t *rgep)
{
	uint16_t regs[32];
	int i;

	ASSERT(mutex_owned(rgep->genlock));

	for (i = 0; i < 32; ++i) {
		regs[i] = rge_mii_get16(rgep, i);
	}

	for (i = 0; i < 32; i += 8)
		RGE_DEBUG(("rge_phydump: "
			"0x%04x %04x %04x %04x %04x %04x %04x %04x",
			regs[i+0], regs[i+1], regs[i+2], regs[i+3],
			regs[i+4], regs[i+5], regs[i+6], regs[i+7]));
}

#endif	/* RGE_DEBUGGING */

/*
 * Basic low-level function to probe for a PHY
 *
 * Returns TRUE if the PHY responds with valid data, FALSE otherwise
 */
static boolean_t
rge_phy_probe(rge_t *rgep)
{
	uint16_t phy_status;

	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Read the MII_STATUS register twice, in
	 * order to clear any sticky bits (but they should
	 * have been cleared by the RESET, I think).
	 */
	phy_status = rge_mii_get16(rgep, MII_STATUS);
	phy_status = rge_mii_get16(rgep, MII_STATUS);
	RGE_DEBUG(("rge_phy_probe: status 0x%x", phy_status));

	/*
	 * Now check the value read; it should have at least one bit set
	 * (for the device capabilities) and at least one clear (one of
	 * the error bits). So if we see all 0s or all 1s, there's a
	 * problem.  In particular, rge_mii_get16() returns all 1s if
	 * communications fails ...
	 */
	switch (phy_status) {
	case 0x0000:
	case 0xffff:
		return (B_FALSE);

	default:
		return (B_TRUE);
	}
}

static void
rge_phy_check(rge_t *rgep)
{
	uint16_t gig_ctl;

	if (rgep->param_link_up == LINK_STATE_DOWN) {
		/*
		 * The RTL8169S/8110S PHY has the "PCS bug": the PHY needs
		 * to be reset every 15 seconds while the link is down and
		 * 1000Mb/s full-duplex is being advertised.
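		 * (rge_phy_check() is called from the periodic cyclic, so
		 * link_down_count below counts consecutive cyclic ticks
		 * spent in this state.)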
		 */
		if (rgep->chipid.phy_ver == PHY_VER_S) {
			gig_ctl = rge_mii_get16(rgep, MII_1000BASE_T_CONTROL);
			if (gig_ctl & MII_1000BT_CTL_ADV_FDX) {
				rgep->link_down_count++;
				if (rgep->link_down_count > 15) {
					(void) rge_phy_reset(rgep);
					rgep->stats.phy_reset++;
					rgep->link_down_count = 0;
				}
			}
		}
	} else {
		rgep->link_down_count = 0;
	}
}

/*
 * Basic low-level function to reset the PHY.
 * Doesn't incorporate any special-case workarounds.
 *
 * Returns TRUE on success, FALSE if the RESET bit doesn't clear
 */
boolean_t
rge_phy_reset(rge_t *rgep)
{
	uint16_t control;
	uint_t count;

	/*
	 * Set the PHY RESET bit, then wait up to ~100 ms for it to
	 * self-clear
	 */
	control = rge_mii_get16(rgep, MII_CONTROL);
	rge_mii_put16(rgep, MII_CONTROL, control | MII_CONTROL_RESET);
	for (count = 0; ++count < 1000; ) {
		drv_usecwait(100);
		control = rge_mii_get16(rgep, MII_CONTROL);
		if (BIC(control, MII_CONTROL_RESET))
			return (B_TRUE);
	}

	RGE_REPORT((rgep, "rge_phy_reset: FAILED, control now 0x%x", control));
	return (B_FALSE);
}

/*
 * Synchronise the PHY's speed/duplex/autonegotiation capabilities
 * and advertisements with the required settings as specified by the various
 * param_* variables that can be poked via the NDD interface.
 *
 * We always reset the PHY and reprogram *all* the relevant registers,
 * not just those changed.  This should cause the link to go down, and then
 * back up again once the link is stable and autonegotiation (if enabled)
 * is complete.  We should get a link state change interrupt somewhere along
 * the way ...
 *
 * NOTE: <genlock> must already be held by the caller
 */
void
rge_phy_update(rge_t *rgep)
{
	boolean_t adv_autoneg;
	boolean_t adv_pause;
	boolean_t adv_asym_pause;
	boolean_t adv_1000fdx;
	boolean_t adv_1000hdx;
	boolean_t adv_100fdx;
	boolean_t adv_100hdx;
	boolean_t adv_10fdx;
	boolean_t adv_10hdx;

	uint16_t control;
	uint16_t gigctrl;
	uint16_t anar;

	ASSERT(mutex_owned(rgep->genlock));

	RGE_DEBUG(("rge_phy_update: autoneg %d "
			"pause %d asym_pause %d "
			"1000fdx %d 1000hdx %d "
			"100fdx %d 100hdx %d "
			"10fdx %d 10hdx %d ",
		rgep->param_adv_autoneg,
		rgep->param_adv_pause, rgep->param_adv_asym_pause,
		rgep->param_adv_1000fdx, rgep->param_adv_1000hdx,
		rgep->param_adv_100fdx, rgep->param_adv_100hdx,
		rgep->param_adv_10fdx, rgep->param_adv_10hdx));

	control = gigctrl = anar = 0;

	/*
	 * PHY settings are normally based on the param_* variables,
	 * but if any loopback mode is in effect, that takes precedence.
	 *
	 * RGE supports MAC-internal loopback, PHY-internal loopback,
	 * and external loopback at a variety of speeds (with a special
	 * cable).  In all cases, autoneg is turned OFF, full-duplex
	 * is turned ON, and the speed/mastership is forced.
	 */
	switch (rgep->param_loop_mode) {
	case RGE_LOOP_NONE:
	default:
		adv_autoneg = rgep->param_adv_autoneg;
		adv_pause = rgep->param_adv_pause;
		adv_asym_pause = rgep->param_adv_asym_pause;
		adv_1000fdx = rgep->param_adv_1000fdx;
		adv_1000hdx = rgep->param_adv_1000hdx;
		adv_100fdx = rgep->param_adv_100fdx;
		adv_100hdx = rgep->param_adv_100hdx;
		adv_10fdx = rgep->param_adv_10fdx;
		adv_10hdx = rgep->param_adv_10hdx;
		break;

	case RGE_LOOP_INTERNAL_PHY:
	case RGE_LOOP_INTERNAL_MAC:
		adv_autoneg = adv_pause = adv_asym_pause = B_FALSE;
		adv_1000fdx = adv_100fdx = adv_10fdx = B_FALSE;
		adv_1000hdx = adv_100hdx = adv_10hdx = B_FALSE;
		rgep->param_link_duplex = LINK_DUPLEX_FULL;

		switch (rgep->param_loop_mode) {
		case RGE_LOOP_INTERNAL_PHY:
			rgep->param_link_speed = 1000;
			adv_1000fdx = B_TRUE;
			control = MII_CONTROL_LOOPBACK;
			break;

		case RGE_LOOP_INTERNAL_MAC:
			rgep->param_link_speed = 1000;
			adv_1000fdx = B_TRUE;
			break;
		}
	}

	RGE_DEBUG(("rge_phy_update: autoneg %d "
			"pause %d asym_pause %d "
			"1000fdx %d 1000hdx %d "
			"100fdx %d 100hdx %d "
			"10fdx %d 10hdx %d ",
		adv_autoneg,
		adv_pause, adv_asym_pause,
		adv_1000fdx, adv_1000hdx,
		adv_100fdx, adv_100hdx,
		adv_10fdx, adv_10hdx));

	/*
	 * We should have at least one technology capability set;
	 * if not, we select a default of 1000Mb/s full-duplex
	 */
	if (!adv_1000fdx && !adv_100fdx && !adv_10fdx &&
	    !adv_1000hdx && !adv_100hdx && !adv_10hdx)
		adv_1000fdx = B_TRUE;

	/*
	 * Now transform the adv_* variables into the proper settings
	 * of the PHY registers ...
	 *
	 * If autonegotiation is (now) enabled, we want to trigger
	 * a new autonegotiation cycle once the PHY has been
	 * programmed with the capabilities to be advertised.
	 *
	 * RTL8169/8110 doesn't support 1000Mb/s half-duplex.
	 */
	if (adv_autoneg)
		control |= MII_CONTROL_ANE|MII_CONTROL_RSAN;

	if (adv_1000fdx)
		control |= MII_CONTROL_1000MB|MII_CONTROL_FDUPLEX;
	else if (adv_1000hdx)
		control |= MII_CONTROL_1000MB;
	else if (adv_100fdx)
		control |= MII_CONTROL_100MB|MII_CONTROL_FDUPLEX;
	else if (adv_100hdx)
		control |= MII_CONTROL_100MB;
	else if (adv_10fdx)
		control |= MII_CONTROL_FDUPLEX;
	else if (adv_10hdx)
		control |= 0;
	else
		{ _NOTE(EMPTY); }	/* Can't get here anyway ... */

	if (adv_1000fdx) {
		gigctrl |= MII_1000BT_CTL_ADV_FDX;
		/*
		 * Chipset limitation: the other capabilities must also be
		 * set to true
		 */
		adv_100fdx = B_TRUE;
		adv_100hdx = B_TRUE;
		adv_10fdx = B_TRUE;
		adv_10hdx = B_TRUE;
	}

	if (adv_1000hdx)
		gigctrl |= MII_1000BT_CTL_ADV_HDX;

	if (adv_100fdx)
		anar |= MII_ABILITY_100BASE_TX_FD;
	if (adv_100hdx)
		anar |= MII_ABILITY_100BASE_TX;
	if (adv_10fdx)
		anar |= MII_ABILITY_10BASE_T_FD;
	if (adv_10hdx)
		anar |= MII_ABILITY_10BASE_T;

	if (adv_pause)
		anar |= MII_ABILITY_PAUSE;
	if (adv_asym_pause)
		anar |= MII_ABILITY_ASYM_PAUSE;

	/*
	 * Munge in any other fixed bits we require ...
	 */
	anar |= MII_AN_SELECTOR_8023;

	/*
	 * Restart the PHY and write the new values.
	 * Note the time, so that we can say whether subsequent link state
	 * changes can be attributed to our reprogramming the PHY
	 */
	rgep->phys_write_time = gethrtime();
	rge_phy_init(rgep);
	rge_mii_put16(rgep, MII_AN_ADVERT, anar);
	rge_mii_put16(rgep, MII_CONTROL, control);
	rge_mii_put16(rgep, MII_1000BASE_T_CONTROL, gigctrl);

	RGE_DEBUG(("rge_phy_update: anar <- 0x%x", anar));
	RGE_DEBUG(("rge_phy_update: control <- 0x%x", control));
	RGE_DEBUG(("rge_phy_update: gigctrl <- 0x%x", gigctrl));
}

void rge_phy_init(rge_t *rgep);
#pragma	no_inline(rge_phy_init)

void
rge_phy_init(rge_t *rgep)
{
	uint16_t val16;

	rgep->phy_mii_addr = 1;

	/*
	 * The PHY configuration steps below are copied from the
	 * Programming Guide (there are no detailed comments for them).
	 */
	if ((rgep->chipid.mac_ver == MAC_VER_SD ||
	    rgep->chipid.mac_ver == MAC_VER_SE) &&
	    (rgep->chipid.phy_ver == PHY_VER_S)) {
		rge_mii_put16(rgep, PHY_1F_REG, 1);
		rge_mii_put16(rgep, PHY_15_REG, 0x1000);
		rge_mii_put16(rgep, PHY_18_REG, 0x65c7);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 & (~ANAR_ASY_PAUSE));
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 & 0x0fff);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0x00a1);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0x0008);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x1020);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x1000);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 | ANAR_ASY_PAUSE);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 & (~ANAR_ASY_PAUSE));
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, (val16 & 0x0fff) | 0x7000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xff41);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xde60);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x0140);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x0077);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 | ANAR_ASY_PAUSE);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 & (~ANAR_ASY_PAUSE));
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, (val16 & 0x0fff) | 0xa000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xdf01);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xdf20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0xff95);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0xfa00);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 | ANAR_ASY_PAUSE);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 & (~ANAR_ASY_PAUSE));
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, (val16 & 0x0fff) | 0xb000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xff41);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xde20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x0140);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0x00bb);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 | ANAR_ASY_PAUSE);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 & (~ANAR_ASY_PAUSE));
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, (val16 & 0x0fff) | 0xf000);
		rge_mii_put16(rgep, PHY_ID_REG_2, 0xdf01);
		rge_mii_put16(rgep, PHY_ID_REG_1, 0xdf20);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0xff95);
		rge_mii_put16(rgep, PHY_BMCR_REG, 0xbf00);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 | ANAR_ASY_PAUSE);
		val16 = rge_mii_get16(rgep, PHY_ANAR_REG);
		rge_mii_put16(rgep, PHY_ANAR_REG, val16 & (~ANAR_ASY_PAUSE));
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
		rge_mii_put16(rgep, PHY_0B_REG, 0x0000);
	}

	if (rgep->chipid.mac_ver == MAC_VER_SB) {
		rge_mii_put16(rgep, PHY_1F_REG, 0x0001);
		rge_mii_put16(rgep, PHY_1B_REG, 0x841e);
		rge_mii_put16(rgep, PHY_0E_REG, 0x7bfb);
		rge_mii_put16(rgep, PHY_GBCR_REG, GBCR_DEFAULT);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0002);
		rge_mii_put16(rgep, PHY_BMSR_REG, 0x90D0);
		rge_mii_put16(rgep, PHY_1F_REG, 0x0000);
	}
}

void rge_chip_ident(rge_t *rgep);
#pragma	no_inline(rge_chip_ident)

void
rge_chip_ident(rge_t *rgep)
{
	chip_id_t *chip = &rgep->chipid;
	uint32_t val32;
	uint16_t val16;

	val32 = rge_reg_get32(rgep, TX_CONFIG_REG);
	val32 &= HW_VERSION_ID_0 | HW_VERSION_ID_1;
	chip->mac_ver = val32;

	val16 = rge_mii_get16(rgep, PHY_ID_REG_2);
	val16 &= PHY_VER_MASK;
	chip->phy_ver = val16;

	if (rgep->param_default_mtu > ETHERMTU) {
		rgep->rxbuf_size = RGE_BUFF_SIZE_JUMBO;
		rgep->txbuf_size = RGE_BUFF_SIZE_JUMBO;
		rgep->ethmax_size = RGE_JUMBO_SIZE;
	} else {
		rgep->rxbuf_size = RGE_BUFF_SIZE_STD;
		rgep->txbuf_size = RGE_BUFF_SIZE_STD;
		rgep->ethmax_size = ETHERMAX;
	}

	chip->rxconfig = RX_CONFIG_DEFAULT;
	chip->txconfig = TX_CONFIG_DEFAULT;

	RGE_TRACE(("%s: MAC version = %x, PHY version = %x",
	    rgep->ifname, chip->mac_ver, chip->phy_ver));

	/* set pci latency timer */
	if (chip->mac_ver == MAC_VER_NS || chip->mac_ver == MAC_VER_SD)
		pci_config_put8(rgep->cfg_handle, PCI_CONF_LATENCY_TIMER, 0x40);
}

/*
 * Perform first-stage chip (re-)initialisation, using only config-space
 * accesses:
 *
 * + Read the vendor/device/revision/subsystem/cache-line-size registers,
 *   returning the data in the structure pointed to by <cidp>.
 * + Enable Memory Space accesses.
 * + Enable Bus Mastering.
 */
void rge_chip_cfg_init(rge_t *rgep, chip_id_t *cidp);
#pragma	no_inline(rge_chip_cfg_init)

void
rge_chip_cfg_init(rge_t *rgep, chip_id_t *cidp)
{
	ddi_acc_handle_t handle;
	uint16_t commd;

	handle = rgep->cfg_handle;

	/*
	 * Save the PCI command word, IDs, revision, cache line size
	 * and latency timer
	 */
	cidp->command = pci_config_get16(handle, PCI_CONF_COMM);
	cidp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
	cidp->device = pci_config_get16(handle, PCI_CONF_DEVID);
	cidp->subven = pci_config_get16(handle, PCI_CONF_SUBVENID);
	cidp->subdev = pci_config_get16(handle, PCI_CONF_SUBSYSID);
	cidp->revision = pci_config_get8(handle, PCI_CONF_REVID);
	cidp->clsize = pci_config_get8(handle, PCI_CONF_CACHE_LINESZ);
	cidp->latency = pci_config_get8(handle, PCI_CONF_LATENCY_TIMER);

	/*
	 * Turn on Master Enable (DMA) and IO Enable bits.
	 * Enable PCI Memory Space accesses
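	 * (PCI_COMM_ME, PCI_COMM_MAE and PCI_COMM_IO are all set together
	 * in the command word below.)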
	 */
	commd = cidp->command;
	commd |= PCI_COMM_ME | PCI_COMM_MAE | PCI_COMM_IO;
	pci_config_put16(handle, PCI_CONF_COMM, commd);

	RGE_DEBUG(("rge_chip_cfg_init: vendor 0x%x device 0x%x revision 0x%x",
		cidp->vendor, cidp->device, cidp->revision));
	RGE_DEBUG(("rge_chip_cfg_init: subven 0x%x subdev 0x%x",
		cidp->subven, cidp->subdev));
	RGE_DEBUG(("rge_chip_cfg_init: clsize %d latency %d command 0x%x",
		cidp->clsize, cidp->latency, cidp->command));
}

int rge_chip_reset(rge_t *rgep);
#pragma	no_inline(rge_chip_reset)

int
rge_chip_reset(rge_t *rgep)
{
	int i;
	uint8_t val8;

	/*
	 * Chip should be in STOP state
	 */
	rge_reg_clr8(rgep, RT_COMMAND_REG,
	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);

	/*
	 * Disable interrupts
	 */
	rge_reg_clr16(rgep, INT_MASK_REG, INT_MASK_ALL);
	rgep->int_mask = INT_MASK_NONE;

	/*
	 * Clear pending interrupts
	 */
	rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL);

	/*
	 * Reset chip
	 */
	rge_reg_set8(rgep, RT_COMMAND_REG, RT_COMMAND_RESET);

	/*
	 * Wait for the reset to complete
	 */
	for (i = 0; i < CHIP_RESET_LOOP; i++) {
		drv_usecwait(10);
		val8 = rge_reg_get8(rgep, RT_COMMAND_REG);
		if (!(val8 & RT_COMMAND_RESET)) {
			rgep->rge_chip_state = RGE_CHIP_RESET;
			return (0);
		}
	}
	RGE_REPORT((rgep, "rge_chip_reset fail."));
	return (-1);
}

void rge_chip_init(rge_t *rgep);
#pragma	no_inline(rge_chip_init)

void
rge_chip_init(rge_t *rgep)
{
	uint32_t val32;

	/*
	 * Config MII register
	 */
	rgep->param_link_up = LINK_STATE_DOWN;
	rge_phy_update(rgep);

	/*
	 * Enable Rx checksum offload.
	 * For VLAN support we must also enable receive VLAN de-tagging;
	 * otherwise there'll be checksum errors.
	 */
	rge_reg_set16(rgep, CPLUS_COMMAND_REG, RX_CKSM_OFFLOAD | RX_VLAN_DETAG);

	/*
	 * Suggested setting from Realtek
	 */
	if (rgep->chipid.mac_ver == MAC_VER_SD) {
		rge_reg_set16(rgep, CPLUS_COMMAND_REG,
		    CPLUS_BIT14 | MUL_PCI_RW_ENABLE);
		rge_reg_put8(rgep, RESV_82_REG, 0x01);
	}
	rge_reg_clr16(rgep, CPLUS_COMMAND_REG, 0x03);

	/*
	 * Start transmit/receive before setting the tx/rx configuration
	 * registers
	 */
	rge_reg_set8(rgep, RT_COMMAND_REG,
	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);

	/*
	 * Set dump tally counter register
	 */
	val32 = rgep->dma_area_stats.cookie.dmac_laddress >> 32;
	rge_reg_put32(rgep, DUMP_COUNTER_REG_1, val32);
	val32 = rge_reg_get32(rgep, DUMP_COUNTER_REG_0);
	val32 &= DUMP_COUNTER_REG_RESV;
	val32 |= rgep->dma_area_stats.cookie.dmac_laddress;
	rge_reg_put32(rgep, DUMP_COUNTER_REG_0, val32);

	/*
	 * Change to config register write enable mode
	 */
	rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);

	/*
	 * Set Tx/Rx maximum packet size
	 */
	if (rgep->param_default_mtu > ETHERMTU) {
		rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_JUMBO);
		rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_JUMBO);
	} else {
		rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_STD);
		rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_STD);
	}

	/*
	 * Set receive configuration register
	 */
	val32 = rge_reg_get32(rgep, RX_CONFIG_REG);
	val32 &= RX_CONFIG_REG_RESV;
	if (rgep->promisc)
		val32 |= RX_ACCEPT_ALL_PKT;
	rge_reg_put32(rgep, RX_CONFIG_REG, val32 | rgep->chipid.rxconfig);

	/*
	 * Set transmit configuration register
	 */
	val32 = rge_reg_get32(rgep, TX_CONFIG_REG);
	val32 &= TX_CONFIG_REG_RESV;
	rge_reg_put32(rgep, TX_CONFIG_REG, val32 | rgep->chipid.txconfig);

	/*
	 * Initialize PHY registers
	 */
	rge_phy_init(rgep);

	/*
	 * Set Tx/Rx descriptor registers
	 */
	val32 = rgep->tx_desc.cookie.dmac_laddress;
	rge_reg_put32(rgep, NORMAL_TX_RING_ADDR_LO_REG, val32);
	val32 = rgep->tx_desc.cookie.dmac_laddress >> 32;
	rge_reg_put32(rgep, NORMAL_TX_RING_ADDR_HI_REG, val32);
	rge_reg_put32(rgep, HIGH_TX_RING_ADDR_LO_REG, 0);
	rge_reg_put32(rgep, HIGH_TX_RING_ADDR_HI_REG, 0);
	val32 = rgep->rx_desc.cookie.dmac_laddress;
	rge_reg_put32(rgep, RX_RING_ADDR_LO_REG, val32);
	val32 = rgep->rx_desc.cookie.dmac_laddress >> 32;
	rge_reg_put32(rgep, RX_RING_ADDR_HI_REG, val32);

	/*
	 * Suggested setting from Realtek
	 */
	rge_reg_put16(rgep, RESV_E2_REG, 0x282a);

	/*
	 * Return to normal network/host communication mode
	 */
	rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
	drv_usecwait(20);

	/*
	 * Set multicast registers
	 */
	rge_reg_put32(rgep, MULTICAST_0_REG, rgep->mcast_hash[0]);
	rge_reg_put32(rgep, MULTICAST_4_REG, rgep->mcast_hash[1]);

	/*
	 * Mask and clear all interrupts
	 */
	rge_reg_put16(rgep, INT_MASK_REG, INT_MASK_NONE);
	rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL);

	/*
	 * Misc register settings:
	 *	-- Missed packet counter: clear it
	 *	-- TimerInt Register
	 *	-- Timer count register
	 */
	rge_reg_put32(rgep, RX_PKT_MISS_COUNT_REG, 0);
	rge_reg_put32(rgep, TIMER_INT_REG, TIMER_INT_NONE);
	rge_reg_put32(rgep, TIMER_COUNT_REG, 0);
}

/*
 * rge_chip_start() -- start the chip transmitting and/or receiving,
 * including enabling interrupts
 */
void rge_chip_start(rge_t *rgep);
#pragma	no_inline(rge_chip_start)

void
rge_chip_start(rge_t *rgep)
{
	/*
	 * Clear statistics
	 */
	bzero(&rgep->stats, sizeof (rge_stats_t));
	DMA_ZERO(rgep->dma_area_stats);

	/*
	 * Start transmit/receive
	 */
	rge_reg_set8(rgep, RT_COMMAND_REG,
	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);

	/*
	 * Enable interrupts
	 */
	rge_reg_set16(rgep, INT_MASK_REG, RGE_INT_MASK);
	rgep->int_mask = RGE_INT_MASK;

	/*
	 * All done!
	 */
	rgep->rge_chip_state = RGE_CHIP_RUNNING;
}

/*
 * rge_chip_stop() -- stop the board receiving
 */
void rge_chip_stop(rge_t *rgep, boolean_t fault);
#pragma	no_inline(rge_chip_stop)

void
rge_chip_stop(rge_t *rgep, boolean_t fault)
{
	/*
	 * Disable interrupts
	 */
	rge_reg_put16(rgep, INT_MASK_REG, INT_MASK_NONE);
	rgep->int_mask = INT_MASK_NONE;

	/*
	 * Clear pending interrupts
	 */
	rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL);

	/*
	 * Stop the board and disable transmit/receive
	 */
	rge_reg_clr8(rgep, RT_COMMAND_REG,
	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);

	if (fault)
		rgep->rge_chip_state = RGE_CHIP_FAULT;
	else
		rgep->rge_chip_state = RGE_CHIP_STOPPED;
}

/*
 * rge_get_mac_addr() -- get the MAC address on NIC
 */
static void rge_get_mac_addr(rge_t *rgep);
#pragma	inline(rge_get_mac_addr)

static void
rge_get_mac_addr(rge_t *rgep)
{
	uint8_t *macaddr = rgep->netaddr;
	uint32_t val32;

	/*
	 * Read the first 4 bytes of the MAC address
	 */
	val32 = rge_reg_get32(rgep, ID_0_REG);
	macaddr[0] = val32 & 0xff;
	val32 = val32 >> 8;
	macaddr[1] = val32 & 0xff;
	val32 = val32 >> 8;
	macaddr[2] = val32 & 0xff;
	val32 = val32 >> 8;
	macaddr[3] = val32 & 0xff;

	/*
	 * Read the last 2 bytes of the MAC address
	 */
	val32 = rge_reg_get32(rgep, ID_4_REG);
	macaddr[4] = val32 & 0xff;
	val32 = val32 >> 8;
	macaddr[5] = val32 & 0xff;
}

static void rge_set_mac_addr(rge_t *rgep);
#pragma	inline(rge_set_mac_addr)

static void
rge_set_mac_addr(rge_t *rgep)
{
	uint8_t *p = rgep->netaddr;
	uint32_t val32;

	/*
	 * Change to config register write enable mode
	 */
	rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);

	/*
	 * Get the first 4 bytes of the MAC address
	 */
	val32 = p[3];
	val32 = val32 << 8;
	val32 |= p[2];
	val32 = val32 << 8;
	val32 |= p[1];
	val32 = val32 << 8;
	val32 |= p[0];

	/*
	 * Set the first 4 bytes of the MAC address
	 */
	rge_reg_put32(rgep, ID_0_REG, val32);

	/*
	 * Get the last 2 bytes of the MAC address
	 */
	val32 = p[5];
	val32 = val32 << 8;
	val32 |= p[4];

	/*
	 * Set the last 2 bytes of the MAC address
	 */
	val32 |= rge_reg_get32(rgep, ID_4_REG) & ~0xffff;
	rge_reg_put32(rgep, ID_4_REG, val32);

	/*
	 * Return to normal network/host communication mode
	 */
	rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
}

static void rge_set_multi_addr(rge_t *rgep);
#pragma	inline(rge_set_multi_addr)

static void
rge_set_multi_addr(rge_t *rgep)
{
	uint32_t *hashp;

	hashp = rgep->mcast_hash;
	rge_reg_put32(rgep, MULTICAST_0_REG, hashp[0]);
	rge_reg_put32(rgep, MULTICAST_4_REG, hashp[1]);
	rge_reg_set8(rgep, RT_COMMAND_REG,
	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
}

static void rge_set_promisc(rge_t *rgep);
#pragma	inline(rge_set_promisc)

static void
rge_set_promisc(rge_t *rgep)
{
	if (rgep->promisc)
		rge_reg_set32(rgep, RX_CONFIG_REG, RX_ACCEPT_ALL_PKT);
	else
		rge_reg_clr32(rgep, RX_CONFIG_REG, RX_ACCEPT_ALL_PKT);

	rge_reg_set8(rgep, RT_COMMAND_REG,
	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
}

/*
 * rge_chip_sync() -- program the chip with the unicast MAC address,
 * the multicast hash table, the required level of promiscuity, and
 * the current loopback mode ...
 */
void rge_chip_sync(rge_t *rgep, enum rge_sync_op todo);
#pragma	no_inline(rge_chip_sync)

void
rge_chip_sync(rge_t *rgep, enum rge_sync_op todo)
{
	switch (todo) {
	case RGE_GET_MAC:
		rge_get_mac_addr(rgep);
		break;
	case RGE_SET_MAC:
		/* Reprogram the unicast MAC address(es) ... */
		rge_set_mac_addr(rgep);
		break;
	case RGE_SET_MUL:
		/* Reprogram the hashed multicast address table ... */
		rge_set_multi_addr(rgep);
		break;
	case RGE_SET_PROMISC:
		/* Set or clear the PROMISCUOUS mode bit */
		rge_set_promisc(rgep);
		break;
	default:
		break;
	}
}

void rge_chip_blank(void *arg, time_t ticks, uint_t count);
#pragma	no_inline(rge_chip_blank)

void
rge_chip_blank(void *arg, time_t ticks, uint_t count)
{
	_NOTE(ARGUNUSED(arg, ticks, count));
}

void rge_tx_trigger(rge_t *rgep);
#pragma	no_inline(rge_tx_trigger)

void
rge_tx_trigger(rge_t *rgep)
{
	rge_reg_set8(rgep, TX_RINGS_POLL_REG, NORMAL_TX_RING_POLL);
}

void rge_hw_stats_dump(rge_t *rgep);
#pragma	no_inline(rge_hw_stats_dump)

void
rge_hw_stats_dump(rge_t *rgep)
{
	int i = 0;

	while (rge_reg_get32(rgep, DUMP_COUNTER_REG_0) & DUMP_START) {
		drv_usecwait(100);
		if (++i > STATS_DUMP_LOOP) {
			RGE_DEBUG(("rge h/w statistics dump fail!"));
			rgep->rge_chip_state = RGE_CHIP_ERROR;
			return;
		}
	}
	DMA_SYNC(rgep->dma_area_stats, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * Start H/W statistics dump for RTL8169 chip
	 */
	rge_reg_set32(rgep, DUMP_COUNTER_REG_0, DUMP_START);
}

/*
 * ========== Hardware interrupt handler ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_INT	/* debug flag for this code	*/

static void rge_wake_factotum(rge_t *rgep);
#pragma	inline(rge_wake_factotum)

static void
rge_wake_factotum(rge_t *rgep)
{
	if (rgep->factotum_flag == 0) {
		rgep->factotum_flag = 1;
		ddi_trigger_softintr(rgep->factotum_id);
	}
}

/*
 * rge_intr() -- handle chip interrupts
 */
uint_t rge_intr(caddr_t arg);
#pragma	no_inline(rge_intr)

uint_t
rge_intr(caddr_t arg)
{
	rge_t *rgep = (rge_t *)arg;
	uint16_t int_status;

	mutex_enter(rgep->genlock);

	/*
	 * Was this interrupt caused by our device...
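	 * (i.e. is one of the status bits among those currently enabled
	 * in our int_mask?)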
	 */
	int_status = rge_reg_get16(rgep, INT_STATUS_REG);
	if (!(int_status & rgep->int_mask)) {
		mutex_exit(rgep->genlock);
		return (DDI_INTR_UNCLAIMED);
		/* indicate it wasn't our interrupt */
	}

	rgep->stats.intr++;

	/*
	 * Clear interrupt
	 */
	rge_reg_put16(rgep, INT_STATUS_REG, int_status);

	/*
	 * Cable link change interrupt
	 */
	if (int_status & LINK_CHANGE_INT) {
		rge_chip_cyclic(rgep);
	}
	mutex_exit(rgep->genlock);

	/*
	 * Receive interrupt
	 */
	if (int_status & RGE_RX_OVERFLOW_INT)
		rgep->stats.overflow++;
	if (rgep->rge_chip_state == RGE_CHIP_RUNNING)
		rge_receive(rgep);

	return (DDI_INTR_CLAIMED);	/* indicate it was our interrupt */
}

/*
 * ========== Factotum, implemented as a softint handler ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_FACT	/* debug flag for this code	*/

static boolean_t rge_factotum_link_check(rge_t *rgep);
#pragma	no_inline(rge_factotum_link_check)

static boolean_t
rge_factotum_link_check(rge_t *rgep)
{
	uint8_t media_status;
	int32_t link;
	void (*logfn)(rge_t *rgep, const char *fmt, ...);
	const char *msg;
	hrtime_t deltat;

	media_status = rge_reg_get8(rgep, PHY_STATUS_REG);
	link = (media_status & PHY_STATUS_LINK_UP) ?
	    LINK_STATE_UP : LINK_STATE_DOWN;
	if (rgep->param_link_up != link) {
		/*
		 * Link change.  We have to decide whether to write a message
		 * on the console or only in the log.  If the PHY has
		 * been reprogrammed (at user request) "recently", then
		 * the message only goes in the log.  Otherwise it's an
		 * "unexpected" event, and it goes on the console as well.
		 */
		rgep->param_link_up = link;
		rgep->phys_event_time = gethrtime();
		deltat = rgep->phys_event_time - rgep->phys_write_time;
		if (deltat > RGE_LINK_SETTLE_TIME)
			msg = "";
		else if (link == LINK_STATE_UP)
			msg = rgep->link_up_msg;
		else
			msg = rgep->link_down_msg;
		logfn = (msg == NULL || *msg == '\0') ? rge_notice : rge_log;

		if (link == LINK_STATE_UP) {
			if (media_status & PHY_STATUS_1000MF) {
				rgep->param_link_speed = RGE_SPEED_1000M;
				rgep->param_link_duplex = LINK_DUPLEX_FULL;
			} else {
				rgep->param_link_speed =
				    (media_status & PHY_STATUS_100M) ?
				    RGE_SPEED_100M : RGE_SPEED_10M;
				rgep->param_link_duplex =
				    (media_status & PHY_STATUS_DUPLEX_FULL) ?
				    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
			}
			logfn(rgep,
			    "link up %sbps %s_Duplex%s",
			    (rgep->param_link_speed == RGE_SPEED_10M) ?
			    "10M" : (rgep->param_link_speed == RGE_SPEED_100M ?
			    "100M" : "1000M"),
			    (rgep->param_link_duplex == LINK_DUPLEX_FULL) ?
			    "Full" : "Half",
			    msg);
		} else {
			logfn(rgep, "link down%s", msg);
		}
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Factotum routine to check for Tx stall, using the 'watchdog' counter
 */
static boolean_t rge_factotum_stall_check(rge_t *rgep);
#pragma	no_inline(rge_factotum_stall_check)

static boolean_t
rge_factotum_stall_check(rge_t *rgep)
{
	uint32_t dogval;

	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Specific check for Tx stall ...
	 *
	 * The 'watchdog' counter is incremented whenever a packet
	 * is queued, reset to 1 when some (but not all) buffers
	 * are reclaimed, reset to 0 (disabled) when all buffers
	 * are reclaimed, and shifted left here.  If it exceeds the
	 * threshold value, the chip is assumed to have stalled and
	 * is put into the ERROR state.  The factotum will then reset
	 * it on the next pass.
	 *
	 * All of which should ensure that we don't get into a state
	 * where packets are left pending indefinitely!
	 * (With rge_watchdog_count at its default of 1 << 16, this allows
	 * on the order of sixteen factotum passes before a stall is
	 * declared.)
	 */
	dogval = rge_atomic_shl32(&rgep->watchdog, 1);
	if (dogval < rge_watchdog_count)
		return (B_FALSE);

	RGE_REPORT((rgep, "Tx stall detected, watchdog code 0x%x", dogval));
	return (B_TRUE);

}

/*
 * The factotum is woken up when there's something to do that we'd rather
 * not do from inside a hardware interrupt handler or high-level cyclic.
 * Its two main tasks are:
 *	reset & restart the chip after an error
 *	check the link status whenever necessary
 */
uint_t rge_chip_factotum(caddr_t arg);
#pragma	no_inline(rge_chip_factotum)

uint_t
rge_chip_factotum(caddr_t arg)
{
	rge_t *rgep;
	uint_t result;
	boolean_t error;
	boolean_t linkchg;

	rgep = (rge_t *)arg;

	if (rgep->factotum_flag == 0)
		return (DDI_INTR_UNCLAIMED);

	rgep->factotum_flag = 0;
	result = DDI_INTR_CLAIMED;
	error = B_FALSE;
	linkchg = B_FALSE;

	mutex_enter(rgep->genlock);
	switch (rgep->rge_chip_state) {
	default:
		break;

	case RGE_CHIP_RUNNING:
		linkchg = rge_factotum_link_check(rgep);
		error = rge_factotum_stall_check(rgep);
		break;

	case RGE_CHIP_ERROR:
		error = B_TRUE;
		break;

	case RGE_CHIP_FAULT:
		/*
		 * Fault detected, time to reset ...
		 */
		if (rge_autorecover) {
			RGE_REPORT((rgep, "automatic recovery activated"));
			rge_restart(rgep);
		}
		break;
	}

	/*
	 * If an error is detected, stop the chip now, marking it as
	 * faulty, so that it will be reset next time through ...
	 */
	if (error)
		rge_chip_stop(rgep, B_TRUE);
	mutex_exit(rgep->genlock);

	/*
	 * If the link state changed, tell the world about it.
	 * Note: can't do this while still holding the mutex.
	 */
	if (linkchg)
		mac_link_update(rgep->mh, rgep->param_link_up);

	return (result);
}

/*
 * High-level cyclic handler
 *
 * This routine runs the periodic PHY check (the RTL8169S/8110S link-down
 * workaround) and schedules a (low-level) softint callback to the
 * factotum.
 */
void rge_chip_cyclic(void *arg);
#pragma	no_inline(rge_chip_cyclic)

void
rge_chip_cyclic(void *arg)
{
	rge_t *rgep;

	rgep = arg;

	switch (rgep->rge_chip_state) {
	default:
		return;

	case RGE_CHIP_RUNNING:
		rge_phy_check(rgep);
		break;

	case RGE_CHIP_FAULT:
	case RGE_CHIP_ERROR:
		break;
	}

	rge_wake_factotum(rgep);
}


/*
 * ========== Ioctl subfunctions ==========
 */

#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_PPIO	/* debug flag for this code	*/

#if	RGE_DEBUGGING || RGE_DO_PPIO

static void rge_chip_peek_cfg(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma	no_inline(rge_chip_peek_cfg)

static void
rge_chip_peek_cfg(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval;
	uint64_t regno;

	RGE_TRACE(("rge_chip_peek_cfg($%p, $%p)",
		(void *)rgep, (void *)ppd));

	regno = ppd->pp_acc_offset;

	switch (ppd->pp_acc_size) {
	case 1:
		regval = pci_config_get8(rgep->cfg_handle, regno);
		break;

	case 2:
		regval = pci_config_get16(rgep->cfg_handle, regno);
		break;

	case 4:
		regval = pci_config_get32(rgep->cfg_handle, regno);
		break;

	case 8:
		regval = pci_config_get64(rgep->cfg_handle, regno);
		break;
	}

	ppd->pp_acc_data = regval;
}

static void rge_chip_poke_cfg(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma	no_inline(rge_chip_poke_cfg)

static void
rge_chip_poke_cfg(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval;
	uint64_t regno;

	RGE_TRACE(("rge_chip_poke_cfg($%p, $%p)",
		(void *)rgep, (void *)ppd));

	regno = ppd->pp_acc_offset;
	regval = ppd->pp_acc_data;

	switch (ppd->pp_acc_size) {
	case 1:
		pci_config_put8(rgep->cfg_handle, regno, regval);
		break;

	case 2:
		pci_config_put16(rgep->cfg_handle, regno, regval);
		break;

	case 4:
		pci_config_put32(rgep->cfg_handle, regno, regval);
		break;

	case 8:
		pci_config_put64(rgep->cfg_handle, regno, regval);
		break;
	}
}

static void rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma	no_inline(rge_chip_peek_reg)

static void
rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval;
	void *regaddr;

	RGE_TRACE(("rge_chip_peek_reg($%p, $%p)",
		(void *)rgep, (void *)ppd));

	regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);

	switch (ppd->pp_acc_size) {
	case 1:
		regval = ddi_get8(rgep->io_handle, regaddr);
		break;

	case 2:
		regval = ddi_get16(rgep->io_handle, regaddr);
		break;

	case 4:
		regval = ddi_get32(rgep->io_handle, regaddr);
		break;

	case 8:
		regval = ddi_get64(rgep->io_handle, regaddr);
		break;
	}

	ppd->pp_acc_data = regval;
}

static void rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma	no_inline(rge_chip_poke_reg)

static void
rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval;
	void *regaddr;

	RGE_TRACE(("rge_chip_poke_reg($%p, $%p)",
		(void *)rgep, (void *)ppd));

	regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);
	regval = ppd->pp_acc_data;

	switch (ppd->pp_acc_size) {
	case 1:
		ddi_put8(rgep->io_handle, regaddr, regval);
		break;

	case 2:
		ddi_put16(rgep->io_handle, regaddr, regval);
		break;

	case 4:
		ddi_put32(rgep->io_handle, regaddr, regval);
		break;

	case 8:
		ddi_put64(rgep->io_handle, regaddr, regval);
		break;
	}
	RGE_PCICHK(rgep);
}

static void rge_chip_peek_mii(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma	no_inline(rge_chip_peek_mii)

static void
rge_chip_peek_mii(rge_t *rgep, rge_peekpoke_t *ppd)
{
	RGE_TRACE(("rge_chip_peek_mii($%p, $%p)",
		(void *)rgep, (void *)ppd));

	ppd->pp_acc_data = rge_mii_get16(rgep, ppd->pp_acc_offset/2);
}

static void rge_chip_poke_mii(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma	no_inline(rge_chip_poke_mii)

static void
rge_chip_poke_mii(rge_t *rgep, rge_peekpoke_t *ppd)
{
	RGE_TRACE(("rge_chip_poke_mii($%p, $%p)",
		(void *)rgep, (void *)ppd));

	rge_mii_put16(rgep, ppd->pp_acc_offset/2, ppd->pp_acc_data);
}

static void rge_chip_peek_mem(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma	no_inline(rge_chip_peek_mem)

static void
rge_chip_peek_mem(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval;
	void *vaddr;

	RGE_TRACE(("rge_chip_peek_mem($%p, $%p)",
		(void *)rgep, (void *)ppd));

	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;

	switch (ppd->pp_acc_size) {
	case 1:
		regval = *(uint8_t *)vaddr;
		break;

	case 2:
		regval = *(uint16_t *)vaddr;
		break;

	case 4:
		regval = *(uint32_t *)vaddr;
		break;

	case 8:
		regval = *(uint64_t *)vaddr;
		break;
	}

	RGE_DEBUG(("rge_chip_peek_mem($%p, $%p) peeked 0x%llx from $%p",
		(void *)rgep, (void *)ppd, regval, vaddr));

	ppd->pp_acc_data = regval;
}

static void rge_chip_poke_mem(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma	no_inline(rge_chip_poke_mem)

static void
rge_chip_poke_mem(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval;
	void *vaddr;

	RGE_TRACE(("rge_chip_poke_mem($%p, $%p)",
		(void *)rgep, (void *)ppd));

	vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
	regval = ppd->pp_acc_data;

	RGE_DEBUG(("rge_chip_poke_mem($%p, $%p) poking 0x%llx at $%p",
		(void *)rgep, (void *)ppd, regval, vaddr));

	switch (ppd->pp_acc_size) {
	case 1:
		*(uint8_t *)vaddr = (uint8_t)regval;
		break;

	case 2:
		*(uint16_t *)vaddr = (uint16_t)regval;
		break;

	case 4:
		*(uint32_t *)vaddr = (uint32_t)regval;
		break;

	case 8:
		*(uint64_t *)vaddr = (uint64_t)regval;
		break;
	}
}

static enum ioc_reply rge_pp_ioctl(rge_t *rgep, int cmd, mblk_t *mp,
	struct iocblk *iocp);
#pragma	no_inline(rge_pp_ioctl)

static enum ioc_reply
rge_pp_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	void (*ppfn)(rge_t *rgep, rge_peekpoke_t *ppd);
	rge_peekpoke_t *ppd;
	dma_area_t *areap;
	uint64_t sizemask;
	uint64_t mem_va;
	uint64_t maxoff;
	boolean_t peek;

	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_pp_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case RGE_PEEK:
		peek = B_TRUE;
		break;

	case RGE_POKE:
		peek = B_FALSE;
		break;
	}

	/*
	 * Validate format of ioctl
	 */
	if (iocp->ioc_count != sizeof (rge_peekpoke_t))
		return (IOC_INVAL);
	if (mp->b_cont == NULL)
		return (IOC_INVAL);
	ppd = (rge_peekpoke_t *)mp->b_cont->b_rptr;

	/*
	 * Validate request parameters
	 */
	switch (ppd->pp_acc_space) {
	default:
		return (IOC_INVAL);

	case RGE_PP_SPACE_CFG:
		/*
		 * Config space
		 */
		sizemask = 8|4|2|1;
		mem_va = 0;
		maxoff = PCI_CONF_HDR_SIZE;
		ppfn = peek ? rge_chip_peek_cfg : rge_chip_poke_cfg;
		break;

	case RGE_PP_SPACE_REG:
		/*
		 * Memory-mapped I/O space
		 */
		sizemask = 8|4|2|1;
		mem_va = 0;
		maxoff = RGE_REGISTER_MAX;
		ppfn = peek ? rge_chip_peek_reg : rge_chip_poke_reg;
		break;

	case RGE_PP_SPACE_MII:
		/*
		 * PHY's MII registers
		 * NB: all PHY registers are two bytes, but the
		 * addresses increment in ones (word addressing).
		 * So we scale the address here, then undo the
		 * transformation inside the peek/poke functions.
		 */
		ppd->pp_acc_offset *= 2;
		sizemask = 2;
		mem_va = 0;
		maxoff = (MII_MAXREG+1)*2;
		ppfn = peek ? rge_chip_peek_mii : rge_chip_poke_mii;
		break;

	case RGE_PP_SPACE_RGE:
		/*
		 * RGE data structure!
		 */
		sizemask = 8|4|2|1;
		mem_va = (uintptr_t)rgep;
		maxoff = sizeof (*rgep);
		ppfn = peek ? rge_chip_peek_mem : rge_chip_poke_mem;
		break;

	case RGE_PP_SPACE_STATISTICS:
	case RGE_PP_SPACE_TXDESC:
	case RGE_PP_SPACE_TXBUFF:
	case RGE_PP_SPACE_RXDESC:
	case RGE_PP_SPACE_RXBUFF:
		/*
		 * Various DMA_AREAs
		 */
		switch (ppd->pp_acc_space) {
		case RGE_PP_SPACE_TXDESC:
			areap = &rgep->dma_area_txdesc;
			break;
		case RGE_PP_SPACE_TXBUFF:
			areap = &rgep->dma_area_txbuf[0];
			break;
		case RGE_PP_SPACE_RXDESC:
			areap = &rgep->dma_area_rxdesc;
			break;
		case RGE_PP_SPACE_RXBUFF:
			areap = &rgep->dma_area_rxbuf[0];
			break;
		case RGE_PP_SPACE_STATISTICS:
			areap = &rgep->dma_area_stats;
			break;
		}

		sizemask = 8|4|2|1;
		mem_va = (uintptr_t)areap->mem_va;
		maxoff = areap->alength;
		ppfn = peek ? rge_chip_peek_mem : rge_chip_poke_mem;
		break;
	}

	switch (ppd->pp_acc_size) {
	default:
		return (IOC_INVAL);

	case 8:
	case 4:
	case 2:
	case 1:
		if ((ppd->pp_acc_size & sizemask) == 0)
			return (IOC_INVAL);
		break;
	}

	if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
		return (IOC_INVAL);

	if (ppd->pp_acc_offset >= maxoff)
		return (IOC_INVAL);

	if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff)
		return (IOC_INVAL);

	/*
	 * All OK - go do it!
	 */
	ppd->pp_acc_offset += mem_va;
	(*ppfn)(rgep, ppd);
	return (peek ? IOC_REPLY : IOC_ACK);
}

static enum ioc_reply rge_diag_ioctl(rge_t *rgep, int cmd, mblk_t *mp,
	struct iocblk *iocp);
#pragma	no_inline(rge_diag_ioctl)

static enum ioc_reply
rge_diag_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	ASSERT(mutex_owned(rgep->genlock));

	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_diag_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case RGE_DIAG:
		/*
		 * Currently a no-op
		 */
		return (IOC_ACK);

	case RGE_PEEK:
	case RGE_POKE:
		return (rge_pp_ioctl(rgep, cmd, mp, iocp));

	case RGE_PHY_RESET:
		return (IOC_RESTART_ACK);

	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		/*
		 * Reset and reinitialise the chip
		 */
		rge_restart(rgep);
		return (IOC_ACK);
	}

	/* NOTREACHED */
}

#endif	/* RGE_DEBUGGING || RGE_DO_PPIO */

static enum ioc_reply rge_mii_ioctl(rge_t *rgep, int cmd, mblk_t *mp,
	struct iocblk *iocp);
#pragma	no_inline(rge_mii_ioctl)

static enum ioc_reply
rge_mii_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	struct rge_mii_rw *miirwp;

	/*
	 * Validate format of ioctl
	 */
	if (iocp->ioc_count != sizeof (struct rge_mii_rw))
		return (IOC_INVAL);
	if (mp->b_cont == NULL)
		return (IOC_INVAL);
	miirwp = (struct rge_mii_rw *)mp->b_cont->b_rptr;

	/*
	 * Validate request parameters ...
	 */
	if (miirwp->mii_reg > MII_MAXREG)
		return (IOC_INVAL);

	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_mii_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case RGE_MII_READ:
		miirwp->mii_data = rge_mii_get16(rgep, miirwp->mii_reg);
		return (IOC_REPLY);

	case RGE_MII_WRITE:
		rge_mii_put16(rgep, miirwp->mii_reg, miirwp->mii_data);
		return (IOC_ACK);
	}

	/* NOTREACHED */
}

enum ioc_reply rge_chip_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp,
	struct iocblk *iocp);
#pragma	no_inline(rge_chip_ioctl)

enum ioc_reply
rge_chip_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	RGE_TRACE(("rge_chip_ioctl($%p, $%p, $%p, $%p)",
		(void *)rgep, (void *)wq, (void *)mp, (void *)iocp));

	ASSERT(mutex_owned(rgep->genlock));

	cmd = iocp->ioc_cmd;
	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_chip_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case RGE_DIAG:
	case RGE_PEEK:
	case RGE_POKE:
	case RGE_PHY_RESET:
	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
#if	RGE_DEBUGGING || RGE_DO_PPIO
		return (rge_diag_ioctl(rgep, cmd, mp, iocp));
#else
		return (IOC_INVAL);
#endif	/* RGE_DEBUGGING || RGE_DO_PPIO */

	case RGE_MII_READ:
	case RGE_MII_WRITE:
		return (rge_mii_ioctl(rgep, cmd, mp, iocp));

	}

	/* NOTREACHED */
}