1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include "rge.h" 29 30 #define REG32(rgep, reg) ((uint32_t *)(rgep->io_regs+(reg))) 31 #define REG16(rgep, reg) ((uint16_t *)(rgep->io_regs+(reg))) 32 #define REG8(rgep, reg) ((uint8_t *)(rgep->io_regs+(reg))) 33 #define PIO_ADDR(rgep, offset) ((void *)(rgep->io_regs+(offset))) 34 35 /* 36 * Patchable globals: 37 * 38 * rge_autorecover 39 * Enables/disables automatic recovery after fault detection 40 */ 41 static uint32_t rge_autorecover = 1; 42 43 /* 44 * globals: 45 */ 46 #define RGE_DBG RGE_DBG_REGS /* debug flag for this code */ 47 static uint32_t rge_watchdog_count = 1 << 16; 48 49 /* 50 * Operating register get/set access routines 51 */ 52 #if RGE_DEBUGGING 53 54 static void rge_pci_check(rge_t *rgep); 55 #pragma no_inline(rge_pci_check) 56 57 static void 58 rge_pci_check(rge_t *rgep) 59 { 60 uint16_t pcistatus; 61 62 pcistatus = pci_config_get16(rgep->cfg_handle, PCI_CONF_STAT); 63 if ((pcistatus & (PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB)) != 0) 64 RGE_DEBUG(("rge_pci_check($%p): PCI status 0x%x", 65 (void *)rgep, pcistatus)); 66 } 67 68 #endif /* RGE_DEBUGGING */ 69 70 static uint32_t rge_reg_get32(rge_t *rgep, uintptr_t regno); 71 #pragma inline(rge_reg_get32) 72 73 static uint32_t 74 rge_reg_get32(rge_t *rgep, uintptr_t regno) 75 { 76 RGE_TRACE(("rge_reg_get32($%p, 0x%lx)", 77 (void *)rgep, regno)); 78 79 return (ddi_get32(rgep->io_handle, REG32(rgep, regno))); 80 } 81 82 static void rge_reg_put32(rge_t *rgep, uintptr_t regno, uint32_t data); 83 #pragma inline(rge_reg_put32) 84 85 static void 86 rge_reg_put32(rge_t *rgep, uintptr_t regno, uint32_t data) 87 { 88 RGE_TRACE(("rge_reg_put32($%p, 0x%lx, 0x%x)", 89 (void *)rgep, regno, data)); 90 91 ddi_put32(rgep->io_handle, REG32(rgep, regno), data); 92 RGE_PCICHK(rgep); 93 } 94 95 static void rge_reg_set32(rge_t *rgep, uintptr_t regno, uint32_t bits); 96 #pragma inline(rge_reg_set32) 97 98 static void 99 rge_reg_set32(rge_t *rgep, uintptr_t regno, uint32_t bits) 100 { 101 uint32_t regval; 102 103 RGE_TRACE(("rge_reg_set32($%p, 0x%lx, 0x%x)", 104 (void *)rgep, regno, bits)); 105 106 regval = rge_reg_get32(rgep, regno); 107 regval |= bits; 108 rge_reg_put32(rgep, regno, regval); 109 } 110 111 static void rge_reg_clr32(rge_t *rgep, uintptr_t regno, uint32_t bits); 112 #pragma inline(rge_reg_clr32) 113 114 static void 115 rge_reg_clr32(rge_t *rgep, uintptr_t regno, uint32_t bits) 116 { 117 uint32_t regval; 118 119 RGE_TRACE(("rge_reg_clr32($%p, 0x%lx, 0x%x)", 120 (void *)rgep, regno, bits)); 121 122 regval = rge_reg_get32(rgep, regno); 123 
regval &= ~bits; 124 rge_reg_put32(rgep, regno, regval); 125 } 126 127 static uint16_t rge_reg_get16(rge_t *rgep, uintptr_t regno); 128 #pragma inline(rge_reg_get16) 129 130 static uint16_t 131 rge_reg_get16(rge_t *rgep, uintptr_t regno) 132 { 133 RGE_TRACE(("rge_reg_get16($%p, 0x%lx)", 134 (void *)rgep, regno)); 135 136 return (ddi_get16(rgep->io_handle, REG16(rgep, regno))); 137 } 138 139 static void rge_reg_put16(rge_t *rgep, uintptr_t regno, uint16_t data); 140 #pragma inline(rge_reg_put16) 141 142 static void 143 rge_reg_put16(rge_t *rgep, uintptr_t regno, uint16_t data) 144 { 145 RGE_TRACE(("rge_reg_put16($%p, 0x%lx, 0x%x)", 146 (void *)rgep, regno, data)); 147 148 ddi_put16(rgep->io_handle, REG16(rgep, regno), data); 149 RGE_PCICHK(rgep); 150 } 151 152 static void rge_reg_set16(rge_t *rgep, uintptr_t regno, uint16_t bits); 153 #pragma inline(rge_reg_set16) 154 155 static void 156 rge_reg_set16(rge_t *rgep, uintptr_t regno, uint16_t bits) 157 { 158 uint16_t regval; 159 160 RGE_TRACE(("rge_reg_set16($%p, 0x%lx, 0x%x)", 161 (void *)rgep, regno, bits)); 162 163 regval = rge_reg_get16(rgep, regno); 164 regval |= bits; 165 rge_reg_put16(rgep, regno, regval); 166 } 167 168 static void rge_reg_clr16(rge_t *rgep, uintptr_t regno, uint16_t bits); 169 #pragma inline(rge_reg_clr16) 170 171 static void 172 rge_reg_clr16(rge_t *rgep, uintptr_t regno, uint16_t bits) 173 { 174 uint16_t regval; 175 176 RGE_TRACE(("rge_reg_clr16($%p, 0x%lx, 0x%x)", 177 (void *)rgep, regno, bits)); 178 179 regval = rge_reg_get16(rgep, regno); 180 regval &= ~bits; 181 rge_reg_put16(rgep, regno, regval); 182 } 183 184 static uint8_t rge_reg_get8(rge_t *rgep, uintptr_t regno); 185 #pragma inline(rge_reg_get8) 186 187 static uint8_t 188 rge_reg_get8(rge_t *rgep, uintptr_t regno) 189 { 190 RGE_TRACE(("rge_reg_get8($%p, 0x%lx)", 191 (void *)rgep, regno)); 192 193 return (ddi_get8(rgep->io_handle, REG8(rgep, regno))); 194 } 195 196 static void rge_reg_put8(rge_t *rgep, uintptr_t regno, uint8_t data); 197 #pragma inline(rge_reg_put8) 198 199 static void 200 rge_reg_put8(rge_t *rgep, uintptr_t regno, uint8_t data) 201 { 202 RGE_TRACE(("rge_reg_put8($%p, 0x%lx, 0x%x)", 203 (void *)rgep, regno, data)); 204 205 ddi_put8(rgep->io_handle, REG8(rgep, regno), data); 206 RGE_PCICHK(rgep); 207 } 208 209 static void rge_reg_set8(rge_t *rgep, uintptr_t regno, uint8_t bits); 210 #pragma inline(rge_reg_set8) 211 212 static void 213 rge_reg_set8(rge_t *rgep, uintptr_t regno, uint8_t bits) 214 { 215 uint8_t regval; 216 217 RGE_TRACE(("rge_reg_set8($%p, 0x%lx, 0x%x)", 218 (void *)rgep, regno, bits)); 219 220 regval = rge_reg_get8(rgep, regno); 221 regval |= bits; 222 rge_reg_put8(rgep, regno, regval); 223 } 224 225 static void rge_reg_clr8(rge_t *rgep, uintptr_t regno, uint8_t bits); 226 #pragma inline(rge_reg_clr8) 227 228 static void 229 rge_reg_clr8(rge_t *rgep, uintptr_t regno, uint8_t bits) 230 { 231 uint8_t regval; 232 233 RGE_TRACE(("rge_reg_clr8($%p, 0x%lx, 0x%x)", 234 (void *)rgep, regno, bits)); 235 236 regval = rge_reg_get8(rgep, regno); 237 regval &= ~bits; 238 rge_reg_put8(rgep, regno, regval); 239 } 240 241 uint16_t rge_mii_get16(rge_t *rgep, uintptr_t mii); 242 #pragma no_inline(rge_mii_get16) 243 244 uint16_t 245 rge_mii_get16(rge_t *rgep, uintptr_t mii) 246 { 247 uint32_t regval; 248 uint32_t val32; 249 uint32_t i; 250 251 regval = (mii & PHY_REG_MASK) << PHY_REG_SHIFT; 252 rge_reg_put32(rgep, PHY_ACCESS_REG, regval); 253 254 /* 255 * Waiting for PHY reading OK 256 */ 257 for (i = 0; i < PHY_RESET_LOOP; i++) { 258 
		drv_usecwait(100);
		val32 = rge_reg_get32(rgep, PHY_ACCESS_REG);
		if (val32 & PHY_ACCESS_WR_FLAG)
			return ((uint16_t)(val32 & 0xffff));
	}

	RGE_REPORT((rgep, "rge_mii_get16(0x%lx) fail, val = %x", mii, val32));
	return ((uint16_t)~0u);
}

void rge_mii_put16(rge_t *rgep, uintptr_t mii, uint16_t data);
#pragma no_inline(rge_mii_put16)

void
rge_mii_put16(rge_t *rgep, uintptr_t mii, uint16_t data)
{
	uint32_t regval;
	uint32_t val32;
	uint32_t i;

	regval = (mii & PHY_REG_MASK) << PHY_REG_SHIFT;
	regval |= data & PHY_DATA_MASK;
	regval |= PHY_ACCESS_WR_FLAG;
	rge_reg_put32(rgep, PHY_ACCESS_REG, regval);

	/*
	 * Wait for the PHY write to complete
	 */
	for (i = 0; i < PHY_RESET_LOOP; i++) {
		drv_usecwait(100);
		val32 = rge_reg_get32(rgep, PHY_ACCESS_REG);
		if (!(val32 & PHY_ACCESS_WR_FLAG))
			return;
	}
	RGE_REPORT((rgep, "rge_mii_put16(0x%lx, 0x%x) fail",
	    mii, data));
}

void rge_ephy_put16(rge_t *rgep, uintptr_t emii, uint16_t data);
#pragma no_inline(rge_ephy_put16)

void
rge_ephy_put16(rge_t *rgep, uintptr_t emii, uint16_t data)
{
	uint32_t regval;
	uint32_t val32;
	uint32_t i;

	regval = (emii & EPHY_REG_MASK) << EPHY_REG_SHIFT;
	regval |= data & EPHY_DATA_MASK;
	regval |= EPHY_ACCESS_WR_FLAG;
	rge_reg_put32(rgep, EPHY_ACCESS_REG, regval);

	/*
	 * Wait for the EPHY write to complete
	 */
	for (i = 0; i < PHY_RESET_LOOP; i++) {
		drv_usecwait(100);
		val32 = rge_reg_get32(rgep, EPHY_ACCESS_REG);
		if (!(val32 & EPHY_ACCESS_WR_FLAG))
			return;
	}
	RGE_REPORT((rgep, "rge_ephy_put16(0x%lx, 0x%x) fail",
	    emii, data));
}

/*
 * Atomically shift a 32-bit word left, returning
 * the value it had *before* the shift was applied
 */
static uint32_t rge_atomic_shl32(uint32_t *sp, uint_t count);
#pragma inline(rge_atomic_shl32)

static uint32_t
rge_atomic_shl32(uint32_t *sp, uint_t count)
{
	uint32_t oldval;
	uint32_t newval;

	/* ATOMICALLY */
	do {
		oldval = *sp;
		newval = oldval << count;
	} while (cas32(sp, oldval, newval) != oldval);

	return (oldval);
}

/*
 * PHY operation routines
 */
#if RGE_DEBUGGING

static void
rge_phydump(rge_t *rgep)
{
	uint16_t regs[32];
	int i;

	ASSERT(mutex_owned(rgep->genlock));

	for (i = 0; i < 32; ++i) {
		regs[i] = rge_mii_get16(rgep, i);
	}

	for (i = 0; i < 32; i += 8)
		RGE_DEBUG(("rge_phydump: "
		    "0x%04x %04x %04x %04x %04x %04x %04x %04x",
		    regs[i+0], regs[i+1], regs[i+2], regs[i+3],
		    regs[i+4], regs[i+5], regs[i+6], regs[i+7]));
}

#endif /* RGE_DEBUGGING */

/*
 * Basic low-level function to probe for a PHY
 *
 * Returns TRUE if the PHY responds with valid data, FALSE otherwise
 */
static boolean_t
rge_phy_probe(rge_t *rgep)
{
	uint16_t phy_status;

	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Read the MII_STATUS register twice, in
	 * order to clear any sticky bits (but they should
	 * have been cleared by the RESET, I think).
	 */
	phy_status = rge_mii_get16(rgep, MII_STATUS);
	phy_status = rge_mii_get16(rgep, MII_STATUS);
	RGE_DEBUG(("rge_phy_probe: status 0x%x", phy_status));

	/*
	 * Now check the value read; it should have at least one bit set
	 * (for the device capabilities) and at least one clear (one of
	 * the error bits).  So if we see all 0s or all 1s, there's a
	 * problem.  In particular, rge_mii_get16() returns all 1s if
	 * communication fails ...
	 */
	switch (phy_status) {
	case 0x0000:
	case 0xffff:
		return (B_FALSE);

	default:
		return (B_TRUE);
	}
}

static void
rge_phy_check(rge_t *rgep)
{
	uint16_t gig_ctl;

	if (rgep->param_link_up == LINK_STATE_DOWN) {
		/*
		 * The RTL8169S/8110S PHY has the "PCS bug": when the link
		 * is down and 1000Mb/s full-duplex is being advertised,
		 * the PHY must be reset every 15 seconds.
		 */
		if (rgep->chipid.phy_ver == PHY_VER_S) {
			gig_ctl = rge_mii_get16(rgep, MII_1000BASE_T_CONTROL);
			if (gig_ctl & MII_1000BT_CTL_ADV_FDX) {
				rgep->link_down_count++;
				if (rgep->link_down_count > 15) {
					(void) rge_phy_reset(rgep);
					rgep->stats.phy_reset++;
					rgep->link_down_count = 0;
				}
			}
		}
	} else {
		rgep->link_down_count = 0;
	}
}

/*
 * Basic low-level function to reset the PHY.
 * Doesn't incorporate any special-case workarounds.
 *
 * Returns TRUE on success, FALSE if the RESET bit doesn't clear
 */
boolean_t
rge_phy_reset(rge_t *rgep)
{
	uint16_t control;
	uint_t count;

	/*
	 * Set the PHY RESET bit, then wait up to 100 ms for it to self-clear
	 */
	control = rge_mii_get16(rgep, MII_CONTROL);
	rge_mii_put16(rgep, MII_CONTROL, control | MII_CONTROL_RESET);
	for (count = 0; ++count < 1000; ) {
		drv_usecwait(100);
		control = rge_mii_get16(rgep, MII_CONTROL);
		if (BIC(control, MII_CONTROL_RESET))
			return (B_TRUE);
	}

	RGE_REPORT((rgep, "rge_phy_reset: FAILED, control now 0x%x", control));
	return (B_FALSE);
}

/*
 * Synchronise the PHY's speed/duplex/autonegotiation capabilities
 * and advertisements with the required settings as specified by the various
 * param_* variables that can be poked via the NDD interface.
 *
 * We always reset the PHY and reprogram *all* the relevant registers,
 * not just those changed.  This should cause the link to go down, and then
 * back up again once the link is stable and autonegotiation (if enabled)
 * is complete.  We should get a link state change interrupt somewhere along
 * the way ...
 *
 * NOTE: <genlock> must already be held by the caller
 */
void
rge_phy_update(rge_t *rgep)
{
	boolean_t adv_autoneg;
	boolean_t adv_pause;
	boolean_t adv_asym_pause;
	boolean_t adv_1000fdx;
	boolean_t adv_1000hdx;
	boolean_t adv_100fdx;
	boolean_t adv_100hdx;
	boolean_t adv_10fdx;
	boolean_t adv_10hdx;

	uint16_t control;
	uint16_t gigctrl;
	uint16_t anar;

	ASSERT(mutex_owned(rgep->genlock));

	RGE_DEBUG(("rge_phy_update: autoneg %d "
	    "pause %d asym_pause %d "
	    "1000fdx %d 1000hdx %d "
	    "100fdx %d 100hdx %d "
	    "10fdx %d 10hdx %d ",
	    rgep->param_adv_autoneg,
	    rgep->param_adv_pause, rgep->param_adv_asym_pause,
	    rgep->param_adv_1000fdx, rgep->param_adv_1000hdx,
	    rgep->param_adv_100fdx, rgep->param_adv_100hdx,
	    rgep->param_adv_10fdx, rgep->param_adv_10hdx));

	control = gigctrl = anar = 0;

	/*
	 * PHY settings are normally based on the param_* variables,
	 * but if any loopback mode is in effect, that takes precedence.
	 *
	 * RGE supports MAC-internal loopback, PHY-internal loopback,
	 * and External loopback at a variety of speeds (with a special
	 * cable).
In all cases, autoneg is turned OFF, full-duplex 516 * is turned ON, and the speed/mastership is forced. 517 */ 518 switch (rgep->param_loop_mode) { 519 case RGE_LOOP_NONE: 520 default: 521 adv_autoneg = rgep->param_adv_autoneg; 522 adv_pause = rgep->param_adv_pause; 523 adv_asym_pause = rgep->param_adv_asym_pause; 524 adv_1000fdx = rgep->param_adv_1000fdx; 525 adv_1000hdx = rgep->param_adv_1000hdx; 526 adv_100fdx = rgep->param_adv_100fdx; 527 adv_100hdx = rgep->param_adv_100hdx; 528 adv_10fdx = rgep->param_adv_10fdx; 529 adv_10hdx = rgep->param_adv_10hdx; 530 break; 531 532 case RGE_LOOP_INTERNAL_PHY: 533 case RGE_LOOP_INTERNAL_MAC: 534 adv_autoneg = adv_pause = adv_asym_pause = B_FALSE; 535 adv_1000fdx = adv_100fdx = adv_10fdx = B_FALSE; 536 adv_1000hdx = adv_100hdx = adv_10hdx = B_FALSE; 537 rgep->param_link_duplex = LINK_DUPLEX_FULL; 538 539 switch (rgep->param_loop_mode) { 540 case RGE_LOOP_INTERNAL_PHY: 541 rgep->param_link_speed = 1000; 542 adv_1000fdx = B_TRUE; 543 control = MII_CONTROL_LOOPBACK; 544 break; 545 546 case RGE_LOOP_INTERNAL_MAC: 547 rgep->param_link_speed = 1000; 548 adv_1000fdx = B_TRUE; 549 break; 550 } 551 } 552 553 RGE_DEBUG(("rge_phy_update: autoneg %d " 554 "pause %d asym_pause %d " 555 "1000fdx %d 1000hdx %d " 556 "100fdx %d 100hdx %d " 557 "10fdx %d 10hdx %d ", 558 adv_autoneg, 559 adv_pause, adv_asym_pause, 560 adv_1000fdx, adv_1000hdx, 561 adv_100fdx, adv_100hdx, 562 adv_10fdx, adv_10hdx)); 563 564 /* 565 * We should have at least one technology capability set; 566 * if not, we select a default of 1000Mb/s full-duplex 567 */ 568 if (!adv_1000fdx && !adv_100fdx && !adv_10fdx && 569 !adv_1000hdx && !adv_100hdx && !adv_10hdx) 570 adv_1000fdx = B_TRUE; 571 572 /* 573 * Now transform the adv_* variables into the proper settings 574 * of the PHY registers ... 575 * 576 * If autonegotiation is (now) enabled, we want to trigger 577 * a new autonegotiation cycle once the PHY has been 578 * programmed with the capabilities to be advertised. 579 * 580 * RTL8169/8110 doesn't support 1000Mb/s half-duplex. 581 */ 582 if (adv_autoneg) 583 control |= MII_CONTROL_ANE|MII_CONTROL_RSAN; 584 585 if (adv_1000fdx) 586 control |= MII_CONTROL_1000MB|MII_CONTROL_FDUPLEX; 587 else if (adv_1000hdx) 588 control |= MII_CONTROL_1000MB; 589 else if (adv_100fdx) 590 control |= MII_CONTROL_100MB|MII_CONTROL_FDUPLEX; 591 else if (adv_100hdx) 592 control |= MII_CONTROL_100MB; 593 else if (adv_10fdx) 594 control |= MII_CONTROL_FDUPLEX; 595 else if (adv_10hdx) 596 control |= 0; 597 else 598 { _NOTE(EMPTY); } /* Can't get here anyway ... */ 599 600 if (adv_1000fdx) { 601 gigctrl |= MII_1000BT_CTL_ADV_FDX; 602 /* 603 * Chipset limitation: need set other capabilities to true 604 */ 605 if (rgep->chipid.is_pcie) 606 adv_1000hdx = B_TRUE; 607 adv_100fdx = B_TRUE; 608 adv_100hdx = B_TRUE; 609 adv_10fdx = B_TRUE; 610 adv_10hdx = B_TRUE; 611 } 612 613 if (adv_1000hdx) 614 gigctrl |= MII_1000BT_CTL_ADV_HDX; 615 616 if (adv_100fdx) 617 anar |= MII_ABILITY_100BASE_TX_FD; 618 if (adv_100hdx) 619 anar |= MII_ABILITY_100BASE_TX; 620 if (adv_10fdx) 621 anar |= MII_ABILITY_10BASE_T_FD; 622 if (adv_10hdx) 623 anar |= MII_ABILITY_10BASE_T; 624 625 if (adv_pause) 626 anar |= MII_ABILITY_PAUSE; 627 if (adv_asym_pause) 628 anar |= MII_ABILITY_ASYM_PAUSE; 629 630 /* 631 * Munge in any other fixed bits we require ... 632 */ 633 anar |= MII_AN_SELECTOR_8023; 634 635 /* 636 * Restart the PHY and write the new values. 
Note the 637 * time, so that we can say whether subsequent link state 638 * changes can be attributed to our reprogramming the PHY 639 */ 640 rgep->phys_write_time = gethrtime(); 641 rge_phy_init(rgep); 642 rge_mii_put16(rgep, MII_AN_ADVERT, anar); 643 rge_mii_put16(rgep, MII_1000BASE_T_CONTROL, gigctrl); 644 rge_mii_put16(rgep, MII_CONTROL, control); 645 646 RGE_DEBUG(("rge_phy_update: anar <- 0x%x", anar)); 647 RGE_DEBUG(("rge_phy_update: control <- 0x%x", control)); 648 RGE_DEBUG(("rge_phy_update: gigctrl <- 0x%x", gigctrl)); 649 } 650 651 void rge_phy_init(rge_t *rgep); 652 #pragma no_inline(rge_phy_init) 653 654 void 655 rge_phy_init(rge_t *rgep) 656 { 657 rgep->phy_mii_addr = 1; 658 659 /* 660 * Below phy config steps are copied from the Programming Guide 661 * (there's no detail comments for these steps.) 662 */ 663 switch (rgep->chipid.mac_ver) { 664 case MAC_VER_8169S_D: 665 case MAC_VER_8169S_E : 666 rge_mii_put16(rgep, PHY_1F_REG, 0x0001); 667 rge_mii_put16(rgep, PHY_15_REG, 0x1000); 668 rge_mii_put16(rgep, PHY_18_REG, 0x65c7); 669 rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000); 670 rge_mii_put16(rgep, PHY_ID_REG_2, 0x00a1); 671 rge_mii_put16(rgep, PHY_ID_REG_1, 0x0008); 672 rge_mii_put16(rgep, PHY_BMSR_REG, 0x1020); 673 rge_mii_put16(rgep, PHY_BMCR_REG, 0x1000); 674 rge_mii_put16(rgep, PHY_ANAR_REG, 0x0800); 675 rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000); 676 rge_mii_put16(rgep, PHY_ANAR_REG, 0x7000); 677 rge_mii_put16(rgep, PHY_ID_REG_2, 0xff41); 678 rge_mii_put16(rgep, PHY_ID_REG_1, 0xde60); 679 rge_mii_put16(rgep, PHY_BMSR_REG, 0x0140); 680 rge_mii_put16(rgep, PHY_BMCR_REG, 0x0077); 681 rge_mii_put16(rgep, PHY_ANAR_REG, 0x7800); 682 rge_mii_put16(rgep, PHY_ANAR_REG, 0x7000); 683 rge_mii_put16(rgep, PHY_ANAR_REG, 0xa000); 684 rge_mii_put16(rgep, PHY_ID_REG_2, 0xdf01); 685 rge_mii_put16(rgep, PHY_ID_REG_1, 0xdf20); 686 rge_mii_put16(rgep, PHY_BMSR_REG, 0xff95); 687 rge_mii_put16(rgep, PHY_BMCR_REG, 0xfa00); 688 rge_mii_put16(rgep, PHY_ANAR_REG, 0xa800); 689 rge_mii_put16(rgep, PHY_ANAR_REG, 0xa000); 690 rge_mii_put16(rgep, PHY_ANAR_REG, 0xb000); 691 rge_mii_put16(rgep, PHY_ID_REG_2, 0xff41); 692 rge_mii_put16(rgep, PHY_ID_REG_1, 0xde20); 693 rge_mii_put16(rgep, PHY_BMSR_REG, 0x0140); 694 rge_mii_put16(rgep, PHY_BMCR_REG, 0x00bb); 695 rge_mii_put16(rgep, PHY_ANAR_REG, 0xb800); 696 rge_mii_put16(rgep, PHY_ANAR_REG, 0xb000); 697 rge_mii_put16(rgep, PHY_ANAR_REG, 0xf000); 698 rge_mii_put16(rgep, PHY_ID_REG_2, 0xdf01); 699 rge_mii_put16(rgep, PHY_ID_REG_1, 0xdf20); 700 rge_mii_put16(rgep, PHY_BMSR_REG, 0xff95); 701 rge_mii_put16(rgep, PHY_BMCR_REG, 0xbf00); 702 rge_mii_put16(rgep, PHY_ANAR_REG, 0xf800); 703 rge_mii_put16(rgep, PHY_ANAR_REG, 0xf000); 704 rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000); 705 rge_mii_put16(rgep, PHY_1F_REG, 0x0000); 706 rge_mii_put16(rgep, PHY_0B_REG, 0x0000); 707 break; 708 709 case MAC_VER_8169SB: 710 rge_mii_put16(rgep, PHY_1F_REG, 0x0001); 711 rge_mii_put16(rgep, PHY_1B_REG, 0xD41E); 712 rge_mii_put16(rgep, PHY_0E_REG, 0x7bff); 713 rge_mii_put16(rgep, PHY_GBCR_REG, GBCR_DEFAULT); 714 rge_mii_put16(rgep, PHY_1F_REG, 0x0002); 715 rge_mii_put16(rgep, PHY_BMSR_REG, 0x90D0); 716 rge_mii_put16(rgep, PHY_1F_REG, 0x0000); 717 break; 718 719 case MAC_VER_8168: 720 rge_mii_put16(rgep, PHY_1F_REG, 0x0001); 721 rge_mii_put16(rgep, PHY_ANER_REG, 0x00aa); 722 rge_mii_put16(rgep, PHY_ANNPTR_REG, 0x3173); 723 rge_mii_put16(rgep, PHY_ANNPRR_REG, 0x08fc); 724 rge_mii_put16(rgep, PHY_GBCR_REG, 0xe2d0); 725 rge_mii_put16(rgep, PHY_0B_REG, 0x941a); 726 rge_mii_put16(rgep, 
PHY_18_REG, 0x65fe); 727 rge_mii_put16(rgep, PHY_1C_REG, 0x1e02); 728 rge_mii_put16(rgep, PHY_1F_REG, 0x0002); 729 rge_mii_put16(rgep, PHY_ANNPTR_REG, 0x103e); 730 rge_mii_put16(rgep, PHY_1F_REG, 0x0000); 731 break; 732 733 case MAC_VER_8168B_B: 734 case MAC_VER_8168B_C: 735 rge_mii_put16(rgep, PHY_1F_REG, 0x0001); 736 rge_mii_put16(rgep, PHY_0B_REG, 0x94b0); 737 rge_mii_put16(rgep, PHY_1B_REG, 0xc416); 738 rge_mii_put16(rgep, PHY_1F_REG, 0x0003); 739 rge_mii_put16(rgep, PHY_12_REG, 0x6096); 740 rge_mii_put16(rgep, PHY_1F_REG, 0x0000); 741 break; 742 } 743 } 744 745 void rge_chip_ident(rge_t *rgep); 746 #pragma no_inline(rge_chip_ident) 747 748 void 749 rge_chip_ident(rge_t *rgep) 750 { 751 chip_id_t *chip = &rgep->chipid; 752 uint32_t val32; 753 uint16_t val16; 754 755 /* 756 * Read and record MAC version 757 */ 758 val32 = rge_reg_get32(rgep, TX_CONFIG_REG); 759 val32 &= HW_VERSION_ID_0 | HW_VERSION_ID_1; 760 chip->mac_ver = val32; 761 switch (chip->mac_ver) { 762 case MAC_VER_8168: 763 case MAC_VER_8168B_B: 764 case MAC_VER_8168B_C: 765 chip->is_pcie = B_TRUE; 766 break; 767 768 default: 769 chip->is_pcie = B_FALSE; 770 break; 771 } 772 773 /* 774 * Read and record PHY version 775 */ 776 val16 = rge_mii_get16(rgep, PHY_ID_REG_2); 777 val16 &= PHY_VER_MASK; 778 chip->phy_ver = val16; 779 780 /* set pci latency timer */ 781 if (chip->mac_ver == MAC_VER_8169 || 782 chip->mac_ver == MAC_VER_8169S_D) 783 pci_config_put8(rgep->cfg_handle, PCI_CONF_LATENCY_TIMER, 0x40); 784 785 /* 786 * PCIE chipset require the Rx buffer start address must be 787 * 8-byte alignment and the Rx buffer size must be multiple of 8. 788 * We'll just use bcopy in receive procedure for the PCIE chipset. 789 */ 790 if (chip->is_pcie) { 791 rgep->chip_flags |= CHIP_FLAG_FORCE_BCOPY; 792 if (rgep->default_mtu > ETHERMTU) { 793 rge_notice(rgep, "Jumbo packets not supported " 794 "for this PCIE chipset"); 795 rgep->default_mtu = ETHERMTU; 796 } 797 } 798 if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY) 799 rgep->head_room = 0; 800 else 801 rgep->head_room = RGE_HEADROOM; 802 803 /* 804 * Initialize other variables. 805 */ 806 if (rgep->default_mtu < ETHERMTU || rgep->default_mtu > RGE_JUMBO_MTU) 807 rgep->default_mtu = ETHERMTU; 808 if (rgep->default_mtu > ETHERMTU) { 809 rgep->rxbuf_size = RGE_BUFF_SIZE_JUMBO; 810 rgep->txbuf_size = RGE_BUFF_SIZE_JUMBO; 811 rgep->ethmax_size = RGE_JUMBO_SIZE; 812 } else { 813 rgep->rxbuf_size = RGE_BUFF_SIZE_STD; 814 rgep->txbuf_size = RGE_BUFF_SIZE_STD; 815 rgep->ethmax_size = ETHERMAX; 816 } 817 chip->rxconfig = RX_CONFIG_DEFAULT; 818 chip->txconfig = TX_CONFIG_DEFAULT; 819 820 RGE_TRACE(("%s: MAC version = %x, PHY version = %x", 821 rgep->ifname, chip->mac_ver, chip->phy_ver)); 822 } 823 824 /* 825 * Perform first-stage chip (re-)initialisation, using only config-space 826 * accesses: 827 * 828 * + Read the vendor/device/revision/subsystem/cache-line-size registers, 829 * returning the data in the structure pointed to by <idp>. 830 * + Enable Memory Space accesses. 831 * + Enable Bus Mastering according. 
832 */ 833 void rge_chip_cfg_init(rge_t *rgep, chip_id_t *cidp); 834 #pragma no_inline(rge_chip_cfg_init) 835 836 void 837 rge_chip_cfg_init(rge_t *rgep, chip_id_t *cidp) 838 { 839 ddi_acc_handle_t handle; 840 uint16_t commd; 841 842 handle = rgep->cfg_handle; 843 844 /* 845 * Save PCI cache line size and subsystem vendor ID 846 */ 847 cidp->command = pci_config_get16(handle, PCI_CONF_COMM); 848 cidp->vendor = pci_config_get16(handle, PCI_CONF_VENID); 849 cidp->device = pci_config_get16(handle, PCI_CONF_DEVID); 850 cidp->subven = pci_config_get16(handle, PCI_CONF_SUBVENID); 851 cidp->subdev = pci_config_get16(handle, PCI_CONF_SUBSYSID); 852 cidp->revision = pci_config_get8(handle, PCI_CONF_REVID); 853 cidp->clsize = pci_config_get8(handle, PCI_CONF_CACHE_LINESZ); 854 cidp->latency = pci_config_get8(handle, PCI_CONF_LATENCY_TIMER); 855 856 /* 857 * Turn on Master Enable (DMA) and IO Enable bits. 858 * Enable PCI Memory Space accesses 859 */ 860 commd = cidp->command; 861 commd |= PCI_COMM_ME | PCI_COMM_MAE | PCI_COMM_IO; 862 pci_config_put16(handle, PCI_CONF_COMM, commd); 863 864 RGE_DEBUG(("rge_chip_cfg_init: vendor 0x%x device 0x%x revision 0x%x", 865 cidp->vendor, cidp->device, cidp->revision)); 866 RGE_DEBUG(("rge_chip_cfg_init: subven 0x%x subdev 0x%x", 867 cidp->subven, cidp->subdev)); 868 RGE_DEBUG(("rge_chip_cfg_init: clsize %d latency %d command 0x%x", 869 cidp->clsize, cidp->latency, cidp->command)); 870 } 871 872 int rge_chip_reset(rge_t *rgep); 873 #pragma no_inline(rge_chip_reset) 874 875 int 876 rge_chip_reset(rge_t *rgep) 877 { 878 int i; 879 uint8_t val8; 880 881 /* 882 * Chip should be in STOP state 883 */ 884 rge_reg_clr8(rgep, RT_COMMAND_REG, 885 RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE); 886 887 /* 888 * Disable interrupt 889 */ 890 rgep->int_mask = INT_MASK_NONE; 891 rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask); 892 893 /* 894 * Clear pended interrupt 895 */ 896 rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL); 897 898 /* 899 * Reset chip 900 */ 901 rge_reg_set8(rgep, RT_COMMAND_REG, RT_COMMAND_RESET); 902 903 /* 904 * Wait for reset success 905 */ 906 for (i = 0; i < CHIP_RESET_LOOP; i++) { 907 drv_usecwait(10); 908 val8 = rge_reg_get8(rgep, RT_COMMAND_REG); 909 if (!(val8 & RT_COMMAND_RESET)) { 910 rgep->rge_chip_state = RGE_CHIP_RESET; 911 return (0); 912 } 913 } 914 RGE_REPORT((rgep, "rge_chip_reset fail.")); 915 return (-1); 916 } 917 918 void rge_chip_init(rge_t *rgep); 919 #pragma no_inline(rge_chip_init) 920 921 void 922 rge_chip_init(rge_t *rgep) 923 { 924 uint32_t val32; 925 uint32_t val16; 926 uint32_t *hashp; 927 chip_id_t *chip = &rgep->chipid; 928 929 if (chip->is_pcie) { 930 /* 931 * Increase the threshold voltage of RX sensitivity 932 */ 933 if (chip->mac_ver != MAC_VER_8168) 934 rge_ephy_put16(rgep, 0x01, 0x1bd3); 935 936 val16 = rge_reg_get8(rgep, PHY_STATUS_REG); 937 val16 = 0x12<<8 | val16; 938 rge_reg_put16(rgep, PHY_STATUS_REG, val16); 939 rge_reg_put32(rgep, RT_CSI_DATA_REG, 0x00021c01); 940 rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f088); 941 rge_reg_put32(rgep, RT_CSI_DATA_REG, 0x00004000); 942 rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f0b0); 943 rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x0000f068); 944 val32 = rge_reg_get32(rgep, RT_CSI_DATA_REG); 945 val32 |= 0x7000; 946 val32 &= 0xffff5fff; 947 rge_reg_put32(rgep, RT_CSI_DATA_REG, val32); 948 rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f068); 949 } 950 951 /* 952 * Config MII register 953 */ 954 rgep->param_link_up = LINK_STATE_DOWN; 955 rge_phy_update(rgep); 956 957 /* 958 * 
Enable Rx checksum offload. 959 * Then for vlan support, we must enable receive vlan de-tagging. 960 * Otherwise, there'll be checksum error. 961 */ 962 val16 = rge_reg_get16(rgep, CPLUS_COMMAND_REG); 963 val16 |= RX_CKSM_OFFLOAD | RX_VLAN_DETAG; 964 if (chip->mac_ver == MAC_VER_8169S_D) { 965 val16 |= CPLUS_BIT14 | MUL_PCI_RW_ENABLE; 966 rge_reg_put8(rgep, RESV_82_REG, 0x01); 967 } 968 rge_reg_put16(rgep, CPLUS_COMMAND_REG, val16 & (~0x03)); 969 970 /* 971 * Start transmit/receive before set tx/rx configuration register 972 */ 973 if (!chip->is_pcie) 974 rge_reg_set8(rgep, RT_COMMAND_REG, 975 RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE); 976 977 /* 978 * Set dump tally counter register 979 */ 980 val32 = rgep->dma_area_stats.cookie.dmac_laddress >> 32; 981 rge_reg_put32(rgep, DUMP_COUNTER_REG_1, val32); 982 val32 = rge_reg_get32(rgep, DUMP_COUNTER_REG_0); 983 val32 &= DUMP_COUNTER_REG_RESV; 984 val32 |= rgep->dma_area_stats.cookie.dmac_laddress; 985 rge_reg_put32(rgep, DUMP_COUNTER_REG_0, val32); 986 987 /* 988 * Change to config register write enable mode 989 */ 990 rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG); 991 992 /* 993 * Set Tx/Rx maximum packet size 994 */ 995 if (rgep->default_mtu > ETHERMTU) { 996 rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_JUMBO); 997 rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_JUMBO); 998 } else { 999 rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_STD); 1000 rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_STD); 1001 } 1002 1003 /* 1004 * Set receive configuration register 1005 */ 1006 val32 = rge_reg_get32(rgep, RX_CONFIG_REG); 1007 val32 &= RX_CONFIG_REG_RESV; 1008 if (rgep->promisc) 1009 val32 |= RX_ACCEPT_ALL_PKT; 1010 rge_reg_put32(rgep, RX_CONFIG_REG, val32 | chip->rxconfig); 1011 1012 /* 1013 * Set transmit configuration register 1014 */ 1015 val32 = rge_reg_get32(rgep, TX_CONFIG_REG); 1016 val32 &= TX_CONFIG_REG_RESV; 1017 rge_reg_put32(rgep, TX_CONFIG_REG, val32 | chip->txconfig); 1018 1019 /* 1020 * Set Tx/Rx descriptor register 1021 */ 1022 val32 = rgep->tx_desc.cookie.dmac_laddress; 1023 rge_reg_put32(rgep, NORMAL_TX_RING_ADDR_LO_REG, val32); 1024 val32 = rgep->tx_desc.cookie.dmac_laddress >> 32; 1025 rge_reg_put32(rgep, NORMAL_TX_RING_ADDR_HI_REG, val32); 1026 rge_reg_put32(rgep, HIGH_TX_RING_ADDR_LO_REG, 0); 1027 rge_reg_put32(rgep, HIGH_TX_RING_ADDR_HI_REG, 0); 1028 val32 = rgep->rx_desc.cookie.dmac_laddress; 1029 rge_reg_put32(rgep, RX_RING_ADDR_LO_REG, val32); 1030 val32 = rgep->rx_desc.cookie.dmac_laddress >> 32; 1031 rge_reg_put32(rgep, RX_RING_ADDR_HI_REG, val32); 1032 1033 /* 1034 * Suggested setting from Realtek 1035 */ 1036 rge_reg_put16(rgep, RESV_E2_REG, 0x282a); 1037 1038 /* 1039 * Return to normal network/host communication mode 1040 */ 1041 rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG); 1042 drv_usecwait(20); 1043 1044 /* 1045 * Set multicast register 1046 */ 1047 hashp = (uint32_t *)rgep->mcast_hash; 1048 rge_reg_put32(rgep, MULTICAST_0_REG, hashp[0]); 1049 rge_reg_put32(rgep, MULTICAST_4_REG, hashp[1]); 1050 1051 /* 1052 * Msic register setting: 1053 * -- Missed packet counter: clear it 1054 * -- TimerInt Register 1055 * -- Timer count register 1056 */ 1057 rge_reg_put32(rgep, RX_PKT_MISS_COUNT_REG, 0); 1058 rge_reg_put32(rgep, TIMER_INT_REG, TIMER_INT_NONE); 1059 rge_reg_put32(rgep, TIMER_COUNT_REG, 0); 1060 } 1061 1062 /* 1063 * rge_chip_start() -- start the chip transmitting and/or receiving, 1064 * including enabling interrupts 1065 */ 1066 void rge_chip_start(rge_t *rgep); 
1067 #pragma no_inline(rge_chip_start) 1068 1069 void 1070 rge_chip_start(rge_t *rgep) 1071 { 1072 /* 1073 * Clear statistics 1074 */ 1075 bzero(&rgep->stats, sizeof (rge_stats_t)); 1076 DMA_ZERO(rgep->dma_area_stats); 1077 1078 /* 1079 * Start transmit/receive 1080 */ 1081 rge_reg_set8(rgep, RT_COMMAND_REG, 1082 RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE); 1083 1084 /* 1085 * Enable interrupt 1086 */ 1087 rgep->int_mask = RGE_INT_MASK; 1088 rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask); 1089 1090 /* 1091 * All done! 1092 */ 1093 rgep->rge_chip_state = RGE_CHIP_RUNNING; 1094 } 1095 1096 /* 1097 * rge_chip_stop() -- stop board receiving 1098 */ 1099 void rge_chip_stop(rge_t *rgep, boolean_t fault); 1100 #pragma no_inline(rge_chip_stop) 1101 1102 void 1103 rge_chip_stop(rge_t *rgep, boolean_t fault) 1104 { 1105 /* 1106 * Disable interrupt 1107 */ 1108 rgep->int_mask = INT_MASK_NONE; 1109 rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask); 1110 1111 /* 1112 * Clear pended interrupt 1113 */ 1114 rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL); 1115 1116 /* 1117 * Stop the board and disable transmit/receive 1118 */ 1119 rge_reg_clr8(rgep, RT_COMMAND_REG, 1120 RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE); 1121 1122 if (fault) 1123 rgep->rge_chip_state = RGE_CHIP_FAULT; 1124 else 1125 rgep->rge_chip_state = RGE_CHIP_STOPPED; 1126 } 1127 1128 /* 1129 * rge_get_mac_addr() -- get the MAC address on NIC 1130 */ 1131 static void rge_get_mac_addr(rge_t *rgep); 1132 #pragma inline(rge_get_mac_addr) 1133 1134 static void 1135 rge_get_mac_addr(rge_t *rgep) 1136 { 1137 uint8_t *macaddr = rgep->netaddr; 1138 uint32_t val32; 1139 1140 /* 1141 * Read first 4-byte of mac address 1142 */ 1143 val32 = rge_reg_get32(rgep, ID_0_REG); 1144 macaddr[0] = val32 & 0xff; 1145 val32 = val32 >> 8; 1146 macaddr[1] = val32 & 0xff; 1147 val32 = val32 >> 8; 1148 macaddr[2] = val32 & 0xff; 1149 val32 = val32 >> 8; 1150 macaddr[3] = val32 & 0xff; 1151 1152 /* 1153 * Read last 2-byte of mac address 1154 */ 1155 val32 = rge_reg_get32(rgep, ID_4_REG); 1156 macaddr[4] = val32 & 0xff; 1157 val32 = val32 >> 8; 1158 macaddr[5] = val32 & 0xff; 1159 } 1160 1161 static void rge_set_mac_addr(rge_t *rgep); 1162 #pragma inline(rge_set_mac_addr) 1163 1164 static void 1165 rge_set_mac_addr(rge_t *rgep) 1166 { 1167 uint8_t *p = rgep->netaddr; 1168 uint32_t val32; 1169 1170 /* 1171 * Change to config register write enable mode 1172 */ 1173 rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG); 1174 1175 /* 1176 * Get first 4 bytes of mac address 1177 */ 1178 val32 = p[3]; 1179 val32 = val32 << 8; 1180 val32 |= p[2]; 1181 val32 = val32 << 8; 1182 val32 |= p[1]; 1183 val32 = val32 << 8; 1184 val32 |= p[0]; 1185 1186 /* 1187 * Set first 4 bytes of mac address 1188 */ 1189 rge_reg_put32(rgep, ID_0_REG, val32); 1190 1191 /* 1192 * Get last 2 bytes of mac address 1193 */ 1194 val32 = p[5]; 1195 val32 = val32 << 8; 1196 val32 |= p[4]; 1197 1198 /* 1199 * Set last 2 bytes of mac address 1200 */ 1201 val32 |= rge_reg_get32(rgep, ID_4_REG) & ~0xffff; 1202 rge_reg_put32(rgep, ID_4_REG, val32); 1203 1204 /* 1205 * Return to normal network/host communication mode 1206 */ 1207 rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG); 1208 } 1209 1210 static void rge_set_multi_addr(rge_t *rgep); 1211 #pragma inline(rge_set_multi_addr) 1212 1213 static void 1214 rge_set_multi_addr(rge_t *rgep) 1215 { 1216 uint32_t *hashp; 1217 1218 hashp = (uint32_t *)rgep->mcast_hash; 1219 rge_reg_put32(rgep, MULTICAST_0_REG, RGE_BSWAP_32(hashp[0])); 1220 
	rge_reg_put32(rgep, MULTICAST_4_REG, RGE_BSWAP_32(hashp[1]));
	rge_reg_set8(rgep, RT_COMMAND_REG,
	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
}

static void rge_set_promisc(rge_t *rgep);
#pragma inline(rge_set_promisc)

static void
rge_set_promisc(rge_t *rgep)
{
	if (rgep->promisc)
		rge_reg_set32(rgep, RX_CONFIG_REG, RX_ACCEPT_ALL_PKT);
	else
		rge_reg_clr32(rgep, RX_CONFIG_REG, RX_ACCEPT_ALL_PKT);

	rge_reg_set8(rgep, RT_COMMAND_REG,
	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);
}

/*
 * rge_chip_sync() -- program the chip with the unicast MAC address,
 * the multicast hash table, the required level of promiscuity, and
 * the current loopback mode ...
 */
void rge_chip_sync(rge_t *rgep, enum rge_sync_op todo);
#pragma no_inline(rge_chip_sync)

void
rge_chip_sync(rge_t *rgep, enum rge_sync_op todo)
{
	switch (todo) {
	case RGE_GET_MAC:
		rge_get_mac_addr(rgep);
		break;
	case RGE_SET_MAC:
		/* Reprogram the unicast MAC address(es) ... */
		rge_set_mac_addr(rgep);
		break;
	case RGE_SET_MUL:
		/* Reprogram the hashed multicast address table ... */
		rge_set_multi_addr(rgep);
		break;
	case RGE_SET_PROMISC:
		/* Set or clear the PROMISCUOUS mode bit */
		rge_set_promisc(rgep);
		break;
	default:
		break;
	}
}

void rge_chip_blank(void *arg, time_t ticks, uint_t count);
#pragma no_inline(rge_chip_blank)

void
rge_chip_blank(void *arg, time_t ticks, uint_t count)
{
	_NOTE(ARGUNUSED(arg, ticks, count));
}

void rge_tx_trigger(rge_t *rgep);
#pragma no_inline(rge_tx_trigger)

void
rge_tx_trigger(rge_t *rgep)
{
	rge_reg_set8(rgep, TX_RINGS_POLL_REG, NORMAL_TX_RING_POLL);
}

void rge_hw_stats_dump(rge_t *rgep);
#pragma no_inline(rge_hw_stats_dump)

void
rge_hw_stats_dump(rge_t *rgep)
{
	int i = 0;

	/*
	 * Wait for any previous statistics dump to complete
	 */
	while (rge_reg_get32(rgep, DUMP_COUNTER_REG_0) & DUMP_START) {
		drv_usecwait(100);
		if (++i > STATS_DUMP_LOOP) {
			RGE_DEBUG(("rge h/w statistics dump fail!"));
			rgep->rge_chip_state = RGE_CHIP_ERROR;
			return;
		}
	}
	DMA_SYNC(rgep->dma_area_stats, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * Start H/W statistics dump for RTL8169 chip
	 */
	rge_reg_set32(rgep, DUMP_COUNTER_REG_0, DUMP_START);
}

/*
 * ========== Hardware interrupt handler ==========
 */

#undef RGE_DBG
#define RGE_DBG RGE_DBG_INT /* debug flag for this code */

static void rge_wake_factotum(rge_t *rgep);
#pragma inline(rge_wake_factotum)

static void
rge_wake_factotum(rge_t *rgep)
{
	if (rgep->factotum_flag == 0) {
		rgep->factotum_flag = 1;
		(void) ddi_intr_trigger_softint(rgep->factotum_hdl, NULL);
	}
}

/*
 * rge_intr() -- handle chip interrupts
 */
uint_t rge_intr(caddr_t arg1, caddr_t arg2);
#pragma no_inline(rge_intr)

uint_t
rge_intr(caddr_t arg1, caddr_t arg2)
{
	rge_t *rgep = (rge_t *)arg1;
	uint16_t int_status;

	_NOTE(ARGUNUSED(arg2))

	mutex_enter(rgep->genlock);
	/*
	 * Was this interrupt caused by our device...
1350 */ 1351 int_status = rge_reg_get16(rgep, INT_STATUS_REG); 1352 if (!(int_status & rgep->int_mask)) { 1353 mutex_exit(rgep->genlock); 1354 return (DDI_INTR_UNCLAIMED); 1355 /* indicate it wasn't our interrupt */ 1356 } 1357 rgep->stats.intr++; 1358 1359 /* 1360 * Clear interrupt 1361 * For PCIE chipset, we need disable interrupt first. 1362 */ 1363 if (rgep->chipid.is_pcie) 1364 rge_reg_put16(rgep, INT_MASK_REG, INT_MASK_NONE); 1365 rge_reg_put16(rgep, INT_STATUS_REG, int_status); 1366 1367 /* 1368 * Cable link change interrupt 1369 */ 1370 if (int_status & LINK_CHANGE_INT) { 1371 rge_chip_cyclic(rgep); 1372 } 1373 1374 mutex_exit(rgep->genlock); 1375 1376 /* 1377 * Receive interrupt 1378 */ 1379 if (int_status & RGE_RX_INT) 1380 rge_receive(rgep); 1381 1382 /* 1383 * Re-enable interrupt for PCIE chipset 1384 */ 1385 if (rgep->chipid.is_pcie) 1386 rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask); 1387 1388 return (DDI_INTR_CLAIMED); /* indicate it was our interrupt */ 1389 } 1390 1391 /* 1392 * ========== Factotum, implemented as a softint handler ========== 1393 */ 1394 1395 #undef RGE_DBG 1396 #define RGE_DBG RGE_DBG_FACT /* debug flag for this code */ 1397 1398 static boolean_t rge_factotum_link_check(rge_t *rgep); 1399 #pragma no_inline(rge_factotum_link_check) 1400 1401 static boolean_t 1402 rge_factotum_link_check(rge_t *rgep) 1403 { 1404 uint8_t media_status; 1405 int32_t link; 1406 void (*logfn)(rge_t *rgep, const char *fmt, ...); 1407 const char *msg; 1408 hrtime_t deltat; 1409 1410 media_status = rge_reg_get8(rgep, PHY_STATUS_REG); 1411 link = (media_status & PHY_STATUS_LINK_UP) ? 1412 LINK_STATE_UP : LINK_STATE_DOWN; 1413 if (rgep->param_link_up != link) { 1414 /* 1415 * Link change. We have to decide whether to write a message 1416 * on the console or only in the log. If the PHY has 1417 * been reprogrammed (at user request) "recently", then 1418 * the message only goes in the log. Otherwise it's an 1419 * "unexpected" event, and it goes on the console as well. 1420 */ 1421 rgep->param_link_up = link; 1422 rgep->phys_event_time = gethrtime(); 1423 deltat = rgep->phys_event_time - rgep->phys_write_time; 1424 if (deltat > RGE_LINK_SETTLE_TIME) 1425 msg = ""; 1426 else if (link == LINK_STATE_UP) 1427 msg = rgep->link_up_msg; 1428 else 1429 msg = rgep->link_down_msg; 1430 logfn = (msg == NULL || *msg == '\0') ? rge_notice : rge_log; 1431 1432 if (link == LINK_STATE_UP) { 1433 if (media_status & PHY_STATUS_1000MF) { 1434 rgep->param_link_speed = RGE_SPEED_1000M; 1435 rgep->param_link_duplex = LINK_DUPLEX_FULL; 1436 } else { 1437 rgep->param_link_speed = 1438 (media_status & PHY_STATUS_100M) ? 1439 RGE_SPEED_100M : RGE_SPEED_10M; 1440 rgep->param_link_duplex = 1441 (media_status & PHY_STATUS_DUPLEX_FULL) ? 1442 LINK_DUPLEX_FULL : LINK_DUPLEX_HALF; 1443 } 1444 logfn(rgep, 1445 "link up %sbps %s_Duplex%s", 1446 (rgep->param_link_speed == RGE_SPEED_10M) ? 1447 "10M" : (rgep->param_link_speed == RGE_SPEED_100M ? 1448 "100M" : "1000M"), 1449 (rgep->param_link_duplex == LINK_DUPLEX_FULL) ? 
1450 "Full" : "Half", 1451 msg); 1452 } else { 1453 logfn(rgep, "link down%s", msg); 1454 } 1455 return (B_TRUE); 1456 } 1457 return (B_FALSE); 1458 } 1459 1460 /* 1461 * Factotum routine to check for Tx stall, using the 'watchdog' counter 1462 */ 1463 static boolean_t rge_factotum_stall_check(rge_t *rgep); 1464 #pragma no_inline(rge_factotum_stall_check) 1465 1466 static boolean_t 1467 rge_factotum_stall_check(rge_t *rgep) 1468 { 1469 uint32_t dogval; 1470 1471 ASSERT(mutex_owned(rgep->genlock)); 1472 1473 /* 1474 * Specific check for Tx stall ... 1475 * 1476 * The 'watchdog' counter is incremented whenever a packet 1477 * is queued, reset to 1 when some (but not all) buffers 1478 * are reclaimed, reset to 0 (disabled) when all buffers 1479 * are reclaimed, and shifted left here. If it exceeds the 1480 * threshold value, the chip is assumed to have stalled and 1481 * is put into the ERROR state. The factotum will then reset 1482 * it on the next pass. 1483 * 1484 * All of which should ensure that we don't get into a state 1485 * where packets are left pending indefinitely! 1486 */ 1487 if (rgep->resched_needed) 1488 (void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL); 1489 dogval = rge_atomic_shl32(&rgep->watchdog, 1); 1490 if (dogval < rge_watchdog_count) 1491 return (B_FALSE); 1492 1493 RGE_REPORT((rgep, "Tx stall detected, watchdog code 0x%x", dogval)); 1494 return (B_TRUE); 1495 1496 } 1497 1498 /* 1499 * The factotum is woken up when there's something to do that we'd rather 1500 * not do from inside a hardware interrupt handler or high-level cyclic. 1501 * Its two main tasks are: 1502 * reset & restart the chip after an error 1503 * check the link status whenever necessary 1504 */ 1505 uint_t rge_chip_factotum(caddr_t arg1, caddr_t arg2); 1506 #pragma no_inline(rge_chip_factotum) 1507 1508 uint_t 1509 rge_chip_factotum(caddr_t arg1, caddr_t arg2) 1510 { 1511 rge_t *rgep; 1512 uint_t result; 1513 boolean_t error; 1514 boolean_t linkchg; 1515 1516 rgep = (rge_t *)arg1; 1517 _NOTE(ARGUNUSED(arg2)) 1518 1519 if (rgep->factotum_flag == 0) 1520 return (DDI_INTR_UNCLAIMED); 1521 1522 rgep->factotum_flag = 0; 1523 result = DDI_INTR_CLAIMED; 1524 error = B_FALSE; 1525 linkchg = B_FALSE; 1526 1527 mutex_enter(rgep->genlock); 1528 switch (rgep->rge_chip_state) { 1529 default: 1530 break; 1531 1532 case RGE_CHIP_RUNNING: 1533 linkchg = rge_factotum_link_check(rgep); 1534 error = rge_factotum_stall_check(rgep); 1535 break; 1536 1537 case RGE_CHIP_ERROR: 1538 error = B_TRUE; 1539 break; 1540 1541 case RGE_CHIP_FAULT: 1542 /* 1543 * Fault detected, time to reset ... 1544 */ 1545 if (rge_autorecover) { 1546 RGE_REPORT((rgep, "automatic recovery activated")); 1547 rge_restart(rgep); 1548 } 1549 break; 1550 } 1551 1552 /* 1553 * If an error is detected, stop the chip now, marking it as 1554 * faulty, so that it will be reset next time through ... 1555 */ 1556 if (error) 1557 rge_chip_stop(rgep, B_TRUE); 1558 mutex_exit(rgep->genlock); 1559 1560 /* 1561 * If the link state changed, tell the world about it. 1562 * Note: can't do this while still holding the mutex. 1563 */ 1564 if (linkchg) 1565 mac_link_update(rgep->mh, rgep->param_link_up); 1566 1567 return (result); 1568 } 1569 1570 /* 1571 * High-level cyclic handler 1572 * 1573 * This routine schedules a (low-level) softint callback to the 1574 * factotum, and prods the chip to update the status block (which 1575 * will cause a hardware interrupt when complete). 
 */
void rge_chip_cyclic(void *arg);
#pragma no_inline(rge_chip_cyclic)

void
rge_chip_cyclic(void *arg)
{
	rge_t *rgep;

	rgep = arg;

	switch (rgep->rge_chip_state) {
	default:
		return;

	case RGE_CHIP_RUNNING:
		rge_phy_check(rgep);
		break;

	case RGE_CHIP_FAULT:
	case RGE_CHIP_ERROR:
		break;
	}

	rge_wake_factotum(rgep);
}

/*
 * ========== Ioctl subfunctions ==========
 */

#undef RGE_DBG
#define RGE_DBG RGE_DBG_PPIO /* debug flag for this code */

#if RGE_DEBUGGING || RGE_DO_PPIO

static void rge_chip_peek_cfg(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma no_inline(rge_chip_peek_cfg)

static void
rge_chip_peek_cfg(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval;
	uint64_t regno;

	RGE_TRACE(("rge_chip_peek_cfg($%p, $%p)",
	    (void *)rgep, (void *)ppd));

	regno = ppd->pp_acc_offset;

	switch (ppd->pp_acc_size) {
	case 1:
		regval = pci_config_get8(rgep->cfg_handle, regno);
		break;

	case 2:
		regval = pci_config_get16(rgep->cfg_handle, regno);
		break;

	case 4:
		regval = pci_config_get32(rgep->cfg_handle, regno);
		break;

	case 8:
		regval = pci_config_get64(rgep->cfg_handle, regno);
		break;
	}

	ppd->pp_acc_data = regval;
}

static void rge_chip_poke_cfg(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma no_inline(rge_chip_poke_cfg)

static void
rge_chip_poke_cfg(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval;
	uint64_t regno;

	RGE_TRACE(("rge_chip_poke_cfg($%p, $%p)",
	    (void *)rgep, (void *)ppd));

	regno = ppd->pp_acc_offset;
	regval = ppd->pp_acc_data;

	switch (ppd->pp_acc_size) {
	case 1:
		pci_config_put8(rgep->cfg_handle, regno, regval);
		break;

	case 2:
		pci_config_put16(rgep->cfg_handle, regno, regval);
		break;

	case 4:
		pci_config_put32(rgep->cfg_handle, regno, regval);
		break;

	case 8:
		pci_config_put64(rgep->cfg_handle, regno, regval);
		break;
	}
}

static void rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma no_inline(rge_chip_peek_reg)

static void
rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval;
	void *regaddr;

	RGE_TRACE(("rge_chip_peek_reg($%p, $%p)",
	    (void *)rgep, (void *)ppd));

	regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);

	switch (ppd->pp_acc_size) {
	case 1:
		regval = ddi_get8(rgep->io_handle, regaddr);
		break;

	case 2:
		regval = ddi_get16(rgep->io_handle, regaddr);
		break;

	case 4:
		regval = ddi_get32(rgep->io_handle, regaddr);
		break;

	case 8:
		regval = ddi_get64(rgep->io_handle, regaddr);
		break;
	}

	ppd->pp_acc_data = regval;
}

static void rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd);
#pragma no_inline(rge_chip_poke_reg)

static void
rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval;
	void *regaddr;

	RGE_TRACE(("rge_chip_poke_reg($%p, $%p)",
	    (void *)rgep, (void *)ppd));

	regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);
	regval = ppd->pp_acc_data;

	switch (ppd->pp_acc_size) {
	case 1:
		ddi_put8(rgep->io_handle, regaddr, regval);
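		/*
		 * As with the wider pokes below, the access is followed by
		 * RGE_PCICHK() after this switch, so a master/target abort
		 * provoked by the write can be caught by rge_pci_check()
		 * when RGE_DEBUGGING is enabled.
		 */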
1735 break; 1736 1737 case 2: 1738 ddi_put16(rgep->io_handle, regaddr, regval); 1739 break; 1740 1741 case 4: 1742 ddi_put32(rgep->io_handle, regaddr, regval); 1743 break; 1744 1745 case 8: 1746 ddi_put64(rgep->io_handle, regaddr, regval); 1747 break; 1748 } 1749 RGE_PCICHK(rgep); 1750 } 1751 1752 static void rge_chip_peek_mii(rge_t *rgep, rge_peekpoke_t *ppd); 1753 #pragma no_inline(rge_chip_peek_mii) 1754 1755 static void 1756 rge_chip_peek_mii(rge_t *rgep, rge_peekpoke_t *ppd) 1757 { 1758 RGE_TRACE(("rge_chip_peek_mii($%p, $%p)", 1759 (void *)rgep, (void *)ppd)); 1760 1761 ppd->pp_acc_data = rge_mii_get16(rgep, ppd->pp_acc_offset/2); 1762 } 1763 1764 static void rge_chip_poke_mii(rge_t *rgep, rge_peekpoke_t *ppd); 1765 #pragma no_inline(rge_chip_poke_mii) 1766 1767 static void 1768 rge_chip_poke_mii(rge_t *rgep, rge_peekpoke_t *ppd) 1769 { 1770 RGE_TRACE(("rge_chip_poke_mii($%p, $%p)", 1771 (void *)rgep, (void *)ppd)); 1772 1773 rge_mii_put16(rgep, ppd->pp_acc_offset/2, ppd->pp_acc_data); 1774 } 1775 1776 static void rge_chip_peek_mem(rge_t *rgep, rge_peekpoke_t *ppd); 1777 #pragma no_inline(rge_chip_peek_mem) 1778 1779 static void 1780 rge_chip_peek_mem(rge_t *rgep, rge_peekpoke_t *ppd) 1781 { 1782 uint64_t regval; 1783 void *vaddr; 1784 1785 RGE_TRACE(("rge_chip_peek_rge($%p, $%p)", 1786 (void *)rgep, (void *)ppd)); 1787 1788 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 1789 1790 switch (ppd->pp_acc_size) { 1791 case 1: 1792 regval = *(uint8_t *)vaddr; 1793 break; 1794 1795 case 2: 1796 regval = *(uint16_t *)vaddr; 1797 break; 1798 1799 case 4: 1800 regval = *(uint32_t *)vaddr; 1801 break; 1802 1803 case 8: 1804 regval = *(uint64_t *)vaddr; 1805 break; 1806 } 1807 1808 RGE_DEBUG(("rge_chip_peek_mem($%p, $%p) peeked 0x%llx from $%p", 1809 (void *)rgep, (void *)ppd, regval, vaddr)); 1810 1811 ppd->pp_acc_data = regval; 1812 } 1813 1814 static void rge_chip_poke_mem(rge_t *rgep, rge_peekpoke_t *ppd); 1815 #pragma no_inline(rge_chip_poke_mem) 1816 1817 static void 1818 rge_chip_poke_mem(rge_t *rgep, rge_peekpoke_t *ppd) 1819 { 1820 uint64_t regval; 1821 void *vaddr; 1822 1823 RGE_TRACE(("rge_chip_poke_mem($%p, $%p)", 1824 (void *)rgep, (void *)ppd)); 1825 1826 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 1827 regval = ppd->pp_acc_data; 1828 1829 RGE_DEBUG(("rge_chip_poke_mem($%p, $%p) poking 0x%llx at $%p", 1830 (void *)rgep, (void *)ppd, regval, vaddr)); 1831 1832 switch (ppd->pp_acc_size) { 1833 case 1: 1834 *(uint8_t *)vaddr = (uint8_t)regval; 1835 break; 1836 1837 case 2: 1838 *(uint16_t *)vaddr = (uint16_t)regval; 1839 break; 1840 1841 case 4: 1842 *(uint32_t *)vaddr = (uint32_t)regval; 1843 break; 1844 1845 case 8: 1846 *(uint64_t *)vaddr = (uint64_t)regval; 1847 break; 1848 } 1849 } 1850 1851 static enum ioc_reply rge_pp_ioctl(rge_t *rgep, int cmd, mblk_t *mp, 1852 struct iocblk *iocp); 1853 #pragma no_inline(rge_pp_ioctl) 1854 1855 static enum ioc_reply 1856 rge_pp_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp) 1857 { 1858 void (*ppfn)(rge_t *rgep, rge_peekpoke_t *ppd); 1859 rge_peekpoke_t *ppd; 1860 dma_area_t *areap; 1861 uint64_t sizemask; 1862 uint64_t mem_va; 1863 uint64_t maxoff; 1864 boolean_t peek; 1865 1866 switch (cmd) { 1867 default: 1868 /* NOTREACHED */ 1869 rge_error(rgep, "rge_pp_ioctl: invalid cmd 0x%x", cmd); 1870 return (IOC_INVAL); 1871 1872 case RGE_PEEK: 1873 peek = B_TRUE; 1874 break; 1875 1876 case RGE_POKE: 1877 peek = B_FALSE; 1878 break; 1879 } 1880 1881 /* 1882 * Validate format of ioctl 1883 */ 1884 if (iocp->ioc_count != sizeof 
(rge_peekpoke_t)) 1885 return (IOC_INVAL); 1886 if (mp->b_cont == NULL) 1887 return (IOC_INVAL); 1888 ppd = (rge_peekpoke_t *)mp->b_cont->b_rptr; 1889 1890 /* 1891 * Validate request parameters 1892 */ 1893 switch (ppd->pp_acc_space) { 1894 default: 1895 return (IOC_INVAL); 1896 1897 case RGE_PP_SPACE_CFG: 1898 /* 1899 * Config space 1900 */ 1901 sizemask = 8|4|2|1; 1902 mem_va = 0; 1903 maxoff = PCI_CONF_HDR_SIZE; 1904 ppfn = peek ? rge_chip_peek_cfg : rge_chip_poke_cfg; 1905 break; 1906 1907 case RGE_PP_SPACE_REG: 1908 /* 1909 * Memory-mapped I/O space 1910 */ 1911 sizemask = 8|4|2|1; 1912 mem_va = 0; 1913 maxoff = RGE_REGISTER_MAX; 1914 ppfn = peek ? rge_chip_peek_reg : rge_chip_poke_reg; 1915 break; 1916 1917 case RGE_PP_SPACE_MII: 1918 /* 1919 * PHY's MII registers 1920 * NB: all PHY registers are two bytes, but the 1921 * addresses increment in ones (word addressing). 1922 * So we scale the address here, then undo the 1923 * transformation inside the peek/poke functions. 1924 */ 1925 ppd->pp_acc_offset *= 2; 1926 sizemask = 2; 1927 mem_va = 0; 1928 maxoff = (MII_MAXREG+1)*2; 1929 ppfn = peek ? rge_chip_peek_mii : rge_chip_poke_mii; 1930 break; 1931 1932 case RGE_PP_SPACE_RGE: 1933 /* 1934 * RGE data structure! 1935 */ 1936 sizemask = 8|4|2|1; 1937 mem_va = (uintptr_t)rgep; 1938 maxoff = sizeof (*rgep); 1939 ppfn = peek ? rge_chip_peek_mem : rge_chip_poke_mem; 1940 break; 1941 1942 case RGE_PP_SPACE_STATISTICS: 1943 case RGE_PP_SPACE_TXDESC: 1944 case RGE_PP_SPACE_TXBUFF: 1945 case RGE_PP_SPACE_RXDESC: 1946 case RGE_PP_SPACE_RXBUFF: 1947 /* 1948 * Various DMA_AREAs 1949 */ 1950 switch (ppd->pp_acc_space) { 1951 case RGE_PP_SPACE_TXDESC: 1952 areap = &rgep->dma_area_txdesc; 1953 break; 1954 case RGE_PP_SPACE_RXDESC: 1955 areap = &rgep->dma_area_rxdesc; 1956 break; 1957 case RGE_PP_SPACE_STATISTICS: 1958 areap = &rgep->dma_area_stats; 1959 break; 1960 } 1961 1962 sizemask = 8|4|2|1; 1963 mem_va = (uintptr_t)areap->mem_va; 1964 maxoff = areap->alength; 1965 ppfn = peek ? rge_chip_peek_mem : rge_chip_poke_mem; 1966 break; 1967 } 1968 1969 switch (ppd->pp_acc_size) { 1970 default: 1971 return (IOC_INVAL); 1972 1973 case 8: 1974 case 4: 1975 case 2: 1976 case 1: 1977 if ((ppd->pp_acc_size & sizemask) == 0) 1978 return (IOC_INVAL); 1979 break; 1980 } 1981 1982 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0) 1983 return (IOC_INVAL); 1984 1985 if (ppd->pp_acc_offset >= maxoff) 1986 return (IOC_INVAL); 1987 1988 if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff) 1989 return (IOC_INVAL); 1990 1991 /* 1992 * All OK - go do it! 1993 */ 1994 ppd->pp_acc_offset += mem_va; 1995 (*ppfn)(rgep, ppd); 1996 return (peek ? 
IOC_REPLY : IOC_ACK); 1997 } 1998 1999 static enum ioc_reply rge_diag_ioctl(rge_t *rgep, int cmd, mblk_t *mp, 2000 struct iocblk *iocp); 2001 #pragma no_inline(rge_diag_ioctl) 2002 2003 static enum ioc_reply 2004 rge_diag_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp) 2005 { 2006 ASSERT(mutex_owned(rgep->genlock)); 2007 2008 switch (cmd) { 2009 default: 2010 /* NOTREACHED */ 2011 rge_error(rgep, "rge_diag_ioctl: invalid cmd 0x%x", cmd); 2012 return (IOC_INVAL); 2013 2014 case RGE_DIAG: 2015 /* 2016 * Currently a no-op 2017 */ 2018 return (IOC_ACK); 2019 2020 case RGE_PEEK: 2021 case RGE_POKE: 2022 return (rge_pp_ioctl(rgep, cmd, mp, iocp)); 2023 2024 case RGE_PHY_RESET: 2025 return (IOC_RESTART_ACK); 2026 2027 case RGE_SOFT_RESET: 2028 case RGE_HARD_RESET: 2029 /* 2030 * Reset and reinitialise the 570x hardware 2031 */ 2032 rge_restart(rgep); 2033 return (IOC_ACK); 2034 } 2035 2036 /* NOTREACHED */ 2037 } 2038 2039 #endif /* RGE_DEBUGGING || RGE_DO_PPIO */ 2040 2041 static enum ioc_reply rge_mii_ioctl(rge_t *rgep, int cmd, mblk_t *mp, 2042 struct iocblk *iocp); 2043 #pragma no_inline(rge_mii_ioctl) 2044 2045 static enum ioc_reply 2046 rge_mii_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp) 2047 { 2048 struct rge_mii_rw *miirwp; 2049 2050 /* 2051 * Validate format of ioctl 2052 */ 2053 if (iocp->ioc_count != sizeof (struct rge_mii_rw)) 2054 return (IOC_INVAL); 2055 if (mp->b_cont == NULL) 2056 return (IOC_INVAL); 2057 miirwp = (struct rge_mii_rw *)mp->b_cont->b_rptr; 2058 2059 /* 2060 * Validate request parameters ... 2061 */ 2062 if (miirwp->mii_reg > MII_MAXREG) 2063 return (IOC_INVAL); 2064 2065 switch (cmd) { 2066 default: 2067 /* NOTREACHED */ 2068 rge_error(rgep, "rge_mii_ioctl: invalid cmd 0x%x", cmd); 2069 return (IOC_INVAL); 2070 2071 case RGE_MII_READ: 2072 miirwp->mii_data = rge_mii_get16(rgep, miirwp->mii_reg); 2073 return (IOC_REPLY); 2074 2075 case RGE_MII_WRITE: 2076 rge_mii_put16(rgep, miirwp->mii_reg, miirwp->mii_data); 2077 return (IOC_ACK); 2078 } 2079 2080 /* NOTREACHED */ 2081 } 2082 2083 enum ioc_reply rge_chip_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, 2084 struct iocblk *iocp); 2085 #pragma no_inline(rge_chip_ioctl) 2086 2087 enum ioc_reply 2088 rge_chip_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp) 2089 { 2090 int cmd; 2091 2092 RGE_TRACE(("rge_chip_ioctl($%p, $%p, $%p, $%p)", 2093 (void *)rgep, (void *)wq, (void *)mp, (void *)iocp)); 2094 2095 ASSERT(mutex_owned(rgep->genlock)); 2096 2097 cmd = iocp->ioc_cmd; 2098 switch (cmd) { 2099 default: 2100 /* NOTREACHED */ 2101 rge_error(rgep, "rge_chip_ioctl: invalid cmd 0x%x", cmd); 2102 return (IOC_INVAL); 2103 2104 case RGE_DIAG: 2105 case RGE_PEEK: 2106 case RGE_POKE: 2107 case RGE_PHY_RESET: 2108 case RGE_SOFT_RESET: 2109 case RGE_HARD_RESET: 2110 #if RGE_DEBUGGING || RGE_DO_PPIO 2111 return (rge_diag_ioctl(rgep, cmd, mp, iocp)); 2112 #else 2113 return (IOC_INVAL); 2114 #endif /* RGE_DEBUGGING || RGE_DO_PPIO */ 2115 2116 case RGE_MII_READ: 2117 case RGE_MII_WRITE: 2118 return (rge_mii_ioctl(rgep, cmd, mp, iocp)); 2119 2120 } 2121 2122 /* NOTREACHED */ 2123 } 2124
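
/*
 * Illustrative usage sketch (not part of the driver source): the
 * RGE_MII_READ/RGE_MII_WRITE cases in rge_mii_ioctl() above expect an
 * M_IOCTL whose attached data block is a struct rge_mii_rw, with
 * ioc_count equal to sizeof (struct rge_mii_rw).  A userland diagnostic
 * tool might build such a request roughly as follows; the device path
 * and the use of an I_STR STREAMS ioctl are assumptions here, not
 * something this file defines:
 *
 *	struct rge_mii_rw mii;
 *	struct strioctl ioc;
 *	int fd = open("/dev/rge0", O_RDWR);	(assumed device node)
 *
 *	mii.mii_reg = MII_STATUS;
 *	ioc.ic_cmd = RGE_MII_READ;
 *	ioc.ic_timout = 0;
 *	ioc.ic_len = sizeof (mii);
 *	ioc.ic_dp = (char *)&mii;
 *	if (ioctl(fd, I_STR, &ioc) == 0)
 *		(void) printf("MII_STATUS = 0x%x\n", mii.mii_data);
 */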