1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include "rge.h" 27 28 #define REG32(rgep, reg) ((uint32_t *)(rgep->io_regs+(reg))) 29 #define REG16(rgep, reg) ((uint16_t *)(rgep->io_regs+(reg))) 30 #define REG8(rgep, reg) ((uint8_t *)(rgep->io_regs+(reg))) 31 #define PIO_ADDR(rgep, offset) ((void *)(rgep->io_regs+(offset))) 32 33 /* 34 * Patchable globals: 35 * 36 * rge_autorecover 37 * Enables/disables automatic recovery after fault detection 38 */ 39 static uint32_t rge_autorecover = 1; 40 41 /* 42 * globals: 43 */ 44 #define RGE_DBG RGE_DBG_REGS /* debug flag for this code */ 45 static uint32_t rge_watchdog_count = 1 << 5; 46 static uint32_t rge_rx_watchdog_count = 1 << 3; 47 48 /* 49 * Operating register get/set access routines 50 */ 51 52 static uint32_t rge_reg_get32(rge_t *rgep, uintptr_t regno); 53 #pragma inline(rge_reg_get32) 54 55 static uint32_t 56 rge_reg_get32(rge_t *rgep, uintptr_t regno) 57 { 58 RGE_TRACE(("rge_reg_get32($%p, 0x%lx)", 59 (void *)rgep, regno)); 60 61 return (ddi_get32(rgep->io_handle, REG32(rgep, regno))); 62 } 63 64 static 
void rge_reg_put32(rge_t *rgep, uintptr_t regno, uint32_t data); 65 #pragma inline(rge_reg_put32) 66 67 static void 68 rge_reg_put32(rge_t *rgep, uintptr_t regno, uint32_t data) 69 { 70 RGE_TRACE(("rge_reg_put32($%p, 0x%lx, 0x%x)", 71 (void *)rgep, regno, data)); 72 73 ddi_put32(rgep->io_handle, REG32(rgep, regno), data); 74 } 75 76 static void rge_reg_set32(rge_t *rgep, uintptr_t regno, uint32_t bits); 77 #pragma inline(rge_reg_set32) 78 79 static void 80 rge_reg_set32(rge_t *rgep, uintptr_t regno, uint32_t bits) 81 { 82 uint32_t regval; 83 84 RGE_TRACE(("rge_reg_set32($%p, 0x%lx, 0x%x)", 85 (void *)rgep, regno, bits)); 86 87 regval = rge_reg_get32(rgep, regno); 88 regval |= bits; 89 rge_reg_put32(rgep, regno, regval); 90 } 91 92 static void rge_reg_clr32(rge_t *rgep, uintptr_t regno, uint32_t bits); 93 #pragma inline(rge_reg_clr32) 94 95 static void 96 rge_reg_clr32(rge_t *rgep, uintptr_t regno, uint32_t bits) 97 { 98 uint32_t regval; 99 100 RGE_TRACE(("rge_reg_clr32($%p, 0x%lx, 0x%x)", 101 (void *)rgep, regno, bits)); 102 103 regval = rge_reg_get32(rgep, regno); 104 regval &= ~bits; 105 rge_reg_put32(rgep, regno, regval); 106 } 107 108 static uint16_t rge_reg_get16(rge_t *rgep, uintptr_t regno); 109 #pragma inline(rge_reg_get16) 110 111 static uint16_t 112 rge_reg_get16(rge_t *rgep, uintptr_t regno) 113 { 114 RGE_TRACE(("rge_reg_get16($%p, 0x%lx)", 115 (void *)rgep, regno)); 116 117 return (ddi_get16(rgep->io_handle, REG16(rgep, regno))); 118 } 119 120 static void rge_reg_put16(rge_t *rgep, uintptr_t regno, uint16_t data); 121 #pragma inline(rge_reg_put16) 122 123 static void 124 rge_reg_put16(rge_t *rgep, uintptr_t regno, uint16_t data) 125 { 126 RGE_TRACE(("rge_reg_put16($%p, 0x%lx, 0x%x)", 127 (void *)rgep, regno, data)); 128 129 ddi_put16(rgep->io_handle, REG16(rgep, regno), data); 130 } 131 132 static uint8_t rge_reg_get8(rge_t *rgep, uintptr_t regno); 133 #pragma inline(rge_reg_get8) 134 135 static uint8_t 136 rge_reg_get8(rge_t *rgep, uintptr_t regno) 137 
{ 138 RGE_TRACE(("rge_reg_get8($%p, 0x%lx)", 139 (void *)rgep, regno)); 140 141 return (ddi_get8(rgep->io_handle, REG8(rgep, regno))); 142 } 143 144 static void rge_reg_put8(rge_t *rgep, uintptr_t regno, uint8_t data); 145 #pragma inline(rge_reg_put8) 146 147 static void 148 rge_reg_put8(rge_t *rgep, uintptr_t regno, uint8_t data) 149 { 150 RGE_TRACE(("rge_reg_put8($%p, 0x%lx, 0x%x)", 151 (void *)rgep, regno, data)); 152 153 ddi_put8(rgep->io_handle, REG8(rgep, regno), data); 154 } 155 156 static void rge_reg_set8(rge_t *rgep, uintptr_t regno, uint8_t bits); 157 #pragma inline(rge_reg_set8) 158 159 static void 160 rge_reg_set8(rge_t *rgep, uintptr_t regno, uint8_t bits) 161 { 162 uint8_t regval; 163 164 RGE_TRACE(("rge_reg_set8($%p, 0x%lx, 0x%x)", 165 (void *)rgep, regno, bits)); 166 167 regval = rge_reg_get8(rgep, regno); 168 regval |= bits; 169 rge_reg_put8(rgep, regno, regval); 170 } 171 172 static void rge_reg_clr8(rge_t *rgep, uintptr_t regno, uint8_t bits); 173 #pragma inline(rge_reg_clr8) 174 175 static void 176 rge_reg_clr8(rge_t *rgep, uintptr_t regno, uint8_t bits) 177 { 178 uint8_t regval; 179 180 RGE_TRACE(("rge_reg_clr8($%p, 0x%lx, 0x%x)", 181 (void *)rgep, regno, bits)); 182 183 regval = rge_reg_get8(rgep, regno); 184 regval &= ~bits; 185 rge_reg_put8(rgep, regno, regval); 186 } 187 188 uint16_t rge_mii_get16(rge_t *rgep, uintptr_t mii); 189 #pragma no_inline(rge_mii_get16) 190 191 uint16_t 192 rge_mii_get16(rge_t *rgep, uintptr_t mii) 193 { 194 uint32_t regval; 195 uint32_t val32; 196 uint32_t i; 197 198 regval = (mii & PHY_REG_MASK) << PHY_REG_SHIFT; 199 rge_reg_put32(rgep, PHY_ACCESS_REG, regval); 200 201 /* 202 * Waiting for PHY reading OK 203 */ 204 for (i = 0; i < PHY_RESET_LOOP; i++) { 205 drv_usecwait(1000); 206 val32 = rge_reg_get32(rgep, PHY_ACCESS_REG); 207 if (val32 & PHY_ACCESS_WR_FLAG) 208 return ((uint16_t)(val32 & 0xffff)); 209 } 210 211 RGE_REPORT((rgep, "rge_mii_get16(0x%x) fail, val = %x", mii, val32)); 212 return ((uint16_t)~0u); 
213 } 214 215 void rge_mii_put16(rge_t *rgep, uintptr_t mii, uint16_t data); 216 #pragma no_inline(rge_mii_put16) 217 218 void 219 rge_mii_put16(rge_t *rgep, uintptr_t mii, uint16_t data) 220 { 221 uint32_t regval; 222 uint32_t val32; 223 uint32_t i; 224 225 regval = (mii & PHY_REG_MASK) << PHY_REG_SHIFT; 226 regval |= data & PHY_DATA_MASK; 227 regval |= PHY_ACCESS_WR_FLAG; 228 rge_reg_put32(rgep, PHY_ACCESS_REG, regval); 229 230 /* 231 * Waiting for PHY writing OK 232 */ 233 for (i = 0; i < PHY_RESET_LOOP; i++) { 234 drv_usecwait(1000); 235 val32 = rge_reg_get32(rgep, PHY_ACCESS_REG); 236 if (!(val32 & PHY_ACCESS_WR_FLAG)) 237 return; 238 } 239 RGE_REPORT((rgep, "rge_mii_put16(0x%lx, 0x%x) fail", 240 mii, data)); 241 } 242 243 void rge_ephy_put16(rge_t *rgep, uintptr_t emii, uint16_t data); 244 #pragma no_inline(rge_ephy_put16) 245 246 void 247 rge_ephy_put16(rge_t *rgep, uintptr_t emii, uint16_t data) 248 { 249 uint32_t regval; 250 uint32_t val32; 251 uint32_t i; 252 253 regval = (emii & EPHY_REG_MASK) << EPHY_REG_SHIFT; 254 regval |= data & EPHY_DATA_MASK; 255 regval |= EPHY_ACCESS_WR_FLAG; 256 rge_reg_put32(rgep, EPHY_ACCESS_REG, regval); 257 258 /* 259 * Waiting for PHY writing OK 260 */ 261 for (i = 0; i < PHY_RESET_LOOP; i++) { 262 drv_usecwait(1000); 263 val32 = rge_reg_get32(rgep, EPHY_ACCESS_REG); 264 if (!(val32 & EPHY_ACCESS_WR_FLAG)) 265 return; 266 } 267 RGE_REPORT((rgep, "rge_ephy_put16(0x%lx, 0x%x) fail", 268 emii, data)); 269 } 270 271 /* 272 * Atomically shift a 32-bit word left, returning 273 * the value it had *before* the shift was applied 274 */ 275 static uint32_t rge_atomic_shl32(uint32_t *sp, uint_t count); 276 #pragma inline(rge_mii_put16) 277 278 static uint32_t 279 rge_atomic_shl32(uint32_t *sp, uint_t count) 280 { 281 uint32_t oldval; 282 uint32_t newval; 283 284 /* ATOMICALLY */ 285 do { 286 oldval = *sp; 287 newval = oldval << count; 288 } while (atomic_cas_32(sp, oldval, newval) != oldval); 289 290 return (oldval); 291 } 292 293 /* 
/*
 * PHY operation routines
 */
#if	RGE_DEBUGGING

/*
 * Dump all 32 PHY registers (of the currently-selected page) for
 * debugging.  Snapshot first, then print, so the dump is consistent.
 * Caller must hold <genlock>, which serialises MII access.
 */
void
rge_phydump(rge_t *rgep)
{
    uint16_t regs[32];
    int i;

    ASSERT(mutex_owned(rgep->genlock));

    for (i = 0; i < 32; ++i) {
        regs[i] = rge_mii_get16(rgep, i);
    }

    for (i = 0; i < 32; i += 8)
        RGE_DEBUG(("rge_phydump: "
            "0x%04x %04x %04x %04x %04x %04x %04x %04x",
            regs[i+0], regs[i+1], regs[i+2], regs[i+3],
            regs[i+4], regs[i+5], regs[i+6], regs[i+7]));
}

#endif	/* RGE_DEBUGGING */

/*
 * Periodic PHY health check.  While the link stays down with 1000Mb/s
 * full-duplex advertised on an RTL8169S/8110S-class PHY, reset the PHY
 * after 15 consecutive link-down observations (nominally 15 seconds,
 * assuming the caller invokes this once per second — TODO confirm the
 * caller's cadence).  Resets the counter whenever the link is up.
 */
static void
rge_phy_check(rge_t *rgep)
{
    uint16_t gig_ctl;

    if (rgep->param_link_up == LINK_STATE_DOWN) {
        /*
         * RTL8169S/8110S PHY has the "PCS bug".  Need to reset the
         * PHY every 15 seconds when link is down and 1000Mb/s
         * full-duplex is advertised.
         */
        if (rgep->chipid.phy_ver == PHY_VER_S) {
            gig_ctl = rge_mii_get16(rgep, MII_1000BASE_T_CONTROL);
            if (gig_ctl & MII_1000BT_CTL_ADV_FDX) {
                rgep->link_down_count++;
                if (rgep->link_down_count > 15) {
                    (void) rge_phy_reset(rgep);
                    rgep->stats.phy_reset++;
                    rgep->link_down_count = 0;
                }
            }
        }
    } else {
        rgep->link_down_count = 0;
    }
}

/*
 * Basic low-level function to reset the PHY.
 * Doesn't incorporate any special-case workarounds.
 *
 * Returns TRUE on success, FALSE if the RESET bit doesn't clear
 */
boolean_t
rge_phy_reset(rge_t *rgep)
{
    uint16_t control;
    uint_t count;

    /*
     * Set the PHY RESET bit, then poll for it to self-clear.
     * NOTE(review): the loop waits 5 x 100us = 500us total, not the
     * 5 ms the original comment claimed — confirm which was intended.
     */
    control = rge_mii_get16(rgep, MII_CONTROL);
    rge_mii_put16(rgep, MII_CONTROL, control | MII_CONTROL_RESET);
    for (count = 0; count < 5; count++) {
        drv_usecwait(100);
        control = rge_mii_get16(rgep, MII_CONTROL);
        if (BIC(control, MII_CONTROL_RESET))
            return (B_TRUE);
    }

    RGE_REPORT((rgep, "rge_phy_reset: FAILED, control now 0x%x", control));
    return (B_FALSE);
}
383 * 384 * NOTE: <genlock> must already be held by the caller 385 */ 386 void 387 rge_phy_update(rge_t *rgep) 388 { 389 boolean_t adv_autoneg; 390 boolean_t adv_pause; 391 boolean_t adv_asym_pause; 392 boolean_t adv_1000fdx; 393 boolean_t adv_1000hdx; 394 boolean_t adv_100fdx; 395 boolean_t adv_100hdx; 396 boolean_t adv_10fdx; 397 boolean_t adv_10hdx; 398 399 uint16_t control; 400 uint16_t gigctrl; 401 uint16_t anar; 402 403 ASSERT(mutex_owned(rgep->genlock)); 404 405 RGE_DEBUG(("rge_phy_update: autoneg %d " 406 "pause %d asym_pause %d " 407 "1000fdx %d 1000hdx %d " 408 "100fdx %d 100hdx %d " 409 "10fdx %d 10hdx %d ", 410 rgep->param_adv_autoneg, 411 rgep->param_adv_pause, rgep->param_adv_asym_pause, 412 rgep->param_adv_1000fdx, rgep->param_adv_1000hdx, 413 rgep->param_adv_100fdx, rgep->param_adv_100hdx, 414 rgep->param_adv_10fdx, rgep->param_adv_10hdx)); 415 416 control = gigctrl = anar = 0; 417 418 /* 419 * PHY settings are normally based on the param_* variables, 420 * but if any loopback mode is in effect, that takes precedence. 421 * 422 * RGE supports MAC-internal loopback, PHY-internal loopback, 423 * and External loopback at a variety of speeds (with a special 424 * cable). In all cases, autoneg is turned OFF, full-duplex 425 * is turned ON, and the speed/mastership is forced. 
426 */ 427 switch (rgep->param_loop_mode) { 428 case RGE_LOOP_NONE: 429 default: 430 adv_autoneg = rgep->param_adv_autoneg; 431 adv_pause = rgep->param_adv_pause; 432 adv_asym_pause = rgep->param_adv_asym_pause; 433 adv_1000fdx = rgep->param_adv_1000fdx; 434 adv_1000hdx = rgep->param_adv_1000hdx; 435 adv_100fdx = rgep->param_adv_100fdx; 436 adv_100hdx = rgep->param_adv_100hdx; 437 adv_10fdx = rgep->param_adv_10fdx; 438 adv_10hdx = rgep->param_adv_10hdx; 439 break; 440 441 case RGE_LOOP_INTERNAL_PHY: 442 case RGE_LOOP_INTERNAL_MAC: 443 adv_autoneg = adv_pause = adv_asym_pause = B_FALSE; 444 adv_1000fdx = adv_100fdx = adv_10fdx = B_FALSE; 445 adv_1000hdx = adv_100hdx = adv_10hdx = B_FALSE; 446 rgep->param_link_duplex = LINK_DUPLEX_FULL; 447 448 switch (rgep->param_loop_mode) { 449 case RGE_LOOP_INTERNAL_PHY: 450 if (rgep->chipid.mac_ver != MAC_VER_8101E) { 451 rgep->param_link_speed = 1000; 452 adv_1000fdx = B_TRUE; 453 } else { 454 rgep->param_link_speed = 100; 455 adv_100fdx = B_TRUE; 456 } 457 control = MII_CONTROL_LOOPBACK; 458 break; 459 460 case RGE_LOOP_INTERNAL_MAC: 461 if (rgep->chipid.mac_ver != MAC_VER_8101E) { 462 rgep->param_link_speed = 1000; 463 adv_1000fdx = B_TRUE; 464 } else { 465 rgep->param_link_speed = 100; 466 adv_100fdx = B_TRUE; 467 break; 468 } 469 } 470 471 RGE_DEBUG(("rge_phy_update: autoneg %d " 472 "pause %d asym_pause %d " 473 "1000fdx %d 1000hdx %d " 474 "100fdx %d 100hdx %d " 475 "10fdx %d 10hdx %d ", 476 adv_autoneg, 477 adv_pause, adv_asym_pause, 478 adv_1000fdx, adv_1000hdx, 479 adv_100fdx, adv_100hdx, 480 adv_10fdx, adv_10hdx)); 481 482 /* 483 * We should have at least one technology capability set; 484 * if not, we select a default of 1000Mb/s full-duplex 485 */ 486 if (!adv_1000fdx && !adv_100fdx && !adv_10fdx && 487 !adv_1000hdx && !adv_100hdx && !adv_10hdx) { 488 if (rgep->chipid.mac_ver != MAC_VER_8101E) 489 adv_1000fdx = B_TRUE; 490 } else { 491 adv_1000fdx = B_FALSE; 492 adv_100fdx = B_TRUE; 493 } 494 } 495 496 /* 497 * Now 
transform the adv_* variables into the proper settings 498 * of the PHY registers ... 499 * 500 * If autonegotiation is (now) enabled, we want to trigger 501 * a new autonegotiation cycle once the PHY has been 502 * programmed with the capabilities to be advertised. 503 * 504 * RTL8169/8110 doesn't support 1000Mb/s half-duplex. 505 */ 506 if (adv_autoneg) 507 control |= MII_CONTROL_ANE|MII_CONTROL_RSAN; 508 509 if (adv_1000fdx) 510 control |= MII_CONTROL_1GB|MII_CONTROL_FDUPLEX; 511 else if (adv_1000hdx) 512 control |= MII_CONTROL_1GB; 513 else if (adv_100fdx) 514 control |= MII_CONTROL_100MB|MII_CONTROL_FDUPLEX; 515 else if (adv_100hdx) 516 control |= MII_CONTROL_100MB; 517 else if (adv_10fdx) 518 control |= MII_CONTROL_FDUPLEX; 519 else if (adv_10hdx) 520 control |= 0; 521 else 522 { _NOTE(EMPTY); } /* Can't get here anyway ... */ 523 524 if (adv_1000fdx) { 525 gigctrl |= MII_1000BT_CTL_ADV_FDX; 526 /* 527 * Chipset limitation: need set other capabilities to true 528 */ 529 if (rgep->chipid.is_pcie) 530 adv_1000hdx = B_TRUE; 531 adv_100fdx = B_TRUE; 532 adv_100hdx = B_TRUE; 533 adv_10fdx = B_TRUE; 534 adv_10hdx = B_TRUE; 535 } 536 537 if (adv_1000hdx) 538 gigctrl |= MII_1000BT_CTL_ADV_HDX; 539 540 if (adv_100fdx) 541 anar |= MII_ABILITY_100BASE_TX_FD; 542 if (adv_100hdx) 543 anar |= MII_ABILITY_100BASE_TX; 544 if (adv_10fdx) 545 anar |= MII_ABILITY_10BASE_T_FD; 546 if (adv_10hdx) 547 anar |= MII_ABILITY_10BASE_T; 548 549 if (adv_pause) 550 anar |= MII_ABILITY_PAUSE; 551 if (adv_asym_pause) 552 anar |= MII_ABILITY_ASMPAUSE; 553 554 /* 555 * Munge in any other fixed bits we require ... 556 */ 557 anar |= MII_AN_SELECTOR_8023; 558 559 /* 560 * Restart the PHY and write the new values. 
Note the 561 * time, so that we can say whether subsequent link state 562 * changes can be attributed to our reprogramming the PHY 563 */ 564 rge_phy_init(rgep); 565 if (rgep->chipid.mac_ver == MAC_VER_8168B_B || 566 rgep->chipid.mac_ver == MAC_VER_8168B_C) { 567 /* power up PHY for RTL8168B chipset */ 568 rge_mii_put16(rgep, PHY_1F_REG, 0x0000); 569 rge_mii_put16(rgep, PHY_0E_REG, 0x0000); 570 rge_mii_put16(rgep, PHY_1F_REG, 0x0000); 571 } 572 rge_mii_put16(rgep, MII_AN_ADVERT, anar); 573 rge_mii_put16(rgep, MII_1000BASE_T_CONTROL, gigctrl); 574 rge_mii_put16(rgep, MII_CONTROL, control); 575 576 RGE_DEBUG(("rge_phy_update: anar <- 0x%x", anar)); 577 RGE_DEBUG(("rge_phy_update: control <- 0x%x", control)); 578 RGE_DEBUG(("rge_phy_update: gigctrl <- 0x%x", gigctrl)); 579 } 580 581 void rge_phy_init(rge_t *rgep); 582 #pragma no_inline(rge_phy_init) 583 584 void 585 rge_phy_init(rge_t *rgep) 586 { 587 rgep->phy_mii_addr = 1; 588 589 /* 590 * Below phy config steps are copied from the Programming Guide 591 * (there's no detail comments for these steps.) 
592 */ 593 switch (rgep->chipid.mac_ver) { 594 case MAC_VER_8169S_D: 595 case MAC_VER_8169S_E : 596 rge_mii_put16(rgep, PHY_1F_REG, 0x0001); 597 rge_mii_put16(rgep, PHY_15_REG, 0x1000); 598 rge_mii_put16(rgep, PHY_18_REG, 0x65c7); 599 rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000); 600 rge_mii_put16(rgep, PHY_ID_REG_2, 0x00a1); 601 rge_mii_put16(rgep, PHY_ID_REG_1, 0x0008); 602 rge_mii_put16(rgep, PHY_BMSR_REG, 0x1020); 603 rge_mii_put16(rgep, PHY_BMCR_REG, 0x1000); 604 rge_mii_put16(rgep, PHY_ANAR_REG, 0x0800); 605 rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000); 606 rge_mii_put16(rgep, PHY_ANAR_REG, 0x7000); 607 rge_mii_put16(rgep, PHY_ID_REG_2, 0xff41); 608 rge_mii_put16(rgep, PHY_ID_REG_1, 0xde60); 609 rge_mii_put16(rgep, PHY_BMSR_REG, 0x0140); 610 rge_mii_put16(rgep, PHY_BMCR_REG, 0x0077); 611 rge_mii_put16(rgep, PHY_ANAR_REG, 0x7800); 612 rge_mii_put16(rgep, PHY_ANAR_REG, 0x7000); 613 rge_mii_put16(rgep, PHY_ANAR_REG, 0xa000); 614 rge_mii_put16(rgep, PHY_ID_REG_2, 0xdf01); 615 rge_mii_put16(rgep, PHY_ID_REG_1, 0xdf20); 616 rge_mii_put16(rgep, PHY_BMSR_REG, 0xff95); 617 rge_mii_put16(rgep, PHY_BMCR_REG, 0xfa00); 618 rge_mii_put16(rgep, PHY_ANAR_REG, 0xa800); 619 rge_mii_put16(rgep, PHY_ANAR_REG, 0xa000); 620 rge_mii_put16(rgep, PHY_ANAR_REG, 0xb000); 621 rge_mii_put16(rgep, PHY_ID_REG_2, 0xff41); 622 rge_mii_put16(rgep, PHY_ID_REG_1, 0xde20); 623 rge_mii_put16(rgep, PHY_BMSR_REG, 0x0140); 624 rge_mii_put16(rgep, PHY_BMCR_REG, 0x00bb); 625 rge_mii_put16(rgep, PHY_ANAR_REG, 0xb800); 626 rge_mii_put16(rgep, PHY_ANAR_REG, 0xb000); 627 rge_mii_put16(rgep, PHY_ANAR_REG, 0xf000); 628 rge_mii_put16(rgep, PHY_ID_REG_2, 0xdf01); 629 rge_mii_put16(rgep, PHY_ID_REG_1, 0xdf20); 630 rge_mii_put16(rgep, PHY_BMSR_REG, 0xff95); 631 rge_mii_put16(rgep, PHY_BMCR_REG, 0xbf00); 632 rge_mii_put16(rgep, PHY_ANAR_REG, 0xf800); 633 rge_mii_put16(rgep, PHY_ANAR_REG, 0xf000); 634 rge_mii_put16(rgep, PHY_ANAR_REG, 0x0000); 635 rge_mii_put16(rgep, PHY_1F_REG, 0x0000); 636 rge_mii_put16(rgep, 
PHY_0B_REG, 0x0000); 637 break; 638 639 case MAC_VER_8169SB: 640 rge_mii_put16(rgep, PHY_1F_REG, 0x0001); 641 rge_mii_put16(rgep, PHY_1B_REG, 0xD41E); 642 rge_mii_put16(rgep, PHY_0E_REG, 0x7bff); 643 rge_mii_put16(rgep, PHY_GBCR_REG, GBCR_DEFAULT); 644 rge_mii_put16(rgep, PHY_1F_REG, 0x0002); 645 rge_mii_put16(rgep, PHY_BMSR_REG, 0x90D0); 646 rge_mii_put16(rgep, PHY_1F_REG, 0x0000); 647 break; 648 649 case MAC_VER_8169SC: 650 rge_mii_put16(rgep, PHY_1F_REG, 0x0001); 651 rge_mii_put16(rgep, PHY_ANER_REG, 0x0078); 652 rge_mii_put16(rgep, PHY_ANNPRR_REG, 0x05dc); 653 rge_mii_put16(rgep, PHY_GBCR_REG, 0x2672); 654 rge_mii_put16(rgep, PHY_GBSR_REG, 0x6a14); 655 rge_mii_put16(rgep, PHY_0B_REG, 0x7cb0); 656 rge_mii_put16(rgep, PHY_0C_REG, 0xdb80); 657 rge_mii_put16(rgep, PHY_1B_REG, 0xc414); 658 rge_mii_put16(rgep, PHY_1C_REG, 0xef03); 659 rge_mii_put16(rgep, PHY_1D_REG, 0x3dc8); 660 rge_mii_put16(rgep, PHY_1F_REG, 0x0003); 661 rge_mii_put16(rgep, PHY_13_REG, 0x0600); 662 rge_mii_put16(rgep, PHY_1F_REG, 0x0000); 663 break; 664 665 case MAC_VER_8168: 666 rge_mii_put16(rgep, PHY_1F_REG, 0x0001); 667 rge_mii_put16(rgep, PHY_ANER_REG, 0x00aa); 668 rge_mii_put16(rgep, PHY_ANNPTR_REG, 0x3173); 669 rge_mii_put16(rgep, PHY_ANNPRR_REG, 0x08fc); 670 rge_mii_put16(rgep, PHY_GBCR_REG, 0xe2d0); 671 rge_mii_put16(rgep, PHY_0B_REG, 0x941a); 672 rge_mii_put16(rgep, PHY_18_REG, 0x65fe); 673 rge_mii_put16(rgep, PHY_1C_REG, 0x1e02); 674 rge_mii_put16(rgep, PHY_1F_REG, 0x0002); 675 rge_mii_put16(rgep, PHY_ANNPTR_REG, 0x103e); 676 rge_mii_put16(rgep, PHY_1F_REG, 0x0000); 677 break; 678 679 case MAC_VER_8168B_B: 680 case MAC_VER_8168B_C: 681 rge_mii_put16(rgep, PHY_1F_REG, 0x0001); 682 rge_mii_put16(rgep, PHY_0B_REG, 0x94b0); 683 rge_mii_put16(rgep, PHY_1B_REG, 0xc416); 684 rge_mii_put16(rgep, PHY_1F_REG, 0x0003); 685 rge_mii_put16(rgep, PHY_12_REG, 0x6096); 686 rge_mii_put16(rgep, PHY_1F_REG, 0x0000); 687 break; 688 } 689 } 690 691 void rge_chip_ident(rge_t *rgep); 692 #pragma 
void rge_chip_ident(rge_t *rgep);
#pragma	no_inline(rge_chip_ident)

/*
 * Identify the chip: read and record the MAC and PHY versions, detect
 * PCIE capability, apply per-chip PCI tweaks, and derive the MTU/buffer
 * sizing and polling-statistics parameters from the results.
 */
void
rge_chip_ident(rge_t *rgep)
{
    chip_id_t *chip = &rgep->chipid;
    uint32_t val32;
    uint16_t val16;

    /*
     * Read and record MAC version
     */
    val32 = rge_reg_get32(rgep, TX_CONFIG_REG);
    val32 &= HW_VERSION_ID_0 | HW_VERSION_ID_1;
    chip->mac_ver = val32;
    /* PCIE if the PCI Express capability is present in config space */
    chip->is_pcie = pci_lcap_locate(rgep->cfg_handle,
        PCI_CAP_ID_PCI_E, &val16) == DDI_SUCCESS;

    /*
     * Workaround for 8101E_C: treat it as non-PCIE below, but keep the
     * "enable MAC first" behaviour based on the real capability probe.
     */
    chip->enable_mac_first = !chip->is_pcie;
    if (chip->mac_ver == MAC_VER_8101E_C) {
        chip->is_pcie = B_FALSE;
    }

    /*
     * Read and record PHY version
     */
    val16 = rge_mii_get16(rgep, PHY_ID_REG_2);
    val16 &= PHY_VER_MASK;
    chip->phy_ver = val16;

    /* set pci latency timer */
    if (chip->mac_ver == MAC_VER_8169 ||
        chip->mac_ver == MAC_VER_8169S_D ||
        chip->mac_ver == MAC_VER_8169S_E ||
        chip->mac_ver == MAC_VER_8169SC)
        pci_config_put8(rgep->cfg_handle, PCI_CONF_LATENCY_TIMER, 0x40);

    if (chip->mac_ver == MAC_VER_8169SC) {
        val16 = rge_reg_get16(rgep, RT_CONFIG_1_REG);
        val16 &= 0x0300;
        /*
         * NOTE(review): after masking with 0x0300 the value can only
         * be 0x0000/0x0100/0x0200/0x0300, so the 0x1 case below can
         * never match and the 66Mhz path is unreachable — presumably
         * 0x0100 was intended; verify against the RT_CONFIG_1 bit
         * definitions before changing.
         */
        if (val16 == 0x1) /* 66Mhz PCI */
            rge_reg_put32(rgep, 0x7c, 0x000700ff);
        else if (val16 == 0x0) /* 33Mhz PCI */
            rge_reg_put32(rgep, 0x7c, 0x0007ff00);
    }

    /*
     * PCIE chipset require the Rx buffer start address must be
     * 8-byte alignment and the Rx buffer size must be multiple of 8.
     * We'll just use bcopy in receive procedure for the PCIE chipset.
     */
    if (chip->is_pcie) {
        rgep->chip_flags |= CHIP_FLAG_FORCE_BCOPY;
        if (rgep->default_mtu > ETHERMTU) {
            rge_notice(rgep, "Jumbo packets not supported "
                "for this PCIE chipset");
            rgep->default_mtu = ETHERMTU;
        }
    }
    /* No headroom needed when packets are always copied on receive */
    if (rgep->chip_flags & CHIP_FLAG_FORCE_BCOPY)
        rgep->head_room = 0;
    else
        rgep->head_room = RGE_HEADROOM;

    /*
     * Initialize other variables: clamp the MTU to the supported
     * range, then size the Tx/Rx buffers accordingly.
     */
    if (rgep->default_mtu < ETHERMTU || rgep->default_mtu > RGE_JUMBO_MTU)
        rgep->default_mtu = ETHERMTU;
    if (rgep->default_mtu > ETHERMTU) {
        rgep->rxbuf_size = RGE_BUFF_SIZE_JUMBO;
        rgep->txbuf_size = RGE_BUFF_SIZE_JUMBO;
        rgep->ethmax_size = RGE_JUMBO_SIZE;
    } else {
        rgep->rxbuf_size = RGE_BUFF_SIZE_STD;
        rgep->txbuf_size = RGE_BUFF_SIZE_STD;
        rgep->ethmax_size = ETHERMAX;
    }
    chip->rxconfig = RX_CONFIG_DEFAULT;
    chip->txconfig = TX_CONFIG_DEFAULT;

    /* interval to update statistics for polling mode */
    rgep->tick_delta = drv_usectohz(1000*1000/CLK_TICK);

    /* ensure we are not in polling mode */
    rgep->curr_tick = ddi_get_lbolt() - 2*rgep->tick_delta;
    RGE_TRACE(("%s: MAC version = %x, PHY version = %x",
        rgep->ifname, chip->mac_ver, chip->phy_ver));
}

/*
 * Perform first-stage chip (re-)initialisation, using only config-space
 * accesses:
 *
 * + Read the vendor/device/revision/subsystem/cache-line-size registers,
 *   returning the data in the structure pointed to by <idp>.
 * + Enable Memory Space accesses.
 * + Enable Bus Mastering.
 */
void rge_chip_cfg_init(rge_t *rgep, chip_id_t *cidp);
#pragma	no_inline(rge_chip_cfg_init)

/*
 * Record the PCI identity/config registers into <cidp>, then enable
 * I/O, Memory Space and Bus Mastering in the command register.
 */
void
rge_chip_cfg_init(rge_t *rgep, chip_id_t *cidp)
{
    ddi_acc_handle_t handle;
    uint16_t commd;

    handle = rgep->cfg_handle;

    /*
     * Save PCI cache line size and subsystem vendor ID
     */
    cidp->command = pci_config_get16(handle, PCI_CONF_COMM);
    cidp->vendor = pci_config_get16(handle, PCI_CONF_VENID);
    cidp->device = pci_config_get16(handle, PCI_CONF_DEVID);
    cidp->subven = pci_config_get16(handle, PCI_CONF_SUBVENID);
    cidp->subdev = pci_config_get16(handle, PCI_CONF_SUBSYSID);
    cidp->revision = pci_config_get8(handle, PCI_CONF_REVID);
    cidp->clsize = pci_config_get8(handle, PCI_CONF_CACHE_LINESZ);
    cidp->latency = pci_config_get8(handle, PCI_CONF_LATENCY_TIMER);

    /*
     * Turn on Master Enable (DMA) and IO Enable bits.
     * Enable PCI Memory Space accesses
     */
    commd = cidp->command;
    commd |= PCI_COMM_ME | PCI_COMM_MAE | PCI_COMM_IO;
    pci_config_put16(handle, PCI_CONF_COMM, commd);

    RGE_DEBUG(("rge_chip_cfg_init: vendor 0x%x device 0x%x revision 0x%x",
        cidp->vendor, cidp->device, cidp->revision));
    RGE_DEBUG(("rge_chip_cfg_init: subven 0x%x subdev 0x%x",
        cidp->subven, cidp->subdev));
    RGE_DEBUG(("rge_chip_cfg_init: clsize %d latency %d command 0x%x",
        cidp->clsize, cidp->latency, cidp->command));
}

int rge_chip_reset(rge_t *rgep);
#pragma	no_inline(rge_chip_reset)

/*
 * Software-reset the chip: stop Tx/Rx, mask and acknowledge interrupts,
 * then set the RESET bit and poll for it to self-clear.
 * Returns 0 on success, -1 if the RESET bit never clears.
 */
int
rge_chip_reset(rge_t *rgep)
{
    int i;
    uint8_t val8;

    /*
     * Chip should be in STOP state
     */
    rge_reg_clr8(rgep, RT_COMMAND_REG,
        RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);

    /*
     * Disable interrupt
     */
    rgep->int_mask = INT_MASK_NONE;
    rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);

    /*
     * Clear pending interrupts (write-1-to-clear status register)
     */
    rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL);

    /*
     * Reset chip
     */
    rge_reg_set8(rgep, RT_COMMAND_REG, RT_COMMAND_RESET);

    /*
     * Wait for reset success
     */
    for (i = 0; i < CHIP_RESET_LOOP; i++) {
        drv_usecwait(10);
        val8 = rge_reg_get8(rgep, RT_COMMAND_REG);
        if (!(val8 & RT_COMMAND_RESET)) {
            rgep->rge_chip_state = RGE_CHIP_RESET;
            return (0);
        }
    }
    RGE_REPORT((rgep, "rge_chip_reset fail."));
    return (-1);
}

void rge_chip_init(rge_t *rgep);
#pragma	no_inline(rge_chip_init)

/*
 * Full chip (re-)programming after reset: per-chip errata tweaks, PHY
 * setup, C+ command options, packet-size limits, Tx/Rx configuration
 * and descriptor ring base addresses, multicast filter and misc timers.
 * The register write ordering below follows the vendor programming
 * guide; do not reorder.
 */
void
rge_chip_init(rge_t *rgep)
{
    uint32_t val32;
    uint32_t val16;	/* NOTE(review): holds 16-bit values despite type */
    uint32_t *hashp;
    chip_id_t *chip = &rgep->chipid;

    /*
     * Increase the threshold voltage of RX sensitivity
     */
    if (chip->mac_ver == MAC_VER_8168B_B ||
        chip->mac_ver == MAC_VER_8168B_C ||
        chip->mac_ver == MAC_VER_8101E) {
        rge_ephy_put16(rgep, 0x01, 0x1bd3);
    }

    /* Vendor-specified CSI access sequence for 8168/8168B_B */
    if (chip->mac_ver == MAC_VER_8168 ||
        chip->mac_ver == MAC_VER_8168B_B) {
        val16 = rge_reg_get8(rgep, PHY_STATUS_REG);
        val16 = 0x12<<8 | val16;
        rge_reg_put16(rgep, PHY_STATUS_REG, val16);
        rge_reg_put32(rgep, RT_CSI_DATA_REG, 0x00021c01);
        rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f088);
        rge_reg_put32(rgep, RT_CSI_DATA_REG, 0x00004000);
        rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f0b0);
        rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x0000f068);
        val32 = rge_reg_get32(rgep, RT_CSI_DATA_REG);
        val32 |= 0x7000;
        val32 &= 0xffff5fff;
        rge_reg_put32(rgep, RT_CSI_DATA_REG, val32);
        rge_reg_put32(rgep, RT_CSI_ACCESS_REG, 0x8000f068);
    }

    /*
     * Config MII register
     */
    rgep->param_link_up = LINK_STATE_DOWN;
    rge_phy_update(rgep);

    /*
     * Enable Rx checksum offload.
     * Then for vlan support, we must enable receive vlan de-tagging.
     * Otherwise, there'll be checksum error.
     */
    val16 = rge_reg_get16(rgep, CPLUS_COMMAND_REG);
    val16 |= RX_CKSM_OFFLOAD | RX_VLAN_DETAG;
    if (chip->mac_ver == MAC_VER_8169S_D) {
        val16 |= CPLUS_BIT14 | MUL_PCI_RW_ENABLE;
        rge_reg_put8(rgep, RESV_82_REG, 0x01);
    }
    if (chip->mac_ver == MAC_VER_8169S_E ||
        chip->mac_ver == MAC_VER_8169SC) {
        val16 |= MUL_PCI_RW_ENABLE;
    }
    /* low two bits are always written as zero */
    rge_reg_put16(rgep, CPLUS_COMMAND_REG, val16 & (~0x03));

    /*
     * Start transmit/receive before set tx/rx configuration register
     */
    if (chip->enable_mac_first)
        rge_reg_set8(rgep, RT_COMMAND_REG,
            RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);

    /*
     * Set dump tally counter register: high 32 bits first, then the
     * low 32 bits merged with the register's reserved bits.
     */
    val32 = rgep->dma_area_stats.cookie.dmac_laddress >> 32;
    rge_reg_put32(rgep, DUMP_COUNTER_REG_1, val32);
    val32 = rge_reg_get32(rgep, DUMP_COUNTER_REG_0);
    val32 &= DUMP_COUNTER_REG_RESV;
    val32 |= rgep->dma_area_stats.cookie.dmac_laddress;
    rge_reg_put32(rgep, DUMP_COUNTER_REG_0, val32);

    /*
     * Change to config register write enable mode
     */
    rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);

    /*
     * Set Tx/Rx maximum packet size
     */
    if (rgep->default_mtu > ETHERMTU) {
        rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_JUMBO);
        rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_JUMBO);
    } else if (rgep->chipid.mac_ver != MAC_VER_8101E) {
        rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_STD);
        rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_STD);
    } else {
        rge_reg_put8(rgep, TX_MAX_PKTSIZE_REG, TX_PKTSIZE_STD_8101E);
        rge_reg_put16(rgep, RX_MAX_PKTSIZE_REG, RX_PKTSIZE_STD_8101E);
    }

    /*
     * Set receive configuration register
     */
    val32 = rge_reg_get32(rgep, RX_CONFIG_REG);
    val32 &= RX_CONFIG_REG_RESV;
    if (rgep->promisc)
        val32 |= RX_ACCEPT_ALL_PKT;
    rge_reg_put32(rgep, RX_CONFIG_REG, val32 | chip->rxconfig);

    /*
     * Set transmit configuration register
     */
    val32 = rge_reg_get32(rgep, TX_CONFIG_REG);
    val32 &= TX_CONFIG_REG_RESV;
    rge_reg_put32(rgep, TX_CONFIG_REG, val32 | chip->txconfig);

    /*
     * Set Tx/Rx descriptor ring base addresses (64-bit, split into
     * low/high halves; the high-priority Tx ring is unused)
     */
    val32 = rgep->tx_desc.cookie.dmac_laddress;
    rge_reg_put32(rgep, NORMAL_TX_RING_ADDR_LO_REG, val32);
    val32 = rgep->tx_desc.cookie.dmac_laddress >> 32;
    rge_reg_put32(rgep, NORMAL_TX_RING_ADDR_HI_REG, val32);
    rge_reg_put32(rgep, HIGH_TX_RING_ADDR_LO_REG, 0);
    rge_reg_put32(rgep, HIGH_TX_RING_ADDR_HI_REG, 0);
    val32 = rgep->rx_desc.cookie.dmac_laddress;
    rge_reg_put32(rgep, RX_RING_ADDR_LO_REG, val32);
    val32 = rgep->rx_desc.cookie.dmac_laddress >> 32;
    rge_reg_put32(rgep, RX_RING_ADDR_HI_REG, val32);

    /*
     * Suggested setting from Realtek
     */
    if (rgep->chipid.mac_ver != MAC_VER_8101E)
        rge_reg_put16(rgep, RESV_E2_REG, 0x282a);
    else
        rge_reg_put16(rgep, RESV_E2_REG, 0x0000);

    /*
     * Set multicast register (all-ones filter in promiscuous mode)
     */
    hashp = (uint32_t *)rgep->mcast_hash;
    if (rgep->promisc) {
        rge_reg_put32(rgep, MULTICAST_0_REG, ~0U);
        rge_reg_put32(rgep, MULTICAST_4_REG, ~0U);
    } else {
        rge_reg_put32(rgep, MULTICAST_0_REG, RGE_BSWAP_32(hashp[0]));
        rge_reg_put32(rgep, MULTICAST_4_REG, RGE_BSWAP_32(hashp[1]));
    }

    /*
     * Misc register setting:
     *	-- Missed packet counter: clear it
     *	-- TimerInt Register
     *	-- Timer count register
     */
    rge_reg_put32(rgep, RX_PKT_MISS_COUNT_REG, 0);
    rge_reg_put32(rgep, TIMER_INT_REG, TIMER_INT_NONE);
    rge_reg_put32(rgep, TIMER_COUNT_REG, 0);

    /*
     * disable the Unicast Wakeup Frame capability
     */
    rge_reg_clr8(rgep, RT_CONFIG_5_REG, RT_UNI_WAKE_FRAME);

    /*
     * Return to normal network/host communication mode
     */
    rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
    drv_usecwait(20);
}
transmitting and/or receiving, 1049 * including enabling interrupts 1050 */ 1051 void rge_chip_start(rge_t *rgep); 1052 #pragma no_inline(rge_chip_start) 1053 1054 void 1055 rge_chip_start(rge_t *rgep) 1056 { 1057 /* 1058 * Clear statistics 1059 */ 1060 bzero(&rgep->stats, sizeof (rge_stats_t)); 1061 DMA_ZERO(rgep->dma_area_stats); 1062 1063 /* 1064 * Start transmit/receive 1065 */ 1066 rge_reg_set8(rgep, RT_COMMAND_REG, 1067 RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE); 1068 1069 /* 1070 * Enable interrupt 1071 */ 1072 rgep->int_mask = RGE_INT_MASK; 1073 if (rgep->chipid.is_pcie) { 1074 rgep->int_mask |= NO_TXDESC_INT; 1075 } 1076 rgep->rx_fifo_ovf = 0; 1077 rgep->int_mask |= RX_FIFO_OVERFLOW_INT; 1078 rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask); 1079 1080 /* 1081 * All done! 1082 */ 1083 rgep->rge_chip_state = RGE_CHIP_RUNNING; 1084 } 1085 1086 /* 1087 * rge_chip_stop() -- stop board receiving 1088 * 1089 * Since this function is also invoked by rge_quiesce(), it 1090 * must not block; also, no tracing or logging takes place 1091 * when invoked by rge_quiesce(). 
 */
void rge_chip_stop(rge_t *rgep, boolean_t fault);
#pragma	no_inline(rge_chip_stop)

void
rge_chip_stop(rge_t *rgep, boolean_t fault)
{
	/*
	 * Disable interrupt
	 */
	rgep->int_mask = INT_MASK_NONE;
	rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask);

	/*
	 * Clear pended interrupt
	 * (skipped while suspended: the device may not be accessible)
	 */
	if (!rgep->suspended) {
		rge_reg_put16(rgep, INT_STATUS_REG, INT_MASK_ALL);
	}

	/*
	 * Stop the board and disable transmit/receive
	 */
	rge_reg_clr8(rgep, RT_COMMAND_REG,
	    RT_COMMAND_RX_ENABLE | RT_COMMAND_TX_ENABLE);

	/* FAULT makes the factotum reset/restart the chip later */
	if (fault)
		rgep->rge_chip_state = RGE_CHIP_FAULT;
	else
		rgep->rge_chip_state = RGE_CHIP_STOPPED;
}

/*
 * rge_get_mac_addr() -- get the MAC address on NIC
 *
 * Reads the factory MAC from the ID registers into rgep->netaddr,
 * least-significant byte first (ID_0_REG holds bytes 0-3, the low
 * 16 bits of ID_4_REG hold bytes 4-5).
 */
static void rge_get_mac_addr(rge_t *rgep);
#pragma	inline(rge_get_mac_addr)

static void
rge_get_mac_addr(rge_t *rgep)
{
	uint8_t *macaddr = rgep->netaddr;
	uint32_t val32;

	/*
	 * Read first 4-byte of mac address
	 */
	val32 = rge_reg_get32(rgep, ID_0_REG);
	macaddr[0] = val32 & 0xff;
	val32 = val32 >> 8;
	macaddr[1] = val32 & 0xff;
	val32 = val32 >> 8;
	macaddr[2] = val32 & 0xff;
	val32 = val32 >> 8;
	macaddr[3] = val32 & 0xff;

	/*
	 * Read last 2-byte of mac address
	 */
	val32 = rge_reg_get32(rgep, ID_4_REG);
	macaddr[4] = val32 & 0xff;
	val32 = val32 >> 8;
	macaddr[5] = val32 & 0xff;
}

/*
 * rge_set_mac_addr() -- program rgep->netaddr into the chip's ID
 * registers.  The ID registers are only writable while the 93c46
 * command register is in config mode, so writes are bracketed by
 * entering/leaving that mode.
 */
static void rge_set_mac_addr(rge_t *rgep);
#pragma	inline(rge_set_mac_addr)

static void
rge_set_mac_addr(rge_t *rgep)
{
	uint8_t *p = rgep->netaddr;
	uint32_t val32;

	/*
	 * Change to config register write enable mode
	 */
	rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);

	/*
	 * Get first 4 bytes of mac address
	 */
	val32 = p[3];
	val32 = val32 << 8;
	val32 |= p[2];
	val32 = val32 << 8;
	val32 |= p[1];
	val32 = val32 << 8;
	val32 |= p[0];

	/*
	 * Set first 4 bytes of mac address
	 */
	rge_reg_put32(rgep, ID_0_REG, val32);

	/*
	 * Get last 2 bytes of mac address
	 */
	val32 = p[5];
	val32 = val32 << 8;
	val32 |= p[4];

	/*
	 * Set last 2 bytes of mac address
	 * (preserve the upper 16 bits of ID_4_REG)
	 */
	val32 |= rge_reg_get32(rgep, ID_4_REG) & ~0xffff;
	rge_reg_put32(rgep, ID_4_REG, val32);

	/*
	 * Return to normal network/host communication mode
	 */
	rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
}

/*
 * rge_set_multi_addr() -- install the multicast hash filter (or
 * all-ones when in promiscuous mode) into MULTICAST_0/4_REG.
 */
static void rge_set_multi_addr(rge_t *rgep);
#pragma	inline(rge_set_multi_addr)

static void
rge_set_multi_addr(rge_t *rgep)
{
	uint32_t *hashp;

	hashp = (uint32_t *)rgep->mcast_hash;

	/*
	 * Change to config register write enable mode
	 * (only the 8169SC requires this for the multicast registers)
	 */
	if (rgep->chipid.mac_ver == MAC_VER_8169SC) {
		rge_reg_set8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
	}
	if (rgep->promisc) {
		rge_reg_put32(rgep, MULTICAST_0_REG, ~0U);
		rge_reg_put32(rgep, MULTICAST_4_REG, ~0U);
	} else {
		rge_reg_put32(rgep, MULTICAST_0_REG, RGE_BSWAP_32(hashp[0]));
		rge_reg_put32(rgep, MULTICAST_4_REG, RGE_BSWAP_32(hashp[1]));
	}

	/*
	 * Return to normal network/host communication mode
	 */
	if (rgep->chipid.mac_ver == MAC_VER_8169SC) {
		rge_reg_clr8(rgep, RT_93c46_COMMOND_REG, RT_93c46_MODE_CONFIG);
	}
}

/*
 * rge_set_promisc() -- set or clear the accept-all-packets bit in the
 * Rx configuration register according to rgep->promisc.
 */
static void rge_set_promisc(rge_t *rgep);
#pragma	inline(rge_set_promisc)

static void
rge_set_promisc(rge_t *rgep)
{
	if (rgep->promisc)
		rge_reg_set32(rgep, RX_CONFIG_REG, RX_ACCEPT_ALL_PKT);
	else
		rge_reg_clr32(rgep, RX_CONFIG_REG, RX_ACCEPT_ALL_PKT);
}

/*
 * rge_chip_sync() -- program the chip with the unicast MAC address,
 * the multicast hash table, the
required level of promiscuity, and 1253 * the current loopback mode ... 1254 */ 1255 void rge_chip_sync(rge_t *rgep, enum rge_sync_op todo); 1256 #pragma no_inline(rge_chip_sync) 1257 1258 void 1259 rge_chip_sync(rge_t *rgep, enum rge_sync_op todo) 1260 { 1261 switch (todo) { 1262 case RGE_GET_MAC: 1263 rge_get_mac_addr(rgep); 1264 break; 1265 case RGE_SET_MAC: 1266 /* Reprogram the unicast MAC address(es) ... */ 1267 rge_set_mac_addr(rgep); 1268 break; 1269 case RGE_SET_MUL: 1270 /* Reprogram the hashed multicast address table ... */ 1271 rge_set_multi_addr(rgep); 1272 break; 1273 case RGE_SET_PROMISC: 1274 /* Set or clear the PROMISCUOUS mode bit */ 1275 rge_set_multi_addr(rgep); 1276 rge_set_promisc(rgep); 1277 break; 1278 default: 1279 break; 1280 } 1281 } 1282 1283 void rge_chip_blank(void *arg, time_t ticks, uint_t count, int flag); 1284 #pragma no_inline(rge_chip_blank) 1285 1286 /* ARGSUSED */ 1287 void 1288 rge_chip_blank(void *arg, time_t ticks, uint_t count, int flag) 1289 { 1290 _NOTE(ARGUNUSED(arg, ticks, count)); 1291 } 1292 1293 void rge_tx_trigger(rge_t *rgep); 1294 #pragma no_inline(rge_tx_trigger) 1295 1296 void 1297 rge_tx_trigger(rge_t *rgep) 1298 { 1299 rge_reg_put8(rgep, TX_RINGS_POLL_REG, NORMAL_TX_RING_POLL); 1300 } 1301 1302 void rge_hw_stats_dump(rge_t *rgep); 1303 #pragma no_inline(rge_tx_trigger) 1304 1305 void 1306 rge_hw_stats_dump(rge_t *rgep) 1307 { 1308 int i = 0; 1309 uint32_t regval = 0; 1310 1311 if (rgep->rge_mac_state == RGE_MAC_STOPPED) 1312 return; 1313 1314 regval = rge_reg_get32(rgep, DUMP_COUNTER_REG_0); 1315 while (regval & DUMP_START) { 1316 drv_usecwait(100); 1317 if (++i > STATS_DUMP_LOOP) { 1318 RGE_DEBUG(("rge h/w statistics dump fail!")); 1319 rgep->rge_chip_state = RGE_CHIP_ERROR; 1320 return; 1321 } 1322 regval = rge_reg_get32(rgep, DUMP_COUNTER_REG_0); 1323 } 1324 DMA_SYNC(rgep->dma_area_stats, DDI_DMA_SYNC_FORKERNEL); 1325 1326 /* 1327 * Start H/W statistics dump for RTL8169 chip 1328 */ 1329 rge_reg_set32(rgep, 
DUMP_COUNTER_REG_0, DUMP_START); 1330 } 1331 1332 /* 1333 * ========== Hardware interrupt handler ========== 1334 */ 1335 1336 #undef RGE_DBG 1337 #define RGE_DBG RGE_DBG_INT /* debug flag for this code */ 1338 1339 static void rge_wake_factotum(rge_t *rgep); 1340 #pragma inline(rge_wake_factotum) 1341 1342 static void 1343 rge_wake_factotum(rge_t *rgep) 1344 { 1345 if (rgep->factotum_flag == 0) { 1346 rgep->factotum_flag = 1; 1347 (void) ddi_intr_trigger_softint(rgep->factotum_hdl, NULL); 1348 } 1349 } 1350 1351 /* 1352 * rge_intr() -- handle chip interrupts 1353 */ 1354 uint_t rge_intr(caddr_t arg1, caddr_t arg2); 1355 #pragma no_inline(rge_intr) 1356 1357 uint_t 1358 rge_intr(caddr_t arg1, caddr_t arg2) 1359 { 1360 rge_t *rgep = (rge_t *)arg1; 1361 uint16_t int_status; 1362 clock_t now; 1363 uint32_t tx_pkts; 1364 uint32_t rx_pkts; 1365 uint32_t poll_rate; 1366 uint32_t opt_pkts; 1367 uint32_t opt_intrs; 1368 boolean_t update_int_mask = B_FALSE; 1369 uint32_t itimer; 1370 1371 _NOTE(ARGUNUSED(arg2)) 1372 1373 mutex_enter(rgep->genlock); 1374 1375 if (rgep->suspended) { 1376 mutex_exit(rgep->genlock); 1377 return (DDI_INTR_UNCLAIMED); 1378 } 1379 1380 /* 1381 * Was this interrupt caused by our device... 1382 */ 1383 int_status = rge_reg_get16(rgep, INT_STATUS_REG); 1384 if (!(int_status & rgep->int_mask)) { 1385 mutex_exit(rgep->genlock); 1386 return (DDI_INTR_UNCLAIMED); 1387 /* indicate it wasn't our interrupt */ 1388 } 1389 rgep->stats.intr++; 1390 1391 /* 1392 * Clear interrupt 1393 * For PCIE chipset, we need disable interrupt first. 
1394 */ 1395 if (rgep->chipid.is_pcie) { 1396 rge_reg_put16(rgep, INT_MASK_REG, INT_MASK_NONE); 1397 update_int_mask = B_TRUE; 1398 } 1399 rge_reg_put16(rgep, INT_STATUS_REG, int_status); 1400 1401 /* 1402 * Calculate optimal polling interval 1403 */ 1404 now = ddi_get_lbolt(); 1405 if (now - rgep->curr_tick >= rgep->tick_delta && 1406 (rgep->param_link_speed == RGE_SPEED_1000M || 1407 rgep->param_link_speed == RGE_SPEED_100M)) { 1408 /* number of rx and tx packets in the last tick */ 1409 tx_pkts = rgep->stats.opackets - rgep->last_opackets; 1410 rx_pkts = rgep->stats.rpackets - rgep->last_rpackets; 1411 1412 rgep->last_opackets = rgep->stats.opackets; 1413 rgep->last_rpackets = rgep->stats.rpackets; 1414 1415 /* restore interrupt mask */ 1416 rgep->int_mask |= TX_OK_INT | RX_OK_INT; 1417 if (rgep->chipid.is_pcie) { 1418 rgep->int_mask |= NO_TXDESC_INT; 1419 } 1420 1421 /* optimal number of packets in a tick */ 1422 if (rgep->param_link_speed == RGE_SPEED_1000M) { 1423 opt_pkts = (1000*1000*1000/8)/ETHERMTU/CLK_TICK; 1424 } else { 1425 opt_pkts = (100*1000*1000/8)/ETHERMTU/CLK_TICK; 1426 } 1427 1428 /* 1429 * calculate polling interval based on rx and tx packets 1430 * in the last tick 1431 */ 1432 poll_rate = 0; 1433 if (now - rgep->curr_tick < 2*rgep->tick_delta) { 1434 opt_intrs = opt_pkts/TX_COALESC; 1435 if (tx_pkts > opt_intrs) { 1436 poll_rate = max(tx_pkts/TX_COALESC, opt_intrs); 1437 rgep->int_mask &= ~(TX_OK_INT | NO_TXDESC_INT); 1438 } 1439 1440 opt_intrs = opt_pkts/RX_COALESC; 1441 if (rx_pkts > opt_intrs) { 1442 opt_intrs = max(rx_pkts/RX_COALESC, opt_intrs); 1443 poll_rate = max(opt_intrs, poll_rate); 1444 rgep->int_mask &= ~RX_OK_INT; 1445 } 1446 /* ensure poll_rate reasonable */ 1447 poll_rate = min(poll_rate, opt_pkts*4); 1448 } 1449 1450 if (poll_rate) { 1451 /* move to polling mode */ 1452 if (rgep->chipid.is_pcie) { 1453 itimer = (TIMER_CLK_PCIE/CLK_TICK)/poll_rate; 1454 } else { 1455 itimer = (TIMER_CLK_PCI/CLK_TICK)/poll_rate; 1456 } 1457 } 
else { 1458 /* move to normal mode */ 1459 itimer = 0; 1460 } 1461 RGE_DEBUG(("%s: poll: itimer:%d int_mask:0x%x", 1462 __func__, itimer, rgep->int_mask)); 1463 rge_reg_put32(rgep, TIMER_INT_REG, itimer); 1464 1465 /* update timestamp for statistics */ 1466 rgep->curr_tick = now; 1467 1468 /* reset timer */ 1469 int_status |= TIME_OUT_INT; 1470 1471 update_int_mask = B_TRUE; 1472 } 1473 1474 if (int_status & TIME_OUT_INT) { 1475 rge_reg_put32(rgep, TIMER_COUNT_REG, 0); 1476 } 1477 1478 /* flush post writes */ 1479 (void) rge_reg_get16(rgep, INT_STATUS_REG); 1480 1481 /* 1482 * Cable link change interrupt 1483 */ 1484 if (int_status & LINK_CHANGE_INT) { 1485 rge_chip_cyclic(rgep); 1486 } 1487 1488 if (int_status & RX_FIFO_OVERFLOW_INT) { 1489 /* start rx watchdog timeout detection */ 1490 rgep->rx_fifo_ovf = 1; 1491 if (rgep->int_mask & RX_FIFO_OVERFLOW_INT) { 1492 rgep->int_mask &= ~RX_FIFO_OVERFLOW_INT; 1493 update_int_mask = B_TRUE; 1494 } 1495 } else if (int_status & RGE_RX_INT) { 1496 /* stop rx watchdog timeout detection */ 1497 rgep->rx_fifo_ovf = 0; 1498 if ((rgep->int_mask & RX_FIFO_OVERFLOW_INT) == 0) { 1499 rgep->int_mask |= RX_FIFO_OVERFLOW_INT; 1500 update_int_mask = B_TRUE; 1501 } 1502 } 1503 1504 mutex_exit(rgep->genlock); 1505 1506 /* 1507 * Receive interrupt 1508 */ 1509 if (int_status & RGE_RX_INT) 1510 rge_receive(rgep); 1511 1512 /* 1513 * Transmit interrupt 1514 */ 1515 if (int_status & TX_ERR_INT) { 1516 RGE_REPORT((rgep, "tx error happened, resetting the chip ")); 1517 mutex_enter(rgep->genlock); 1518 rgep->rge_chip_state = RGE_CHIP_ERROR; 1519 mutex_exit(rgep->genlock); 1520 } else if ((rgep->chipid.is_pcie && (int_status & NO_TXDESC_INT)) || 1521 ((int_status & TX_OK_INT) && rgep->tx_free < RGE_SEND_SLOTS/8)) { 1522 (void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL); 1523 } 1524 1525 /* 1526 * System error interrupt 1527 */ 1528 if (int_status & SYS_ERR_INT) { 1529 RGE_REPORT((rgep, "sys error happened, resetting the chip ")); 1530 
mutex_enter(rgep->genlock); 1531 rgep->rge_chip_state = RGE_CHIP_ERROR; 1532 mutex_exit(rgep->genlock); 1533 } 1534 1535 /* 1536 * Re-enable interrupt for PCIE chipset or install new int_mask 1537 */ 1538 if (update_int_mask) 1539 rge_reg_put16(rgep, INT_MASK_REG, rgep->int_mask); 1540 1541 return (DDI_INTR_CLAIMED); /* indicate it was our interrupt */ 1542 } 1543 1544 /* 1545 * ========== Factotum, implemented as a softint handler ========== 1546 */ 1547 1548 #undef RGE_DBG 1549 #define RGE_DBG RGE_DBG_FACT /* debug flag for this code */ 1550 1551 static boolean_t rge_factotum_link_check(rge_t *rgep); 1552 #pragma no_inline(rge_factotum_link_check) 1553 1554 static boolean_t 1555 rge_factotum_link_check(rge_t *rgep) 1556 { 1557 uint8_t media_status; 1558 int32_t link; 1559 1560 media_status = rge_reg_get8(rgep, PHY_STATUS_REG); 1561 link = (media_status & PHY_STATUS_LINK_UP) ? 1562 LINK_STATE_UP : LINK_STATE_DOWN; 1563 if (rgep->param_link_up != link) { 1564 /* 1565 * Link change. 1566 */ 1567 rgep->param_link_up = link; 1568 1569 if (link == LINK_STATE_UP) { 1570 if (media_status & PHY_STATUS_1000MF) { 1571 rgep->param_link_speed = RGE_SPEED_1000M; 1572 rgep->param_link_duplex = LINK_DUPLEX_FULL; 1573 } else { 1574 rgep->param_link_speed = 1575 (media_status & PHY_STATUS_100M) ? 1576 RGE_SPEED_100M : RGE_SPEED_10M; 1577 rgep->param_link_duplex = 1578 (media_status & PHY_STATUS_DUPLEX_FULL) ? 1579 LINK_DUPLEX_FULL : LINK_DUPLEX_HALF; 1580 } 1581 } 1582 return (B_TRUE); 1583 } 1584 return (B_FALSE); 1585 } 1586 1587 /* 1588 * Factotum routine to check for Tx stall, using the 'watchdog' counter 1589 */ 1590 static boolean_t rge_factotum_stall_check(rge_t *rgep); 1591 #pragma no_inline(rge_factotum_stall_check) 1592 1593 static boolean_t 1594 rge_factotum_stall_check(rge_t *rgep) 1595 { 1596 uint32_t dogval; 1597 1598 ASSERT(mutex_owned(rgep->genlock)); 1599 1600 /* 1601 * Specific check for RX stall ... 
1602 */ 1603 rgep->rx_fifo_ovf <<= 1; 1604 if (rgep->rx_fifo_ovf > rge_rx_watchdog_count) { 1605 RGE_REPORT((rgep, "rx_hang detected")); 1606 return (B_TRUE); 1607 } 1608 1609 /* 1610 * Specific check for Tx stall ... 1611 * 1612 * The 'watchdog' counter is incremented whenever a packet 1613 * is queued, reset to 1 when some (but not all) buffers 1614 * are reclaimed, reset to 0 (disabled) when all buffers 1615 * are reclaimed, and shifted left here. If it exceeds the 1616 * threshold value, the chip is assumed to have stalled and 1617 * is put into the ERROR state. The factotum will then reset 1618 * it on the next pass. 1619 * 1620 * All of which should ensure that we don't get into a state 1621 * where packets are left pending indefinitely! 1622 */ 1623 if (rgep->resched_needed) 1624 (void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL); 1625 dogval = rge_atomic_shl32(&rgep->watchdog, 1); 1626 if (dogval < rge_watchdog_count) 1627 return (B_FALSE); 1628 1629 RGE_REPORT((rgep, "Tx stall detected, watchdog code 0x%x", dogval)); 1630 return (B_TRUE); 1631 1632 } 1633 1634 /* 1635 * The factotum is woken up when there's something to do that we'd rather 1636 * not do from inside a hardware interrupt handler or high-level cyclic. 
 * Its two main tasks are:
 *	reset & restart the chip after an error
 *	check the link status whenever necessary
 */
uint_t rge_chip_factotum(caddr_t arg1, caddr_t arg2);
#pragma	no_inline(rge_chip_factotum)

uint_t
rge_chip_factotum(caddr_t arg1, caddr_t arg2)
{
	rge_t *rgep;
	uint_t result;
	boolean_t error;
	boolean_t linkchg;

	rgep = (rge_t *)arg1;
	_NOTE(ARGUNUSED(arg2))

	/* spurious wakeup: nothing pending for us */
	if (rgep->factotum_flag == 0)
		return (DDI_INTR_UNCLAIMED);

	rgep->factotum_flag = 0;
	result = DDI_INTR_CLAIMED;
	error = B_FALSE;
	linkchg = B_FALSE;

	mutex_enter(rgep->genlock);
	switch (rgep->rge_chip_state) {
	default:
		break;

	case RGE_CHIP_RUNNING:
		linkchg = rge_factotum_link_check(rgep);
		error = rge_factotum_stall_check(rgep);
		break;

	case RGE_CHIP_ERROR:
		error = B_TRUE;
		break;

	case RGE_CHIP_FAULT:
		/*
		 * Fault detected, time to reset ...
		 */
		if (rge_autorecover) {
			RGE_REPORT((rgep, "automatic recovery activated"));
			rge_restart(rgep);
		}
		break;
	}

	/*
	 * If an error is detected, stop the chip now, marking it as
	 * faulty, so that it will be reset next time through ...
	 */
	if (error)
		rge_chip_stop(rgep, B_TRUE);
	mutex_exit(rgep->genlock);

	/*
	 * If the link state changed, tell the world about it.
	 * Note: can't do this while still holding the mutex.
	 */
	if (linkchg)
		mac_link_update(rgep->mh, rgep->param_link_up);

	return (result);
}

/*
 * High-level cyclic handler
 *
 * This routine schedules a (low-level) softint callback to the
 * factotum, and prods the chip to update the status block (which
 * will cause a hardware interrupt when complete).
1712 */ 1713 void rge_chip_cyclic(void *arg); 1714 #pragma no_inline(rge_chip_cyclic) 1715 1716 void 1717 rge_chip_cyclic(void *arg) 1718 { 1719 rge_t *rgep; 1720 1721 rgep = arg; 1722 1723 switch (rgep->rge_chip_state) { 1724 default: 1725 return; 1726 1727 case RGE_CHIP_RUNNING: 1728 rge_phy_check(rgep); 1729 if (rgep->tx_free < RGE_SEND_SLOTS) 1730 rge_send_recycle(rgep); 1731 break; 1732 1733 case RGE_CHIP_FAULT: 1734 case RGE_CHIP_ERROR: 1735 break; 1736 } 1737 1738 rge_wake_factotum(rgep); 1739 } 1740 1741 1742 /* 1743 * ========== Ioctl subfunctions ========== 1744 */ 1745 1746 #undef RGE_DBG 1747 #define RGE_DBG RGE_DBG_PPIO /* debug flag for this code */ 1748 1749 #if RGE_DEBUGGING || RGE_DO_PPIO 1750 1751 static void rge_chip_peek_cfg(rge_t *rgep, rge_peekpoke_t *ppd); 1752 #pragma no_inline(rge_chip_peek_cfg) 1753 1754 static void 1755 rge_chip_peek_cfg(rge_t *rgep, rge_peekpoke_t *ppd) 1756 { 1757 uint64_t regval; 1758 uint64_t regno; 1759 1760 RGE_TRACE(("rge_chip_peek_cfg($%p, $%p)", 1761 (void *)rgep, (void *)ppd)); 1762 1763 regno = ppd->pp_acc_offset; 1764 1765 switch (ppd->pp_acc_size) { 1766 case 1: 1767 regval = pci_config_get8(rgep->cfg_handle, regno); 1768 break; 1769 1770 case 2: 1771 regval = pci_config_get16(rgep->cfg_handle, regno); 1772 break; 1773 1774 case 4: 1775 regval = pci_config_get32(rgep->cfg_handle, regno); 1776 break; 1777 1778 case 8: 1779 regval = pci_config_get64(rgep->cfg_handle, regno); 1780 break; 1781 } 1782 1783 ppd->pp_acc_data = regval; 1784 } 1785 1786 static void rge_chip_poke_cfg(rge_t *rgep, rge_peekpoke_t *ppd); 1787 #pragma no_inline(rge_chip_poke_cfg) 1788 1789 static void 1790 rge_chip_poke_cfg(rge_t *rgep, rge_peekpoke_t *ppd) 1791 { 1792 uint64_t regval; 1793 uint64_t regno; 1794 1795 RGE_TRACE(("rge_chip_poke_cfg($%p, $%p)", 1796 (void *)rgep, (void *)ppd)); 1797 1798 regno = ppd->pp_acc_offset; 1799 regval = ppd->pp_acc_data; 1800 1801 switch (ppd->pp_acc_size) { 1802 case 1: 1803 
pci_config_put8(rgep->cfg_handle, regno, regval); 1804 break; 1805 1806 case 2: 1807 pci_config_put16(rgep->cfg_handle, regno, regval); 1808 break; 1809 1810 case 4: 1811 pci_config_put32(rgep->cfg_handle, regno, regval); 1812 break; 1813 1814 case 8: 1815 pci_config_put64(rgep->cfg_handle, regno, regval); 1816 break; 1817 } 1818 } 1819 1820 static void rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd); 1821 #pragma no_inline(rge_chip_peek_reg) 1822 1823 static void 1824 rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd) 1825 { 1826 uint64_t regval; 1827 void *regaddr; 1828 1829 RGE_TRACE(("rge_chip_peek_reg($%p, $%p)", 1830 (void *)rgep, (void *)ppd)); 1831 1832 regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset); 1833 1834 switch (ppd->pp_acc_size) { 1835 case 1: 1836 regval = ddi_get8(rgep->io_handle, regaddr); 1837 break; 1838 1839 case 2: 1840 regval = ddi_get16(rgep->io_handle, regaddr); 1841 break; 1842 1843 case 4: 1844 regval = ddi_get32(rgep->io_handle, regaddr); 1845 break; 1846 1847 case 8: 1848 regval = ddi_get64(rgep->io_handle, regaddr); 1849 break; 1850 } 1851 1852 ppd->pp_acc_data = regval; 1853 } 1854 1855 static void rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd); 1856 #pragma no_inline(rge_chip_peek_reg) 1857 1858 static void 1859 rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd) 1860 { 1861 uint64_t regval; 1862 void *regaddr; 1863 1864 RGE_TRACE(("rge_chip_poke_reg($%p, $%p)", 1865 (void *)rgep, (void *)ppd)); 1866 1867 regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset); 1868 regval = ppd->pp_acc_data; 1869 1870 switch (ppd->pp_acc_size) { 1871 case 1: 1872 ddi_put8(rgep->io_handle, regaddr, regval); 1873 break; 1874 1875 case 2: 1876 ddi_put16(rgep->io_handle, regaddr, regval); 1877 break; 1878 1879 case 4: 1880 ddi_put32(rgep->io_handle, regaddr, regval); 1881 break; 1882 1883 case 8: 1884 ddi_put64(rgep->io_handle, regaddr, regval); 1885 break; 1886 } 1887 } 1888 1889 static void rge_chip_peek_mii(rge_t *rgep, rge_peekpoke_t *ppd); 1890 
#pragma no_inline(rge_chip_peek_mii) 1891 1892 static void 1893 rge_chip_peek_mii(rge_t *rgep, rge_peekpoke_t *ppd) 1894 { 1895 RGE_TRACE(("rge_chip_peek_mii($%p, $%p)", 1896 (void *)rgep, (void *)ppd)); 1897 1898 ppd->pp_acc_data = rge_mii_get16(rgep, ppd->pp_acc_offset/2); 1899 } 1900 1901 static void rge_chip_poke_mii(rge_t *rgep, rge_peekpoke_t *ppd); 1902 #pragma no_inline(rge_chip_poke_mii) 1903 1904 static void 1905 rge_chip_poke_mii(rge_t *rgep, rge_peekpoke_t *ppd) 1906 { 1907 RGE_TRACE(("rge_chip_poke_mii($%p, $%p)", 1908 (void *)rgep, (void *)ppd)); 1909 1910 rge_mii_put16(rgep, ppd->pp_acc_offset/2, ppd->pp_acc_data); 1911 } 1912 1913 static void rge_chip_peek_mem(rge_t *rgep, rge_peekpoke_t *ppd); 1914 #pragma no_inline(rge_chip_peek_mem) 1915 1916 static void 1917 rge_chip_peek_mem(rge_t *rgep, rge_peekpoke_t *ppd) 1918 { 1919 uint64_t regval; 1920 void *vaddr; 1921 1922 RGE_TRACE(("rge_chip_peek_rge($%p, $%p)", 1923 (void *)rgep, (void *)ppd)); 1924 1925 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 1926 1927 switch (ppd->pp_acc_size) { 1928 case 1: 1929 regval = *(uint8_t *)vaddr; 1930 break; 1931 1932 case 2: 1933 regval = *(uint16_t *)vaddr; 1934 break; 1935 1936 case 4: 1937 regval = *(uint32_t *)vaddr; 1938 break; 1939 1940 case 8: 1941 regval = *(uint64_t *)vaddr; 1942 break; 1943 } 1944 1945 RGE_DEBUG(("rge_chip_peek_mem($%p, $%p) peeked 0x%llx from $%p", 1946 (void *)rgep, (void *)ppd, regval, vaddr)); 1947 1948 ppd->pp_acc_data = regval; 1949 } 1950 1951 static void rge_chip_poke_mem(rge_t *rgep, rge_peekpoke_t *ppd); 1952 #pragma no_inline(rge_chip_poke_mem) 1953 1954 static void 1955 rge_chip_poke_mem(rge_t *rgep, rge_peekpoke_t *ppd) 1956 { 1957 uint64_t regval; 1958 void *vaddr; 1959 1960 RGE_TRACE(("rge_chip_poke_mem($%p, $%p)", 1961 (void *)rgep, (void *)ppd)); 1962 1963 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 1964 regval = ppd->pp_acc_data; 1965 1966 RGE_DEBUG(("rge_chip_poke_mem($%p, $%p) poking 0x%llx at $%p", 1967 (void 
*)rgep, (void *)ppd, regval, vaddr)); 1968 1969 switch (ppd->pp_acc_size) { 1970 case 1: 1971 *(uint8_t *)vaddr = (uint8_t)regval; 1972 break; 1973 1974 case 2: 1975 *(uint16_t *)vaddr = (uint16_t)regval; 1976 break; 1977 1978 case 4: 1979 *(uint32_t *)vaddr = (uint32_t)regval; 1980 break; 1981 1982 case 8: 1983 *(uint64_t *)vaddr = (uint64_t)regval; 1984 break; 1985 } 1986 } 1987 1988 static enum ioc_reply rge_pp_ioctl(rge_t *rgep, int cmd, mblk_t *mp, 1989 struct iocblk *iocp); 1990 #pragma no_inline(rge_pp_ioctl) 1991 1992 static enum ioc_reply 1993 rge_pp_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp) 1994 { 1995 void (*ppfn)(rge_t *rgep, rge_peekpoke_t *ppd); 1996 rge_peekpoke_t *ppd; 1997 dma_area_t *areap; 1998 uint64_t sizemask; 1999 uint64_t mem_va; 2000 uint64_t maxoff; 2001 boolean_t peek; 2002 2003 switch (cmd) { 2004 default: 2005 /* NOTREACHED */ 2006 rge_error(rgep, "rge_pp_ioctl: invalid cmd 0x%x", cmd); 2007 return (IOC_INVAL); 2008 2009 case RGE_PEEK: 2010 peek = B_TRUE; 2011 break; 2012 2013 case RGE_POKE: 2014 peek = B_FALSE; 2015 break; 2016 } 2017 2018 /* 2019 * Validate format of ioctl 2020 */ 2021 if (iocp->ioc_count != sizeof (rge_peekpoke_t)) 2022 return (IOC_INVAL); 2023 if (mp->b_cont == NULL) 2024 return (IOC_INVAL); 2025 ppd = (rge_peekpoke_t *)mp->b_cont->b_rptr; 2026 2027 /* 2028 * Validate request parameters 2029 */ 2030 switch (ppd->pp_acc_space) { 2031 default: 2032 return (IOC_INVAL); 2033 2034 case RGE_PP_SPACE_CFG: 2035 /* 2036 * Config space 2037 */ 2038 sizemask = 8|4|2|1; 2039 mem_va = 0; 2040 maxoff = PCI_CONF_HDR_SIZE; 2041 ppfn = peek ? rge_chip_peek_cfg : rge_chip_poke_cfg; 2042 break; 2043 2044 case RGE_PP_SPACE_REG: 2045 /* 2046 * Memory-mapped I/O space 2047 */ 2048 sizemask = 8|4|2|1; 2049 mem_va = 0; 2050 maxoff = RGE_REGISTER_MAX; 2051 ppfn = peek ? 
rge_chip_peek_reg : rge_chip_poke_reg; 2052 break; 2053 2054 case RGE_PP_SPACE_MII: 2055 /* 2056 * PHY's MII registers 2057 * NB: all PHY registers are two bytes, but the 2058 * addresses increment in ones (word addressing). 2059 * So we scale the address here, then undo the 2060 * transformation inside the peek/poke functions. 2061 */ 2062 ppd->pp_acc_offset *= 2; 2063 sizemask = 2; 2064 mem_va = 0; 2065 maxoff = (MII_MAXREG+1)*2; 2066 ppfn = peek ? rge_chip_peek_mii : rge_chip_poke_mii; 2067 break; 2068 2069 case RGE_PP_SPACE_RGE: 2070 /* 2071 * RGE data structure! 2072 */ 2073 sizemask = 8|4|2|1; 2074 mem_va = (uintptr_t)rgep; 2075 maxoff = sizeof (*rgep); 2076 ppfn = peek ? rge_chip_peek_mem : rge_chip_poke_mem; 2077 break; 2078 2079 case RGE_PP_SPACE_STATISTICS: 2080 case RGE_PP_SPACE_TXDESC: 2081 case RGE_PP_SPACE_TXBUFF: 2082 case RGE_PP_SPACE_RXDESC: 2083 case RGE_PP_SPACE_RXBUFF: 2084 /* 2085 * Various DMA_AREAs 2086 */ 2087 switch (ppd->pp_acc_space) { 2088 case RGE_PP_SPACE_TXDESC: 2089 areap = &rgep->dma_area_txdesc; 2090 break; 2091 case RGE_PP_SPACE_RXDESC: 2092 areap = &rgep->dma_area_rxdesc; 2093 break; 2094 case RGE_PP_SPACE_STATISTICS: 2095 areap = &rgep->dma_area_stats; 2096 break; 2097 } 2098 2099 sizemask = 8|4|2|1; 2100 mem_va = (uintptr_t)areap->mem_va; 2101 maxoff = areap->alength; 2102 ppfn = peek ? rge_chip_peek_mem : rge_chip_poke_mem; 2103 break; 2104 } 2105 2106 switch (ppd->pp_acc_size) { 2107 default: 2108 return (IOC_INVAL); 2109 2110 case 8: 2111 case 4: 2112 case 2: 2113 case 1: 2114 if ((ppd->pp_acc_size & sizemask) == 0) 2115 return (IOC_INVAL); 2116 break; 2117 } 2118 2119 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0) 2120 return (IOC_INVAL); 2121 2122 if (ppd->pp_acc_offset >= maxoff) 2123 return (IOC_INVAL); 2124 2125 if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff) 2126 return (IOC_INVAL); 2127 2128 /* 2129 * All OK - go do it! 2130 */ 2131 ppd->pp_acc_offset += mem_va; 2132 (*ppfn)(rgep, ppd); 2133 return (peek ? 
IOC_REPLY : IOC_ACK); 2134 } 2135 2136 static enum ioc_reply rge_diag_ioctl(rge_t *rgep, int cmd, mblk_t *mp, 2137 struct iocblk *iocp); 2138 #pragma no_inline(rge_diag_ioctl) 2139 2140 static enum ioc_reply 2141 rge_diag_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp) 2142 { 2143 ASSERT(mutex_owned(rgep->genlock)); 2144 2145 switch (cmd) { 2146 default: 2147 /* NOTREACHED */ 2148 rge_error(rgep, "rge_diag_ioctl: invalid cmd 0x%x", cmd); 2149 return (IOC_INVAL); 2150 2151 case RGE_DIAG: 2152 /* 2153 * Currently a no-op 2154 */ 2155 return (IOC_ACK); 2156 2157 case RGE_PEEK: 2158 case RGE_POKE: 2159 return (rge_pp_ioctl(rgep, cmd, mp, iocp)); 2160 2161 case RGE_PHY_RESET: 2162 return (IOC_RESTART_ACK); 2163 2164 case RGE_SOFT_RESET: 2165 case RGE_HARD_RESET: 2166 /* 2167 * Reset and reinitialise the 570x hardware 2168 */ 2169 rge_restart(rgep); 2170 return (IOC_ACK); 2171 } 2172 2173 /* NOTREACHED */ 2174 } 2175 2176 #endif /* RGE_DEBUGGING || RGE_DO_PPIO */ 2177 2178 static enum ioc_reply rge_mii_ioctl(rge_t *rgep, int cmd, mblk_t *mp, 2179 struct iocblk *iocp); 2180 #pragma no_inline(rge_mii_ioctl) 2181 2182 static enum ioc_reply 2183 rge_mii_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp) 2184 { 2185 struct rge_mii_rw *miirwp; 2186 2187 /* 2188 * Validate format of ioctl 2189 */ 2190 if (iocp->ioc_count != sizeof (struct rge_mii_rw)) 2191 return (IOC_INVAL); 2192 if (mp->b_cont == NULL) 2193 return (IOC_INVAL); 2194 miirwp = (struct rge_mii_rw *)mp->b_cont->b_rptr; 2195 2196 /* 2197 * Validate request parameters ... 
2198 */ 2199 if (miirwp->mii_reg > MII_MAXREG) 2200 return (IOC_INVAL); 2201 2202 switch (cmd) { 2203 default: 2204 /* NOTREACHED */ 2205 rge_error(rgep, "rge_mii_ioctl: invalid cmd 0x%x", cmd); 2206 return (IOC_INVAL); 2207 2208 case RGE_MII_READ: 2209 miirwp->mii_data = rge_mii_get16(rgep, miirwp->mii_reg); 2210 return (IOC_REPLY); 2211 2212 case RGE_MII_WRITE: 2213 rge_mii_put16(rgep, miirwp->mii_reg, miirwp->mii_data); 2214 return (IOC_ACK); 2215 } 2216 2217 /* NOTREACHED */ 2218 } 2219 2220 enum ioc_reply rge_chip_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, 2221 struct iocblk *iocp); 2222 #pragma no_inline(rge_chip_ioctl) 2223 2224 enum ioc_reply 2225 rge_chip_ioctl(rge_t *rgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp) 2226 { 2227 int cmd; 2228 2229 RGE_TRACE(("rge_chip_ioctl($%p, $%p, $%p, $%p)", 2230 (void *)rgep, (void *)wq, (void *)mp, (void *)iocp)); 2231 2232 ASSERT(mutex_owned(rgep->genlock)); 2233 2234 cmd = iocp->ioc_cmd; 2235 switch (cmd) { 2236 default: 2237 /* NOTREACHED */ 2238 rge_error(rgep, "rge_chip_ioctl: invalid cmd 0x%x", cmd); 2239 return (IOC_INVAL); 2240 2241 case RGE_DIAG: 2242 case RGE_PEEK: 2243 case RGE_POKE: 2244 case RGE_PHY_RESET: 2245 case RGE_SOFT_RESET: 2246 case RGE_HARD_RESET: 2247 #if RGE_DEBUGGING || RGE_DO_PPIO 2248 return (rge_diag_ioctl(rgep, cmd, mp, iocp)); 2249 #else 2250 return (IOC_INVAL); 2251 #endif /* RGE_DEBUGGING || RGE_DO_PPIO */ 2252 2253 case RGE_MII_READ: 2254 case RGE_MII_WRITE: 2255 return (rge_mii_ioctl(rgep, cmd, mp, iocp)); 2256 2257 } 2258 2259 /* NOTREACHED */ 2260 } 2261