/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "nge.h"
static uint32_t nge_watchdog_count = 1 << 29;
extern boolean_t nge_enable_msi;
static void nge_sync_mac_modes(nge_t *);

#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_CHIP

/*
 * Operating register get/set access routines
 */
uint8_t nge_reg_get8(nge_t *ngep, nge_regno_t regno);
#pragma	inline(nge_reg_get8)

uint8_t
nge_reg_get8(nge_t *ngep, nge_regno_t regno)
{
	NGE_TRACE(("nge_reg_get8($%p, 0x%lx)", (void *)ngep, regno));

	return (ddi_get8(ngep->io_handle, PIO_ADDR(ngep, regno)));
}

void nge_reg_put8(nge_t *ngep, nge_regno_t regno, uint8_t data);
#pragma	inline(nge_reg_put8)

void
nge_reg_put8(nge_t *ngep, nge_regno_t regno, uint8_t data)
{
	NGE_TRACE(("nge_reg_put8($%p, 0x%lx, 0x%x)",
	    (void *)ngep, regno, data));
	ddi_put8(ngep->io_handle, PIO_ADDR(ngep, regno), data);
}

uint16_t nge_reg_get16(nge_t *ngep, nge_regno_t regno);
#pragma	inline(nge_reg_get16)

uint16_t
nge_reg_get16(nge_t *ngep, nge_regno_t regno)
{
	NGE_TRACE(("nge_reg_get16($%p, 0x%lx)", (void *)ngep, regno));
	return (ddi_get16(ngep->io_handle, PIO_ADDR(ngep, regno)));
}

void nge_reg_put16(nge_t *ngep, nge_regno_t regno, uint16_t data);
#pragma	inline(nge_reg_put16)

void
nge_reg_put16(nge_t *ngep, nge_regno_t regno, uint16_t data)
{
	NGE_TRACE(("nge_reg_put16($%p, 0x%lx, 0x%x)",
	    (void *)ngep, regno, data));
	ddi_put16(ngep->io_handle, PIO_ADDR(ngep, regno), data);
}

uint32_t nge_reg_get32(nge_t *ngep, nge_regno_t regno);
#pragma	inline(nge_reg_get32)

uint32_t
nge_reg_get32(nge_t *ngep, nge_regno_t regno)
{
	NGE_TRACE(("nge_reg_get32($%p, 0x%lx)", (void *)ngep, regno));
	return (ddi_get32(ngep->io_handle, PIO_ADDR(ngep, regno)));
}

void nge_reg_put32(nge_t *ngep, nge_regno_t regno, uint32_t data);
#pragma	inline(nge_reg_put32)

void
nge_reg_put32(nge_t *ngep, nge_regno_t regno, uint32_t data)
{
	NGE_TRACE(("nge_reg_put32($%p, 0x%lx, 0x%x)",
	    (void *)ngep, regno, data));
	ddi_put32(ngep->io_handle, PIO_ADDR(ngep, regno), data);
}


static int nge_chip_peek_cfg(nge_t *ngep, nge_peekpoke_t *ppd);
#pragma	no_inline(nge_chip_peek_cfg)

static int
nge_chip_peek_cfg(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	uint64_t regval;
	uint64_t regno;

	NGE_TRACE(("nge_chip_peek_cfg($%p, $%p)",
	    (void *)ngep, (void *)ppd));

	err = DDI_SUCCESS;
	regno = ppd->pp_acc_offset;

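	/*
	 * Dispatch on the requested access width.  The width has already
	 * been validated against this space's size mask in nge_pp_ioctl(),
	 * so no default case is needed here.
	 */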
	switch (ppd->pp_acc_size) {
	case 1:
		regval = pci_config_get8(ngep->cfg_handle, regno);
		break;

	case 2:
		regval = pci_config_get16(ngep->cfg_handle, regno);
		break;

	case 4:
		regval = pci_config_get32(ngep->cfg_handle, regno);
		break;

	case 8:
		regval = pci_config_get64(ngep->cfg_handle, regno);
		break;
	}
	ppd->pp_acc_data = regval;
	return (err);
}

static int nge_chip_poke_cfg(nge_t *ngep, nge_peekpoke_t *ppd);

static int
nge_chip_poke_cfg(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	uint64_t regval;
	uint64_t regno;

	NGE_TRACE(("nge_chip_poke_cfg($%p, $%p)",
	    (void *)ngep, (void *)ppd));

	err = DDI_SUCCESS;
	regno = ppd->pp_acc_offset;
	regval = ppd->pp_acc_data;

	switch (ppd->pp_acc_size) {
	case 1:
		pci_config_put8(ngep->cfg_handle, regno, regval);
		break;

	case 2:
		pci_config_put16(ngep->cfg_handle, regno, regval);
		break;

	case 4:
		pci_config_put32(ngep->cfg_handle, regno, regval);
		break;

	case 8:
		pci_config_put64(ngep->cfg_handle, regno, regval);
		break;
	}

	return (err);
}

static int nge_chip_peek_reg(nge_t *ngep, nge_peekpoke_t *ppd);

static int
nge_chip_peek_reg(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	uint64_t regval;
	void *regaddr;

	NGE_TRACE(("nge_chip_peek_reg($%p, $%p)",
	    (void *)ngep, (void *)ppd));

	err = DDI_SUCCESS;
	regaddr = PIO_ADDR(ngep, ppd->pp_acc_offset);

	switch (ppd->pp_acc_size) {
	case 1:
		regval = ddi_get8(ngep->io_handle, regaddr);
		break;

	case 2:
		regval = ddi_get16(ngep->io_handle, regaddr);
		break;

	case 4:
		regval = ddi_get32(ngep->io_handle, regaddr);
		break;

	case 8:
		regval = ddi_get64(ngep->io_handle, regaddr);
		break;

	default:
		regval = 0x0ull;
		break;
	}
	ppd->pp_acc_data = regval;
	return (err);
}

static int nge_chip_poke_reg(nge_t *ngep, nge_peekpoke_t *ppd);

static int
nge_chip_poke_reg(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	uint64_t regval;
	void *regaddr;

	NGE_TRACE(("nge_chip_poke_reg($%p, $%p)",
	    (void *)ngep, (void *)ppd));

	err = DDI_SUCCESS;
	regaddr = PIO_ADDR(ngep, ppd->pp_acc_offset);
	regval = ppd->pp_acc_data;

	switch (ppd->pp_acc_size) {
	case 1:
		ddi_put8(ngep->io_handle, regaddr, regval);
		break;

	case 2:
		ddi_put16(ngep->io_handle, regaddr, regval);
		break;

	case 4:
		ddi_put32(ngep->io_handle, regaddr, regval);
		break;

	case 8:
		ddi_put64(ngep->io_handle, regaddr, regval);
		break;
	}
	return (err);
}

static int nge_chip_peek_mii(nge_t *ngep, nge_peekpoke_t *ppd);
#pragma	no_inline(nge_chip_peek_mii)

static int
nge_chip_peek_mii(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;

	err = DDI_SUCCESS;
	ppd->pp_acc_data = nge_mii_get16(ngep, ppd->pp_acc_offset/2);
	return (err);
}

static int nge_chip_poke_mii(nge_t *ngep, nge_peekpoke_t *ppd);
#pragma	no_inline(nge_chip_poke_mii)

static int
nge_chip_poke_mii(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	err = DDI_SUCCESS;
	nge_mii_put16(ngep, ppd->pp_acc_offset/2, ppd->pp_acc_data);
	return (err);
}
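
/*
 * Note that the MII peek/poke routines above take the byte offset
 * supplied through the peek/poke ioctl and convert it to a 16-bit
 * register index, hence the division by two.
 */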

/*
 * Basic SEEPROM get/set access routine
 *
 * This uses the chip's SEEPROM auto-access method, controlled by the
 * Serial EEPROM Address/Data Registers at 0x504, so the CPU
 * doesn't have to fiddle with the individual bits.
 *
 * The caller should hold <genlock> and *also* have already acquired
 * the right to access the SEEPROM.
 *
 * Return value:
 *	DDI_SUCCESS on success,
 *	DDI_FAILURE on access timeout (maybe retryable: the device may
 *	just be busy).
 *
 * <*dp> is an input to a SEEPROM_ACCESS_WRITE operation, or an output
 * from a (successful) SEEPROM_ACCESS_READ.
 */

static int
nge_seeprom_access(nge_t *ngep, uint32_t cmd, nge_regno_t addr, uint16_t *dp)
{
	uint32_t tries;
	nge_ep_cmd cmd_reg;
	nge_ep_data data_reg;

	NGE_TRACE(("nge_seeprom_access($%p, %d, %x, $%p)",
	    (void *)ngep, cmd, addr, (void *)dp));

	ASSERT(mutex_owned(ngep->genlock));

	/*
	 * Check that there's no command in progress.
	 *
	 * Note: this *shouldn't* ever find a command in progress, because
	 * we already hold the <genlock> mutex and the caller has already
	 * acquired the right to access the (shared) SEEPROM, so there
	 * should be no conflict with the chip's internal firmware or with
	 * another process.  This is just a final consistency check: the
	 * status bit should already indicate SEEPROM_READY.
	 */
	cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	for (tries = 0; tries < 30; tries++) {
		if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
			break;
		drv_usecwait(10);
		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	}

	/*
	 * This should not happen.  If it does, we have to restart the
	 * EEPROM state machine.
	 */
	if (tries == 30) {
		cmd_reg.cmd_bits.sts = SEEPROM_READY;
		nge_reg_put32(ngep, NGE_EP_CMD, cmd_reg.cmd_val);
		drv_usecwait(10);
		/*
		 * Poll the status bit to make sure the EEPROM is ready.
		 */
		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
		for (tries = 0; tries < 30; tries++) {
			if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
				break;
			drv_usecwait(10);
			cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
		}
	}

	/*
	 * Assemble the command ...
	 */
	cmd_reg.cmd_bits.addr = (uint32_t)addr;
	cmd_reg.cmd_bits.cmd = cmd;
	cmd_reg.cmd_bits.sts = 0;

	nge_reg_put32(ngep, NGE_EP_CMD, cmd_reg.cmd_val);
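
	/*
	 * As with the pre-check above, the wait below polls the status
	 * bit for at most 30 * 10us before giving up on the SEEPROM
	 * state machine.
	 */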
	/*
	 * Polling whether the access is successful.
	 */
	cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	for (tries = 0; tries < 30; tries++) {
		if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
			break;
		drv_usecwait(10);
		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	}

	if (tries == 30) {
		nge_report(ngep, NGE_HW_ROM);
		return (DDI_FAILURE);
	}
	switch (cmd) {
	default:
	case SEEPROM_CMD_WRITE_ENABLE:
	case SEEPROM_CMD_ERASE:
	case SEEPROM_CMD_ERALSE_ALL:
	case SEEPROM_CMD_WRITE_DIS:
		break;

	case SEEPROM_CMD_READ:
		data_reg.data_val = nge_reg_get32(ngep, NGE_EP_DATA);
		*dp = data_reg.data_bits.data;
		break;

	case SEEPROM_CMD_WRITE:
		data_reg.data_val = nge_reg_get32(ngep, NGE_EP_DATA);
		data_reg.data_bits.data = *dp;
		nge_reg_put32(ngep, NGE_EP_DATA, data_reg.data_val);
		break;
	}

	return (DDI_SUCCESS);
}


static int
nge_chip_peek_seeprom(nge_t *ngep, nge_peekpoke_t *ppd)
{
	uint16_t data;
	int err;

	err = nge_seeprom_access(ngep, SEEPROM_CMD_READ,
	    ppd->pp_acc_offset, &data);
	ppd->pp_acc_data = data;
	return (err);
}

static int
nge_chip_poke_seeprom(nge_t *ngep, nge_peekpoke_t *ppd)
{
	uint16_t data;
	int err;

	data = ppd->pp_acc_data;
	err = nge_seeprom_access(ngep, SEEPROM_CMD_WRITE,
	    ppd->pp_acc_offset, &data);
	return (err);
}

/*
 * nge_init_dev_spec_param() -- set the device-specific capabilities
 * (MSI/MSI-X, VLAN, pause frames, jumbo frames, checksum offload,
 * descriptor type and ring sizes) according to the chip variant.
 */
void
nge_init_dev_spec_param(nge_t *ngep)
{
	nge_dev_spec_param_t *dev_param_p;
	chip_info_t *infop;

	dev_param_p = &ngep->dev_spec_param;
	infop = (chip_info_t *)&ngep->chipinfo;

	switch (infop->device) {
	case DEVICE_ID_NF3_E6:
	case DEVICE_ID_NF3_DF:
	case DEVICE_ID_MCP04_37:
	case DEVICE_ID_MCP04_38:
		dev_param_p->msi = B_FALSE;
		dev_param_p->msi_x = B_FALSE;
		dev_param_p->vlan = B_FALSE;
		dev_param_p->advanced_pm = B_FALSE;
		dev_param_p->mac_addr_order = B_FALSE;
		dev_param_p->tx_pause_frame = B_FALSE;
		dev_param_p->rx_pause_frame = B_FALSE;
		dev_param_p->jumbo = B_FALSE;
		dev_param_p->tx_rx_64byte = B_FALSE;
		dev_param_p->rx_hw_checksum = B_FALSE;
		dev_param_p->tx_hw_checksum = 0;
		dev_param_p->desc_type = DESC_OFFLOAD;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
		dev_param_p->nge_split = NGE_SPLIT_32;
		break;

	case DEVICE_ID_CK804_56:
	case DEVICE_ID_CK804_57:
		dev_param_p->msi = B_TRUE;
		dev_param_p->msi_x = B_TRUE;
		dev_param_p->vlan = B_FALSE;
		dev_param_p->advanced_pm = B_FALSE;
		dev_param_p->mac_addr_order = B_FALSE;
		dev_param_p->tx_pause_frame = B_FALSE;
		dev_param_p->rx_pause_frame = B_TRUE;
		dev_param_p->jumbo = B_TRUE;
		dev_param_p->tx_rx_64byte = B_FALSE;
		dev_param_p->rx_hw_checksum = B_TRUE;
		dev_param_p->tx_hw_checksum = HCKSUM_IPHDRCKSUM;
		dev_param_p->desc_type = DESC_HOT;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_3072;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_3072;
		dev_param_p->nge_split = NGE_SPLIT_96;
		break;

	case DEVICE_ID_MCP51_268:
	case DEVICE_ID_MCP51_269:
		dev_param_p->msi = B_FALSE;
		dev_param_p->msi_x = B_FALSE;
		dev_param_p->vlan = B_FALSE;
		dev_param_p->advanced_pm = B_TRUE;
		dev_param_p->mac_addr_order = B_FALSE;
		dev_param_p->tx_pause_frame = B_FALSE;
		dev_param_p->rx_pause_frame = B_FALSE;
		dev_param_p->jumbo = B_FALSE;
		dev_param_p->tx_rx_64byte = B_TRUE;
		dev_param_p->rx_hw_checksum = B_FALSE;
		dev_param_p->tx_hw_checksum = 0;
		dev_param_p->desc_type = DESC_OFFLOAD;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
		dev_param_p->nge_split = NGE_SPLIT_32;
		break;

	case DEVICE_ID_MCP55_372:
	case DEVICE_ID_MCP55_373:
		dev_param_p->msi = B_TRUE;
		dev_param_p->msi_x = B_TRUE;
		dev_param_p->vlan = B_TRUE;
		dev_param_p->advanced_pm = B_TRUE;
		dev_param_p->mac_addr_order = B_FALSE;
		dev_param_p->tx_pause_frame = B_TRUE;
		dev_param_p->rx_pause_frame = B_TRUE;
		dev_param_p->jumbo = B_TRUE;
		dev_param_p->tx_rx_64byte = B_TRUE;
		dev_param_p->rx_hw_checksum = B_TRUE;
		dev_param_p->tx_hw_checksum = HCKSUM_IPHDRCKSUM;
		dev_param_p->desc_type = DESC_HOT;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_3072;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_3072;
		dev_param_p->nge_split = NGE_SPLIT_96;
		break;

	case DEVICE_ID_MCP61_3EE:
	case DEVICE_ID_MCP61_3EF:
		dev_param_p->msi = B_FALSE;
		dev_param_p->msi_x = B_FALSE;
		dev_param_p->vlan = B_FALSE;
		dev_param_p->advanced_pm = B_TRUE;
		dev_param_p->mac_addr_order = B_TRUE;
		dev_param_p->tx_pause_frame = B_FALSE;
		dev_param_p->rx_pause_frame = B_FALSE;
		dev_param_p->jumbo = B_FALSE;
		dev_param_p->tx_rx_64byte = B_TRUE;
		dev_param_p->rx_hw_checksum = B_FALSE;
		dev_param_p->tx_hw_checksum = 0;
		dev_param_p->desc_type = DESC_OFFLOAD;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
		dev_param_p->nge_split = NGE_SPLIT_32;
		break;

	default:
		dev_param_p->msi = B_FALSE;
		dev_param_p->msi_x = B_FALSE;
		dev_param_p->vlan = B_FALSE;
		dev_param_p->advanced_pm = B_FALSE;
		dev_param_p->mac_addr_order = B_FALSE;
		dev_param_p->tx_pause_frame = B_FALSE;
		dev_param_p->rx_pause_frame = B_FALSE;
		dev_param_p->jumbo = B_FALSE;
		dev_param_p->tx_rx_64byte = B_FALSE;
		dev_param_p->rx_hw_checksum = B_FALSE;
		dev_param_p->tx_hw_checksum = 0;
		dev_param_p->desc_type = DESC_OFFLOAD;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
		dev_param_p->nge_split = NGE_SPLIT_32;
		return;
	}
}
/*
 * Perform first-stage chip (re-)initialisation, using only config-space
 * accesses:
 *
 * + Read the vendor/device/revision/subsystem/cache-line-size registers,
 *   returning the data in the structure pointed to by <infop>.
 */
void nge_chip_cfg_init(nge_t *ngep, chip_info_t *infop, boolean_t reset);
#pragma	no_inline(nge_chip_cfg_init)

void
nge_chip_cfg_init(nge_t *ngep, chip_info_t *infop, boolean_t reset)
{
	uint16_t command;
	ddi_acc_handle_t handle;
	nge_interbus_conf interbus_conf;
	nge_msi_mask_conf msi_mask_conf;
	nge_msi_map_cap_conf cap_conf;

	NGE_TRACE(("nge_chip_cfg_init($%p, $%p, %d)",
	    (void *)ngep, (void *)infop, reset));

	/*
	 * save PCI cache line size and subsystem vendor ID
	 *
	 * Read all the config-space registers that characterise the
	 * chip, specifically vendor/device/revision/subsystem vendor
	 * and subsystem device id.
	 */
	handle = ngep->cfg_handle;
	/* read the vendor information only once */
	if (reset == B_FALSE) {
		infop->command = pci_config_get16(handle,
		    PCI_CONF_COMM);
		infop->vendor = pci_config_get16(handle,
		    PCI_CONF_VENID);
		infop->device = pci_config_get16(handle,
		    PCI_CONF_DEVID);
		infop->subven = pci_config_get16(handle,
		    PCI_CONF_SUBVENID);
		infop->subdev = pci_config_get16(handle,
		    PCI_CONF_SUBSYSID);
		infop->class_code = pci_config_get8(handle,
		    PCI_CONF_BASCLASS);
		infop->revision = pci_config_get8(handle,
		    PCI_CONF_REVID);
		infop->clsize = pci_config_get8(handle,
		    PCI_CONF_CACHE_LINESZ);
		infop->latency = pci_config_get8(handle,
		    PCI_CONF_LATENCY_TIMER);
	}
	if (nge_enable_msi) {
		/* Clear the hidden MSI-disable bit so MSI can be used */
		interbus_conf.conf_val = pci_config_get32(handle,
		    PCI_CONF_HT_INTERNAL);
		if ((infop->device == DEVICE_ID_MCP55_373) ||
		    (infop->device == DEVICE_ID_MCP55_372))
			interbus_conf.conf_bits.msix_off = NGE_SET;
		interbus_conf.conf_bits.msi_off = NGE_CLEAR;
		pci_config_put32(handle, PCI_CONF_HT_INTERNAL,
		    interbus_conf.conf_val);

		if ((infop->device == DEVICE_ID_MCP55_373) ||
		    (infop->device == DEVICE_ID_MCP55_372)) {

			/* Clear the per-vector mask-off bits for MCP55 */
			msi_mask_conf.msi_mask_conf_val =
			    pci_config_get32(handle, PCI_CONF_HT_MSI_MASK);
			msi_mask_conf.msi_mask_bits.vec0_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec1_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec2_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec3_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec4_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec5_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec6_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec7_off = NGE_CLEAR;
			pci_config_put32(handle, PCI_CONF_HT_MSI_MASK,
			    msi_mask_conf.msi_mask_conf_val);

			/* Enable the MSI mapping */
			cap_conf.msi_map_cap_conf_val =
			    pci_config_get32(handle, PCI_CONF_HT_MSI_MAP_CAP);
			cap_conf.map_cap_conf_bits.map_en = NGE_SET;
			pci_config_put32(handle, PCI_CONF_HT_MSI_MAP_CAP,
			    cap_conf.msi_map_cap_conf_val);
		}
	} else {
		interbus_conf.conf_val = pci_config_get32(handle,
		    PCI_CONF_HT_INTERNAL);
		interbus_conf.conf_bits.msi_off = NGE_SET;
		pci_config_put32(handle, PCI_CONF_HT_INTERNAL,
		    interbus_conf.conf_val);
	}
	command = infop->command | PCI_COMM_MAE;
	command &= ~PCI_COMM_MEMWR_INVAL;
	command |= PCI_COMM_ME;
	pci_config_put16(handle, PCI_CONF_COMM, command);
	pci_config_put16(handle, PCI_CONF_STAT, ~0);
}

/*
 * nge_chip_stop() -- quiesce the chip: mask all interrupts, stop the
 * DMA engine and the Tx/Rx state machines, and record the new chip
 * state (NGE_CHIP_FAULT or NGE_CHIP_STOPPED).
 */
int
nge_chip_stop(nge_t *ngep, boolean_t fault)
{
	int err;
	uint32_t reg_val;
	uint32_t tries;
	nge_mintr_src mintr_src;
	nge_mii_cs mii_cs;
	nge_rx_poll rx_poll;
	nge_tx_poll tx_poll;
	nge_rx_en rx_en;
	nge_tx_en tx_en;
	nge_tx_sta tx_sta;
	nge_rx_sta rx_sta;
	nge_mode_cntl mode;
	nge_pmu_cntl2 pmu_cntl2;

	NGE_TRACE(("nge_chip_stop($%p, %d)", (void *)ngep, fault));

	err = DDI_SUCCESS;

	/* Clear any pending PHY interrupt */
	mintr_src.src_val = nge_reg_get8(ngep, NGE_MINTR_SRC);
	nge_reg_put8(ngep, NGE_MINTR_SRC, mintr_src.src_val);

	/* Mask all interrupts */
	reg_val = nge_reg_get32(ngep, NGE_INTR_MASK);
	reg_val &= ~NGE_INTR_ALL_EN;
	nge_reg_put32(ngep, NGE_INTR_MASK, reg_val);

	/* Disable auto-polling of phy */
	mii_cs.cs_val = nge_reg_get32(ngep, NGE_MII_CS);
	mii_cs.cs_bits.ap_en = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_MII_CS, mii_cs.cs_val);

	/* Reset buffer management & DMA */
	mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
	mode.mode_bits.dma_dis = NGE_SET;
	mode.mode_bits.desc_type = ngep->desc_mode;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val);

	/* Wait for the DMA engine to report idle (up to 10000 * 10us) */
	for (tries = 0; tries < 10000; tries++) {
		drv_usecwait(10);
		mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
		if (mode.mode_bits.dma_status == NGE_SET)
			break;
	}
	if (tries == 10000) {
		ngep->nge_chip_state = NGE_CHIP_FAULT;
		return (DDI_FAILURE);
	}

	/*
	 * For MCP55, bits 1:31 of NGE_RX_EN and NGE_TX_EN are defined
	 * for use by the SMU.  PXE versions newer than 527 support the
	 * SMU, and bit 24 of NGE_RX_EN/NGE_TX_EN is left set when
	 * leaving PXE to prevent the MAC from winning arbitration to
	 * the main transmit/receive channels.
	 */
	if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {

		/* Disable the Rx state machine */
		nge_reg_put32(ngep, NGE_RX_EN, 0x0);

		/* Disable the Tx state machine */
		nge_reg_put32(ngep, NGE_TX_EN, 0x0);
	} else {

		/* Disable the Rx state machine */
		rx_en.val = nge_reg_get8(ngep, NGE_RX_EN);
		rx_en.bits.rx_en = NGE_CLEAR;
		nge_reg_put8(ngep, NGE_RX_EN, rx_en.val);

		/* Disable the Tx state machine */
		tx_en.val = nge_reg_get8(ngep, NGE_TX_EN);
		tx_en.bits.tx_en = NGE_CLEAR;
		nge_reg_put8(ngep, NGE_TX_EN, tx_en.val);
	}

	/*
	 * Clear the Tx state machine status and make sure the Tx
	 * channel is idle.
	 */
	tx_sta.sta_val = nge_reg_get32(ngep, NGE_TX_STA);
	for (tries = 0; tries < 1000; tries++) {
		if (tx_sta.sta_bits.tx_chan_sta == NGE_CLEAR)
			break;
		drv_usecwait(10);
		tx_sta.sta_val = nge_reg_get32(ngep, NGE_TX_STA);
	}
	if (tries == 1000) {
		ngep->nge_chip_state = NGE_CHIP_FAULT;
		return (DDI_FAILURE);
	}
	nge_reg_put32(ngep, NGE_TX_STA, tx_sta.sta_val);

	/*
	 * Clear the Rx state machine status and make sure the Rx
	 * channel is idle.
	 */
	rx_sta.sta_val = nge_reg_get32(ngep, NGE_RX_STA);
	for (tries = 0; tries < 1000; tries++) {
		if (rx_sta.sta_bits.rx_chan_sta == NGE_CLEAR)
			break;
		drv_usecwait(10);
		rx_sta.sta_val = nge_reg_get32(ngep, NGE_RX_STA);
	}
	if (tries == 1000) {
		ngep->nge_chip_state = NGE_CHIP_FAULT;
		return (DDI_FAILURE);
	}
	nge_reg_put32(ngep, NGE_RX_STA, rx_sta.sta_val);

	/* Disable auto-polling of the Rx state machine */
	rx_poll.poll_val = nge_reg_get32(ngep, NGE_RX_POLL);
	rx_poll.poll_bits.rpen = NGE_CLEAR;
	rx_poll.poll_bits.rpi = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_RX_POLL, rx_poll.poll_val);

	/* Disable auto-polling of the Tx state machine */
	tx_poll.poll_val = nge_reg_get32(ngep, NGE_TX_POLL);
	tx_poll.poll_bits.tpen = NGE_CLEAR;
	tx_poll.poll_bits.tpi = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_TX_POLL, tx_poll.poll_val);

	/* Restore buffer management */
	mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
	mode.mode_bits.bm_reset = NGE_SET;
	mode.mode_bits.tx_rcom_en = NGE_SET;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val);

	if (ngep->dev_spec_param.advanced_pm) {

		nge_reg_put32(ngep, NGE_PMU_CIDLE_LIMIT, 0);
		nge_reg_put32(ngep, NGE_PMU_DIDLE_LIMIT, 0);

		pmu_cntl2.cntl2_val = nge_reg_get32(ngep, NGE_PMU_CNTL2);
		pmu_cntl2.cntl2_bits.cidle_timer = NGE_CLEAR;
		pmu_cntl2.cntl2_bits.didle_timer = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_PMU_CNTL2, pmu_cntl2.cntl2_val);
	}
	if (fault)
		ngep->nge_chip_state = NGE_CHIP_FAULT;
	else
		ngep->nge_chip_state = NGE_CHIP_STOPPED;

	return (err);
}
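
/*
 * nge_rx_setup() -- program the receive descriptor ring: base address,
 * ring length and the receive polling interval.
 */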
static void
nge_rx_setup(nge_t *ngep)
{
	uint64_t desc_addr;
	nge_rxtx_dlen dlen;
	nge_rx_poll rx_poll;

	/*
	 * Fill in the base address and length of the Rx descriptor ring
	 */
	desc_addr = ngep->recv->desc.cookie.dmac_laddress;
	nge_reg_put32(ngep, NGE_RX_DADR, desc_addr);
	nge_reg_put32(ngep, NGE_RX_DADR_HI, desc_addr >> 32);
	dlen.dlen_val = nge_reg_get32(ngep, NGE_RXTX_DLEN);
	dlen.dlen_bits.rdlen = ngep->recv->desc.nslots - 1;
	nge_reg_put32(ngep, NGE_RXTX_DLEN, dlen.dlen_val);

	rx_poll.poll_val = nge_reg_get32(ngep, NGE_RX_POLL);
	rx_poll.poll_bits.rpi = RX_POLL_INTV_1G;
	rx_poll.poll_bits.rpen = NGE_SET;
	nge_reg_put32(ngep, NGE_RX_POLL, rx_poll.poll_val);
}

/*
 * nge_tx_setup() -- program the transmit descriptor ring base address
 * and length.
 */
static void
nge_tx_setup(nge_t *ngep)
{
	uint64_t desc_addr;
	nge_rxtx_dlen dlen;

	/*
	 * Fill in the base address and length of the Tx descriptor ring
	 */
	desc_addr = ngep->send->desc.cookie.dmac_laddress;
	nge_reg_put32(ngep, NGE_TX_DADR, desc_addr);
	nge_reg_put32(ngep, NGE_TX_DADR_HI, desc_addr >> 32);
	dlen.dlen_val = nge_reg_get32(ngep, NGE_RXTX_DLEN);
	dlen.dlen_bits.tdlen = ngep->send->desc.nslots - 1;
	nge_reg_put32(ngep, NGE_RXTX_DLEN, dlen.dlen_val);
}

/*
 * nge_buff_setup() -- program both descriptor rings and the buffer
 * management mode bits, then enable DMA.
 */
static int
nge_buff_setup(nge_t *ngep)
{
	nge_mode_cntl mode_cntl;
	nge_dev_spec_param_t *dev_param_p;

	dev_param_p = &ngep->dev_spec_param;

	/*
	 * Configure the Rx and Tx descriptor rings
	 */
	nge_rx_setup(ngep);
	nge_tx_setup(ngep);

	/*
	 * Configure the buffer attributes
	 */
	mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);

	/*
	 * Enable DMA access requests
	 */
	mode_cntl.mode_bits.dma_dis = NGE_CLEAR;

	/*
	 * Enable buffer management
	 */
	mode_cntl.mode_bits.bm_reset = NGE_CLEAR;

	/*
	 * Select the descriptor type
	 */
	mode_cntl.mode_bits.desc_type = ngep->desc_mode;

	/*
	 * Enable receive hardware checksum if the device supports it
	 */
	if (dev_param_p->rx_hw_checksum) {
		mode_cntl.mode_bits.rx_sum_en = NGE_SET;
	} else
		mode_cntl.mode_bits.rx_sum_en = NGE_CLEAR;

	/*
	 * Disable Tx PRD coarse update
	 */
	mode_cntl.mode_bits.tx_prd_cu_en = NGE_CLEAR;

	/*
	 * Disable 64-byte access
	 */
	mode_cntl.mode_bits.w64_dis = NGE_SET;

	/*
	 * Rx error-frame skipping is not supported; if it is enabled,
	 * jumbo frames no longer work.
	 */
	mode_cntl.mode_bits.rx_filter_en = NGE_CLEAR;

	/*
	 * Hot mode is not supported yet
	 */
	mode_cntl.mode_bits.resv15 = NGE_CLEAR;

	if (dev_param_p->vlan) {
		/* Disable the vlan strip for devices which support vlan */
		mode_cntl.mode_bits.vlan_strip = NGE_CLEAR;

		/* Disable the vlan insert for devices which support vlan */
		mode_cntl.mode_bits.vlan_ins = NGE_CLEAR;
	}

	if (dev_param_p->tx_rx_64byte) {

		/* Set the maximum TX PRD fetch size to 64 bytes */
		mode_cntl.mode_bits.tx_fetch_prd = NGE_SET;

		/* Set the maximum RX PRD fetch size to 64 bytes */
		mode_cntl.mode_bits.rx_fetch_prd = NGE_SET;
	}
	/*
	 * Upload Rx data as it arrives, rather than waiting for a full frame
	 */
	mode_cntl.mode_bits.resv16 = NGE_CLEAR;

	/*
	 * Normal HOT table accesses
	 */
	mode_cntl.mode_bits.resv17 = NGE_CLEAR;

	/*
	 * Normal HOT buffer requesting
	 */
	mode_cntl.mode_bits.resv18 = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);

	/*
	 * Signal the controller to check for new Rx descriptors
	 */
	mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
	mode_cntl.mode_bits.rxdm = NGE_SET;
	mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);

	return (DDI_SUCCESS);
}

/*
 * When the chipset resets, it cannot restore the original MAC address
 * to the MAC address registers.
 *
 * When the driver is detached, this function writes the original MAC
 * address back to the MAC address registers.
 */

void
nge_restore_mac_addr(nge_t *ngep)
{
	uint32_t mac_addr;

	mac_addr = (uint32_t)ngep->chipinfo.hw_mac_addr;
	nge_reg_put32(ngep, NGE_UNI_ADDR0, mac_addr);
	mac_addr = (uint32_t)(ngep->chipinfo.hw_mac_addr >> 32);
	nge_reg_put32(ngep, NGE_UNI_ADDR1, mac_addr);
}

/*
 * nge_chip_reset() -- prepare the chip for (re)starting: clear the
 * hardware statistics, program the SEEPROM interface, read (and if
 * necessary un-reverse) the factory MAC address, set up the PMU on
 * devices with advanced power management, and finally stop the chip.
 */
int
nge_chip_reset(nge_t *ngep)
{
	int err;
	uint8_t i;
	uint32_t regno;
	uint64_t mac = 0;
	uint64_t mac_tmp = 0;
	nge_uni_addr1 uaddr1;
	nge_cp_cntl ee_cntl;
	nge_soft_misc soft_misc;
	nge_pmu_cntl0 pmu_cntl0;
	nge_pmu_cntl2 pmu_cntl2;
	nge_pm_cntl2 pm_cntl2;
	const nge_ksindex_t *ksip;

	NGE_TRACE(("nge_chip_reset($%p)", (void *)ngep));

	/*
	 * Clear the statistics by reading the statistics registers
	 */
	for (ksip = nge_statistics; ksip->name != NULL; ++ksip) {
		regno = KS_BASE + ksip->index * sizeof (uint32_t);
		(void) nge_reg_get32(ngep, regno);
	}

	/*
	 * Set up SEEPROM control
	 */
	ee_cntl.cntl_val = nge_reg_get32(ngep, NGE_EP_CNTL);
	ee_cntl.cntl_bits.clkdiv = EEPROM_CLKDIV;
	ee_cntl.cntl_bits.rom_size = EEPROM_32K;
	ee_cntl.cntl_bits.word_wid = ACCESS_16BIT;
	ee_cntl.cntl_bits.wait_slots = EEPROM_WAITCLK;
	nge_reg_put32(ngep, NGE_EP_CNTL, ee_cntl.cntl_val);

	/*
	 * Read the unicast MAC address
	 */
	if (ngep->nge_chip_state == NGE_CHIP_INITIAL) {
		uaddr1.addr_val = nge_reg_get32(ngep, NGE_UNI_ADDR1);
		mac = uaddr1.addr_bits.addr;
		mac <<= 32;
		mac |= nge_reg_get32(ngep, NGE_UNI_ADDR0);
		if (mac != 0ULL && mac != ~0ULL) {
			/*
			 * workaround for the MAC address reversed issue
			 * on some motherboards
			 */
			if (ngep->dev_spec_param.mac_addr_order &&
			    (ngep->mac_addr_reversion ||
			    (mac & LOW_24BITS_MASK) == REVERSE_MAC_ELITE ||
			    (mac & LOW_24BITS_MASK) == REVERSE_MAC_GIGABYTE ||
			    (mac & LOW_24BITS_MASK) == REVERSE_MAC_ASUS)) {
				for (i = 0; i < ETHERADDRL; i++) {
					mac_tmp <<= 8;
					mac_tmp += (mac & 0xffULL);
					mac >>= 8;
				}
				mac = mac_tmp;
				nge_reg_put32(ngep,
				    NGE_UNI_ADDR0, (uint32_t)mac);
				nge_reg_put32(ngep,
				    NGE_UNI_ADDR1, (uint32_t)(mac>>32));
			}

			ngep->chipinfo.hw_mac_addr = mac;
			for (i = ETHERADDRL; i-- != 0; ) {
				ngep->chipinfo.vendor_addr.addr[i] =
				    (uchar_t)mac;
				ngep->cur_uni_addr.addr[i] = (uchar_t)mac;
				mac >>= 8;
			}
			ngep->chipinfo.vendor_addr.set = 1;
		}
	}
	pci_config_put8(ngep->cfg_handle, PCI_CONF_CACHE_LINESZ,
	    ngep->chipinfo.clsize);
	pci_config_put8(ngep->cfg_handle, PCI_CONF_LATENCY_TIMER,
	    ngep->chipinfo.latency);

	if (ngep->dev_spec_param.advanced_pm) {

		/* Program software misc register */
		soft_misc.misc_val = nge_reg_get32(ngep, NGE_SOFT_MISC);
		soft_misc.misc_bits.rx_clk_vx_rst = NGE_SET;
		soft_misc.misc_bits.tx_clk_vx_rst = NGE_SET;
		soft_misc.misc_bits.clk12m_vx_rst = NGE_SET;
		soft_misc.misc_bits.fpci_clk_vx_rst = NGE_SET;
		soft_misc.misc_bits.rx_clk_vc_rst = NGE_SET;
		soft_misc.misc_bits.tx_clk_vc_rst = NGE_SET;
		soft_misc.misc_bits.fs_clk_vc_rst = NGE_SET;
		soft_misc.misc_bits.rst_ex_m2pintf = NGE_SET;
		nge_reg_put32(ngep, NGE_SOFT_MISC, soft_misc.misc_val);

		/* wait for 32 us */
		drv_usecwait(32);

		soft_misc.misc_val = nge_reg_get32(ngep, NGE_SOFT_MISC);
		soft_misc.misc_bits.rx_clk_vx_rst = NGE_CLEAR;
		soft_misc.misc_bits.tx_clk_vx_rst = NGE_CLEAR;
		soft_misc.misc_bits.clk12m_vx_rst = NGE_CLEAR;
		soft_misc.misc_bits.fpci_clk_vx_rst = NGE_CLEAR;
		soft_misc.misc_bits.rx_clk_vc_rst = NGE_CLEAR;
		soft_misc.misc_bits.tx_clk_vc_rst = NGE_CLEAR;
		soft_misc.misc_bits.fs_clk_vc_rst = NGE_CLEAR;
		soft_misc.misc_bits.rst_ex_m2pintf = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_SOFT_MISC, soft_misc.misc_val);

		/* Program PMU registers */
		pmu_cntl0.cntl0_val = nge_reg_get32(ngep, NGE_PMU_CNTL0);
		pmu_cntl0.cntl0_bits.core_spd10_fp =
		    NGE_PMU_CORE_SPD10_BUSY;
		pmu_cntl0.cntl0_bits.core_spd10_idle =
		    NGE_PMU_CORE_SPD10_IDLE;
		pmu_cntl0.cntl0_bits.core_spd100_fp =
		    NGE_PMU_CORE_SPD100_BUSY;
		pmu_cntl0.cntl0_bits.core_spd100_idle =
		    NGE_PMU_CORE_SPD100_IDLE;
		pmu_cntl0.cntl0_bits.core_spd1000_fp =
		    NGE_PMU_CORE_SPD1000_BUSY;
		pmu_cntl0.cntl0_bits.core_spd1000_idle =
		    NGE_PMU_CORE_SPD100_IDLE;
		pmu_cntl0.cntl0_bits.core_spd10_idle =
		    NGE_PMU_CORE_SPD10_IDLE;
		nge_reg_put32(ngep, NGE_PMU_CNTL0, pmu_cntl0.cntl0_val);

		/* Set the core idle limit value */
		nge_reg_put32(ngep, NGE_PMU_CIDLE_LIMIT,
		    NGE_PMU_CIDLE_LIMIT_DEF);

		/* Set the device idle limit value */
		nge_reg_put32(ngep, NGE_PMU_DIDLE_LIMIT,
		    NGE_PMU_DIDLE_LIMIT_DEF);

		/* Enable the core/device idle timer in PMU control 2 */
		pmu_cntl2.cntl2_val = nge_reg_get32(ngep, NGE_PMU_CNTL2);
		pmu_cntl2.cntl2_bits.cidle_timer = NGE_SET;
		pmu_cntl2.cntl2_bits.didle_timer = NGE_SET;
		pmu_cntl2.cntl2_bits.core_enable = NGE_SET;
		pmu_cntl2.cntl2_bits.dev_enable = NGE_SET;
		nge_reg_put32(ngep, NGE_PMU_CNTL2, pmu_cntl2.cntl2_val);
	}
	/*
	 * Stop the chipset and clear buffer management
	 */
	err = nge_chip_stop(ngep, B_FALSE);
	if (err == DDI_FAILURE)
		return (err);
	/*
	 * Clear the power-state bits for the PHY, since the interface no
	 * longer works after rebooting from Windows on a multi-boot machine
	 */
	if (ngep->chipinfo.device == DEVICE_ID_MCP51_268 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP51_269 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_372 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP61_3EE ||
	    ngep->chipinfo.device == DEVICE_ID_MCP61_3EF) {

		pm_cntl2.cntl_val = nge_reg_get32(ngep, NGE_PM_CNTL2);
		/* bring phy out of coma mode */
		pm_cntl2.cntl_bits.phy_coma_set = NGE_CLEAR;
		/* disable auto reset coma bits */
		pm_cntl2.cntl_bits.resv4 = NGE_CLEAR;
		/* restore power to gated clocks */
		pm_cntl2.cntl_bits.resv8_11 = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_PM_CNTL2, pm_cntl2.cntl_val);
	}

	/*
	 * Reset the external phy
	 */
	if (!nge_phy_reset(ngep))
		return (DDI_FAILURE);
	ngep->nge_chip_state = NGE_CHIP_RESET;
	return (DDI_SUCCESS);
}

/*
 * nge_chip_start() -- program the descriptor rings, MAC parameters and
 * interrupt masks, and bring the Tx/Rx state machines online; on success
 * the chip state becomes NGE_CHIP_RUNNING.
 */
int
nge_chip_start(nge_t *ngep)
{
	int err;
	nge_itc itc;
	nge_tx_cntl tx_cntl;
	nge_rx_cntrl0 rx_cntl0;
	nge_rx_cntl1 rx_cntl1;
	nge_tx_en tx_en;
	nge_rx_en rx_en;
	nge_mii_cs mii_cs;
	nge_swtr_cntl swtr_cntl;
	nge_rx_fifo_wm rx_fifo;
	nge_intr_mask intr_mask;
	nge_mintr_mask mintr_mask;
	nge_dev_spec_param_t *dev_param_p;

	NGE_TRACE(("nge_chip_start($%p)", (void *)ngep));

	/*
	 * Setup buffer management
	 */
	err = nge_buff_setup(ngep);
	if (err == DDI_FAILURE)
		return (err);

	dev_param_p = &ngep->dev_spec_param;

	/*
	 * Enable PHY auto-polling
	 */
	mii_cs.cs_val = nge_reg_get32(ngep, NGE_MII_CS);
	mii_cs.cs_bits.ap_paddr = ngep->phy_xmii_addr;
	mii_cs.cs_bits.ap_en = NGE_SET;
	mii_cs.cs_bits.ap_intv = MII_POLL_INTV;
	nge_reg_put32(ngep, NGE_MII_CS, mii_cs.cs_val);

	/*
	 * Setup link
	 */
	(*ngep->physops->phys_update)(ngep);

	/*
	 * Configure the Tx parameters
	 */
	tx_cntl.cntl_val = nge_reg_get32(ngep, NGE_TX_CNTL);
	if (dev_param_p->tx_pause_frame)
		tx_cntl.cntl_bits.paen = NGE_SET;
	else
		tx_cntl.cntl_bits.paen = NGE_CLEAR;
	tx_cntl.cntl_bits.retry_en = NGE_SET;
	tx_cntl.cntl_bits.pad_en = NGE_SET;
	tx_cntl.cntl_bits.fappend_en = NGE_SET;
	tx_cntl.cntl_bits.two_def_en = NGE_SET;
	tx_cntl.cntl_bits.max_retry = 15;
	tx_cntl.cntl_bits.burst_en = NGE_CLEAR;
	tx_cntl.cntl_bits.uflo_err_mask = NGE_CLEAR;
	tx_cntl.cntl_bits.tlcol_mask = NGE_CLEAR;
	tx_cntl.cntl_bits.lcar_mask = NGE_CLEAR;
	tx_cntl.cntl_bits.def_mask = NGE_CLEAR;
	tx_cntl.cntl_bits.exdef_mask = NGE_SET;
	tx_cntl.cntl_bits.lcar_mask = NGE_SET;
	tx_cntl.cntl_bits.tlcol_mask = NGE_SET;
	tx_cntl.cntl_bits.uflo_err_mask = NGE_SET;
	tx_cntl.cntl_bits.jam_seq_en = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_TX_CNTL, tx_cntl.cntl_val);

	/*
	 * Configure the parameters of the Rx state machine.
	 * Enable the following:
	 *	1). Pad Strip
	 *	2). FCS Relay
	 *	3). Pause
	 *	4). Address filter
	 *	5). Runt Packet receive
	 *	6). Broadcast
	 *	7). Receive Deferral
	 *
	 * Disable the following interrupts to reduce the number of
	 * interrupts:
	 *	1). Runt interrupt.
	 *	2). Rx's Late Collision interrupt.
	 *	3). Rx's Max length Error interrupt.
	 *	4). Rx's Length Field error interrupt.
	 *	5). Rx's FCS error interrupt.
	 *	6). Rx's overflow error interrupt.
	 *	7). Rx's Frame alignment error interrupt.
	 */
	rx_cntl0.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);
	rx_cntl0.cntl_bits.padsen = NGE_CLEAR;
	rx_cntl0.cntl_bits.fcsren = NGE_CLEAR;
	if (dev_param_p->rx_pause_frame)
		rx_cntl0.cntl_bits.paen = NGE_SET;
	else
		rx_cntl0.cntl_bits.paen = NGE_CLEAR;
	rx_cntl0.cntl_bits.lben = NGE_CLEAR;
	rx_cntl0.cntl_bits.afen = NGE_SET;
	rx_cntl0.cntl_bits.runten = NGE_CLEAR;
	rx_cntl0.cntl_bits.brdis = NGE_CLEAR;
	rx_cntl0.cntl_bits.rdfen = NGE_CLEAR;
	rx_cntl0.cntl_bits.runtm = NGE_CLEAR;
	rx_cntl0.cntl_bits.slfb = NGE_CLEAR;
	rx_cntl0.cntl_bits.rlcolm = NGE_CLEAR;
	rx_cntl0.cntl_bits.maxerm = NGE_CLEAR;
	rx_cntl0.cntl_bits.lferm = NGE_CLEAR;
	rx_cntl0.cntl_bits.crcm = NGE_CLEAR;
	rx_cntl0.cntl_bits.ofolm = NGE_CLEAR;
	rx_cntl0.cntl_bits.framerm = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val);

	/*
	 * Configure the watermarks for the Rx state machine
	 */
	rx_fifo.wm_val = nge_reg_get32(ngep, NGE_RX_FIFO_WM);
	rx_fifo.wm_bits.data_hwm = ngep->rx_datahwm;
	rx_fifo.wm_bits.prd_lwm = ngep->rx_prdlwm;
	rx_fifo.wm_bits.prd_hwm = ngep->rx_prdhwm;
	nge_reg_put32(ngep, NGE_RX_FIFO_WM, rx_fifo.wm_val);

	/*
	 * Configure the deferral time slot for the Rx state machine
	 */
	nge_reg_put8(ngep, NGE_RX_DEf, ngep->rx_def);

	/*
	 * Configure the maximum Rx packet length
	 */
	rx_cntl1.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL1);
	rx_cntl1.cntl_bits.length = ngep->max_sdu;
	nge_reg_put32(ngep, NGE_RX_CNTL1, rx_cntl1.cntl_val);
	/*
	 * Enable the Tx state machine
	 */
	tx_en.val = nge_reg_get8(ngep, NGE_TX_EN);
	tx_en.bits.tx_en = NGE_SET;
	nge_reg_put8(ngep, NGE_TX_EN, tx_en.val);

	/*
	 * Enable the Rx state machine
	 */
	rx_en.val = nge_reg_get8(ngep, NGE_RX_EN);
	rx_en.bits.rx_en = NGE_SET;
	nge_reg_put8(ngep, NGE_RX_EN, rx_en.val);

	itc.itc_val = nge_reg_get32(ngep, NGE_SWTR_ITC);
	itc.itc_bits.sw_intv = ngep->sw_intr_intv;
	nge_reg_put32(ngep, NGE_SWTR_ITC, itc.itc_val);

	swtr_cntl.ctrl_val = nge_reg_get8(ngep, NGE_SWTR_CNTL);
	swtr_cntl.cntl_bits.sten = NGE_SET;
	swtr_cntl.cntl_bits.stren = NGE_SET;
	nge_reg_put32(ngep, NGE_SWTR_CNTL, swtr_cntl.ctrl_val);

	/*
	 * Disable all MII read/write operation interrupts
	 */
	mintr_mask.mask_val = nge_reg_get8(ngep, NGE_MINTR_MASK);
	mintr_mask.mask_bits.mrei = NGE_CLEAR;
	mintr_mask.mask_bits.mcc2 = NGE_CLEAR;
	mintr_mask.mask_bits.mcc1 = NGE_CLEAR;
	mintr_mask.mask_bits.mapi = NGE_SET;
	mintr_mask.mask_bits.mpdi = NGE_SET;
	nge_reg_put8(ngep, NGE_MINTR_MASK, mintr_mask.mask_val);

	/*
	 * Enable the required interrupt events
	 */
	intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
	intr_mask.mask_bits.reint = NGE_SET;
	intr_mask.mask_bits.rcint = NGE_SET;
	intr_mask.mask_bits.miss = NGE_SET;
	intr_mask.mask_bits.teint = NGE_CLEAR;
	intr_mask.mask_bits.tcint = NGE_SET;
	intr_mask.mask_bits.stint = NGE_CLEAR;
	intr_mask.mask_bits.mint = NGE_CLEAR;
	intr_mask.mask_bits.rfint = NGE_CLEAR;
	intr_mask.mask_bits.tfint = NGE_CLEAR;
	intr_mask.mask_bits.feint = NGE_SET;
	intr_mask.mask_bits.resv10 = NGE_CLEAR;
	intr_mask.mask_bits.resv11 = NGE_CLEAR;
	intr_mask.mask_bits.resv12 = NGE_CLEAR;
	intr_mask.mask_bits.resv13 = NGE_CLEAR;
	intr_mask.mask_bits.phyint = NGE_CLEAR;
	ngep->intr_masks = intr_mask.mask_val;
	nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
	ngep->nge_chip_state = NGE_CHIP_RUNNING;
	return (DDI_SUCCESS);
}

/*
 * nge_chip_sync() -- program the chip with the unicast MAC address,
 * the multicast hash table, and the required level of promiscuity.
 */
void
nge_chip_sync(nge_t *ngep)
{
	uint8_t i;
	uint64_t macaddr;
	uint64_t mul_addr;
	uint64_t mul_mask;
	nge_rx_cntrl0 rx_cntl;
	nge_uni_addr1 uni_adr1;

	NGE_TRACE(("nge_chip_sync($%p)", (void *)ngep));

	macaddr = 0x0ull;
	mul_addr = 0x0ull;
	mul_mask = 0x0ull;
	rx_cntl.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);

	if (ngep->promisc) {
		rx_cntl.cntl_bits.afen = NGE_CLEAR;
		rx_cntl.cntl_bits.brdis = NGE_SET;
	} else {
		rx_cntl.cntl_bits.afen = NGE_SET;
		rx_cntl.cntl_bits.brdis = NGE_CLEAR;
	}

	/*
	 * Transform the unicast MAC address(es) from host to chip
	 * format ...
	 */
	for (i = ETHERADDRL, macaddr = 0ull; i != 0; --i) {
		macaddr |= ngep->cur_uni_addr.addr[i-1];
		macaddr <<= (i > 1) ? 8 : 0;
	}

	nge_reg_put32(ngep, NGE_UNI_ADDR0, (uint32_t)macaddr);
	macaddr = macaddr >> 32;
	uni_adr1.addr_val = nge_reg_get32(ngep, NGE_UNI_ADDR1);
	uni_adr1.addr_bits.addr = (uint16_t)macaddr;
	uni_adr1.addr_bits.resv16_31 = (uint16_t)0;
	nge_reg_put32(ngep, NGE_UNI_ADDR1, uni_adr1.addr_val);

	/*
	 * Reprogram the multicast address table ...
	 */
	for (i = ETHERADDRL, mul_addr = 0ull; i != 0; --i) {
		mul_addr |= ngep->cur_mul_addr.addr[i-1];
		mul_addr <<= (i > 1) ? 8 : 0;
		mul_mask |= ngep->cur_mul_mask.addr[i-1];
		mul_mask <<= (i > 1) ? 8 : 0;
	}
	nge_reg_put32(ngep, NGE_MUL_ADDR0, (uint32_t)mul_addr);
	mul_addr >>= 32;
	nge_reg_put32(ngep, NGE_MUL_ADDR1, mul_addr);
	nge_reg_put32(ngep, NGE_MUL_MASK, (uint32_t)mul_mask);
	mul_mask >>= 32;
	nge_reg_put32(ngep, NGE_MUL_MASK1, mul_mask);
	/*
	 * Set or clear the PROMISCUOUS mode bit
	 */
	nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl.cntl_val);
	/*
	 * For internal PHY loopback, the link will not be up, so we need
	 * to sync the MAC modes directly.
	 */
	if (ngep->param_loop_mode == NGE_LOOP_INTERNAL_PHY)
		nge_sync_mac_modes(ngep);
}
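
/*
 * nge_chip_err() -- decode the fatal-error status register (NGE_REG010)
 * into the software statistics and, if any error bit is set, mask the
 * fatal-error interrupt so a persistent condition cannot flood us.
 */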
static void
nge_chip_err(nge_t *ngep)
{
	nge_reg010 reg010_ins;
	nge_sw_statistics_t *psw_stat;
	nge_intr_mask intr_mask;

	NGE_TRACE(("nge_chip_err($%p)", (void *)ngep));

	psw_stat = (nge_sw_statistics_t *)&ngep->statistics.sw_statistics;
	reg010_ins.reg010_val = nge_reg_get32(ngep, NGE_REG010);
	if (reg010_ins.reg010_bits.resv0)
		psw_stat->fe_err.tso_err_mss++;

	if (reg010_ins.reg010_bits.resv1)
		psw_stat->fe_err.tso_dis++;

	if (reg010_ins.reg010_bits.resv2)
		psw_stat->fe_err.tso_err_nosum++;

	if (reg010_ins.reg010_bits.resv3)
		psw_stat->fe_err.tso_err_hov++;

	if (reg010_ins.reg010_bits.resv4)
		psw_stat->fe_err.tso_err_huf++;

	if (reg010_ins.reg010_bits.resv5)
		psw_stat->fe_err.tso_err_l2++;

	if (reg010_ins.reg010_bits.resv6)
		psw_stat->fe_err.tso_err_ip++;

	if (reg010_ins.reg010_bits.resv7)
		psw_stat->fe_err.tso_err_l4++;

	if (reg010_ins.reg010_bits.resv8)
		psw_stat->fe_err.tso_err_tcp++;

	if (reg010_ins.reg010_bits.resv9)
		psw_stat->fe_err.hsum_err_ip++;

	if (reg010_ins.reg010_bits.resv10)
		psw_stat->fe_err.hsum_err_l4++;

	if (reg010_ins.reg010_val != 0) {

		/*
		 * Fatal error is triggered by malformed driver commands.
		 * Disable unless debugging.
		 */
		intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
		intr_mask.mask_bits.feint = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
		ngep->intr_masks = intr_mask.mask_val;
	}
}

/*
 * nge_sync_mac_modes() -- reprogram the speed- and duplex-dependent
 * MAC parameters (inter-frame gaps, FIFO watermarks, backoff slot time,
 * pause reception) to match the current PHY link state.
 */
static void
nge_sync_mac_modes(nge_t *ngep)
{
	nge_tx_def tx_def;
	nge_tx_fifo_wm tx_fifo;
	nge_bkoff_cntl bk_cntl;
	nge_mac2phy m2p;
	nge_rx_cntrl0 rx_cntl0;
	nge_dev_spec_param_t *dev_param_p;

	dev_param_p = &ngep->dev_spec_param;

	tx_def.def_val = nge_reg_get32(ngep, NGE_TX_DEF);
	m2p.m2p_val = nge_reg_get32(ngep, NGE_MAC2PHY);
	tx_fifo.wm_val = nge_reg_get32(ngep, NGE_TX_FIFO_WM);
	bk_cntl.cntl_val = nge_reg_get32(ngep, NGE_BKOFF_CNTL);
	bk_cntl.bkoff_bits.rseed = BKOFF_RSEED;
	switch (ngep->param_link_speed) {
	case 10:
		m2p.m2p_bits.speed = low_speed;
		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
		if (ngep->phy_mode == RGMII_IN) {
			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_10_100;
			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
		} else {
			tx_def.def_bits.if_def = TX_TIFG_MII;
			tx_def.def_bits.ifg2_def = TX_IFG2_MII;
		}
		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_MII;
		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_MII;
		break;

	case 100:
		m2p.m2p_bits.speed = fast_speed;
		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
		if (ngep->phy_mode == RGMII_IN) {
			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_10_100;
			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
		} else {
			tx_def.def_bits.if_def = TX_TIFG_MII;
			tx_def.def_bits.ifg2_def = TX_IFG2_MII;
		}
		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_MII;
		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_MII;
		break;

	case 1000:
		m2p.m2p_bits.speed = giga_speed;
		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
		if (ngep->param_link_duplex == LINK_DUPLEX_FULL) {
			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_1000;
			tx_def.def_bits.if_def = TX_IFG_RGMII_1000_FD;
		} else {
			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_1000;
			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
		}

		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_GMII;
		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_GMII;
		break;
	}

	if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
		m2p.m2p_bits.phyintr = NGE_CLEAR;
		m2p.m2p_bits.phyintrlvl = NGE_CLEAR;
	}
	if (ngep->param_link_duplex == LINK_DUPLEX_HALF) {
		m2p.m2p_bits.hdup_en = NGE_SET;
	} else
		m2p.m2p_bits.hdup_en = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_MAC2PHY, m2p.m2p_val);
	nge_reg_put32(ngep, NGE_TX_DEF, tx_def.def_val);

	tx_fifo.wm_bits.data_lwm = TX_FIFO_DATA_LWM;
	tx_fifo.wm_bits.prd_lwm = TX_FIFO_PRD_LWM;
	tx_fifo.wm_bits.uprd_hwm = TX_FIFO_PRD_HWM;
	tx_fifo.wm_bits.fb_wm = TX_FIFO_TBFW;
	nge_reg_put32(ngep, NGE_TX_FIFO_WM, tx_fifo.wm_val);

	nge_reg_put32(ngep, NGE_BKOFF_CNTL, bk_cntl.cntl_val);

	rx_cntl0.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);
	if (ngep->param_link_rx_pause && dev_param_p->rx_pause_frame)
		rx_cntl0.cntl_bits.paen = NGE_SET;
	else
		rx_cntl0.cntl_bits.paen = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val);
}

/*
 * Handler for hardware link state change.
 *
 * When this routine is called, the hardware link state has changed
 * and the new state is reflected in the param_* variables.  Here
 * we must update the softstate, reprogram the MAC to match, and
 * record the change in the log and/or on the console.
 */
static void
nge_factotum_link_handler(nge_t *ngep)
{
	/*
	 * Update the s/w link_state
	 */
	if (ngep->param_link_up)
		ngep->link_state = LINK_STATE_UP;
	else
		ngep->link_state = LINK_STATE_DOWN;

	/*
	 * Reprogram the MAC modes to match
	 */
	nge_sync_mac_modes(ngep);
}

static boolean_t
nge_factotum_link_check(nge_t *ngep)
{
	boolean_t lchg;
	boolean_t check;

	ASSERT(mutex_owned(ngep->genlock));

	(*ngep->physops->phys_check)(ngep);
	switch (ngep->link_state) {
	case LINK_STATE_UP:
		lchg = (ngep->param_link_up == B_FALSE);
		check = (ngep->param_link_up == B_FALSE);
		break;

	case LINK_STATE_DOWN:
		lchg = (ngep->param_link_up == B_TRUE);
		check = (ngep->param_link_up == B_TRUE);
		break;

	default:
		check = B_TRUE;
		break;
	}

	/*
	 * If <check> is false, we're sure the link hasn't changed.
	 * If true, however, it's not yet definitive; we have to call
	 * nge_phys_check() to determine whether the link has settled
	 * into a new state yet ... and if it has, then call the link
	 * state change handler.  But when the chip is a 5700 in a Dell
	 * 6650, the link may have changed even if <check> is false, so
	 * we have to call nge_phys_check() to determine the link state.
	 */
	if (check)
		nge_factotum_link_handler(ngep);

	return (lchg);
}

/*
 * Factotum routine to check for Tx stall, using the 'watchdog' counter
 */
static boolean_t nge_factotum_stall_check(nge_t *ngep);
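
/*
 * The stall threshold nge_watchdog_count is (1 << 29) (see the top of
 * this file), so the shifted watchdog value crosses it only after many
 * consecutive factotum passes without being reset, and a Tx stall is
 * reported only after 16 consecutive over-threshold checks (stall_cknum).
 */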
static boolean_t
nge_factotum_stall_check(nge_t *ngep)
{
	uint32_t dogval;
	/*
	 * Specific check for Tx stall ...
	 *
	 * The 'watchdog' counter is incremented whenever a packet
	 * is queued, reset to 1 when some (but not all) buffers
	 * are reclaimed, reset to 0 (disabled) when all buffers
	 * are reclaimed, and shifted left here.  If it exceeds the
	 * threshold value, the chip is assumed to have stalled and
	 * is put into the ERROR state.  The factotum will then reset
	 * it on the next pass.
	 *
	 * All of which should ensure that we don't get into a state
	 * where packets are left pending indefinitely!
	 */
	dogval = nge_atomic_shl32(&ngep->watchdog, 1);
	if (dogval < nge_watchdog_count) {
		ngep->stall_cknum = 0;
	} else {
		ngep->stall_cknum++;
	}
	if (ngep->stall_cknum < 16) {
		return (B_FALSE);
	} else {
		ngep->stall_cknum = 0;
		ngep->statistics.sw_statistics.tx_stall++;
		return (B_TRUE);
	}
}


/*
 * The factotum is woken up when there's something to do that we'd rather
 * not do from inside a hardware interrupt handler or high-level cyclic.
 * Its two main tasks are:
 *	reset & restart the chip after an error
 *	check the link status whenever necessary
 */
/* ARGSUSED */
uint_t
nge_chip_factotum(caddr_t args1, caddr_t args2)
{
	uint_t result;
	nge_t *ngep;
	boolean_t err;
	boolean_t linkchg;

	ngep = (nge_t *)args1;

	NGE_TRACE(("nge_chip_factotum($%p)", (void *)ngep));

	mutex_enter(ngep->softlock);
	if (ngep->factotum_flag == 0) {
		mutex_exit(ngep->softlock);
		return (DDI_INTR_UNCLAIMED);
	}
	ngep->factotum_flag = 0;
	mutex_exit(ngep->softlock);
	err = B_FALSE;
	linkchg = B_FALSE;
	result = DDI_INTR_CLAIMED;

	mutex_enter(ngep->genlock);
	switch (ngep->nge_chip_state) {
	default:
		break;

	case NGE_CHIP_RUNNING:
		linkchg = nge_factotum_link_check(ngep);
		err = nge_factotum_stall_check(ngep);
		break;

	case NGE_CHIP_FAULT:
		(void) nge_restart(ngep);
		NGE_REPORT((ngep, "automatic recovery activated"));
		break;
	}

	if (err)
		(void) nge_chip_stop(ngep, B_TRUE);
	mutex_exit(ngep->genlock);

	/*
	 * If the link state changed, tell the world about it (if
	 * this version of MAC supports link state notification).
	 * Note: can't do this while still holding the mutex.
	 */
	if (linkchg)
		mac_link_update(ngep->mh, ngep->link_state);

	return (result);
}
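
/*
 * nge_intr_handle() -- service the individual interrupt sources: recycle
 * transmit buffers, pass received packets up, switch between interrupt
 * and polling mode based on the observed traffic, decode fatal errors,
 * and wake the factotum on PHY (link) interrupts.
 */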
static void
nge_intr_handle(nge_t *ngep, nge_intr_src *pintr_src)
{
	boolean_t brx;
	boolean_t btx;
	nge_mintr_src mintr_src;

	brx = B_FALSE;
	btx = B_FALSE;
	ngep->statistics.sw_statistics.intr_count++;
	ngep->statistics.sw_statistics.intr_lval = pintr_src->intr_val;
	brx = (pintr_src->int_bits.reint | pintr_src->int_bits.miss
	    | pintr_src->int_bits.rcint | pintr_src->int_bits.stint)
	    != 0 ? B_TRUE : B_FALSE;
	if (pintr_src->int_bits.reint)
		ngep->statistics.sw_statistics.rx_err++;
	if (pintr_src->int_bits.miss)
		ngep->statistics.sw_statistics.rx_nobuffer++;

	btx = (pintr_src->int_bits.teint | pintr_src->int_bits.tcint)
	    != 0 ? B_TRUE : B_FALSE;
	if (pintr_src->int_bits.stint && ngep->poll)
		ngep->stint_count++;
	if (ngep->poll && (ngep->stint_count % ngep->param_tx_n_intr == 0))
		btx = B_TRUE;
	if (btx)
		nge_tx_recycle(ngep, B_TRUE);
	if (brx)
		nge_receive(ngep);
	if (pintr_src->int_bits.teint)
		ngep->statistics.sw_statistics.tx_stop_err++;
	if (ngep->intr_moderation && brx) {
		if (ngep->poll) {
			if (ngep->recv_count < ngep->param_rx_intr_hwater) {
				ngep->quiet_time++;
				if (ngep->quiet_time ==
				    ngep->param_poll_quiet_time) {
					ngep->poll = B_FALSE;
					ngep->quiet_time = 0;
					ngep->stint_count = 0;
					nge_tx_recycle(ngep, B_TRUE);
				}
			} else
				ngep->quiet_time = 0;
		} else {
			if (ngep->recv_count > ngep->param_rx_intr_lwater) {
				ngep->busy_time++;
				if (ngep->busy_time ==
				    ngep->param_poll_busy_time) {
					ngep->poll = B_TRUE;
					ngep->busy_time = 0;
				}
			} else
				ngep->busy_time = 0;
		}
	}
	ngep->recv_count = 0;
	if (pintr_src->int_bits.feint)
		nge_chip_err(ngep);
	/* link interrupt, check the link state */
	if (pintr_src->int_bits.mint) {
		mintr_src.src_val = nge_reg_get32(ngep, NGE_MINTR_SRC);
		nge_reg_put32(ngep, NGE_MINTR_SRC, mintr_src.src_val);
		nge_wake_factotum(ngep);
	}
}

/*
 * nge_chip_intr() -- handle chip interrupts
 */
/* ARGSUSED */
uint_t
nge_chip_intr(caddr_t arg1, caddr_t arg2)
{
	nge_t *ngep = (nge_t *)arg1;
	nge_intr_src intr_src;
	nge_intr_mask intr_mask;

	mutex_enter(ngep->genlock);

	if (ngep->suspended) {
		mutex_exit(ngep->genlock);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Check whether the chip says it's asserting #INTA;
	 * if not, don't process or claim the interrupt.
	 */
	intr_src.intr_val = nge_reg_get32(ngep, NGE_INTR_SRC);
	if (intr_src.intr_val == 0) {
		mutex_exit(ngep->genlock);
		return (DDI_INTR_UNCLAIMED);
	}
	/*
	 * Ack the interrupt
	 */
	nge_reg_put32(ngep, NGE_INTR_SRC, intr_src.intr_val);

	if (ngep->nge_chip_state != NGE_CHIP_RUNNING) {
		mutex_exit(ngep->genlock);
		return (DDI_INTR_CLAIMED);
	}
	nge_intr_handle(ngep, &intr_src);
	if (ngep->poll && !ngep->ch_intr_mode) {
		intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
		intr_mask.mask_bits.stint = NGE_SET;
		intr_mask.mask_bits.rcint = NGE_CLEAR;
		intr_mask.mask_bits.reint = NGE_CLEAR;
		intr_mask.mask_bits.tcint = NGE_CLEAR;
		intr_mask.mask_bits.teint = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
		ngep->ch_intr_mode = B_TRUE;
	} else if ((ngep->ch_intr_mode) && (!ngep->poll)) {
		nge_reg_put32(ngep, NGE_INTR_MASK, ngep->intr_masks);
		ngep->ch_intr_mode = B_FALSE;
	}
	mutex_exit(ngep->genlock);
	return (DDI_INTR_CLAIMED);
}

/*
 * nge_pp_ioctl() -- handle the NGE_PEEK/NGE_POKE diagnostic ioctls:
 * validate the request and dispatch to the peek/poke routine for the
 * selected address space.
 */
static enum ioc_reply
nge_pp_ioctl(nge_t *ngep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	int err;
	uint64_t sizemask;
	uint64_t mem_va;
	uint64_t maxoff;
	boolean_t peek;
	nge_peekpoke_t *ppd;
	int (*ppfn)(nge_t *ngep, nge_peekpoke_t *ppd);

	switch (cmd) {
	default:
		return (IOC_INVAL);

	case NGE_PEEK:
		peek = B_TRUE;
		break;

	case NGE_POKE:
		peek = B_FALSE;
		break;
	}

	/*
	 * Validate format of ioctl
	 */
	if (iocp->ioc_count != sizeof (nge_peekpoke_t))
		return (IOC_INVAL);
	if (mp->b_cont == NULL)
		return (IOC_INVAL);
	ppd = (nge_peekpoke_t *)mp->b_cont->b_rptr;

	/*
	 * Validate request parameters
	 */
	switch (ppd->pp_acc_space) {
	default:
		return (IOC_INVAL);

	case NGE_PP_SPACE_CFG:
		/*
		 * Config space
		 */
		sizemask = 8|4|2|1;
		mem_va = 0;
		maxoff = PCI_CONF_HDR_SIZE;
		ppfn = peek ? nge_chip_peek_cfg : nge_chip_poke_cfg;
		break;

	case NGE_PP_SPACE_REG:
		/*
		 * Memory-mapped I/O space
		 */
		sizemask = 8|4|2|1;
		mem_va = 0;
		maxoff = NGE_REG_SIZE;
		ppfn = peek ? nge_chip_peek_reg : nge_chip_poke_reg;
		break;

	case NGE_PP_SPACE_MII:
		sizemask = 4|2|1;
		mem_va = 0;
		maxoff = NGE_MII_SIZE;
		ppfn = peek ? nge_chip_peek_mii : nge_chip_poke_mii;
		break;

	case NGE_PP_SPACE_SEEPROM:
		sizemask = 4|2|1;
		mem_va = 0;
		maxoff = NGE_SEEROM_SIZE;
		ppfn = peek ? nge_chip_peek_seeprom : nge_chip_poke_seeprom;
		break;
	}

	switch (ppd->pp_acc_size) {
	default:
		return (IOC_INVAL);

	case 8:
	case 4:
	case 2:
	case 1:
		if ((ppd->pp_acc_size & sizemask) == 0)
			return (IOC_INVAL);
		break;
	}

	if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
		return (IOC_INVAL);

	if (ppd->pp_acc_offset >= maxoff)
		return (IOC_INVAL);

	if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff)
		return (IOC_INVAL);

	/*
	 * All OK - go do it!
	 */
	ppd->pp_acc_offset += mem_va;
	if (ppfn)
		err = (*ppfn)(ngep, ppd);
	if (err != DDI_SUCCESS)
		return (IOC_INVAL);
	return (peek ? IOC_REPLY : IOC_ACK);
}
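
/*
 * Diagnostic ioctls (NGE_DIAG, NGE_PEEK/NGE_POKE and the reset commands)
 * are only honoured when the driver is built with NGE_DEBUGGING; see
 * nge_chip_ioctl() below.
 */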
static enum ioc_reply nge_diag_ioctl(nge_t *ngep, int cmd, mblk_t *mp,
    struct iocblk *iocp);
#pragma	no_inline(nge_diag_ioctl)

static enum ioc_reply
nge_diag_ioctl(nge_t *ngep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	ASSERT(mutex_owned(ngep->genlock));

	switch (cmd) {
	default:
		nge_error(ngep, "nge_diag_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case NGE_DIAG:
		return (IOC_ACK);

	case NGE_PEEK:
	case NGE_POKE:
		return (nge_pp_ioctl(ngep, cmd, mp, iocp));

	case NGE_PHY_RESET:
		return (IOC_RESTART_ACK);

	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		return (IOC_ACK);
	}

	/* NOTREACHED */
}

enum ioc_reply
nge_chip_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	ASSERT(mutex_owned(ngep->genlock));

	cmd = iocp->ioc_cmd;

	switch (cmd) {
	default:
		return (IOC_INVAL);

	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
#if	NGE_DEBUGGING
		return (nge_diag_ioctl(ngep, cmd, mp, iocp));
#else
		return (IOC_INVAL);
#endif

	case NGE_MII_READ:
	case NGE_MII_WRITE:
		return (IOC_INVAL);

#if	NGE_SEE_IO32
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
		return (IOC_INVAL);
#endif

#if	NGE_FLASH_IO32
	case NGE_FLASH_READ:
	case NGE_FLASH_WRITE:
		return (IOC_INVAL);
#endif
	}
}