/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "nge.h"
static uint32_t nge_watchdog_count = 1 << 29;
extern boolean_t nge_enable_msi;
static void nge_sync_mac_modes(nge_t *);

#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_CHIP

/*
 * Operating register get/set access routines
 */
uint8_t nge_reg_get8(nge_t *ngep, nge_regno_t regno);
#pragma	inline(nge_reg_get8)

uint8_t
nge_reg_get8(nge_t *ngep, nge_regno_t regno)
{
	NGE_TRACE(("nge_reg_get8($%p, 0x%lx)", (void *)ngep, regno));

	return (ddi_get8(ngep->io_handle, PIO_ADDR(ngep, regno)));
}

void nge_reg_put8(nge_t *ngep, nge_regno_t regno, uint8_t data);
#pragma	inline(nge_reg_put8)

void
nge_reg_put8(nge_t *ngep, nge_regno_t regno, uint8_t data)
{
	NGE_TRACE(("nge_reg_put8($%p, 0x%lx, 0x%x)",
	    (void *)ngep, regno, data));
	ddi_put8(ngep->io_handle, PIO_ADDR(ngep, regno), data);

}

uint16_t nge_reg_get16(nge_t *ngep, nge_regno_t regno);
#pragma	inline(nge_reg_get16)

uint16_t
nge_reg_get16(nge_t *ngep, nge_regno_t regno)
{
	NGE_TRACE(("nge_reg_get16($%p, 0x%lx)", (void *)ngep, regno));
	return (ddi_get16(ngep->io_handle, PIO_ADDR(ngep, regno)));
}

void nge_reg_put16(nge_t *ngep, nge_regno_t regno, uint16_t data);
#pragma	inline(nge_reg_put16)

void
nge_reg_put16(nge_t *ngep, nge_regno_t regno, uint16_t data)
{
	NGE_TRACE(("nge_reg_put16($%p, 0x%lx, 0x%x)",
	    (void *)ngep, regno, data));
	ddi_put16(ngep->io_handle, PIO_ADDR(ngep, regno), data);

}

uint32_t nge_reg_get32(nge_t *ngep, nge_regno_t regno);
#pragma	inline(nge_reg_get32)

uint32_t
nge_reg_get32(nge_t *ngep, nge_regno_t regno)
{
	NGE_TRACE(("nge_reg_get32($%p, 0x%lx)", (void *)ngep, regno));
	return (ddi_get32(ngep->io_handle, PIO_ADDR(ngep, regno)));
}

void nge_reg_put32(nge_t *ngep, nge_regno_t regno, uint32_t data);
#pragma	inline(nge_reg_put32)

void
nge_reg_put32(nge_t *ngep, nge_regno_t regno, uint32_t data)
{
	NGE_TRACE(("nge_reg_put32($%p, 0x%lx, 0x%x)",
	    (void *)ngep, regno, data));
	ddi_put32(ngep->io_handle, PIO_ADDR(ngep, regno), data);

}


static int nge_chip_peek_cfg(nge_t *ngep, nge_peekpoke_t *ppd);
#pragma	no_inline(nge_chip_peek_cfg)

static int
nge_chip_peek_cfg(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	uint64_t regval;
	uint64_t regno;

	NGE_TRACE(("nge_chip_peek_cfg($%p, $%p)",
	    (void *)ngep, (void *)ppd));

	err = DDI_SUCCESS;
	regno = ppd->pp_acc_offset;

	switch (ppd->pp_acc_size) {
	case 1:
		regval = pci_config_get8(ngep->cfg_handle, regno);
		break;

	case 2:
		regval = pci_config_get16(ngep->cfg_handle, regno);
		break;

	case 4:
		regval = pci_config_get32(ngep->cfg_handle, regno);
		break;

	case 8:
		regval = pci_config_get64(ngep->cfg_handle, regno);
		break;

	default:
		regval = 0x0ull;
		break;
	}
	ppd->pp_acc_data = regval;
	return (err);
}

static int nge_chip_poke_cfg(nge_t *ngep, nge_peekpoke_t *ppd);

static int
nge_chip_poke_cfg(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	uint64_t regval;
	uint64_t regno;

	NGE_TRACE(("nge_chip_poke_cfg($%p, $%p)",
	    (void *)ngep, (void *)ppd));

	err = DDI_SUCCESS;
	regno = ppd->pp_acc_offset;
	regval = ppd->pp_acc_data;

	switch (ppd->pp_acc_size) {
	case 1:
		pci_config_put8(ngep->cfg_handle, regno, regval);
		break;

	case 2:
		pci_config_put16(ngep->cfg_handle, regno, regval);
		break;

	case 4:
		pci_config_put32(ngep->cfg_handle, regno, regval);
		break;

	case 8:
		pci_config_put64(ngep->cfg_handle, regno, regval);
		break;
	}

	return (err);

}

static int nge_chip_peek_reg(nge_t *ngep, nge_peekpoke_t *ppd);

static int
nge_chip_peek_reg(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	uint64_t regval;
	void *regaddr;

	NGE_TRACE(("nge_chip_peek_reg($%p, $%p)",
	    (void *)ngep, (void *)ppd));

	err = DDI_SUCCESS;
	regaddr = PIO_ADDR(ngep, ppd->pp_acc_offset);

	switch (ppd->pp_acc_size) {
	case 1:
		regval = ddi_get8(ngep->io_handle, regaddr);
		break;

	case 2:
		regval = ddi_get16(ngep->io_handle, regaddr);
		break;

	case 4:
		regval = ddi_get32(ngep->io_handle, regaddr);
		break;

	case 8:
		regval = ddi_get64(ngep->io_handle, regaddr);
		break;

	default:
		regval = 0x0ull;
		break;
	}
	ppd->pp_acc_data = regval;
	return (err);
}

static int nge_chip_poke_reg(nge_t *ngep, nge_peekpoke_t *ppd);

static int
nge_chip_poke_reg(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	uint64_t regval;
	void *regaddr;

	NGE_TRACE(("nge_chip_poke_reg($%p, $%p)",
	    (void *)ngep, (void *)ppd));

	err = DDI_SUCCESS;
	regaddr = PIO_ADDR(ngep, ppd->pp_acc_offset);
	regval = ppd->pp_acc_data;

	switch (ppd->pp_acc_size) {
	case 1:
		ddi_put8(ngep->io_handle, regaddr, regval);
		break;

	case 2:
		ddi_put16(ngep->io_handle, regaddr, regval);
		break;

	case 4:
		ddi_put32(ngep->io_handle, regaddr, regval);
		break;

	case 8:
		ddi_put64(ngep->io_handle, regaddr, regval);
		break;
	}
	return (err);
}

static int nge_chip_peek_mii(nge_t *ngep, nge_peekpoke_t *ppd);
#pragma	no_inline(nge_chip_peek_mii)

static int
nge_chip_peek_mii(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;

	err = DDI_SUCCESS;
	ppd->pp_acc_data = nge_mii_get16(ngep, ppd->pp_acc_offset/2);
	return (err);
}

static int nge_chip_poke_mii(nge_t *ngep, nge_peekpoke_t *ppd);
#pragma	no_inline(nge_chip_poke_mii)

static int
nge_chip_poke_mii(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	err = DDI_SUCCESS;
	nge_mii_put16(ngep, ppd->pp_acc_offset/2, ppd->pp_acc_data);
	return (err);
}
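
/*
 * Illustrative usage sketch (not compiled): the peek/poke accessors
 * above are driven through an nge_peekpoke_t descriptor, as done by
 * nge_pp_ioctl() further below.  A 4-byte config-space peek would be
 * set up roughly like this:
 *
 *	nge_peekpoke_t ppd;
 *
 *	ppd.pp_acc_space = NGE_PP_SPACE_CFG;
 *	ppd.pp_acc_size = 4;
 *	ppd.pp_acc_offset = PCI_CONF_COMM;	(a size-aligned offset)
 *	(void) nge_chip_peek_cfg(ngep, &ppd);
 *	value = ppd.pp_acc_data;
 *
 * nge_pp_ioctl() validates the space, size and offset before
 * dispatching to one of these routines.
 */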
/*
 * Basic SEEPROM get/set access routine
 *
 * This uses the chip's SEEPROM auto-access method, controlled by the
 * Serial EEPROM Address/Data Registers at 0x504, so the CPU
 * doesn't have to fiddle with the individual bits.
 *
 * The caller should hold <genlock> and *also* have already acquired
 * the right to access the SEEPROM.
 *
 * Return value:
 *	DDI_SUCCESS on success,
 *	DDI_FAILURE on access timeout (after the problem has been
 *	reported via nge_report()).
 *
 * <*dp> is an input to a SEEPROM_CMD_WRITE operation, or an output
 * from a (successful) SEEPROM_CMD_READ.
 */

static int
nge_seeprom_access(nge_t *ngep, uint32_t cmd, nge_regno_t addr, uint16_t *dp)
{
	uint32_t tries;
	nge_ep_cmd cmd_reg;
	nge_ep_data data_reg;

	NGE_TRACE(("nge_seeprom_access($%p, %d, %x, $%p)",
	    (void *)ngep, cmd, addr, (void *)dp));

	ASSERT(mutex_owned(ngep->genlock));

	/*
	 * Check there's no command in progress.
	 *
	 * Note: this *shouldn't* ever find that there is a command
	 * in progress, because we already hold the <genlock> mutex,
	 * and the caller must already have acquired the right to use
	 * the (shared) SEEPROM, so we can't conflict with the chip's
	 * internal firmware or with another thread.  So this is just
	 * a final consistency check: we shouldn't see EITHER the START
	 * bit (command started but not complete) OR the COMPLETE bit
	 * (command completed but not cleared).
	 */
	cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	for (tries = 0; tries < 30; tries++) {
		if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
			break;
		drv_usecwait(10);
		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	}

	/*
	 * This should not happen.  If it does, we have to restart the
	 * eeprom state machine
	 */
	if (tries == 30) {
		cmd_reg.cmd_bits.sts = SEEPROM_READY;
		nge_reg_put32(ngep, NGE_EP_CMD, cmd_reg.cmd_val);
		drv_usecwait(10);
		/*
		 * Poll the status bit to make sure the eeprom is ready
		 */
		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
		for (tries = 0; tries < 30; tries++) {
			if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
				break;
			drv_usecwait(10);
			cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
		}
	}

	/*
	 * Assemble the command ...
	 */
	cmd_reg.cmd_bits.addr = (uint32_t)addr;
	cmd_reg.cmd_bits.cmd = cmd;
	cmd_reg.cmd_bits.sts = 0;

	nge_reg_put32(ngep, NGE_EP_CMD, cmd_reg.cmd_val);

	/*
	 * Poll until the access completes (or times out).
	 */
	cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	for (tries = 0; tries < 30; tries++) {
		if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
			break;
		drv_usecwait(10);
		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	}

	if (tries == 30) {
		nge_report(ngep, NGE_HW_ROM);
		return (DDI_FAILURE);
	}
	switch (cmd) {
	default:
	case SEEPROM_CMD_WRITE_ENABLE:
	case SEEPROM_CMD_ERASE:
	case SEEPROM_CMD_ERALSE_ALL:
	case SEEPROM_CMD_WRITE_DIS:
		break;

	case SEEPROM_CMD_READ:
		data_reg.data_val = nge_reg_get32(ngep, NGE_EP_DATA);
		*dp = data_reg.data_bits.data;
		break;

	case SEEPROM_CMD_WRITE:
		data_reg.data_val = nge_reg_get32(ngep, NGE_EP_DATA);
		data_reg.data_bits.data = *dp;
		nge_reg_put32(ngep, NGE_EP_DATA, data_reg.data_val);
		break;
	}

	return (DDI_SUCCESS);
}


static int
nge_chip_peek_seeprom(nge_t *ngep, nge_peekpoke_t *ppd)
{
	uint16_t data;
	int err;

	err = nge_seeprom_access(ngep, SEEPROM_CMD_READ,
	    ppd->pp_acc_offset, &data);
	ppd->pp_acc_data = data;
	return (err);
}

static int
nge_chip_poke_seeprom(nge_t *ngep, nge_peekpoke_t *ppd)
{
	uint16_t data;
	int err;

	data = ppd->pp_acc_data;
	err = nge_seeprom_access(ngep, SEEPROM_CMD_WRITE,
	    ppd->pp_acc_offset, &data);
	return (err);
}
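
/*
 * Illustrative sketch (not compiled): reading one 16-bit word from
 * the SEEPROM with the auto-access routine above.  The caller must
 * hold <genlock>; the offset is in the same units that
 * nge_chip_peek_seeprom() passes through in pp_acc_offset:
 *
 *	uint16_t word;
 *
 *	mutex_enter(ngep->genlock);
 *	if (nge_seeprom_access(ngep, SEEPROM_CMD_READ, offset, &word)
 *	    != DDI_SUCCESS)
 *		... the access timed out and has been reported ...
 *	mutex_exit(ngep->genlock);
 */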

void
nge_init_dev_spec_param(nge_t *ngep)
{
	nge_dev_spec_param_t *dev_param_p;
	chip_info_t *infop;

	dev_param_p = &ngep->dev_spec_param;
	infop = (chip_info_t *)&ngep->chipinfo;

	switch (infop->device) {
	case DEVICE_ID_NF3_E6:
	case DEVICE_ID_NF3_DF:
	case DEVICE_ID_MCP04_37:
	case DEVICE_ID_MCP04_38:
		dev_param_p->msi = B_FALSE;
		dev_param_p->msi_x = B_FALSE;
		dev_param_p->vlan = B_FALSE;
		dev_param_p->advanced_pm = B_FALSE;
		dev_param_p->tx_pause_frame = B_FALSE;
		dev_param_p->rx_pause_frame = B_FALSE;
		dev_param_p->jumbo = B_FALSE;
		dev_param_p->tx_rx_64byte = B_FALSE;
		dev_param_p->rx_hw_checksum = B_FALSE;
		dev_param_p->tx_hw_checksum = 0;
		dev_param_p->desc_type = DESC_OFFLOAD;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
		dev_param_p->nge_split = NGE_SPLIT_32;
		break;

	case DEVICE_ID_CK804_56:
	case DEVICE_ID_CK804_57:
		dev_param_p->msi = B_TRUE;
		dev_param_p->msi_x = B_TRUE;
		dev_param_p->vlan = B_FALSE;
		dev_param_p->advanced_pm = B_FALSE;
		dev_param_p->tx_pause_frame = B_FALSE;
		dev_param_p->rx_pause_frame = B_TRUE;
		dev_param_p->jumbo = B_TRUE;
		dev_param_p->tx_rx_64byte = B_FALSE;
		dev_param_p->rx_hw_checksum = B_TRUE;
		dev_param_p->tx_hw_checksum = HCKSUM_IPHDRCKSUM;
		dev_param_p->desc_type = DESC_HOT;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_3072;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_3072;
		dev_param_p->nge_split = NGE_SPLIT_96;
		break;

	case DEVICE_ID_MCP61_3EE:
	case DEVICE_ID_MCP61_3EF:
	case DEVICE_ID_MCP51_268:
	case DEVICE_ID_MCP51_269:
		dev_param_p->msi = B_FALSE;
		dev_param_p->msi_x = B_FALSE;
		dev_param_p->vlan = B_FALSE;
		dev_param_p->advanced_pm = B_TRUE;
		dev_param_p->tx_pause_frame = B_FALSE;
		dev_param_p->rx_pause_frame = B_FALSE;
		dev_param_p->jumbo = B_FALSE;
		dev_param_p->tx_rx_64byte = B_TRUE;
		dev_param_p->rx_hw_checksum = B_FALSE;
		dev_param_p->tx_hw_checksum = 0;
		dev_param_p->desc_type = DESC_OFFLOAD;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
		dev_param_p->nge_split = NGE_SPLIT_32;
		break;

	case DEVICE_ID_MCP55_372:
	case DEVICE_ID_MCP55_373:
		dev_param_p->msi = B_TRUE;
		dev_param_p->msi_x = B_TRUE;
		dev_param_p->vlan = B_TRUE;
		dev_param_p->advanced_pm = B_TRUE;
		dev_param_p->tx_pause_frame = B_TRUE;
		dev_param_p->rx_pause_frame = B_TRUE;
		dev_param_p->jumbo = B_TRUE;
		dev_param_p->tx_rx_64byte = B_TRUE;
		dev_param_p->rx_hw_checksum = B_TRUE;
		dev_param_p->tx_hw_checksum = HCKSUM_IPHDRCKSUM;
		dev_param_p->desc_type = DESC_HOT;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_3072;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_3072;
		dev_param_p->nge_split = NGE_SPLIT_96;
		break;

	default:
		dev_param_p->msi = B_FALSE;
		dev_param_p->msi_x = B_FALSE;
		dev_param_p->vlan = B_FALSE;
		dev_param_p->advanced_pm = B_FALSE;
		dev_param_p->tx_pause_frame = B_FALSE;
		dev_param_p->rx_pause_frame = B_FALSE;
		dev_param_p->jumbo = B_FALSE;
		dev_param_p->tx_rx_64byte = B_FALSE;
		dev_param_p->rx_hw_checksum = B_FALSE;
		dev_param_p->tx_hw_checksum = 0;
		dev_param_p->desc_type = DESC_OFFLOAD;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
		dev_param_p->nge_split = NGE_SPLIT_32;
		return;
	}
}
/*
 * Perform first-stage chip (re-)initialisation, using only config-space
 * accesses:
 *
 * + Read the vendor/device/revision/subsystem/cache-line-size registers,
 *   returning the data in the structure pointed to by <infop>.
 */
void nge_chip_cfg_init(nge_t *ngep, chip_info_t *infop, boolean_t reset);
#pragma	no_inline(nge_chip_cfg_init)

void
nge_chip_cfg_init(nge_t *ngep, chip_info_t *infop, boolean_t reset)
{
	uint16_t command;
	ddi_acc_handle_t handle;
	nge_interbus_conf interbus_conf;
	nge_msi_mask_conf msi_mask_conf;
	nge_msi_map_cap_conf cap_conf;

	NGE_TRACE(("nge_chip_cfg_init($%p, $%p, %d)",
	    (void *)ngep, (void *)infop, reset));

	/*
	 * Save PCI cache line size and subsystem vendor ID:
	 *
	 * Read all the config-space registers that characterise the
	 * chip, specifically vendor/device/revision/subsystem vendor
	 * and subsystem device id.
	 */
	handle = ngep->cfg_handle;
	/* Read the vendor information only once */
	if (reset == B_FALSE) {
		infop->command = pci_config_get16(handle,
		    PCI_CONF_COMM);
		infop->vendor = pci_config_get16(handle,
		    PCI_CONF_VENID);
		infop->device = pci_config_get16(handle,
		    PCI_CONF_DEVID);
		infop->subven = pci_config_get16(handle,
		    PCI_CONF_SUBVENID);
		infop->subdev = pci_config_get16(handle,
		    PCI_CONF_SUBSYSID);
		infop->class_code = pci_config_get8(handle,
		    PCI_CONF_BASCLASS);
		infop->revision = pci_config_get8(handle,
		    PCI_CONF_REVID);
		infop->clsize = pci_config_get8(handle,
		    PCI_CONF_CACHE_LINESZ);
		infop->latency = pci_config_get8(handle,
		    PCI_CONF_LATENCY_TIMER);
	}
	if (nge_enable_msi) {
		/* Clear the hidden MSI-off bit to enable MSI support */
		interbus_conf.conf_val = pci_config_get32(handle,
		    PCI_CONF_HT_INTERNAL);
		if ((infop->device == DEVICE_ID_MCP55_373) ||
		    (infop->device == DEVICE_ID_MCP55_372))
			interbus_conf.conf_bits.msix_off = NGE_SET;
		interbus_conf.conf_bits.msi_off = NGE_CLEAR;
		pci_config_put32(handle, PCI_CONF_HT_INTERNAL,
		    interbus_conf.conf_val);

		if ((infop->device == DEVICE_ID_MCP55_373) ||
		    (infop->device == DEVICE_ID_MCP55_372)) {

			/* Clear the vector-off bits for mcp55 */
			msi_mask_conf.msi_mask_conf_val =
			    pci_config_get32(handle, PCI_CONF_HT_MSI_MASK);
			msi_mask_conf.msi_mask_bits.vec0_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec1_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec2_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec3_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec4_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec5_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec6_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec7_off = NGE_CLEAR;
			pci_config_put32(handle, PCI_CONF_HT_MSI_MASK,
			    msi_mask_conf.msi_mask_conf_val);

			/* Enable the MSI mapping */
			cap_conf.msi_map_cap_conf_val =
			    pci_config_get32(handle, PCI_CONF_HT_MSI_MAP_CAP);
			cap_conf.map_cap_conf_bits.map_en = NGE_SET;
			pci_config_put32(handle, PCI_CONF_HT_MSI_MAP_CAP,
			    cap_conf.msi_map_cap_conf_val);
		}
	} else {
		interbus_conf.conf_val = pci_config_get32(handle,
		    PCI_CONF_HT_INTERNAL);
		interbus_conf.conf_bits.msi_off = NGE_SET;
		pci_config_put32(handle, PCI_CONF_HT_INTERNAL,
		    interbus_conf.conf_val);
	}
	command = infop->command | PCI_COMM_MAE;
	command &= ~PCI_COMM_MEMWR_INVAL;
	command |= PCI_COMM_ME;
	pci_config_put16(handle, PCI_CONF_COMM, command);
	pci_config_put16(handle, PCI_CONF_STAT, ~0);

}

int
nge_chip_stop(nge_t *ngep, boolean_t fault)
{
	int err;
	uint32_t reg_val;
	uint32_t tries;
	nge_mintr_src mintr_src;
	nge_mii_cs mii_cs;
	nge_rx_poll rx_poll;
	nge_tx_poll tx_poll;
	nge_rx_en rx_en;
	nge_tx_en tx_en;
	nge_tx_sta tx_sta;
	nge_rx_sta rx_sta;
	nge_mode_cntl mode;
	nge_pmu_cntl2 pmu_cntl2;

	NGE_TRACE(("nge_chip_stop($%p, %d)", (void *)ngep, fault));

	err = DDI_SUCCESS;

	/* Clear any pending PHY interrupt */
	mintr_src.src_val = nge_reg_get8(ngep, NGE_MINTR_SRC);
	nge_reg_put8(ngep, NGE_MINTR_SRC, mintr_src.src_val);

	/* Mask all interrupts */
	reg_val = nge_reg_get32(ngep, NGE_INTR_MASK);
	reg_val &= ~NGE_INTR_ALL_EN;
	nge_reg_put32(ngep, NGE_INTR_MASK, reg_val);

	/* Disable auto-polling of phy */
	mii_cs.cs_val = nge_reg_get32(ngep, NGE_MII_CS);
	mii_cs.cs_bits.ap_en = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_MII_CS, mii_cs.cs_val);

	/* Reset buffer management & DMA */
	mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
	mode.mode_bits.dma_dis = NGE_SET;
	mode.mode_bits.desc_type = ngep->desc_mode;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val);

	for (tries = 0; tries < 10000; tries++) {
		drv_usecwait(10);
		mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
		if (mode.mode_bits.dma_status == NGE_SET)
			break;
	}
	if (tries == 10000) {
		ngep->nge_chip_state = NGE_CHIP_FAULT;
		return (DDI_FAILURE);
	}

	/*
	 * For mcp55, bits 1:31 of NGE_RX_EN and NGE_TX_EN are defined
	 * to be used by the SMU.  PXE versions newer than 527 support
	 * the SMU and set bit 24 of NGE_RX_EN/NGE_TX_EN when leaving
	 * PXE, to prevent the MAC from winning arbitration to the main
	 * transmit/receive channels.
	 */
	if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {

		/* Disable rx's machine */
		nge_reg_put32(ngep, NGE_RX_EN, 0x0);

		/* Disable tx's machine */
		nge_reg_put32(ngep, NGE_TX_EN, 0x0);
	} else {

		/* Disable rx's machine */
		rx_en.val = nge_reg_get8(ngep, NGE_RX_EN);
		rx_en.bits.rx_en = NGE_CLEAR;
		nge_reg_put8(ngep, NGE_RX_EN, rx_en.val);


		/* Disable tx's machine */
		tx_en.val = nge_reg_get8(ngep, NGE_TX_EN);
		tx_en.bits.tx_en = NGE_CLEAR;
		nge_reg_put8(ngep, NGE_TX_EN, tx_en.val);
	}

	/*
	 * Clear the status of the tx state machine
	 * and make sure the tx channel is idle
	 */
	tx_sta.sta_val = nge_reg_get32(ngep, NGE_TX_STA);
	for (tries = 0; tries < 1000; tries++) {
		if (tx_sta.sta_bits.tx_chan_sta == NGE_CLEAR)
			break;
		drv_usecwait(10);
		tx_sta.sta_val = nge_reg_get32(ngep, NGE_TX_STA);
	}
	if (tries == 1000) {
		ngep->nge_chip_state = NGE_CHIP_FAULT;
		return (DDI_FAILURE);
	}
	nge_reg_put32(ngep, NGE_TX_STA, tx_sta.sta_val);

	/*
	 * Clear the status of the rx state machine
	 * and make sure the rx channel is idle
	 */
	rx_sta.sta_val = nge_reg_get32(ngep, NGE_RX_STA);
	for (tries = 0; tries < 1000; tries++) {
		if (rx_sta.sta_bits.rx_chan_sta == NGE_CLEAR)
			break;
		drv_usecwait(10);
		rx_sta.sta_val = nge_reg_get32(ngep, NGE_RX_STA);
	}
	if (tries == 1000) {
		ngep->nge_chip_state = NGE_CHIP_FAULT;
		return (DDI_FAILURE);
	}
	nge_reg_put32(ngep, NGE_RX_STA, rx_sta.sta_val);

	/* Disable auto-poll of rx's state machine */
	rx_poll.poll_val = nge_reg_get32(ngep, NGE_RX_POLL);
	rx_poll.poll_bits.rpen = NGE_CLEAR;
	rx_poll.poll_bits.rpi = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_RX_POLL, rx_poll.poll_val);

	/* Disable auto-polling of tx's state machine */
	tx_poll.poll_val = nge_reg_get32(ngep, NGE_TX_POLL);
	tx_poll.poll_bits.tpen = NGE_CLEAR;
	tx_poll.poll_bits.tpi = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_TX_POLL, tx_poll.poll_val);

	/* Restore buffer management */
	mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
	mode.mode_bits.bm_reset = NGE_SET;
	mode.mode_bits.tx_rcom_en = NGE_SET;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val);

	if (ngep->dev_spec_param.advanced_pm) {

		nge_reg_put32(ngep, NGE_PMU_CIDLE_LIMIT, 0);
		nge_reg_put32(ngep, NGE_PMU_DIDLE_LIMIT, 0);

		pmu_cntl2.cntl2_val = nge_reg_get32(ngep, NGE_PMU_CNTL2);
		pmu_cntl2.cntl2_bits.cidle_timer = NGE_CLEAR;
		pmu_cntl2.cntl2_bits.didle_timer = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_PMU_CNTL2, pmu_cntl2.cntl2_val);
	}
	if (fault)
		ngep->nge_chip_state = NGE_CHIP_FAULT;
	else
		ngep->nge_chip_state = NGE_CHIP_STOPPED;

	return (err);
}

static void
nge_rx_setup(nge_t *ngep)
{
	uint64_t desc_addr;
	nge_rxtx_dlen dlen;
	nge_rx_poll rx_poll;

	/*
	 * Fill in the address and length of the rx descriptors
	 */
	desc_addr = ngep->recv->desc.cookie.dmac_laddress;
	nge_reg_put32(ngep, NGE_RX_DADR, desc_addr);
	nge_reg_put32(ngep, NGE_RX_DADR_HI, desc_addr >> 32);
	dlen.dlen_val = nge_reg_get32(ngep, NGE_RXTX_DLEN);
	dlen.dlen_bits.rdlen = ngep->recv->desc.nslots - 1;
	nge_reg_put32(ngep, NGE_RXTX_DLEN, dlen.dlen_val);

	rx_poll.poll_val = nge_reg_get32(ngep, NGE_RX_POLL);
	rx_poll.poll_bits.rpi = RX_POLL_INTV_1G;
	rx_poll.poll_bits.rpen = NGE_SET;
	nge_reg_put32(ngep, NGE_RX_POLL, rx_poll.poll_val);
}

static void
nge_tx_setup(nge_t *ngep)
{
	uint64_t desc_addr;
	nge_rxtx_dlen dlen;

	/*
	 * Fill in the address and length of the tx descriptors
	 */
	desc_addr = ngep->send->desc.cookie.dmac_laddress;
	nge_reg_put32(ngep, NGE_TX_DADR, desc_addr);
	nge_reg_put32(ngep, NGE_TX_DADR_HI, desc_addr >> 32);
	dlen.dlen_val = nge_reg_get32(ngep, NGE_RXTX_DLEN);
	dlen.dlen_bits.tdlen = ngep->send->desc.nslots - 1;
	nge_reg_put32(ngep, NGE_RXTX_DLEN, dlen.dlen_val);
}

static int
nge_buff_setup(nge_t *ngep)
{
	nge_mode_cntl mode_cntl;
	nge_dev_spec_param_t *dev_param_p;

	dev_param_p = &ngep->dev_spec_param;

	/*
	 * Configure the Rx & Tx buffers
	 */
	nge_rx_setup(ngep);
	nge_tx_setup(ngep);

	/*
	 * Configure buffer attributes
	 */
	mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);

	/*
	 * Enable DMA access requests
	 */
	mode_cntl.mode_bits.dma_dis = NGE_CLEAR;

	/*
	 * Enable buffer management
	 */
	mode_cntl.mode_bits.bm_reset = NGE_CLEAR;

	/*
	 * Select the descriptor type
	 */
	mode_cntl.mode_bits.desc_type = ngep->desc_mode;

	/*
	 * Support receive hardware checksum
	 */
	if (dev_param_p->rx_hw_checksum) {
		mode_cntl.mode_bits.rx_sum_en = NGE_SET;
	} else
		mode_cntl.mode_bits.rx_sum_en = NGE_CLEAR;

	/*
	 * Disable Tx PRD coarse update
	 */
	mode_cntl.mode_bits.tx_prd_cu_en = NGE_CLEAR;

	/*
	 * Disable 64-byte access
	 */
	mode_cntl.mode_bits.w64_dis = NGE_SET;

	/*
	 * Skipping Rx error frames is not supported; enabling it
	 * breaks jumbo frame support.
	 */
	mode_cntl.mode_bits.rx_filter_en = NGE_CLEAR;

	/*
	 * Hot mode is not supported yet
	 */
	mode_cntl.mode_bits.resv15 = NGE_CLEAR;

	if (dev_param_p->vlan) {
		/* Disable vlan stripping for devices which support vlan */
		mode_cntl.mode_bits.vlan_strip = NGE_CLEAR;

		/* Disable vlan insertion for devices which support vlan */
		mode_cntl.mode_bits.vlan_ins = NGE_CLEAR;
	}

	if (dev_param_p->tx_rx_64byte) {

		/* Set the maximum TX PRD fetch size to 64 bytes */
		mode_cntl.mode_bits.tx_fetch_prd = NGE_SET;

		/* Set the maximum RX PRD fetch size to 64 bytes */
		mode_cntl.mode_bits.rx_fetch_prd = NGE_SET;
	}
	/*
	 * Upload Rx data as it arrives, rather than waiting for a full frame
	 */
	mode_cntl.mode_bits.resv16 = NGE_CLEAR;

	/*
	 * Normal HOT table accesses
	 */
	mode_cntl.mode_bits.resv17 = NGE_CLEAR;

	/*
	 * Normal HOT buffer requesting
	 */
	mode_cntl.mode_bits.resv18 = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);

	/*
	 * Signal the controller to check for new Rx descriptors
	 */
	mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
	mode_cntl.mode_bits.rxdm = NGE_SET;
	mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);


	return (DDI_SUCCESS);
}

/*
 * When the chipset resets, it cannot restore the original
 * mac address to the mac address registers.
 *
 * When the driver is detached, this function writes the original
 * mac address back to the mac address registers.
 */

void
nge_restore_mac_addr(nge_t *ngep)
{
	uint32_t mac_addr;

	mac_addr = (uint32_t)ngep->chipinfo.hw_mac_addr;
	nge_reg_put32(ngep, NGE_UNI_ADDR0, mac_addr);
	mac_addr = (uint32_t)(ngep->chipinfo.hw_mac_addr >> 32);
	nge_reg_put32(ngep, NGE_UNI_ADDR1, mac_addr);
}

int
nge_chip_reset(nge_t *ngep)
{
	int err;
	uint8_t i;
	uint32_t regno;
	uint64_t mac;
	nge_uni_addr1 uaddr1;
	nge_cp_cntl ee_cntl;
	nge_soft_misc soft_misc;
	nge_pmu_cntl0 pmu_cntl0;
	nge_pmu_cntl2 pmu_cntl2;
	nge_pm_cntl2 pm_cntl2;
	const nge_ksindex_t *ksip;

	NGE_TRACE(("nge_chip_reset($%p)", (void *)ngep));

	/*
	 * Clear the statistics by reading the statistics registers
	 */
	for (ksip = nge_statistics; ksip->name != NULL; ++ksip) {
		regno = KS_BASE + ksip->index * sizeof (uint32_t);
		(void) nge_reg_get32(ngep, regno);
	}

	/*
	 * Setup seeprom control
	 */
	ee_cntl.cntl_val = nge_reg_get32(ngep, NGE_EP_CNTL);
	ee_cntl.cntl_bits.clkdiv = EEPROM_CLKDIV;
	ee_cntl.cntl_bits.rom_size = EEPROM_32K;
	ee_cntl.cntl_bits.word_wid = ACCESS_16BIT;
	ee_cntl.cntl_bits.wait_slots = EEPROM_WAITCLK;
	nge_reg_put32(ngep, NGE_EP_CNTL, ee_cntl.cntl_val);

	/*
	 * Read the unicast mac address table
	 */
	if (ngep->nge_chip_state == NGE_CHIP_INITIAL) {
		uaddr1.addr_val = nge_reg_get32(ngep, NGE_UNI_ADDR1);
		mac = uaddr1.addr_bits.addr;
		mac <<= 32;
		mac |= nge_reg_get32(ngep, NGE_UNI_ADDR0);
		if (mac != 0ULL && mac != ~0ULL) {
			ngep->chipinfo.hw_mac_addr = mac;
			for (i = ETHERADDRL; i-- != 0; ) {
				ngep->chipinfo.vendor_addr.addr[i] =
				    (uchar_t)mac;
				ngep->cur_uni_addr.addr[i] = (uchar_t)mac;
				mac >>= 8;
			}
			ngep->chipinfo.vendor_addr.set = 1;
		}
	}
	pci_config_put8(ngep->cfg_handle, PCI_CONF_CACHE_LINESZ,
	    ngep->chipinfo.clsize);
	pci_config_put8(ngep->cfg_handle, PCI_CONF_LATENCY_TIMER,
	    ngep->chipinfo.latency);


	if (ngep->dev_spec_param.advanced_pm) {

		/* Program software misc register */
		soft_misc.misc_val = nge_reg_get32(ngep, NGE_SOFT_MISC);
		soft_misc.misc_bits.rx_clk_vx_rst = NGE_SET;
		soft_misc.misc_bits.tx_clk_vx_rst = NGE_SET;
		soft_misc.misc_bits.clk12m_vx_rst = NGE_SET;
		soft_misc.misc_bits.fpci_clk_vx_rst = NGE_SET;
		soft_misc.misc_bits.rx_clk_vc_rst = NGE_SET;
		soft_misc.misc_bits.tx_clk_vc_rst = NGE_SET;
		soft_misc.misc_bits.fs_clk_vc_rst = NGE_SET;
		soft_misc.misc_bits.rst_ex_m2pintf = NGE_SET;
		nge_reg_put32(ngep, NGE_SOFT_MISC, soft_misc.misc_val);

		/* wait for 32 us */
		drv_usecwait(32);

		soft_misc.misc_val = nge_reg_get32(ngep, NGE_SOFT_MISC);
		soft_misc.misc_bits.rx_clk_vx_rst = NGE_CLEAR;
		soft_misc.misc_bits.tx_clk_vx_rst = NGE_CLEAR;
		soft_misc.misc_bits.clk12m_vx_rst = NGE_CLEAR;
		soft_misc.misc_bits.fpci_clk_vx_rst = NGE_CLEAR;
		soft_misc.misc_bits.rx_clk_vc_rst = NGE_CLEAR;
		soft_misc.misc_bits.tx_clk_vc_rst = NGE_CLEAR;
		soft_misc.misc_bits.fs_clk_vc_rst = NGE_CLEAR;
		soft_misc.misc_bits.rst_ex_m2pintf = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_SOFT_MISC, soft_misc.misc_val);

		/* Program PMU registers */
		pmu_cntl0.cntl0_val = nge_reg_get32(ngep, NGE_PMU_CNTL0);
		pmu_cntl0.cntl0_bits.core_spd10_fp =
		    NGE_PMU_CORE_SPD10_BUSY;
		pmu_cntl0.cntl0_bits.core_spd10_idle =
		    NGE_PMU_CORE_SPD10_IDLE;
		pmu_cntl0.cntl0_bits.core_spd100_fp =
		    NGE_PMU_CORE_SPD100_BUSY;
		pmu_cntl0.cntl0_bits.core_spd100_idle =
		    NGE_PMU_CORE_SPD100_IDLE;
		pmu_cntl0.cntl0_bits.core_spd1000_fp =
		    NGE_PMU_CORE_SPD1000_BUSY;
		pmu_cntl0.cntl0_bits.core_spd1000_idle =
		    NGE_PMU_CORE_SPD100_IDLE;
		pmu_cntl0.cntl0_bits.core_spd10_idle =
		    NGE_PMU_CORE_SPD10_IDLE;
		nge_reg_put32(ngep, NGE_PMU_CNTL0, pmu_cntl0.cntl0_val);

		/* Set the core idle limit value */
		nge_reg_put32(ngep, NGE_PMU_CIDLE_LIMIT,
		    NGE_PMU_CIDLE_LIMIT_DEF);

		/* Set the device idle limit value */
		nge_reg_put32(ngep, NGE_PMU_DIDLE_LIMIT,
		    NGE_PMU_DIDLE_LIMIT_DEF);

		/* Enable the core/device idle timer in PMU control 2 */
		pmu_cntl2.cntl2_val = nge_reg_get32(ngep, NGE_PMU_CNTL2);
		pmu_cntl2.cntl2_bits.cidle_timer = NGE_SET;
		pmu_cntl2.cntl2_bits.didle_timer = NGE_SET;
		pmu_cntl2.cntl2_bits.core_enable = NGE_SET;
		pmu_cntl2.cntl2_bits.dev_enable = NGE_SET;
		nge_reg_put32(ngep, NGE_PMU_CNTL2, pmu_cntl2.cntl2_val);
	}
	/*
	 * Stop the chipset and clear buffer management
	 */
	err = nge_chip_stop(ngep, B_FALSE);
	if (err == DDI_FAILURE)
		return (err);
	/*
	 * Clear the power state bits for phy since interface no longer
	 * works after rebooting from Windows on a multi-boot machine
	 */
	if (ngep->chipinfo.device == DEVICE_ID_MCP51_268 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP51_269 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_372 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP61_3EE ||
	    ngep->chipinfo.device == DEVICE_ID_MCP61_3EF) {

		pm_cntl2.cntl_val = nge_reg_get32(ngep, NGE_PM_CNTL2);
		/* bring phy out of coma mode */
		pm_cntl2.cntl_bits.phy_coma_set = NGE_CLEAR;
		/* disable auto reset coma bits */
		pm_cntl2.cntl_bits.resv4 = NGE_CLEAR;
		/* restore power to gated clocks */
		pm_cntl2.cntl_bits.resv8_11 = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_PM_CNTL2, pm_cntl2.cntl_val);
	}

	/*
	 * Reset the external phy
	 */
	if (!nge_phy_reset(ngep))
		return (DDI_FAILURE);
	ngep->nge_chip_state = NGE_CHIP_RESET;
	return (DDI_SUCCESS);
}
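
/*
 * Rough sketch (not compiled) of how the chip-level routines in this
 * file fit together; the actual call sites live elsewhere in the
 * driver and hold <genlock> around these calls:
 *
 *	(void) nge_chip_reset(ngep);	chip state -> NGE_CHIP_RESET
 *	(void) nge_chip_start(ngep);	chip state -> NGE_CHIP_RUNNING
 *	...
 *	(void) nge_chip_stop(ngep, B_FALSE);	-> NGE_CHIP_STOPPED
 *
 * nge_chip_reset() itself calls nge_chip_stop(ngep, B_FALSE) to quiesce
 * DMA and buffer management before the external PHY is reset.
 */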
int
nge_chip_start(nge_t *ngep)
{
	int err;
	nge_itc itc;
	nge_tx_cntl tx_cntl;
	nge_rx_cntrl0 rx_cntl0;
	nge_rx_cntl1 rx_cntl1;
	nge_tx_en tx_en;
	nge_rx_en rx_en;
	nge_mii_cs mii_cs;
	nge_swtr_cntl swtr_cntl;
	nge_rx_fifo_wm rx_fifo;
	nge_intr_mask intr_mask;
	nge_mintr_mask mintr_mask;
	nge_dev_spec_param_t *dev_param_p;

	NGE_TRACE(("nge_chip_start($%p)", (void *)ngep));

	/*
	 * Setup buffer management
	 */
	err = nge_buff_setup(ngep);
	if (err == DDI_FAILURE)
		return (err);

	dev_param_p = &ngep->dev_spec_param;

	/*
	 * Enable PHY auto-polling
	 */
	mii_cs.cs_val = nge_reg_get32(ngep, NGE_MII_CS);
	mii_cs.cs_bits.ap_paddr = ngep->phy_xmii_addr;
	mii_cs.cs_bits.ap_en = NGE_SET;
	mii_cs.cs_bits.ap_intv = MII_POLL_INTV;
	nge_reg_put32(ngep, NGE_MII_CS, mii_cs.cs_val);

	/*
	 * Setup link
	 */
	(*ngep->physops->phys_update)(ngep);

	/*
	 * Configure the tx parameters
	 */
	tx_cntl.cntl_val = nge_reg_get32(ngep, NGE_TX_CNTL);
	if (dev_param_p->tx_pause_frame)
		tx_cntl.cntl_bits.paen = NGE_SET;
	else
		tx_cntl.cntl_bits.paen = NGE_CLEAR;
	tx_cntl.cntl_bits.retry_en = NGE_SET;
	tx_cntl.cntl_bits.pad_en = NGE_SET;
	tx_cntl.cntl_bits.fappend_en = NGE_SET;
	tx_cntl.cntl_bits.two_def_en = NGE_SET;
	tx_cntl.cntl_bits.max_retry = 15;
	tx_cntl.cntl_bits.burst_en = NGE_CLEAR;
	tx_cntl.cntl_bits.uflo_err_mask = NGE_CLEAR;
	tx_cntl.cntl_bits.tlcol_mask = NGE_CLEAR;
	tx_cntl.cntl_bits.lcar_mask = NGE_CLEAR;
	tx_cntl.cntl_bits.def_mask = NGE_CLEAR;
	tx_cntl.cntl_bits.exdef_mask = NGE_SET;
	tx_cntl.cntl_bits.lcar_mask = NGE_SET;
	tx_cntl.cntl_bits.tlcol_mask = NGE_SET;
	tx_cntl.cntl_bits.uflo_err_mask = NGE_SET;
	tx_cntl.cntl_bits.jam_seq_en = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_TX_CNTL, tx_cntl.cntl_val);


	/*
	 * Configure the parameters of the rx state machine.
	 * Enable the following:
	 *	1). Pad Strip
	 *	2). FCS Relay
	 *	3). Pause
	 *	4). Address filter
	 *	5). Runt Packet receive
	 *	6). Broadcast
	 *	7). Receive Deferral
	 *
	 * Disable the following parameters to decrease
	 * the number of interrupts:
	 *	1). Runt Interrupt
	 *	2). Rx's Late Collision interrupt
	 *	3). Rx's Max length Error Interrupt
	 *	4). Rx's Length Field error Interrupt
	 *	5). Rx's FCS error interrupt
	 *	6). Rx's overflow error interrupt
	 *	7). Rx's Frame alignment error interrupt
	 */
	rx_cntl0.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);
	rx_cntl0.cntl_bits.padsen = NGE_CLEAR;
	rx_cntl0.cntl_bits.fcsren = NGE_CLEAR;
	if (dev_param_p->rx_pause_frame)
		rx_cntl0.cntl_bits.paen = NGE_SET;
	else
		rx_cntl0.cntl_bits.paen = NGE_CLEAR;
	rx_cntl0.cntl_bits.lben = NGE_CLEAR;
	rx_cntl0.cntl_bits.afen = NGE_SET;
	rx_cntl0.cntl_bits.runten = NGE_CLEAR;
	rx_cntl0.cntl_bits.brdis = NGE_CLEAR;
	rx_cntl0.cntl_bits.rdfen = NGE_CLEAR;
	rx_cntl0.cntl_bits.runtm = NGE_CLEAR;
	rx_cntl0.cntl_bits.slfb = NGE_CLEAR;
	rx_cntl0.cntl_bits.rlcolm = NGE_CLEAR;
	rx_cntl0.cntl_bits.maxerm = NGE_CLEAR;
	rx_cntl0.cntl_bits.lferm = NGE_CLEAR;
	rx_cntl0.cntl_bits.crcm = NGE_CLEAR;
	rx_cntl0.cntl_bits.ofolm = NGE_CLEAR;
	rx_cntl0.cntl_bits.framerm = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val);

	/*
	 * Configure the watermarks for the rx state machine
	 */
	rx_fifo.wm_val = nge_reg_get32(ngep, NGE_RX_FIFO_WM);
	rx_fifo.wm_bits.data_hwm = ngep->rx_datahwm;
	rx_fifo.wm_bits.prd_lwm = ngep->rx_prdlwm;
	rx_fifo.wm_bits.prd_hwm = ngep->rx_prdhwm;
	nge_reg_put32(ngep, NGE_RX_FIFO_WM, rx_fifo.wm_val);

	/*
	 * Configure the defer time slot for the rx state machine
	 */
	nge_reg_put8(ngep, NGE_RX_DEf, ngep->rx_def);

	/*
	 * Configure the maximum rx packet length
	 */
	rx_cntl1.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL1);
	rx_cntl1.cntl_bits.length = ngep->max_sdu;
	nge_reg_put32(ngep, NGE_RX_CNTL1, rx_cntl1.cntl_val);
	/*
	 * Enable the tx state machine
	 */
	tx_en.val = nge_reg_get8(ngep, NGE_TX_EN);
	tx_en.bits.tx_en = NGE_SET;
	nge_reg_put8(ngep, NGE_TX_EN, tx_en.val);

	/*
	 * Enable the rx state machine
	 */
	rx_en.val = nge_reg_get8(ngep, NGE_RX_EN);
	rx_en.bits.rx_en = NGE_SET;
	nge_reg_put8(ngep, NGE_RX_EN, rx_en.val);

	itc.itc_val = nge_reg_get32(ngep, NGE_SWTR_ITC);
	itc.itc_bits.sw_intv = ngep->sw_intr_intv;
	nge_reg_put32(ngep, NGE_SWTR_ITC, itc.itc_val);

	swtr_cntl.ctrl_val = nge_reg_get8(ngep, NGE_SWTR_CNTL);
	swtr_cntl.cntl_bits.sten = NGE_SET;
	swtr_cntl.cntl_bits.stren = NGE_SET;
	nge_reg_put32(ngep, NGE_SWTR_CNTL, swtr_cntl.ctrl_val);

	/*
	 * Disable all mii read/write operation interrupts
	 */
	mintr_mask.mask_val = nge_reg_get8(ngep, NGE_MINTR_MASK);
	mintr_mask.mask_bits.mrei = NGE_CLEAR;
	mintr_mask.mask_bits.mcc2 = NGE_CLEAR;
	mintr_mask.mask_bits.mcc1 = NGE_CLEAR;
	mintr_mask.mask_bits.mapi = NGE_SET;
	mintr_mask.mask_bits.mpdi = NGE_SET;
	nge_reg_put8(ngep, NGE_MINTR_MASK, mintr_mask.mask_val);

	/*
	 * Enable all interrupt events
	 */
	intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
	intr_mask.mask_bits.reint = NGE_SET;
	intr_mask.mask_bits.rcint = NGE_SET;
	intr_mask.mask_bits.miss = NGE_SET;
	intr_mask.mask_bits.teint = NGE_CLEAR;
	intr_mask.mask_bits.tcint = NGE_SET;
	intr_mask.mask_bits.stint = NGE_CLEAR;
	intr_mask.mask_bits.mint = NGE_CLEAR;
	intr_mask.mask_bits.rfint = NGE_CLEAR;
	intr_mask.mask_bits.tfint = NGE_CLEAR;
	intr_mask.mask_bits.feint = NGE_SET;
	intr_mask.mask_bits.resv10 = NGE_CLEAR;
	intr_mask.mask_bits.resv11 = NGE_CLEAR;
	intr_mask.mask_bits.resv12 = NGE_CLEAR;
	intr_mask.mask_bits.resv13 = NGE_CLEAR;
	intr_mask.mask_bits.phyint = NGE_CLEAR;
	ngep->intr_masks = intr_mask.mask_val;
	nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
	ngep->nge_chip_state = NGE_CHIP_RUNNING;
	return (DDI_SUCCESS);
}

/*
 * nge_chip_sync() -- program the chip with the unicast MAC address,
 * the multicast hash table, and the required level of promiscuity.
 */
void
nge_chip_sync(nge_t *ngep)
{
	uint8_t i;
	uint64_t macaddr;
	uint64_t mul_addr;
	uint64_t mul_mask;
	nge_rx_cntrl0 rx_cntl;
	nge_uni_addr1 uni_adr1;

	NGE_TRACE(("nge_chip_sync($%p)", (void *)ngep));

	macaddr = 0x0ull;
	mul_addr = 0x0ull;
	mul_mask = 0x0ull;
	rx_cntl.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);

	if (ngep->promisc) {
		rx_cntl.cntl_bits.afen = NGE_CLEAR;
		rx_cntl.cntl_bits.brdis = NGE_SET;
	} else {
		rx_cntl.cntl_bits.afen = NGE_SET;
		rx_cntl.cntl_bits.brdis = NGE_CLEAR;
	}

	/*
	 * Transform the MAC address from host to chip format, the unicast
	 * MAC address(es) ...
	 */
	for (i = ETHERADDRL, macaddr = 0ull; i != 0; --i) {
		macaddr |= ngep->cur_uni_addr.addr[i-1];
		macaddr <<= (i > 1) ? 8 : 0;
	}

	nge_reg_put32(ngep, NGE_UNI_ADDR0, (uint32_t)macaddr);
	macaddr = macaddr >> 32;
	uni_adr1.addr_val = nge_reg_get32(ngep, NGE_UNI_ADDR1);
	uni_adr1.addr_bits.addr = (uint16_t)macaddr;
	uni_adr1.addr_bits.resv16_31 = (uint16_t)0;
	nge_reg_put32(ngep, NGE_UNI_ADDR1, uni_adr1.addr_val);

	/*
	 * Reprogram the multicast address table ...
	 */
	for (i = ETHERADDRL, mul_addr = 0ull; i != 0; --i) {
		mul_addr |= ngep->cur_mul_addr.addr[i-1];
		mul_addr <<= (i > 1) ? 8 : 0;
		mul_mask |= ngep->cur_mul_mask.addr[i-1];
		mul_mask <<= (i > 1) ? 8 : 0;
	}
	nge_reg_put32(ngep, NGE_MUL_ADDR0, (uint32_t)mul_addr);
	mul_addr >>= 32;
	nge_reg_put32(ngep, NGE_MUL_ADDR1, mul_addr);
	nge_reg_put32(ngep, NGE_MUL_MASK, (uint32_t)mul_mask);
	mul_mask >>= 32;
	nge_reg_put32(ngep, NGE_MUL_MASK1, mul_mask);
	/*
	 * Set or clear the PROMISCUOUS mode bit
	 */
	nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl.cntl_val);
	/*
	 * For internal PHY loopback the link will not come up, so we
	 * need to sync the mac modes directly here.
	 */
	if (ngep->param_loop_mode == NGE_LOOP_INTERNAL_PHY)
		nge_sync_mac_modes(ngep);
}
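
/*
 * Worked example of the address packing done by nge_chip_sync() above:
 * for a hypothetical address 00:11:22:33:44:55 stored as
 * cur_uni_addr.addr[0..5] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}, the
 * loop builds macaddr = 0x0000554433221100, so the registers end up as
 *
 *	NGE_UNI_ADDR0 = 0x33221100	(addr[3..0])
 *	NGE_UNI_ADDR1 = 0x5544		(addr[5..4] in the low 16 bits)
 *
 * i.e. the byte order is reversed with respect to the wire format.
 */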

static void
nge_chip_err(nge_t *ngep)
{
	nge_reg010 reg010_ins;
	nge_sw_statistics_t *psw_stat;
	nge_intr_mask intr_mask;

	NGE_TRACE(("nge_chip_err($%p)", (void *)ngep));

	psw_stat = (nge_sw_statistics_t *)&ngep->statistics.sw_statistics;
	reg010_ins.reg010_val = nge_reg_get32(ngep, NGE_REG010);
	if (reg010_ins.reg010_bits.resv0)
		psw_stat->fe_err.tso_err_mss++;

	if (reg010_ins.reg010_bits.resv1)
		psw_stat->fe_err.tso_dis++;

	if (reg010_ins.reg010_bits.resv2)
		psw_stat->fe_err.tso_err_nosum++;

	if (reg010_ins.reg010_bits.resv3)
		psw_stat->fe_err.tso_err_hov++;

	if (reg010_ins.reg010_bits.resv4)
		psw_stat->fe_err.tso_err_huf++;

	if (reg010_ins.reg010_bits.resv5)
		psw_stat->fe_err.tso_err_l2++;

	if (reg010_ins.reg010_bits.resv6)
		psw_stat->fe_err.tso_err_ip++;

	if (reg010_ins.reg010_bits.resv7)
		psw_stat->fe_err.tso_err_l4++;

	if (reg010_ins.reg010_bits.resv8)
		psw_stat->fe_err.tso_err_tcp++;

	if (reg010_ins.reg010_bits.resv9)
		psw_stat->fe_err.hsum_err_ip++;

	if (reg010_ins.reg010_bits.resv10)
		psw_stat->fe_err.hsum_err_l4++;

	if (reg010_ins.reg010_val != 0) {

		/*
		 * Fatal error is triggered by malformed driver commands.
		 * Disable unless debugging.
		 */
		intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
		intr_mask.mask_bits.feint = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
		ngep->intr_masks = intr_mask.mask_val;

	}
}

static void
nge_sync_mac_modes(nge_t *ngep)
{
	nge_tx_def tx_def;
	nge_tx_fifo_wm tx_fifo;
	nge_bkoff_cntl bk_cntl;
	nge_mac2phy m2p;
	nge_rx_cntrl0 rx_cntl0;
	nge_dev_spec_param_t *dev_param_p;

	dev_param_p = &ngep->dev_spec_param;

	tx_def.def_val = nge_reg_get32(ngep, NGE_TX_DEF);
	m2p.m2p_val = nge_reg_get32(ngep, NGE_MAC2PHY);
	tx_fifo.wm_val = nge_reg_get32(ngep, NGE_TX_FIFO_WM);
	bk_cntl.cntl_val = nge_reg_get32(ngep, NGE_BKOFF_CNTL);
	bk_cntl.bkoff_bits.rseed = BKOFF_RSEED;
	switch (ngep->param_link_speed) {
	case 10:
		m2p.m2p_bits.speed = low_speed;
		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
		if (ngep->phy_mode == RGMII_IN) {
			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_10_100;
			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
		} else {
			tx_def.def_bits.if_def = TX_TIFG_MII;
			tx_def.def_bits.ifg2_def = TX_IFG2_MII;
		}
		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_MII;
		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_MII;
		break;

	case 100:
		m2p.m2p_bits.speed = fast_speed;
		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
		if (ngep->phy_mode == RGMII_IN) {
			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_10_100;
			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
		} else {
			tx_def.def_bits.if_def = TX_TIFG_MII;
			tx_def.def_bits.ifg2_def = TX_IFG2_MII;
		}
		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_MII;
		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_MII;
		break;

	case 1000:
		m2p.m2p_bits.speed = giga_speed;
		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
		if (ngep->param_link_duplex == LINK_DUPLEX_FULL) {
			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_1000;
			tx_def.def_bits.if_def = TX_IFG_RGMII_1000_FD;
		} else {
			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_1000;
			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
		}

		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_GMII;
		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_GMII;
		break;
	}

	if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
		m2p.m2p_bits.phyintr = NGE_CLEAR;
		m2p.m2p_bits.phyintrlvl = NGE_CLEAR;
	}
	if (ngep->param_link_duplex == LINK_DUPLEX_HALF) {
		m2p.m2p_bits.hdup_en = NGE_SET;
	} else
		m2p.m2p_bits.hdup_en = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_MAC2PHY, m2p.m2p_val);
	nge_reg_put32(ngep, NGE_TX_DEF, tx_def.def_val);

	tx_fifo.wm_bits.data_lwm = TX_FIFO_DATA_LWM;
	tx_fifo.wm_bits.prd_lwm = TX_FIFO_PRD_LWM;
	tx_fifo.wm_bits.uprd_hwm = TX_FIFO_PRD_HWM;
	tx_fifo.wm_bits.fb_wm = TX_FIFO_TBFW;
	nge_reg_put32(ngep, NGE_TX_FIFO_WM, tx_fifo.wm_val);

	nge_reg_put32(ngep, NGE_BKOFF_CNTL, bk_cntl.cntl_val);

	rx_cntl0.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);
	if (ngep->param_link_rx_pause && dev_param_p->rx_pause_frame)
		rx_cntl0.cntl_bits.paen = NGE_SET;
	else
		rx_cntl0.cntl_bits.paen = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val);
}

/*
 * Handler for hardware link state change.
 *
 * When this routine is called, the hardware link state has changed
 * and the new state is reflected in the param_* variables.  Here
 * we must update the softstate, reprogram the MAC to match, and
 * record the change in the log and/or on the console.
 */
static void
nge_factotum_link_handler(nge_t *ngep)
{
	/*
	 * Update the s/w link_state
	 */
	if (ngep->param_link_up)
		ngep->link_state = LINK_STATE_UP;
	else
		ngep->link_state = LINK_STATE_DOWN;

	/*
	 * Reprogram the MAC modes to match
	 */
	nge_sync_mac_modes(ngep);
}

static boolean_t
nge_factotum_link_check(nge_t *ngep)
{
	boolean_t lchg;
	boolean_t check;

	ASSERT(mutex_owned(ngep->genlock));

	(*ngep->physops->phys_check)(ngep);
	switch (ngep->link_state) {
	case LINK_STATE_UP:
		lchg = (ngep->param_link_up == B_FALSE);
		check = (ngep->param_link_up == B_FALSE);
		break;

	case LINK_STATE_DOWN:
		lchg = (ngep->param_link_up == B_TRUE);
		check = (ngep->param_link_up == B_TRUE);
		break;

	default:
		check = B_TRUE;
		break;
	}

	/*
	 * If <check> is false, we're sure the link hasn't changed.
	 * If true, however, it's not yet definitive; we have to call
	 * nge_phys_check() to determine whether the link has settled
	 * into a new state yet ... and if it has, then call the link
	 * state change handler.  But when the chip is a 5700 in a Dell
	 * 6650, the link may have changed even if <check> is false, so
	 * we have to call nge_phys_check() to determine the link state.
	 */
	if (check)
		nge_factotum_link_handler(ngep);

	return (lchg);
}

/*
 * Factotum routine to check for Tx stall, using the 'watchdog' counter
 */
static boolean_t nge_factotum_stall_check(nge_t *ngep);

static boolean_t
nge_factotum_stall_check(nge_t *ngep)
{
	uint32_t dogval;
	/*
	 * Specific check for Tx stall ...
	 *
	 * The 'watchdog' counter is incremented whenever a packet
	 * is queued, reset to 1 when some (but not all) buffers
	 * are reclaimed, reset to 0 (disabled) when all buffers
	 * are reclaimed, and shifted left here.  If it exceeds the
	 * threshold value, the chip is assumed to have stalled and
	 * is put into the ERROR state.  The factotum will then reset
	 * it on the next pass.
	 *
	 * All of which should ensure that we don't get into a state
	 * where packets are left pending indefinitely!
	 */
	dogval = nge_atomic_shl32(&ngep->watchdog, 1);
	if (dogval < nge_watchdog_count) {
		ngep->stall_cknum = 0;
	} else {
		ngep->stall_cknum++;
	}
	if (ngep->stall_cknum < 16) {
		return (B_FALSE);
	} else {
		ngep->stall_cknum = 0;
		ngep->statistics.sw_statistics.tx_stall++;
		return (B_TRUE);
	}
}
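
/*
 * Numerical note on the threshold used above: nge_watchdog_count is
 * 1 << 29, and the watchdog counter is shifted left once per factotum
 * pass, so a counter that started at 1 crosses the threshold after
 * roughly 29 passes without a full reclaim.  A stall is only declared
 * once the threshold has been met on 16 consecutive checks
 * (stall_cknum), which filters out a single slow reclaim.
 */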


/*
 * The factotum is woken up when there's something to do that we'd rather
 * not do from inside a hardware interrupt handler or high-level cyclic.
 * Its two main tasks are:
 *	reset & restart the chip after an error
 *	check the link status whenever necessary
 */
/* ARGSUSED */
uint_t
nge_chip_factotum(caddr_t args1, caddr_t args2)
{
	uint_t result;
	nge_t *ngep;
	boolean_t err;
	boolean_t linkchg;

	ngep = (nge_t *)args1;

	NGE_TRACE(("nge_chip_factotum($%p)", (void *)ngep));

	mutex_enter(ngep->softlock);
	if (ngep->factotum_flag == 0) {
		mutex_exit(ngep->softlock);
		return (DDI_INTR_UNCLAIMED);
	}
	ngep->factotum_flag = 0;
	mutex_exit(ngep->softlock);
	err = B_FALSE;
	linkchg = B_FALSE;
	result = DDI_INTR_CLAIMED;

	mutex_enter(ngep->genlock);
	switch (ngep->nge_chip_state) {
	default:
		break;

	case NGE_CHIP_RUNNING:
		linkchg = nge_factotum_link_check(ngep);
		err = nge_factotum_stall_check(ngep);
		break;

	case NGE_CHIP_FAULT:
		(void) nge_restart(ngep);
		NGE_REPORT((ngep, "automatic recovery activated"));
		break;
	}

	if (err)
		(void) nge_chip_stop(ngep, B_TRUE);
	mutex_exit(ngep->genlock);

	/*
	 * If the link state changed, tell the world about it (if
	 * this version of MAC supports link state notification).
	 * Note: can't do this while still holding the mutex.
	 */
	if (linkchg)
		mac_link_update(ngep->mh, ngep->link_state);

	return (result);

}

static void
nge_intr_handle(nge_t *ngep, nge_intr_src *pintr_src)
{
	boolean_t brx;
	boolean_t btx;
	nge_mintr_src mintr_src;

	brx = B_FALSE;
	btx = B_FALSE;
	ngep->statistics.sw_statistics.intr_count++;
	ngep->statistics.sw_statistics.intr_lval = pintr_src->intr_val;
	brx = (pintr_src->int_bits.reint | pintr_src->int_bits.miss
	    | pintr_src->int_bits.rcint | pintr_src->int_bits.stint)
	    != 0 ? B_TRUE : B_FALSE;
	if (pintr_src->int_bits.reint)
		ngep->statistics.sw_statistics.rx_err++;
	if (pintr_src->int_bits.miss)
		ngep->statistics.sw_statistics.rx_nobuffer++;

	btx = (pintr_src->int_bits.teint | pintr_src->int_bits.tcint)
	    != 0 ? B_TRUE : B_FALSE;
	if (pintr_src->int_bits.stint && ngep->poll)
		ngep->stint_count++;
	if (ngep->poll && (ngep->stint_count % ngep->param_tx_n_intr == 0))
		btx = B_TRUE;
	if (btx)
		nge_tx_recycle(ngep, B_TRUE);
	if (brx)
		nge_receive(ngep);
	if (pintr_src->int_bits.teint)
		ngep->statistics.sw_statistics.tx_stop_err++;
	if (ngep->intr_moderation && brx) {
		if (ngep->poll) {
			if (ngep->recv_count < ngep->param_rx_intr_hwater) {
				ngep->quiet_time++;
				if (ngep->quiet_time ==
				    ngep->param_poll_quiet_time) {
					ngep->poll = B_FALSE;
					ngep->quiet_time = 0;
					ngep->stint_count = 0;
					nge_tx_recycle(ngep, B_TRUE);
				}
			} else
				ngep->quiet_time = 0;
		} else {
			if (ngep->recv_count > ngep->param_rx_intr_lwater) {
				ngep->busy_time++;
				if (ngep->busy_time ==
				    ngep->param_poll_busy_time) {
					ngep->poll = B_TRUE;
					ngep->busy_time = 0;
				}
			} else
				ngep->busy_time = 0;
		}
	}
	ngep->recv_count = 0;
	if (pintr_src->int_bits.feint)
		nge_chip_err(ngep);
	/* link interrupt, check the link state */
	if (pintr_src->int_bits.mint) {
		mintr_src.src_val = nge_reg_get32(ngep, NGE_MINTR_SRC);
		nge_reg_put32(ngep, NGE_MINTR_SRC, mintr_src.src_val);
		nge_wake_factotum(ngep);
	}
}

/*
 * nge_chip_intr() -- handle chip interrupts
 */
/* ARGSUSED */
uint_t
nge_chip_intr(caddr_t arg1, caddr_t arg2)
{
	nge_t *ngep = (nge_t *)arg1;
	nge_intr_src intr_src;
	nge_intr_mask intr_mask;

	mutex_enter(ngep->genlock);

	if (ngep->suspended) {
		mutex_exit(ngep->genlock);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Check whether the chip says it's asserting #INTA;
	 * if not, don't process or claim the interrupt.
	 */
	intr_src.intr_val = nge_reg_get32(ngep, NGE_INTR_SRC);
	if (intr_src.intr_val == 0) {
		mutex_exit(ngep->genlock);
		return (DDI_INTR_UNCLAIMED);
	}
	/*
	 * Ack the interrupt
	 */
	nge_reg_put32(ngep, NGE_INTR_SRC, intr_src.intr_val);

	if (ngep->nge_chip_state != NGE_CHIP_RUNNING) {
		mutex_exit(ngep->genlock);
		return (DDI_INTR_CLAIMED);
	}
	nge_intr_handle(ngep, &intr_src);
	if (ngep->poll && !ngep->ch_intr_mode) {
		intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
		intr_mask.mask_bits.stint = NGE_SET;
		intr_mask.mask_bits.rcint = NGE_CLEAR;
		intr_mask.mask_bits.reint = NGE_CLEAR;
		intr_mask.mask_bits.tcint = NGE_CLEAR;
		intr_mask.mask_bits.teint = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
		ngep->ch_intr_mode = B_TRUE;
	} else if ((ngep->ch_intr_mode) && (!ngep->poll)) {
		nge_reg_put32(ngep, NGE_INTR_MASK, ngep->intr_masks);
		ngep->ch_intr_mode = B_FALSE;
	}
	mutex_exit(ngep->genlock);
	return (DDI_INTR_CLAIMED);
}

static enum ioc_reply
nge_pp_ioctl(nge_t *ngep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	int err;
	uint64_t sizemask;
	uint64_t mem_va;
	uint64_t maxoff;
	boolean_t peek;
	nge_peekpoke_t *ppd;
	int (*ppfn)(nge_t *ngep, nge_peekpoke_t *ppd);

	switch (cmd) {
	default:
		return (IOC_INVAL);

	case NGE_PEEK:
		peek = B_TRUE;
		break;

	case NGE_POKE:
		peek = B_FALSE;
		break;
	}

	/*
	 * Validate format of ioctl
	 */
	if (iocp->ioc_count != sizeof (nge_peekpoke_t))
		return (IOC_INVAL);
	if (mp->b_cont == NULL)
		return (IOC_INVAL);
	ppd = (nge_peekpoke_t *)mp->b_cont->b_rptr;

	/*
	 * Validate request parameters
	 */
	switch (ppd->pp_acc_space) {
	default:
		return (IOC_INVAL);

	case NGE_PP_SPACE_CFG:
		/*
		 * Config space
		 */
		sizemask = 8|4|2|1;
		mem_va = 0;
		maxoff = PCI_CONF_HDR_SIZE;
		ppfn = peek ? nge_chip_peek_cfg : nge_chip_poke_cfg;
		break;

	case NGE_PP_SPACE_REG:
		/*
		 * Memory-mapped I/O space
		 */
		sizemask = 8|4|2|1;
		mem_va = 0;
		maxoff = NGE_REG_SIZE;
		ppfn = peek ? nge_chip_peek_reg : nge_chip_poke_reg;
		break;

	case NGE_PP_SPACE_MII:
		sizemask = 4|2|1;
		mem_va = 0;
		maxoff = NGE_MII_SIZE;
		ppfn = peek ? nge_chip_peek_mii : nge_chip_poke_mii;
		break;

	case NGE_PP_SPACE_SEEPROM:
		sizemask = 4|2|1;
		mem_va = 0;
		maxoff = NGE_SEEROM_SIZE;
		ppfn = peek ? nge_chip_peek_seeprom : nge_chip_poke_seeprom;
		break;
	}

	switch (ppd->pp_acc_size) {
	default:
		return (IOC_INVAL);

	case 8:
	case 4:
	case 2:
	case 1:
		if ((ppd->pp_acc_size & sizemask) == 0)
			return (IOC_INVAL);
		break;
	}

	if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
		return (IOC_INVAL);

	if (ppd->pp_acc_offset >= maxoff)
		return (IOC_INVAL);

	if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff)
		return (IOC_INVAL);

	/*
	 * All OK - go do it!
	 */
	ppd->pp_acc_offset += mem_va;
	if (ppfn)
		err = (*ppfn)(ngep, ppd);
	if (err != DDI_SUCCESS)
		return (IOC_INVAL);
	return (peek ? IOC_REPLY : IOC_ACK);
}
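
/*
 * Illustrative sketch (not compiled) of a request that passes the
 * validation in nge_pp_ioctl() above: an NGE_PEEK of the first word
 * of register space,
 *
 *	ppd.pp_acc_space = NGE_PP_SPACE_REG;
 *	ppd.pp_acc_size = 4;		allowed: sizemask is 8|4|2|1
 *	ppd.pp_acc_offset = 0;		aligned, and 0 + 4 <= NGE_REG_SIZE
 *
 * whereas pp_acc_size = 8 in NGE_PP_SPACE_MII space would be rejected
 * (sizemask there is 4|2|1), as would any offset that is not a
 * multiple of pp_acc_size or that runs past maxoff.
 */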

static enum ioc_reply nge_diag_ioctl(nge_t *ngep, int cmd, mblk_t *mp,
    struct iocblk *iocp);
#pragma	no_inline(nge_diag_ioctl)

static enum ioc_reply
nge_diag_ioctl(nge_t *ngep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	ASSERT(mutex_owned(ngep->genlock));

	switch (cmd) {
	default:
		nge_error(ngep, "nge_diag_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case NGE_DIAG:
		return (IOC_ACK);

	case NGE_PEEK:
	case NGE_POKE:
		return (nge_pp_ioctl(ngep, cmd, mp, iocp));

	case NGE_PHY_RESET:
		return (IOC_RESTART_ACK);

	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		return (IOC_ACK);
	}

	/* NOTREACHED */
}

enum ioc_reply
nge_chip_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	ASSERT(mutex_owned(ngep->genlock));

	cmd = iocp->ioc_cmd;

	switch (cmd) {
	default:
		return (IOC_INVAL);

	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
#if	NGE_DEBUGGING
		return (nge_diag_ioctl(ngep, cmd, mp, iocp));
#else
		return (IOC_INVAL);
#endif

	case NGE_MII_READ:
	case NGE_MII_WRITE:
		return (IOC_INVAL);

#if	NGE_SEE_IO32
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
		return (IOC_INVAL);
#endif

#if	NGE_FLASH_IO32
	case NGE_FLASH_READ:
	case NGE_FLASH_WRITE:
		return (IOC_INVAL);
#endif
	}
}