1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 #include "nge.h" 30 static uint32_t nge_watchdog_count = 1 << 29; 31 extern boolean_t nge_enable_msi; 32 static void nge_sync_mac_modes(nge_t *); 33 34 #undef NGE_DBG 35 #define NGE_DBG NGE_DBG_CHIP 36 37 /* 38 * Operating register get/set access routines 39 */ 40 uint8_t nge_reg_get8(nge_t *ngep, nge_regno_t regno); 41 #pragma inline(nge_reg_get8) 42 43 uint8_t 44 nge_reg_get8(nge_t *ngep, nge_regno_t regno) 45 { 46 NGE_TRACE(("nge_reg_get8($%p, 0x%lx)", (void *)ngep, regno)); 47 48 return (ddi_get8(ngep->io_handle, PIO_ADDR(ngep, regno))); 49 } 50 51 void nge_reg_put8(nge_t *ngep, nge_regno_t regno, uint8_t data); 52 #pragma inline(nge_reg_put8) 53 54 void 55 nge_reg_put8(nge_t *ngep, nge_regno_t regno, uint8_t data) 56 { 57 NGE_TRACE(("nge_reg_put8($%p, 0x%lx, 0x%x)", 58 (void *)ngep, regno, data)); 59 ddi_put8(ngep->io_handle, PIO_ADDR(ngep, regno), data); 60 61 } 62 63 uint16_t nge_reg_get16(nge_t *ngep, nge_regno_t regno); 64 #pragma inline(nge_reg_get16) 65 66 uint16_t 67 nge_reg_get16(nge_t *ngep, nge_regno_t regno) 68 { 69 NGE_TRACE(("nge_reg_get16($%p, 0x%lx)", (void *)ngep, regno)); 70 return (ddi_get16(ngep->io_handle, PIO_ADDR(ngep, regno))); 71 } 72 73 void nge_reg_put16(nge_t *ngep, nge_regno_t regno, uint16_t data); 74 #pragma inline(nge_reg_put16) 75 76 void 77 nge_reg_put16(nge_t *ngep, nge_regno_t regno, uint16_t data) 78 { 79 NGE_TRACE(("nge_reg_put16($%p, 0x%lx, 0x%x)", 80 (void *)ngep, regno, data)); 81 ddi_put16(ngep->io_handle, PIO_ADDR(ngep, regno), data); 82 83 } 84 85 uint32_t nge_reg_get32(nge_t *ngep, nge_regno_t regno); 86 #pragma inline(nge_reg_get32) 87 88 uint32_t 89 nge_reg_get32(nge_t *ngep, nge_regno_t regno) 90 { 91 NGE_TRACE(("nge_reg_get32($%p, 0x%lx)", (void *)ngep, regno)); 92 return (ddi_get32(ngep->io_handle, PIO_ADDR(ngep, regno))); 93 } 94 95 void nge_reg_put32(nge_t *ngep, nge_regno_t regno, uint32_t data); 96 #pragma inline(nge_reg_put32) 97 98 void 99 nge_reg_put32(nge_t *ngep, nge_regno_t regno, uint32_t data) 100 { 101 NGE_TRACE(("nge_reg_put32($%p, 0x%lx, 0x%x)", 102 (void *)ngep, regno, data)); 103 ddi_put32(ngep->io_handle, PIO_ADDR(ngep, regno), data); 104 105 } 106 107 108 static int nge_chip_peek_cfg(nge_t *ngep, nge_peekpoke_t *ppd); 109 #pragma no_inline(nge_chip_peek_cfg) 110 111 static int 112 nge_chip_peek_cfg(nge_t *ngep, nge_peekpoke_t *ppd) 113 { 114 int err; 115 uint64_t regval; 116 uint64_t regno; 117 118 NGE_TRACE(("nge_chip_peek_cfg($%p, $%p)", 119 (void *)ngep, (void *)ppd)); 120 121 err = DDI_SUCCESS; 122 regno = 
ppd->pp_acc_offset; 123 124 switch (ppd->pp_acc_size) { 125 case 1: 126 regval = pci_config_get8(ngep->cfg_handle, regno); 127 break; 128 129 case 2: 130 regval = pci_config_get16(ngep->cfg_handle, regno); 131 break; 132 133 case 4: 134 regval = pci_config_get32(ngep->cfg_handle, regno); 135 break; 136 137 case 8: 138 regval = pci_config_get64(ngep->cfg_handle, regno); 139 break; 140 } 141 ppd->pp_acc_data = regval; 142 return (err); 143 } 144 145 static int nge_chip_poke_cfg(nge_t *ngep, nge_peekpoke_t *ppd); 146 147 static int 148 nge_chip_poke_cfg(nge_t *ngep, nge_peekpoke_t *ppd) 149 { 150 int err; 151 uint64_t regval; 152 uint64_t regno; 153 154 NGE_TRACE(("nge_chip_poke_cfg($%p, $%p)", 155 (void *)ngep, (void *)ppd)); 156 157 err = DDI_SUCCESS; 158 regno = ppd->pp_acc_offset; 159 regval = ppd->pp_acc_data; 160 161 switch (ppd->pp_acc_size) { 162 case 1: 163 pci_config_put8(ngep->cfg_handle, regno, regval); 164 break; 165 166 case 2: 167 pci_config_put16(ngep->cfg_handle, regno, regval); 168 break; 169 170 case 4: 171 pci_config_put32(ngep->cfg_handle, regno, regval); 172 break; 173 174 case 8: 175 pci_config_put64(ngep->cfg_handle, regno, regval); 176 break; 177 } 178 179 return (err); 180 181 } 182 183 static int nge_chip_peek_reg(nge_t *ngep, nge_peekpoke_t *ppd); 184 185 static int 186 nge_chip_peek_reg(nge_t *ngep, nge_peekpoke_t *ppd) 187 { 188 int err; 189 uint64_t regval; 190 void *regaddr; 191 192 NGE_TRACE(("nge_chip_peek_reg($%p, $%p)", 193 (void *)ngep, (void *)ppd)); 194 195 err = DDI_SUCCESS; 196 regaddr = PIO_ADDR(ngep, ppd->pp_acc_offset); 197 198 switch (ppd->pp_acc_size) { 199 case 1: 200 regval = ddi_get8(ngep->io_handle, regaddr); 201 break; 202 203 case 2: 204 regval = ddi_get16(ngep->io_handle, regaddr); 205 break; 206 207 case 4: 208 regval = ddi_get32(ngep->io_handle, regaddr); 209 break; 210 211 case 8: 212 regval = ddi_get64(ngep->io_handle, regaddr); 213 break; 214 215 default: 216 regval = 0x0ull; 217 break; 218 } 219 ppd->pp_acc_data = regval; 220 return (err); 221 } 222 223 static int nge_chip_poke_reg(nge_t *ngep, nge_peekpoke_t *ppd); 224 225 static int 226 nge_chip_poke_reg(nge_t *ngep, nge_peekpoke_t *ppd) 227 { 228 int err; 229 uint64_t regval; 230 void *regaddr; 231 232 NGE_TRACE(("nge_chip_poke_reg($%p, $%p)", 233 (void *)ngep, (void *)ppd)); 234 235 err = DDI_SUCCESS; 236 regaddr = PIO_ADDR(ngep, ppd->pp_acc_offset); 237 regval = ppd->pp_acc_data; 238 239 switch (ppd->pp_acc_size) { 240 case 1: 241 ddi_put8(ngep->io_handle, regaddr, regval); 242 break; 243 244 case 2: 245 ddi_put16(ngep->io_handle, regaddr, regval); 246 break; 247 248 case 4: 249 ddi_put32(ngep->io_handle, regaddr, regval); 250 break; 251 252 case 8: 253 ddi_put64(ngep->io_handle, regaddr, regval); 254 break; 255 } 256 return (err); 257 } 258 259 static int nge_chip_peek_mii(nge_t *ngep, nge_peekpoke_t *ppd); 260 #pragma no_inline(nge_chip_peek_mii) 261 262 static int 263 nge_chip_peek_mii(nge_t *ngep, nge_peekpoke_t *ppd) 264 { 265 int err; 266 267 err = DDI_SUCCESS; 268 ppd->pp_acc_data = nge_mii_get16(ngep, ppd->pp_acc_offset/2); 269 return (err); 270 } 271 272 static int nge_chip_poke_mii(nge_t *ngep, nge_peekpoke_t *ppd); 273 #pragma no_inline(nge_chip_poke_mii) 274 275 static int 276 nge_chip_poke_mii(nge_t *ngep, nge_peekpoke_t *ppd) 277 { 278 int err; 279 err = DDI_SUCCESS; 280 nge_mii_put16(ngep, ppd->pp_acc_offset/2, ppd->pp_acc_data); 281 return (err); 282 } 283 284 /* 285 * Basic SEEPROM get/set access routine 286 * 287 * This uses the chip's SEEPROM auto-access 
method, controlled by the
 * Serial EEPROM Address/Data Registers at 0x504, so the CPU
 * doesn't have to fiddle with the individual bits.
 *
 * The caller should hold <genlock> and *also* have already acquired
 * the right to access the SEEPROM.
 *
 * Return value:
 *	DDI_SUCCESS on success,
 *	DDI_FAILURE on access timeout (maybe retryable: the device may
 *	just be busy) or on other h/w or s/w errors.
 *
 * <*dp> is an input to a SEEPROM_CMD_WRITE operation, or an output
 * from a (successful) SEEPROM_CMD_READ.
 */

static int
nge_seeprom_access(nge_t *ngep, uint32_t cmd, nge_regno_t addr, uint16_t *dp)
{
	uint32_t tries;
	nge_ep_cmd cmd_reg;
	nge_ep_data data_reg;

	NGE_TRACE(("nge_seeprom_access($%p, %d, %x, $%p)",
	    (void *)ngep, cmd, addr, (void *)dp));

	ASSERT(mutex_owned(ngep->genlock));

	/*
	 * Check there's no command in progress.
	 *
	 * Note: this *shouldn't* ever find that there is a command
	 * in progress, because we already hold the <genlock> mutex.
	 * Also, the caller has already acquired the right to access
	 * the (shared) SEEPROM, so we can't conflict with the chip's
	 * internal firmware or another process accessing it.
	 * So this is just a final consistency check: we shouldn't
	 * see EITHER the START bit (command started but not complete)
	 * OR the COMPLETE bit (command completed but not cleared).
	 */
	cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	for (tries = 0; tries < 30; tries++) {
		if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
			break;
		drv_usecwait(10);
		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	}

	/*
	 * This should not happen.  If it does, we have to restart the
	 * EEPROM state machine.
	 */
	if (tries == 30) {
		cmd_reg.cmd_bits.sts = SEEPROM_READY;
		nge_reg_put32(ngep, NGE_EP_CMD, cmd_reg.cmd_val);
		drv_usecwait(10);
		/*
		 * Poll the status bit to make sure the EEPROM is ready
		 */
		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
		for (tries = 0; tries < 30; tries++) {
			if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
				break;
			drv_usecwait(10);
			cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
		}
	}

	/*
	 * Assemble the command ...
	 */
	cmd_reg.cmd_bits.addr = addr;
	cmd_reg.cmd_bits.cmd = cmd;
	cmd_reg.cmd_bits.sts = 0;

	nge_reg_put32(ngep, NGE_EP_CMD, cmd_reg.cmd_val);

	/*
	 * Poll to check whether the access completed successfully.
365 * 366 */ 367 cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD); 368 for (tries = 0; tries < 30; tries++) { 369 if (cmd_reg.cmd_bits.sts == SEEPROM_READY) 370 break; 371 drv_usecwait(10); 372 cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD); 373 } 374 375 if (tries == 30) { 376 nge_report(ngep, NGE_HW_ROM); 377 return (DDI_FAILURE); 378 } 379 switch (cmd) { 380 default: 381 case SEEPROM_CMD_WRITE_ENABLE: 382 case SEEPROM_CMD_ERASE: 383 case SEEPROM_CMD_ERALSE_ALL: 384 case SEEPROM_CMD_WRITE_DIS: 385 break; 386 387 case SEEPROM_CMD_READ: 388 data_reg.data_val = nge_reg_get32(ngep, NGE_EP_DATA); 389 *dp = data_reg.data_bits.data; 390 break; 391 392 case SEEPROM_CMD_WRITE: 393 data_reg.data_val = nge_reg_get32(ngep, NGE_EP_DATA); 394 data_reg.data_bits.data = *dp; 395 nge_reg_put32(ngep, NGE_EP_DATA, data_reg.data_val); 396 break; 397 } 398 399 return (DDI_SUCCESS); 400 } 401 402 403 static int 404 nge_chip_peek_seeprom(nge_t *ngep, nge_peekpoke_t *ppd) 405 { 406 uint16_t data; 407 int err; 408 409 err = nge_seeprom_access(ngep, SEEPROM_CMD_READ, 410 ppd->pp_acc_offset, &data); 411 ppd->pp_acc_data = data; 412 return (err); 413 } 414 415 static int 416 nge_chip_poke_seeprom(nge_t *ngep, nge_peekpoke_t *ppd) 417 { 418 uint16_t data; 419 int err; 420 421 data = ppd->pp_acc_data; 422 err = nge_seeprom_access(ngep, SEEPROM_CMD_WRITE, 423 ppd->pp_acc_offset, &data); 424 return (err); 425 } 426 427 void 428 nge_init_dev_spec_param(nge_t *ngep) 429 { 430 nge_dev_spec_param_t *dev_param_p; 431 chip_info_t *infop; 432 433 dev_param_p = &ngep->dev_spec_param; 434 infop = (chip_info_t *)&ngep->chipinfo; 435 436 switch (infop->device) { 437 case DEVICE_ID_NF3_E6: 438 case DEVICE_ID_NF3_DF: 439 case DEVICE_ID_MCP61_3EE: 440 case DEVICE_ID_MCP61_3EF: 441 case DEVICE_ID_MCP04_37: 442 case DEVICE_ID_MCP04_38: 443 dev_param_p->msi = B_FALSE; 444 dev_param_p->msi_x = B_FALSE; 445 dev_param_p->vlan = B_FALSE; 446 dev_param_p->tx_pause_frame = B_FALSE; 447 dev_param_p->rx_pause_frame = B_FALSE; 448 dev_param_p->jumbo = B_FALSE; 449 dev_param_p->tx_rx_64byte = B_FALSE; 450 dev_param_p->rx_hw_checksum = B_FALSE; 451 dev_param_p->tx_hw_checksum = 0; 452 dev_param_p->desc_type = DESC_OFFLOAD; 453 dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024; 454 dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024; 455 dev_param_p->nge_split = NGE_SPLIT_32; 456 break; 457 458 case DEVICE_ID_CK804_56: 459 case DEVICE_ID_CK804_57: 460 dev_param_p->msi = B_TRUE; 461 dev_param_p->msi_x = B_TRUE; 462 dev_param_p->vlan = B_FALSE; 463 dev_param_p->tx_pause_frame = B_FALSE; 464 dev_param_p->rx_pause_frame = B_TRUE; 465 dev_param_p->jumbo = B_TRUE; 466 dev_param_p->tx_rx_64byte = B_FALSE; 467 dev_param_p->rx_hw_checksum = B_TRUE; 468 dev_param_p->tx_hw_checksum = HCKSUM_IPHDRCKSUM; 469 dev_param_p->desc_type = DESC_HOT; 470 dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_3072; 471 dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_3072; 472 dev_param_p->nge_split = NGE_SPLIT_96; 473 break; 474 475 case DEVICE_ID_MCP51_268: 476 case DEVICE_ID_MCP51_269: 477 dev_param_p->msi = B_FALSE; 478 dev_param_p->msi_x = B_FALSE; 479 dev_param_p->vlan = B_FALSE; 480 dev_param_p->tx_pause_frame = B_FALSE; 481 dev_param_p->rx_pause_frame = B_FALSE; 482 dev_param_p->jumbo = B_FALSE; 483 dev_param_p->tx_rx_64byte = B_TRUE; 484 dev_param_p->rx_hw_checksum = B_FALSE; 485 dev_param_p->tx_hw_checksum = 0; 486 dev_param_p->desc_type = DESC_OFFLOAD; 487 dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024; 488 dev_param_p->tx_desc_num = 
NGE_SEND_SLOTS_DESC_1024; 489 dev_param_p->nge_split = NGE_SPLIT_32; 490 break; 491 492 case DEVICE_ID_MCP55_372: 493 case DEVICE_ID_MCP55_373: 494 dev_param_p->msi = B_TRUE; 495 dev_param_p->msi_x = B_TRUE; 496 dev_param_p->vlan = B_TRUE; 497 dev_param_p->tx_pause_frame = B_TRUE; 498 dev_param_p->rx_pause_frame = B_TRUE; 499 dev_param_p->jumbo = B_TRUE; 500 dev_param_p->tx_rx_64byte = B_TRUE; 501 dev_param_p->rx_hw_checksum = B_TRUE; 502 dev_param_p->tx_hw_checksum = HCKSUM_IPHDRCKSUM; 503 dev_param_p->desc_type = DESC_HOT; 504 dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_3072; 505 dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_3072; 506 dev_param_p->nge_split = NGE_SPLIT_96; 507 break; 508 509 default: 510 dev_param_p->msi = B_FALSE; 511 dev_param_p->msi_x = B_FALSE; 512 dev_param_p->vlan = B_FALSE; 513 dev_param_p->tx_pause_frame = B_FALSE; 514 dev_param_p->rx_pause_frame = B_FALSE; 515 dev_param_p->jumbo = B_FALSE; 516 dev_param_p->tx_rx_64byte = B_FALSE; 517 dev_param_p->rx_hw_checksum = B_FALSE; 518 dev_param_p->tx_hw_checksum = 0; 519 dev_param_p->desc_type = DESC_OFFLOAD; 520 dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024; 521 dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024; 522 dev_param_p->nge_split = NGE_SPLIT_32; 523 return; 524 } 525 } 526 /* 527 * Perform first-stage chip (re-)initialisation, using only config-space 528 * accesses: 529 * 530 * + Read the vendor/device/revision/subsystem/cache-line-size registers, 531 * returning the data in the structure pointed to by <infop>. 532 */ 533 void nge_chip_cfg_init(nge_t *ngep, chip_info_t *infop, boolean_t reset); 534 #pragma no_inline(nge_chip_cfg_init) 535 536 void 537 nge_chip_cfg_init(nge_t *ngep, chip_info_t *infop, boolean_t reset) 538 { 539 uint16_t command; 540 ddi_acc_handle_t handle; 541 nge_interbus_conf interbus_conf; 542 nge_msi_mask_conf msi_mask_conf; 543 nge_msi_map_cap_conf cap_conf; 544 545 NGE_TRACE(("nge_chip_cfg_init($%p, $%p, %d)", 546 (void *)ngep, (void *)infop, reset)); 547 548 /* 549 * save PCI cache line size and subsystem vendor ID 550 * 551 * Read all the config-space registers that characterise the 552 * chip, specifically vendor/device/revision/subsystem vendor 553 * and subsystem device id. 
We expect (but don't check) that 554 */ 555 handle = ngep->cfg_handle; 556 /* reading the vendor information once */ 557 if (reset == B_FALSE) { 558 infop->command = pci_config_get16(handle, 559 PCI_CONF_COMM); 560 infop->vendor = pci_config_get16(handle, 561 PCI_CONF_VENID); 562 infop->device = pci_config_get16(handle, 563 PCI_CONF_DEVID); 564 infop->subven = pci_config_get16(handle, 565 PCI_CONF_SUBVENID); 566 infop->subdev = pci_config_get16(handle, 567 PCI_CONF_SUBSYSID); 568 infop->class_code = pci_config_get8(handle, 569 PCI_CONF_BASCLASS); 570 infop->revision = pci_config_get8(handle, 571 PCI_CONF_REVID); 572 infop->clsize = pci_config_get8(handle, 573 PCI_CONF_CACHE_LINESZ); 574 infop->latency = pci_config_get8(handle, 575 PCI_CONF_LATENCY_TIMER); 576 } 577 if (nge_enable_msi) { 578 /* Disable the hidden for MSI support */ 579 interbus_conf.conf_val = pci_config_get32(handle, 580 PCI_CONF_HT_INTERNAL); 581 if ((infop->device == DEVICE_ID_MCP55_373) || 582 (infop->device == DEVICE_ID_MCP55_372)) 583 interbus_conf.conf_bits.msix_off = NGE_SET; 584 interbus_conf.conf_bits.msi_off = NGE_CLEAR; 585 pci_config_put32(handle, PCI_CONF_HT_INTERNAL, 586 interbus_conf.conf_val); 587 588 if ((infop->device == DEVICE_ID_MCP55_373) || 589 (infop->device == DEVICE_ID_MCP55_372)) { 590 591 /* Disable the vector off for mcp55 */ 592 msi_mask_conf.msi_mask_conf_val = 593 pci_config_get32(handle, PCI_CONF_HT_MSI_MASK); 594 msi_mask_conf.msi_mask_bits.vec0_off = NGE_CLEAR; 595 msi_mask_conf.msi_mask_bits.vec1_off = NGE_CLEAR; 596 msi_mask_conf.msi_mask_bits.vec2_off = NGE_CLEAR; 597 msi_mask_conf.msi_mask_bits.vec3_off = NGE_CLEAR; 598 msi_mask_conf.msi_mask_bits.vec4_off = NGE_CLEAR; 599 msi_mask_conf.msi_mask_bits.vec5_off = NGE_CLEAR; 600 msi_mask_conf.msi_mask_bits.vec6_off = NGE_CLEAR; 601 msi_mask_conf.msi_mask_bits.vec7_off = NGE_CLEAR; 602 pci_config_put32(handle, PCI_CONF_HT_MSI_MASK, 603 msi_mask_conf.msi_mask_conf_val); 604 605 /* Enable the MSI mapping */ 606 cap_conf.msi_map_cap_conf_val = 607 pci_config_get32(handle, PCI_CONF_HT_MSI_MAP_CAP); 608 cap_conf.map_cap_conf_bits.map_en = NGE_SET; 609 pci_config_put32(handle, PCI_CONF_HT_MSI_MAP_CAP, 610 cap_conf.msi_map_cap_conf_val); 611 } 612 } else { 613 interbus_conf.conf_val = pci_config_get32(handle, 614 PCI_CONF_HT_INTERNAL); 615 interbus_conf.conf_bits.msi_off = NGE_SET; 616 pci_config_put32(handle, PCI_CONF_HT_INTERNAL, 617 interbus_conf.conf_val); 618 } 619 command = infop->command | PCI_COMM_MAE; 620 command &= ~PCI_COMM_MEMWR_INVAL; 621 command |= PCI_COMM_ME; 622 pci_config_put16(handle, PCI_CONF_COMM, command); 623 pci_config_put16(handle, PCI_CONF_STAT, ~0); 624 625 } 626 627 int 628 nge_chip_stop(nge_t *ngep, boolean_t fault) 629 { 630 int err; 631 uint32_t reg_val; 632 uint32_t tries; 633 nge_intr_src intr_src; 634 nge_mintr_src mintr_src; 635 nge_mii_cs mii_cs; 636 nge_rx_poll rx_poll; 637 nge_tx_poll tx_poll; 638 nge_rx_en rx_en; 639 nge_tx_en tx_en; 640 nge_tx_sta tx_sta; 641 nge_rx_sta rx_sta; 642 nge_mode_cntl mode; 643 nge_pmu_cntl2 pmu_cntl2; 644 645 NGE_TRACE(("nge_chip_stop($%p, %d)", (void *)ngep, fault)); 646 647 err = DDI_SUCCESS; 648 /* Clear all pending interrupts */ 649 intr_src.intr_val = nge_reg_get32(ngep, NGE_INTR_SRC); 650 nge_reg_put32(ngep, NGE_INTR_SRC, intr_src.intr_val); 651 mintr_src.src_val = nge_reg_get8(ngep, NGE_MINTR_SRC); 652 nge_reg_put8(ngep, NGE_MINTR_SRC, mintr_src.src_val); 653 654 /* Mask all interrupts */ 655 reg_val = nge_reg_get32(ngep, NGE_INTR_MASK); 656 reg_val &= ~NGE_INTR_ALL_EN; 
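	/* Write the cleared mask back so that no interrupt source can fire */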
657 nge_reg_put32(ngep, NGE_INTR_MASK, reg_val); 658 659 /* Disable auto-polling of phy */ 660 mii_cs.cs_val = nge_reg_get32(ngep, NGE_MII_CS); 661 mii_cs.cs_bits.ap_en = NGE_CLEAR; 662 nge_reg_put32(ngep, NGE_MII_CS, mii_cs.cs_val); 663 664 /* Reset buffer management & DMA */ 665 mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL); 666 mode.mode_bits.bm_reset = NGE_SET; 667 mode.mode_bits.dma_dis = NGE_SET; 668 mode.mode_bits.desc_type = ngep->desc_mode; 669 nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val); 670 671 drv_usecwait(50000); 672 673 /* Restore buffer management */ 674 mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL); 675 mode.mode_bits.bm_reset = NGE_CLEAR; 676 mode.mode_bits.tx_rcom_en = NGE_SET; 677 nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val); 678 679 nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val); 680 for (tries = 0; tries < 5000; tries++) { 681 drv_usecwait(10); 682 mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL); 683 if (mode.mode_bits.dma_status == NGE_SET) 684 break; 685 } 686 if (tries == 5000) { 687 return (DDI_FAILURE); 688 } 689 690 /* 691 * For mcp55, the bits 1:31 of NGE_RX_EN and NGE_TX_EN are 692 * defined to be used by SMU. The newer PXE than 527 began to 693 * support SMU and bit 24 of NGE_RX_EN/NGE_TX_EN are set 694 * when leaving PXE to prevents the MAC from winning 695 * arbitration to the main transmit/receive channels. 696 */ 697 if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 || 698 ngep->chipinfo.device == DEVICE_ID_MCP55_372) { 699 700 /* Disable rx's machine */ 701 nge_reg_put32(ngep, NGE_RX_EN, 0x0); 702 703 /* Disable tx's machine */ 704 nge_reg_put32(ngep, NGE_TX_EN, 0x0); 705 } else { 706 707 /* Disable rx's machine */ 708 rx_en.val = nge_reg_get8(ngep, NGE_RX_EN); 709 rx_en.bits.rx_en = NGE_CLEAR; 710 nge_reg_put8(ngep, NGE_RX_EN, rx_en.val); 711 712 713 /* Disable tx's machine */ 714 tx_en.val = nge_reg_get8(ngep, NGE_TX_EN); 715 tx_en.bits.tx_en = NGE_CLEAR; 716 nge_reg_put8(ngep, NGE_TX_EN, tx_en.val); 717 } 718 719 /* Disable auto-poll of rx's state machine */ 720 rx_poll.poll_val = nge_reg_get32(ngep, NGE_RX_POLL); 721 rx_poll.poll_bits.rpen = NGE_CLEAR; 722 rx_poll.poll_bits.rpi = NGE_CLEAR; 723 nge_reg_put32(ngep, NGE_RX_POLL, rx_poll.poll_val); 724 725 /* Disable auto-polling of tx's state machine */ 726 tx_poll.poll_val = nge_reg_get32(ngep, NGE_TX_POLL); 727 tx_poll.poll_bits.tpen = NGE_CLEAR; 728 tx_poll.poll_bits.tpi = NGE_CLEAR; 729 nge_reg_put32(ngep, NGE_TX_POLL, tx_poll.poll_val); 730 731 732 /* 733 * Clean the status of tx's state machine 734 * and Make assure the tx's channel is idle 735 */ 736 tx_sta.sta_val = nge_reg_get32(ngep, NGE_TX_STA); 737 for (tries = 0; tries < 1000; tries++) { 738 if (tx_sta.sta_bits.tx_chan_sta == NGE_CLEAR) 739 break; 740 drv_usecwait(10); 741 tx_sta.sta_val = nge_reg_get32(ngep, NGE_TX_STA); 742 } 743 if (tries == 1000) { 744 return (DDI_FAILURE); 745 } 746 nge_reg_put32(ngep, NGE_TX_STA, tx_sta.sta_val); 747 748 /* 749 * Clean the status of rx's state machine 750 * and Make assure the tx's channel is idle 751 */ 752 rx_sta.sta_val = nge_reg_get32(ngep, NGE_RX_STA); 753 for (tries = 0; tries < 1000; tries++) { 754 if (rx_sta.sta_bits.rx_chan_sta == NGE_CLEAR) 755 break; 756 drv_usecwait(10); 757 rx_sta.sta_val = nge_reg_get32(ngep, NGE_RX_STA); 758 } 759 if (tries == 1000) { 760 return (DDI_FAILURE); 761 } 762 nge_reg_put32(ngep, NGE_RX_STA, rx_sta.sta_val); 763 764 if (ngep->chipinfo.device == DEVICE_ID_MCP51_269 || 765 ngep->chipinfo.device == DEVICE_ID_MCP51_268) { 766 767 
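		/*
		 * MCP51 only: zero the PMU core/device idle limits and (below)
		 * turn off the idle timers that nge_chip_reset() programs;
		 * presumably this prevents PMU idle actions while the chip
		 * is stopped.
		 */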
nge_reg_put32(ngep, NGE_PMU_CIDLE_LIMIT, 0); 768 nge_reg_put32(ngep, NGE_PMU_DIDLE_LIMIT, 0); 769 770 pmu_cntl2.cntl2_val = nge_reg_get32(ngep, NGE_PMU_CNTL2); 771 pmu_cntl2.cntl2_bits.cidle_timer = NGE_CLEAR; 772 pmu_cntl2.cntl2_bits.didle_timer = NGE_CLEAR; 773 nge_reg_put32(ngep, NGE_PMU_CNTL2, pmu_cntl2.cntl2_val); 774 } 775 if (fault) 776 ngep->nge_chip_state = NGE_CHIP_FAULT; 777 else 778 ngep->nge_chip_state = NGE_CHIP_STOPPED; 779 780 return (err); 781 } 782 783 static void 784 nge_rx_setup(nge_t *ngep) 785 { 786 uint64_t desc_addr; 787 nge_rxtx_dlen dlen; 788 nge_rx_poll rx_poll; 789 790 /* 791 * Filling the address and length of rx's descriptors 792 */ 793 desc_addr = ngep->recv->desc.cookie.dmac_laddress; 794 nge_reg_put32(ngep, NGE_RX_DADR, desc_addr); 795 nge_reg_put32(ngep, NGE_RX_DADR_HI, desc_addr >> 32); 796 dlen.dlen_val = nge_reg_get32(ngep, NGE_RXTX_DLEN); 797 dlen.dlen_bits.rdlen = ngep->recv->desc.nslots - 1; 798 nge_reg_put32(ngep, NGE_RXTX_DLEN, dlen.dlen_val); 799 800 rx_poll.poll_val = nge_reg_get32(ngep, NGE_RX_POLL); 801 rx_poll.poll_bits.rpi = RX_POLL_INTV_1G; 802 rx_poll.poll_bits.rpen = NGE_SET; 803 nge_reg_put32(ngep, NGE_RX_POLL, rx_poll.poll_val); 804 } 805 806 static void 807 nge_tx_setup(nge_t *ngep) 808 { 809 uint64_t desc_addr; 810 nge_rxtx_dlen dlen; 811 812 /* 813 * Filling the address and length of tx's descriptors 814 */ 815 desc_addr = ngep->send->desc.cookie.dmac_laddress; 816 nge_reg_put32(ngep, NGE_TX_DADR, desc_addr); 817 nge_reg_put32(ngep, NGE_TX_DADR_HI, desc_addr >> 32); 818 dlen.dlen_val = nge_reg_get32(ngep, NGE_RXTX_DLEN); 819 dlen.dlen_bits.tdlen = ngep->send->desc.nslots - 1; 820 nge_reg_put32(ngep, NGE_RXTX_DLEN, dlen.dlen_val); 821 } 822 823 static int 824 nge_buff_setup(nge_t *ngep) 825 { 826 nge_mode_cntl mode_cntl; 827 nge_dev_spec_param_t *dev_param_p; 828 829 dev_param_p = &ngep->dev_spec_param; 830 831 /* 832 * Configure Rx&Tx's buffer 833 */ 834 nge_rx_setup(ngep); 835 nge_tx_setup(ngep); 836 837 /* 838 * Configure buffer attribute 839 */ 840 mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL); 841 842 /* 843 * Enable Dma access request 844 */ 845 mode_cntl.mode_bits.dma_dis = NGE_CLEAR; 846 847 /* 848 * Enbale Buffer management 849 */ 850 mode_cntl.mode_bits.bm_reset = NGE_CLEAR; 851 852 /* 853 * Support Standoffload Descriptor 854 */ 855 mode_cntl.mode_bits.desc_type = ngep->desc_mode; 856 857 /* 858 * Support receive hardware checksum 859 */ 860 if (dev_param_p->rx_hw_checksum) { 861 mode_cntl.mode_bits.rx_sum_en = NGE_SET; 862 } else 863 mode_cntl.mode_bits.rx_sum_en = NGE_CLEAR; 864 865 /* 866 * Disable Tx PRD coarse update 867 */ 868 mode_cntl.mode_bits.tx_prd_cu_en = NGE_CLEAR; 869 870 /* 871 * Disable 64-byte access 872 */ 873 mode_cntl.mode_bits.w64_dis = NGE_SET; 874 875 /* 876 * Skip Rx Error Frame is not supported and if 877 * enable it, jumbo frame does not work any more. 
878 */ 879 mode_cntl.mode_bits.rx_filter_en = NGE_CLEAR; 880 881 /* 882 * Can not support hot mode now 883 */ 884 mode_cntl.mode_bits.resv15 = NGE_CLEAR; 885 886 if (dev_param_p->vlan) { 887 /* Disable the vlan strip for devices which support vlan */ 888 mode_cntl.mode_bits.vlan_strip = NGE_CLEAR; 889 890 /* Disable the vlan insert for devices which supprot vlan */ 891 mode_cntl.mode_bits.vlan_ins = NGE_CLEAR; 892 } 893 894 if (dev_param_p->tx_rx_64byte) { 895 896 /* Set the maximum TX PRD fetch size to 64 bytes */ 897 mode_cntl.mode_bits.tx_fetch_prd = NGE_SET; 898 899 /* Set the maximum RX PRD fetch size to 64 bytes */ 900 mode_cntl.mode_bits.rx_fetch_prd = NGE_SET; 901 } 902 /* 903 * Upload Rx data as it arrives, rather than waiting for full frame 904 */ 905 mode_cntl.mode_bits.resv16 = NGE_CLEAR; 906 907 /* 908 * Normal HOT table accesses 909 */ 910 mode_cntl.mode_bits.resv17 = NGE_CLEAR; 911 912 /* 913 * Normal HOT buffer requesting 914 */ 915 mode_cntl.mode_bits.resv18 = NGE_CLEAR; 916 nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val); 917 918 /* 919 * Signal controller to check for new Rx descriptors 920 */ 921 mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL); 922 mode_cntl.mode_bits.rxdm = NGE_SET; 923 mode_cntl.mode_bits.tx_rcom_en = NGE_SET; 924 nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val); 925 926 927 return (DDI_SUCCESS); 928 } 929 930 /* 931 * When chipset resets, the chipset can not restore the orignial 932 * mac address to the mac address registers. 933 * 934 * When the driver is dettached, the function will write the orignial 935 * mac address to the mac address registers. 936 */ 937 938 void 939 nge_restore_mac_addr(nge_t *ngep) 940 { 941 uint32_t mac_addr; 942 943 mac_addr = (uint32_t)ngep->chipinfo.hw_mac_addr; 944 nge_reg_put32(ngep, NGE_UNI_ADDR0, mac_addr); 945 mac_addr = (uint32_t)(ngep->chipinfo.hw_mac_addr >> 32); 946 nge_reg_put32(ngep, NGE_UNI_ADDR1, mac_addr); 947 } 948 949 int 950 nge_chip_reset(nge_t *ngep) 951 { 952 int err; 953 uint8_t i; 954 uint32_t regno; 955 uint64_t mac; 956 nge_uni_addr1 uaddr1; 957 nge_mul_addr1 maddr1; 958 nge_cp_cntl ee_cntl; 959 nge_soft_misc soft_misc; 960 nge_pmu_cntl0 pmu_cntl0; 961 nge_pmu_cntl2 pmu_cntl2; 962 nge_pm_cntl2 pm_cntl2; 963 const nge_ksindex_t *ksip; 964 nge_sw_statistics_t *sw_stp; 965 sw_stp = &ngep->statistics.sw_statistics; 966 967 NGE_TRACE(("nge_chip_reset($%p)", (void *)ngep)); 968 969 /* 970 * Clear the statistics by reading the statistics register 971 */ 972 for (ksip = nge_statistics; ksip->name != NULL; ++ksip) { 973 regno = KS_BASE + ksip->index * sizeof (uint32_t); 974 (void) nge_reg_get32(ngep, regno); 975 } 976 /* Clear the software statistics */ 977 sw_stp->recv_count = 0; 978 sw_stp->xmit_count = 0; 979 sw_stp->rbytes = 0; 980 sw_stp->obytes = 0; 981 982 /* 983 * Clear the Multicast mac address table 984 */ 985 nge_reg_put32(ngep, NGE_MUL_ADDR0, 0); 986 maddr1.addr_val = nge_reg_get32(ngep, NGE_MUL_ADDR1); 987 maddr1.addr_bits.addr = 0; 988 nge_reg_put32(ngep, NGE_MUL_ADDR1, maddr1.addr_val); 989 990 /* 991 * Setup seeprom control 992 */ 993 ee_cntl.cntl_val = nge_reg_get32(ngep, NGE_EP_CNTL); 994 ee_cntl.cntl_bits.clkdiv = EEPROM_CLKDIV; 995 ee_cntl.cntl_bits.rom_size = EEPROM_32K; 996 ee_cntl.cntl_bits.word_wid = ACCESS_16BIT; 997 ee_cntl.cntl_bits.wait_slots = EEPROM_WAITCLK; 998 nge_reg_put32(ngep, NGE_EP_CNTL, ee_cntl.cntl_val); 999 1000 /* 1001 * Reading the unicast mac address table 1002 */ 1003 if (ngep->nge_chip_state == NGE_CHIP_INITIAL) { 1004 uaddr1.addr_val = 
nge_reg_get32(ngep, NGE_UNI_ADDR1); 1005 mac = uaddr1.addr_bits.addr; 1006 mac <<= 32; 1007 mac |= nge_reg_get32(ngep, NGE_UNI_ADDR0); 1008 if (mac != 0ULL && mac != ~0ULL) { 1009 ngep->chipinfo.hw_mac_addr = mac; 1010 for (i = ETHERADDRL; i-- != 0; ) { 1011 ngep->chipinfo.vendor_addr.addr[i] = 1012 (uchar_t)mac; 1013 ngep->cur_uni_addr.addr[i] = (uchar_t)mac; 1014 mac >>= 8; 1015 } 1016 ngep->chipinfo.vendor_addr.set = 1; 1017 } 1018 } 1019 pci_config_put8(ngep->cfg_handle, PCI_CONF_CACHE_LINESZ, 1020 ngep->chipinfo.clsize); 1021 pci_config_put8(ngep->cfg_handle, PCI_CONF_LATENCY_TIMER, 1022 ngep->chipinfo.latency); 1023 1024 /* 1025 * Stop the chipset and clear buffer management 1026 */ 1027 err = nge_chip_stop(ngep, B_FALSE); 1028 if (err == DDI_FAILURE) 1029 return (err); 1030 if (ngep->chipinfo.device == DEVICE_ID_MCP51_269 || 1031 ngep->chipinfo.device == DEVICE_ID_MCP51_268) { 1032 1033 /* Program software misc register */ 1034 soft_misc.misc_val = nge_reg_get32(ngep, NGE_SOFT_MISC); 1035 soft_misc.misc_bits.rx_clk_vx_rst = NGE_SET; 1036 soft_misc.misc_bits.tx_clk_vx_rst = NGE_SET; 1037 soft_misc.misc_bits.clk12m_vx_rst = NGE_SET; 1038 soft_misc.misc_bits.fpci_clk_vx_rst = NGE_SET; 1039 soft_misc.misc_bits.rx_clk_vc_rst = NGE_SET; 1040 soft_misc.misc_bits.tx_clk_vc_rst = NGE_SET; 1041 soft_misc.misc_bits.fs_clk_vc_rst = NGE_SET; 1042 soft_misc.misc_bits.rst_ex_m2pintf = NGE_SET; 1043 nge_reg_put32(ngep, NGE_SOFT_MISC, soft_misc.misc_val); 1044 1045 /* wait for 4 us */ 1046 drv_usecwait(4); 1047 1048 soft_misc.misc_val = nge_reg_get32(ngep, NGE_SOFT_MISC); 1049 soft_misc.misc_bits.rx_clk_vx_rst = NGE_CLEAR; 1050 soft_misc.misc_bits.tx_clk_vx_rst = NGE_CLEAR; 1051 soft_misc.misc_bits.clk12m_vx_rst = NGE_CLEAR; 1052 soft_misc.misc_bits.fpci_clk_vx_rst = NGE_CLEAR; 1053 soft_misc.misc_bits.rx_clk_vc_rst = NGE_CLEAR; 1054 soft_misc.misc_bits.tx_clk_vc_rst = NGE_CLEAR; 1055 soft_misc.misc_bits.fs_clk_vc_rst = NGE_CLEAR; 1056 soft_misc.misc_bits.rst_ex_m2pintf = NGE_CLEAR; 1057 nge_reg_put32(ngep, NGE_SOFT_MISC, soft_misc.misc_val); 1058 1059 /* Program PMU registers */ 1060 pmu_cntl0.cntl0_val = nge_reg_get32(ngep, NGE_PMU_CNTL0); 1061 pmu_cntl0.cntl0_bits.core_spd10_fp = 1062 NGE_PMU_CORE_SPD10_BUSY; 1063 pmu_cntl0.cntl0_bits.core_spd10_idle = 1064 NGE_PMU_CORE_SPD10_IDLE; 1065 pmu_cntl0.cntl0_bits.core_spd100_fp = 1066 NGE_PMU_CORE_SPD100_BUSY; 1067 pmu_cntl0.cntl0_bits.core_spd100_idle = 1068 NGE_PMU_CORE_SPD100_IDLE; 1069 pmu_cntl0.cntl0_bits.core_spd1000_fp = 1070 NGE_PMU_CORE_SPD1000_BUSY; 1071 pmu_cntl0.cntl0_bits.core_spd1000_idle = 1072 NGE_PMU_CORE_SPD100_IDLE; 1073 pmu_cntl0.cntl0_bits.core_spd10_idle = 1074 NGE_PMU_CORE_SPD10_IDLE; 1075 nge_reg_put32(ngep, NGE_PMU_CNTL0, pmu_cntl0.cntl0_val); 1076 1077 /* Set the core idle limit value */ 1078 nge_reg_put32(ngep, NGE_PMU_CIDLE_LIMIT, 1079 NGE_PMU_CIDLE_LIMIT_DEF); 1080 1081 /* Set the device idle limit value */ 1082 nge_reg_put32(ngep, NGE_PMU_DIDLE_LIMIT, 1083 NGE_PMU_DIDLE_LIMIT_DEF); 1084 1085 /* Enable the core/device idle timer in PMU control 2 */ 1086 pmu_cntl2.cntl2_val = nge_reg_get32(ngep, NGE_PMU_CNTL2); 1087 pmu_cntl2.cntl2_bits.cidle_timer = NGE_SET; 1088 pmu_cntl2.cntl2_bits.didle_timer = NGE_SET; 1089 pmu_cntl2.cntl2_bits.core_enable = NGE_SET; 1090 pmu_cntl2.cntl2_bits.dev_enable = NGE_SET; 1091 nge_reg_put32(ngep, NGE_PMU_CNTL2, pmu_cntl2.cntl2_val); 1092 } 1093 1094 /* 1095 * Clear the power state bits for phy since interface no longer 1096 * works after rebooting from Windows on a multi-boot machine 1097 */ 
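	/*
	 * This fix-up applies only to the MCP51/MCP55/MCP61 devices checked
	 * below; NGE_PM_CNTL2 is not touched on the older CK804/NF3 parts.
	 */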
1098 if (ngep->chipinfo.device == DEVICE_ID_MCP51_268 || 1099 ngep->chipinfo.device == DEVICE_ID_MCP51_269 || 1100 ngep->chipinfo.device == DEVICE_ID_MCP55_372 || 1101 ngep->chipinfo.device == DEVICE_ID_MCP55_373 || 1102 ngep->chipinfo.device == DEVICE_ID_MCP61_3EE || 1103 ngep->chipinfo.device == DEVICE_ID_MCP61_3EF) { 1104 1105 pm_cntl2.cntl_val = nge_reg_get32(ngep, NGE_PM_CNTL2); 1106 /* bring phy out of coma mode */ 1107 pm_cntl2.cntl_bits.phy_coma_set = NGE_CLEAR; 1108 /* disable auto reset coma bits */ 1109 pm_cntl2.cntl_bits.resv4 = NGE_CLEAR; 1110 /* restore power to gated clocks */ 1111 pm_cntl2.cntl_bits.resv8_11 = NGE_CLEAR; 1112 nge_reg_put32(ngep, NGE_PM_CNTL2, pm_cntl2.cntl_val); 1113 } 1114 1115 /* 1116 * Reset the external phy 1117 */ 1118 (void) nge_phy_reset(ngep); 1119 ngep->nge_chip_state = NGE_CHIP_RESET; 1120 return (DDI_SUCCESS); 1121 } 1122 1123 int 1124 nge_chip_start(nge_t *ngep) 1125 { 1126 int err; 1127 nge_itc itc; 1128 nge_tx_cntl tx_cntl; 1129 nge_rx_cntrl0 rx_cntl0; 1130 nge_rx_cntl1 rx_cntl1; 1131 nge_tx_en tx_en; 1132 nge_rx_en rx_en; 1133 nge_mii_cs mii_cs; 1134 nge_swtr_cntl swtr_cntl; 1135 nge_rx_fifo_wm rx_fifo; 1136 nge_intr_mask intr_mask; 1137 nge_mintr_mask mintr_mask; 1138 nge_dev_spec_param_t *dev_param_p; 1139 1140 NGE_TRACE(("nge_chip_start($%p)", (void *)ngep)); 1141 1142 /* 1143 * Setup buffer management 1144 */ 1145 err = nge_buff_setup(ngep); 1146 if (err == DDI_FAILURE) 1147 return (err); 1148 1149 dev_param_p = &ngep->dev_spec_param; 1150 1151 /* 1152 * Enable polling attribute 1153 */ 1154 mii_cs.cs_val = nge_reg_get32(ngep, NGE_MII_CS); 1155 mii_cs.cs_bits.ap_paddr = ngep->phy_xmii_addr; 1156 mii_cs.cs_bits.ap_en = NGE_SET; 1157 mii_cs.cs_bits.ap_intv = MII_POLL_INTV; 1158 nge_reg_put32(ngep, NGE_MII_CS, mii_cs.cs_val); 1159 1160 /* 1161 * Setup link 1162 */ 1163 (*ngep->physops->phys_update)(ngep); 1164 1165 /* 1166 * Configure the tx's parameters 1167 */ 1168 tx_cntl.cntl_val = nge_reg_get32(ngep, NGE_TX_CNTL); 1169 if (dev_param_p->tx_pause_frame) 1170 tx_cntl.cntl_bits.paen = NGE_SET; 1171 else 1172 tx_cntl.cntl_bits.paen = NGE_CLEAR; 1173 tx_cntl.cntl_bits.retry_en = NGE_SET; 1174 tx_cntl.cntl_bits.pad_en = NGE_SET; 1175 tx_cntl.cntl_bits.fappend_en = NGE_SET; 1176 tx_cntl.cntl_bits.two_def_en = NGE_SET; 1177 tx_cntl.cntl_bits.max_retry = 15; 1178 tx_cntl.cntl_bits.burst_en = NGE_CLEAR; 1179 tx_cntl.cntl_bits.uflo_err_mask = NGE_CLEAR; 1180 tx_cntl.cntl_bits.tlcol_mask = NGE_CLEAR; 1181 tx_cntl.cntl_bits.lcar_mask = NGE_CLEAR; 1182 tx_cntl.cntl_bits.def_mask = NGE_CLEAR; 1183 tx_cntl.cntl_bits.exdef_mask = NGE_SET; 1184 tx_cntl.cntl_bits.lcar_mask = NGE_SET; 1185 tx_cntl.cntl_bits.tlcol_mask = NGE_SET; 1186 tx_cntl.cntl_bits.uflo_err_mask = NGE_SET; 1187 tx_cntl.cntl_bits.jam_seq_en = NGE_CLEAR; 1188 nge_reg_put32(ngep, NGE_TX_CNTL, tx_cntl.cntl_val); 1189 1190 1191 /* 1192 * Configure the parameters of Rx's state machine 1193 * Enabe the parameters: 1194 * 1). Pad Strip 1195 * 2). FCS Relay 1196 * 3). Pause 1197 * 4). Address filter 1198 * 5). Runt Packet receive 1199 * 6). Broadcast 1200 * 7). Receive Deferral 1201 * 1202 * Disable the following parameters for decreasing 1203 * the number of interrupts: 1204 * 1). Runt Inerrupt. 1205 * 2). Rx's Late Collision interrupt. 1206 * 3). Rx's Max length Error Interrupt. 1207 * 4). Rx's Length Field error Interrupt. 1208 * 5). Rx's FCS error interrupt. 1209 * 6). Rx's overflow error interrupt. 1210 * 7). Rx's Frame alignment error interrupt. 
1211 */ 1212 rx_cntl0.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0); 1213 rx_cntl0.cntl_bits.padsen = NGE_CLEAR; 1214 rx_cntl0.cntl_bits.fcsren = NGE_CLEAR; 1215 if (dev_param_p->rx_pause_frame) 1216 rx_cntl0.cntl_bits.paen = NGE_SET; 1217 else 1218 rx_cntl0.cntl_bits.paen = NGE_CLEAR; 1219 rx_cntl0.cntl_bits.lben = NGE_CLEAR; 1220 rx_cntl0.cntl_bits.afen = NGE_SET; 1221 rx_cntl0.cntl_bits.runten = NGE_CLEAR; 1222 rx_cntl0.cntl_bits.brdis = NGE_CLEAR; 1223 rx_cntl0.cntl_bits.rdfen = NGE_CLEAR; 1224 rx_cntl0.cntl_bits.runtm = NGE_CLEAR; 1225 rx_cntl0.cntl_bits.slfb = NGE_CLEAR; 1226 rx_cntl0.cntl_bits.rlcolm = NGE_CLEAR; 1227 rx_cntl0.cntl_bits.maxerm = NGE_CLEAR; 1228 rx_cntl0.cntl_bits.lferm = NGE_CLEAR; 1229 rx_cntl0.cntl_bits.crcm = NGE_CLEAR; 1230 rx_cntl0.cntl_bits.ofolm = NGE_CLEAR; 1231 rx_cntl0.cntl_bits.framerm = NGE_CLEAR; 1232 nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val); 1233 1234 /* 1235 * Configure the watermark for the rx's statemachine 1236 */ 1237 rx_fifo.wm_val = nge_reg_get32(ngep, NGE_RX_FIFO_WM); 1238 rx_fifo.wm_bits.data_hwm = ngep->rx_datahwm; 1239 rx_fifo.wm_bits.prd_lwm = ngep->rx_prdlwm; 1240 rx_fifo.wm_bits.prd_hwm = ngep->rx_prdhwm; 1241 nge_reg_put32(ngep, NGE_RX_FIFO_WM, rx_fifo.wm_val); 1242 1243 /* 1244 * Configure the deffer time slot for rx's state machine 1245 */ 1246 nge_reg_put8(ngep, NGE_RX_DEf, ngep->rx_def); 1247 1248 /* 1249 * Configure the length of rx's packet 1250 */ 1251 rx_cntl1.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL1); 1252 rx_cntl1.cntl_bits.length = ngep->max_sdu; 1253 nge_reg_put32(ngep, NGE_RX_CNTL1, rx_cntl1.cntl_val); 1254 /* 1255 * Enable Tx's state machine 1256 */ 1257 tx_en.val = nge_reg_get8(ngep, NGE_TX_EN); 1258 tx_en.bits.tx_en = NGE_SET; 1259 nge_reg_put8(ngep, NGE_TX_EN, tx_en.val); 1260 1261 /* 1262 * Enable Rx's state machine 1263 */ 1264 rx_en.val = nge_reg_get8(ngep, NGE_RX_EN); 1265 rx_en.bits.rx_en = NGE_SET; 1266 nge_reg_put8(ngep, NGE_RX_EN, rx_en.val); 1267 1268 itc.itc_val = nge_reg_get32(ngep, NGE_SWTR_ITC); 1269 itc.itc_bits.sw_intv = ngep->sw_intr_intv; 1270 nge_reg_put32(ngep, NGE_SWTR_ITC, itc.itc_val); 1271 1272 swtr_cntl.ctrl_val = nge_reg_get8(ngep, NGE_SWTR_CNTL); 1273 swtr_cntl.cntl_bits.sten = NGE_SET; 1274 swtr_cntl.cntl_bits.stren = NGE_SET; 1275 nge_reg_put32(ngep, NGE_SWTR_CNTL, swtr_cntl.ctrl_val); 1276 1277 /* 1278 * Disable all mii read/write operation Interrupt 1279 */ 1280 mintr_mask.mask_val = nge_reg_get8(ngep, NGE_MINTR_MASK); 1281 mintr_mask.mask_bits.mrei = NGE_CLEAR; 1282 mintr_mask.mask_bits.mcc2 = NGE_CLEAR; 1283 mintr_mask.mask_bits.mcc1 = NGE_CLEAR; 1284 mintr_mask.mask_bits.mapi = NGE_SET; 1285 mintr_mask.mask_bits.mpdi = NGE_SET; 1286 nge_reg_put8(ngep, NGE_MINTR_MASK, mintr_mask.mask_val); 1287 1288 /* 1289 * Enable all interrupt event 1290 */ 1291 intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK); 1292 intr_mask.mask_bits.reint = NGE_SET; 1293 intr_mask.mask_bits.rcint = NGE_SET; 1294 intr_mask.mask_bits.miss = NGE_SET; 1295 intr_mask.mask_bits.teint = NGE_CLEAR; 1296 intr_mask.mask_bits.tcint = NGE_SET; 1297 intr_mask.mask_bits.stint = NGE_CLEAR; 1298 intr_mask.mask_bits.mint = NGE_CLEAR; 1299 intr_mask.mask_bits.rfint = NGE_CLEAR; 1300 intr_mask.mask_bits.tfint = NGE_CLEAR; 1301 intr_mask.mask_bits.feint = NGE_SET; 1302 intr_mask.mask_bits.resv10 = NGE_CLEAR; 1303 intr_mask.mask_bits.resv11 = NGE_CLEAR; 1304 intr_mask.mask_bits.resv12 = NGE_CLEAR; 1305 intr_mask.mask_bits.resv13 = NGE_CLEAR; 1306 intr_mask.mask_bits.phyint = NGE_CLEAR; 1307 ngep->intr_masks = 
intr_mask.mask_val; 1308 nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val); 1309 ngep->nge_chip_state = NGE_CHIP_RUNNING; 1310 return (DDI_SUCCESS); 1311 } 1312 1313 /* 1314 * nge_chip_sync() -- program the chip with the unicast MAC address, 1315 * the multicast hash table, the required level of promiscuity. 1316 */ 1317 void 1318 nge_chip_sync(nge_t *ngep) 1319 { 1320 uint8_t i; 1321 uint64_t macaddr; 1322 uint64_t mul_addr; 1323 uint64_t mul_mask; 1324 nge_rx_cntrl0 rx_cntl; 1325 nge_uni_addr1 uni_adr1; 1326 1327 NGE_TRACE(("nge_chip_sync($%p)", (void *)ngep)); 1328 1329 macaddr = 0x0ull; 1330 mul_addr = 0x0ull; 1331 mul_mask = 0x0ull; 1332 rx_cntl.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0); 1333 1334 if (ngep->promisc) { 1335 rx_cntl.cntl_bits.afen = NGE_CLEAR; 1336 rx_cntl.cntl_bits.brdis = NGE_SET; 1337 } else { 1338 rx_cntl.cntl_bits.afen = NGE_SET; 1339 rx_cntl.cntl_bits.brdis = NGE_CLEAR; 1340 } 1341 1342 /* 1343 * Transform the MAC address from host to chip format, the unicast 1344 * MAC address(es) ... 1345 */ 1346 for (i = ETHERADDRL, macaddr = 0ull; i != 0; --i) { 1347 macaddr |= ngep->cur_uni_addr.addr[i-1]; 1348 macaddr <<= (i > 1) ? 8 : 0; 1349 } 1350 1351 nge_reg_put32(ngep, NGE_UNI_ADDR0, (uint32_t)macaddr); 1352 macaddr = macaddr >>32; 1353 uni_adr1.addr_val = nge_reg_get32(ngep, NGE_UNI_ADDR1); 1354 uni_adr1.addr_bits.addr = (uint16_t)macaddr; 1355 uni_adr1.addr_bits.resv16_31 = (uint16_t)0; 1356 nge_reg_put32(ngep, NGE_UNI_ADDR1, uni_adr1.addr_val); 1357 1358 /* 1359 * Reprogram the multicast address table ... 1360 */ 1361 for (i = ETHERADDRL, mul_addr = 0ull; i != 0; --i) { 1362 mul_addr |= ngep->cur_mul_addr.addr[i-1]; 1363 mul_addr <<= (i > 1) ? 8 : 0; 1364 mul_mask |= ngep->cur_mul_mask.addr[i-1]; 1365 mul_mask <<= (i > 1) ? 8 : 0; 1366 } 1367 nge_reg_put32(ngep, NGE_MUL_ADDR0, (uint32_t)mul_addr); 1368 mul_addr >>= 32; 1369 nge_reg_put32(ngep, NGE_MUL_ADDR1, mul_addr); 1370 nge_reg_put32(ngep, NGE_MUL_MASK, (uint32_t)mul_mask); 1371 mul_mask >>= 32; 1372 nge_reg_put32(ngep, NGE_MUL_MASK1, mul_mask); 1373 /* 1374 * Set or clear the PROMISCUOUS mode bit 1375 */ 1376 nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl.cntl_val); 1377 /* 1378 * For internal PHY loopback, the link will 1379 * not be up, so it need to sync mac modes directly. 
1380 */ 1381 if (ngep->param_loop_mode == NGE_LOOP_INTERNAL_PHY) 1382 nge_sync_mac_modes(ngep); 1383 } 1384 1385 static void 1386 nge_chip_err(nge_t *ngep) 1387 { 1388 nge_reg010 reg010_ins; 1389 nge_sw_statistics_t *psw_stat; 1390 nge_intr_mask intr_mask; 1391 1392 NGE_TRACE(("nge_chip_err($%p)", (void *)ngep)); 1393 1394 psw_stat = (nge_sw_statistics_t *)&ngep->statistics.sw_statistics; 1395 reg010_ins.reg010_val = nge_reg_get32(ngep, NGE_REG010); 1396 if (reg010_ins.reg010_bits.resv0) 1397 psw_stat->fe_err.tso_err_mss ++; 1398 1399 if (reg010_ins.reg010_bits.resv1) 1400 psw_stat->fe_err.tso_dis ++; 1401 1402 if (reg010_ins.reg010_bits.resv2) 1403 psw_stat->fe_err.tso_err_nosum ++; 1404 1405 if (reg010_ins.reg010_bits.resv3) 1406 psw_stat->fe_err.tso_err_hov ++; 1407 1408 if (reg010_ins.reg010_bits.resv4) 1409 psw_stat->fe_err.tso_err_huf ++; 1410 1411 if (reg010_ins.reg010_bits.resv5) 1412 psw_stat->fe_err.tso_err_l2 ++; 1413 1414 if (reg010_ins.reg010_bits.resv6) 1415 psw_stat->fe_err.tso_err_ip ++; 1416 1417 if (reg010_ins.reg010_bits.resv7) 1418 psw_stat->fe_err.tso_err_l4 ++; 1419 1420 if (reg010_ins.reg010_bits.resv8) 1421 psw_stat->fe_err.tso_err_tcp ++; 1422 1423 if (reg010_ins.reg010_bits.resv9) 1424 psw_stat->fe_err.hsum_err_ip ++; 1425 1426 if (reg010_ins.reg010_bits.resv10) 1427 psw_stat->fe_err.hsum_err_l4 ++; 1428 1429 if (reg010_ins.reg010_val != 0) { 1430 1431 /* 1432 * Fatal error is triggered by malformed driver commands. 1433 * Disable unless debugging. 1434 */ 1435 intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK); 1436 intr_mask.mask_bits.feint = NGE_CLEAR; 1437 nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val); 1438 ngep->intr_masks = intr_mask.mask_val; 1439 1440 } 1441 } 1442 1443 static void 1444 nge_sync_mac_modes(nge_t *ngep) 1445 { 1446 nge_tx_def tx_def; 1447 nge_tx_fifo_wm tx_fifo; 1448 nge_bkoff_cntl bk_cntl; 1449 nge_mac2phy m2p; 1450 nge_rx_cntrl0 rx_cntl0; 1451 nge_dev_spec_param_t *dev_param_p; 1452 1453 dev_param_p = &ngep->dev_spec_param; 1454 1455 tx_def.def_val = nge_reg_get32(ngep, NGE_TX_DEF); 1456 m2p.m2p_val = nge_reg_get32(ngep, NGE_MAC2PHY); 1457 tx_fifo.wm_val = nge_reg_get32(ngep, NGE_TX_FIFO_WM); 1458 bk_cntl.cntl_val = nge_reg_get32(ngep, NGE_BKOFF_CNTL); 1459 bk_cntl.bkoff_bits.rseed = BKOFF_RSEED; 1460 switch (ngep->param_link_speed) { 1461 case 10: 1462 m2p.m2p_bits.speed = low_speed; 1463 tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT; 1464 if (ngep->phy_mode == RGMII_IN) { 1465 tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_10_100; 1466 tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER; 1467 } else { 1468 tx_def.def_bits.if_def = TX_TIFG_MII; 1469 tx_def.def_bits.ifg2_def = TX_IFG2_MII; 1470 } 1471 tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_MII; 1472 bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_MII; 1473 break; 1474 1475 case 100: 1476 m2p.m2p_bits.speed = fast_speed; 1477 tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT; 1478 if (ngep->phy_mode == RGMII_IN) { 1479 tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_10_100; 1480 tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER; 1481 } else { 1482 tx_def.def_bits.if_def = TX_TIFG_MII; 1483 tx_def.def_bits.ifg2_def = TX_IFG2_MII; 1484 } 1485 tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_MII; 1486 bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_MII; 1487 break; 1488 1489 case 1000: 1490 m2p.m2p_bits.speed = giga_speed; 1491 tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT; 1492 if (ngep->param_link_duplex == LINK_DUPLEX_FULL) { 1493 tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_1000; 1494 tx_def.def_bits.if_def = TX_IFG_RGMII_1000_FD; 1495 } else { 
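			/*
			 * Half duplex at 1000 Mb/s: keep the gigabit IFG2
			 * value but use the generic RGMII inter-frame gap
			 * instead of the full-duplex one.
			 */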
1496 tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_1000; 1497 tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER; 1498 } 1499 1500 tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_GMII; 1501 bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_GMII; 1502 break; 1503 } 1504 1505 if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 || 1506 ngep->chipinfo.device == DEVICE_ID_MCP55_372) { 1507 m2p.m2p_bits.phyintr = NGE_CLEAR; 1508 m2p.m2p_bits.phyintrlvl = NGE_CLEAR; 1509 } 1510 if (ngep->param_link_duplex == LINK_DUPLEX_HALF) { 1511 m2p.m2p_bits.hdup_en = NGE_SET; 1512 } 1513 else 1514 m2p.m2p_bits.hdup_en = NGE_CLEAR; 1515 nge_reg_put32(ngep, NGE_MAC2PHY, m2p.m2p_val); 1516 nge_reg_put32(ngep, NGE_TX_DEF, tx_def.def_val); 1517 1518 tx_fifo.wm_bits.data_lwm = TX_FIFO_DATA_LWM; 1519 tx_fifo.wm_bits.prd_lwm = TX_FIFO_PRD_LWM; 1520 tx_fifo.wm_bits.uprd_hwm = TX_FIFO_PRD_HWM; 1521 tx_fifo.wm_bits.fb_wm = TX_FIFO_TBFW; 1522 nge_reg_put32(ngep, NGE_TX_FIFO_WM, tx_fifo.wm_val); 1523 1524 nge_reg_put32(ngep, NGE_BKOFF_CNTL, bk_cntl.cntl_val); 1525 1526 rx_cntl0.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0); 1527 if (ngep->param_link_rx_pause && dev_param_p->rx_pause_frame) 1528 rx_cntl0.cntl_bits.paen = NGE_SET; 1529 else 1530 rx_cntl0.cntl_bits.paen = NGE_CLEAR; 1531 nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val); 1532 } 1533 1534 /* 1535 * Handler for hardware link state change. 1536 * 1537 * When this routine is called, the hardware link state has changed 1538 * and the new state is reflected in the param_* variables. Here 1539 * we must update the softstate, reprogram the MAC to match, and 1540 * record the change in the log and/or on the console. 1541 */ 1542 static void 1543 nge_factotum_link_handler(nge_t *ngep) 1544 { 1545 /* 1546 * Update the s/w link_state 1547 */ 1548 if (ngep->param_link_up) 1549 ngep->link_state = LINK_STATE_UP; 1550 else 1551 ngep->link_state = LINK_STATE_DOWN; 1552 1553 /* 1554 * Reprogram the MAC modes to match 1555 */ 1556 nge_sync_mac_modes(ngep); 1557 } 1558 1559 static boolean_t 1560 nge_factotum_link_check(nge_t *ngep) 1561 { 1562 boolean_t lchg; 1563 boolean_t check; 1564 1565 ASSERT(mutex_owned(ngep->genlock)); 1566 1567 (*ngep->physops->phys_check)(ngep); 1568 switch (ngep->link_state) { 1569 case LINK_STATE_UP: 1570 lchg = (ngep->param_link_up == B_FALSE); 1571 check = (ngep->param_link_up == B_FALSE); 1572 break; 1573 1574 case LINK_STATE_DOWN: 1575 lchg = (ngep->param_link_up == B_TRUE); 1576 check = (ngep->param_link_up == B_TRUE); 1577 break; 1578 1579 default: 1580 check = B_TRUE; 1581 break; 1582 } 1583 1584 /* 1585 * If <check> is false, we're sure the link hasn't changed. 1586 * If true, however, it's not yet definitive; we have to call 1587 * nge_phys_check() to determine whether the link has settled 1588 * into a new state yet ... and if it has, then call the link 1589 * state change handler.But when the chip is 5700 in Dell 6650 1590 * ,even if check is false, the link may have changed.So we 1591 * have to call nge_phys_check() to determine the link state. 1592 */ 1593 if (check) 1594 nge_factotum_link_handler(ngep); 1595 1596 return (lchg); 1597 } 1598 1599 /* 1600 * Factotum routine to check for Tx stall, using the 'watchdog' counter 1601 */ 1602 static boolean_t nge_factotum_stall_check(nge_t *ngep); 1603 1604 static boolean_t 1605 nge_factotum_stall_check(nge_t *ngep) 1606 { 1607 uint32_t dogval; 1608 /* 1609 * Specific check for Tx stall ... 
1610 * 1611 * The 'watchdog' counter is incremented whenever a packet 1612 * is queued, reset to 1 when some (but not all) buffers 1613 * are reclaimed, reset to 0 (disabled) when all buffers 1614 * are reclaimed, and shifted left here. If it exceeds the 1615 * threshold value, the chip is assumed to have stalled and 1616 * is put into the ERROR state. The factotum will then reset 1617 * it on the next pass. 1618 * 1619 * All of which should ensure that we don't get into a state 1620 * where packets are left pending indefinitely! 1621 */ 1622 dogval = nge_atomic_shl32(&ngep->watchdog, 1); 1623 if (dogval < nge_watchdog_count) { 1624 ngep->stall_cknum = 0; 1625 } else { 1626 ngep->stall_cknum++; 1627 } 1628 if (ngep->stall_cknum < 8) { 1629 return (B_FALSE); 1630 } else { 1631 ngep->stall_cknum = 0; 1632 ngep->statistics.sw_statistics.tx_stall++; 1633 return (B_TRUE); 1634 } 1635 } 1636 1637 1638 1639 /* 1640 * The factotum is woken up when there's something to do that we'd rather 1641 * not do from inside a hardware interrupt handler or high-level cyclic. 1642 * Its two main tasks are: 1643 * reset & restart the chip after an error 1644 * check the link status whenever necessary 1645 */ 1646 /* ARGSUSED */ 1647 uint_t 1648 nge_chip_factotum(caddr_t args1, caddr_t args2) 1649 { 1650 uint_t result; 1651 nge_t *ngep; 1652 boolean_t err; 1653 boolean_t linkchg; 1654 1655 ngep = (nge_t *)args1; 1656 1657 NGE_TRACE(("nge_chip_factotum($%p)", (void *)ngep)); 1658 1659 mutex_enter(ngep->softlock); 1660 if (ngep->factotum_flag == 0) { 1661 mutex_exit(ngep->softlock); 1662 return (DDI_INTR_UNCLAIMED); 1663 } 1664 ngep->factotum_flag = 0; 1665 mutex_exit(ngep->softlock); 1666 err = B_FALSE; 1667 linkchg = B_FALSE; 1668 result = DDI_INTR_CLAIMED; 1669 1670 mutex_enter(ngep->genlock); 1671 switch (ngep->nge_chip_state) { 1672 default: 1673 break; 1674 1675 case NGE_CHIP_RUNNING: 1676 linkchg = nge_factotum_link_check(ngep); 1677 err = nge_factotum_stall_check(ngep); 1678 break; 1679 1680 case NGE_CHIP_FAULT: 1681 (void) nge_restart(ngep); 1682 NGE_REPORT((ngep, "automatic recovery activated")); 1683 break; 1684 } 1685 1686 if (err) 1687 (void) nge_chip_stop(ngep, B_TRUE); 1688 mutex_exit(ngep->genlock); 1689 1690 /* 1691 * If the link state changed, tell the world about it (if 1692 * this version of MAC supports link state notification). 1693 * Note: can't do this while still holding the mutex. 1694 */ 1695 if (linkchg) 1696 mac_link_update(ngep->mh, ngep->link_state); 1697 1698 return (result); 1699 1700 } 1701 1702 static void 1703 nge_intr_handle(nge_t *ngep, nge_intr_src *pintr_src) 1704 { 1705 boolean_t brx; 1706 boolean_t btx; 1707 nge_mintr_src mintr_src; 1708 1709 brx = B_FALSE; 1710 btx = B_FALSE; 1711 ngep->statistics.sw_statistics.intr_count++; 1712 ngep->statistics.sw_statistics.intr_lval = pintr_src->intr_val; 1713 brx = (pintr_src->int_bits.reint | pintr_src->int_bits.miss 1714 | pintr_src->int_bits.rcint | pintr_src->int_bits.stint) 1715 > 0 ? B_TRUE : B_FALSE; 1716 if (pintr_src->int_bits.reint) 1717 ngep->statistics.sw_statistics.rx_err++; 1718 if (pintr_src->int_bits.miss) 1719 ngep->statistics.sw_statistics.rx_nobuffer++; 1720 1721 if (brx) 1722 nge_receive(ngep); 1723 btx = (pintr_src->int_bits.teint | pintr_src->int_bits.tcint) 1724 > 0 ? 
B_TRUE : B_FALSE; 1725 if (pintr_src->int_bits.stint && ngep->poll) 1726 ngep->stint_count ++; 1727 btx |= (ngep->poll && (ngep->stint_count % ngep->param_tx_n_intr == 0)); 1728 if (btx) 1729 nge_tx_recycle(ngep, B_TRUE); 1730 if (pintr_src->int_bits.teint) 1731 ngep->statistics.sw_statistics.tx_stop_err++; 1732 if (ngep->intr_moderation && brx) { 1733 if (ngep->poll) { 1734 if (ngep->recv_count < ngep->param_rx_intr_hwater) { 1735 ngep->quiet_time++; 1736 if (ngep->quiet_time == 1737 ngep->param_poll_quiet_time) { 1738 ngep->poll = B_FALSE; 1739 ngep->quiet_time = 0; 1740 ngep->stint_count = 0; 1741 nge_tx_recycle(ngep, B_TRUE); 1742 } 1743 } else 1744 ngep->quiet_time = 0; 1745 } else { 1746 if (ngep->recv_count > ngep->param_rx_intr_lwater) { 1747 ngep->busy_time++; 1748 if (ngep->busy_time == 1749 ngep->param_poll_busy_time) { 1750 ngep->poll = B_TRUE; 1751 ngep->busy_time = 0; 1752 } 1753 } else 1754 ngep->busy_time = 0; 1755 } 1756 } 1757 ngep->recv_count = 0; 1758 if (pintr_src->int_bits.feint) 1759 nge_chip_err(ngep); 1760 /* link interrupt, check the link state */ 1761 if (pintr_src->int_bits.mint) { 1762 mintr_src.src_val = nge_reg_get32(ngep, NGE_MINTR_SRC); 1763 nge_reg_put32(ngep, NGE_MINTR_SRC, mintr_src.src_val); 1764 nge_wake_factotum(ngep); 1765 } 1766 } 1767 1768 /* 1769 * nge_chip_intr() -- handle chip interrupts 1770 */ 1771 /* ARGSUSED */ 1772 uint_t 1773 nge_chip_intr(caddr_t arg1, caddr_t arg2) 1774 { 1775 nge_t *ngep = (nge_t *)arg1; 1776 nge_intr_src intr_src; 1777 nge_intr_mask intr_mask; 1778 1779 mutex_enter(ngep->genlock); 1780 1781 /* 1782 * Check whether chip's says it's asserting #INTA; 1783 * if not, don't process or claim the interrupt. 1784 */ 1785 intr_src.intr_val = nge_reg_get32(ngep, NGE_INTR_SRC); 1786 if (intr_src.intr_val == 0) { 1787 mutex_exit(ngep->genlock); 1788 return (DDI_INTR_UNCLAIMED); 1789 } 1790 /* 1791 * Ack the interrupt 1792 */ 1793 nge_reg_put32(ngep, NGE_INTR_SRC, intr_src.intr_val); 1794 1795 if (ngep->nge_chip_state != NGE_CHIP_RUNNING) { 1796 mutex_exit(ngep->genlock); 1797 return (DDI_INTR_CLAIMED); 1798 } 1799 nge_intr_handle(ngep, &intr_src); 1800 if (ngep->poll && !ngep->ch_intr_mode) { 1801 intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK); 1802 intr_mask.mask_bits.stint = NGE_SET; 1803 intr_mask.mask_bits.rcint = NGE_CLEAR; 1804 intr_mask.mask_bits.reint = NGE_CLEAR; 1805 intr_mask.mask_bits.tcint = NGE_CLEAR; 1806 intr_mask.mask_bits.teint = NGE_CLEAR; 1807 nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val); 1808 ngep->ch_intr_mode = B_TRUE; 1809 } else if ((ngep->ch_intr_mode) && (!ngep->poll)) { 1810 nge_reg_put32(ngep, NGE_INTR_MASK, ngep->intr_masks); 1811 ngep->ch_intr_mode = B_FALSE; 1812 } 1813 mutex_exit(ngep->genlock); 1814 return (DDI_INTR_CLAIMED); 1815 } 1816 1817 static enum ioc_reply 1818 nge_pp_ioctl(nge_t *ngep, int cmd, mblk_t *mp, struct iocblk *iocp) 1819 { 1820 int err; 1821 uint64_t sizemask; 1822 uint64_t mem_va; 1823 uint64_t maxoff; 1824 boolean_t peek; 1825 nge_peekpoke_t *ppd; 1826 int (*ppfn)(nge_t *ngep, nge_peekpoke_t *ppd); 1827 1828 switch (cmd) { 1829 default: 1830 return (IOC_INVAL); 1831 1832 case NGE_PEEK: 1833 peek = B_TRUE; 1834 break; 1835 1836 case NGE_POKE: 1837 peek = B_FALSE; 1838 break; 1839 } 1840 1841 /* 1842 * Validate format of ioctl 1843 */ 1844 if (iocp->ioc_count != sizeof (nge_peekpoke_t)) 1845 return (IOC_INVAL); 1846 if (mp->b_cont == NULL) 1847 return (IOC_INVAL); 1848 ppd = (nge_peekpoke_t *)mp->b_cont->b_rptr; 1849 1850 /* 1851 * Validate request parameters 
1852 */ 1853 switch (ppd->pp_acc_space) { 1854 default: 1855 return (IOC_INVAL); 1856 1857 case NGE_PP_SPACE_CFG: 1858 /* 1859 * Config space 1860 */ 1861 sizemask = 8|4|2|1; 1862 mem_va = 0; 1863 maxoff = PCI_CONF_HDR_SIZE; 1864 ppfn = peek ? nge_chip_peek_cfg : nge_chip_poke_cfg; 1865 break; 1866 1867 case NGE_PP_SPACE_REG: 1868 /* 1869 * Memory-mapped I/O space 1870 */ 1871 sizemask = 8|4|2|1; 1872 mem_va = 0; 1873 maxoff = NGE_REG_SIZE; 1874 ppfn = peek ? nge_chip_peek_reg : nge_chip_poke_reg; 1875 break; 1876 1877 case NGE_PP_SPACE_MII: 1878 sizemask = 4|2|1; 1879 mem_va = 0; 1880 maxoff = NGE_MII_SIZE; 1881 ppfn = peek ? nge_chip_peek_mii : nge_chip_poke_mii; 1882 break; 1883 1884 case NGE_PP_SPACE_SEEPROM: 1885 sizemask = 4|2|1; 1886 mem_va = 0; 1887 maxoff = NGE_SEEROM_SIZE; 1888 ppfn = peek ? nge_chip_peek_seeprom : nge_chip_poke_seeprom; 1889 break; 1890 } 1891 1892 switch (ppd->pp_acc_size) { 1893 default: 1894 return (IOC_INVAL); 1895 1896 case 8: 1897 case 4: 1898 case 2: 1899 case 1: 1900 if ((ppd->pp_acc_size & sizemask) == 0) 1901 return (IOC_INVAL); 1902 break; 1903 } 1904 1905 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0) 1906 return (IOC_INVAL); 1907 1908 if (ppd->pp_acc_offset >= maxoff) 1909 return (IOC_INVAL); 1910 1911 if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff) 1912 return (IOC_INVAL); 1913 1914 /* 1915 * All OK - go do it! 1916 */ 1917 ppd->pp_acc_offset += mem_va; 1918 if (ppfn) 1919 err = (*ppfn)(ngep, ppd); 1920 if (err != DDI_SUCCESS) 1921 return (IOC_INVAL); 1922 return (peek ? IOC_REPLY : IOC_ACK); 1923 } 1924 1925 static enum ioc_reply nge_diag_ioctl(nge_t *ngep, int cmd, mblk_t *mp, 1926 struct iocblk *iocp); 1927 #pragma no_inline(nge_diag_ioctl) 1928 1929 static enum ioc_reply 1930 nge_diag_ioctl(nge_t *ngep, int cmd, mblk_t *mp, struct iocblk *iocp) 1931 { 1932 ASSERT(mutex_owned(ngep->genlock)); 1933 1934 switch (cmd) { 1935 default: 1936 nge_error(ngep, "nge_diag_ioctl: invalid cmd 0x%x", cmd); 1937 return (IOC_INVAL); 1938 1939 case NGE_DIAG: 1940 return (IOC_ACK); 1941 1942 case NGE_PEEK: 1943 case NGE_POKE: 1944 return (nge_pp_ioctl(ngep, cmd, mp, iocp)); 1945 1946 case NGE_PHY_RESET: 1947 return (IOC_RESTART_ACK); 1948 1949 case NGE_SOFT_RESET: 1950 case NGE_HARD_RESET: 1951 return (IOC_ACK); 1952 } 1953 1954 /* NOTREACHED */ 1955 } 1956 1957 enum ioc_reply 1958 nge_chip_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp) 1959 { 1960 int cmd; 1961 1962 ASSERT(mutex_owned(ngep->genlock)); 1963 1964 cmd = iocp->ioc_cmd; 1965 1966 switch (cmd) { 1967 default: 1968 return (IOC_INVAL); 1969 1970 case NGE_DIAG: 1971 case NGE_PEEK: 1972 case NGE_POKE: 1973 case NGE_PHY_RESET: 1974 case NGE_SOFT_RESET: 1975 case NGE_HARD_RESET: 1976 #if NGE_DEBUGGING 1977 return (nge_diag_ioctl(ngep, cmd, mp, iocp)); 1978 #else 1979 return (IOC_INVAL); 1980 #endif 1981 1982 case NGE_MII_READ: 1983 case NGE_MII_WRITE: 1984 return (IOC_INVAL); 1985 1986 #if NGE_SEE_IO32 1987 case NGE_SEE_READ: 1988 case NGE_SEE_WRITE: 1989 return (IOC_INVAL); 1990 #endif 1991 1992 #if NGE_FLASH_IO32 1993 case NGE_FLASH_READ: 1994 case NGE_FLASH_WRITE: 1995 return (IOC_INVAL); 1996 #endif 1997 } 1998 } 1999
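
/*
 * Illustrative sketch (not part of the driver): the shape of an NGE_PEEK
 * request as validated by nge_pp_ioctl() above.  The user-level plumbing
 * (opening the device and issuing the ioctl) is an assumption here and is
 * not shown; only the nge_peekpoke_t fields, NGE_PEEK, and NGE_PP_SPACE_REG
 * come from this driver.  Note that nge_chip_ioctl() forwards NGE_PEEK and
 * NGE_POKE to nge_diag_ioctl() only when built with NGE_DEBUGGING.
 *
 *	nge_peekpoke_t pp;
 *
 *	bzero(&pp, sizeof (pp));
 *	pp.pp_acc_space = NGE_PP_SPACE_REG;	.. operating register space
 *	pp.pp_acc_size = 4;			.. must be 1, 2, 4 or 8
 *	pp.pp_acc_offset = 0;			.. size-aligned, < NGE_REG_SIZE
 *	.. issue the NGE_PEEK ioctl with &pp as the argument ..
 *	.. on IOC_REPLY the value read back is in pp.pp_acc_data ..
 */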