/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "nge.h"
static uint32_t	nge_watchdog_count = 1 << 29;
extern boolean_t nge_enable_msi;
static void nge_sync_mac_modes(nge_t *);

#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_CHIP

/*
 * Operating register get/set access routines
 */
uint8_t nge_reg_get8(nge_t *ngep, nge_regno_t regno);
#pragma	inline(nge_reg_get8)

uint8_t
nge_reg_get8(nge_t *ngep, nge_regno_t regno)
{
	NGE_TRACE(("nge_reg_get8($%p, 0x%lx)", (void *)ngep, regno));

	return (ddi_get8(ngep->io_handle, PIO_ADDR(ngep, regno)));
}

void nge_reg_put8(nge_t *ngep, nge_regno_t regno, uint8_t data);
#pragma	inline(nge_reg_put8)

void
nge_reg_put8(nge_t *ngep, nge_regno_t regno, uint8_t data)
{
	NGE_TRACE(("nge_reg_put8($%p, 0x%lx, 0x%x)",
	    (void *)ngep, regno, data));
	ddi_put8(ngep->io_handle, PIO_ADDR(ngep, regno), data);
}

uint16_t nge_reg_get16(nge_t *ngep, nge_regno_t regno);
#pragma	inline(nge_reg_get16)

uint16_t
nge_reg_get16(nge_t *ngep, nge_regno_t regno)
{
	NGE_TRACE(("nge_reg_get16($%p, 0x%lx)", (void *)ngep, regno));
	return (ddi_get16(ngep->io_handle, PIO_ADDR(ngep, regno)));
}

void nge_reg_put16(nge_t *ngep, nge_regno_t regno, uint16_t data);
#pragma	inline(nge_reg_put16)

void
nge_reg_put16(nge_t *ngep, nge_regno_t regno, uint16_t data)
{
	NGE_TRACE(("nge_reg_put16($%p, 0x%lx, 0x%x)",
	    (void *)ngep, regno, data));
	ddi_put16(ngep->io_handle, PIO_ADDR(ngep, regno), data);
}

uint32_t nge_reg_get32(nge_t *ngep, nge_regno_t regno);
#pragma	inline(nge_reg_get32)

uint32_t
nge_reg_get32(nge_t *ngep, nge_regno_t regno)
{
	NGE_TRACE(("nge_reg_get32($%p, 0x%lx)", (void *)ngep, regno));
	return (ddi_get32(ngep->io_handle, PIO_ADDR(ngep, regno)));
}

void nge_reg_put32(nge_t *ngep, nge_regno_t regno, uint32_t data);
#pragma	inline(nge_reg_put32)

void
nge_reg_put32(nge_t *ngep, nge_regno_t regno, uint32_t data)
{
	NGE_TRACE(("nge_reg_put32($%p, 0x%lx, 0x%x)",
	    (void *)ngep, regno, data));
	ddi_put32(ngep->io_handle, PIO_ADDR(ngep, regno), data);
}
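
/*
 * Chip diagnostic peek/poke support.
 *
 * The nge_chip_peek_*() and nge_chip_poke_*() routines below provide
 * width-dispatched read/write access to PCI config space, the operating
 * registers, the MII registers and the SEEPROM.  They are selected and
 * invoked from nge_pp_ioctl() when servicing NGE_PEEK/NGE_POKE ioctls.
 */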

static int nge_chip_peek_cfg(nge_t *ngep, nge_peekpoke_t *ppd);
#pragma	no_inline(nge_chip_peek_cfg)

static int
nge_chip_peek_cfg(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	uint64_t regval;
	uint64_t regno;

	NGE_TRACE(("nge_chip_peek_cfg($%p, $%p)",
	    (void *)ngep, (void *)ppd));

	err = DDI_SUCCESS;
	regno = ppd->pp_acc_offset;

	switch (ppd->pp_acc_size) {
	case 1:
		regval = pci_config_get8(ngep->cfg_handle, regno);
		break;

	case 2:
		regval = pci_config_get16(ngep->cfg_handle, regno);
		break;

	case 4:
		regval = pci_config_get32(ngep->cfg_handle, regno);
		break;

	case 8:
		regval = pci_config_get64(ngep->cfg_handle, regno);
		break;
	}
	ppd->pp_acc_data = regval;
	return (err);
}

static int nge_chip_poke_cfg(nge_t *ngep, nge_peekpoke_t *ppd);

static int
nge_chip_poke_cfg(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	uint64_t regval;
	uint64_t regno;

	NGE_TRACE(("nge_chip_poke_cfg($%p, $%p)",
	    (void *)ngep, (void *)ppd));

	err = DDI_SUCCESS;
	regno = ppd->pp_acc_offset;
	regval = ppd->pp_acc_data;

	switch (ppd->pp_acc_size) {
	case 1:
		pci_config_put8(ngep->cfg_handle, regno, regval);
		break;

	case 2:
		pci_config_put16(ngep->cfg_handle, regno, regval);
		break;

	case 4:
		pci_config_put32(ngep->cfg_handle, regno, regval);
		break;

	case 8:
		pci_config_put64(ngep->cfg_handle, regno, regval);
		break;
	}

	return (err);
}

static int nge_chip_peek_reg(nge_t *ngep, nge_peekpoke_t *ppd);

static int
nge_chip_peek_reg(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	uint64_t regval;
	void *regaddr;

	NGE_TRACE(("nge_chip_peek_reg($%p, $%p)",
	    (void *)ngep, (void *)ppd));

	err = DDI_SUCCESS;
	regaddr = PIO_ADDR(ngep, ppd->pp_acc_offset);

	switch (ppd->pp_acc_size) {
	case 1:
		regval = ddi_get8(ngep->io_handle, regaddr);
		break;

	case 2:
		regval = ddi_get16(ngep->io_handle, regaddr);
		break;

	case 4:
		regval = ddi_get32(ngep->io_handle, regaddr);
		break;

	case 8:
		regval = ddi_get64(ngep->io_handle, regaddr);
		break;

	default:
		regval = 0x0ull;
		break;
	}
	ppd->pp_acc_data = regval;
	return (err);
}

static int nge_chip_poke_reg(nge_t *ngep, nge_peekpoke_t *ppd);

static int
nge_chip_poke_reg(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	uint64_t regval;
	void *regaddr;

	NGE_TRACE(("nge_chip_poke_reg($%p, $%p)",
	    (void *)ngep, (void *)ppd));

	err = DDI_SUCCESS;
	regaddr = PIO_ADDR(ngep, ppd->pp_acc_offset);
	regval = ppd->pp_acc_data;

	switch (ppd->pp_acc_size) {
	case 1:
		ddi_put8(ngep->io_handle, regaddr, regval);
		break;

	case 2:
		ddi_put16(ngep->io_handle, regaddr, regval);
		break;

	case 4:
		ddi_put32(ngep->io_handle, regaddr, regval);
		break;

	case 8:
		ddi_put64(ngep->io_handle, regaddr, regval);
		break;
	}
	return (err);
}

static int nge_chip_peek_mii(nge_t *ngep, nge_peekpoke_t *ppd);
#pragma	no_inline(nge_chip_peek_mii)

static int
nge_chip_peek_mii(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;

	err = DDI_SUCCESS;
	ppd->pp_acc_data = nge_mii_get16(ngep, ppd->pp_acc_offset/2);
	return (err);
}

static int nge_chip_poke_mii(nge_t *ngep, nge_peekpoke_t *ppd);
#pragma	no_inline(nge_chip_poke_mii)

static int
nge_chip_poke_mii(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	err = DDI_SUCCESS;
	nge_mii_put16(ngep, ppd->pp_acc_offset/2, ppd->pp_acc_data);
	return (err);
}

/*
 * Basic SEEPROM get/set access routine
 *
 * This uses the chip's SEEPROM auto-access method, controlled by the
 * Serial EEPROM Address/Data Registers at 0x504, so the CPU
 * doesn't have to fiddle with the individual bits.
 *
 * The caller should hold <genlock> and *also* have already acquired
 * the right to access the SEEPROM.
 *
 * Return value:
 *	DDI_SUCCESS on success,
 *	DDI_FAILURE on access timeout (the device may just be busy).
 *
 * <*dp> is an input to a SEEPROM_CMD_WRITE operation, or an output
 * from a (successful) SEEPROM_CMD_READ.
 */

static int
nge_seeprom_access(nge_t *ngep, uint32_t cmd, nge_regno_t addr, uint16_t *dp)
{
	uint32_t tries;
	nge_ep_cmd cmd_reg;
	nge_ep_data data_reg;

	NGE_TRACE(("nge_seeprom_access($%p, %d, %x, $%p)",
	    (void *)ngep, cmd, addr, (void *)dp));

	ASSERT(mutex_owned(ngep->genlock));

	/*
	 * Check there's no command in progress.
	 *
	 * Note: this *shouldn't* ever find that there is a command
	 * in progress, because we already hold the <genlock> mutex.
	 * Also, to ensure we don't have a conflict with the chip's
	 * internal firmware or another process accessing the same
	 * (shared) SEEPROM, the caller must already have acquired
	 * the right to access it.  So this is just a final consistency
	 * check: we shouldn't see EITHER the START bit (command started
	 * but not complete) OR the COMPLETE bit (command completed but
	 * not cleared).
	 */
	cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	for (tries = 0; tries < 30; tries++) {
		if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
			break;
		drv_usecwait(10);
		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	}

	/*
	 * This should not happen.  If it does, we have to restart the
	 * EEPROM state machine.
	 */
	if (tries == 30) {
		cmd_reg.cmd_bits.sts = SEEPROM_READY;
		nge_reg_put32(ngep, NGE_EP_CMD, cmd_reg.cmd_val);
		drv_usecwait(10);
		/*
		 * Poll the status bit to make sure the EEPROM is ready
		 */
		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
		for (tries = 0; tries < 30; tries++) {
			if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
				break;
			drv_usecwait(10);
			cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
		}
	}

	/*
	 * Assemble the command ...
	 */
	cmd_reg.cmd_bits.addr = addr;
	cmd_reg.cmd_bits.cmd = cmd;
	cmd_reg.cmd_bits.sts = 0;

	nge_reg_put32(ngep, NGE_EP_CMD, cmd_reg.cmd_val);

	/*
	 * Poll until the access completes.
	 */
	cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	for (tries = 0; tries < 30; tries++) {
		if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
			break;
		drv_usecwait(10);
		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	}

	if (tries == 30) {
		nge_report(ngep, NGE_HW_ROM);
		return (DDI_FAILURE);
	}
	switch (cmd) {
	default:
	case SEEPROM_CMD_WRITE_ENABLE:
	case SEEPROM_CMD_ERASE:
	case SEEPROM_CMD_ERALSE_ALL:
	case SEEPROM_CMD_WRITE_DIS:
		break;

	case SEEPROM_CMD_READ:
		data_reg.data_val = nge_reg_get32(ngep, NGE_EP_DATA);
		*dp = data_reg.data_bits.data;
		break;

	case SEEPROM_CMD_WRITE:
		data_reg.data_val = nge_reg_get32(ngep, NGE_EP_DATA);
		data_reg.data_bits.data = *dp;
		nge_reg_put32(ngep, NGE_EP_DATA, data_reg.data_val);
		break;
	}

	return (DDI_SUCCESS);
}

static int
nge_chip_peek_seeprom(nge_t *ngep, nge_peekpoke_t *ppd)
{
	uint16_t data;
	int err;

	err = nge_seeprom_access(ngep, SEEPROM_CMD_READ,
	    ppd->pp_acc_offset, &data);
	ppd->pp_acc_data = data;
	return (err);
}

static int
nge_chip_poke_seeprom(nge_t *ngep, nge_peekpoke_t *ppd)
{
	uint16_t data;
	int err;

	data = ppd->pp_acc_data;
	err = nge_seeprom_access(ngep, SEEPROM_CMD_WRITE,
	    ppd->pp_acc_offset, &data);
	return (err);
}
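
/*
 * nge_init_dev_spec_param() -- select the device-specific parameters
 * (MSI/MSI-X support, VLAN, pause, jumbo, checksum offload, descriptor
 * type and ring sizes) according to the PCI device ID recorded in
 * <chipinfo>.  Unrecognised devices fall back to the most conservative
 * settings.
 */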
void
nge_init_dev_spec_param(nge_t *ngep)
{
	nge_dev_spec_param_t	*dev_param_p;
	chip_info_t *infop;

	dev_param_p = &ngep->dev_spec_param;
	infop = (chip_info_t *)&ngep->chipinfo;

	switch (infop->device) {
	case DEVICE_ID_NF3_E6:
	case DEVICE_ID_NF3_DF:
	case DEVICE_ID_MCP04_37:
	case DEVICE_ID_MCP04_38:
		dev_param_p->msi = B_FALSE;
		dev_param_p->msi_x = B_FALSE;
		dev_param_p->vlan = B_FALSE;
		dev_param_p->advanced_pm = B_FALSE;
		dev_param_p->tx_pause_frame = B_FALSE;
		dev_param_p->rx_pause_frame = B_FALSE;
		dev_param_p->jumbo = B_FALSE;
		dev_param_p->tx_rx_64byte = B_FALSE;
		dev_param_p->rx_hw_checksum = B_FALSE;
		dev_param_p->tx_hw_checksum = 0;
		dev_param_p->desc_type = DESC_OFFLOAD;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
		dev_param_p->nge_split = NGE_SPLIT_32;
		break;

	case DEVICE_ID_CK804_56:
	case DEVICE_ID_CK804_57:
		dev_param_p->msi = B_TRUE;
		dev_param_p->msi_x = B_TRUE;
		dev_param_p->vlan = B_FALSE;
		dev_param_p->advanced_pm = B_FALSE;
		dev_param_p->tx_pause_frame = B_FALSE;
		dev_param_p->rx_pause_frame = B_TRUE;
		dev_param_p->jumbo = B_TRUE;
		dev_param_p->tx_rx_64byte = B_FALSE;
		dev_param_p->rx_hw_checksum = B_TRUE;
		dev_param_p->tx_hw_checksum = HCKSUM_IPHDRCKSUM;
		dev_param_p->desc_type = DESC_HOT;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_3072;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_3072;
		dev_param_p->nge_split = NGE_SPLIT_96;
		break;

	case DEVICE_ID_MCP61_3EE:
	case DEVICE_ID_MCP61_3EF:
	case DEVICE_ID_MCP51_268:
	case DEVICE_ID_MCP51_269:
		dev_param_p->msi = B_FALSE;
		dev_param_p->msi_x = B_FALSE;
		dev_param_p->vlan = B_FALSE;
		dev_param_p->advanced_pm = B_TRUE;
		dev_param_p->tx_pause_frame = B_FALSE;
		dev_param_p->rx_pause_frame = B_FALSE;
		dev_param_p->jumbo = B_FALSE;
		dev_param_p->tx_rx_64byte = B_TRUE;
		dev_param_p->rx_hw_checksum = B_FALSE;
		dev_param_p->tx_hw_checksum = 0;
		dev_param_p->desc_type = DESC_OFFLOAD;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
		dev_param_p->nge_split = NGE_SPLIT_32;
		break;

	case DEVICE_ID_MCP55_372:
	case DEVICE_ID_MCP55_373:
		dev_param_p->msi = B_TRUE;
		dev_param_p->msi_x = B_TRUE;
		dev_param_p->vlan = B_TRUE;
		dev_param_p->advanced_pm = B_TRUE;
		dev_param_p->tx_pause_frame = B_TRUE;
		dev_param_p->rx_pause_frame = B_TRUE;
		dev_param_p->jumbo = B_TRUE;
		dev_param_p->tx_rx_64byte = B_TRUE;
		dev_param_p->rx_hw_checksum = B_TRUE;
		dev_param_p->tx_hw_checksum = HCKSUM_IPHDRCKSUM;
		dev_param_p->desc_type = DESC_HOT;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_3072;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_3072;
		dev_param_p->nge_split = NGE_SPLIT_96;
		break;

	default:
		dev_param_p->msi = B_FALSE;
		dev_param_p->msi_x = B_FALSE;
		dev_param_p->vlan = B_FALSE;
		dev_param_p->advanced_pm = B_FALSE;
		dev_param_p->tx_pause_frame = B_FALSE;
		dev_param_p->rx_pause_frame = B_FALSE;
		dev_param_p->jumbo = B_FALSE;
		dev_param_p->tx_rx_64byte = B_FALSE;
		dev_param_p->rx_hw_checksum = B_FALSE;
		dev_param_p->tx_hw_checksum = 0;
		dev_param_p->desc_type = DESC_OFFLOAD;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
		dev_param_p->nge_split = NGE_SPLIT_32;
		return;
	}
}

/*
 * Perform first-stage chip (re-)initialisation, using only config-space
 * accesses:
 *
 * + Read the vendor/device/revision/subsystem/cache-line-size registers,
 *   returning the data in the structure pointed to by <infop>.
 */
void nge_chip_cfg_init(nge_t *ngep, chip_info_t *infop, boolean_t reset);
#pragma	no_inline(nge_chip_cfg_init)

void
nge_chip_cfg_init(nge_t *ngep, chip_info_t *infop, boolean_t reset)
{
	uint16_t command;
	ddi_acc_handle_t handle;
	nge_interbus_conf interbus_conf;
	nge_msi_mask_conf msi_mask_conf;
	nge_msi_map_cap_conf cap_conf;

	NGE_TRACE(("nge_chip_cfg_init($%p, $%p, %d)",
	    (void *)ngep, (void *)infop, reset));

	/*
	 * Read all the config-space registers that characterise the
	 * chip, specifically vendor/device/revision/subsystem vendor
	 * and subsystem device ID, plus the cache line size and
	 * latency timer.
	 */
	handle = ngep->cfg_handle;
	/* Read the vendor information only once (not on reset) */
	if (reset == B_FALSE) {
		infop->command = pci_config_get16(handle,
		    PCI_CONF_COMM);
		infop->vendor = pci_config_get16(handle,
		    PCI_CONF_VENID);
		infop->device = pci_config_get16(handle,
		    PCI_CONF_DEVID);
		infop->subven = pci_config_get16(handle,
		    PCI_CONF_SUBVENID);
		infop->subdev = pci_config_get16(handle,
		    PCI_CONF_SUBSYSID);
		infop->class_code = pci_config_get8(handle,
		    PCI_CONF_BASCLASS);
		infop->revision = pci_config_get8(handle,
		    PCI_CONF_REVID);
		infop->clsize = pci_config_get8(handle,
		    PCI_CONF_CACHE_LINESZ);
		infop->latency = pci_config_get8(handle,
		    PCI_CONF_LATENCY_TIMER);
	}
	if (nge_enable_msi) {
		/*
		 * Clear the hidden MSI-disable bit so MSI can be used
		 * (and disable MSI-X on MCP55)
		 */
		interbus_conf.conf_val = pci_config_get32(handle,
		    PCI_CONF_HT_INTERNAL);
		if ((infop->device == DEVICE_ID_MCP55_373) ||
		    (infop->device == DEVICE_ID_MCP55_372))
			interbus_conf.conf_bits.msix_off = NGE_SET;
		interbus_conf.conf_bits.msi_off = NGE_CLEAR;
		pci_config_put32(handle, PCI_CONF_HT_INTERNAL,
		    interbus_conf.conf_val);

		if ((infop->device == DEVICE_ID_MCP55_373) ||
		    (infop->device == DEVICE_ID_MCP55_372)) {

			/* Clear the per-vector mask bits for MCP55 */
			msi_mask_conf.msi_mask_conf_val =
			    pci_config_get32(handle, PCI_CONF_HT_MSI_MASK);
			msi_mask_conf.msi_mask_bits.vec0_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec1_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec2_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec3_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec4_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec5_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec6_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec7_off = NGE_CLEAR;
			pci_config_put32(handle, PCI_CONF_HT_MSI_MASK,
			    msi_mask_conf.msi_mask_conf_val);

			/* Enable the MSI mapping */
			cap_conf.msi_map_cap_conf_val =
			    pci_config_get32(handle, PCI_CONF_HT_MSI_MAP_CAP);
			cap_conf.map_cap_conf_bits.map_en = NGE_SET;
			pci_config_put32(handle, PCI_CONF_HT_MSI_MAP_CAP,
			    cap_conf.msi_map_cap_conf_val);
		}
	} else {
		interbus_conf.conf_val = pci_config_get32(handle,
		    PCI_CONF_HT_INTERNAL);
		interbus_conf.conf_bits.msi_off = NGE_SET;
		pci_config_put32(handle, PCI_CONF_HT_INTERNAL,
		    interbus_conf.conf_val);
	}
	command = infop->command | PCI_COMM_MAE;
	command &= ~PCI_COMM_MEMWR_INVAL;
	command |= PCI_COMM_ME;
	pci_config_put16(handle, PCI_CONF_COMM, command);
	pci_config_put16(handle, PCI_CONF_STAT, ~0);
}
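
/*
 * nge_chip_stop() -- quiesce the chip: mask interrupts, stop PHY
 * auto-polling, halt the Tx/Rx state machines and DMA, and leave the
 * chip in either the STOPPED or (if <fault>) the FAULT state.
 */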
int
nge_chip_stop(nge_t *ngep, boolean_t fault)
{
	int err;
	uint32_t reg_val;
	uint32_t tries;
	nge_mintr_src mintr_src;
	nge_mii_cs mii_cs;
	nge_rx_poll rx_poll;
	nge_tx_poll tx_poll;
	nge_rx_en rx_en;
	nge_tx_en tx_en;
	nge_tx_sta tx_sta;
	nge_rx_sta rx_sta;
	nge_mode_cntl mode;
	nge_pmu_cntl2 pmu_cntl2;

	NGE_TRACE(("nge_chip_stop($%p, %d)", (void *)ngep, fault));

	err = DDI_SUCCESS;

	/* Clear any pending PHY interrupt */
	mintr_src.src_val = nge_reg_get8(ngep, NGE_MINTR_SRC);
	nge_reg_put8(ngep, NGE_MINTR_SRC, mintr_src.src_val);

	/* Mask all interrupts */
	reg_val = nge_reg_get32(ngep, NGE_INTR_MASK);
	reg_val &= ~NGE_INTR_ALL_EN;
	nge_reg_put32(ngep, NGE_INTR_MASK, reg_val);

	/* Disable auto-polling of the PHY */
	mii_cs.cs_val = nge_reg_get32(ngep, NGE_MII_CS);
	mii_cs.cs_bits.ap_en = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_MII_CS, mii_cs.cs_val);

	/* Reset buffer management & DMA */
	mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
	mode.mode_bits.dma_dis = NGE_SET;
	mode.mode_bits.desc_type = ngep->desc_mode;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val);

	for (tries = 0; tries < 5000; tries++) {
		drv_usecwait(10);
		mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
		if (mode.mode_bits.dma_status == NGE_SET)
			break;
	}
	if (tries == 5000) {
		return (DDI_FAILURE);
	}

	/*
	 * For MCP55, bits 1:31 of NGE_RX_EN and NGE_TX_EN are defined
	 * to be used by the SMU.  PXE versions newer than 527 support
	 * the SMU and set bit 24 of NGE_RX_EN/NGE_TX_EN when leaving
	 * PXE, to prevent the MAC from winning arbitration to the
	 * main transmit/receive channels.
	 */
	if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {

		/* Disable the Rx state machine */
		nge_reg_put32(ngep, NGE_RX_EN, 0x0);

		/* Disable the Tx state machine */
		nge_reg_put32(ngep, NGE_TX_EN, 0x0);
	} else {

		/* Disable the Rx state machine */
		rx_en.val = nge_reg_get8(ngep, NGE_RX_EN);
		rx_en.bits.rx_en = NGE_CLEAR;
		nge_reg_put8(ngep, NGE_RX_EN, rx_en.val);

		/* Disable the Tx state machine */
		tx_en.val = nge_reg_get8(ngep, NGE_TX_EN);
		tx_en.bits.tx_en = NGE_CLEAR;
		nge_reg_put8(ngep, NGE_TX_EN, tx_en.val);
	}

	/*
	 * Clear the Tx state machine status and make sure the
	 * Tx channel is idle
	 */
	tx_sta.sta_val = nge_reg_get32(ngep, NGE_TX_STA);
	for (tries = 0; tries < 1000; tries++) {
		if (tx_sta.sta_bits.tx_chan_sta == NGE_CLEAR)
			break;
		drv_usecwait(10);
		tx_sta.sta_val = nge_reg_get32(ngep, NGE_TX_STA);
	}
	if (tries == 1000) {
		return (DDI_FAILURE);
	}
	nge_reg_put32(ngep, NGE_TX_STA, tx_sta.sta_val);

	/*
	 * Clear the Rx state machine status and make sure the
	 * Rx channel is idle
	 */
	rx_sta.sta_val = nge_reg_get32(ngep, NGE_RX_STA);
	for (tries = 0; tries < 1000; tries++) {
		if (rx_sta.sta_bits.rx_chan_sta == NGE_CLEAR)
			break;
		drv_usecwait(10);
		rx_sta.sta_val = nge_reg_get32(ngep, NGE_RX_STA);
	}
	if (tries == 1000) {
		return (DDI_FAILURE);
	}
	nge_reg_put32(ngep, NGE_RX_STA, rx_sta.sta_val);

	/* Disable auto-polling of the Rx state machine */
	rx_poll.poll_val = nge_reg_get32(ngep, NGE_RX_POLL);
	rx_poll.poll_bits.rpen = NGE_CLEAR;
	rx_poll.poll_bits.rpi = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_RX_POLL, rx_poll.poll_val);

	/* Disable auto-polling of the Tx state machine */
	tx_poll.poll_val = nge_reg_get32(ngep, NGE_TX_POLL);
	tx_poll.poll_bits.tpen = NGE_CLEAR;
	tx_poll.poll_bits.tpi = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_TX_POLL, tx_poll.poll_val);

	/* Restore buffer management */
	mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
	mode.mode_bits.bm_reset = NGE_SET;
	mode.mode_bits.tx_rcom_en = NGE_SET;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val);

	if (ngep->dev_spec_param.advanced_pm) {

		nge_reg_put32(ngep, NGE_PMU_CIDLE_LIMIT, 0);
		nge_reg_put32(ngep, NGE_PMU_DIDLE_LIMIT, 0);

		pmu_cntl2.cntl2_val = nge_reg_get32(ngep, NGE_PMU_CNTL2);
		pmu_cntl2.cntl2_bits.cidle_timer = NGE_CLEAR;
		pmu_cntl2.cntl2_bits.didle_timer = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_PMU_CNTL2, pmu_cntl2.cntl2_val);
	}

	if (fault)
		ngep->nge_chip_state = NGE_CHIP_FAULT;
	else
		ngep->nge_chip_state = NGE_CHIP_STOPPED;

	return (err);
}
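
/*
 * Descriptor ring setup helpers: program the chip with the DMA addresses
 * and lengths of the Rx/Tx descriptor rings.  These are called from
 * nge_buff_setup() below as part of (re)starting the chip.
 */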
static void
nge_rx_setup(nge_t *ngep)
{
	uint64_t desc_addr;
	nge_rxtx_dlen dlen;
	nge_rx_poll rx_poll;

	/*
	 * Program the address and length of the Rx descriptor ring
	 */
	desc_addr = ngep->recv->desc.cookie.dmac_laddress;
	nge_reg_put32(ngep, NGE_RX_DADR, desc_addr);
	nge_reg_put32(ngep, NGE_RX_DADR_HI, desc_addr >> 32);
	dlen.dlen_val = nge_reg_get32(ngep, NGE_RXTX_DLEN);
	dlen.dlen_bits.rdlen = ngep->recv->desc.nslots - 1;
	nge_reg_put32(ngep, NGE_RXTX_DLEN, dlen.dlen_val);

	rx_poll.poll_val = nge_reg_get32(ngep, NGE_RX_POLL);
	rx_poll.poll_bits.rpi = RX_POLL_INTV_1G;
	rx_poll.poll_bits.rpen = NGE_SET;
	nge_reg_put32(ngep, NGE_RX_POLL, rx_poll.poll_val);
}

static void
nge_tx_setup(nge_t *ngep)
{
	uint64_t desc_addr;
	nge_rxtx_dlen dlen;

	/*
	 * Program the address and length of the Tx descriptor ring
	 */
	desc_addr = ngep->send->desc.cookie.dmac_laddress;
	nge_reg_put32(ngep, NGE_TX_DADR, desc_addr);
	nge_reg_put32(ngep, NGE_TX_DADR_HI, desc_addr >> 32);
	dlen.dlen_val = nge_reg_get32(ngep, NGE_RXTX_DLEN);
	dlen.dlen_bits.tdlen = ngep->send->desc.nslots - 1;
	nge_reg_put32(ngep, NGE_RXTX_DLEN, dlen.dlen_val);
}

static int
nge_buff_setup(nge_t *ngep)
{
	nge_mode_cntl mode_cntl;
	nge_dev_spec_param_t	*dev_param_p;

	dev_param_p = &ngep->dev_spec_param;

	/*
	 * Configure the Rx and Tx descriptor rings
	 */
	nge_rx_setup(ngep);
	nge_tx_setup(ngep);

	/*
	 * Configure the buffer attributes
	 */
	mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);

	/*
	 * Enable DMA access requests
	 */
	mode_cntl.mode_bits.dma_dis = NGE_CLEAR;

	/*
	 * Enable buffer management
	 */
	mode_cntl.mode_bits.bm_reset = NGE_CLEAR;

	/*
	 * Select the descriptor type (standard offload or hot)
	 */
	mode_cntl.mode_bits.desc_type = ngep->desc_mode;

	/*
	 * Enable receive hardware checksum if the device supports it
	 */
	if (dev_param_p->rx_hw_checksum) {
		mode_cntl.mode_bits.rx_sum_en = NGE_SET;
	} else
		mode_cntl.mode_bits.rx_sum_en = NGE_CLEAR;

	/*
	 * Disable Tx PRD coarse update
	 */
	mode_cntl.mode_bits.tx_prd_cu_en = NGE_CLEAR;

	/*
	 * Disable 64-byte access
	 */
	mode_cntl.mode_bits.w64_dis = NGE_SET;

	/*
	 * "Skip Rx Error Frame" is not supported; if it is enabled,
	 * jumbo frames no longer work.
	 */
	mode_cntl.mode_bits.rx_filter_en = NGE_CLEAR;

	/*
	 * Cannot support hot mode now
	 */
	mode_cntl.mode_bits.resv15 = NGE_CLEAR;

	if (dev_param_p->vlan) {
		/* Disable VLAN stripping for devices which support VLAN */
		mode_cntl.mode_bits.vlan_strip = NGE_CLEAR;

		/* Disable VLAN insertion for devices which support VLAN */
		mode_cntl.mode_bits.vlan_ins = NGE_CLEAR;
	}

	if (dev_param_p->tx_rx_64byte) {

		/* Set the maximum TX PRD fetch size to 64 bytes */
		mode_cntl.mode_bits.tx_fetch_prd = NGE_SET;

		/* Set the maximum RX PRD fetch size to 64 bytes */
		mode_cntl.mode_bits.rx_fetch_prd = NGE_SET;
	}
	/*
	 * Upload Rx data as it arrives, rather than waiting for a full frame
	 */
	mode_cntl.mode_bits.resv16 = NGE_CLEAR;

	/*
	 * Normal HOT table accesses
	 */
	mode_cntl.mode_bits.resv17 = NGE_CLEAR;

	/*
	 * Normal HOT buffer requesting
	 */
	mode_cntl.mode_bits.resv18 = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);

	/*
	 * Signal the controller to check for new Rx descriptors
	 */
	mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
	mode_cntl.mode_bits.rxdm = NGE_SET;
	mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);

	return (DDI_SUCCESS);
}

/*
 * When the chipset resets, it does not restore the original
 * MAC address to the MAC address registers.
 *
 * When the driver is detached, this function writes the original
 * MAC address back to the MAC address registers.
 */

void
nge_restore_mac_addr(nge_t *ngep)
{
	uint32_t mac_addr;

	mac_addr = (uint32_t)ngep->chipinfo.hw_mac_addr;
	nge_reg_put32(ngep, NGE_UNI_ADDR0, mac_addr);
	mac_addr = (uint32_t)(ngep->chipinfo.hw_mac_addr >> 32);
	nge_reg_put32(ngep, NGE_UNI_ADDR1, mac_addr);
}

int
nge_chip_reset(nge_t *ngep)
{
	int err;
	uint8_t i;
	uint32_t regno;
	uint64_t mac;
	nge_uni_addr1 uaddr1;
	nge_mul_addr1 maddr1;
	nge_cp_cntl ee_cntl;
	nge_soft_misc soft_misc;
	nge_pmu_cntl0 pmu_cntl0;
	nge_pmu_cntl2 pmu_cntl2;
	nge_pm_cntl2 pm_cntl2;
	const nge_ksindex_t *ksip;
	nge_sw_statistics_t *sw_stp;
	sw_stp = &ngep->statistics.sw_statistics;

	NGE_TRACE(("nge_chip_reset($%p)", (void *)ngep));

	/*
	 * Clear the statistics by reading the statistics registers
	 */
	for (ksip = nge_statistics; ksip->name != NULL; ++ksip) {
		regno = KS_BASE + ksip->index * sizeof (uint32_t);
		(void) nge_reg_get32(ngep, regno);
	}
	/* Clear the software statistics */
	sw_stp->recv_count = 0;
	sw_stp->xmit_count = 0;
	sw_stp->rbytes = 0;
	sw_stp->obytes = 0;

	/*
	 * Clear the multicast MAC address table
	 */
	nge_reg_put32(ngep, NGE_MUL_ADDR0, 0);
	maddr1.addr_val = nge_reg_get32(ngep, NGE_MUL_ADDR1);
	maddr1.addr_bits.addr = 0;
	nge_reg_put32(ngep, NGE_MUL_ADDR1, maddr1.addr_val);

	/*
	 * Set up SEEPROM control
	 */
	ee_cntl.cntl_val = nge_reg_get32(ngep, NGE_EP_CNTL);
	ee_cntl.cntl_bits.clkdiv = EEPROM_CLKDIV;
	ee_cntl.cntl_bits.rom_size = EEPROM_32K;
	ee_cntl.cntl_bits.word_wid = ACCESS_16BIT;
	ee_cntl.cntl_bits.wait_slots = EEPROM_WAITCLK;
	nge_reg_put32(ngep, NGE_EP_CNTL, ee_cntl.cntl_val);

	/*
	 * Read the factory unicast MAC address
	 */
	if (ngep->nge_chip_state == NGE_CHIP_INITIAL) {
		uaddr1.addr_val = nge_reg_get32(ngep, NGE_UNI_ADDR1);
		mac = uaddr1.addr_bits.addr;
		mac <<= 32;
		mac |= nge_reg_get32(ngep, NGE_UNI_ADDR0);
		if (mac != 0ULL && mac != ~0ULL) {
			ngep->chipinfo.hw_mac_addr = mac;
			for (i = ETHERADDRL; i-- != 0; ) {
				ngep->chipinfo.vendor_addr.addr[i] =
				    (uchar_t)mac;
				ngep->cur_uni_addr.addr[i] = (uchar_t)mac;
				mac >>= 8;
			}
			ngep->chipinfo.vendor_addr.set = 1;
		}
	}
	pci_config_put8(ngep->cfg_handle, PCI_CONF_CACHE_LINESZ,
	    ngep->chipinfo.clsize);
	pci_config_put8(ngep->cfg_handle, PCI_CONF_LATENCY_TIMER,
	    ngep->chipinfo.latency);

	if (ngep->dev_spec_param.advanced_pm) {

		/* Program the software misc register */
		soft_misc.misc_val = nge_reg_get32(ngep, NGE_SOFT_MISC);
		soft_misc.misc_bits.rx_clk_vx_rst = NGE_SET;
		soft_misc.misc_bits.tx_clk_vx_rst = NGE_SET;
		soft_misc.misc_bits.clk12m_vx_rst = NGE_SET;
		soft_misc.misc_bits.fpci_clk_vx_rst = NGE_SET;
		soft_misc.misc_bits.rx_clk_vc_rst = NGE_SET;
		soft_misc.misc_bits.tx_clk_vc_rst = NGE_SET;
		soft_misc.misc_bits.fs_clk_vc_rst = NGE_SET;
		soft_misc.misc_bits.rst_ex_m2pintf = NGE_SET;
		nge_reg_put32(ngep, NGE_SOFT_MISC, soft_misc.misc_val);

		/* wait for 32 us */
		drv_usecwait(32);

		soft_misc.misc_val = nge_reg_get32(ngep, NGE_SOFT_MISC);
		soft_misc.misc_bits.rx_clk_vx_rst = NGE_CLEAR;
		soft_misc.misc_bits.tx_clk_vx_rst = NGE_CLEAR;
		soft_misc.misc_bits.clk12m_vx_rst = NGE_CLEAR;
		soft_misc.misc_bits.fpci_clk_vx_rst = NGE_CLEAR;
		soft_misc.misc_bits.rx_clk_vc_rst = NGE_CLEAR;
		soft_misc.misc_bits.tx_clk_vc_rst = NGE_CLEAR;
		soft_misc.misc_bits.fs_clk_vc_rst = NGE_CLEAR;
		soft_misc.misc_bits.rst_ex_m2pintf = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_SOFT_MISC, soft_misc.misc_val);

		/* Program the PMU registers */
		pmu_cntl0.cntl0_val = nge_reg_get32(ngep, NGE_PMU_CNTL0);
		pmu_cntl0.cntl0_bits.core_spd10_fp =
		    NGE_PMU_CORE_SPD10_BUSY;
		pmu_cntl0.cntl0_bits.core_spd10_idle =
		    NGE_PMU_CORE_SPD10_IDLE;
		pmu_cntl0.cntl0_bits.core_spd100_fp =
		    NGE_PMU_CORE_SPD100_BUSY;
		pmu_cntl0.cntl0_bits.core_spd100_idle =
		    NGE_PMU_CORE_SPD100_IDLE;
		pmu_cntl0.cntl0_bits.core_spd1000_fp =
		    NGE_PMU_CORE_SPD1000_BUSY;
		pmu_cntl0.cntl0_bits.core_spd1000_idle =
		    NGE_PMU_CORE_SPD100_IDLE;
		pmu_cntl0.cntl0_bits.core_spd10_idle =
		    NGE_PMU_CORE_SPD10_IDLE;
		nge_reg_put32(ngep, NGE_PMU_CNTL0, pmu_cntl0.cntl0_val);

		/* Set the core idle limit value */
		nge_reg_put32(ngep, NGE_PMU_CIDLE_LIMIT,
		    NGE_PMU_CIDLE_LIMIT_DEF);

		/* Set the device idle limit value */
		nge_reg_put32(ngep, NGE_PMU_DIDLE_LIMIT,
		    NGE_PMU_DIDLE_LIMIT_DEF);

		/* Enable the core/device idle timers in PMU control 2 */
		pmu_cntl2.cntl2_val = nge_reg_get32(ngep, NGE_PMU_CNTL2);
		pmu_cntl2.cntl2_bits.cidle_timer = NGE_SET;
		pmu_cntl2.cntl2_bits.didle_timer = NGE_SET;
		pmu_cntl2.cntl2_bits.core_enable = NGE_SET;
		pmu_cntl2.cntl2_bits.dev_enable = NGE_SET;
		nge_reg_put32(ngep, NGE_PMU_CNTL2, pmu_cntl2.cntl2_val);
	}
	/*
	 * Stop the chipset and clear buffer management
	 */
	err = nge_chip_stop(ngep, B_FALSE);
	if (err == DDI_FAILURE)
		return (err);
	/*
	 * Clear the power state bits for the PHY, since the interface no
	 * longer works after rebooting from Windows on a multi-boot machine
	 */
	if (ngep->chipinfo.device == DEVICE_ID_MCP51_268 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP51_269 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_372 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP61_3EE ||
	    ngep->chipinfo.device == DEVICE_ID_MCP61_3EF) {

		pm_cntl2.cntl_val = nge_reg_get32(ngep, NGE_PM_CNTL2);
		/* bring the PHY out of coma mode */
		pm_cntl2.cntl_bits.phy_coma_set = NGE_CLEAR;
		/* disable the auto-reset coma bits */
		pm_cntl2.cntl_bits.resv4 = NGE_CLEAR;
		/* restore power to gated clocks */
		pm_cntl2.cntl_bits.resv8_11 = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_PM_CNTL2, pm_cntl2.cntl_val);
	}

	/*
	 * Reset the external PHY
	 */
	(void) nge_phy_reset(ngep);
	ngep->nge_chip_state = NGE_CHIP_RESET;
	return (DDI_SUCCESS);
}
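
/*
 * nge_chip_start() -- program the descriptor rings, PHY auto-polling,
 * Tx/Rx parameters, watermarks and interrupt masks, enable the Tx/Rx
 * state machines, and mark the chip RUNNING.
 */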
int
nge_chip_start(nge_t *ngep)
{
	int err;
	nge_itc itc;
	nge_tx_cntl tx_cntl;
	nge_rx_cntrl0 rx_cntl0;
	nge_rx_cntl1 rx_cntl1;
	nge_tx_en tx_en;
	nge_rx_en rx_en;
	nge_mii_cs mii_cs;
	nge_swtr_cntl swtr_cntl;
	nge_rx_fifo_wm rx_fifo;
	nge_intr_mask intr_mask;
	nge_mintr_mask mintr_mask;
	nge_dev_spec_param_t	*dev_param_p;

	NGE_TRACE(("nge_chip_start($%p)", (void *)ngep));

	/*
	 * Set up buffer management
	 */
	err = nge_buff_setup(ngep);
	if (err == DDI_FAILURE)
		return (err);

	dev_param_p = &ngep->dev_spec_param;

	/*
	 * Enable PHY auto-polling
	 */
	mii_cs.cs_val = nge_reg_get32(ngep, NGE_MII_CS);
	mii_cs.cs_bits.ap_paddr = ngep->phy_xmii_addr;
	mii_cs.cs_bits.ap_en = NGE_SET;
	mii_cs.cs_bits.ap_intv = MII_POLL_INTV;
	nge_reg_put32(ngep, NGE_MII_CS, mii_cs.cs_val);

	/*
	 * Set up the link
	 */
	(*ngep->physops->phys_update)(ngep);

	/*
	 * Configure the Tx parameters
	 */
	tx_cntl.cntl_val = nge_reg_get32(ngep, NGE_TX_CNTL);
	if (dev_param_p->tx_pause_frame)
		tx_cntl.cntl_bits.paen = NGE_SET;
	else
		tx_cntl.cntl_bits.paen = NGE_CLEAR;
	tx_cntl.cntl_bits.retry_en = NGE_SET;
	tx_cntl.cntl_bits.pad_en = NGE_SET;
	tx_cntl.cntl_bits.fappend_en = NGE_SET;
	tx_cntl.cntl_bits.two_def_en = NGE_SET;
	tx_cntl.cntl_bits.max_retry = 15;
	tx_cntl.cntl_bits.burst_en = NGE_CLEAR;
	tx_cntl.cntl_bits.uflo_err_mask = NGE_CLEAR;
	tx_cntl.cntl_bits.tlcol_mask = NGE_CLEAR;
	tx_cntl.cntl_bits.lcar_mask = NGE_CLEAR;
	tx_cntl.cntl_bits.def_mask = NGE_CLEAR;
	tx_cntl.cntl_bits.exdef_mask = NGE_SET;
	tx_cntl.cntl_bits.lcar_mask = NGE_SET;
	tx_cntl.cntl_bits.tlcol_mask = NGE_SET;
	tx_cntl.cntl_bits.uflo_err_mask = NGE_SET;
	tx_cntl.cntl_bits.jam_seq_en = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_TX_CNTL, tx_cntl.cntl_val);

	/*
	 * Configure the parameters of the Rx state machine.
	 * Enable the following:
	 *	1). Pad Strip
	 *	2). FCS Relay
	 *	3). Pause
	 *	4). Address filter
	 *	5). Runt Packet receive
	 *	6). Broadcast
	 *	7). Receive Deferral
	 *
	 * Disable the following to decrease the number of interrupts:
	 *	1). Runt Interrupt
	 *	2). Rx Late Collision interrupt
	 *	3). Rx Max Length Error interrupt
	 *	4). Rx Length Field Error interrupt
	 *	5). Rx FCS Error interrupt
	 *	6). Rx Overflow Error interrupt
	 *	7). Rx Frame Alignment Error interrupt
	 */
	rx_cntl0.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);
	rx_cntl0.cntl_bits.padsen = NGE_CLEAR;
	rx_cntl0.cntl_bits.fcsren = NGE_CLEAR;
	if (dev_param_p->rx_pause_frame)
		rx_cntl0.cntl_bits.paen = NGE_SET;
	else
		rx_cntl0.cntl_bits.paen = NGE_CLEAR;
	rx_cntl0.cntl_bits.lben = NGE_CLEAR;
	rx_cntl0.cntl_bits.afen = NGE_SET;
	rx_cntl0.cntl_bits.runten = NGE_CLEAR;
	rx_cntl0.cntl_bits.brdis = NGE_CLEAR;
	rx_cntl0.cntl_bits.rdfen = NGE_CLEAR;
	rx_cntl0.cntl_bits.runtm = NGE_CLEAR;
	rx_cntl0.cntl_bits.slfb = NGE_CLEAR;
	rx_cntl0.cntl_bits.rlcolm = NGE_CLEAR;
	rx_cntl0.cntl_bits.maxerm = NGE_CLEAR;
	rx_cntl0.cntl_bits.lferm = NGE_CLEAR;
	rx_cntl0.cntl_bits.crcm = NGE_CLEAR;
	rx_cntl0.cntl_bits.ofolm = NGE_CLEAR;
	rx_cntl0.cntl_bits.framerm = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val);

	/*
	 * Configure the watermarks for the Rx state machine
	 */
	rx_fifo.wm_val = nge_reg_get32(ngep, NGE_RX_FIFO_WM);
	rx_fifo.wm_bits.data_hwm = ngep->rx_datahwm;
	rx_fifo.wm_bits.prd_lwm = ngep->rx_prdlwm;
	rx_fifo.wm_bits.prd_hwm = ngep->rx_prdhwm;
	nge_reg_put32(ngep, NGE_RX_FIFO_WM, rx_fifo.wm_val);

	/*
	 * Configure the deferral time slot for the Rx state machine
	 */
	nge_reg_put8(ngep, NGE_RX_DEf, ngep->rx_def);

	/*
	 * Configure the maximum Rx packet length
	 */
	rx_cntl1.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL1);
	rx_cntl1.cntl_bits.length = ngep->max_sdu;
	nge_reg_put32(ngep, NGE_RX_CNTL1, rx_cntl1.cntl_val);
	/*
	 * Enable the Tx state machine
	 */
	tx_en.val = nge_reg_get8(ngep, NGE_TX_EN);
	tx_en.bits.tx_en = NGE_SET;
	nge_reg_put8(ngep, NGE_TX_EN, tx_en.val);

	/*
	 * Enable the Rx state machine
	 */
	rx_en.val = nge_reg_get8(ngep, NGE_RX_EN);
	rx_en.bits.rx_en = NGE_SET;
	nge_reg_put8(ngep, NGE_RX_EN, rx_en.val);

	itc.itc_val = nge_reg_get32(ngep, NGE_SWTR_ITC);
	itc.itc_bits.sw_intv = ngep->sw_intr_intv;
	nge_reg_put32(ngep, NGE_SWTR_ITC, itc.itc_val);

	swtr_cntl.ctrl_val = nge_reg_get8(ngep, NGE_SWTR_CNTL);
	swtr_cntl.cntl_bits.sten = NGE_SET;
	swtr_cntl.cntl_bits.stren = NGE_SET;
	nge_reg_put32(ngep, NGE_SWTR_CNTL, swtr_cntl.ctrl_val);

	/*
	 * Disable all MII read/write operation interrupts
	 */
	mintr_mask.mask_val = nge_reg_get8(ngep, NGE_MINTR_MASK);
	mintr_mask.mask_bits.mrei = NGE_CLEAR;
	mintr_mask.mask_bits.mcc2 = NGE_CLEAR;
	mintr_mask.mask_bits.mcc1 = NGE_CLEAR;
	mintr_mask.mask_bits.mapi = NGE_SET;
	mintr_mask.mask_bits.mpdi = NGE_SET;
	nge_reg_put8(ngep, NGE_MINTR_MASK, mintr_mask.mask_val);

	/*
	 * Enable all interrupt events
	 */
	intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
	intr_mask.mask_bits.reint = NGE_SET;
	intr_mask.mask_bits.rcint = NGE_SET;
	intr_mask.mask_bits.miss = NGE_SET;
	intr_mask.mask_bits.teint = NGE_CLEAR;
	intr_mask.mask_bits.tcint = NGE_SET;
	intr_mask.mask_bits.stint = NGE_CLEAR;
	intr_mask.mask_bits.mint = NGE_CLEAR;
	intr_mask.mask_bits.rfint = NGE_CLEAR;
	intr_mask.mask_bits.tfint = NGE_CLEAR;
	intr_mask.mask_bits.feint = NGE_SET;
	intr_mask.mask_bits.resv10 = NGE_CLEAR;
	intr_mask.mask_bits.resv11 = NGE_CLEAR;
	intr_mask.mask_bits.resv12 = NGE_CLEAR;
	intr_mask.mask_bits.resv13 = NGE_CLEAR;
	intr_mask.mask_bits.phyint = NGE_CLEAR;
	ngep->intr_masks = intr_mask.mask_val;
	nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
	ngep->nge_chip_state = NGE_CHIP_RUNNING;
	return (DDI_SUCCESS);
}

/*
 * nge_chip_sync() -- program the chip with the unicast MAC address,
 * the multicast hash table, and the required level of promiscuity.
 */
void
nge_chip_sync(nge_t *ngep)
{
	uint8_t i;
	uint64_t macaddr;
	uint64_t mul_addr;
	uint64_t mul_mask;
	nge_rx_cntrl0 rx_cntl;
	nge_uni_addr1 uni_adr1;

	NGE_TRACE(("nge_chip_sync($%p)", (void *)ngep));

	macaddr = 0x0ull;
	mul_addr = 0x0ull;
	mul_mask = 0x0ull;
	rx_cntl.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);

	if (ngep->promisc) {
		rx_cntl.cntl_bits.afen = NGE_CLEAR;
		rx_cntl.cntl_bits.brdis = NGE_SET;
	} else {
		rx_cntl.cntl_bits.afen = NGE_SET;
		rx_cntl.cntl_bits.brdis = NGE_CLEAR;
	}

	/*
	 * Transform the MAC address(es) from host to chip format, the
	 * unicast MAC address first ...
	 */
	for (i = ETHERADDRL, macaddr = 0ull; i != 0; --i) {
		macaddr |= ngep->cur_uni_addr.addr[i-1];
		macaddr <<= (i > 1) ? 8 : 0;
	}

	nge_reg_put32(ngep, NGE_UNI_ADDR0, (uint32_t)macaddr);
	macaddr = macaddr >> 32;
	uni_adr1.addr_val = nge_reg_get32(ngep, NGE_UNI_ADDR1);
	uni_adr1.addr_bits.addr = (uint16_t)macaddr;
	uni_adr1.addr_bits.resv16_31 = (uint16_t)0;
	nge_reg_put32(ngep, NGE_UNI_ADDR1, uni_adr1.addr_val);

	/*
	 * Reprogram the multicast address table ...
	 */
	for (i = ETHERADDRL, mul_addr = 0ull; i != 0; --i) {
		mul_addr |= ngep->cur_mul_addr.addr[i-1];
		mul_addr <<= (i > 1) ? 8 : 0;
		mul_mask |= ngep->cur_mul_mask.addr[i-1];
		mul_mask <<= (i > 1) ? 8 : 0;
	}
	nge_reg_put32(ngep, NGE_MUL_ADDR0, (uint32_t)mul_addr);
	mul_addr >>= 32;
	nge_reg_put32(ngep, NGE_MUL_ADDR1, mul_addr);
	nge_reg_put32(ngep, NGE_MUL_MASK, (uint32_t)mul_mask);
	mul_mask >>= 32;
	nge_reg_put32(ngep, NGE_MUL_MASK1, mul_mask);
	/*
	 * Set or clear the PROMISCUOUS mode bit
	 */
	nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl.cntl_val);
	/*
	 * For internal PHY loopback the link will not come up, so the
	 * MAC modes need to be synchronised directly.
	 */
	if (ngep->param_loop_mode == NGE_LOOP_INTERNAL_PHY)
		nge_sync_mac_modes(ngep);
}
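
/*
 * nge_chip_err() -- decode the error bits latched in NGE_REG010 into
 * the software TSO/hardware-checksum error statistics, and mask the
 * fatal-error interrupt once such an error has been seen.
 */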
static void
nge_chip_err(nge_t *ngep)
{
	nge_reg010 reg010_ins;
	nge_sw_statistics_t *psw_stat;
	nge_intr_mask intr_mask;

	NGE_TRACE(("nge_chip_err($%p)", (void *)ngep));

	psw_stat = (nge_sw_statistics_t *)&ngep->statistics.sw_statistics;
	reg010_ins.reg010_val = nge_reg_get32(ngep, NGE_REG010);
	if (reg010_ins.reg010_bits.resv0)
		psw_stat->fe_err.tso_err_mss++;

	if (reg010_ins.reg010_bits.resv1)
		psw_stat->fe_err.tso_dis++;

	if (reg010_ins.reg010_bits.resv2)
		psw_stat->fe_err.tso_err_nosum++;

	if (reg010_ins.reg010_bits.resv3)
		psw_stat->fe_err.tso_err_hov++;

	if (reg010_ins.reg010_bits.resv4)
		psw_stat->fe_err.tso_err_huf++;

	if (reg010_ins.reg010_bits.resv5)
		psw_stat->fe_err.tso_err_l2++;

	if (reg010_ins.reg010_bits.resv6)
		psw_stat->fe_err.tso_err_ip++;

	if (reg010_ins.reg010_bits.resv7)
		psw_stat->fe_err.tso_err_l4++;

	if (reg010_ins.reg010_bits.resv8)
		psw_stat->fe_err.tso_err_tcp++;

	if (reg010_ins.reg010_bits.resv9)
		psw_stat->fe_err.hsum_err_ip++;

	if (reg010_ins.reg010_bits.resv10)
		psw_stat->fe_err.hsum_err_l4++;

	if (reg010_ins.reg010_val != 0) {

		/*
		 * Fatal error is triggered by malformed driver commands.
		 * Disable unless debugging.
		 */
		intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
		intr_mask.mask_bits.feint = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
		ngep->intr_masks = intr_mask.mask_val;
	}
}
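
/*
 * nge_sync_mac_modes() -- reprogram the MAC speed, duplex, inter-frame
 * gap, FIFO watermark and backoff parameters to match the current link
 * speed/duplex reported by the PHY.
 */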
static void
nge_sync_mac_modes(nge_t *ngep)
{
	nge_tx_def tx_def;
	nge_tx_fifo_wm tx_fifo;
	nge_bkoff_cntl bk_cntl;
	nge_mac2phy m2p;
	nge_rx_cntrl0 rx_cntl0;
	nge_dev_spec_param_t	*dev_param_p;

	dev_param_p = &ngep->dev_spec_param;

	tx_def.def_val = nge_reg_get32(ngep, NGE_TX_DEF);
	m2p.m2p_val = nge_reg_get32(ngep, NGE_MAC2PHY);
	tx_fifo.wm_val = nge_reg_get32(ngep, NGE_TX_FIFO_WM);
	bk_cntl.cntl_val = nge_reg_get32(ngep, NGE_BKOFF_CNTL);
	bk_cntl.bkoff_bits.rseed = BKOFF_RSEED;
	switch (ngep->param_link_speed) {
	case 10:
		m2p.m2p_bits.speed = low_speed;
		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
		if (ngep->phy_mode == RGMII_IN) {
			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_10_100;
			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
		} else {
			tx_def.def_bits.if_def = TX_TIFG_MII;
			tx_def.def_bits.ifg2_def = TX_IFG2_MII;
		}
		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_MII;
		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_MII;
		break;

	case 100:
		m2p.m2p_bits.speed = fast_speed;
		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
		if (ngep->phy_mode == RGMII_IN) {
			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_10_100;
			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
		} else {
			tx_def.def_bits.if_def = TX_TIFG_MII;
			tx_def.def_bits.ifg2_def = TX_IFG2_MII;
		}
		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_MII;
		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_MII;
		break;

	case 1000:
		m2p.m2p_bits.speed = giga_speed;
		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
		if (ngep->param_link_duplex == LINK_DUPLEX_FULL) {
			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_1000;
			tx_def.def_bits.if_def = TX_IFG_RGMII_1000_FD;
		} else {
			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_1000;
			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
		}

		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_GMII;
		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_GMII;
		break;
	}

	if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
		m2p.m2p_bits.phyintr = NGE_CLEAR;
		m2p.m2p_bits.phyintrlvl = NGE_CLEAR;
	}
	if (ngep->param_link_duplex == LINK_DUPLEX_HALF) {
		m2p.m2p_bits.hdup_en = NGE_SET;
	} else
		m2p.m2p_bits.hdup_en = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_MAC2PHY, m2p.m2p_val);
	nge_reg_put32(ngep, NGE_TX_DEF, tx_def.def_val);

	tx_fifo.wm_bits.data_lwm = TX_FIFO_DATA_LWM;
	tx_fifo.wm_bits.prd_lwm = TX_FIFO_PRD_LWM;
	tx_fifo.wm_bits.uprd_hwm = TX_FIFO_PRD_HWM;
	tx_fifo.wm_bits.fb_wm = TX_FIFO_TBFW;
	nge_reg_put32(ngep, NGE_TX_FIFO_WM, tx_fifo.wm_val);

	nge_reg_put32(ngep, NGE_BKOFF_CNTL, bk_cntl.cntl_val);

	rx_cntl0.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);
	if (ngep->param_link_rx_pause && dev_param_p->rx_pause_frame)
		rx_cntl0.cntl_bits.paen = NGE_SET;
	else
		rx_cntl0.cntl_bits.paen = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val);
}

/*
 * Handler for hardware link state change.
 *
 * When this routine is called, the hardware link state has changed
 * and the new state is reflected in the param_* variables. Here
 * we must update the softstate, reprogram the MAC to match, and
 * record the change in the log and/or on the console.
 */
static void
nge_factotum_link_handler(nge_t *ngep)
{
	/*
	 * Update the s/w link_state
	 */
	if (ngep->param_link_up)
		ngep->link_state = LINK_STATE_UP;
	else
		ngep->link_state = LINK_STATE_DOWN;

	/*
	 * Reprogram the MAC modes to match
	 */
	nge_sync_mac_modes(ngep);
}

static boolean_t
nge_factotum_link_check(nge_t *ngep)
{
	boolean_t lchg;
	boolean_t check;

	ASSERT(mutex_owned(ngep->genlock));

	(*ngep->physops->phys_check)(ngep);
	switch (ngep->link_state) {
	case LINK_STATE_UP:
		lchg = (ngep->param_link_up == B_FALSE);
		check = (ngep->param_link_up == B_FALSE);
		break;

	case LINK_STATE_DOWN:
		lchg = (ngep->param_link_up == B_TRUE);
		check = (ngep->param_link_up == B_TRUE);
		break;

	default:
		/* Link state unknown: report it and re-check */
		lchg = B_TRUE;
		check = B_TRUE;
		break;
	}

	/*
	 * If <check> is false, we're sure the link hasn't changed.
	 * If true, however, it's not yet definitive; we have to call
	 * nge_phys_check() to determine whether the link has settled
	 * into a new state yet ... and if it has, then call the link
	 * state change handler.  But when the chip is a 5700 in a Dell
	 * 6650, even if <check> is false the link may have changed, so
	 * we have to call nge_phys_check() to determine the link state.
	 */
	if (check)
		nge_factotum_link_handler(ngep);

	return (lchg);
}

/*
 * Factotum routine to check for Tx stall, using the 'watchdog' counter
 */
static boolean_t nge_factotum_stall_check(nge_t *ngep);

static boolean_t
nge_factotum_stall_check(nge_t *ngep)
{
	uint32_t dogval;

	/*
	 * Specific check for Tx stall ...
	 *
	 * The 'watchdog' counter is incremented whenever a packet
	 * is queued, reset to 1 when some (but not all) buffers
	 * are reclaimed, reset to 0 (disabled) when all buffers
	 * are reclaimed, and shifted left here. If it exceeds the
	 * threshold value, the chip is assumed to have stalled and
	 * is put into the ERROR state. The factotum will then reset
	 * it on the next pass.
	 *
	 * All of which should ensure that we don't get into a state
	 * where packets are left pending indefinitely!
	 */
	dogval = nge_atomic_shl32(&ngep->watchdog, 1);
	if (dogval < nge_watchdog_count) {
		ngep->stall_cknum = 0;
	} else {
		ngep->stall_cknum++;
	}
	if (ngep->stall_cknum < 8) {
		return (B_FALSE);
	} else {
		ngep->stall_cknum = 0;
		ngep->statistics.sw_statistics.tx_stall++;
		return (B_TRUE);
	}
}

/*
 * The factotum is woken up when there's something to do that we'd rather
 * not do from inside a hardware interrupt handler or high-level cyclic.
 * Its two main tasks are:
 *	reset & restart the chip after an error
 *	check the link status whenever necessary
 */
/* ARGSUSED */
uint_t
nge_chip_factotum(caddr_t args1, caddr_t args2)
{
	uint_t result;
	nge_t *ngep;
	boolean_t err;
	boolean_t linkchg;

	ngep = (nge_t *)args1;

	NGE_TRACE(("nge_chip_factotum($%p)", (void *)ngep));

	mutex_enter(ngep->softlock);
	if (ngep->factotum_flag == 0) {
		mutex_exit(ngep->softlock);
		return (DDI_INTR_UNCLAIMED);
	}
	ngep->factotum_flag = 0;
	mutex_exit(ngep->softlock);
	err = B_FALSE;
	linkchg = B_FALSE;
	result = DDI_INTR_CLAIMED;

	mutex_enter(ngep->genlock);
	switch (ngep->nge_chip_state) {
	default:
		break;

	case NGE_CHIP_RUNNING:
		linkchg = nge_factotum_link_check(ngep);
		err = nge_factotum_stall_check(ngep);
		break;

	case NGE_CHIP_FAULT:
		(void) nge_restart(ngep);
		NGE_REPORT((ngep, "automatic recovery activated"));
		break;
	}

	if (err)
		(void) nge_chip_stop(ngep, B_TRUE);
	mutex_exit(ngep->genlock);

	/*
	 * If the link state changed, tell the world about it (if
	 * this version of MAC supports link state notification).
	 * Note: can't do this while still holding the mutex.
	 */
	if (linkchg)
		mac_link_update(ngep->mh, ngep->link_state);

	return (result);
}
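
/*
 * nge_intr_handle() -- service the asserted interrupt sources: recycle
 * Tx descriptors, pass received packets up, adjust the software
 * interrupt-moderation (poll mode) state, record error statistics, and
 * wake the factotum on PHY/link interrupts.
 */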
static void
nge_intr_handle(nge_t *ngep, nge_intr_src *pintr_src)
{
	boolean_t brx;
	boolean_t btx;
	nge_mintr_src mintr_src;

	brx = B_FALSE;
	btx = B_FALSE;
	ngep->statistics.sw_statistics.intr_count++;
	ngep->statistics.sw_statistics.intr_lval = pintr_src->intr_val;
	brx = (pintr_src->int_bits.reint | pintr_src->int_bits.miss
	    | pintr_src->int_bits.rcint | pintr_src->int_bits.stint)
	    != 0 ? B_TRUE : B_FALSE;
	if (pintr_src->int_bits.reint)
		ngep->statistics.sw_statistics.rx_err++;
	if (pintr_src->int_bits.miss)
		ngep->statistics.sw_statistics.rx_nobuffer++;

	btx = (pintr_src->int_bits.teint | pintr_src->int_bits.tcint)
	    != 0 ? B_TRUE : B_FALSE;
	if (pintr_src->int_bits.stint && ngep->poll)
		ngep->stint_count++;
	if (ngep->poll && (ngep->stint_count % ngep->param_tx_n_intr == 0))
		btx = B_TRUE;
	if (btx)
		nge_tx_recycle(ngep, B_TRUE);
	if (brx)
		nge_receive(ngep);
	if (pintr_src->int_bits.teint)
		ngep->statistics.sw_statistics.tx_stop_err++;
	if (ngep->intr_moderation && brx) {
		if (ngep->poll) {
			if (ngep->recv_count < ngep->param_rx_intr_hwater) {
				ngep->quiet_time++;
				if (ngep->quiet_time ==
				    ngep->param_poll_quiet_time) {
					ngep->poll = B_FALSE;
					ngep->quiet_time = 0;
					ngep->stint_count = 0;
					nge_tx_recycle(ngep, B_TRUE);
				}
			} else
				ngep->quiet_time = 0;
		} else {
			if (ngep->recv_count > ngep->param_rx_intr_lwater) {
				ngep->busy_time++;
				if (ngep->busy_time ==
				    ngep->param_poll_busy_time) {
					ngep->poll = B_TRUE;
					ngep->busy_time = 0;
				}
			} else
				ngep->busy_time = 0;
		}
	}
	ngep->recv_count = 0;
	if (pintr_src->int_bits.feint)
		nge_chip_err(ngep);
	/* link interrupt, check the link state */
	if (pintr_src->int_bits.mint) {
		mintr_src.src_val = nge_reg_get32(ngep, NGE_MINTR_SRC);
		nge_reg_put32(ngep, NGE_MINTR_SRC, mintr_src.src_val);
		nge_wake_factotum(ngep);
	}
}

/*
 * nge_chip_intr() -- handle chip interrupts
 */
/* ARGSUSED */
uint_t
nge_chip_intr(caddr_t arg1, caddr_t arg2)
{
	nge_t *ngep = (nge_t *)arg1;
	nge_intr_src intr_src;
	nge_intr_mask intr_mask;

	mutex_enter(ngep->genlock);

	/*
	 * Check whether the chip says it's asserting #INTA;
	 * if not, don't process or claim the interrupt.
	 */
	intr_src.intr_val = nge_reg_get32(ngep, NGE_INTR_SRC);
	if (intr_src.intr_val == 0) {
		mutex_exit(ngep->genlock);
		return (DDI_INTR_UNCLAIMED);
	}
	/*
	 * Ack the interrupt
	 */
	nge_reg_put32(ngep, NGE_INTR_SRC, intr_src.intr_val);

	if (ngep->nge_chip_state != NGE_CHIP_RUNNING) {
		mutex_exit(ngep->genlock);
		return (DDI_INTR_CLAIMED);
	}
	nge_intr_handle(ngep, &intr_src);
	if (ngep->poll && !ngep->ch_intr_mode) {
		intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
		intr_mask.mask_bits.stint = NGE_SET;
		intr_mask.mask_bits.rcint = NGE_CLEAR;
		intr_mask.mask_bits.reint = NGE_CLEAR;
		intr_mask.mask_bits.tcint = NGE_CLEAR;
		intr_mask.mask_bits.teint = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
		ngep->ch_intr_mode = B_TRUE;
	} else if ((ngep->ch_intr_mode) && (!ngep->poll)) {
		nge_reg_put32(ngep, NGE_INTR_MASK, ngep->intr_masks);
		ngep->ch_intr_mode = B_FALSE;
	}
	mutex_exit(ngep->genlock);
	return (DDI_INTR_CLAIMED);
}
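
/*
 * nge_pp_ioctl() -- validate an NGE_PEEK/NGE_POKE diagnostic ioctl and
 * dispatch it to the appropriate peek/poke routine for config space,
 * operating registers, MII registers or the SEEPROM.
 */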
static enum ioc_reply
nge_pp_ioctl(nge_t *ngep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	int err;
	uint64_t sizemask;
	uint64_t mem_va;
	uint64_t maxoff;
	boolean_t peek;
	nge_peekpoke_t *ppd;
	int (*ppfn)(nge_t *ngep, nge_peekpoke_t *ppd);

	switch (cmd) {
	default:
		return (IOC_INVAL);

	case NGE_PEEK:
		peek = B_TRUE;
		break;

	case NGE_POKE:
		peek = B_FALSE;
		break;
	}

	/*
	 * Validate format of ioctl
	 */
	if (iocp->ioc_count != sizeof (nge_peekpoke_t))
		return (IOC_INVAL);
	if (mp->b_cont == NULL)
		return (IOC_INVAL);
	ppd = (nge_peekpoke_t *)mp->b_cont->b_rptr;

	/*
	 * Validate request parameters
	 */
	switch (ppd->pp_acc_space) {
	default:
		return (IOC_INVAL);

	case NGE_PP_SPACE_CFG:
		/*
		 * Config space
		 */
		sizemask = 8|4|2|1;
		mem_va = 0;
		maxoff = PCI_CONF_HDR_SIZE;
		ppfn = peek ? nge_chip_peek_cfg : nge_chip_poke_cfg;
		break;

	case NGE_PP_SPACE_REG:
		/*
		 * Memory-mapped I/O space
		 */
		sizemask = 8|4|2|1;
		mem_va = 0;
		maxoff = NGE_REG_SIZE;
		ppfn = peek ? nge_chip_peek_reg : nge_chip_poke_reg;
		break;

	case NGE_PP_SPACE_MII:
		sizemask = 4|2|1;
		mem_va = 0;
		maxoff = NGE_MII_SIZE;
		ppfn = peek ? nge_chip_peek_mii : nge_chip_poke_mii;
		break;

	case NGE_PP_SPACE_SEEPROM:
		sizemask = 4|2|1;
		mem_va = 0;
		maxoff = NGE_SEEROM_SIZE;
		ppfn = peek ? nge_chip_peek_seeprom : nge_chip_poke_seeprom;
		break;
	}

	switch (ppd->pp_acc_size) {
	default:
		return (IOC_INVAL);

	case 8:
	case 4:
	case 2:
	case 1:
		if ((ppd->pp_acc_size & sizemask) == 0)
			return (IOC_INVAL);
		break;
	}

	if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
		return (IOC_INVAL);

	if (ppd->pp_acc_offset >= maxoff)
		return (IOC_INVAL);

	if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff)
		return (IOC_INVAL);

	/*
	 * All OK - go do it!
	 */
	ppd->pp_acc_offset += mem_va;
	if (ppfn)
		err = (*ppfn)(ngep, ppd);
	if (err != DDI_SUCCESS)
		return (IOC_INVAL);
	return (peek ? IOC_REPLY : IOC_ACK);
}

static enum ioc_reply nge_diag_ioctl(nge_t *ngep, int cmd, mblk_t *mp,
    struct iocblk *iocp);
#pragma	no_inline(nge_diag_ioctl)

static enum ioc_reply
nge_diag_ioctl(nge_t *ngep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	ASSERT(mutex_owned(ngep->genlock));

	switch (cmd) {
	default:
		nge_error(ngep, "nge_diag_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case NGE_DIAG:
		return (IOC_ACK);

	case NGE_PEEK:
	case NGE_POKE:
		return (nge_pp_ioctl(ngep, cmd, mp, iocp));

	case NGE_PHY_RESET:
		return (IOC_RESTART_ACK);

	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		return (IOC_ACK);
	}

	/* NOTREACHED */
}

enum ioc_reply
nge_chip_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	ASSERT(mutex_owned(ngep->genlock));

	cmd = iocp->ioc_cmd;

	switch (cmd) {
	default:
		return (IOC_INVAL);

	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
#if	NGE_DEBUGGING
		return (nge_diag_ioctl(ngep, cmd, mp, iocp));
#else
		return (IOC_INVAL);
#endif

	case NGE_MII_READ:
	case NGE_MII_WRITE:
		return (IOC_INVAL);

#if	NGE_SEE_IO32
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
		return (IOC_INVAL);
#endif

#if	NGE_FLASH_IO32
	case NGE_FLASH_READ:
	case NGE_FLASH_WRITE:
		return (IOC_INVAL);
#endif
	}
}