/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "nge.h"

static uint32_t nge_watchdog_count = 1 << 29;
extern boolean_t nge_enable_msi;
static void nge_sync_mac_modes(nge_t *);

#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_CHIP

/*
 * Operating register get/set access routines
 */
uint8_t nge_reg_get8(nge_t *ngep, nge_regno_t regno);
#pragma	inline(nge_reg_get8)

uint8_t
nge_reg_get8(nge_t *ngep, nge_regno_t regno)
{
	NGE_TRACE(("nge_reg_get8($%p, 0x%lx)", (void *)ngep, regno));

	return (ddi_get8(ngep->io_handle, PIO_ADDR(ngep, regno)));
}

void nge_reg_put8(nge_t *ngep, nge_regno_t regno, uint8_t data);
#pragma	inline(nge_reg_put8)

void
nge_reg_put8(nge_t *ngep, nge_regno_t regno, uint8_t data)
{
	NGE_TRACE(("nge_reg_put8($%p, 0x%lx, 0x%x)",
	    (void *)ngep, regno, data));
	ddi_put8(ngep->io_handle, PIO_ADDR(ngep, regno), data);
}

uint16_t nge_reg_get16(nge_t *ngep, nge_regno_t regno);
#pragma	inline(nge_reg_get16)

uint16_t
nge_reg_get16(nge_t *ngep, nge_regno_t regno)
{
	NGE_TRACE(("nge_reg_get16($%p, 0x%lx)", (void *)ngep, regno));
	return (ddi_get16(ngep->io_handle, PIO_ADDR(ngep, regno)));
}

void nge_reg_put16(nge_t *ngep, nge_regno_t regno, uint16_t data);
#pragma	inline(nge_reg_put16)

void
nge_reg_put16(nge_t *ngep, nge_regno_t regno, uint16_t data)
{
	NGE_TRACE(("nge_reg_put16($%p, 0x%lx, 0x%x)",
	    (void *)ngep, regno, data));
	ddi_put16(ngep->io_handle, PIO_ADDR(ngep, regno), data);
}

uint32_t nge_reg_get32(nge_t *ngep, nge_regno_t regno);
#pragma	inline(nge_reg_get32)

uint32_t
nge_reg_get32(nge_t *ngep, nge_regno_t regno)
{
	NGE_TRACE(("nge_reg_get32($%p, 0x%lx)", (void *)ngep, regno));
	return (ddi_get32(ngep->io_handle, PIO_ADDR(ngep, regno)));
}

void nge_reg_put32(nge_t *ngep, nge_regno_t regno, uint32_t data);
#pragma	inline(nge_reg_put32)

void
nge_reg_put32(nge_t *ngep, nge_regno_t regno, uint32_t data)
{
	NGE_TRACE(("nge_reg_put32($%p, 0x%lx, 0x%x)",
	    (void *)ngep, regno, data));
	ddi_put32(ngep->io_handle, PIO_ADDR(ngep, regno), data);
}

static int nge_chip_peek_cfg(nge_t *ngep, nge_peekpoke_t *ppd);
#pragma	no_inline(nge_chip_peek_cfg)

static int
nge_chip_peek_cfg(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	uint64_t regval;
	uint64_t regno;

	NGE_TRACE(("nge_chip_peek_cfg($%p, $%p)",
	    (void *)ngep, (void *)ppd));

	err = DDI_SUCCESS;
	regno = ppd->pp_acc_offset;
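	/* The requested access size selects the width of the config-space read */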
	switch (ppd->pp_acc_size) {
	case 1:
		regval = pci_config_get8(ngep->cfg_handle, regno);
		break;

	case 2:
		regval = pci_config_get16(ngep->cfg_handle, regno);
		break;

	case 4:
		regval = pci_config_get32(ngep->cfg_handle, regno);
		break;

	case 8:
		regval = pci_config_get64(ngep->cfg_handle, regno);
		break;
	}
	ppd->pp_acc_data = regval;
	return (err);
}

static int nge_chip_poke_cfg(nge_t *ngep, nge_peekpoke_t *ppd);

static int
nge_chip_poke_cfg(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	uint64_t regval;
	uint64_t regno;

	NGE_TRACE(("nge_chip_poke_cfg($%p, $%p)",
	    (void *)ngep, (void *)ppd));

	err = DDI_SUCCESS;
	regno = ppd->pp_acc_offset;
	regval = ppd->pp_acc_data;

	switch (ppd->pp_acc_size) {
	case 1:
		pci_config_put8(ngep->cfg_handle, regno, regval);
		break;

	case 2:
		pci_config_put16(ngep->cfg_handle, regno, regval);
		break;

	case 4:
		pci_config_put32(ngep->cfg_handle, regno, regval);
		break;

	case 8:
		pci_config_put64(ngep->cfg_handle, regno, regval);
		break;
	}

	return (err);
}

static int nge_chip_peek_reg(nge_t *ngep, nge_peekpoke_t *ppd);

static int
nge_chip_peek_reg(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	uint64_t regval;
	void *regaddr;

	NGE_TRACE(("nge_chip_peek_reg($%p, $%p)",
	    (void *)ngep, (void *)ppd));

	err = DDI_SUCCESS;
	regaddr = PIO_ADDR(ngep, ppd->pp_acc_offset);

	switch (ppd->pp_acc_size) {
	case 1:
		regval = ddi_get8(ngep->io_handle, regaddr);
		break;

	case 2:
		regval = ddi_get16(ngep->io_handle, regaddr);
		break;

	case 4:
		regval = ddi_get32(ngep->io_handle, regaddr);
		break;

	case 8:
		regval = ddi_get64(ngep->io_handle, regaddr);
		break;

	default:
		regval = 0x0ull;
		break;
	}
	ppd->pp_acc_data = regval;
	return (err);
}

static int nge_chip_poke_reg(nge_t *ngep, nge_peekpoke_t *ppd);

static int
nge_chip_poke_reg(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;
	uint64_t regval;
	void *regaddr;

	NGE_TRACE(("nge_chip_poke_reg($%p, $%p)",
	    (void *)ngep, (void *)ppd));

	err = DDI_SUCCESS;
	regaddr = PIO_ADDR(ngep, ppd->pp_acc_offset);
	regval = ppd->pp_acc_data;

	switch (ppd->pp_acc_size) {
	case 1:
		ddi_put8(ngep->io_handle, regaddr, regval);
		break;

	case 2:
		ddi_put16(ngep->io_handle, regaddr, regval);
		break;

	case 4:
		ddi_put32(ngep->io_handle, regaddr, regval);
		break;

	case 8:
		ddi_put64(ngep->io_handle, regaddr, regval);
		break;
	}
	return (err);
}

static int nge_chip_peek_mii(nge_t *ngep, nge_peekpoke_t *ppd);
#pragma	no_inline(nge_chip_peek_mii)

static int
nge_chip_peek_mii(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;

	err = DDI_SUCCESS;
	ppd->pp_acc_data = nge_mii_get16(ngep, ppd->pp_acc_offset/2);
	return (err);
}

static int nge_chip_poke_mii(nge_t *ngep, nge_peekpoke_t *ppd);
#pragma	no_inline(nge_chip_poke_mii)

static int
nge_chip_poke_mii(nge_t *ngep, nge_peekpoke_t *ppd)
{
	int err;

	err = DDI_SUCCESS;
	nge_mii_put16(ngep, ppd->pp_acc_offset/2, ppd->pp_acc_data);
	return (err);
}
/*
 * Basic SEEPROM get/set access routine
 *
 * This uses the chip's SEEPROM auto-access method, controlled by the
 * Serial EEPROM Address/Data Registers at 0x504h, so the CPU
 * doesn't have to fiddle with the individual bits.
 *
 * The caller should hold <genlock> and *also* have already acquired
 * the right to access the SEEPROM.
 *
 * Return value:
 *	0 on success,
 *	ENODATA on access timeout (maybe retryable: device may just be busy)
 *	EPROTO on other h/w or s/w errors.
 *
 * <*dp> is an input to a SEEPROM_ACCESS_WRITE operation, or an output
 * from a (successful) SEEPROM_ACCESS_READ.
 */
static int
nge_seeprom_access(nge_t *ngep, uint32_t cmd, nge_regno_t addr, uint16_t *dp)
{
	uint32_t tries;
	nge_ep_cmd cmd_reg;
	nge_ep_data data_reg;

	NGE_TRACE(("nge_seeprom_access($%p, %d, %x, $%p)",
	    (void *)ngep, cmd, addr, (void *)dp));

	ASSERT(mutex_owned(ngep->genlock));

	/*
	 * Check that there's no command in progress.
	 *
	 * Note: this *shouldn't* ever find that there is a command
	 * in progress, because we already hold the <genlock> mutex,
	 * which also guards against a conflict with the chip's
	 * internal firmware or another process accessing the same
	 * (shared) SEEPROM. So this is just a final consistency check:
	 * we shouldn't see EITHER the START bit (command started but
	 * not complete) OR the COMPLETE bit (command completed but
	 * not cleared).
	 */
	cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	for (tries = 0; tries < 30; tries++) {
		if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
			break;
		drv_usecwait(10);
		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	}

	/*
	 * This should not happen. If it does, we have to restart the
	 * eeprom state machine.
	 */
	if (tries == 30) {
		cmd_reg.cmd_bits.sts = SEEPROM_READY;
		nge_reg_put32(ngep, NGE_EP_CMD, cmd_reg.cmd_val);
		drv_usecwait(10);
		/*
		 * Poll the status bit to make sure the eeprom is ready
		 */
		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
		for (tries = 0; tries < 30; tries++) {
			if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
				break;
			drv_usecwait(10);
			cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
		}
	}

	/*
	 * Assemble the command ...
	 */
	cmd_reg.cmd_bits.addr = addr;
	cmd_reg.cmd_bits.cmd = cmd;
	cmd_reg.cmd_bits.sts = 0;

	nge_reg_put32(ngep, NGE_EP_CMD, cmd_reg.cmd_val);

	/*
	 * Poll until the access completes.
	 */
	cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	for (tries = 0; tries < 30; tries++) {
		if (cmd_reg.cmd_bits.sts == SEEPROM_READY)
			break;
		drv_usecwait(10);
		cmd_reg.cmd_val = nge_reg_get32(ngep, NGE_EP_CMD);
	}

	if (tries == 30) {
		nge_report(ngep, NGE_HW_ROM);
		return (DDI_FAILURE);
	}

	switch (cmd) {
	default:
	case SEEPROM_CMD_WRITE_ENABLE:
	case SEEPROM_CMD_ERASE:
	case SEEPROM_CMD_ERALSE_ALL:
	case SEEPROM_CMD_WRITE_DIS:
		break;

	case SEEPROM_CMD_READ:
		data_reg.data_val = nge_reg_get32(ngep, NGE_EP_DATA);
		*dp = data_reg.data_bits.data;
		break;

	case SEEPROM_CMD_WRITE:
		data_reg.data_val = nge_reg_get32(ngep, NGE_EP_DATA);
		data_reg.data_bits.data = *dp;
		nge_reg_put32(ngep, NGE_EP_DATA, data_reg.data_val);
		break;
	}

	return (DDI_SUCCESS);
}

static int
nge_chip_peek_seeprom(nge_t *ngep, nge_peekpoke_t *ppd)
{
	uint16_t data;
	int err;

	err = nge_seeprom_access(ngep, SEEPROM_CMD_READ,
	    ppd->pp_acc_offset, &data);
	ppd->pp_acc_data = data;
	return (err);
}

static int
nge_chip_poke_seeprom(nge_t *ngep, nge_peekpoke_t *ppd)
{
	uint16_t data;
	int err;

	data = ppd->pp_acc_data;
	err = nge_seeprom_access(ngep, SEEPROM_CMD_WRITE,
	    ppd->pp_acc_offset, &data);
	return (err);
}

void
nge_init_dev_spec_param(nge_t *ngep)
{
	nge_dev_spec_param_t *dev_param_p;
	chip_info_t *infop;

	dev_param_p = &ngep->dev_spec_param;
	infop = (chip_info_t *)&ngep->chipinfo;

	switch (infop->device) {
	case DEVICE_ID_NF3_E6:
	case DEVICE_ID_NF3_DF:
	case DEVICE_ID_MCP04_37:
	case DEVICE_ID_MCP04_38:
		dev_param_p->msi = B_FALSE;
		dev_param_p->msi_x = B_FALSE;
		dev_param_p->vlan = B_FALSE;
		dev_param_p->advanced_pm = B_FALSE;
		dev_param_p->tx_pause_frame = B_FALSE;
		dev_param_p->rx_pause_frame = B_FALSE;
		dev_param_p->jumbo = B_FALSE;
		dev_param_p->tx_rx_64byte = B_FALSE;
		dev_param_p->rx_hw_checksum = B_FALSE;
		dev_param_p->tx_hw_checksum = 0;
		dev_param_p->desc_type = DESC_OFFLOAD;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
		dev_param_p->nge_split = NGE_SPLIT_32;
		break;

	case DEVICE_ID_CK804_56:
	case DEVICE_ID_CK804_57:
		dev_param_p->msi = B_TRUE;
		dev_param_p->msi_x = B_TRUE;
		dev_param_p->vlan = B_FALSE;
		dev_param_p->advanced_pm = B_FALSE;
		dev_param_p->tx_pause_frame = B_FALSE;
		dev_param_p->rx_pause_frame = B_TRUE;
		dev_param_p->jumbo = B_TRUE;
		dev_param_p->tx_rx_64byte = B_FALSE;
		dev_param_p->rx_hw_checksum = B_TRUE;
		dev_param_p->tx_hw_checksum = HCKSUM_IPHDRCKSUM;
		dev_param_p->desc_type = DESC_HOT;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_3072;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_3072;
		dev_param_p->nge_split = NGE_SPLIT_96;
		break;

	case DEVICE_ID_MCP61_3EE:
	case DEVICE_ID_MCP61_3EF:
	case DEVICE_ID_MCP51_268:
	case DEVICE_ID_MCP51_269:
		dev_param_p->msi = B_FALSE;
		dev_param_p->msi_x = B_FALSE;
		dev_param_p->vlan = B_FALSE;
		dev_param_p->advanced_pm = B_TRUE;
		dev_param_p->tx_pause_frame = B_FALSE;
		dev_param_p->rx_pause_frame = B_FALSE;
		dev_param_p->jumbo = B_FALSE;
		dev_param_p->tx_rx_64byte = B_TRUE;
		dev_param_p->rx_hw_checksum = B_FALSE;
		dev_param_p->tx_hw_checksum = 0;
		dev_param_p->desc_type = DESC_OFFLOAD;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
		dev_param_p->nge_split = NGE_SPLIT_32;
		break;

	case DEVICE_ID_MCP55_372:
	case DEVICE_ID_MCP55_373:
		dev_param_p->msi = B_TRUE;
		dev_param_p->msi_x = B_TRUE;
		dev_param_p->vlan = B_TRUE;
		dev_param_p->advanced_pm = B_TRUE;
		dev_param_p->tx_pause_frame = B_TRUE;
		dev_param_p->rx_pause_frame = B_TRUE;
		dev_param_p->jumbo = B_TRUE;
		dev_param_p->tx_rx_64byte = B_TRUE;
		dev_param_p->rx_hw_checksum = B_TRUE;
		dev_param_p->tx_hw_checksum = HCKSUM_IPHDRCKSUM;
		dev_param_p->desc_type = DESC_HOT;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_3072;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_3072;
		dev_param_p->nge_split = NGE_SPLIT_96;
		break;

	default:
		dev_param_p->msi = B_FALSE;
		dev_param_p->msi_x = B_FALSE;
		dev_param_p->vlan = B_FALSE;
		dev_param_p->advanced_pm = B_FALSE;
		dev_param_p->tx_pause_frame = B_FALSE;
		dev_param_p->rx_pause_frame = B_FALSE;
		dev_param_p->jumbo = B_FALSE;
		dev_param_p->tx_rx_64byte = B_FALSE;
		dev_param_p->rx_hw_checksum = B_FALSE;
		dev_param_p->tx_hw_checksum = 0;
		dev_param_p->desc_type = DESC_OFFLOAD;
		dev_param_p->rx_desc_num = NGE_RECV_SLOTS_DESC_1024;
		dev_param_p->tx_desc_num = NGE_SEND_SLOTS_DESC_1024;
		dev_param_p->nge_split = NGE_SPLIT_32;
		return;
	}
}

/*
 * Perform first-stage chip (re-)initialisation, using only config-space
 * accesses:
 *
 * + Read the vendor/device/revision/subsystem/cache-line-size registers,
 *   returning the data in the structure pointed to by <infop>.
 */
void nge_chip_cfg_init(nge_t *ngep, chip_info_t *infop, boolean_t reset);
#pragma	no_inline(nge_chip_cfg_init)

void
nge_chip_cfg_init(nge_t *ngep, chip_info_t *infop, boolean_t reset)
{
	uint16_t command;
	ddi_acc_handle_t handle;
	nge_interbus_conf interbus_conf;
	nge_msi_mask_conf msi_mask_conf;
	nge_msi_map_cap_conf cap_conf;

	NGE_TRACE(("nge_chip_cfg_init($%p, $%p, %d)",
	    (void *)ngep, (void *)infop, reset));

	/*
	 * Save the PCI cache line size and subsystem vendor ID.
	 *
	 * Read all the config-space registers that characterise the
	 * chip, specifically vendor/device/revision/subsystem vendor
	 * and subsystem device id.
	 */
	handle = ngep->cfg_handle;

	/* Read the vendor information only once */
	if (reset == B_FALSE) {
		infop->command = pci_config_get16(handle,
		    PCI_CONF_COMM);
		infop->vendor = pci_config_get16(handle,
		    PCI_CONF_VENID);
		infop->device = pci_config_get16(handle,
		    PCI_CONF_DEVID);
		infop->subven = pci_config_get16(handle,
		    PCI_CONF_SUBVENID);
		infop->subdev = pci_config_get16(handle,
		    PCI_CONF_SUBSYSID);
		infop->class_code = pci_config_get8(handle,
		    PCI_CONF_BASCLASS);
		infop->revision = pci_config_get8(handle,
		    PCI_CONF_REVID);
		infop->clsize = pci_config_get8(handle,
		    PCI_CONF_CACHE_LINESZ);
		infop->latency = pci_config_get8(handle,
		    PCI_CONF_LATENCY_TIMER);
	}

	if (nge_enable_msi) {
		/* Clear the hidden MSI-disable bit to enable MSI support */
		interbus_conf.conf_val = pci_config_get32(handle,
		    PCI_CONF_HT_INTERNAL);
		if ((infop->device == DEVICE_ID_MCP55_373) ||
		    (infop->device == DEVICE_ID_MCP55_372))
			interbus_conf.conf_bits.msix_off = NGE_SET;
		interbus_conf.conf_bits.msi_off = NGE_CLEAR;
		pci_config_put32(handle, PCI_CONF_HT_INTERNAL,
		    interbus_conf.conf_val);

		if ((infop->device == DEVICE_ID_MCP55_373) ||
		    (infop->device == DEVICE_ID_MCP55_372)) {

			/* Clear the vector-off bits for mcp55 */
			msi_mask_conf.msi_mask_conf_val =
			    pci_config_get32(handle, PCI_CONF_HT_MSI_MASK);
			msi_mask_conf.msi_mask_bits.vec0_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec1_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec2_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec3_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec4_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec5_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec6_off = NGE_CLEAR;
			msi_mask_conf.msi_mask_bits.vec7_off = NGE_CLEAR;
			pci_config_put32(handle, PCI_CONF_HT_MSI_MASK,
			    msi_mask_conf.msi_mask_conf_val);

			/* Enable the MSI mapping */
			cap_conf.msi_map_cap_conf_val =
			    pci_config_get32(handle, PCI_CONF_HT_MSI_MAP_CAP);
			cap_conf.map_cap_conf_bits.map_en = NGE_SET;
			pci_config_put32(handle, PCI_CONF_HT_MSI_MAP_CAP,
			    cap_conf.msi_map_cap_conf_val);
		}
	} else {
		interbus_conf.conf_val = pci_config_get32(handle,
		    PCI_CONF_HT_INTERNAL);
		interbus_conf.conf_bits.msi_off = NGE_SET;
		pci_config_put32(handle, PCI_CONF_HT_INTERNAL,
		    interbus_conf.conf_val);
	}

	command = infop->command | PCI_COMM_MAE;
	command &= ~PCI_COMM_MEMWR_INVAL;
	command |= PCI_COMM_ME;
	pci_config_put16(handle, PCI_CONF_COMM, command);
	pci_config_put16(handle, PCI_CONF_STAT, ~0);
}

int
nge_chip_stop(nge_t *ngep, boolean_t fault)
{
	int err;
	uint32_t reg_val;
	uint32_t tries;
	nge_mintr_src mintr_src;
	nge_mii_cs mii_cs;
	nge_rx_poll rx_poll;
	nge_tx_poll tx_poll;
	nge_rx_en rx_en;
	nge_tx_en tx_en;
	nge_tx_sta tx_sta;
	nge_rx_sta rx_sta;
	nge_mode_cntl mode;
	nge_pmu_cntl2 pmu_cntl2;

	NGE_TRACE(("nge_chip_stop($%p, %d)", (void *)ngep, fault));

	err = DDI_SUCCESS;

	/* Clear any pending PHY interrupt */
	mintr_src.src_val = nge_reg_get8(ngep, NGE_MINTR_SRC);
	nge_reg_put8(ngep, NGE_MINTR_SRC, mintr_src.src_val);

	/* Mask all interrupts */
	reg_val = nge_reg_get32(ngep, NGE_INTR_MASK);
	reg_val &= ~NGE_INTR_ALL_EN;
	nge_reg_put32(ngep, NGE_INTR_MASK, reg_val);

	/* Disable auto-polling of the phy */
	mii_cs.cs_val = nge_reg_get32(ngep, NGE_MII_CS);
	mii_cs.cs_bits.ap_en = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_MII_CS, mii_cs.cs_val);

	/* Reset buffer management & DMA */
	mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
	mode.mode_bits.dma_dis = NGE_SET;
	mode.mode_bits.desc_type = ngep->desc_mode;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val);

	for (tries = 0; tries < 5000; tries++) {
		drv_usecwait(10);
		mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
		if (mode.mode_bits.dma_status == NGE_SET)
			break;
	}
	if (tries == 5000) {
		return (DDI_FAILURE);
	}

	/*
	 * For mcp55, bits 1:31 of NGE_RX_EN and NGE_TX_EN are
	 * reserved for use by the SMU. PXE versions newer than 527
	 * support the SMU and set bit 24 of NGE_RX_EN/NGE_TX_EN on
	 * exit, which prevents the MAC from winning arbitration for
	 * the main transmit/receive channels.
	 */
	if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {

		/* Disable the rx state machine */
		nge_reg_put32(ngep, NGE_RX_EN, 0x0);

		/* Disable the tx state machine */
		nge_reg_put32(ngep, NGE_TX_EN, 0x0);
	} else {

		/* Disable the rx state machine */
		rx_en.val = nge_reg_get8(ngep, NGE_RX_EN);
		rx_en.bits.rx_en = NGE_CLEAR;
		nge_reg_put8(ngep, NGE_RX_EN, rx_en.val);

		/* Disable the tx state machine */
		tx_en.val = nge_reg_get8(ngep, NGE_TX_EN);
		tx_en.bits.tx_en = NGE_CLEAR;
		nge_reg_put8(ngep, NGE_TX_EN, tx_en.val);
	}

	/*
	 * Clear the status of the tx state machine
	 * and make sure the tx channel is idle
	 */
	tx_sta.sta_val = nge_reg_get32(ngep, NGE_TX_STA);
	for (tries = 0; tries < 1000; tries++) {
		if (tx_sta.sta_bits.tx_chan_sta == NGE_CLEAR)
			break;
		drv_usecwait(10);
		tx_sta.sta_val = nge_reg_get32(ngep, NGE_TX_STA);
	}
	if (tries == 1000) {
		return (DDI_FAILURE);
	}
	nge_reg_put32(ngep, NGE_TX_STA, tx_sta.sta_val);

	/*
	 * Clear the status of the rx state machine
	 * and make sure the rx channel is idle
	 */
	rx_sta.sta_val = nge_reg_get32(ngep, NGE_RX_STA);
	for (tries = 0; tries < 1000; tries++) {
		if (rx_sta.sta_bits.rx_chan_sta == NGE_CLEAR)
			break;
		drv_usecwait(10);
		rx_sta.sta_val = nge_reg_get32(ngep, NGE_RX_STA);
	}
	if (tries == 1000) {
		return (DDI_FAILURE);
	}
	nge_reg_put32(ngep, NGE_RX_STA, rx_sta.sta_val);

	/* Disable auto-polling of the rx state machine */
	rx_poll.poll_val = nge_reg_get32(ngep, NGE_RX_POLL);
	rx_poll.poll_bits.rpen = NGE_CLEAR;
	rx_poll.poll_bits.rpi = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_RX_POLL, rx_poll.poll_val);

	/* Disable auto-polling of the tx state machine */
	tx_poll.poll_val = nge_reg_get32(ngep, NGE_TX_POLL);
	tx_poll.poll_bits.tpen = NGE_CLEAR;
	tx_poll.poll_bits.tpi = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_TX_POLL, tx_poll.poll_val);

	/* Restore buffer management */
	mode.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
	mode.mode_bits.bm_reset = NGE_SET;
	mode.mode_bits.tx_rcom_en = NGE_SET;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode.mode_val);

	if (ngep->dev_spec_param.advanced_pm) {

		nge_reg_put32(ngep, NGE_PMU_CIDLE_LIMIT, 0);
		nge_reg_put32(ngep, NGE_PMU_DIDLE_LIMIT, 0);

		pmu_cntl2.cntl2_val = nge_reg_get32(ngep, NGE_PMU_CNTL2);
		pmu_cntl2.cntl2_bits.cidle_timer = NGE_CLEAR;
		pmu_cntl2.cntl2_bits.didle_timer = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_PMU_CNTL2, pmu_cntl2.cntl2_val);
	}
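	/* Record why the chip was stopped; a FAULT state lets the factotum trigger recovery */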
	if (fault)
		ngep->nge_chip_state = NGE_CHIP_FAULT;
	else
		ngep->nge_chip_state = NGE_CHIP_STOPPED;

	return (err);
}

static void
nge_rx_setup(nge_t *ngep)
{
	uint64_t desc_addr;
	nge_rxtx_dlen dlen;
	nge_rx_poll rx_poll;

	/*
	 * Fill in the address and length of the rx descriptors
	 */
	desc_addr = ngep->recv->desc.cookie.dmac_laddress;
	nge_reg_put32(ngep, NGE_RX_DADR, desc_addr);
	nge_reg_put32(ngep, NGE_RX_DADR_HI, desc_addr >> 32);
	dlen.dlen_val = nge_reg_get32(ngep, NGE_RXTX_DLEN);
	dlen.dlen_bits.rdlen = ngep->recv->desc.nslots - 1;
	nge_reg_put32(ngep, NGE_RXTX_DLEN, dlen.dlen_val);

	rx_poll.poll_val = nge_reg_get32(ngep, NGE_RX_POLL);
	rx_poll.poll_bits.rpi = RX_POLL_INTV_1G;
	rx_poll.poll_bits.rpen = NGE_SET;
	nge_reg_put32(ngep, NGE_RX_POLL, rx_poll.poll_val);
}

static void
nge_tx_setup(nge_t *ngep)
{
	uint64_t desc_addr;
	nge_rxtx_dlen dlen;

	/*
	 * Fill in the address and length of the tx descriptors
	 */
	desc_addr = ngep->send->desc.cookie.dmac_laddress;
	nge_reg_put32(ngep, NGE_TX_DADR, desc_addr);
	nge_reg_put32(ngep, NGE_TX_DADR_HI, desc_addr >> 32);
	dlen.dlen_val = nge_reg_get32(ngep, NGE_RXTX_DLEN);
	dlen.dlen_bits.tdlen = ngep->send->desc.nslots - 1;
	nge_reg_put32(ngep, NGE_RXTX_DLEN, dlen.dlen_val);
}

static int
nge_buff_setup(nge_t *ngep)
{
	nge_mode_cntl mode_cntl;
	nge_dev_spec_param_t *dev_param_p;

	dev_param_p = &ngep->dev_spec_param;

	/*
	 * Configure the Rx and Tx buffers
	 */
	nge_rx_setup(ngep);
	nge_tx_setup(ngep);

	/*
	 * Configure buffer attributes
	 */
	mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);

	/*
	 * Enable DMA access requests
	 */
	mode_cntl.mode_bits.dma_dis = NGE_CLEAR;

	/*
	 * Enable buffer management
	 */
	mode_cntl.mode_bits.bm_reset = NGE_CLEAR;

	/*
	 * Select the descriptor type (standard-offload or hot)
	 */
	mode_cntl.mode_bits.desc_type = ngep->desc_mode;

	/*
	 * Enable receive hardware checksum if the device supports it
	 */
	if (dev_param_p->rx_hw_checksum) {
		mode_cntl.mode_bits.rx_sum_en = NGE_SET;
	} else
		mode_cntl.mode_bits.rx_sum_en = NGE_CLEAR;

	/*
	 * Disable Tx PRD coarse update
	 */
	mode_cntl.mode_bits.tx_prd_cu_en = NGE_CLEAR;

	/*
	 * Disable 64-byte access
	 */
	mode_cntl.mode_bits.w64_dis = NGE_SET;

	/*
	 * Skipping Rx error frames is not supported; if it is
	 * enabled, jumbo frames no longer work.
	 */
	mode_cntl.mode_bits.rx_filter_en = NGE_CLEAR;

	/*
	 * Hot mode can not be supported yet
	 */
	mode_cntl.mode_bits.resv15 = NGE_CLEAR;

	if (dev_param_p->vlan) {
		/* Disable vlan stripping for devices which support vlan */
		mode_cntl.mode_bits.vlan_strip = NGE_CLEAR;

		/* Disable vlan insertion for devices which support vlan */
		mode_cntl.mode_bits.vlan_ins = NGE_CLEAR;
	}

	if (dev_param_p->tx_rx_64byte) {

		/* Set the maximum TX PRD fetch size to 64 bytes */
		mode_cntl.mode_bits.tx_fetch_prd = NGE_SET;

		/* Set the maximum RX PRD fetch size to 64 bytes */
		mode_cntl.mode_bits.rx_fetch_prd = NGE_SET;
	}

	/*
	 * Upload Rx data as it arrives, rather than waiting for a full frame
	 */
	mode_cntl.mode_bits.resv16 = NGE_CLEAR;

	/*
	 * Normal HOT table accesses
	 */
	mode_cntl.mode_bits.resv17 = NGE_CLEAR;

	/*
	 * Normal HOT buffer requesting
	 */
	mode_cntl.mode_bits.resv18 = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);

	/*
	 * Signal the controller to check for new Rx descriptors
	 */
	mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
	mode_cntl.mode_bits.rxdm = NGE_SET;
	mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
	nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);

	return (DDI_SUCCESS);
}

/*
 * When the chipset is reset, it cannot restore the original
 * MAC address to the MAC address registers.
 *
 * When the driver is detached, this function writes the original
 * MAC address back to the MAC address registers.
 */
void
nge_restore_mac_addr(nge_t *ngep)
{
	uint32_t mac_addr;

	mac_addr = (uint32_t)ngep->chipinfo.hw_mac_addr;
	nge_reg_put32(ngep, NGE_UNI_ADDR0, mac_addr);
	mac_addr = (uint32_t)(ngep->chipinfo.hw_mac_addr >> 32);
	nge_reg_put32(ngep, NGE_UNI_ADDR1, mac_addr);
}

int
nge_chip_reset(nge_t *ngep)
{
	int err;
	uint8_t i;
	uint32_t regno;
	uint64_t mac;
	nge_uni_addr1 uaddr1;
	nge_cp_cntl ee_cntl;
	nge_soft_misc soft_misc;
	nge_pmu_cntl0 pmu_cntl0;
	nge_pmu_cntl2 pmu_cntl2;
	nge_pm_cntl2 pm_cntl2;
	const nge_ksindex_t *ksip;

	NGE_TRACE(("nge_chip_reset($%p)", (void *)ngep));

	/*
	 * Clear the statistics by reading the statistics registers
	 */
	for (ksip = nge_statistics; ksip->name != NULL; ++ksip) {
		regno = KS_BASE + ksip->index * sizeof (uint32_t);
		(void) nge_reg_get32(ngep, regno);
	}

	/*
	 * Set up seeprom control
	 */
	ee_cntl.cntl_val = nge_reg_get32(ngep, NGE_EP_CNTL);
	ee_cntl.cntl_bits.clkdiv = EEPROM_CLKDIV;
	ee_cntl.cntl_bits.rom_size = EEPROM_32K;
	ee_cntl.cntl_bits.word_wid = ACCESS_16BIT;
	ee_cntl.cntl_bits.wait_slots = EEPROM_WAITCLK;
	nge_reg_put32(ngep, NGE_EP_CNTL, ee_cntl.cntl_val);

	/*
	 * Read the unicast mac address table
	 */
	if (ngep->nge_chip_state == NGE_CHIP_INITIAL) {
		uaddr1.addr_val = nge_reg_get32(ngep, NGE_UNI_ADDR1);
		mac = uaddr1.addr_bits.addr;
		mac <<= 32;
		mac |= nge_reg_get32(ngep, NGE_UNI_ADDR0);
		if (mac != 0ULL && mac != ~0ULL) {
			ngep->chipinfo.hw_mac_addr = mac;
			for (i = ETHERADDRL; i-- != 0; ) {
				ngep->chipinfo.vendor_addr.addr[i] =
				    (uchar_t)mac;
				ngep->cur_uni_addr.addr[i] = (uchar_t)mac;
				mac >>= 8;
			}
			ngep->chipinfo.vendor_addr.set = 1;
		}
	}
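	/* Restore the PCI cache line size and latency timer captured at first attach */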
	pci_config_put8(ngep->cfg_handle, PCI_CONF_CACHE_LINESZ,
	    ngep->chipinfo.clsize);
	pci_config_put8(ngep->cfg_handle, PCI_CONF_LATENCY_TIMER,
	    ngep->chipinfo.latency);

	if (ngep->dev_spec_param.advanced_pm) {

		/* Program the software misc register */
		soft_misc.misc_val = nge_reg_get32(ngep, NGE_SOFT_MISC);
		soft_misc.misc_bits.rx_clk_vx_rst = NGE_SET;
		soft_misc.misc_bits.tx_clk_vx_rst = NGE_SET;
		soft_misc.misc_bits.clk12m_vx_rst = NGE_SET;
		soft_misc.misc_bits.fpci_clk_vx_rst = NGE_SET;
		soft_misc.misc_bits.rx_clk_vc_rst = NGE_SET;
		soft_misc.misc_bits.tx_clk_vc_rst = NGE_SET;
		soft_misc.misc_bits.fs_clk_vc_rst = NGE_SET;
		soft_misc.misc_bits.rst_ex_m2pintf = NGE_SET;
		nge_reg_put32(ngep, NGE_SOFT_MISC, soft_misc.misc_val);

		/* wait for 32 us */
		drv_usecwait(32);

		soft_misc.misc_val = nge_reg_get32(ngep, NGE_SOFT_MISC);
		soft_misc.misc_bits.rx_clk_vx_rst = NGE_CLEAR;
		soft_misc.misc_bits.tx_clk_vx_rst = NGE_CLEAR;
		soft_misc.misc_bits.clk12m_vx_rst = NGE_CLEAR;
		soft_misc.misc_bits.fpci_clk_vx_rst = NGE_CLEAR;
		soft_misc.misc_bits.rx_clk_vc_rst = NGE_CLEAR;
		soft_misc.misc_bits.tx_clk_vc_rst = NGE_CLEAR;
		soft_misc.misc_bits.fs_clk_vc_rst = NGE_CLEAR;
		soft_misc.misc_bits.rst_ex_m2pintf = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_SOFT_MISC, soft_misc.misc_val);

		/* Program the PMU registers */
		pmu_cntl0.cntl0_val = nge_reg_get32(ngep, NGE_PMU_CNTL0);
		pmu_cntl0.cntl0_bits.core_spd10_fp =
		    NGE_PMU_CORE_SPD10_BUSY;
		pmu_cntl0.cntl0_bits.core_spd10_idle =
		    NGE_PMU_CORE_SPD10_IDLE;
		pmu_cntl0.cntl0_bits.core_spd100_fp =
		    NGE_PMU_CORE_SPD100_BUSY;
		pmu_cntl0.cntl0_bits.core_spd100_idle =
		    NGE_PMU_CORE_SPD100_IDLE;
		pmu_cntl0.cntl0_bits.core_spd1000_fp =
		    NGE_PMU_CORE_SPD1000_BUSY;
		pmu_cntl0.cntl0_bits.core_spd1000_idle =
		    NGE_PMU_CORE_SPD100_IDLE;
		pmu_cntl0.cntl0_bits.core_spd10_idle =
		    NGE_PMU_CORE_SPD10_IDLE;
		nge_reg_put32(ngep, NGE_PMU_CNTL0, pmu_cntl0.cntl0_val);

		/* Set the core idle limit value */
		nge_reg_put32(ngep, NGE_PMU_CIDLE_LIMIT,
		    NGE_PMU_CIDLE_LIMIT_DEF);

		/* Set the device idle limit value */
		nge_reg_put32(ngep, NGE_PMU_DIDLE_LIMIT,
		    NGE_PMU_DIDLE_LIMIT_DEF);

		/* Enable the core/device idle timer in PMU control 2 */
		pmu_cntl2.cntl2_val = nge_reg_get32(ngep, NGE_PMU_CNTL2);
		pmu_cntl2.cntl2_bits.cidle_timer = NGE_SET;
		pmu_cntl2.cntl2_bits.didle_timer = NGE_SET;
		pmu_cntl2.cntl2_bits.core_enable = NGE_SET;
		pmu_cntl2.cntl2_bits.dev_enable = NGE_SET;
		nge_reg_put32(ngep, NGE_PMU_CNTL2, pmu_cntl2.cntl2_val);
	}

	/*
	 * Stop the chipset and clear buffer management
	 */
	err = nge_chip_stop(ngep, B_FALSE);
	if (err == DDI_FAILURE)
		return (err);

	/*
	 * Clear the power state bits for the phy, since the interface
	 * no longer works after rebooting from Windows on a multi-boot
	 * machine
	 */
	if (ngep->chipinfo.device == DEVICE_ID_MCP51_268 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP51_269 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_372 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP61_3EE ||
	    ngep->chipinfo.device == DEVICE_ID_MCP61_3EF) {

		pm_cntl2.cntl_val = nge_reg_get32(ngep, NGE_PM_CNTL2);
		/* bring the phy out of coma mode */
		pm_cntl2.cntl_bits.phy_coma_set = NGE_CLEAR;
		/* disable the auto reset coma bits */
		pm_cntl2.cntl_bits.resv4 = NGE_CLEAR;
		/* restore power to gated clocks */
		pm_cntl2.cntl_bits.resv8_11 = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_PM_CNTL2, pm_cntl2.cntl_val);
	}

	/*
	 * Reset the external phy
	 */
	if (!nge_phy_reset(ngep))
		return (DDI_FAILURE);

	ngep->nge_chip_state = NGE_CHIP_RESET;
	return (DDI_SUCCESS);
}

int
nge_chip_start(nge_t *ngep)
{
	int err;
	nge_itc itc;
	nge_tx_cntl tx_cntl;
	nge_rx_cntrl0 rx_cntl0;
	nge_rx_cntl1 rx_cntl1;
	nge_tx_en tx_en;
	nge_rx_en rx_en;
	nge_mii_cs mii_cs;
	nge_swtr_cntl swtr_cntl;
	nge_rx_fifo_wm rx_fifo;
	nge_intr_mask intr_mask;
	nge_mintr_mask mintr_mask;
	nge_dev_spec_param_t *dev_param_p;

	NGE_TRACE(("nge_chip_start($%p)", (void *)ngep));

	/*
	 * Set up buffer management
	 */
	err = nge_buff_setup(ngep);
	if (err == DDI_FAILURE)
		return (err);

	dev_param_p = &ngep->dev_spec_param;

	/*
	 * Enable the phy auto-polling attribute
	 */
	mii_cs.cs_val = nge_reg_get32(ngep, NGE_MII_CS);
	mii_cs.cs_bits.ap_paddr = ngep->phy_xmii_addr;
	mii_cs.cs_bits.ap_en = NGE_SET;
	mii_cs.cs_bits.ap_intv = MII_POLL_INTV;
	nge_reg_put32(ngep, NGE_MII_CS, mii_cs.cs_val);

	/*
	 * Set up the link
	 */
	(*ngep->physops->phys_update)(ngep);

	/*
	 * Configure the tx parameters
	 */
	tx_cntl.cntl_val = nge_reg_get32(ngep, NGE_TX_CNTL);
	if (dev_param_p->tx_pause_frame)
		tx_cntl.cntl_bits.paen = NGE_SET;
	else
		tx_cntl.cntl_bits.paen = NGE_CLEAR;
	tx_cntl.cntl_bits.retry_en = NGE_SET;
	tx_cntl.cntl_bits.pad_en = NGE_SET;
	tx_cntl.cntl_bits.fappend_en = NGE_SET;
	tx_cntl.cntl_bits.two_def_en = NGE_SET;
	tx_cntl.cntl_bits.max_retry = 15;
	tx_cntl.cntl_bits.burst_en = NGE_CLEAR;
	tx_cntl.cntl_bits.uflo_err_mask = NGE_CLEAR;
	tx_cntl.cntl_bits.tlcol_mask = NGE_CLEAR;
	tx_cntl.cntl_bits.lcar_mask = NGE_CLEAR;
	tx_cntl.cntl_bits.def_mask = NGE_CLEAR;
	tx_cntl.cntl_bits.exdef_mask = NGE_SET;
	tx_cntl.cntl_bits.lcar_mask = NGE_SET;
	tx_cntl.cntl_bits.tlcol_mask = NGE_SET;
	tx_cntl.cntl_bits.uflo_err_mask = NGE_SET;
	tx_cntl.cntl_bits.jam_seq_en = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_TX_CNTL, tx_cntl.cntl_val);

	/*
	 * Configure the parameters of the rx state machine.
	 * Enable the following:
	 *	1). Pad Strip
	 *	2). FCS Relay
	 *	3). Pause
	 *	4). Address filter
	 *	5). Runt Packet receive
	 *	6). Broadcast
	 *	7). Receive Deferral
	 *
	 * Disable the following to decrease the number of interrupts:
	 *	1). Runt Interrupt
	 *	2). Rx Late Collision interrupt
	 *	3). Rx Max length Error Interrupt
	 *	4). Rx Length Field error Interrupt
	 *	5). Rx FCS error interrupt
	 *	6). Rx overflow error interrupt
	 *	7). Rx Frame alignment error interrupt
	 */
	rx_cntl0.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);
	rx_cntl0.cntl_bits.padsen = NGE_CLEAR;
	rx_cntl0.cntl_bits.fcsren = NGE_CLEAR;
	if (dev_param_p->rx_pause_frame)
		rx_cntl0.cntl_bits.paen = NGE_SET;
	else
		rx_cntl0.cntl_bits.paen = NGE_CLEAR;
	rx_cntl0.cntl_bits.lben = NGE_CLEAR;
	rx_cntl0.cntl_bits.afen = NGE_SET;
	rx_cntl0.cntl_bits.runten = NGE_CLEAR;
	rx_cntl0.cntl_bits.brdis = NGE_CLEAR;
	rx_cntl0.cntl_bits.rdfen = NGE_CLEAR;
	rx_cntl0.cntl_bits.runtm = NGE_CLEAR;
	rx_cntl0.cntl_bits.slfb = NGE_CLEAR;
	rx_cntl0.cntl_bits.rlcolm = NGE_CLEAR;
	rx_cntl0.cntl_bits.maxerm = NGE_CLEAR;
	rx_cntl0.cntl_bits.lferm = NGE_CLEAR;
	rx_cntl0.cntl_bits.crcm = NGE_CLEAR;
	rx_cntl0.cntl_bits.ofolm = NGE_CLEAR;
	rx_cntl0.cntl_bits.framerm = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val);

	/*
	 * Configure the watermarks for the rx state machine
	 */
	rx_fifo.wm_val = nge_reg_get32(ngep, NGE_RX_FIFO_WM);
	rx_fifo.wm_bits.data_hwm = ngep->rx_datahwm;
	rx_fifo.wm_bits.prd_lwm = ngep->rx_prdlwm;
	rx_fifo.wm_bits.prd_hwm = ngep->rx_prdhwm;
	nge_reg_put32(ngep, NGE_RX_FIFO_WM, rx_fifo.wm_val);

	/*
	 * Configure the deferral time slot for the rx state machine
	 */
	nge_reg_put8(ngep, NGE_RX_DEf, ngep->rx_def);

	/*
	 * Configure the maximum rx packet length
	 */
	rx_cntl1.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL1);
	rx_cntl1.cntl_bits.length = ngep->max_sdu;
	nge_reg_put32(ngep, NGE_RX_CNTL1, rx_cntl1.cntl_val);

	/*
	 * Enable the Tx state machine
	 */
	tx_en.val = nge_reg_get8(ngep, NGE_TX_EN);
	tx_en.bits.tx_en = NGE_SET;
	nge_reg_put8(ngep, NGE_TX_EN, tx_en.val);

	/*
	 * Enable the Rx state machine
	 */
	rx_en.val = nge_reg_get8(ngep, NGE_RX_EN);
	rx_en.bits.rx_en = NGE_SET;
	nge_reg_put8(ngep, NGE_RX_EN, rx_en.val);

	itc.itc_val = nge_reg_get32(ngep, NGE_SWTR_ITC);
	itc.itc_bits.sw_intv = ngep->sw_intr_intv;
	nge_reg_put32(ngep, NGE_SWTR_ITC, itc.itc_val);

	swtr_cntl.ctrl_val = nge_reg_get8(ngep, NGE_SWTR_CNTL);
	swtr_cntl.cntl_bits.sten = NGE_SET;
	swtr_cntl.cntl_bits.stren = NGE_SET;
	nge_reg_put32(ngep, NGE_SWTR_CNTL, swtr_cntl.ctrl_val);

	/*
	 * Disable all mii read/write operation interrupts
	 */
	mintr_mask.mask_val = nge_reg_get8(ngep, NGE_MINTR_MASK);
	mintr_mask.mask_bits.mrei = NGE_CLEAR;
	mintr_mask.mask_bits.mcc2 = NGE_CLEAR;
	mintr_mask.mask_bits.mcc1 = NGE_CLEAR;
	mintr_mask.mask_bits.mapi = NGE_SET;
	mintr_mask.mask_bits.mpdi = NGE_SET;
	nge_reg_put8(ngep, NGE_MINTR_MASK, mintr_mask.mask_val);

	/*
	 * Enable all interrupt events
	 */
	intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
	intr_mask.mask_bits.reint = NGE_SET;
	intr_mask.mask_bits.rcint = NGE_SET;
	intr_mask.mask_bits.miss = NGE_SET;
	intr_mask.mask_bits.teint = NGE_CLEAR;
	intr_mask.mask_bits.tcint = NGE_SET;
	intr_mask.mask_bits.stint = NGE_CLEAR;
	intr_mask.mask_bits.mint = NGE_CLEAR;
	intr_mask.mask_bits.rfint = NGE_CLEAR;
	intr_mask.mask_bits.tfint = NGE_CLEAR;
	intr_mask.mask_bits.feint = NGE_SET;
	intr_mask.mask_bits.resv10 = NGE_CLEAR;
	intr_mask.mask_bits.resv11 = NGE_CLEAR;
	intr_mask.mask_bits.resv12 = NGE_CLEAR;
	intr_mask.mask_bits.resv13 = NGE_CLEAR;
	intr_mask.mask_bits.phyint = NGE_CLEAR;
	ngep->intr_masks = intr_mask.mask_val;
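	/* The cached mask is restored by nge_chip_intr() when leaving polling mode */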
	nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);

	ngep->nge_chip_state = NGE_CHIP_RUNNING;
	return (DDI_SUCCESS);
}

/*
 * nge_chip_sync() -- program the chip with the unicast MAC address,
 * the multicast hash table, and the required level of promiscuity.
 */
void
nge_chip_sync(nge_t *ngep)
{
	uint8_t i;
	uint64_t macaddr;
	uint64_t mul_addr;
	uint64_t mul_mask;
	nge_rx_cntrl0 rx_cntl;
	nge_uni_addr1 uni_adr1;

	NGE_TRACE(("nge_chip_sync($%p)", (void *)ngep));

	macaddr = 0x0ull;
	mul_addr = 0x0ull;
	mul_mask = 0x0ull;
	rx_cntl.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);

	if (ngep->promisc) {
		rx_cntl.cntl_bits.afen = NGE_CLEAR;
		rx_cntl.cntl_bits.brdis = NGE_SET;
	} else {
		rx_cntl.cntl_bits.afen = NGE_SET;
		rx_cntl.cntl_bits.brdis = NGE_CLEAR;
	}

	/*
	 * Transform the MAC address from host to chip format, the unicast
	 * MAC address(es) ...
	 */
	for (i = ETHERADDRL, macaddr = 0ull; i != 0; --i) {
		macaddr |= ngep->cur_uni_addr.addr[i-1];
		macaddr <<= (i > 1) ? 8 : 0;
	}

	nge_reg_put32(ngep, NGE_UNI_ADDR0, (uint32_t)macaddr);
	macaddr = macaddr >> 32;
	uni_adr1.addr_val = nge_reg_get32(ngep, NGE_UNI_ADDR1);
	uni_adr1.addr_bits.addr = (uint16_t)macaddr;
	uni_adr1.addr_bits.resv16_31 = (uint16_t)0;
	nge_reg_put32(ngep, NGE_UNI_ADDR1, uni_adr1.addr_val);

	/*
	 * Reprogram the multicast address table ...
	 */
	for (i = ETHERADDRL, mul_addr = 0ull; i != 0; --i) {
		mul_addr |= ngep->cur_mul_addr.addr[i-1];
		mul_addr <<= (i > 1) ? 8 : 0;
		mul_mask |= ngep->cur_mul_mask.addr[i-1];
		mul_mask <<= (i > 1) ? 8 : 0;
	}
	nge_reg_put32(ngep, NGE_MUL_ADDR0, (uint32_t)mul_addr);
	mul_addr >>= 32;
	nge_reg_put32(ngep, NGE_MUL_ADDR1, mul_addr);
	nge_reg_put32(ngep, NGE_MUL_MASK, (uint32_t)mul_mask);
	mul_mask >>= 32;
	nge_reg_put32(ngep, NGE_MUL_MASK1, mul_mask);

	/*
	 * Set or clear the PROMISCUOUS mode bit
	 */
	nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl.cntl_val);

	/*
	 * For internal PHY loopback, the link will not be up, so we
	 * need to sync the MAC modes directly.
	 */
	if (ngep->param_loop_mode == NGE_LOOP_INTERNAL_PHY)
		nge_sync_mac_modes(ngep);
}

static void
nge_chip_err(nge_t *ngep)
{
	nge_reg010 reg010_ins;
	nge_sw_statistics_t *psw_stat;
	nge_intr_mask intr_mask;

	NGE_TRACE(("nge_chip_err($%p)", (void *)ngep));

	psw_stat = (nge_sw_statistics_t *)&ngep->statistics.sw_statistics;
	reg010_ins.reg010_val = nge_reg_get32(ngep, NGE_REG010);
	if (reg010_ins.reg010_bits.resv0)
		psw_stat->fe_err.tso_err_mss++;

	if (reg010_ins.reg010_bits.resv1)
		psw_stat->fe_err.tso_dis++;

	if (reg010_ins.reg010_bits.resv2)
		psw_stat->fe_err.tso_err_nosum++;

	if (reg010_ins.reg010_bits.resv3)
		psw_stat->fe_err.tso_err_hov++;

	if (reg010_ins.reg010_bits.resv4)
		psw_stat->fe_err.tso_err_huf++;

	if (reg010_ins.reg010_bits.resv5)
		psw_stat->fe_err.tso_err_l2++;

	if (reg010_ins.reg010_bits.resv6)
		psw_stat->fe_err.tso_err_ip++;

	if (reg010_ins.reg010_bits.resv7)
		psw_stat->fe_err.tso_err_l4++;

	if (reg010_ins.reg010_bits.resv8)
		psw_stat->fe_err.tso_err_tcp++;

	if (reg010_ins.reg010_bits.resv9)
		psw_stat->fe_err.hsum_err_ip++;

	if (reg010_ins.reg010_bits.resv10)
		psw_stat->fe_err.hsum_err_l4++;

	if (reg010_ins.reg010_val != 0) {

		/*
		 * Fatal error is triggered by malformed driver commands.
		 * Disable unless debugging.
		 */
		intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
		intr_mask.mask_bits.feint = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
		ngep->intr_masks = intr_mask.mask_val;
	}
}

static void
nge_sync_mac_modes(nge_t *ngep)
{
	nge_tx_def tx_def;
	nge_tx_fifo_wm tx_fifo;
	nge_bkoff_cntl bk_cntl;
	nge_mac2phy m2p;
	nge_rx_cntrl0 rx_cntl0;
	nge_dev_spec_param_t *dev_param_p;

	dev_param_p = &ngep->dev_spec_param;

	tx_def.def_val = nge_reg_get32(ngep, NGE_TX_DEF);
	m2p.m2p_val = nge_reg_get32(ngep, NGE_MAC2PHY);
	tx_fifo.wm_val = nge_reg_get32(ngep, NGE_TX_FIFO_WM);
	bk_cntl.cntl_val = nge_reg_get32(ngep, NGE_BKOFF_CNTL);
	bk_cntl.bkoff_bits.rseed = BKOFF_RSEED;

	switch (ngep->param_link_speed) {
	case 10:
		m2p.m2p_bits.speed = low_speed;
		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
		if (ngep->phy_mode == RGMII_IN) {
			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_10_100;
			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
		} else {
			tx_def.def_bits.if_def = TX_TIFG_MII;
			tx_def.def_bits.ifg2_def = TX_IFG2_MII;
		}
		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_MII;
		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_MII;
		break;

	case 100:
		m2p.m2p_bits.speed = fast_speed;
		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
		if (ngep->phy_mode == RGMII_IN) {
			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_10_100;
			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
		} else {
			tx_def.def_bits.if_def = TX_TIFG_MII;
			tx_def.def_bits.ifg2_def = TX_IFG2_MII;
		}
		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_MII;
		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_MII;
		break;

	case 1000:
		m2p.m2p_bits.speed = giga_speed;
		tx_def.def_bits.ifg1_def = TX_IFG1_DEFAULT;
		if (ngep->param_link_duplex == LINK_DUPLEX_FULL) {
			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_1000;
			tx_def.def_bits.if_def = TX_IFG_RGMII_1000_FD;
		} else {
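			/* Half duplex at 1000 Mbps falls back to the common RGMII inter-frame gap */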
			tx_def.def_bits.ifg2_def = TX_IFG2_RGMII_1000;
			tx_def.def_bits.if_def = TX_IFG_RGMII_OTHER;
		}

		tx_fifo.wm_bits.nbfb_wm = TX_FIFO_NOB_WM_GMII;
		bk_cntl.bkoff_bits.sltm = BKOFF_SLIM_GMII;
		break;
	}

	if (ngep->chipinfo.device == DEVICE_ID_MCP55_373 ||
	    ngep->chipinfo.device == DEVICE_ID_MCP55_372) {
		m2p.m2p_bits.phyintr = NGE_CLEAR;
		m2p.m2p_bits.phyintrlvl = NGE_CLEAR;
	}
	if (ngep->param_link_duplex == LINK_DUPLEX_HALF) {
		m2p.m2p_bits.hdup_en = NGE_SET;
	} else
		m2p.m2p_bits.hdup_en = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_MAC2PHY, m2p.m2p_val);
	nge_reg_put32(ngep, NGE_TX_DEF, tx_def.def_val);

	tx_fifo.wm_bits.data_lwm = TX_FIFO_DATA_LWM;
	tx_fifo.wm_bits.prd_lwm = TX_FIFO_PRD_LWM;
	tx_fifo.wm_bits.uprd_hwm = TX_FIFO_PRD_HWM;
	tx_fifo.wm_bits.fb_wm = TX_FIFO_TBFW;
	nge_reg_put32(ngep, NGE_TX_FIFO_WM, tx_fifo.wm_val);

	nge_reg_put32(ngep, NGE_BKOFF_CNTL, bk_cntl.cntl_val);

	rx_cntl0.cntl_val = nge_reg_get32(ngep, NGE_RX_CNTL0);
	if (ngep->param_link_rx_pause && dev_param_p->rx_pause_frame)
		rx_cntl0.cntl_bits.paen = NGE_SET;
	else
		rx_cntl0.cntl_bits.paen = NGE_CLEAR;
	nge_reg_put32(ngep, NGE_RX_CNTL0, rx_cntl0.cntl_val);
}

/*
 * Handler for hardware link state change.
 *
 * When this routine is called, the hardware link state has changed
 * and the new state is reflected in the param_* variables. Here
 * we must update the softstate, reprogram the MAC to match, and
 * record the change in the log and/or on the console.
 */
static void
nge_factotum_link_handler(nge_t *ngep)
{
	/*
	 * Update the s/w link_state
	 */
	if (ngep->param_link_up)
		ngep->link_state = LINK_STATE_UP;
	else
		ngep->link_state = LINK_STATE_DOWN;

	/*
	 * Reprogram the MAC modes to match
	 */
	nge_sync_mac_modes(ngep);
}

static boolean_t
nge_factotum_link_check(nge_t *ngep)
{
	boolean_t lchg;
	boolean_t check;

	ASSERT(mutex_owned(ngep->genlock));

	(*ngep->physops->phys_check)(ngep);
	switch (ngep->link_state) {
	case LINK_STATE_UP:
		lchg = (ngep->param_link_up == B_FALSE);
		check = (ngep->param_link_up == B_FALSE);
		break;

	case LINK_STATE_DOWN:
		lchg = (ngep->param_link_up == B_TRUE);
		check = (ngep->param_link_up == B_TRUE);
		break;

	default:
		check = B_TRUE;
		break;
	}

	/*
	 * If <check> is false, we're sure the link hasn't changed.
	 * If true, however, it's not yet definitive; we have to call
	 * nge_phys_check() to determine whether the link has settled
	 * into a new state yet ... and if it has, then call the link
	 * state change handler. But when the chip is a 5700 in a Dell
	 * 6650, the link may have changed even if <check> is false,
	 * so we have to call nge_phys_check() to determine the link
	 * state in that case too.
	 */
	if (check)
		nge_factotum_link_handler(ngep);

	return (lchg);
}

/*
 * Factotum routine to check for Tx stall, using the 'watchdog' counter
 */
static boolean_t nge_factotum_stall_check(nge_t *ngep);

static boolean_t
nge_factotum_stall_check(nge_t *ngep)
{
	uint32_t dogval;
	/*
	 * Specific check for Tx stall ...
	 *
	 * The 'watchdog' counter is incremented whenever a packet
	 * is queued, reset to 1 when some (but not all) buffers
	 * are reclaimed, reset to 0 (disabled) when all buffers
	 * are reclaimed, and shifted left here. If it exceeds the
	 * threshold value, the chip is assumed to have stalled and
	 * is put into the ERROR state. The factotum will then reset
	 * it on the next pass.
	 *
	 * All of which should ensure that we don't get into a state
	 * where packets are left pending indefinitely!
	 */
	dogval = nge_atomic_shl32(&ngep->watchdog, 1);
	if (dogval < nge_watchdog_count) {
		ngep->stall_cknum = 0;
	} else {
		ngep->stall_cknum++;
	}

	if (ngep->stall_cknum < 8) {
		return (B_FALSE);
	} else {
		ngep->stall_cknum = 0;
		ngep->statistics.sw_statistics.tx_stall++;
		return (B_TRUE);
	}
}

/*
 * The factotum is woken up when there's something to do that we'd rather
 * not do from inside a hardware interrupt handler or high-level cyclic.
 * Its two main tasks are:
 *	reset & restart the chip after an error
 *	check the link status whenever necessary
 */
/* ARGSUSED */
uint_t
nge_chip_factotum(caddr_t args1, caddr_t args2)
{
	uint_t result;
	nge_t *ngep;
	boolean_t err;
	boolean_t linkchg;

	ngep = (nge_t *)args1;

	NGE_TRACE(("nge_chip_factotum($%p)", (void *)ngep));

	mutex_enter(ngep->softlock);
	if (ngep->factotum_flag == 0) {
		mutex_exit(ngep->softlock);
		return (DDI_INTR_UNCLAIMED);
	}
	ngep->factotum_flag = 0;
	mutex_exit(ngep->softlock);

	err = B_FALSE;
	linkchg = B_FALSE;
	result = DDI_INTR_CLAIMED;

	mutex_enter(ngep->genlock);
	switch (ngep->nge_chip_state) {
	default:
		break;

	case NGE_CHIP_RUNNING:
		linkchg = nge_factotum_link_check(ngep);
		err = nge_factotum_stall_check(ngep);
		break;

	case NGE_CHIP_FAULT:
		(void) nge_restart(ngep);
		NGE_REPORT((ngep, "automatic recovery activated"));
		break;
	}

	if (err)
		(void) nge_chip_stop(ngep, B_TRUE);
	mutex_exit(ngep->genlock);

	/*
	 * If the link state changed, tell the world about it (if
	 * this version of MAC supports link state notification).
	 * Note: can't do this while still holding the mutex.
	 */
	if (linkchg)
		mac_link_update(ngep->mh, ngep->link_state);

	return (result);
}

static void
nge_intr_handle(nge_t *ngep, nge_intr_src *pintr_src)
{
	boolean_t brx;
	boolean_t btx;
	nge_mintr_src mintr_src;

	brx = B_FALSE;
	btx = B_FALSE;
	ngep->statistics.sw_statistics.intr_count++;
	ngep->statistics.sw_statistics.intr_lval = pintr_src->intr_val;
	brx = (pintr_src->int_bits.reint | pintr_src->int_bits.miss
	    | pintr_src->int_bits.rcint | pintr_src->int_bits.stint)
	    != 0 ? B_TRUE : B_FALSE;
	if (pintr_src->int_bits.reint)
		ngep->statistics.sw_statistics.rx_err++;
	if (pintr_src->int_bits.miss)
		ngep->statistics.sw_statistics.rx_nobuffer++;

	btx = (pintr_src->int_bits.teint | pintr_src->int_bits.tcint)
	    != 0 ? B_TRUE : B_FALSE;
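	/* In polling mode, software-timer interrupts are counted to pace tx recycling */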
	if (pintr_src->int_bits.stint && ngep->poll)
		ngep->stint_count++;
	if (ngep->poll && (ngep->stint_count % ngep->param_tx_n_intr == 0))
		btx = B_TRUE;
	if (btx)
		nge_tx_recycle(ngep, B_TRUE);
	if (brx)
		nge_receive(ngep);
	if (pintr_src->int_bits.teint)
		ngep->statistics.sw_statistics.tx_stop_err++;

	if (ngep->intr_moderation && brx) {
		if (ngep->poll) {
			if (ngep->recv_count < ngep->param_rx_intr_hwater) {
				ngep->quiet_time++;
				if (ngep->quiet_time ==
				    ngep->param_poll_quiet_time) {
					ngep->poll = B_FALSE;
					ngep->quiet_time = 0;
					ngep->stint_count = 0;
					nge_tx_recycle(ngep, B_TRUE);
				}
			} else
				ngep->quiet_time = 0;
		} else {
			if (ngep->recv_count > ngep->param_rx_intr_lwater) {
				ngep->busy_time++;
				if (ngep->busy_time ==
				    ngep->param_poll_busy_time) {
					ngep->poll = B_TRUE;
					ngep->busy_time = 0;
				}
			} else
				ngep->busy_time = 0;
		}
	}
	ngep->recv_count = 0;
	if (pintr_src->int_bits.feint)
		nge_chip_err(ngep);

	/* link interrupt, check the link state */
	if (pintr_src->int_bits.mint) {
		mintr_src.src_val = nge_reg_get32(ngep, NGE_MINTR_SRC);
		nge_reg_put32(ngep, NGE_MINTR_SRC, mintr_src.src_val);
		nge_wake_factotum(ngep);
	}
}

/*
 * nge_chip_intr() -- handle chip interrupts
 */
/* ARGSUSED */
uint_t
nge_chip_intr(caddr_t arg1, caddr_t arg2)
{
	nge_t *ngep = (nge_t *)arg1;
	nge_intr_src intr_src;
	nge_intr_mask intr_mask;

	mutex_enter(ngep->genlock);

	if (ngep->suspended) {
		mutex_exit(ngep->genlock);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Check whether the chip says it's asserting #INTA;
	 * if not, don't process or claim the interrupt.
	 */
	intr_src.intr_val = nge_reg_get32(ngep, NGE_INTR_SRC);
	if (intr_src.intr_val == 0) {
		mutex_exit(ngep->genlock);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Ack the interrupt
	 */
	nge_reg_put32(ngep, NGE_INTR_SRC, intr_src.intr_val);

	if (ngep->nge_chip_state != NGE_CHIP_RUNNING) {
		mutex_exit(ngep->genlock);
		return (DDI_INTR_CLAIMED);
	}
	nge_intr_handle(ngep, &intr_src);

	if (ngep->poll && !ngep->ch_intr_mode) {
		intr_mask.mask_val = nge_reg_get32(ngep, NGE_INTR_MASK);
		intr_mask.mask_bits.stint = NGE_SET;
		intr_mask.mask_bits.rcint = NGE_CLEAR;
		intr_mask.mask_bits.reint = NGE_CLEAR;
		intr_mask.mask_bits.tcint = NGE_CLEAR;
		intr_mask.mask_bits.teint = NGE_CLEAR;
		nge_reg_put32(ngep, NGE_INTR_MASK, intr_mask.mask_val);
		ngep->ch_intr_mode = B_TRUE;
	} else if ((ngep->ch_intr_mode) && (!ngep->poll)) {
		nge_reg_put32(ngep, NGE_INTR_MASK, ngep->intr_masks);
		ngep->ch_intr_mode = B_FALSE;
	}
	mutex_exit(ngep->genlock);
	return (DDI_INTR_CLAIMED);
}

static enum ioc_reply
nge_pp_ioctl(nge_t *ngep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	int err;
	uint64_t sizemask;
	uint64_t mem_va;
	uint64_t maxoff;
	boolean_t peek;
	nge_peekpoke_t *ppd;
	int (*ppfn)(nge_t *ngep, nge_peekpoke_t *ppd);

	switch (cmd) {
	default:
		return (IOC_INVAL);

	case NGE_PEEK:
		peek = B_TRUE;
		break;

	case NGE_POKE:
		peek = B_FALSE;
		break;
	}

	/*
	 * Validate format of ioctl
	 */
	if (iocp->ioc_count != sizeof (nge_peekpoke_t))
		return (IOC_INVAL);
	if (mp->b_cont == NULL)
		return (IOC_INVAL);
	ppd = (nge_peekpoke_t *)mp->b_cont->b_rptr;

	/*
	 * Validate request parameters
	 */
	switch (ppd->pp_acc_space) {
	default:
		return (IOC_INVAL);

	case NGE_PP_SPACE_CFG:
		/*
		 * Config space
		 */
		sizemask = 8|4|2|1;
		mem_va = 0;
		maxoff = PCI_CONF_HDR_SIZE;
		ppfn = peek ? nge_chip_peek_cfg : nge_chip_poke_cfg;
		break;

	case NGE_PP_SPACE_REG:
		/*
		 * Memory-mapped I/O space
		 */
		sizemask = 8|4|2|1;
		mem_va = 0;
		maxoff = NGE_REG_SIZE;
		ppfn = peek ? nge_chip_peek_reg : nge_chip_poke_reg;
		break;

	case NGE_PP_SPACE_MII:
		sizemask = 4|2|1;
		mem_va = 0;
		maxoff = NGE_MII_SIZE;
		ppfn = peek ? nge_chip_peek_mii : nge_chip_poke_mii;
		break;

	case NGE_PP_SPACE_SEEPROM:
		sizemask = 4|2|1;
		mem_va = 0;
		maxoff = NGE_SEEROM_SIZE;
		ppfn = peek ? nge_chip_peek_seeprom : nge_chip_poke_seeprom;
		break;
	}

	switch (ppd->pp_acc_size) {
	default:
		return (IOC_INVAL);

	case 8:
	case 4:
	case 2:
	case 1:
		if ((ppd->pp_acc_size & sizemask) == 0)
			return (IOC_INVAL);
		break;
	}

	if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
		return (IOC_INVAL);

	if (ppd->pp_acc_offset >= maxoff)
		return (IOC_INVAL);

	if (ppd->pp_acc_offset+ppd->pp_acc_size > maxoff)
		return (IOC_INVAL);

	/*
	 * All OK - go do it!
	 */
	ppd->pp_acc_offset += mem_va;
	if (ppfn)
		err = (*ppfn)(ngep, ppd);
	if (err != DDI_SUCCESS)
		return (IOC_INVAL);
	return (peek ? IOC_REPLY : IOC_ACK);
}
static enum ioc_reply nge_diag_ioctl(nge_t *ngep, int cmd, mblk_t *mp,
    struct iocblk *iocp);
#pragma	no_inline(nge_diag_ioctl)

static enum ioc_reply
nge_diag_ioctl(nge_t *ngep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	ASSERT(mutex_owned(ngep->genlock));

	switch (cmd) {
	default:
		nge_error(ngep, "nge_diag_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case NGE_DIAG:
		return (IOC_ACK);

	case NGE_PEEK:
	case NGE_POKE:
		return (nge_pp_ioctl(ngep, cmd, mp, iocp));

	case NGE_PHY_RESET:
		return (IOC_RESTART_ACK);

	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
		return (IOC_ACK);
	}

	/* NOTREACHED */
}

enum ioc_reply
nge_chip_ioctl(nge_t *ngep, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	ASSERT(mutex_owned(ngep->genlock));

	cmd = iocp->ioc_cmd;

	switch (cmd) {
	default:
		return (IOC_INVAL);

	case NGE_DIAG:
	case NGE_PEEK:
	case NGE_POKE:
	case NGE_PHY_RESET:
	case NGE_SOFT_RESET:
	case NGE_HARD_RESET:
#if	NGE_DEBUGGING
		return (nge_diag_ioctl(ngep, cmd, mp, iocp));
#else
		return (IOC_INVAL);
#endif

	case NGE_MII_READ:
	case NGE_MII_WRITE:
		return (IOC_INVAL);

#if	NGE_SEE_IO32
	case NGE_SEE_READ:
	case NGE_SEE_WRITE:
		return (IOC_INVAL);
#endif

#if	NGE_FLASH_IO32
	case NGE_FLASH_READ:
	case NGE_FLASH_WRITE:
		return (IOC_INVAL);
#endif
	}
}