1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 #include <sys/stream.h> 27 #include <sys/strsun.h> 28 #include <sys/stat.h> 29 #include <sys/pci.h> 30 #include <sys/modctl.h> 31 #include <sys/kstat.h> 32 #include <sys/ethernet.h> 33 #include <sys/devops.h> 34 #include <sys/debug.h> 35 #include <sys/conf.h> 36 #include <sys/sysmacros.h> 37 #include <sys/dditypes.h> 38 #include <sys/ddi.h> 39 #include <sys/sunddi.h> 40 #include <sys/miiregs.h> 41 #include <sys/byteorder.h> 42 #include <sys/cyclic.h> 43 #include <sys/note.h> 44 #include <sys/crc32.h> 45 #include <sys/mac_provider.h> 46 #include <sys/mac_ether.h> 47 #include <sys/vlan.h> 48 #include <sys/errno.h> 49 #include <sys/sdt.h> 50 #include <sys/strsubr.h> 51 52 #include "bfe.h" 53 #include "bfe_hw.h" 54 55 56 /* 57 * Broadcom BCM4401 chipsets use two rings : 58 * 59 * - One TX : For sending packets down the wire. 60 * - One RX : For receving packets. 61 * 62 * Each ring can have any number of descriptors (configured during attach). 63 * As of now we configure only 128 descriptor per ring (TX/RX). 
Each descriptor 64 * has address (desc_addr) and control (desc_ctl) which holds a DMA buffer for 65 * the packet and control information (like start/end of frame or end of table). 66 * The descriptor table is allocated first and then a DMA buffer (for a packet) 67 * is allocated and linked to each descriptor. 68 * 69 * Each descriptor entry is bfe_desc_t structure in bfe. During TX/RX 70 * interrupt, the stat register will point to current descriptor being 71 * processed. 72 * 73 * Here's an example of TX and RX ring : 74 * 75 * TX: 76 * 77 * Base of the descriptor table is programmed using BFE_DMATX_CTRL control 78 * register. Each 'addr' points to DMA buffer (or packet data buffer) to 79 * be transmitted and 'ctl' has the length of the packet (usually MTU). 80 * 81 * ----------------------| 82 * | addr |Descriptor 0 | 83 * | ctl | | 84 * ----------------------| 85 * | addr |Descriptor 1 | SOF (start of the frame) 86 * | ctl | | 87 * ----------------------| 88 * | ... |Descriptor... | EOF (end of the frame) 89 * | ... | | 90 * ----------------------| 91 * | addr |Descritor 127 | 92 * | ctl | EOT | EOT (End of Table) 93 * ----------------------| 94 * 95 * 'r_curr_desc' : pointer to current descriptor which can be used to transmit 96 * a packet. 97 * 'r_avail_desc' : decremented whenever a packet is being sent. 98 * 'r_cons_desc' : incremented whenever a packet is sent down the wire and 99 * notified by an interrupt to bfe driver. 100 * 101 * RX: 102 * 103 * Base of the descriptor table is programmed using BFE_DMARX_CTRL control 104 * register. Each 'addr' points to DMA buffer (or packet data buffer). 'ctl' 105 * contains the size of the DMA buffer and all the DMA buffers are 106 * pre-allocated during attach and hence the maxmium size of the packet is 107 * also known (r_buf_len from the bfe_rint_t structure). During RX interrupt 108 * the packet length is embedded in bfe_header_t which is added by the 109 * chip in the beginning of the packet. 
 *
 * ----------------------|
 * | addr |Descriptor 0  |
 * | ctl  |              |
 * ----------------------|
 * | addr |Descriptor 1  |
 * | ctl  |              |
 * ----------------------|
 * | ...  |Descriptor... |
 * | ...  |              |
 * ----------------------|
 * | addr |Descriptor 127|
 * | ctl  | EOT          | EOT (End of Table)
 * ----------------------|
 *
 * 'r_curr_desc' : pointer to current descriptor while receiving a packet.
 *
 */

#define	MODULE_NAME	"bfe"

/*
 * Used for checking PHY (link state, speed).  Interval is in nanoseconds
 * (ddi_periodic_add(9F) units), i.e. the bfe_timeout() callback fires once
 * per second.
 */
#define	BFE_TIMEOUT_INTERVAL	(1000 * 1000 * 1000)


/*
 * Chip restart action and reason for restart.  These are OR-ed into
 * bfe_chip_action; BFE_ACTION_RESTART requests the restart itself and the
 * remaining bits record why it was requested.
 */
#define	BFE_ACTION_RESTART		0x1	/* For restarting the chip */
#define	BFE_ACTION_RESTART_SETPROP	0x2	/* restart due to setprop */
#define	BFE_ACTION_RESTART_FAULT	0x4	/* restart due to fault */
#define	BFE_ACTION_RESTART_PKT		0x8	/* restart due to pkt timeout */

static	char	bfe_ident[] = "bfe driver for Broadcom BCM4401 chipsets";

/*
 * Function Prototypes for bfe driver.
 */
static	int	bfe_check_link(bfe_t *);
static	void	bfe_report_link(bfe_t *);
static	void	bfe_chip_halt(bfe_t *);
static	void	bfe_chip_reset(bfe_t *);
static	void	bfe_tx_desc_init(bfe_ring_t *);
static	void	bfe_rx_desc_init(bfe_ring_t *);
static	void	bfe_set_rx_mode(bfe_t *);
static	void	bfe_enable_chip_intrs(bfe_t *);
static	void	bfe_chip_restart(bfe_t *);
static	void	bfe_init_vars(bfe_t *);
static	void	bfe_clear_stats(bfe_t *);
static	void	bfe_gather_stats(bfe_t *);
static	void	bfe_error(dev_info_t *, char *, ...);
static	int	bfe_mac_getprop(void *, const char *, mac_prop_id_t, uint_t,
    void *);
static	int	bfe_mac_setprop(void *, const char *, mac_prop_id_t, uint_t,
    const void *);
static	int	bfe_tx_reclaim(bfe_ring_t *);
int	bfe_mac_set_ether_addr(void *, const uint8_t *);


/*
 * Macros for ddi_dma_sync().  's' and 'l' are expressed in descriptor
 * entries and scaled to bytes here; 'd' is the sync direction flag
 * (DDI_DMA_SYNC_FORDEV / DDI_DMA_SYNC_FORKERNEL).
 */
#define	SYNC_DESC(r, s, l, d)	\
	(void) ddi_dma_sync(r->r_desc_dma_handle, \
	    (off_t)(s * sizeof (bfe_desc_t)), \
	    (size_t)(l * sizeof (bfe_desc_t)), \
	    d)

/* Sync 'l' bytes at offset 'b' of the s-th packet buffer of ring 'r'. */
#define	SYNC_BUF(r, s, b, l, d) \
	(void) ddi_dma_sync(r->r_buf_dma[s].handle, \
	    (off_t)(b), (size_t)(l), d)

/*
 * Supported Broadcom BCM4401 Cards (PCI vendor ID, device ID, name).
 */
static bfe_cards_t bfe_cards[] = {
	{ 0x14e4, 0x170c, "BCM4401 100Base-TX"},
};


/*
 * DMA attributes for device registers, packet data (buffer) and
 * descriptor table.
 */
static struct ddi_device_acc_attr bfe_dev_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,	/* registers are little-endian */
	DDI_STRICTORDER_ACC
};

static struct ddi_device_acc_attr bfe_buf_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,	/* native endianness */
	DDI_STRICTORDER_ACC
};

/*
 * DMA attributes for packet buffers: single cookie (sgllen 1), 8-byte
 * aligned, transfers capped at 0x1fff bytes.  The address window is bounded
 * by BFE_PCI_DMA (chip's DMA addressing limit, defined in bfe_hw.h).
 */
static ddi_dma_attr_t bfe_dma_attr_buf = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	BFE_PCI_DMA - 1,	/* dma_attr_addr_hi */
	0x1fff,			/* dma_attr_count_max */
	8,			/* dma_attr_align */
	0,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0x1fff,			/* dma_attr_maxxfer */
	BFE_PCI_DMA - 1,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

/*
 * DMA attributes for the descriptor tables; BFE_DESC_ALIGN alignment is a
 * hardware requirement for the ring base address.
 */
static ddi_dma_attr_t bfe_dma_attr_desc = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	BFE_PCI_DMA - 1,	/* dma_attr_addr_hi */
	BFE_PCI_DMA - 1,	/* dma_attr_count_max */
	BFE_DESC_ALIGN,		/* dma_attr_align */
	0,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	BFE_PCI_DMA - 1,	/* dma_attr_maxxfer */
	BFE_PCI_DMA - 1,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

/*
 * Ethernet broadcast addresses.
 */
static uchar_t bfe_broadcast[ETHERADDRL] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};

/*
 * Asserts that the caller holds every lock needed for exclusive access to
 * the chip (TX ring mutex plus the chip rwlock as writer); see
 * bfe_grab_locks().
 */
#define	ASSERT_ALL_LOCKS(bfe) {	\
	ASSERT(mutex_owned(&bfe->bfe_tx_ring.r_lock)); \
	ASSERT(rw_write_held(&bfe->bfe_rwlock)); \
}

/*
 * Debugging and error reporting code.
 */
static void
bfe_error(dev_info_t *dip, char *fmt, ...)
{
	va_list ap;
	char buf[256];

	va_start(ap, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
	va_end(ap);

	/* Prefix with driver name and instance when the devinfo is known. */
	if (dip) {
		cmn_err(CE_WARN, "%s%d: %s",
		    ddi_driver_name(dip), ddi_get_instance(dip), buf);
	} else {
		cmn_err(CE_WARN, "bfe: %s", buf);
	}
}

/*
 * Grabs all necessary locks to block any other operation on the chip.
 */
static void
bfe_grab_locks(bfe_t *bfe)
{
	bfe_ring_t *tx = &bfe->bfe_tx_ring;

	/*
	 * Grab all the locks.
	 * - bfe_rwlock : locks down whole chip including RX.
	 * - tx's r_lock : locks down only TX side.
	 */
	rw_enter(&bfe->bfe_rwlock, RW_WRITER);
	mutex_enter(&tx->r_lock);

	/*
	 * Note that we don't use RX's r_lock.
	 */
}

/*
 * Release lock on chip/driver.
 */
static void
bfe_release_locks(bfe_t *bfe)
{
	bfe_ring_t *tx = &bfe->bfe_tx_ring;

	/*
	 * Release all the locks in the order in which they were grabbed.
	 */
	mutex_exit(&tx->r_lock);
	rw_exit(&bfe->bfe_rwlock);
}


/*
 * It's used to make sure that the write to device register was successful.
310 */ 311 static int 312 bfe_wait_bit(bfe_t *bfe, uint32_t reg, uint32_t bit, 313 ulong_t t, const int clear) 314 { 315 ulong_t i; 316 uint32_t v; 317 318 for (i = 0; i < t; i++) { 319 v = INL(bfe, reg); 320 321 if (clear && !(v & bit)) 322 break; 323 324 if (!clear && (v & bit)) 325 break; 326 327 drv_usecwait(10); 328 } 329 330 /* if device still didn't see the value */ 331 if (i == t) 332 return (-1); 333 334 return (0); 335 } 336 337 /* 338 * PHY functions (read, write, stop, reset and startup) 339 */ 340 static int 341 bfe_read_phy(bfe_t *bfe, uint32_t reg) 342 { 343 OUTL(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII); 344 OUTL(bfe, BFE_MDIO_DATA, (BFE_MDIO_SB_START | 345 (BFE_MDIO_OP_READ << BFE_MDIO_OP_SHIFT) | 346 (bfe->bfe_phy_addr << BFE_MDIO_PMD_SHIFT) | 347 (reg << BFE_MDIO_RA_SHIFT) | 348 (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT))); 349 350 (void) bfe_wait_bit(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 10, 0); 351 352 return ((INL(bfe, BFE_MDIO_DATA) & BFE_MDIO_DATA_DATA)); 353 } 354 355 static void 356 bfe_write_phy(bfe_t *bfe, uint32_t reg, uint32_t val) 357 { 358 OUTL(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII); 359 OUTL(bfe, BFE_MDIO_DATA, (BFE_MDIO_SB_START | 360 (BFE_MDIO_OP_WRITE << BFE_MDIO_OP_SHIFT) | 361 (bfe->bfe_phy_addr << BFE_MDIO_PMD_SHIFT) | 362 (reg << BFE_MDIO_RA_SHIFT) | 363 (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT) | 364 (val & BFE_MDIO_DATA_DATA))); 365 366 (void) bfe_wait_bit(bfe, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 10, 0); 367 } 368 369 /* 370 * It resets the PHY layer. 
 */
static int
bfe_reset_phy(bfe_t *bfe)
{
	uint32_t i;

	/* Issue the reset, then poll until the PHY self-clears the bit. */
	bfe_write_phy(bfe, MII_CONTROL, MII_CONTROL_RESET);
	drv_usecwait(100);
	for (i = 0; i < 10; i++) {
		if (bfe_read_phy(bfe, MII_CONTROL) &
		    MII_CONTROL_RESET) {
			drv_usecwait(500);
			continue;
		}

		break;
	}

	if (i == 10) {
		bfe_error(bfe->bfe_dip, "Timeout waiting for PHY to reset");
		bfe->bfe_phy_state = BFE_PHY_RESET_TIMEOUT;
		return (BFE_FAILURE);
	}

	bfe->bfe_phy_state = BFE_PHY_RESET_DONE;

	return (BFE_SUCCESS);
}

/*
 * Make sure timer function is out of our way and especially during
 * detach.  ddi_periodic_delete() waits for an in-flight callback to finish.
 */
static void
bfe_stop_timer(bfe_t *bfe)
{
	if (bfe->bfe_periodic_id) {
		ddi_periodic_delete(bfe->bfe_periodic_id);
		bfe->bfe_periodic_id = NULL;
	}
}

/*
 * Stops the PHY: powers it down and isolates it, then marks the link
 * state unknown and notifies the MAC layer (if we are registered).
 */
static void
bfe_stop_phy(bfe_t *bfe)
{
	bfe_write_phy(bfe, MII_CONTROL, MII_CONTROL_PWRDN |
	    MII_CONTROL_ISOLATE);

	bfe->bfe_chip.link = LINK_STATE_UNKNOWN;
	bfe->bfe_chip.speed = 0;
	bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;

	bfe->bfe_phy_state = BFE_PHY_STOPPED;

	/*
	 * Report the link status to MAC layer.
	 */
	if (bfe->bfe_machdl != NULL)
		(void) bfe_report_link(bfe);
}

/*
 * Finds a responding PHY.  First tries the cached bfe_phy_addr, then scans
 * all 32 MII addresses; a live PHY answers MII_STATUS with something other
 * than all-ones or zero.  On success the PHY is taken out of any forced
 * mode by clearing MII_CONTROL.
 */
static int
bfe_probe_phy(bfe_t *bfe)
{
	int phy;
	uint32_t status;

	if (bfe->bfe_phy_addr) {
		status = bfe_read_phy(bfe, MII_STATUS);
		if (status != 0xffff && status != 0) {
			bfe_write_phy(bfe, MII_CONTROL, 0);
			return (BFE_SUCCESS);
		}
	}

	for (phy = 0; phy < 32; phy++) {
		bfe->bfe_phy_addr = phy;
		status = bfe_read_phy(bfe, MII_STATUS);
		if (status != 0xffff && status != 0) {
			bfe_write_phy(bfe, MII_CONTROL, 0);
			return (BFE_SUCCESS);
		}
	}

	return (BFE_FAILURE);
}

/*
 * This timeout function fires at BFE_TIMEOUT_INTERVAL to check the link
 * status.
 */
static void
bfe_timeout(void *arg)
{
	bfe_t *bfe = (bfe_t *)arg;
	int resched = 0;

	/*
	 * We don't grab any lock because bfe can't go away.
	 * untimeout() will wait for this timeout instance to complete.
	 */
	if (bfe->bfe_chip_action & BFE_ACTION_RESTART) {
		/*
		 * Restart the chip.  Requires the full (writer) lock set
		 * since the whole chip is reinitialized.
		 */
		bfe_grab_locks(bfe);
		bfe_chip_restart(bfe);
		bfe->bfe_chip_action &= ~BFE_ACTION_RESTART;
		bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_FAULT;
		bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_PKT;
		bfe_release_locks(bfe);
		mac_tx_update(bfe->bfe_machdl);
		/* Restart will register a new timeout */
		return;
	}

	rw_enter(&bfe->bfe_rwlock, RW_READER);

	if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
		hrtime_t hr;

		/*
		 * If a TX stall deadline was armed and has now passed,
		 * schedule a chip restart (handled on the next firing).
		 */
		hr = gethrtime();
		if (bfe->bfe_tx_stall_time != 0 &&
		    hr > bfe->bfe_tx_stall_time) {
			DTRACE_PROBE2(chip__restart, int, bfe->bfe_unit,
			    char *, "pkt timeout");
			bfe->bfe_chip_action |=
			    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_PKT);
			bfe->bfe_tx_stall_time = 0;
		}
	}

	if (bfe->bfe_phy_state == BFE_PHY_STARTED) {
		/*
		 * Report the link status to MAC layer if link status changed.
		 */
		if (bfe_check_link(bfe)) {
			bfe_report_link(bfe);
			if (bfe->bfe_chip.link == LINK_STATE_UP) {
				uint32_t val, flow;

				/* Reprogram duplex to match the new link. */
				val = INL(bfe, BFE_TX_CTRL);
				val &= ~BFE_TX_DUPLEX;
				if (bfe->bfe_chip.duplex == LINK_DUPLEX_FULL) {
					val |= BFE_TX_DUPLEX;
					/*
					 * Full duplex: disable RX flow
					 * control and the RX high watermark.
					 */
					flow = INL(bfe, BFE_RXCONF);
					flow &= ~BFE_RXCONF_FLOW;
					OUTL(bfe, BFE_RXCONF, flow);

					flow = INL(bfe, BFE_MAC_FLOW);
					flow &= ~(BFE_FLOW_RX_HIWAT);
					OUTL(bfe, BFE_MAC_FLOW, flow);
				}

				/* Link came up; kick MAC to resume TX. */
				resched = 1;

				OUTL(bfe, BFE_TX_CTRL, val);
				DTRACE_PROBE1(link__up,
				    int, bfe->bfe_unit);
			}
		}
	}

	rw_exit(&bfe->bfe_rwlock);

	/* Called after dropping the rwlock to avoid holding it in MAC. */
	if (resched)
		mac_tx_update(bfe->bfe_machdl);
}

/*
 * Starts PHY layer.
 */
static int
bfe_startup_phy(bfe_t *bfe)
{
	uint16_t bmsr, bmcr, anar;
	int prog, s;
	int phyid1, phyid2;

	if (bfe_probe_phy(bfe) == BFE_FAILURE) {
		bfe->bfe_phy_state = BFE_PHY_NOTFOUND;
		return (BFE_FAILURE);
	}

	(void) bfe_reset_phy(bfe);

	/* Cache the 32-bit PHY identifier (OUI/model/revision). */
	phyid1 = bfe_read_phy(bfe, MII_PHYIDH);
	phyid2 = bfe_read_phy(bfe, MII_PHYIDL);
	bfe->bfe_phy_id = (phyid1 << 16) | phyid2;

	bmsr = bfe_read_phy(bfe, MII_STATUS);
	anar = bfe_read_phy(bfe, MII_AN_ADVERT);

again:
	/* Start from a clean advertisement; bits are re-added below. */
	anar &= ~(MII_ABILITY_100BASE_T4 |
	    MII_ABILITY_100BASE_TX_FD | MII_ABILITY_100BASE_TX |
	    MII_ABILITY_10BASE_T_FD | MII_ABILITY_10BASE_T);

	/*
	 * Supported hardware modes are in bmsr.
	 */
	bfe->bfe_chip.bmsr = bmsr;

	/*
	 * Assume no capabilities are supported in the hardware.
	 */
	bfe->bfe_cap_aneg = bfe->bfe_cap_100T4 =
	    bfe->bfe_cap_100fdx = bfe->bfe_cap_100hdx =
	    bfe->bfe_cap_10fdx = bfe->bfe_cap_10hdx = 0;

	/*
	 * Assume property is set.
	 */
	s = 1;
	if (!(bfe->bfe_chip_action & BFE_ACTION_RESTART_SETPROP)) {
		/*
		 * Property is not set which means bfe_mac_setprop()
		 * is not called on us.
		 */
		s = 0;
	}

	bmcr = prog = 0;

	/*
	 * For each mode the hardware supports (per bmsr), record the
	 * capability and advertise it: unconditionally when no property was
	 * set (s == 0, defaults), otherwise only if the user enabled it via
	 * setprop.  'prog' counts the modes actually advertised.
	 */
	if (bmsr & MII_STATUS_100_BASEX_FD) {
		bfe->bfe_cap_100fdx = 1;
		if (s == 0) {
			anar |= MII_ABILITY_100BASE_TX_FD;
			bfe->bfe_adv_100fdx = 1;
			prog++;
		} else if (bfe->bfe_adv_100fdx) {
			anar |= MII_ABILITY_100BASE_TX_FD;
			prog++;
		}
	}

	if (bmsr & MII_STATUS_100_BASE_T4) {
		bfe->bfe_cap_100T4 = 1;
		if (s == 0) {
			anar |= MII_ABILITY_100BASE_T4;
			bfe->bfe_adv_100T4 = 1;
			prog++;
		} else if (bfe->bfe_adv_100T4) {
			anar |= MII_ABILITY_100BASE_T4;
			prog++;
		}
	}

	if (bmsr & MII_STATUS_100_BASEX) {
		bfe->bfe_cap_100hdx = 1;
		if (s == 0) {
			anar |= MII_ABILITY_100BASE_TX;
			bfe->bfe_adv_100hdx = 1;
			prog++;
		} else if (bfe->bfe_adv_100hdx) {
			anar |= MII_ABILITY_100BASE_TX;
			prog++;
		}
	}

	if (bmsr & MII_STATUS_10_FD) {
		bfe->bfe_cap_10fdx = 1;
		if (s == 0) {
			anar |= MII_ABILITY_10BASE_T_FD;
			bfe->bfe_adv_10fdx = 1;
			prog++;
		} else if (bfe->bfe_adv_10fdx) {
			anar |= MII_ABILITY_10BASE_T_FD;
			prog++;
		}
	}

	if (bmsr & MII_STATUS_10) {
		bfe->bfe_cap_10hdx = 1;
		if (s == 0) {
			anar |= MII_ABILITY_10BASE_T;
			bfe->bfe_adv_10hdx = 1;
			prog++;
		} else if (bfe->bfe_adv_10hdx) {
			anar |= MII_ABILITY_10BASE_T;
			prog++;
		}
	}

	if (bmsr & MII_STATUS_CANAUTONEG) {
		bfe->bfe_cap_aneg = 1;
		if (s == 0) {
			bfe->bfe_adv_aneg = 1;
		}
	}

	if (prog == 0) {
		if (s == 0) {
			/* Even the defaults produced nothing usable. */
			bfe_error(bfe->bfe_dip,
			    "No valid link mode selected. Powering down PHY");
			bfe_stop_phy(bfe);
			bfe_report_link(bfe);
			return (BFE_FAILURE);
		}

		/*
		 * If property is set then user would have goofed up. So we
		 * go back to default properties.
		 */
		bfe->bfe_chip_action &= ~BFE_ACTION_RESTART_SETPROP;
		goto again;
	}

	/*
	 * Either enable autonegotiation (restarting it), or force the best
	 * single mode that was advertised.
	 */
	if (bfe->bfe_adv_aneg && (bmsr & MII_STATUS_CANAUTONEG)) {
		bmcr = (MII_CONTROL_ANE | MII_CONTROL_RSAN);
	} else {
		if (bfe->bfe_adv_100fdx)
			bmcr = (MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
		else if (bfe->bfe_adv_100hdx)
			bmcr = MII_CONTROL_100MB;
		else if (bfe->bfe_adv_10fdx)
			bmcr = MII_CONTROL_FDUPLEX;
		else
			bmcr = 0;		/* 10HDX */
	}

	if (prog)
		bfe_write_phy(bfe, MII_AN_ADVERT, anar);

	if (bmcr)
		bfe_write_phy(bfe, MII_CONTROL, bmcr);

	bfe->bfe_mii_anar = anar;
	bfe->bfe_mii_bmcr = bmcr;
	bfe->bfe_phy_state = BFE_PHY_STARTED;

	/* Arm the periodic link-check callback on first startup only. */
	if (bfe->bfe_periodic_id == NULL) {
		bfe->bfe_periodic_id = ddi_periodic_add(bfe_timeout,
		    (void *)bfe, BFE_TIMEOUT_INTERVAL, DDI_IPL_0);

		DTRACE_PROBE1(first__timeout, int, bfe->bfe_unit);
	}

	DTRACE_PROBE4(phy_started, int, bfe->bfe_unit,
	    int, bmsr, int, bmcr, int, anar);

	return (BFE_SUCCESS);
}

/*
 * Reports link status back to MAC Layer.
 */
static void
bfe_report_link(bfe_t *bfe)
{
	mac_link_update(bfe->bfe_machdl, bfe->bfe_chip.link);
}

/*
 * Reads PHY/MII registers and get the link status for us.  Returns 1 when
 * speed, duplex or link state changed since the last call (caller reports
 * to MAC), 0 otherwise.
 */
static int
bfe_check_link(bfe_t *bfe)
{
	uint16_t bmsr, bmcr, anar, anlpar;
	int speed, duplex, link;

	/* Remember previous values so changes can be detected at the end. */
	speed = bfe->bfe_chip.speed;
	duplex = bfe->bfe_chip.duplex;
	link = bfe->bfe_chip.link;

	bmsr = bfe_read_phy(bfe, MII_STATUS);
	bfe->bfe_mii_bmsr = bmsr;

	bmcr = bfe_read_phy(bfe, MII_CONTROL);

	anar = bfe_read_phy(bfe, MII_AN_ADVERT);
	bfe->bfe_mii_anar = anar;

	anlpar = bfe_read_phy(bfe, MII_AN_LPABLE);
	bfe->bfe_mii_anlpar = anlpar;

	bfe->bfe_mii_exp = bfe_read_phy(bfe, MII_AN_EXPANSION);

	/*
	 * If exp register is not present in PHY.
	 */
	if (bfe->bfe_mii_exp == 0xffff) {
		bfe->bfe_mii_exp = 0;
	}

	if ((bmsr & MII_STATUS_LINKUP) == 0) {
		bfe->bfe_chip.link = LINK_STATE_DOWN;
		bfe->bfe_chip.speed = 0;
		bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
		goto done;
	}

	bfe->bfe_chip.link = LINK_STATE_UP;

	if (!(bmcr & MII_CONTROL_ANE)) {
		/* Forced mode: speed/duplex come straight from BMCR. */
		if (bmcr & MII_CONTROL_100MB)
			bfe->bfe_chip.speed = 100000000;
		else
			bfe->bfe_chip.speed = 10000000;

		if (bmcr & MII_CONTROL_FDUPLEX)
			bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
		else
			bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;

	} else if ((!(bmsr & MII_STATUS_CANAUTONEG)) ||
	    (!(bmsr & MII_STATUS_ANDONE))) {
		/* Autoneg enabled but not (yet) completed. */
		bfe->bfe_chip.speed = 0;
		bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
	} else if (anar & anlpar & MII_ABILITY_100BASE_TX_FD) {
		/*
		 * Resolve the negotiated mode from the intersection of what
		 * we advertised (anar) and what the partner offers (anlpar),
		 * in priority order: 100FDX, 100T4, 100HDX, 10FDX, 10HDX.
		 */
		bfe->bfe_chip.speed = 100000000;
		bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
	} else if (anar & anlpar & MII_ABILITY_100BASE_T4) {
		bfe->bfe_chip.speed = 100000000;
		bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
	} else if (anar & anlpar & MII_ABILITY_100BASE_TX) {
		bfe->bfe_chip.speed = 100000000;
		bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
	} else if (anar & anlpar & MII_ABILITY_10BASE_T_FD) {
		bfe->bfe_chip.speed = 10000000;
		bfe->bfe_chip.duplex = LINK_DUPLEX_FULL;
	} else if (anar & anlpar & MII_ABILITY_10BASE_T) {
		bfe->bfe_chip.speed = 10000000;
		bfe->bfe_chip.duplex = LINK_DUPLEX_HALF;
	} else {
		bfe->bfe_chip.speed = 0;
		bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;
	}

done:
	/*
	 * If speed or link status or duplex mode changed then report to
	 * MAC layer which is done by the caller.
	 */
	if (speed != bfe->bfe_chip.speed ||
	    duplex != bfe->bfe_chip.duplex ||
	    link != bfe->bfe_chip.link) {
		return (1);
	}

	return (0);
}

/*
 * Writes a 6-byte Ethernet address 'd' into CAM (address filter) slot
 * 'index' and waits for the CAM engine to go idle.
 */
static void
bfe_cam_write(bfe_t *bfe, uchar_t *d, int index)
{
	uint32_t v;

	/* Low word carries the last four octets of the address. */
	v = ((uint32_t)d[2] << 24);
	v |= ((uint32_t)d[3] << 16);
	v |= ((uint32_t)d[4] << 8);
	v |= (uint32_t)d[5];

	OUTL(bfe, BFE_CAM_DATA_LO, v);
	/* High word: valid bit plus the first two octets. */
	v = (BFE_CAM_HI_VALID |
	    (((uint32_t)d[0]) << 8) |
	    (((uint32_t)d[1])));

	OUTL(bfe, BFE_CAM_DATA_HI, v);
	OUTL(bfe, BFE_CAM_CTRL, (BFE_CAM_WRITE |
	    ((uint32_t)index << BFE_CAM_INDEX_SHIFT)));
	(void) bfe_wait_bit(bfe, BFE_CAM_CTRL, BFE_CAM_BUSY, 10, 1);
}

/*
 * Chip related functions (halt, reset, start).
 */
static void
bfe_chip_halt(bfe_t *bfe)
{
	/*
	 * Disables interrupts.
	 */
	OUTL(bfe, BFE_INTR_MASK, 0);
	FLUSH(bfe, BFE_INTR_MASK);

	OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_DISABLE);

	/*
	 * Wait until TX and RX finish their job.
	 */
	(void) bfe_wait_bit(bfe, BFE_ENET_CTRL, BFE_ENET_DISABLE, 20, 1);

	/*
	 * Disables DMA engine.
	 */
	OUTL(bfe, BFE_DMARX_CTRL, 0);
	OUTL(bfe, BFE_DMATX_CTRL, 0);

	drv_usecwait(10);

	bfe->bfe_chip_state = BFE_CHIP_HALT;
}

/*
 * Full chip restart: halt, reinitialize state, reset and re-enable.
 * Caller must hold all chip locks (see bfe_grab_locks()).
 */
static void
bfe_chip_restart(bfe_t *bfe)
{
	DTRACE_PROBE2(chip__restart, int, bfe->bfe_unit,
	    int, bfe->bfe_chip_action);

	/*
	 * Halt chip and PHY.
	 */
	bfe_chip_halt(bfe);
	bfe_stop_phy(bfe);
	bfe->bfe_chip_state = BFE_CHIP_STOPPED;

	/*
	 * Init variables.
	 */
	bfe_init_vars(bfe);

	/*
	 * Reset chip and start PHY.
	 */
	bfe_chip_reset(bfe);

	/*
	 * DMA descriptor rings.
	 */
	bfe_tx_desc_init(&bfe->bfe_tx_ring);
	bfe_rx_desc_init(&bfe->bfe_rx_ring);

	bfe->bfe_chip_state = BFE_CHIP_ACTIVE;
	bfe_set_rx_mode(bfe);
	bfe_enable_chip_intrs(bfe);
}

/*
 * Disables core by stopping the clock.
 */
static void
bfe_core_disable(bfe_t *bfe)
{
	/* Core already held in reset; nothing to do. */
	if ((INL(bfe, BFE_SBTMSLOW) & BFE_RESET))
		return;

	/* Reject new backplane transactions, wait for core to go idle. */
	OUTL(bfe, BFE_SBTMSLOW, (BFE_REJECT | BFE_CLOCK));
	(void) bfe_wait_bit(bfe, BFE_SBTMSLOW, BFE_REJECT, 100, 0);
	(void) bfe_wait_bit(bfe, BFE_SBTMSHIGH, BFE_BUSY, 100, 1);
	OUTL(bfe, BFE_SBTMSLOW, (BFE_FGC | BFE_CLOCK | BFE_REJECT | BFE_RESET));
	FLUSH(bfe, BFE_SBTMSLOW);
	drv_usecwait(10);
	/* Finally stop the clock, leaving the core in reset. */
	OUTL(bfe, BFE_SBTMSLOW, (BFE_REJECT | BFE_RESET));
	drv_usecwait(10);
}

/*
 * Resets core.
 */
static void
bfe_core_reset(bfe_t *bfe)
{
	uint32_t val;

	/*
	 * First disable the core.
	 */
	bfe_core_disable(bfe);

	/* Re-enable clock while asserting reset (with forced gating). */
	OUTL(bfe, BFE_SBTMSLOW, (BFE_RESET | BFE_CLOCK | BFE_FGC));
	FLUSH(bfe, BFE_SBTMSLOW);
	drv_usecwait(1);

	/* Clear any latched serror / backplane error conditions. */
	if (INL(bfe, BFE_SBTMSHIGH) & BFE_SERR)
		OUTL(bfe, BFE_SBTMSHIGH, 0);

	val = INL(bfe, BFE_SBIMSTATE);
	if (val & (BFE_IBE | BFE_TO))
		OUTL(bfe, BFE_SBIMSTATE, val & ~(BFE_IBE | BFE_TO));

	/* Deassert reset, then drop forced clock gating. */
	OUTL(bfe, BFE_SBTMSLOW, (BFE_CLOCK | BFE_FGC));
	FLUSH(bfe, BFE_SBTMSLOW);
	drv_usecwait(1);

	OUTL(bfe, BFE_SBTMSLOW, BFE_CLOCK);
	FLUSH(bfe, BFE_SBTMSLOW);
	drv_usecwait(1);
}

/*
 * Routes backplane interrupts for the given core(s) to PCI and enables
 * PCI prefetch/burst; temporarily remaps BAR0 to reach the sbtopci
 * registers.
 */
static void
bfe_setup_config(bfe_t *bfe, uint32_t cores)
{
	uint32_t bar_orig, val;

	/*
	 * Change bar0 window to map sbtopci registers.
963 */ 964 bar_orig = pci_config_get32(bfe->bfe_conf_handle, BFE_BAR0_WIN); 965 pci_config_put32(bfe->bfe_conf_handle, BFE_BAR0_WIN, BFE_REG_PCI); 966 967 /* Just read it and don't do anything */ 968 val = INL(bfe, BFE_SBIDHIGH) & BFE_IDH_CORE; 969 970 val = INL(bfe, BFE_SBINTVEC); 971 val |= cores; 972 OUTL(bfe, BFE_SBINTVEC, val); 973 974 val = INL(bfe, BFE_SSB_PCI_TRANS_2); 975 val |= BFE_SSB_PCI_PREF | BFE_SSB_PCI_BURST; 976 OUTL(bfe, BFE_SSB_PCI_TRANS_2, val); 977 978 /* 979 * Restore bar0 window mapping. 980 */ 981 pci_config_put32(bfe->bfe_conf_handle, BFE_BAR0_WIN, bar_orig); 982 } 983 984 /* 985 * Resets chip and starts PHY. 986 */ 987 static void 988 bfe_chip_reset(bfe_t *bfe) 989 { 990 uint32_t val; 991 992 /* Set the interrupt vector for the enet core */ 993 bfe_setup_config(bfe, BFE_INTVEC_ENET0); 994 995 /* check if core is up */ 996 val = INL(bfe, BFE_SBTMSLOW) & 997 (BFE_RESET | BFE_REJECT | BFE_CLOCK); 998 999 if (val == BFE_CLOCK) { 1000 OUTL(bfe, BFE_RCV_LAZY, 0); 1001 OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_DISABLE); 1002 (void) bfe_wait_bit(bfe, BFE_ENET_CTRL, 1003 BFE_ENET_DISABLE, 10, 1); 1004 OUTL(bfe, BFE_DMATX_CTRL, 0); 1005 FLUSH(bfe, BFE_DMARX_STAT); 1006 drv_usecwait(20000); /* 20 milli seconds */ 1007 if (INL(bfe, BFE_DMARX_STAT) & BFE_STAT_EMASK) { 1008 (void) bfe_wait_bit(bfe, BFE_DMARX_STAT, BFE_STAT_SIDLE, 1009 10, 0); 1010 } 1011 OUTL(bfe, BFE_DMARX_CTRL, 0); 1012 } 1013 1014 bfe_core_reset(bfe); 1015 bfe_clear_stats(bfe); 1016 1017 OUTL(bfe, BFE_MDIO_CTRL, 0x8d); 1018 val = INL(bfe, BFE_DEVCTRL); 1019 if (!(val & BFE_IPP)) 1020 OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_EPSEL); 1021 else if (INL(bfe, BFE_DEVCTRL & BFE_EPR)) { 1022 OUTL_AND(bfe, BFE_DEVCTRL, ~BFE_EPR); 1023 drv_usecwait(20000); /* 20 milli seconds */ 1024 } 1025 1026 OUTL_OR(bfe, BFE_MAC_CTRL, BFE_CTRL_CRC32_ENAB | BFE_CTRL_LED); 1027 1028 OUTL_AND(bfe, BFE_MAC_CTRL, ~BFE_CTRL_PDOWN); 1029 1030 OUTL(bfe, BFE_RCV_LAZY, ((1 << BFE_LAZY_FC_SHIFT) & 1031 BFE_LAZY_FC_MASK)); 1032 
	OUTL_OR(bfe, BFE_RCV_LAZY, 0);

	/* Program maximum frame sizes from the ring buffer lengths. */
	OUTL(bfe, BFE_RXMAXLEN, bfe->bfe_rx_ring.r_buf_len);
	OUTL(bfe, BFE_TXMAXLEN, bfe->bfe_tx_ring.r_buf_len);

	/* TX FIFO watermark (magic value; presumably from the Broadcom
	 * reference code — TODO confirm against the BCM4401 datasheet). */
	OUTL(bfe, BFE_TX_WMARK, 56);

	/* Program DMA channels */
	OUTL(bfe, BFE_DMATX_CTRL, BFE_TX_CTRL_ENABLE);

	/*
	 * DMA addresses need to be added to BFE_PCI_DMA
	 */
	OUTL(bfe, BFE_DMATX_ADDR,
	    bfe->bfe_tx_ring.r_desc_cookie.dmac_laddress + BFE_PCI_DMA);

	OUTL(bfe, BFE_DMARX_CTRL, (BFE_RX_OFFSET << BFE_RX_CTRL_ROSHIFT)
	    | BFE_RX_CTRL_ENABLE);

	OUTL(bfe, BFE_DMARX_ADDR,
	    bfe->bfe_rx_ring.r_desc_cookie.dmac_laddress + BFE_PCI_DMA);

	(void) bfe_startup_phy(bfe);

	bfe->bfe_chip_state = BFE_CHIP_INITIALIZED;
}

/*
 * It enables interrupts. Should be the last step while starting chip.
 */
static void
bfe_enable_chip_intrs(bfe_t *bfe)
{
	/* Enable the chip and core */
	OUTL(bfe, BFE_ENET_CTRL, BFE_ENET_ENABLE);

	/* Enable interrupts */
	OUTL(bfe, BFE_INTR_MASK, BFE_IMASK_DEF);
}

/*
 * Common code to take care of setting RX side mode (filter).
 */
static void
bfe_set_rx_mode(bfe_t *bfe)
{
	uint32_t val;
	int i;
	ether_addr_t mac[ETHERADDRL] = {0, 0, 0, 0, 0, 0};

	/*
	 * We don't touch RX filter if we were asked to suspend. It's fine
	 * if chip is not active (no interface is plumbed on us).
	 */
	if (bfe->bfe_chip_state == BFE_CHIP_SUSPENDED)
		return;

	val = INL(bfe, BFE_RXCONF);

	val &= ~BFE_RXCONF_PROMISC;
	val &= ~BFE_RXCONF_DBCAST;

	if ((bfe->bfe_chip_mode & BFE_RX_MODE_ENABLE) == 0) {
		/* RX disabled: just turn the CAM filter off. */
		OUTL(bfe, BFE_CAM_CTRL, 0);
		FLUSH(bfe, BFE_CAM_CTRL);
	} else if (bfe->bfe_chip_mode & BFE_RX_MODE_PROMISC) {
		val |= BFE_RXCONF_PROMISC;
		val &= ~BFE_RXCONF_DBCAST;
	} else {
		if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
			/* Flush everything */
			OUTL(bfe, BFE_RXCONF, val |
			    BFE_RXCONF_PROMISC | BFE_RXCONF_ALLMULTI);
			FLUSH(bfe, BFE_RXCONF);
		}

		/* Disable CAM */
		OUTL(bfe, BFE_CAM_CTRL, 0);
		FLUSH(bfe, BFE_CAM_CTRL);

		/*
		 * We receive all multicast packets.
		 */
		val |= BFE_RXCONF_ALLMULTI;

		/*
		 * Clear all CAM slots but the last, then install our unicast
		 * address in the final slot.  NOTE(review): 'mac' is declared
		 * as ether_addr_t[ETHERADDRL] — presumably larger than a
		 * single address; only the first 6 zero bytes are used via
		 * the (uchar_t *) cast, so behavior is unaffected.
		 */
		for (i = 0; i < BFE_MAX_MULTICAST_TABLE - 1; i++) {
			bfe_cam_write(bfe, (uchar_t *)mac, i);
		}

		bfe_cam_write(bfe, bfe->bfe_ether_addr, i);

		/* Enable CAM */
		OUTL_OR(bfe, BFE_CAM_CTRL, BFE_CAM_ENABLE);
		FLUSH(bfe, BFE_CAM_CTRL);
	}

	DTRACE_PROBE2(rx__mode__filter, int, bfe->bfe_unit,
	    int, val);

	OUTL(bfe, BFE_RXCONF, val);
	FLUSH(bfe, BFE_RXCONF);
}

/*
 * Reset various variable values to initial state.
 */
static void
bfe_init_vars(bfe_t *bfe)
{
	bfe->bfe_chip_mode = BFE_RX_MODE_ENABLE;

	/* Initial assumption */
	bfe->bfe_chip.link = LINK_STATE_UNKNOWN;
	bfe->bfe_chip.speed = 0;
	bfe->bfe_chip.duplex = LINK_DUPLEX_UNKNOWN;

	bfe->bfe_periodic_id = NULL;
	bfe->bfe_chip_state = BFE_CHIP_UNINITIALIZED;

	/* 0 means no TX-stall deadline armed (see bfe_timeout()). */
	bfe->bfe_tx_stall_time = 0;
}

/*
 * Initializes TX side descriptor entries (bfe_desc_t). Each descriptor entry
 * has control (desc_ctl) and address (desc_addr) member.
 */
static void
bfe_tx_desc_init(bfe_ring_t *r)
{
	int i;
	uint32_t v;

	for (i = 0; i < r->r_ndesc; i++) {
		/* Pre-program each entry's length; addr points at its DMA buffer. */
		PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_ctl),
		    (r->r_buf_dma[i].len & BFE_DESC_LEN));

		/*
		 * DMA addresses need to be added to BFE_PCI_DMA
		 */
		PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_addr),
		    (r->r_buf_dma[i].cookie.dmac_laddress + BFE_PCI_DMA));
	}

	/* Mark the last entry End-Of-Table so the chip wraps the ring. */
	v = GET_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl));
	PUT_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl),
	    v | BFE_DESC_EOT);

	(void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);

	r->r_curr_desc = 0;
	/*
	 * NOTE(review): assumes r_ndesc == TX_NUM_DESC for the TX ring —
	 * confirm against the attach-time ring configuration.
	 */
	r->r_avail_desc = TX_NUM_DESC;
	r->r_cons_desc = 0;
}

/*
 * Initializes RX side descriptor entries (bfe_desc_t). Each descriptor entry
 * has control (desc_ctl) and address (desc_addr) member.
 */
static void
bfe_rx_desc_init(bfe_ring_t *r)
{
	int i;
	uint32_t v;

	for (i = 0; i < r->r_ndesc; i++) {
		PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_ctl),
		    (r->r_buf_dma[i].len & BFE_DESC_LEN));

		PUT_DESC(r, (uint32_t *)&(r->r_desc[i].desc_addr),
		    (r->r_buf_dma[i].cookie.dmac_laddress + BFE_PCI_DMA));

		/* Initialize rx header (len, flags) */
		bzero(r->r_buf_dma[i].addr, sizeof (bfe_rx_header_t));

		(void) SYNC_BUF(r, i, 0, sizeof (bfe_rx_header_t),
		    DDI_DMA_SYNC_FORDEV);
	}

	v = GET_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl));
	PUT_DESC(r, (uint32_t *)&(r->r_desc[i - 1].desc_ctl),
	    v | BFE_DESC_EOT);

	(void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV);

	/* TAIL of RX Descriptor: i == r_ndesc here, i.e. one past the last. */
	OUTL(r->r_bfe, BFE_DMARX_PTR, ((i) * sizeof (bfe_desc_t)));

	r->r_curr_desc = 0;
	r->r_avail_desc = RX_NUM_DESC;
}

/*
 * Brings the chip (and PHY) fully up; caller holds all chip locks.
 */
static int
bfe_chip_start(bfe_t *bfe)
{
	ASSERT_ALL_LOCKS(bfe);

	/*
	 * Stop the chip
	 * first & then Reset the chip. At last enable interrupts.
	 */
	bfe_chip_halt(bfe);
	bfe_stop_phy(bfe);

	/*
	 * Reset chip and start PHY.
	 */
	bfe_chip_reset(bfe);

	/*
	 * Initialize Descriptor Rings.
	 */
	bfe_tx_desc_init(&bfe->bfe_tx_ring);
	bfe_rx_desc_init(&bfe->bfe_rx_ring);

	/* Mark active before programming RX mode / interrupts. */
	bfe->bfe_chip_state = BFE_CHIP_ACTIVE;
	bfe->bfe_chip_mode |= BFE_RX_MODE_ENABLE;
	bfe_set_rx_mode(bfe);
	bfe_enable_chip_intrs(bfe);

	/* Check link, speed and duplex mode */
	(void) bfe_check_link(bfe);

	return (DDI_SUCCESS);
}


/*
 * Clear chip statistics.
 */
static void
bfe_clear_stats(bfe_t *bfe)
{
	ulong_t r;

	/* Put the MIB engine into clear-on-read mode. */
	OUTL(bfe, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ);

	/*
	 * Stat registers are cleared by reading.
	 * Walk both the TX and RX MIB register banks (4-byte registers).
	 */
	for (r = BFE_TX_GOOD_O; r <= BFE_TX_PAUSE; r += 4)
		(void) INL(bfe, r);

	for (r = BFE_RX_GOOD_O; r <= BFE_RX_NPAUSE; r += 4)
		(void) INL(bfe, r);
}

/*
 * Collect chip statistics.
 */
static void
bfe_gather_stats(bfe_t *bfe)
{
	ulong_t r;
	uint32_t *v;
	uint32_t txerr = 0, rxerr = 0, coll = 0;

	/*
	 * Accumulate the TX MIB bank into bfe_hw_stats, one uint32_t member
	 * per register. NOTE(review): this pointer walk assumes the members
	 * of bfe_hw_stats starting at tx_good_octets are laid out
	 * contiguously in exact register order — verify against bfe.h.
	 */
	v = &bfe->bfe_hw_stats.tx_good_octets;
	for (r = BFE_TX_GOOD_O; r <= BFE_TX_PAUSE; r += 4) {
		*v += INL(bfe, r);
		v++;
	}

	/* Same walk for the RX MIB bank. */
	v = &bfe->bfe_hw_stats.rx_good_octets;
	for (r = BFE_RX_GOOD_O; r <= BFE_RX_NPAUSE; r += 4) {
		*v += INL(bfe, r);
		v++;
	}

	/*
	 * TX :
	 * -------
	 * tx_good_octets, tx_good_pkts, tx_octets
	 * tx_pkts, tx_broadcast_pkts, tx_multicast_pkts
	 * tx_len_64, tx_len_65_to_127, tx_len_128_to_255
	 * tx_len_256_to_511, tx_len_512_to_1023, tx_len_1024_to_max
	 * tx_jabber_pkts, tx_oversize_pkts, tx_fragment_pkts
	 * tx_underruns, tx_total_cols, tx_single_cols
	 * tx_multiple_cols, tx_excessive_cols, tx_late_cols
	 * tx_defered, tx_carrier_lost, tx_pause_pkts
	 *
	 * RX :
	 * -------
	 * rx_good_octets, rx_good_pkts, rx_octets
	 * rx_pkts, rx_broadcast_pkts, rx_multicast_pkts
	 * rx_len_64, rx_len_65_to_127, rx_len_128_to_255
	 * rx_len_256_to_511, rx_len_512_to_1023, rx_len_1024_to_max
	 * rx_jabber_pkts, rx_oversize_pkts, rx_fragment_pkts
	 * rx_missed_pkts, rx_crc_align_errs, rx_undersize
	 * rx_crc_errs, rx_align_errs, rx_symbol_errs
	 * rx_pause_pkts, rx_nonpause_pkts
	 */

	bfe->bfe_stats.ether_stat_carrier_errors =
	    bfe->bfe_hw_stats.tx_carrier_lost;

	/*
	 * Carrier loss is deliberately (it appears) not folded into the
	 * aggregate TX error count:
	 * txerr += bfe->bfe_hw_stats.tx_carrier_lost;
	 */

	bfe->bfe_stats.ether_stat_ex_collisions =
	    bfe->bfe_hw_stats.tx_excessive_cols;
	txerr += bfe->bfe_hw_stats.tx_excessive_cols;
	coll += bfe->bfe_hw_stats.tx_excessive_cols;

	bfe->bfe_stats.ether_stat_fcs_errors =
	    bfe->bfe_hw_stats.rx_crc_errs;
	rxerr += bfe->bfe_hw_stats.rx_crc_errs;

	bfe->bfe_stats.ether_stat_first_collisions =
	    bfe->bfe_hw_stats.tx_single_cols;
	coll += bfe->bfe_hw_stats.tx_single_cols;
	bfe->bfe_stats.ether_stat_multi_collisions =
	    bfe->bfe_hw_stats.tx_multiple_cols;
	coll += bfe->bfe_hw_stats.tx_multiple_cols;

	bfe->bfe_stats.ether_stat_toolong_errors =
	    bfe->bfe_hw_stats.rx_oversize_pkts;
	rxerr += bfe->bfe_hw_stats.rx_oversize_pkts;

	bfe->bfe_stats.ether_stat_tooshort_errors =
	    bfe->bfe_hw_stats.rx_undersize;
	rxerr += bfe->bfe_hw_stats.rx_undersize;

	bfe->bfe_stats.ether_stat_tx_late_collisions +=
	    bfe->bfe_hw_stats.tx_late_cols;

	bfe->bfe_stats.ether_stat_defer_xmts +=
	    bfe->bfe_hw_stats.tx_defered;

	/* Fold the per-category error tallies into the aggregates. */
	bfe->bfe_stats.ether_stat_macrcv_errors += rxerr;
	bfe->bfe_stats.ether_stat_macxmt_errors += txerr;

	bfe->bfe_stats.collisions += coll;
}

/*
 * Gets the state for dladm command and all.
 */
int
bfe_mac_getstat(void *arg, uint_t stat, uint64_t *val)
{
	bfe_t *bfe = (bfe_t *)arg;
	/*
	 * NOTE(review): 'v' is left uninitialized on the ENOTSUP branches
	 * below yet is still copied to *val at the end; callers presumably
	 * ignore *val when an error is returned — verify.
	 */
	uint64_t v;
	int err = 0;

	rw_enter(&bfe->bfe_rwlock, RW_READER);


	switch (stat) {
	default:
		err = ENOTSUP;
		break;

	case MAC_STAT_IFSPEED:
		/*
		 * MAC layer will ask for IFSPEED first and hence we
		 * collect it only once.
		 */
		if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) {
			/*
			 * Update stats from the hardware.
1391 */ 1392 bfe_gather_stats(bfe); 1393 } 1394 v = bfe->bfe_chip.speed; 1395 break; 1396 1397 case ETHER_STAT_ADV_CAP_100T4: 1398 v = bfe->bfe_adv_100T4; 1399 break; 1400 1401 case ETHER_STAT_ADV_CAP_100FDX: 1402 v = (bfe->bfe_mii_anar & MII_ABILITY_100BASE_TX_FD) != 0; 1403 break; 1404 1405 case ETHER_STAT_ADV_CAP_100HDX: 1406 v = (bfe->bfe_mii_anar & MII_ABILITY_100BASE_TX) != 0; 1407 break; 1408 1409 case ETHER_STAT_ADV_CAP_10FDX: 1410 v = (bfe->bfe_mii_anar & MII_ABILITY_10BASE_T_FD) != 0; 1411 break; 1412 1413 case ETHER_STAT_ADV_CAP_10HDX: 1414 v = (bfe->bfe_mii_anar & MII_ABILITY_10BASE_T) != 0; 1415 break; 1416 1417 case ETHER_STAT_ADV_CAP_ASMPAUSE: 1418 v = 0; 1419 break; 1420 1421 case ETHER_STAT_ADV_CAP_AUTONEG: 1422 v = bfe->bfe_adv_aneg; 1423 break; 1424 1425 case ETHER_STAT_ADV_CAP_PAUSE: 1426 v = (bfe->bfe_mii_anar & MII_ABILITY_PAUSE) != 0; 1427 break; 1428 1429 case ETHER_STAT_ADV_REMFAULT: 1430 v = (bfe->bfe_mii_anar & MII_AN_ADVERT_REMFAULT) != 0; 1431 break; 1432 1433 case ETHER_STAT_ALIGN_ERRORS: 1434 /* MIB */ 1435 v = bfe->bfe_stats.ether_stat_align_errors; 1436 break; 1437 1438 case ETHER_STAT_CAP_100T4: 1439 v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASE_T4) != 0; 1440 break; 1441 1442 case ETHER_STAT_CAP_100FDX: 1443 v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASEX_FD) != 0; 1444 break; 1445 1446 case ETHER_STAT_CAP_100HDX: 1447 v = (bfe->bfe_mii_bmsr & MII_STATUS_100_BASEX) != 0; 1448 break; 1449 1450 case ETHER_STAT_CAP_10FDX: 1451 v = (bfe->bfe_mii_bmsr & MII_STATUS_10_FD) != 0; 1452 break; 1453 1454 case ETHER_STAT_CAP_10HDX: 1455 v = (bfe->bfe_mii_bmsr & MII_STATUS_10) != 0; 1456 break; 1457 1458 case ETHER_STAT_CAP_ASMPAUSE: 1459 v = 0; 1460 break; 1461 1462 case ETHER_STAT_CAP_AUTONEG: 1463 v = ((bfe->bfe_mii_bmsr & MII_STATUS_CANAUTONEG) != 0); 1464 break; 1465 1466 case ETHER_STAT_CAP_PAUSE: 1467 v = 1; 1468 break; 1469 1470 case ETHER_STAT_CAP_REMFAULT: 1471 v = (bfe->bfe_mii_bmsr & MII_STATUS_REMFAULT) != 0; 1472 break; 1473 1474 
case ETHER_STAT_CARRIER_ERRORS: 1475 v = bfe->bfe_stats.ether_stat_carrier_errors; 1476 break; 1477 1478 case ETHER_STAT_JABBER_ERRORS: 1479 err = ENOTSUP; 1480 break; 1481 1482 case ETHER_STAT_DEFER_XMTS: 1483 v = bfe->bfe_stats.ether_stat_defer_xmts; 1484 break; 1485 1486 case ETHER_STAT_EX_COLLISIONS: 1487 /* MIB */ 1488 v = bfe->bfe_stats.ether_stat_ex_collisions; 1489 break; 1490 1491 case ETHER_STAT_FCS_ERRORS: 1492 /* MIB */ 1493 v = bfe->bfe_stats.ether_stat_fcs_errors; 1494 break; 1495 1496 case ETHER_STAT_FIRST_COLLISIONS: 1497 /* MIB */ 1498 v = bfe->bfe_stats.ether_stat_first_collisions; 1499 break; 1500 1501 case ETHER_STAT_LINK_ASMPAUSE: 1502 v = 0; 1503 break; 1504 1505 case ETHER_STAT_LINK_AUTONEG: 1506 v = (bfe->bfe_mii_bmcr & MII_CONTROL_ANE) != 0 && 1507 (bfe->bfe_mii_bmsr & MII_STATUS_ANDONE) != 0; 1508 break; 1509 1510 case ETHER_STAT_LINK_DUPLEX: 1511 v = bfe->bfe_chip.duplex; 1512 break; 1513 1514 case ETHER_STAT_LP_CAP_100T4: 1515 v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_T4) != 0; 1516 break; 1517 1518 case ETHER_STAT_LP_CAP_100FDX: 1519 v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_TX_FD) != 0; 1520 break; 1521 1522 case ETHER_STAT_LP_CAP_100HDX: 1523 v = (bfe->bfe_mii_anlpar & MII_ABILITY_100BASE_TX) != 0; 1524 break; 1525 1526 case ETHER_STAT_LP_CAP_10FDX: 1527 v = (bfe->bfe_mii_anlpar & MII_ABILITY_10BASE_T_FD) != 0; 1528 break; 1529 1530 case ETHER_STAT_LP_CAP_10HDX: 1531 v = (bfe->bfe_mii_anlpar & MII_ABILITY_10BASE_T) != 0; 1532 break; 1533 1534 case ETHER_STAT_LP_CAP_ASMPAUSE: 1535 v = 0; 1536 break; 1537 1538 case ETHER_STAT_LP_CAP_AUTONEG: 1539 v = (bfe->bfe_mii_exp & MII_AN_EXP_LPCANAN) != 0; 1540 break; 1541 1542 case ETHER_STAT_LP_CAP_PAUSE: 1543 v = (bfe->bfe_mii_anlpar & MII_ABILITY_PAUSE) != 0; 1544 break; 1545 1546 case ETHER_STAT_LP_REMFAULT: 1547 v = (bfe->bfe_mii_anlpar & MII_STATUS_REMFAULT) != 0; 1548 break; 1549 1550 case ETHER_STAT_MACRCV_ERRORS: 1551 v = bfe->bfe_stats.ether_stat_macrcv_errors; 1552 break; 
1553 1554 case ETHER_STAT_MACXMT_ERRORS: 1555 v = bfe->bfe_stats.ether_stat_macxmt_errors; 1556 break; 1557 1558 case ETHER_STAT_MULTI_COLLISIONS: 1559 v = bfe->bfe_stats.ether_stat_multi_collisions; 1560 break; 1561 1562 case ETHER_STAT_SQE_ERRORS: 1563 err = ENOTSUP; 1564 break; 1565 1566 case ETHER_STAT_TOOLONG_ERRORS: 1567 v = bfe->bfe_stats.ether_stat_toolong_errors; 1568 break; 1569 1570 case ETHER_STAT_TOOSHORT_ERRORS: 1571 v = bfe->bfe_stats.ether_stat_tooshort_errors; 1572 break; 1573 1574 case ETHER_STAT_TX_LATE_COLLISIONS: 1575 v = bfe->bfe_stats.ether_stat_tx_late_collisions; 1576 break; 1577 1578 case ETHER_STAT_XCVR_ADDR: 1579 v = bfe->bfe_phy_addr; 1580 break; 1581 1582 case ETHER_STAT_XCVR_ID: 1583 v = bfe->bfe_phy_id; 1584 break; 1585 1586 case MAC_STAT_BRDCSTRCV: 1587 v = bfe->bfe_stats.brdcstrcv; 1588 break; 1589 1590 case MAC_STAT_BRDCSTXMT: 1591 v = bfe->bfe_stats.brdcstxmt; 1592 break; 1593 1594 case MAC_STAT_MULTIXMT: 1595 v = bfe->bfe_stats.multixmt; 1596 break; 1597 1598 case MAC_STAT_COLLISIONS: 1599 v = bfe->bfe_stats.collisions; 1600 break; 1601 1602 case MAC_STAT_IERRORS: 1603 v = bfe->bfe_stats.ierrors; 1604 break; 1605 1606 case MAC_STAT_IPACKETS: 1607 v = bfe->bfe_stats.ipackets; 1608 break; 1609 1610 case MAC_STAT_MULTIRCV: 1611 v = bfe->bfe_stats.multircv; 1612 break; 1613 1614 case MAC_STAT_NORCVBUF: 1615 v = bfe->bfe_stats.norcvbuf; 1616 break; 1617 1618 case MAC_STAT_NOXMTBUF: 1619 v = bfe->bfe_stats.noxmtbuf; 1620 break; 1621 1622 case MAC_STAT_OBYTES: 1623 v = bfe->bfe_stats.obytes; 1624 break; 1625 1626 case MAC_STAT_OERRORS: 1627 /* MIB */ 1628 v = bfe->bfe_stats.ether_stat_macxmt_errors; 1629 break; 1630 1631 case MAC_STAT_OPACKETS: 1632 v = bfe->bfe_stats.opackets; 1633 break; 1634 1635 case MAC_STAT_RBYTES: 1636 v = bfe->bfe_stats.rbytes; 1637 break; 1638 1639 case MAC_STAT_UNDERFLOWS: 1640 v = bfe->bfe_stats.underflows; 1641 break; 1642 1643 case MAC_STAT_OVERFLOWS: 1644 v = bfe->bfe_stats.overflows; 1645 break; 1646 } 

	rw_exit(&bfe->bfe_rwlock);

	*val = v;
	return (err);
}

/*
 * mac_callbacks getprop entry point: reports current/advertised link
 * properties. NOTE(review): reads shared bfe fields without taking
 * bfe_rwlock — presumably acceptable for these scalar reads; verify.
 */
int
bfe_mac_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
    void *val)
{
	bfe_t *bfe = (bfe_t *)arg;
	int err = 0;

	switch (num) {
	case MAC_PROP_DUPLEX:
		ASSERT(sz >= sizeof (link_duplex_t));
		bcopy(&bfe->bfe_chip.duplex, val, sizeof (link_duplex_t));
		break;

	case MAC_PROP_SPEED:
		ASSERT(sz >= sizeof (uint64_t));
		bcopy(&bfe->bfe_chip.speed, val, sizeof (uint64_t));
		break;

	case MAC_PROP_AUTONEG:
		*(uint8_t *)val = bfe->bfe_adv_aneg;
		break;

	/*
	 * For each speed/duplex capability the ADV_ and EN_ properties
	 * report the same bfe_adv_* flag.
	 */
	case MAC_PROP_ADV_100FDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100fdx;
		break;

	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100fdx;
		break;

	case MAC_PROP_ADV_100HDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100hdx;
		break;

	case MAC_PROP_EN_100HDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100hdx;
		break;

	case MAC_PROP_ADV_10FDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_10fdx;
		break;

	case MAC_PROP_EN_10FDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_10fdx;
		break;

	case MAC_PROP_ADV_10HDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_10hdx;
		break;

	case MAC_PROP_EN_10HDX_CAP:
		*(uint8_t *)val = bfe->bfe_adv_10hdx;
		break;

	case MAC_PROP_ADV_100T4_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100T4;
		break;

	case MAC_PROP_EN_100T4_CAP:
		*(uint8_t *)val = bfe->bfe_adv_100T4;
		break;

	default:
		err = ENOTSUP;
	}

	return (err);
}


/*
 * mac_callbacks propinfo entry point: marks read-only properties and
 * supplies defaults for the settable EN_* capabilities.
 */
static void
bfe_mac_propinfo(void *arg, const char *name, mac_prop_id_t num,
    mac_prop_info_handle_t prh)
{
	bfe_t *bfe = (bfe_t *)arg;

	switch (num) {
	case MAC_PROP_DUPLEX:
	case MAC_PROP_SPEED:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case
MAC_PROP_ADV_10FDX_CAP: 1736 case MAC_PROP_ADV_10HDX_CAP: 1737 case MAC_PROP_ADV_100T4_CAP: 1738 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 1739 break; 1740 1741 case MAC_PROP_AUTONEG: 1742 mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_aneg); 1743 break; 1744 1745 case MAC_PROP_EN_100FDX_CAP: 1746 mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_100fdx); 1747 break; 1748 1749 case MAC_PROP_EN_100HDX_CAP: 1750 mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_100hdx); 1751 break; 1752 1753 case MAC_PROP_EN_10FDX_CAP: 1754 mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_10fdx); 1755 break; 1756 1757 case MAC_PROP_EN_10HDX_CAP: 1758 mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_10hdx); 1759 break; 1760 1761 case MAC_PROP_EN_100T4_CAP: 1762 mac_prop_info_set_default_uint8(prh, bfe->bfe_cap_100T4); 1763 break; 1764 } 1765 } 1766 1767 1768 /*ARGSUSED*/ 1769 int 1770 bfe_mac_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz, 1771 const void *val) 1772 { 1773 bfe_t *bfe = (bfe_t *)arg; 1774 uint8_t *advp; 1775 uint8_t *capp; 1776 int r = 0; 1777 1778 switch (num) { 1779 case MAC_PROP_EN_100FDX_CAP: 1780 advp = &bfe->bfe_adv_100fdx; 1781 capp = &bfe->bfe_cap_100fdx; 1782 break; 1783 1784 case MAC_PROP_EN_100HDX_CAP: 1785 advp = &bfe->bfe_adv_100hdx; 1786 capp = &bfe->bfe_cap_100hdx; 1787 break; 1788 1789 case MAC_PROP_EN_10FDX_CAP: 1790 advp = &bfe->bfe_adv_10fdx; 1791 capp = &bfe->bfe_cap_10fdx; 1792 break; 1793 1794 case MAC_PROP_EN_10HDX_CAP: 1795 advp = &bfe->bfe_adv_10hdx; 1796 capp = &bfe->bfe_cap_10hdx; 1797 break; 1798 1799 case MAC_PROP_EN_100T4_CAP: 1800 advp = &bfe->bfe_adv_100T4; 1801 capp = &bfe->bfe_cap_100T4; 1802 break; 1803 1804 case MAC_PROP_AUTONEG: 1805 advp = &bfe->bfe_adv_aneg; 1806 capp = &bfe->bfe_cap_aneg; 1807 break; 1808 1809 default: 1810 return (ENOTSUP); 1811 } 1812 1813 if (*capp == 0) 1814 return (ENOTSUP); 1815 1816 bfe_grab_locks(bfe); 1817 1818 if (*advp != *(const uint8_t *)val) { 1819 *advp = *(const 
uint8_t *)val; 1820 1821 bfe->bfe_chip_action = BFE_ACTION_RESTART_SETPROP; 1822 if (bfe->bfe_chip_state == BFE_CHIP_ACTIVE) { 1823 /* 1824 * We need to stop the timer before grabbing locks 1825 * otherwise we can land-up in deadlock with untimeout. 1826 */ 1827 bfe_stop_timer(bfe); 1828 1829 bfe->bfe_chip_action |= BFE_ACTION_RESTART; 1830 1831 bfe_chip_restart(bfe); 1832 1833 /* 1834 * We leave SETPROP because properties can be 1835 * temporary. 1836 */ 1837 bfe->bfe_chip_action &= ~(BFE_ACTION_RESTART); 1838 r = 1; 1839 } 1840 } 1841 1842 bfe_release_locks(bfe); 1843 1844 /* kick-off a potential stopped downstream */ 1845 if (r) 1846 mac_tx_update(bfe->bfe_machdl); 1847 1848 return (0); 1849 } 1850 1851 1852 int 1853 bfe_mac_set_ether_addr(void *arg, const uint8_t *ea) 1854 { 1855 bfe_t *bfe = (bfe_t *)arg; 1856 1857 bfe_grab_locks(bfe); 1858 bcopy(ea, bfe->bfe_ether_addr, ETHERADDRL); 1859 bfe_set_rx_mode(bfe); 1860 bfe_release_locks(bfe); 1861 return (0); 1862 } 1863 1864 int 1865 bfe_mac_start(void *arg) 1866 { 1867 bfe_t *bfe = (bfe_t *)arg; 1868 1869 bfe_grab_locks(bfe); 1870 if (bfe_chip_start(bfe) == DDI_FAILURE) { 1871 bfe_release_locks(bfe); 1872 return (EINVAL); 1873 } 1874 1875 bfe_release_locks(bfe); 1876 1877 mac_tx_update(bfe->bfe_machdl); 1878 1879 return (0); 1880 } 1881 1882 void 1883 bfe_mac_stop(void *arg) 1884 { 1885 bfe_t *bfe = (bfe_t *)arg; 1886 1887 /* 1888 * We need to stop the timer before grabbing locks otherwise 1889 * we can land-up in deadlock with untimeout. 1890 */ 1891 bfe_stop_timer(bfe); 1892 1893 bfe_grab_locks(bfe); 1894 1895 /* 1896 * First halt the chip by disabling interrupts. 1897 */ 1898 bfe_chip_halt(bfe); 1899 bfe_stop_phy(bfe); 1900 1901 bfe->bfe_chip_state = BFE_CHIP_STOPPED; 1902 1903 /* 1904 * This will leave the PHY running. 1905 */ 1906 bfe_chip_reset(bfe); 1907 1908 /* 1909 * Disable RX register. 
1910 */ 1911 bfe->bfe_chip_mode &= ~BFE_RX_MODE_ENABLE; 1912 bfe_set_rx_mode(bfe); 1913 1914 bfe_release_locks(bfe); 1915 } 1916 1917 /* 1918 * Send a packet down the wire. 1919 */ 1920 static int 1921 bfe_send_a_packet(bfe_t *bfe, mblk_t *mp) 1922 { 1923 bfe_ring_t *r = &bfe->bfe_tx_ring; 1924 uint32_t cur = r->r_curr_desc; 1925 uint32_t next; 1926 size_t pktlen = msgsize(mp); 1927 uchar_t *buf; 1928 uint32_t v; 1929 1930 ASSERT(MUTEX_HELD(&r->r_lock)); 1931 ASSERT(mp != NULL); 1932 1933 if (pktlen > r->r_buf_len) { 1934 freemsg(mp); 1935 return (BFE_SUCCESS); 1936 } 1937 1938 /* 1939 * There is a big reason why we don't check for '0'. It becomes easy 1940 * for us to not roll over the ring since we are based on producer (tx) 1941 * and consumer (reclaim by an interrupt) model. Especially when we 1942 * run out of TX descriptor, chip will send a single interrupt and 1943 * both producer and consumer counter will be same. So we keep a 1944 * difference of 1 always. 1945 */ 1946 if (r->r_avail_desc <= 1) { 1947 bfe->bfe_stats.noxmtbuf++; 1948 bfe->bfe_tx_resched = 1; 1949 return (BFE_FAILURE); 1950 } 1951 1952 /* 1953 * Get the DMA buffer to hold packet. 1954 */ 1955 buf = (uchar_t *)r->r_buf_dma[cur].addr; 1956 1957 mcopymsg(mp, buf); /* it also frees mp */ 1958 1959 /* 1960 * Gather statistics. 1961 */ 1962 if (buf[0] & 0x1) { 1963 if (bcmp(buf, bfe_broadcast, ETHERADDRL) != 0) 1964 bfe->bfe_stats.multixmt++; 1965 else 1966 bfe->bfe_stats.brdcstxmt++; 1967 } 1968 bfe->bfe_stats.opackets++; 1969 bfe->bfe_stats.obytes += pktlen; 1970 1971 1972 /* 1973 * Program the DMA descriptor (start and end of frame are same). 
1974 */ 1975 next = cur; 1976 v = (pktlen & BFE_DESC_LEN) | BFE_DESC_IOC | BFE_DESC_SOF | 1977 BFE_DESC_EOF; 1978 1979 if (cur == (TX_NUM_DESC - 1)) 1980 v |= BFE_DESC_EOT; 1981 1982 PUT_DESC(r, (uint32_t *)&(r->r_desc[cur].desc_ctl), v); 1983 1984 /* 1985 * DMA addresses need to be added to BFE_PCI_DMA 1986 */ 1987 PUT_DESC(r, (uint32_t *)&(r->r_desc[cur].desc_addr), 1988 (r->r_buf_dma[cur].cookie.dmac_laddress + BFE_PCI_DMA)); 1989 1990 /* 1991 * Sync the packet data for the device. 1992 */ 1993 (void) SYNC_BUF(r, cur, 0, pktlen, DDI_DMA_SYNC_FORDEV); 1994 1995 /* Move to next descriptor slot */ 1996 BFE_INC_SLOT(next, TX_NUM_DESC); 1997 1998 (void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV); 1999 2000 r->r_curr_desc = next; 2001 2002 /* 2003 * The order should be 1,2,3,... for BFE_DMATX_PTR if 0,1,2,3,... 2004 * descriptor slot are being programmed. 2005 */ 2006 OUTL(bfe, BFE_DMATX_PTR, next * sizeof (bfe_desc_t)); 2007 FLUSH(bfe, BFE_DMATX_PTR); 2008 2009 r->r_avail_desc--; 2010 2011 /* 2012 * Let timeout know that it must reset the chip if a 2013 * packet is not sent down the wire for more than 5 seconds. 
2014 */ 2015 bfe->bfe_tx_stall_time = gethrtime() + (5 * 1000000000ULL); 2016 2017 return (BFE_SUCCESS); 2018 } 2019 2020 mblk_t * 2021 bfe_mac_transmit_packet(void *arg, mblk_t *mp) 2022 { 2023 bfe_t *bfe = (bfe_t *)arg; 2024 bfe_ring_t *r = &bfe->bfe_tx_ring; 2025 mblk_t *nmp; 2026 2027 mutex_enter(&r->r_lock); 2028 2029 if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) { 2030 DTRACE_PROBE1(tx__chip__not__active, int, bfe->bfe_unit); 2031 2032 freemsgchain(mp); 2033 mutex_exit(&r->r_lock); 2034 return (NULL); 2035 } 2036 2037 2038 while (mp != NULL) { 2039 nmp = mp->b_next; 2040 mp->b_next = NULL; 2041 2042 if (bfe_send_a_packet(bfe, mp) == BFE_FAILURE) { 2043 mp->b_next = nmp; 2044 break; 2045 } 2046 mp = nmp; 2047 } 2048 2049 mutex_exit(&r->r_lock); 2050 2051 return (mp); 2052 } 2053 2054 int 2055 bfe_mac_set_promisc(void *arg, boolean_t promiscflag) 2056 { 2057 bfe_t *bfe = (bfe_t *)arg; 2058 2059 bfe_grab_locks(bfe); 2060 if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) { 2061 bfe_release_locks(bfe); 2062 return (EIO); 2063 } 2064 2065 if (promiscflag) { 2066 /* Set Promiscous on */ 2067 bfe->bfe_chip_mode |= BFE_RX_MODE_PROMISC; 2068 } else { 2069 bfe->bfe_chip_mode &= ~BFE_RX_MODE_PROMISC; 2070 } 2071 2072 bfe_set_rx_mode(bfe); 2073 bfe_release_locks(bfe); 2074 2075 return (0); 2076 } 2077 2078 int 2079 bfe_mac_set_multicast(void *arg, boolean_t add, const uint8_t *macaddr) 2080 { 2081 /* 2082 * It was too much of pain to implement multicast in CAM. Instead 2083 * we never disable multicast filter. 
 */
	return (0);
}

/* GLDv3 callback vector registered with mac_register(9F). */
static mac_callbacks_t bfe_mac_callbacks = {
	MC_SETPROP | MC_GETPROP | MC_PROPINFO,
	bfe_mac_getstat,	/* gets stats */
	bfe_mac_start,		/* starts mac */
	bfe_mac_stop,		/* stops mac */
	bfe_mac_set_promisc,	/* sets promisc mode for snoop */
	bfe_mac_set_multicast,	/* multicast implementation */
	bfe_mac_set_ether_addr,	/* sets ethernet address (unicast) */
	bfe_mac_transmit_packet, /* transmits packet */
	NULL,
	NULL,			/* ioctl */
	NULL,			/* getcap */
	NULL,			/* open */
	NULL,			/* close */
	bfe_mac_setprop,
	bfe_mac_getprop,
	bfe_mac_propinfo
};

/*
 * Classifies an error interrupt, bumps the matching statistic, records the
 * restart action in bfe_chip_action and halts the chip (except for TX FIFO
 * underflow, which is counted and ignored). The actual restart is performed
 * elsewhere.
 */
static void
bfe_error_handler(bfe_t *bfe, int intr_mask)
{
	uint32_t v;

	/* RX FIFO overflow: restart required. */
	if (intr_mask & BFE_ISTAT_RFO) {
		bfe->bfe_stats.overflows++;
		bfe->bfe_chip_action |=
		    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
		goto action;
	}

	/* TX FIFO underflow: counted only; no halt, no restart. */
	if (intr_mask & BFE_ISTAT_TFU) {
		bfe->bfe_stats.underflows++;
		return;
	}

	/* Descriptor Protocol Error */
	if (intr_mask & BFE_ISTAT_DPE) {
		bfe_error(bfe->bfe_dip,
		    "Descriptor Protocol Error. Halting Chip");
		bfe->bfe_chip_action |=
		    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
		goto action;
	}

	/*
	 * Descriptor Error.
	 * NOTE(review): 'halt' is not declared anywhere in this function —
	 * presumably a file-scope tunable defined earlier in the file;
	 * verify it exists and is intended here.
	 */
	if (intr_mask & BFE_ISTAT_DSCE && halt == 0) {
		bfe_error(bfe->bfe_dip, "Descriptor Error. Restarting Chip");
		goto action;
	}

	/* Receive Descr. Underflow */
	if (intr_mask & BFE_ISTAT_RDU) {
		bfe_error(bfe->bfe_dip,
		    "Receive Descriptor Underflow. Restarting Chip");
		bfe->bfe_stats.ether_stat_macrcv_errors++;
		bfe->bfe_chip_action |=
		    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);
		goto action;
	}

	v = INL(bfe, BFE_DMATX_STAT);

	/* Error while sending a packet */
	if (v & BFE_STAT_EMASK) {
		bfe->bfe_stats.ether_stat_macxmt_errors++;
		bfe_error(bfe->bfe_dip,
		    "Error while sending a packet. Restarting Chip");
	}

	/* Error while receiving a packet */
	v = INL(bfe, BFE_DMARX_STAT);
	if (v & BFE_RX_FLAG_ERRORS) {
		bfe->bfe_stats.ierrors++;
		bfe_error(bfe->bfe_dip,
		    "Error while receiving a packet. Restarting Chip");
	}


	/* Unclassified error: request a restart anyway. */
	bfe->bfe_chip_action |=
	    (BFE_ACTION_RESTART | BFE_ACTION_RESTART_FAULT);

action:
	bfe_chip_halt(bfe);
}

/*
 * It will recycle a RX descriptor slot: zero the RX header the chip writes,
 * sync it for the device and re-program the descriptor's ctl/addr words.
 */
static void
bfe_rx_desc_buf_reinit(bfe_t *bfe, uint_t slot)
{
	bfe_ring_t *r = &bfe->bfe_rx_ring;
	uint32_t v;

	/* Defensive: keep the slot index inside the ring. */
	slot %= RX_NUM_DESC;

	/* Clear the chip-written header so stale len/flags can't be reused. */
	bzero(r->r_buf_dma[slot].addr, sizeof (bfe_rx_header_t));

	(void) SYNC_BUF(r, slot, 0, BFE_RX_OFFSET, DDI_DMA_SYNC_FORDEV);

	v = r->r_buf_dma[slot].len & BFE_DESC_LEN;
	/* Last ring slot must carry End Of Table so the chip wraps. */
	if (slot == (RX_NUM_DESC - 1))
		v |= BFE_DESC_EOT;

	PUT_DESC(r, (uint32_t *)&(r->r_desc[slot].desc_ctl), v);

	/*
	 * DMA addresses need to be added to BFE_PCI_DMA
	 */
	PUT_DESC(r, (uint32_t *)&(r->r_desc[slot].desc_addr),
	    (r->r_buf_dma[slot].cookie.dmac_laddress + BFE_PCI_DMA));
}

/*
 * Gets called from interrupt context to handle RX interrupt.
2204 */ 2205 static mblk_t * 2206 bfe_receive(bfe_t *bfe, int intr_mask) 2207 { 2208 int rxstat, current; 2209 mblk_t *mp = NULL, *rx_head, *rx_tail; 2210 uchar_t *rx_header; 2211 uint16_t len; 2212 uchar_t *bp; 2213 bfe_ring_t *r = &bfe->bfe_rx_ring; 2214 int i; 2215 2216 rxstat = INL(bfe, BFE_DMARX_STAT); 2217 current = (rxstat & BFE_STAT_CDMASK) / sizeof (bfe_desc_t); 2218 i = r->r_curr_desc; 2219 2220 rx_head = rx_tail = NULL; 2221 2222 DTRACE_PROBE3(receive, int, bfe->bfe_unit, 2223 int, r->r_curr_desc, 2224 int, current); 2225 2226 for (i = r->r_curr_desc; i != current; 2227 BFE_INC_SLOT(i, RX_NUM_DESC)) { 2228 2229 /* 2230 * Sync the buffer associated with the descriptor table entry. 2231 */ 2232 (void) SYNC_BUF(r, i, 0, r->r_buf_dma[i].len, 2233 DDI_DMA_SYNC_FORKERNEL); 2234 2235 rx_header = (void *)r->r_buf_dma[i].addr; 2236 2237 /* 2238 * We do this to make sure we are endian neutral. Chip is 2239 * big endian. 2240 * 2241 * The header looks like :- 2242 * 2243 * Offset 0 -> uint16_t len 2244 * Offset 2 -> uint16_t flags 2245 * Offset 4 -> uint16_t pad[12] 2246 */ 2247 len = (rx_header[1] << 8) | rx_header[0]; 2248 len -= 4; /* CRC bytes need to be removed */ 2249 2250 /* 2251 * Don't receive this packet if pkt length is greater than 2252 * MTU + VLAN_TAGSZ. 
2253 */ 2254 if (len > r->r_buf_len) { 2255 /* Recycle slot for later use */ 2256 bfe_rx_desc_buf_reinit(bfe, i); 2257 continue; 2258 } 2259 2260 if ((mp = allocb(len + VLAN_TAGSZ, BPRI_MED)) != NULL) { 2261 mp->b_rptr += VLAN_TAGSZ; 2262 bp = mp->b_rptr; 2263 mp->b_wptr = bp + len; 2264 2265 /* sizeof (bfe_rx_header_t) + 2 */ 2266 bcopy(r->r_buf_dma[i].addr + 2267 BFE_RX_OFFSET, bp, len); 2268 2269 mp->b_next = NULL; 2270 if (rx_tail == NULL) 2271 rx_head = rx_tail = mp; 2272 else { 2273 rx_tail->b_next = mp; 2274 rx_tail = mp; 2275 } 2276 2277 /* Number of packets received so far */ 2278 bfe->bfe_stats.ipackets++; 2279 2280 /* Total bytes of packets received so far */ 2281 bfe->bfe_stats.rbytes += len; 2282 2283 if (bcmp(mp->b_rptr, bfe_broadcast, ETHERADDRL) == 0) 2284 bfe->bfe_stats.brdcstrcv++; 2285 else 2286 bfe->bfe_stats.multircv++; 2287 } else { 2288 bfe->bfe_stats.norcvbuf++; 2289 /* Recycle the slot for later use */ 2290 bfe_rx_desc_buf_reinit(bfe, i); 2291 break; 2292 } 2293 2294 /* 2295 * Reinitialize the current descriptor slot's buffer so that 2296 * it can be reused. 2297 */ 2298 bfe_rx_desc_buf_reinit(bfe, i); 2299 } 2300 2301 r->r_curr_desc = i; 2302 2303 (void) SYNC_DESC(r, 0, r->r_ndesc, DDI_DMA_SYNC_FORDEV); 2304 2305 return (rx_head); 2306 } 2307 2308 static int 2309 bfe_tx_reclaim(bfe_ring_t *r) 2310 { 2311 uint32_t cur, start; 2312 uint32_t v; 2313 2314 cur = INL(r->r_bfe, BFE_DMATX_STAT) & BFE_STAT_CDMASK; 2315 cur = cur / sizeof (bfe_desc_t); 2316 2317 /* 2318 * Start with the last descriptor consumed by the chip. 2319 */ 2320 start = r->r_cons_desc; 2321 2322 DTRACE_PROBE3(tx__reclaim, int, r->r_bfe->bfe_unit, 2323 int, start, 2324 int, cur); 2325 2326 /* 2327 * There will be at least one descriptor to process. 
 */
	while (start != cur) {
		/* One more TX slot is free for the producer. */
		r->r_avail_desc++;
		/* Re-arm the slot's ctl/addr for its next use. */
		v = r->r_buf_dma[start].len & BFE_DESC_LEN;
		if (start == (TX_NUM_DESC - 1))
			v |= BFE_DESC_EOT;

		PUT_DESC(r, (uint32_t *)&(r->r_desc[start].desc_ctl), v);
		PUT_DESC(r, (uint32_t *)&(r->r_desc[start].desc_addr),
		    (r->r_buf_dma[start].cookie.dmac_laddress + BFE_PCI_DMA));

		/* Move to next descriptor in TX ring */
		BFE_INC_SLOT(start, TX_NUM_DESC);
	}

	(void) ddi_dma_sync(r->r_desc_dma_handle,
	    0, (r->r_ndesc * sizeof (bfe_desc_t)),
	    DDI_DMA_SYNC_FORDEV);

	r->r_cons_desc = start; /* consumed pointer */
	/* Progress was made; disarm the TX stall watchdog. */
	r->r_bfe->bfe_tx_stall_time = 0;

	return (cur);
}

/*
 * TX-complete interrupt handling: reclaims sent descriptors under the ring
 * lock and reports (via return value) whether a previously blocked
 * transmitter should be rescheduled.
 */
static int
bfe_tx_done(bfe_t *bfe, int intr_mask)
{
	bfe_ring_t *r = &bfe->bfe_tx_ring;
	int resched = 0;

	mutex_enter(&r->r_lock);
	(void) bfe_tx_reclaim(r);

	/* Consume the "TX blocked" flag set by bfe_send_a_packet(). */
	if (bfe->bfe_tx_resched) {
		resched = 1;
		bfe->bfe_tx_resched = 0;
	}
	mutex_exit(&r->r_lock);

	return (resched);
}

/*
 * ISR for interrupt handling
 */
static uint_t
bfe_interrupt(caddr_t arg1, caddr_t arg2)
{
	bfe_t *bfe = (void *)arg1;
	uint32_t intr_stat;
	mblk_t *rx_head = NULL;
	int resched = 0;

	/*
	 * Grab the lock to avoid stopping the chip while this interrupt
	 * is handled.
	 */
	rw_enter(&bfe->bfe_rwlock, RW_READER);

	/*
	 * It's necessary to read intr stat again because masking interrupt
	 * register does not really mask interrupts coming from the chip.
	 */
	intr_stat = INL(bfe, BFE_INTR_STAT);
	intr_stat &= BFE_IMASK_DEF;
	/* Ack the interrupts we are about to handle; read back to flush. */
	OUTL(bfe, BFE_INTR_STAT, intr_stat);
	(void) INL(bfe, BFE_INTR_STAT);

	/* Not our interrupt (shared line). */
	if (intr_stat == 0) {
		rw_exit(&bfe->bfe_rwlock);
		return (DDI_INTR_UNCLAIMED);
	}

	DTRACE_PROBE2(bfe__interrupt, int, bfe->bfe_unit,
	    int, intr_stat);

	if (bfe->bfe_chip_state != BFE_CHIP_ACTIVE) {
		/*
		 * If chip is suspended then we just return.
		 */
		if (bfe->bfe_chip_state == BFE_CHIP_SUSPENDED) {
			rw_exit(&bfe->bfe_rwlock);
			DTRACE_PROBE1(interrupt__chip__is__suspend, int,
			    bfe->bfe_unit);
			return (DDI_INTR_CLAIMED);
		}

		/*
		 * Halt the chip again i.e basically disable interrupts.
		 */
		bfe_chip_halt(bfe);
		rw_exit(&bfe->bfe_rwlock);
		DTRACE_PROBE1(interrupt__chip__not__active, int,
		    bfe->bfe_unit);
		return (DDI_INTR_CLAIMED);
	}

	/* A packet was received */
	if (intr_stat & BFE_ISTAT_RX) {
		rx_head = bfe_receive(bfe, intr_stat);
	}

	/* A packet was sent down the wire */
	if (intr_stat & BFE_ISTAT_TX) {
		resched = bfe_tx_done(bfe, intr_stat);
	}

	/* There was an error */
	if (intr_stat & BFE_ISTAT_ERRORS) {
		bfe_error_handler(bfe, intr_stat);
	}

	rw_exit(&bfe->bfe_rwlock);

	/*
	 * Pass the list of packets received from chip to MAC layer.
	 * (Done after dropping the rwlock to avoid holding it across
	 * the upcall.)
	 */
	if (rx_head) {
		mac_rx(bfe->bfe_machdl, 0, rx_head);
	}

	/*
	 * Let the MAC start sending pkts to a potential stopped stream.
	 */
	if (resched)
		mac_tx_update(bfe->bfe_machdl);

	return (DDI_INTR_CLAIMED);
}

/*
 * Removes registered interrupt handler.
 */
static void
bfe_remove_intr(bfe_t *bfe)
{
	(void) ddi_intr_remove_handler(bfe->bfe_intrhdl);
	(void) ddi_intr_free(bfe->bfe_intrhdl);
}

/*
 * Add an interrupt for the driver.
2471 */ 2472 static int 2473 bfe_add_intr(bfe_t *bfe) 2474 { 2475 int nintrs = 1; 2476 int ret; 2477 2478 ret = ddi_intr_alloc(bfe->bfe_dip, &bfe->bfe_intrhdl, 2479 DDI_INTR_TYPE_FIXED, /* type */ 2480 0, /* inumber */ 2481 1, /* count */ 2482 &nintrs, /* actual nintrs */ 2483 DDI_INTR_ALLOC_STRICT); 2484 2485 if (ret != DDI_SUCCESS) { 2486 bfe_error(bfe->bfe_dip, "ddi_intr_alloc() failed" 2487 " : ret : %d", ret); 2488 return (DDI_FAILURE); 2489 } 2490 2491 ret = ddi_intr_add_handler(bfe->bfe_intrhdl, bfe_interrupt, bfe, NULL); 2492 if (ret != DDI_SUCCESS) { 2493 bfe_error(bfe->bfe_dip, "ddi_intr_add_handler() failed"); 2494 (void) ddi_intr_free(bfe->bfe_intrhdl); 2495 return (DDI_FAILURE); 2496 } 2497 2498 ret = ddi_intr_get_pri(bfe->bfe_intrhdl, &bfe->bfe_intrpri); 2499 if (ret != DDI_SUCCESS) { 2500 bfe_error(bfe->bfe_dip, "ddi_intr_get_pri() failed"); 2501 bfe_remove_intr(bfe); 2502 return (DDI_FAILURE); 2503 } 2504 2505 return (DDI_SUCCESS); 2506 } 2507 2508 2509 /* 2510 * Identify chipset family. 2511 */ 2512 static int 2513 bfe_identify_hardware(bfe_t *bfe) 2514 { 2515 uint16_t vid, did; 2516 int i; 2517 2518 vid = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_VENID); 2519 did = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_DEVID); 2520 2521 for (i = 0; i < (sizeof (bfe_cards) / sizeof (bfe_cards_t)); i++) { 2522 if (bfe_cards[i].vendor_id == vid && 2523 bfe_cards[i].device_id == did) { 2524 return (BFE_SUCCESS); 2525 } 2526 } 2527 2528 bfe_error(bfe->bfe_dip, "bfe driver is attaching to unknown pci%d,%d" 2529 " vendor/device-id card", vid, did); 2530 2531 return (BFE_SUCCESS); 2532 } 2533 2534 /* 2535 * Maps device registers. 
2536 */ 2537 static int 2538 bfe_regs_map(bfe_t *bfe) 2539 { 2540 dev_info_t *dip = bfe->bfe_dip; 2541 int ret; 2542 2543 ret = ddi_regs_map_setup(dip, 1, &bfe->bfe_mem_regset.addr, 0, 0, 2544 &bfe_dev_attr, &bfe->bfe_mem_regset.hdl); 2545 2546 if (ret != DDI_SUCCESS) { 2547 bfe_error(bfe->bfe_dip, "ddi_regs_map_setup failed"); 2548 return (DDI_FAILURE); 2549 } 2550 2551 return (DDI_SUCCESS); 2552 } 2553 2554 static void 2555 bfe_unmap_regs(bfe_t *bfe) 2556 { 2557 ddi_regs_map_free(&bfe->bfe_mem_regset.hdl); 2558 } 2559 2560 static int 2561 bfe_get_chip_config(bfe_t *bfe) 2562 { 2563 uint32_t prom[BFE_EEPROM_SIZE]; 2564 int i; 2565 2566 /* 2567 * Read EEPROM in prom[] 2568 */ 2569 for (i = 0; i < BFE_EEPROM_SIZE; i++) { 2570 prom[i] = INL(bfe, BFE_EEPROM_BASE + i * sizeof (uint32_t)); 2571 } 2572 2573 bfe->bfe_dev_addr[0] = bfe->bfe_ether_addr[0] = 2574 INB(bfe, BFE_EEPROM_BASE + 79); 2575 2576 bfe->bfe_dev_addr[1] = bfe->bfe_ether_addr[1] = 2577 INB(bfe, BFE_EEPROM_BASE + 78); 2578 2579 bfe->bfe_dev_addr[2] = bfe->bfe_ether_addr[2] = 2580 INB(bfe, BFE_EEPROM_BASE + 81); 2581 2582 bfe->bfe_dev_addr[3] = bfe->bfe_ether_addr[3] = 2583 INB(bfe, BFE_EEPROM_BASE + 80); 2584 2585 bfe->bfe_dev_addr[4] = bfe->bfe_ether_addr[4] = 2586 INB(bfe, BFE_EEPROM_BASE + 83); 2587 2588 bfe->bfe_dev_addr[5] = bfe->bfe_ether_addr[5] = 2589 INB(bfe, BFE_EEPROM_BASE + 82); 2590 2591 bfe->bfe_phy_addr = -1; 2592 2593 return (DDI_SUCCESS); 2594 } 2595 2596 /* 2597 * Ring Management routines 2598 */ 2599 static int 2600 bfe_ring_buf_alloc(bfe_t *bfe, bfe_ring_t *r, int slot, int d) 2601 { 2602 int err; 2603 uint_t count = 0; 2604 2605 err = ddi_dma_alloc_handle(bfe->bfe_dip, 2606 &bfe_dma_attr_buf, DDI_DMA_SLEEP, NULL, 2607 &r->r_buf_dma[slot].handle); 2608 2609 if (err != DDI_SUCCESS) { 2610 bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :" 2611 " alloc_handle failed"); 2612 goto fail0; 2613 } 2614 2615 err = ddi_dma_mem_alloc(r->r_buf_dma[slot].handle, 2616 r->r_buf_len, &bfe_buf_attr, 
	    DDI_DMA_STREAMING,
	    DDI_DMA_SLEEP, NULL, &r->r_buf_dma[slot].addr,
	    &r->r_buf_dma[slot].len,
	    &r->r_buf_dma[slot].acchdl);

	if (err != DDI_SUCCESS) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " mem_alloc failed :%d", err);
		goto fail1;
	}

	/* Bind the buffer for DMA; the chip needs a single cookie. */
	err = ddi_dma_addr_bind_handle(r->r_buf_dma[slot].handle,
	    NULL, r->r_buf_dma[slot].addr,
	    r->r_buf_dma[slot].len,
	    (DDI_DMA_RDWR | DDI_DMA_STREAMING),
	    DDI_DMA_SLEEP, NULL,
	    &r->r_buf_dma[slot].cookie,
	    &count);

	if (err != DDI_DMA_MAPPED) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " bind_handle failed");
		goto fail2;
	}

	/* More than one cookie is unusable; unbind and fail. */
	if (count > 1) {
		bfe_error(bfe->bfe_dip, " bfe_ring_buf_alloc() :"
		    " more than one DMA cookie");
		(void) ddi_dma_unbind_handle(r->r_buf_dma[slot].handle);
		goto fail2;
	}

	return (DDI_SUCCESS);
	/* Unwind in reverse order of acquisition. */
fail2:
	ddi_dma_mem_free(&r->r_buf_dma[slot].acchdl);
fail1:
	ddi_dma_free_handle(&r->r_buf_dma[slot].handle);
fail0:
	return (DDI_FAILURE);
}

/*
 * Releases the DMA resources of one slot set up by bfe_ring_buf_alloc().
 * A no-op if the ring's buffer array was already torn down.
 */
static void
bfe_ring_buf_free(bfe_ring_t *r, int slot)
{
	if (r->r_buf_dma == NULL)
		return;

	(void) ddi_dma_unbind_handle(r->r_buf_dma[slot].handle);
	ddi_dma_mem_free(&r->r_buf_dma[slot].acchdl);
	ddi_dma_free_handle(&r->r_buf_dma[slot].handle);
}

/*
 * Frees the per-descriptor DMA buffers of a whole ring.
 */
static void
bfe_buffer_free(bfe_ring_t *r)
{
	int i;

	for (i = 0; i < r->r_ndesc; i++) {
		bfe_ring_buf_free(r, i);
	}
}

/*
 * Frees the descriptor table itself (and the bookkeeping array);
 * callers free the per-slot buffers first via bfe_buffer_free().
 */
static void
bfe_ring_desc_free(bfe_ring_t *r)
{
	(void) ddi_dma_unbind_handle(r->r_desc_dma_handle);
	ddi_dma_mem_free(&r->r_desc_acc_handle);
	ddi_dma_free_handle(&r->r_desc_dma_handle);
	kmem_free(r->r_buf_dma, r->r_ndesc * sizeof (bfe_dma_t));

	r->r_buf_dma = NULL;
	r->r_desc = NULL;
}


/*
 * Allocates the descriptor table for ring 'r' plus one DMA buffer per
 * descriptor.  'd' is the DMA direction (DDI_DMA_READ/WRITE).
 */
static int
bfe_ring_desc_alloc(bfe_t *bfe, bfe_ring_t *r, int d)
{
int err, i, fail = 0; 2695 caddr_t ring; 2696 size_t size_krnl = 0, size_dma = 0, ring_len = 0; 2697 ddi_dma_cookie_t cookie; 2698 uint_t count = 0; 2699 2700 ASSERT(bfe != NULL); 2701 2702 size_krnl = r->r_ndesc * sizeof (bfe_dma_t); 2703 size_dma = r->r_ndesc * sizeof (bfe_desc_t); 2704 r->r_buf_dma = kmem_zalloc(size_krnl, KM_SLEEP); 2705 2706 2707 err = ddi_dma_alloc_handle(bfe->bfe_dip, &bfe_dma_attr_desc, 2708 DDI_DMA_SLEEP, NULL, &r->r_desc_dma_handle); 2709 2710 if (err != DDI_SUCCESS) { 2711 bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on" 2712 " ddi_dma_alloc_handle()"); 2713 kmem_free(r->r_buf_dma, size_krnl); 2714 return (DDI_FAILURE); 2715 } 2716 2717 2718 err = ddi_dma_mem_alloc(r->r_desc_dma_handle, 2719 size_dma, &bfe_buf_attr, 2720 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 2721 &ring, &ring_len, &r->r_desc_acc_handle); 2722 2723 if (err != DDI_SUCCESS) { 2724 bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on" 2725 " ddi_dma_mem_alloc()"); 2726 ddi_dma_free_handle(&r->r_desc_dma_handle); 2727 kmem_free(r->r_buf_dma, size_krnl); 2728 return (DDI_FAILURE); 2729 } 2730 2731 err = ddi_dma_addr_bind_handle(r->r_desc_dma_handle, 2732 NULL, ring, ring_len, 2733 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, 2734 DDI_DMA_SLEEP, NULL, 2735 &cookie, &count); 2736 2737 if (err != DDI_SUCCESS) { 2738 bfe_error(bfe->bfe_dip, "bfe_ring_desc_alloc() failed on" 2739 " ddi_dma_addr_bind_handle()"); 2740 ddi_dma_mem_free(&r->r_desc_acc_handle); 2741 ddi_dma_free_handle(&r->r_desc_dma_handle); 2742 kmem_free(r->r_buf_dma, size_krnl); 2743 return (DDI_FAILURE); 2744 } 2745 2746 /* 2747 * We don't want to have multiple cookies. Descriptor should be 2748 * aligned to PAGESIZE boundary. 
2749 */ 2750 ASSERT(count == 1); 2751 2752 /* The actual descriptor for the ring */ 2753 r->r_desc_len = ring_len; 2754 r->r_desc_cookie = cookie; 2755 2756 r->r_desc = (void *)ring; 2757 2758 bzero(r->r_desc, size_dma); 2759 bzero(r->r_desc, ring_len); 2760 2761 /* For each descriptor, allocate a DMA buffer */ 2762 fail = 0; 2763 for (i = 0; i < r->r_ndesc; i++) { 2764 if (bfe_ring_buf_alloc(bfe, r, i, d) != DDI_SUCCESS) { 2765 i--; 2766 fail = 1; 2767 break; 2768 } 2769 } 2770 2771 if (fail) { 2772 while (i-- >= 0) { 2773 bfe_ring_buf_free(r, i); 2774 } 2775 2776 /* We don't need the descriptor anymore */ 2777 bfe_ring_desc_free(r); 2778 return (DDI_FAILURE); 2779 } 2780 2781 return (DDI_SUCCESS); 2782 } 2783 2784 static int 2785 bfe_rings_alloc(bfe_t *bfe) 2786 { 2787 /* TX */ 2788 mutex_init(&bfe->bfe_tx_ring.r_lock, NULL, MUTEX_DRIVER, NULL); 2789 bfe->bfe_tx_ring.r_lockp = &bfe->bfe_tx_ring.r_lock; 2790 bfe->bfe_tx_ring.r_buf_len = BFE_MTU + sizeof (struct ether_header) + 2791 VLAN_TAGSZ + ETHERFCSL; 2792 bfe->bfe_tx_ring.r_ndesc = TX_NUM_DESC; 2793 bfe->bfe_tx_ring.r_bfe = bfe; 2794 bfe->bfe_tx_ring.r_avail_desc = TX_NUM_DESC; 2795 2796 /* RX */ 2797 mutex_init(&bfe->bfe_rx_ring.r_lock, NULL, MUTEX_DRIVER, NULL); 2798 bfe->bfe_rx_ring.r_lockp = &bfe->bfe_rx_ring.r_lock; 2799 bfe->bfe_rx_ring.r_buf_len = BFE_MTU + sizeof (struct ether_header) + 2800 VLAN_TAGSZ + ETHERFCSL + RX_HEAD_ROOM; 2801 bfe->bfe_rx_ring.r_ndesc = RX_NUM_DESC; 2802 bfe->bfe_rx_ring.r_bfe = bfe; 2803 bfe->bfe_rx_ring.r_avail_desc = RX_NUM_DESC; 2804 2805 /* Allocate TX Ring */ 2806 if (bfe_ring_desc_alloc(bfe, &bfe->bfe_tx_ring, 2807 DDI_DMA_WRITE) != DDI_SUCCESS) 2808 return (DDI_FAILURE); 2809 2810 /* Allocate RX Ring */ 2811 if (bfe_ring_desc_alloc(bfe, &bfe->bfe_rx_ring, 2812 DDI_DMA_READ) != DDI_SUCCESS) { 2813 cmn_err(CE_NOTE, "RX ring allocation failed"); 2814 bfe_ring_desc_free(&bfe->bfe_tx_ring); 2815 return (DDI_FAILURE); 2816 } 2817 2818 bfe->bfe_tx_ring.r_flags = 
	    BFE_RING_ALLOCATED;
	bfe->bfe_rx_ring.r_flags = BFE_RING_ALLOCATED;

	return (DDI_SUCCESS);
}

/*
 * DDI_RESUME handler: re-initializes and restarts the chip after a
 * suspend.  Returns DDI_FAILURE if the private data is missing or the
 * chip could not be restarted.
 */
static int
bfe_resume(dev_info_t *dip)
{
	bfe_t *bfe;
	int err = DDI_SUCCESS;

	if ((bfe = ddi_get_driver_private(dip)) == NULL) {
		bfe_error(dip, "Unexpected error (no driver private data)"
		    " while resume");
		return (DDI_FAILURE);
	}

	/*
	 * Grab all the locks first.
	 */
	bfe_grab_locks(bfe);
	bfe->bfe_chip_state = BFE_CHIP_RESUME;

	bfe_init_vars(bfe);
	/* PHY will also start running */
	bfe_chip_reset(bfe);
	if (bfe_chip_start(bfe) == DDI_FAILURE) {
		bfe_error(dip, "Could not resume chip");
		err = DDI_FAILURE;
	}

	bfe_release_locks(bfe);

	/* Kick the MAC layer only after the locks are dropped. */
	if (err == DDI_SUCCESS)
		mac_tx_update(bfe->bfe_machdl);

	return (err);
}

/*
 * attach(9E) entry point.  Handles DDI_ATTACH and DDI_RESUME.
 */
static int
bfe_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int unit;
	bfe_t *bfe;
	mac_register_t *macreg;
	int ret;

	switch (cmd) {
	case DDI_RESUME:
		return (bfe_resume(dip));

	case DDI_ATTACH:
		break;

	default:
		return (DDI_FAILURE);
	}


	unit = ddi_get_instance(dip);

	/* Soft state; zeroed so all handles/flags start out invalid. */
	bfe = kmem_zalloc(sizeof (bfe_t), KM_SLEEP);
	bfe->bfe_dip = dip;
	bfe->bfe_unit = unit;

	if (pci_config_setup(dip, &bfe->bfe_conf_handle) != DDI_SUCCESS) {
		bfe_error(dip, "pci_config_setup failed");
		goto fail0;
	}

	/*
	 * Enable IO space, Bus Master and Memory Space accesses.
2891 */ 2892 ret = pci_config_get16(bfe->bfe_conf_handle, PCI_CONF_COMM); 2893 pci_config_put16(bfe->bfe_conf_handle, PCI_CONF_COMM, 2894 PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME | ret); 2895 2896 ddi_set_driver_private(dip, bfe); 2897 2898 /* Identify hardware */ 2899 if (bfe_identify_hardware(bfe) == BFE_FAILURE) { 2900 bfe_error(dip, "Could not identify device"); 2901 goto fail1; 2902 } 2903 2904 if (bfe_regs_map(bfe) != DDI_SUCCESS) { 2905 bfe_error(dip, "Could not map device registers"); 2906 goto fail1; 2907 } 2908 2909 (void) bfe_get_chip_config(bfe); 2910 2911 /* 2912 * Register with MAC layer 2913 */ 2914 if ((macreg = mac_alloc(MAC_VERSION)) == NULL) { 2915 bfe_error(dip, "mac_alloc() failed"); 2916 goto fail2; 2917 } 2918 2919 macreg->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 2920 macreg->m_driver = bfe; 2921 macreg->m_dip = dip; 2922 macreg->m_instance = unit; 2923 macreg->m_src_addr = bfe->bfe_ether_addr; 2924 macreg->m_callbacks = &bfe_mac_callbacks; 2925 macreg->m_min_sdu = 0; 2926 macreg->m_max_sdu = ETHERMTU; 2927 macreg->m_margin = VLAN_TAGSZ; 2928 2929 if ((ret = mac_register(macreg, &bfe->bfe_machdl)) != 0) { 2930 bfe_error(dip, "mac_register() failed with %d error", ret); 2931 mac_free(macreg); 2932 goto fail2; 2933 } 2934 2935 mac_free(macreg); 2936 2937 rw_init(&bfe->bfe_rwlock, NULL, RW_DRIVER, 2938 DDI_INTR_PRI(bfe->bfe_intrpri)); 2939 2940 if (bfe_add_intr(bfe) != DDI_SUCCESS) { 2941 bfe_error(dip, "Could not add interrupt"); 2942 goto fail3; 2943 } 2944 2945 if (bfe_rings_alloc(bfe) != DDI_SUCCESS) { 2946 bfe_error(dip, "Could not allocate TX/RX Ring"); 2947 goto fail4; 2948 } 2949 2950 /* Init and then reset the chip */ 2951 bfe->bfe_chip_action = 0; 2952 bfe_init_vars(bfe); 2953 2954 /* PHY will also start running */ 2955 bfe_chip_reset(bfe); 2956 2957 /* 2958 * Even though we enable the interrupts here but chip's interrupt 2959 * is not enabled yet. It will be enabled once we plumb the interface. 
 */
	if (ddi_intr_enable(bfe->bfe_intrhdl) != DDI_SUCCESS) {
		bfe_error(dip, "Could not enable interrupt");
		/*
		 * NOTE(review): rings allocated by bfe_rings_alloc()
		 * above are not freed on this path -- verify and plug
		 * the leak.
		 */
		goto fail4;
	}

	return (DDI_SUCCESS);

	/* Unwind in reverse order of setup; labels fall through. */
fail4:
	bfe_remove_intr(bfe);
fail3:
	(void) mac_unregister(bfe->bfe_machdl);
fail2:
	bfe_unmap_regs(bfe);
fail1:
	pci_config_teardown(&bfe->bfe_conf_handle);
fail0:
	kmem_free(bfe, sizeof (bfe_t));
	return (DDI_FAILURE);
}

/*
 * detach(9E) entry point.  Handles DDI_DETACH and DDI_SUSPEND.
 */
static int
bfe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	bfe_t *bfe;

	bfe = ddi_get_driver_private(devinfo);

	switch (cmd) {
	case DDI_DETACH:
		/*
		 * We need to stop the timer before grabbing locks otherwise
		 * we can land-up in deadlock with untimeout.
		 */
		bfe_stop_timer(bfe);

		/*
		 * First unregister with MAC layer before stopping DMA
		 * engine.
		 */
		if (mac_unregister(bfe->bfe_machdl) != DDI_SUCCESS)
			return (DDI_FAILURE);

		bfe->bfe_machdl = NULL;

		/*
		 * Quiesce the chip first.
		 */
		bfe_grab_locks(bfe);
		bfe_chip_halt(bfe);
		bfe_stop_phy(bfe);
		bfe_release_locks(bfe);

		(void) ddi_intr_disable(bfe->bfe_intrhdl);

		/* Make sure timer is gone.
		 */
		bfe_stop_timer(bfe);

		/*
		 * Free the DMA resources for buffer and then descriptors
		 */
		if (bfe->bfe_tx_ring.r_flags == BFE_RING_ALLOCATED) {
			/* TX */
			bfe_buffer_free(&bfe->bfe_tx_ring);
			bfe_ring_desc_free(&bfe->bfe_tx_ring);
		}

		if (bfe->bfe_rx_ring.r_flags == BFE_RING_ALLOCATED) {
			/* RX */
			bfe_buffer_free(&bfe->bfe_rx_ring);
			bfe_ring_desc_free(&bfe->bfe_rx_ring);
		}

		/* Release interrupt, register mappings and config space. */
		bfe_remove_intr(bfe);
		bfe_unmap_regs(bfe);
		pci_config_teardown(&bfe->bfe_conf_handle);

		/* Locks last: nothing can race us at this point. */
		mutex_destroy(&bfe->bfe_tx_ring.r_lock);
		mutex_destroy(&bfe->bfe_rx_ring.r_lock);
		rw_destroy(&bfe->bfe_rwlock);

		kmem_free(bfe, sizeof (bfe_t));

		ddi_set_driver_private(devinfo, NULL);
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		/*
		 * We need to stop the timer before grabbing locks otherwise
		 * we can land-up in deadlock with untimeout.
		 */
		bfe_stop_timer(bfe);

		/*
		 * Grab all the locks first.
		 */
		bfe_grab_locks(bfe);
		bfe_chip_halt(bfe);
		bfe_stop_phy(bfe);
		/* Marked suspended so the interrupt handler bails early. */
		bfe->bfe_chip_state = BFE_CHIP_SUSPENDED;
		bfe_release_locks(bfe);

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Quiesce the card for fast reboot
 * quiesce(9E): must not block or take locks; just silence the device.
 */
int
bfe_quiesce(dev_info_t *dev_info)
{
	bfe_t *bfe;

	bfe = ddi_get_driver_private(dev_info);

	bfe_chip_halt(bfe);
	bfe_stop_phy(bfe);
	bfe->bfe_chip_state = BFE_CHIP_QUIESCED;

	return (DDI_SUCCESS);
}

/* Character/block entry points: all stubs, MAC layer does the real work. */
static struct cb_ops bfe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

static struct dev_ops bfe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	bfe_attach,		/* devo_attach */
	bfe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&bfe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	bfe_quiesce		/* devo_quiesce */
};

/* Loadable module linkage. */
static struct modldrv bfe_modldrv = {
	&mod_driverops,
	bfe_ident,
	&bfe_dev_ops
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&bfe_modldrv, NULL
};

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_init(void)
{
	int status;

	mac_init_ops(&bfe_dev_ops,
MODULE_NAME); 3144 status = mod_install(&modlinkage); 3145 if (status == DDI_FAILURE) 3146 mac_fini_ops(&bfe_dev_ops); 3147 return (status); 3148 } 3149 3150 int 3151 _fini(void) 3152 { 3153 int status; 3154 3155 status = mod_remove(&modlinkage); 3156 if (status == 0) { 3157 mac_fini_ops(&bfe_dev_ops); 3158 } 3159 return (status); 3160 } 3161