1 /* 2 * sfe.c : DP83815/DP83816/SiS900 Fast Ethernet MAC driver for Solaris 3 * 4 * Copyright (c) 2002-2008 Masayuki Murayama. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright notice, 10 * this list of conditions and the following disclaimer. 11 * 12 * 2. Redistributions in binary form must reproduce the above copyright notice, 13 * this list of conditions and the following disclaimer in the documentation 14 * and/or other materials provided with the distribution. 15 * 16 * 3. Neither the name of the author nor the names of its contributors may be 17 * used to endorse or promote products derived from this software without 18 * specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 27 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 31 * DAMAGE. 32 */ 33 34 #pragma ident "%Z%%M% %I% %E% SMI" /* sfe device driver */ 35 36 /* 37 * System Header files. 38 */ 39 #include <sys/types.h> 40 #include <sys/conf.h> 41 #include <sys/debug.h> 42 #include <sys/kmem.h> 43 #include <sys/modctl.h> 44 #include <sys/errno.h> 45 #include <sys/ddi.h> 46 #include <sys/sunddi.h> 47 #include <sys/byteorder.h> 48 #include <sys/ethernet.h> 49 #include <sys/pci.h> 50 51 #include "sfe_mii.h" 52 #include "sfe_util.h" 53 #include "sfereg.h" 54 55 char ident[] = "sis900/dp83815 driver v" "2.6.1t27os"; 56 57 /* Debugging support */ 58 #ifdef DEBUG_LEVEL 59 static int sfe_debug = DEBUG_LEVEL; 60 #if DEBUG_LEVEL > 4 61 #define CONS "^" 62 #else 63 #define CONS "!" 64 #endif 65 #define DPRINTF(n, args) if (sfe_debug > (n)) cmn_err args 66 #else 67 #define CONS "!" 68 #define DPRINTF(n, args) 69 #endif 70 71 /* 72 * Useful macros and typedefs 73 */ 74 #define ONESEC (drv_usectohz(1*1000000)) 75 #define ROUNDUP2(x, a) (((x) + (a) - 1) & ~((a) - 1)) 76 77 /* 78 * Our configuration 79 */ 80 #define MAXTXFRAGS 1 81 #define MAXRXFRAGS 1 82 83 #ifndef TX_BUF_SIZE 84 #define TX_BUF_SIZE 64 85 #endif 86 #ifndef TX_RING_SIZE 87 #if MAXTXFRAGS == 1 88 #define TX_RING_SIZE TX_BUF_SIZE 89 #else 90 #define TX_RING_SIZE (TX_BUF_SIZE * 4) 91 #endif 92 #endif 93 94 #ifndef RX_BUF_SIZE 95 #define RX_BUF_SIZE 256 96 #endif 97 #ifndef RX_RING_SIZE 98 #define RX_RING_SIZE RX_BUF_SIZE 99 #endif 100 101 #define OUR_INTR_BITS \ 102 (ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT | ISR_RXSOVR | \ 103 ISR_TXURN | ISR_TXDESC | ISR_TXERR | \ 104 ISR_RXORN | ISR_RXIDLE | ISR_RXOK | ISR_RXERR) 105 106 #define USE_MULTICAST_HASHTBL 107 108 static int sfe_tx_copy_thresh = 256; 109 static int sfe_rx_copy_thresh = 256; 110 111 /* special PHY registers for SIS900 */ 112 #define MII_CONFIG1 0x0010 113 #define MII_CONFIG2 0x0011 114 #define MII_MASK 0x0013 115 #define MII_RESV 0x0014 116 117 #define PHY_MASK 0xfffffff0 118 #define PHY_SIS900_INTERNAL 0x001d8000 119 #define PHY_ICS1893 0x0015f440 120 121 122 #define SFE_DESC_SIZE 16 /* including pads rounding up to power of 2 */ 123 124 /* 125 * Supported chips 126 */ 127 struct chip_info { 128 uint16_t venid; 129 uint16_t devid; 130 char *chip_name; 131 int chip_type; 132 #define CHIPTYPE_DP83815 0 133 #define CHIPTYPE_SIS900 1 134 }; 135 136 /* 137 * Chip dependent MAC state 138 */ 139 struct sfe_dev { 140 /* misc HW information */ 141 struct chip_info *chip; 142 uint32_t our_intr_bits; 143 uint32_t isr_pended; 144 uint32_t cr; 145 uint_t tx_drain_threshold; 146 uint_t tx_fill_threshold; 147 uint_t rx_drain_threshold; 148 uint_t rx_fill_threshold; 149 uint8_t revid; /* revision from PCI configuration */ 150 boolean_t (*get_mac_addr)(struct gem_dev *); 151 uint8_t mac_addr[ETHERADDRL]; 152 uint8_t bridge_revid; 153 }; 154 155 /* 156 * Hardware information 157 */ 158 struct chip_info sfe_chiptbl[] = { 159 { 0x1039, 0x0900, "SiS900", CHIPTYPE_SIS900, }, 160 { 0x100b, 0x0020, "DP83815/83816", CHIPTYPE_DP83815, }, 161 { 0x1039, 0x7016, "SiS7016", CHIPTYPE_SIS900, }, 162 }; 163 #define CHIPTABLESIZE (sizeof (sfe_chiptbl)/sizeof (struct chip_info)) 164 165 /* ======================================================== */ 166 167 /* mii operations */ 168 static void sfe_mii_sync_dp83815(struct gem_dev *); 169 static void sfe_mii_sync_sis900(struct gem_dev *); 170 static uint16_t sfe_mii_read_dp83815(struct gem_dev *, uint_t); 171 static uint16_t sfe_mii_read_sis900(struct gem_dev *, uint_t); 172 static void sfe_mii_write_dp83815(struct gem_dev *, uint_t, uint16_t); 173 static void sfe_mii_write_sis900(struct gem_dev *, uint_t, uint16_t); 174 static void sfe_set_eq_sis630(struct gem_dev *dp); 175 /* nic operations */ 176 static int sfe_reset_chip_sis900(struct gem_dev *); 177 static int sfe_reset_chip_dp83815(struct gem_dev *); 178 static int sfe_init_chip(struct gem_dev *); 179 static int sfe_start_chip(struct gem_dev *); 180 static int sfe_stop_chip(struct gem_dev *); 181 static int sfe_set_media(struct gem_dev *); 182 static int sfe_set_rx_filter_dp83815(struct gem_dev *); 183 static int sfe_set_rx_filter_sis900(struct gem_dev *); 184 static int sfe_get_stats(struct gem_dev *); 185 static int sfe_attach_chip(struct gem_dev *); 186 187 /* descriptor operations */ 188 static int sfe_tx_desc_write(struct gem_dev *dp, int slot, 189 ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags); 190 static void sfe_tx_start(struct gem_dev *dp, int startslot, int nslot); 191 static void sfe_rx_desc_write(struct gem_dev *dp, int slot, 192 ddi_dma_cookie_t *dmacookie, int frags); 193 static uint_t sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc); 194 static uint64_t sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc); 195 196 static void sfe_tx_desc_init(struct gem_dev *dp, int slot); 197 static void sfe_rx_desc_init(struct gem_dev *dp, int slot); 198 static void sfe_tx_desc_clean(struct gem_dev *dp, int slot); 199 static void sfe_rx_desc_clean(struct gem_dev *dp, int slot); 200 201 /* interrupt handler */ 202 static uint_t sfe_interrupt(struct gem_dev *dp); 203 204 /* ======================================================== */ 205 206 /* mapping attributes */ 207 /* Data access requirements. */ 208 static struct ddi_device_acc_attr sfe_dev_attr = { 209 DDI_DEVICE_ATTR_V0, 210 DDI_STRUCTURE_LE_ACC, 211 DDI_STRICTORDER_ACC 212 }; 213 214 /* On sparc, Buffers should be native endian for speed */ 215 static struct ddi_device_acc_attr sfe_buf_attr = { 216 DDI_DEVICE_ATTR_V0, 217 DDI_NEVERSWAP_ACC, /* native endianness */ 218 DDI_STRICTORDER_ACC 219 }; 220 221 static ddi_dma_attr_t sfe_dma_attr_buf = { 222 DMA_ATTR_V0, /* dma_attr_version */ 223 0, /* dma_attr_addr_lo */ 224 0xffffffffull, /* dma_attr_addr_hi */ 225 0x00000fffull, /* dma_attr_count_max */ 226 0, /* patched later */ /* dma_attr_align */ 227 0x000003fc, /* dma_attr_burstsizes */ 228 1, /* dma_attr_minxfer */ 229 0x00000fffull, /* dma_attr_maxxfer */ 230 0xffffffffull, /* dma_attr_seg */ 231 0, /* patched later */ /* dma_attr_sgllen */ 232 1, /* dma_attr_granular */ 233 0 /* dma_attr_flags */ 234 }; 235 236 static ddi_dma_attr_t sfe_dma_attr_desc = { 237 DMA_ATTR_V0, /* dma_attr_version */ 238 16, /* dma_attr_addr_lo */ 239 0xffffffffull, /* dma_attr_addr_hi */ 240 0xffffffffull, /* dma_attr_count_max */ 241 16, /* dma_attr_align */ 242 0x000003fc, /* dma_attr_burstsizes */ 243 1, /* dma_attr_minxfer */ 244 0xffffffffull, /* dma_attr_maxxfer */ 245 0xffffffffull, /* dma_attr_seg */ 246 1, /* dma_attr_sgllen */ 247 1, /* dma_attr_granular */ 248 0 /* dma_attr_flags */ 249 }; 250 251 uint32_t sfe_use_pcimemspace = 0; 252 253 /* ======================================================== */ 254 /* 255 * HW manipulation routines 256 */ 257 /* ======================================================== */ 258 259 #define SFE_EEPROM_DELAY(dp) \ 260 { (void) INL(dp, EROMAR); (void) INL(dp, EROMAR); } 261 #define EE_CMD_READ 6 262 #define EE_CMD_SHIFT 6 263 264 static uint16_t 265 sfe_read_eeprom(struct gem_dev *dp, uint_t offset) 266 { 267 int eedi; 268 int i; 269 uint16_t ret; 270 271 /* ensure de-assert chip select */ 272 OUTL(dp, EROMAR, 0); 273 SFE_EEPROM_DELAY(dp); 274 OUTL(dp, EROMAR, EROMAR_EESK); 275 SFE_EEPROM_DELAY(dp); 276 277 /* assert chip select */ 278 offset |= EE_CMD_READ << EE_CMD_SHIFT; 279 280 for (i = 8; i >= 0; i--) { 281 /* make command */ 282 eedi = ((offset >> i) & 1) << EROMAR_EEDI_SHIFT; 283 284 /* send 1 bit */ 285 OUTL(dp, EROMAR, EROMAR_EECS | eedi); 286 SFE_EEPROM_DELAY(dp); 287 OUTL(dp, EROMAR, EROMAR_EECS | eedi | EROMAR_EESK); 288 SFE_EEPROM_DELAY(dp); 289 } 290 291 OUTL(dp, EROMAR, EROMAR_EECS); 292 293 ret = 0; 294 for (i = 0; i < 16; i++) { 295 /* Get 1 bit */ 296 OUTL(dp, EROMAR, EROMAR_EECS); 297 SFE_EEPROM_DELAY(dp); 298 OUTL(dp, EROMAR, EROMAR_EECS | EROMAR_EESK); 299 SFE_EEPROM_DELAY(dp); 300 301 ret = (ret << 1) | ((INL(dp, EROMAR) >> EROMAR_EEDO_SHIFT) & 1); 302 } 303 304 OUTL(dp, EROMAR, 0); 305 SFE_EEPROM_DELAY(dp); 306 307 return (ret); 308 } 309 #undef SFE_EEPROM_DELAY 310 311 static boolean_t 312 sfe_get_mac_addr_dp83815(struct gem_dev *dp) 313 { 314 uint8_t *mac; 315 uint_t val; 316 int i; 317 318 #define BITSET(p, ix, v) (p)[(ix)/8] |= ((v) ? 1 : 0) << ((ix) & 0x7) 319 320 DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__)); 321 322 mac = dp->dev_addr.ether_addr_octet; 323 324 /* first of all, clear MAC address buffer */ 325 bzero(mac, ETHERADDRL); 326 327 /* get bit 0 */ 328 val = sfe_read_eeprom(dp, 0x6); 329 BITSET(mac, 0, val & 1); 330 331 /* get bit 1 - 16 */ 332 val = sfe_read_eeprom(dp, 0x7); 333 for (i = 0; i < 16; i++) { 334 BITSET(mac, 1 + i, val & (1 << (15 - i))); 335 } 336 337 /* get bit 17 - 32 */ 338 val = sfe_read_eeprom(dp, 0x8); 339 for (i = 0; i < 16; i++) { 340 BITSET(mac, 17 + i, val & (1 << (15 - i))); 341 } 342 343 /* get bit 33 - 47 */ 344 val = sfe_read_eeprom(dp, 0x9); 345 for (i = 0; i < 15; i++) { 346 BITSET(mac, 33 + i, val & (1 << (15 - i))); 347 } 348 349 return (B_TRUE); 350 #undef BITSET 351 } 352 353 static boolean_t 354 sfe_get_mac_addr_sis900(struct gem_dev *dp) 355 { 356 uint_t val; 357 int i; 358 uint8_t *mac; 359 360 mac = dp->dev_addr.ether_addr_octet; 361 362 for (i = 0; i < ETHERADDRL/2; i++) { 363 val = sfe_read_eeprom(dp, 0x8 + i); 364 *mac++ = (uint8_t)val; 365 *mac++ = (uint8_t)(val >> 8); 366 } 367 368 return (B_TRUE); 369 } 370 371 static dev_info_t * 372 sfe_search_pci_dev_subr(dev_info_t *cur_node, int vendor_id, int device_id) 373 { 374 dev_info_t *child_id; 375 dev_info_t *ret; 376 int vid, did; 377 378 if (cur_node == NULL) { 379 return (NULL); 380 } 381 382 /* check brothers */ 383 do { 384 vid = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node, 385 DDI_PROP_DONTPASS, "vendor-id", -1); 386 did = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node, 387 DDI_PROP_DONTPASS, "device-id", -1); 388 389 if (vid == vendor_id && did == device_id) { 390 /* found */ 391 return (cur_node); 392 } 393 394 /* check children */ 395 if ((child_id = ddi_get_child(cur_node)) != NULL) { 396 if ((ret = sfe_search_pci_dev_subr(child_id, 397 vendor_id, device_id)) != NULL) { 398 return (ret); 399 } 400 } 401 402 } while ((cur_node = ddi_get_next_sibling(cur_node)) != NULL); 403 404 /* not found */ 405 return (NULL); 406 } 407 408 static dev_info_t * 409 sfe_search_pci_dev(int vendor_id, int device_id) 410 { 411 return (sfe_search_pci_dev_subr(ddi_root_node(), vendor_id, device_id)); 412 } 413 414 /* Avoid undefined symbol for non IA architectures */ 415 #pragma weak inb 416 #pragma weak outb 417 418 static boolean_t 419 sfe_get_mac_addr_sis630e(struct gem_dev *dp) 420 { 421 int i; 422 dev_info_t *isa_bridge; 423 ddi_acc_handle_t isa_handle; 424 int reg; 425 426 if (inb == NULL || outb == NULL) { 427 /* this is not IA architecture */ 428 return (B_FALSE); 429 } 430 431 if ((isa_bridge = sfe_search_pci_dev(0x1039, 0x8)) == NULL) { 432 cmn_err(CE_WARN, "%s: failed to find isa-bridge pci1039,8", 433 dp->name); 434 return (B_FALSE); 435 } 436 437 if (pci_config_setup(isa_bridge, &isa_handle) != DDI_SUCCESS) { 438 cmn_err(CE_WARN, "%s: ddi_regs_map_setup failed", 439 dp->name); 440 return (B_FALSE); 441 } 442 443 /* enable to access CMOS RAM */ 444 reg = pci_config_get8(isa_handle, 0x48); 445 pci_config_put8(isa_handle, 0x48, reg | 0x40); 446 447 for (i = 0; i < ETHERADDRL; i++) { 448 outb(0x70, 0x09 + i); 449 dp->dev_addr.ether_addr_octet[i] = inb(0x71); 450 } 451 452 /* disable to access CMOS RAM */ 453 pci_config_put8(isa_handle, 0x48, reg); 454 pci_config_teardown(&isa_handle); 455 456 return (B_TRUE); 457 } 458 459 static boolean_t 460 sfe_get_mac_addr_sis635(struct gem_dev *dp) 461 { 462 int i; 463 uint32_t rfcr; 464 uint16_t v; 465 struct sfe_dev *lp = dp->private; 466 467 DPRINTF(2, (CE_CONT, CONS "%s: %s: called", dp->name, __func__)); 468 rfcr = INL(dp, RFCR); 469 470 OUTL(dp, CR, lp->cr | CR_RELOAD); 471 OUTL(dp, CR, lp->cr); 472 473 /* disable packet filtering before reading filter */ 474 OUTL(dp, RFCR, rfcr & ~RFCR_RFEN); 475 476 /* load MAC addr from filter data register */ 477 for (i = 0; i < ETHERADDRL; i += 2) { 478 OUTL(dp, RFCR, 479 (RFADDR_MAC_SIS900 + (i/2)) << RFCR_RFADDR_SHIFT_SIS900); 480 v = INL(dp, RFDR); 481 dp->dev_addr.ether_addr_octet[i] = (uint8_t)v; 482 dp->dev_addr.ether_addr_octet[i+1] = (uint8_t)(v >> 8); 483 } 484 485 /* re-enable packet filtering */ 486 OUTL(dp, RFCR, rfcr | RFCR_RFEN); 487 488 return (B_TRUE); 489 } 490 491 static boolean_t 492 sfe_get_mac_addr_sis962(struct gem_dev *dp) 493 { 494 boolean_t ret; 495 int i; 496 497 ret = B_FALSE; 498 499 /* rise request signal to access EEPROM */ 500 OUTL(dp, MEAR, EROMAR_EEREQ); 501 for (i = 0; (INL(dp, MEAR) & EROMAR_EEGNT) == 0; i++) { 502 if (i > 200) { 503 /* failed to acquire eeprom */ 504 cmn_err(CE_NOTE, 505 CONS "%s: failed to access eeprom", dp->name); 506 goto x; 507 } 508 drv_usecwait(10); 509 } 510 ret = sfe_get_mac_addr_sis900(dp); 511 x: 512 /* release EEPROM */ 513 OUTL(dp, MEAR, EROMAR_EEDONE); 514 515 return (ret); 516 } 517 518 static int 519 sfe_reset_chip_sis900(struct gem_dev *dp) 520 { 521 int i; 522 uint32_t done; 523 uint32_t val; 524 struct sfe_dev *lp = dp->private; 525 526 DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__)); 527 528 /* invalidate mac addr cache */ 529 bzero(lp->mac_addr, sizeof (lp->mac_addr)); 530 531 lp->cr = 0; 532 533 /* inhibit interrupt */ 534 OUTL(dp, IMR, 0); 535 lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits; 536 537 OUTL(dp, RFCR, 0); 538 539 OUTL(dp, CR, CR_RST | CR_TXR | CR_RXR); 540 drv_usecwait(10); 541 542 done = 0; 543 for (i = 0; done != (ISR_TXRCMP | ISR_RXRCMP); i++) { 544 if (i > 1000) { 545 cmn_err(CE_WARN, "%s: chip reset timeout", dp->name); 546 return (GEM_FAILURE); 547 } 548 done |= INL(dp, ISR) & (ISR_TXRCMP | ISR_RXRCMP); 549 drv_usecwait(10); 550 } 551 552 if (lp->revid == SIS630ET_900_REV) { 553 lp->cr |= CR_ACCESSMODE; 554 OUTL(dp, CR, lp->cr | INL(dp, CR)); 555 } 556 557 /* Configuration register: enable PCI parity */ 558 DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", 559 dp->name, INL(dp, CFG), CFG_BITS_SIS900)); 560 val = 0; 561 if (lp->revid >= SIS635A_900_REV || 562 lp->revid == SIS900B_900_REV) { 563 /* what is this ? */ 564 val |= CFG_RND_CNT; 565 } 566 OUTL(dp, CFG, val); 567 DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name, 568 INL(dp, CFG), CFG_BITS_SIS900)); 569 570 return (GEM_SUCCESS); 571 } 572 573 static int 574 sfe_reset_chip_dp83815(struct gem_dev *dp) 575 { 576 int i; 577 uint32_t val; 578 struct sfe_dev *lp = dp->private; 579 580 DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__)); 581 582 /* invalidate mac addr cache */ 583 bzero(lp->mac_addr, sizeof (lp->mac_addr)); 584 585 lp->cr = 0; 586 587 /* inhibit interrupts */ 588 OUTL(dp, IMR, 0); 589 lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits; 590 591 OUTL(dp, RFCR, 0); 592 593 OUTL(dp, CR, CR_RST); 594 drv_usecwait(10); 595 596 for (i = 0; INL(dp, CR) & CR_RST; i++) { 597 if (i > 100) { 598 cmn_err(CE_WARN, "!%s: chip reset timeout", dp->name); 599 return (GEM_FAILURE); 600 } 601 drv_usecwait(10); 602 } 603 DPRINTF(0, (CE_CONT, "!%s: chip reset in %duS", dp->name, i*10)); 604 605 OUTL(dp, CCSR, CCSR_PMESTS); 606 OUTL(dp, CCSR, 0); 607 608 /* Configuration register: enable PCI parity */ 609 DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", 610 dp->name, INL(dp, CFG), CFG_BITS_DP83815)); 611 val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG); 612 OUTL(dp, CFG, val | CFG_PAUSE_ADV); 613 DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name, 614 INL(dp, CFG), CFG_BITS_DP83815)); 615 616 return (GEM_SUCCESS); 617 } 618 619 static int 620 sfe_init_chip(struct gem_dev *dp) 621 { 622 /* Configuration register: have been set up in sfe_chip_reset */ 623 624 /* PCI test control register: do nothing */ 625 626 /* Interrupt status register : do nothing */ 627 628 /* Interrupt mask register: clear, but leave lp->our_intr_bits */ 629 OUTL(dp, IMR, 0); 630 631 /* Enhanced PHY Access register (sis900): do nothing */ 632 633 /* Transmit Descriptor Pointer register: base addr of TX ring */ 634 OUTL(dp, TXDP, dp->tx_ring_dma); 635 636 /* Receive descriptor pointer register: base addr of RX ring */ 637 OUTL(dp, RXDP, dp->rx_ring_dma); 638 639 return (GEM_SUCCESS); 640 } 641 642 static uint_t 643 sfe_mcast_hash(struct gem_dev *dp, uint8_t *addr) 644 { 645 return (gem_ether_crc_be(addr, ETHERADDRL)); 646 } 647 648 #ifdef DEBUG_LEVEL 649 static void 650 sfe_rxfilter_dump(struct gem_dev *dp, int start, int end) 651 { 652 int i; 653 int j; 654 uint16_t ram[0x10]; 655 656 cmn_err(CE_CONT, "!%s: rx filter ram dump:", dp->name); 657 #define WORDS_PER_LINE 4 658 for (i = start; i < end; i += WORDS_PER_LINE*2) { 659 for (j = 0; j < WORDS_PER_LINE; j++) { 660 OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i + j*2); 661 ram[j] = INL(dp, RFDR); 662 } 663 664 cmn_err(CE_CONT, "!0x%02x: 0x%04x 0x%04x 0x%04x 0x%04x", 665 i, ram[0], ram[1], ram[2], ram[3]); 666 } 667 668 #undef WORDS_PER_LINE 669 } 670 #endif 671 672 static uint_t sfe_rf_perfect_base_dp83815[] = { 673 RFADDR_PMATCH0_DP83815, 674 RFADDR_PMATCH1_DP83815, 675 RFADDR_PMATCH2_DP83815, 676 RFADDR_PMATCH3_DP83815, 677 }; 678 679 static int 680 sfe_set_rx_filter_dp83815(struct gem_dev *dp) 681 { 682 int i; 683 int j; 684 uint32_t mode; 685 uint8_t *mac = dp->cur_addr.ether_addr_octet; 686 uint16_t hash_tbl[32]; 687 struct sfe_dev *lp = dp->private; 688 689 DPRINTF(1, (CE_CONT, CONS "%s: %s: called, mc_count:%d, mode:0x%b", 690 dp->name, __func__, dp->mc_count, dp->rxmode, RXMODE_BITS)); 691 692 #if DEBUG_LEVEL > 0 693 for (i = 0; i < dp->mc_count; i++) { 694 cmn_err(CE_CONT, 695 "!%s: adding mcast(%d) %02x:%02x:%02x:%02x:%02x:%02x", 696 dp->name, i, 697 dp->mc_list[i].addr.ether_addr_octet[0], 698 dp->mc_list[i].addr.ether_addr_octet[1], 699 dp->mc_list[i].addr.ether_addr_octet[2], 700 dp->mc_list[i].addr.ether_addr_octet[3], 701 dp->mc_list[i].addr.ether_addr_octet[4], 702 dp->mc_list[i].addr.ether_addr_octet[5]); 703 } 704 #endif 705 if ((dp->rxmode & RXMODE_ENABLE) == 0) { 706 /* disable rx filter */ 707 OUTL(dp, RFCR, 0); 708 return (GEM_SUCCESS); 709 } 710 711 /* 712 * Set Receive filter control register 713 */ 714 if (dp->rxmode & RXMODE_PROMISC) { 715 /* all broadcast, all multicast, all physical */ 716 mode = RFCR_AAB | RFCR_AAM | RFCR_AAP; 717 } else if ((dp->rxmode & RXMODE_ALLMULTI) || dp->mc_count > 16*32/2) { 718 /* all broadcast, all multicast, physical for the chip */ 719 mode = RFCR_AAB | RFCR_AAM | RFCR_APM_DP83815; 720 } else if (dp->mc_count > 4) { 721 /* 722 * Use multicast hash table, 723 * accept all broadcast and physical for the chip. 724 */ 725 mode = RFCR_AAB | RFCR_MHEN_DP83815 | RFCR_APM_DP83815; 726 727 bzero(hash_tbl, sizeof (hash_tbl)); 728 for (i = 0; i < dp->mc_count; i++) { 729 j = dp->mc_list[i].hash >> (32 - 9); 730 hash_tbl[j / 16] |= 1 << (j % 16); 731 } 732 } else { 733 /* 734 * Use pattern mach filter for multicast address, 735 * accept all broadcast and physical for the chip 736 */ 737 /* need to enable corresponding pattern registers */ 738 mode = RFCR_AAB | RFCR_APM_DP83815 | 739 (((1 << dp->mc_count) - 1) << RFCR_APAT_SHIFT); 740 } 741 742 #if DEBUG_LEVEL > 1 743 cmn_err(CE_CONT, 744 "!%s: mac %02x:%02x:%02x:%02x:%02x:%02x" 745 " cache %02x:%02x:%02x:%02x:%02x:%02x", 746 dp->name, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], 747 lp->mac_addr[0], lp->mac_addr[1], 748 lp->mac_addr[2], lp->mac_addr[3], 749 lp->mac_addr[4], lp->mac_addr[5]); 750 #endif 751 if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) { 752 /* 753 * XXX - need to *disable* rx filter to load mac address for 754 * the chip. otherwise, we cannot setup rxfilter correctly. 755 */ 756 /* setup perfect match register for my station address */ 757 for (i = 0; i < ETHERADDRL; i += 2) { 758 OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i); 759 OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]); 760 } 761 762 bcopy(mac, lp->mac_addr, ETHERADDRL); 763 } 764 765 #if DEBUG_LEVEL > 3 766 /* clear pattern ram */ 767 for (j = 0x200; j < 0x380; j += 2) { 768 OUTL(dp, RFCR, j); 769 OUTL(dp, RFDR, 0); 770 } 771 #endif 772 if (mode & RFCR_APAT_DP83815) { 773 /* setup multicast address into pattern match registers */ 774 for (j = 0; j < dp->mc_count; j++) { 775 mac = &dp->mc_list[j].addr.ether_addr_octet[0]; 776 for (i = 0; i < ETHERADDRL; i += 2) { 777 OUTL(dp, RFCR, 778 sfe_rf_perfect_base_dp83815[j] + i*2); 779 OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]); 780 } 781 } 782 783 /* setup pattern count registers */ 784 OUTL(dp, RFCR, RFADDR_PCOUNT01_DP83815); 785 OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL); 786 OUTL(dp, RFCR, RFADDR_PCOUNT23_DP83815); 787 OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL); 788 } 789 790 if (mode & RFCR_MHEN_DP83815) { 791 /* Load Multicast hash table */ 792 for (i = 0; i < 32; i++) { 793 /* for DP83815, index is in byte */ 794 OUTL(dp, RFCR, RFADDR_MULTICAST_DP83815 + i*2); 795 OUTL(dp, RFDR, hash_tbl[i]); 796 } 797 } 798 #if DEBUG_LEVEL > 2 799 sfe_rxfilter_dump(dp, 0, 0x10); 800 sfe_rxfilter_dump(dp, 0x200, 0x380); 801 #endif 802 /* Set rx filter mode and enable rx filter */ 803 OUTL(dp, RFCR, RFCR_RFEN | mode); 804 805 return (GEM_SUCCESS); 806 } 807 808 static int 809 sfe_set_rx_filter_sis900(struct gem_dev *dp) 810 { 811 int i; 812 uint32_t mode; 813 uint16_t hash_tbl[16]; 814 uint8_t *mac = dp->cur_addr.ether_addr_octet; 815 int hash_size; 816 int hash_shift; 817 struct sfe_dev *lp = dp->private; 818 819 DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__)); 820 821 if ((dp->rxmode & RXMODE_ENABLE) == 0) { 822 /* disalbe rx filter */ 823 OUTL(dp, RFCR, 0); 824 return (GEM_SUCCESS); 825 } 826 827 /* 828 * determine hardware hash table size in word. 829 */ 830 hash_shift = 25; 831 if (lp->revid >= SIS635A_900_REV || lp->revid == SIS900B_900_REV) { 832 hash_shift = 24; 833 } 834 hash_size = (1 << (32 - hash_shift)) / 16; 835 bzero(hash_tbl, sizeof (hash_tbl)); 836 837 /* Set Receive filter control register */ 838 839 if (dp->rxmode & RXMODE_PROMISC) { 840 /* all broadcast, all multicast, all physical */ 841 mode = RFCR_AAB | RFCR_AAM | RFCR_AAP; 842 } else if ((dp->rxmode & RXMODE_ALLMULTI) || 843 dp->mc_count > hash_size*16/2) { 844 /* all broadcast, all multicast, physical for the chip */ 845 mode = RFCR_AAB | RFCR_AAM; 846 } else { 847 /* all broadcast, physical for the chip */ 848 mode = RFCR_AAB; 849 } 850 851 /* make hash table */ 852 for (i = 0; i < dp->mc_count; i++) { 853 uint_t h; 854 h = dp->mc_list[i].hash >> hash_shift; 855 hash_tbl[h / 16] |= 1 << (h % 16); 856 } 857 858 if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) { 859 /* Disable Rx filter and load mac address */ 860 for (i = 0; i < ETHERADDRL/2; i++) { 861 /* For sis900, index is in word */ 862 OUTL(dp, RFCR, 863 (RFADDR_MAC_SIS900+i) << RFCR_RFADDR_SHIFT_SIS900); 864 OUTL(dp, RFDR, (mac[i*2+1] << 8) | mac[i*2]); 865 } 866 867 bcopy(mac, lp->mac_addr, ETHERADDRL); 868 } 869 870 /* Load Multicast hash table */ 871 for (i = 0; i < hash_size; i++) { 872 /* For sis900, index is in word */ 873 OUTL(dp, RFCR, 874 (RFADDR_MULTICAST_SIS900 + i) << RFCR_RFADDR_SHIFT_SIS900); 875 OUTL(dp, RFDR, hash_tbl[i]); 876 } 877 878 /* Load rx filter mode and enable rx filter */ 879 OUTL(dp, RFCR, RFCR_RFEN | mode); 880 881 return (GEM_SUCCESS); 882 } 883 884 static int 885 sfe_start_chip(struct gem_dev *dp) 886 { 887 struct sfe_dev *lp = dp->private; 888 889 DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__)); 890 891 /* 892 * setup interrupt mask, which shouldn't include ISR_TOK 893 * to improve performance. 894 */ 895 lp->our_intr_bits = OUR_INTR_BITS; 896 897 /* enable interrupt */ 898 if ((dp->misc_flag & GEM_NOINTR) == 0) { 899 OUTL(dp, IER, 1); 900 OUTL(dp, IMR, lp->our_intr_bits); 901 } 902 903 /* Kick RX */ 904 OUTL(dp, CR, lp->cr | CR_RXE); 905 906 return (GEM_SUCCESS); 907 } 908 909 /* 910 * Stop nic core gracefully. 911 */ 912 static int 913 sfe_stop_chip(struct gem_dev *dp) 914 { 915 struct sfe_dev *lp = dp->private; 916 uint32_t done; 917 int i; 918 uint32_t val; 919 920 DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__)); 921 922 /* 923 * Although we inhibit interrupt here, we don't clear soft copy of 924 * interrupt mask to avoid bogus interrupts. 925 */ 926 OUTL(dp, IMR, 0); 927 928 /* stop TX and RX immediately */ 929 OUTL(dp, CR, lp->cr | CR_TXR | CR_RXR); 930 931 done = 0; 932 for (i = 0; done != (ISR_RXRCMP | ISR_TXRCMP); i++) { 933 if (i > 1000) { 934 /* 935 * As gem layer will call sfe_reset_chip(), 936 * we don't neet to reset futher 937 */ 938 cmn_err(CE_NOTE, "!%s: %s: Tx/Rx reset timeout", 939 dp->name, __func__); 940 941 return (GEM_FAILURE); 942 } 943 val = INL(dp, ISR); 944 done |= val & (ISR_RXRCMP | ISR_TXRCMP); 945 lp->isr_pended |= val & lp->our_intr_bits; 946 drv_usecwait(10); 947 } 948 949 return (GEM_SUCCESS); 950 } 951 952 /* 953 * Setup media mode 954 */ 955 static uint_t 956 sfe_mxdma_value[] = { 512, 4, 8, 16, 32, 64, 128, 256, }; 957 958 static uint_t 959 sfe_encode_mxdma(uint_t burstsize) 960 { 961 int i; 962 963 if (burstsize > 256) { 964 /* choose 512 */ 965 return (0); 966 } 967 968 for (i = 1; i < 8; i++) { 969 if (burstsize <= sfe_mxdma_value[i]) { 970 break; 971 } 972 } 973 return (i); 974 } 975 976 static int 977 sfe_set_media(struct gem_dev *dp) 978 { 979 uint32_t txcfg; 980 uint32_t rxcfg; 981 uint32_t pcr; 982 uint32_t val; 983 uint32_t txmxdma; 984 uint32_t rxmxdma; 985 struct sfe_dev *lp = dp->private; 986 #ifdef DEBUG_LEVEL 987 extern int gem_speed_value[]; 988 #endif 989 DPRINTF(2, (CE_CONT, CONS "%s: %s: %s duplex, %d Mbps", 990 dp->name, __func__, 991 dp->full_duplex ? "full" : "half", gem_speed_value[dp->speed])); 992 993 /* initialize txcfg and rxcfg */ 994 txcfg = TXCFG_ATP; 995 if (dp->full_duplex) { 996 txcfg |= (TXCFG_CSI | TXCFG_HBI); 997 } 998 rxcfg = RXCFG_AEP | RXCFG_ARP; 999 if (dp->full_duplex) { 1000 rxcfg |= RXCFG_ATX; 1001 } 1002 1003 /* select txmxdma and rxmxdma, maxmum burst length */ 1004 if (lp->chip->chip_type == CHIPTYPE_SIS900) { 1005 #ifdef DEBUG_SIS900_EDB 1006 val = CFG_EDB_MASTER; 1007 #else 1008 val = INL(dp, CFG) & CFG_EDB_MASTER; 1009 #endif 1010 if (val) { 1011 /* 1012 * sis900 built-in cores: 1013 * max burst length must be fixed to 64 1014 */ 1015 txmxdma = 64; 1016 rxmxdma = 64; 1017 } else { 1018 /* 1019 * sis900 pci chipset: 1020 * the vendor recommended to fix max burst length 1021 * to 512 1022 */ 1023 txmxdma = 512; 1024 rxmxdma = 512; 1025 } 1026 } else { 1027 /* 1028 * NS dp83815/816: 1029 * use user defined or default for tx/rx max burst length 1030 */ 1031 txmxdma = max(dp->txmaxdma, 256); 1032 rxmxdma = max(dp->rxmaxdma, 256); 1033 } 1034 1035 1036 /* tx high water mark */ 1037 lp->tx_drain_threshold = ROUNDUP2(dp->txthr, TXCFG_FIFO_UNIT); 1038 1039 /* determine tx_fill_threshold accroding drain threshold */ 1040 lp->tx_fill_threshold = 1041 TXFIFOSIZE - lp->tx_drain_threshold - TXCFG_FIFO_UNIT; 1042 1043 /* tune txmxdma not to exceed tx_fill_threshold */ 1044 for (; ; ) { 1045 /* normalize txmxdma requested */ 1046 val = sfe_encode_mxdma(txmxdma); 1047 txmxdma = sfe_mxdma_value[val]; 1048 1049 if (txmxdma <= lp->tx_fill_threshold) { 1050 break; 1051 } 1052 /* select new txmxdma */ 1053 txmxdma = txmxdma / 2; 1054 } 1055 txcfg |= val << TXCFG_MXDMA_SHIFT; 1056 1057 /* encode rxmxdma, maxmum burst length for rx */ 1058 val = sfe_encode_mxdma(rxmxdma); 1059 rxcfg |= val << RXCFG_MXDMA_SHIFT; 1060 rxmxdma = sfe_mxdma_value[val]; 1061 1062 /* receive starting threshold - it have only 5bit-wide field */ 1063 val = ROUNDUP2(max(dp->rxthr, ETHERMIN), RXCFG_FIFO_UNIT); 1064 lp->rx_drain_threshold = 1065 min(val, (RXCFG_DRTH >> RXCFG_DRTH_SHIFT) * RXCFG_FIFO_UNIT); 1066 1067 DPRINTF(0, (CE_CONT, 1068 "%s: %s: tx: drain:%d(rest %d) fill:%d mxdma:%d," 1069 " rx: drain:%d mxdma:%d", 1070 dp->name, __func__, 1071 lp->tx_drain_threshold, TXFIFOSIZE - lp->tx_drain_threshold, 1072 lp->tx_fill_threshold, txmxdma, 1073 lp->rx_drain_threshold, rxmxdma)); 1074 1075 ASSERT(lp->tx_drain_threshold < 64*TXCFG_FIFO_UNIT); 1076 ASSERT(lp->tx_fill_threshold < 64*TXCFG_FIFO_UNIT); 1077 ASSERT(lp->rx_drain_threshold < 32*RXCFG_FIFO_UNIT); 1078 1079 txcfg |= ((lp->tx_fill_threshold/TXCFG_FIFO_UNIT) << TXCFG_FLTH_SHIFT) 1080 | (lp->tx_drain_threshold/TXCFG_FIFO_UNIT); 1081 OUTL(dp, TXCFG, txcfg); 1082 1083 rxcfg |= ((lp->rx_drain_threshold/RXCFG_FIFO_UNIT) << RXCFG_DRTH_SHIFT); 1084 if (lp->chip->chip_type == CHIPTYPE_DP83815) { 1085 rxcfg |= RXCFG_ALP_DP83815; 1086 } 1087 OUTL(dp, RXCFG, rxcfg); 1088 1089 DPRINTF(0, (CE_CONT, CONS "%s: %s: txcfg:%b rxcfg:%b", 1090 dp->name, __func__, 1091 txcfg, TXCFG_BITS, rxcfg, RXCFG_BITS)); 1092 1093 /* Flow control */ 1094 if (lp->chip->chip_type == CHIPTYPE_DP83815) { 1095 pcr = INL(dp, PCR); 1096 switch (dp->flow_control) { 1097 case FLOW_CONTROL_SYMMETRIC: 1098 case FLOW_CONTROL_RX_PAUSE: 1099 OUTL(dp, PCR, pcr | PCR_PSEN | PCR_PS_MCAST); 1100 break; 1101 1102 default: 1103 OUTL(dp, PCR, 1104 pcr & ~(PCR_PSEN | PCR_PS_MCAST | PCR_PS_DA)); 1105 break; 1106 } 1107 DPRINTF(2, (CE_CONT, CONS "%s: PCR: %b", dp->name, 1108 INL(dp, PCR), PCR_BITS)); 1109 1110 } else if (lp->chip->chip_type == CHIPTYPE_SIS900) { 1111 switch (dp->flow_control) { 1112 case FLOW_CONTROL_SYMMETRIC: 1113 case FLOW_CONTROL_RX_PAUSE: 1114 OUTL(dp, FLOWCTL, FLOWCTL_FLOWEN); 1115 break; 1116 default: 1117 OUTL(dp, FLOWCTL, 0); 1118 break; 1119 } 1120 DPRINTF(2, (CE_CONT, CONS "%s: FLOWCTL: %b", 1121 dp->name, INL(dp, FLOWCTL), FLOWCTL_BITS)); 1122 } 1123 return (GEM_SUCCESS); 1124 } 1125 1126 static int 1127 sfe_get_stats(struct gem_dev *dp) 1128 { 1129 /* do nothing */ 1130 return (GEM_SUCCESS); 1131 } 1132 1133 /* 1134 * descriptor manipulations 1135 */ 1136 static int 1137 sfe_tx_desc_write(struct gem_dev *dp, int slot, 1138 ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags) 1139 { 1140 uint32_t mark; 1141 struct sfe_desc *tdp; 1142 ddi_dma_cookie_t *dcp; 1143 uint32_t tmp0; 1144 #if DEBUG_LEVEL > 2 1145 int i; 1146 1147 cmn_err(CE_CONT, 1148 CONS "%s: time:%d %s seqnum: %d, slot %d, frags: %d flags: %llx", 1149 dp->name, ddi_get_lbolt(), __func__, 1150 dp->tx_desc_tail, slot, frags, flags); 1151 1152 for (i = 0; i < frags; i++) { 1153 cmn_err(CE_CONT, CONS "%d: addr: 0x%x, len: 0x%x", 1154 i, dmacookie[i].dmac_address, dmacookie[i].dmac_size); 1155 } 1156 #endif 1157 /* 1158 * write tx descriptor in reversed order. 1159 */ 1160 #if DEBUG_LEVEL > 3 1161 flags |= GEM_TXFLAG_INTR; 1162 #endif 1163 mark = (flags & GEM_TXFLAG_INTR) 1164 ? (CMDSTS_OWN | CMDSTS_INTR) : CMDSTS_OWN; 1165 1166 ASSERT(frags == 1); 1167 dcp = &dmacookie[0]; 1168 if (flags & GEM_TXFLAG_HEAD) { 1169 mark &= ~CMDSTS_OWN; 1170 } 1171 1172 tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot]; 1173 tmp0 = (uint32_t)dcp->dmac_address; 1174 mark |= (uint32_t)dcp->dmac_size; 1175 tdp->d_bufptr = LE_32(tmp0); 1176 tdp->d_cmdsts = LE_32(mark); 1177 1178 return (frags); 1179 } 1180 1181 static void 1182 sfe_tx_start(struct gem_dev *dp, int start_slot, int nslot) 1183 { 1184 uint_t tx_ring_size = dp->gc.gc_tx_ring_size; 1185 struct sfe_desc *tdp; 1186 struct sfe_dev *lp = dp->private; 1187 1188 if (nslot > 1) { 1189 gem_tx_desc_dma_sync(dp, 1190 SLOT(start_slot + 1, tx_ring_size), 1191 nslot - 1, DDI_DMA_SYNC_FORDEV); 1192 } 1193 1194 tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * start_slot]; 1195 tdp->d_cmdsts |= LE_32(CMDSTS_OWN); 1196 1197 gem_tx_desc_dma_sync(dp, start_slot, 1, DDI_DMA_SYNC_FORDEV); 1198 1199 /* 1200 * Let the Transmit Buffer Manager Fill state machine active. 1201 */ 1202 if (dp->mac_active) { 1203 OUTL(dp, CR, lp->cr | CR_TXE); 1204 } 1205 } 1206 1207 static void 1208 sfe_rx_desc_write(struct gem_dev *dp, int slot, 1209 ddi_dma_cookie_t *dmacookie, int frags) 1210 { 1211 struct sfe_desc *rdp; 1212 uint32_t tmp0; 1213 uint32_t tmp1; 1214 #if DEBUG_LEVEL > 2 1215 int i; 1216 1217 ASSERT(frags == 1); 1218 1219 cmn_err(CE_CONT, CONS 1220 "%s: %s seqnum: %d, slot %d, frags: %d", 1221 dp->name, __func__, dp->rx_active_tail, slot, frags); 1222 for (i = 0; i < frags; i++) { 1223 cmn_err(CE_CONT, CONS " frag: %d addr: 0x%llx, len: 0x%lx", 1224 i, dmacookie[i].dmac_address, dmacookie[i].dmac_size); 1225 } 1226 #endif 1227 /* for the last slot of the packet */ 1228 rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot]; 1229 1230 tmp0 = (uint32_t)dmacookie->dmac_address; 1231 tmp1 = CMDSTS_INTR | (uint32_t)dmacookie->dmac_size; 1232 rdp->d_bufptr = LE_32(tmp0); 1233 rdp->d_cmdsts = LE_32(tmp1); 1234 } 1235 1236 static uint_t 1237 sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc) 1238 { 1239 uint_t tx_ring_size = dp->gc.gc_tx_ring_size; 1240 struct sfe_desc *tdp; 1241 uint32_t status; 1242 int cols; 1243 struct sfe_dev *lp = dp->private; 1244 #ifdef DEBUG_LEVEL 1245 int i; 1246 clock_t delay; 1247 #endif 1248 /* check status of the last descriptor */ 1249 tdp = (void *) 1250 &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot + ndesc - 1, tx_ring_size)]; 1251 1252 /* 1253 * Don't use LE_32() directly to refer tdp->d_cmdsts. 1254 * It is not atomic for big endian cpus. 1255 */ 1256 status = tdp->d_cmdsts; 1257 status = LE_32(status); 1258 1259 DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b", 1260 dp->name, ddi_get_lbolt(), __func__, 1261 slot, status, TXSTAT_BITS)); 1262 1263 if (status & CMDSTS_OWN) { 1264 /* 1265 * not yet transmitted 1266 */ 1267 /* workaround for tx hang */ 1268 if (lp->chip->chip_type == CHIPTYPE_DP83815 && 1269 dp->mac_active) { 1270 OUTL(dp, CR, lp->cr | CR_TXE); 1271 } 1272 return (0); 1273 } 1274 1275 if (status & CMDSTS_MORE) { 1276 /* XXX - the hardware problem but don't panic the system */ 1277 /* avoid lint bug for %b format string including 32nd bit */ 1278 cmn_err(CE_NOTE, CONS 1279 "%s: tx status bits incorrect: slot:%d, status:0x%x", 1280 dp->name, slot, status); 1281 } 1282 1283 #if DEBUG_LEVEL > 3 1284 delay = (ddi_get_lbolt() - dp->tx_buf_head->txb_stime) * 10; 1285 if (delay >= 50) { 1286 DPRINTF(0, (CE_NOTE, "%s: tx deferred %d mS: slot %d", 1287 dp->name, delay, slot)); 1288 } 1289 #endif 1290 1291 #if DEBUG_LEVEL > 3 1292 for (i = 0; i < nfrag-1; i++) { 1293 uint32_t s; 1294 int n; 1295 1296 n = SLOT(slot + i, tx_ring_size); 1297 s = LE_32( 1298 ((struct sfe_desc *)((void *) 1299 &dp->tx_ring[SFE_DESC_SIZE * n]))->d_cmdsts); 1300 1301 ASSERT(s & CMDSTS_MORE); 1302 ASSERT((s & CMDSTS_OWN) == 0); 1303 } 1304 #endif 1305 1306 /* 1307 * collect statistics 1308 */ 1309 if ((status & CMDSTS_OK) == 0) { 1310 1311 /* failed to transmit the packet */ 1312 1313 DPRINTF(0, (CE_CONT, CONS "%s: Transmit error, Tx status %b", 1314 dp->name, status, TXSTAT_BITS)); 1315 1316 dp->stats.errxmt++; 1317 1318 if (status & CMDSTS_TFU) { 1319 dp->stats.underflow++; 1320 } else if (status & CMDSTS_CRS) { 1321 dp->stats.nocarrier++; 1322 } else if (status & CMDSTS_OWC) { 1323 dp->stats.xmtlatecoll++; 1324 } else if ((!dp->full_duplex) && (status & CMDSTS_EC)) { 1325 dp->stats.excoll++; 1326 dp->stats.collisions += 16; 1327 } else { 1328 dp->stats.xmit_internal_err++; 1329 } 1330 } else if (!dp->full_duplex) { 1331 cols = (status >> CMDSTS_CCNT_SHIFT) & CCNT_MASK; 1332 1333 if (cols > 0) { 1334 if (cols == 1) { 1335 dp->stats.first_coll++; 1336 } else /* (cols > 1) */ { 1337 dp->stats.multi_coll++; 1338 } 1339 dp->stats.collisions += cols; 1340 } else if (status & CMDSTS_TD) { 1341 dp->stats.defer++; 1342 } 1343 } 1344 return (GEM_TX_DONE); 1345 } 1346 1347 static uint64_t 1348 sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc) 1349 { 1350 struct sfe_desc *rdp; 1351 uint_t len; 1352 uint_t flag; 1353 uint32_t status; 1354 1355 flag = GEM_RX_DONE; 1356 1357 /* Dont read ISR because we cannot ack only to rx interrupt. */ 1358 1359 rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot]; 1360 1361 /* 1362 * Don't use LE_32() directly to refer rdp->d_cmdsts. 1363 * It is not atomic for big endian cpus. 1364 */ 1365 status = rdp->d_cmdsts; 1366 status = LE_32(status); 1367 1368 DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b", 1369 dp->name, ddi_get_lbolt(), __func__, 1370 slot, status, RXSTAT_BITS)); 1371 1372 if ((status & CMDSTS_OWN) == 0) { 1373 /* 1374 * No more received packets because 1375 * this buffer is owned by NIC. 1376 */ 1377 return (0); 1378 } 1379 1380 #define RX_ERR_BITS \ 1381 (CMDSTS_RXA | CMDSTS_RXO | CMDSTS_LONG | CMDSTS_RUNT | \ 1382 CMDSTS_ISE | CMDSTS_CRCE | CMDSTS_FAE | CMDSTS_MORE) 1383 1384 if (status & RX_ERR_BITS) { 1385 /* 1386 * Packet with error received 1387 */ 1388 DPRINTF(0, (CE_CONT, CONS "%s: Corrupted packet " 1389 "received, buffer status: %b", 1390 dp->name, status, RXSTAT_BITS)); 1391 1392 /* collect statistics information */ 1393 dp->stats.errrcv++; 1394 1395 if (status & CMDSTS_RXO) { 1396 dp->stats.overflow++; 1397 } else if (status & (CMDSTS_LONG | CMDSTS_MORE)) { 1398 dp->stats.frame_too_long++; 1399 } else if (status & CMDSTS_RUNT) { 1400 dp->stats.runt++; 1401 } else if (status & (CMDSTS_ISE | CMDSTS_FAE)) { 1402 dp->stats.frame++; 1403 } else if (status & CMDSTS_CRCE) { 1404 dp->stats.crc++; 1405 } else { 1406 dp->stats.rcv_internal_err++; 1407 } 1408 1409 return (flag | GEM_RX_ERR); 1410 } 1411 1412 /* 1413 * this packet was received without errors 1414 */ 1415 if ((len = (status & CMDSTS_SIZE)) >= ETHERFCSL) { 1416 len -= ETHERFCSL; 1417 } 1418 1419 #if DEBUG_LEVEL > 10 1420 { 1421 int i; 1422 uint8_t *bp = dp->rx_buf_head->rxb_buf; 1423 1424 cmn_err(CE_CONT, CONS "%s: len:%d", dp->name, len); 1425 1426 for (i = 0; i < 60; i += 10) { 1427 cmn_err(CE_CONT, CONS 1428 "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x", 1429 bp[0], bp[1], bp[2], bp[3], bp[4], 1430 bp[5], bp[6], bp[7], bp[8], bp[9]); 1431 } 1432 bp += 10; 1433 } 1434 #endif 1435 return (flag | (len & GEM_RX_LEN)); 1436 } 1437 1438 static void 1439 sfe_tx_desc_init(struct gem_dev *dp, int slot) 1440 { 1441 uint_t tx_ring_size = dp->gc.gc_tx_ring_size; 1442 struct sfe_desc *tdp; 1443 uint32_t here; 1444 1445 tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot]; 1446 1447 /* don't clear d_link field, which have a valid pointer */ 1448 tdp->d_cmdsts = 0; 1449 1450 /* make a link to this from the previous descriptor */ 1451 here = ((uint32_t)dp->tx_ring_dma) + SFE_DESC_SIZE*slot; 1452 1453 tdp = (void *) 1454 &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot - 1, tx_ring_size)]; 1455 tdp->d_link = LE_32(here); 1456 } 1457 1458 static void 1459 sfe_rx_desc_init(struct gem_dev *dp, int slot) 1460 { 1461 uint_t rx_ring_size = dp->gc.gc_rx_ring_size; 1462 struct sfe_desc *rdp; 1463 uint32_t here; 1464 1465 rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot]; 1466 1467 /* don't clear d_link field, which have a valid pointer */ 1468 rdp->d_cmdsts = LE_32(CMDSTS_OWN); 1469 1470 /* make a link to this from the previous descriptor */ 1471 here = ((uint32_t)dp->rx_ring_dma) + SFE_DESC_SIZE*slot; 1472 1473 rdp = (void *) 1474 &dp->rx_ring[SFE_DESC_SIZE * SLOT(slot - 1, rx_ring_size)]; 1475 rdp->d_link = LE_32(here); 1476 } 1477 1478 static void 1479 sfe_tx_desc_clean(struct gem_dev *dp, int slot) 1480 { 1481 struct sfe_desc *tdp; 1482 1483 tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot]; 1484 tdp->d_cmdsts = 0; 1485 } 1486 1487 static void 1488 sfe_rx_desc_clean(struct gem_dev *dp, int slot) 1489 { 1490 struct sfe_desc *rdp; 1491 1492 rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot]; 1493 rdp->d_cmdsts = LE_32(CMDSTS_OWN); 1494 } 1495 1496 /* 1497 * Device depend interrupt handler 1498 */ 1499 static uint_t 1500 sfe_interrupt(struct gem_dev *dp) 1501 { 1502 uint_t rx_ring_size = dp->gc.gc_rx_ring_size; 1503 uint32_t isr; 1504 uint32_t isr_bogus; 1505 uint_t flags = 0; 1506 boolean_t need_to_reset = B_FALSE; 1507 struct sfe_dev *lp = dp->private; 1508 1509 /* read reason and clear interrupt */ 1510 isr = INL(dp, ISR); 1511 1512 isr_bogus = lp->isr_pended; 1513 lp->isr_pended = 0; 1514 1515 if (((isr | isr_bogus) & lp->our_intr_bits) == 0) { 1516 /* we are not the interrupt source */ 1517 return (DDI_INTR_UNCLAIMED); 1518 } 1519 1520 DPRINTF(3, (CE_CONT, 1521 CONS "%s: time:%ld %s:called: isr:0x%b rx_active_head: %d", 1522 dp->name, ddi_get_lbolt(), __func__, 1523 isr, INTR_BITS, dp->rx_active_head)); 1524 1525 if (!dp->mac_active) { 1526 /* the device is going to stop */ 1527 lp->our_intr_bits = 0; 1528 return (DDI_INTR_CLAIMED); 1529 } 1530 1531 isr &= lp->our_intr_bits; 1532 1533 if (isr & (ISR_RXSOVR | ISR_RXORN | ISR_RXIDLE | ISR_RXERR | 1534 ISR_RXDESC | ISR_RXOK)) { 1535 (void) gem_receive(dp); 1536 1537 if (isr & (ISR_RXSOVR | ISR_RXORN)) { 1538 DPRINTF(0, (CE_CONT, 1539 CONS "%s: rx fifo overrun: isr %b", 1540 dp->name, isr, INTR_BITS)); 1541 /* no need restart rx */ 1542 dp->stats.overflow++; 1543 } 1544 1545 if (isr & ISR_RXIDLE) { 1546 DPRINTF(0, (CE_CONT, 1547 CONS "%s: rx buffer ran out: isr %b", 1548 dp->name, isr, INTR_BITS)); 1549 1550 dp->stats.norcvbuf++; 1551 1552 /* 1553 * Make RXDP points the head of receive 1554 * buffer list. 1555 */ 1556 OUTL(dp, RXDP, dp->rx_ring_dma + 1557 SFE_DESC_SIZE * 1558 SLOT(dp->rx_active_head, rx_ring_size)); 1559 1560 /* Restart the receive engine */ 1561 OUTL(dp, CR, lp->cr | CR_RXE); 1562 } 1563 } 1564 1565 if (isr & (ISR_TXURN | ISR_TXERR | ISR_TXDESC | 1566 ISR_TXIDLE | ISR_TXOK)) { 1567 /* need to reclaim tx buffers */ 1568 if (gem_tx_done(dp)) { 1569 flags |= INTR_RESTART_TX; 1570 } 1571 /* 1572 * XXX - tx error statistics will be counted in 1573 * sfe_tx_desc_stat() and no need to restart tx on errors. 1574 */ 1575 } 1576 1577 if (isr & (ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT)) { 1578 cmn_err(CE_WARN, "%s: ERROR interrupt: isr %b.", 1579 dp->name, isr, INTR_BITS); 1580 need_to_reset = B_TRUE; 1581 } 1582 reset: 1583 if (need_to_reset) { 1584 (void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF); 1585 flags |= INTR_RESTART_TX; 1586 } 1587 1588 DPRINTF(5, (CE_CONT, CONS "%s: %s: return: isr: %b", 1589 dp->name, __func__, isr, INTR_BITS)); 1590 1591 return (DDI_INTR_CLAIMED | flags); 1592 } 1593 1594 /* ======================================================== */ 1595 /* 1596 * HW depend MII routine 1597 */ 1598 /* ======================================================== */ 1599 1600 /* 1601 * MII routines for NS DP83815 1602 */ 1603 static void 1604 sfe_mii_sync_dp83815(struct gem_dev *dp) 1605 { 1606 /* do nothing */ 1607 } 1608 1609 static uint16_t 1610 sfe_mii_read_dp83815(struct gem_dev *dp, uint_t offset) 1611 { 1612 DPRINTF(4, (CE_CONT, CONS"%s: %s: offset 0x%x", 1613 dp->name, __func__, offset)); 1614 return ((uint16_t)INL(dp, MII_REGS_BASE + offset*4)); 1615 } 1616 1617 static void 1618 sfe_mii_write_dp83815(struct gem_dev *dp, uint_t offset, uint16_t val) 1619 { 1620 DPRINTF(4, (CE_CONT, CONS"%s: %s: offset 0x%x 0x%x", 1621 dp->name, __func__, offset, val)); 1622 OUTL(dp, MII_REGS_BASE + offset*4, val); 1623 } 1624 1625 static int 1626 sfe_mii_config_dp83815(struct gem_dev *dp) 1627 { 1628 uint32_t srr; 1629 1630 srr = INL(dp, SRR) & SRR_REV; 1631 1632 DPRINTF(0, (CE_CONT, CONS "%s: srr:0x%04x %04x %04x %04x %04x %04x", 1633 dp->name, srr, 1634 INW(dp, 0x00cc), /* PGSEL */ 1635 INW(dp, 0x00e4), /* PMDCSR */ 1636 INW(dp, 0x00fc), /* TSTDAT */ 1637 INW(dp, 0x00f4), /* DSPCFG */ 1638 INW(dp, 0x00f8))); /* SDCFG */ 1639 1640 if (srr == SRR_REV_DP83815CVNG) { 1641 /* 1642 * NS datasheet says that DP83815CVNG needs following 1643 * registers to be patched for optimizing its performance. 1644 * A report said that CRC errors on RX disappeared 1645 * with the patch. 1646 */ 1647 OUTW(dp, 0x00cc, 0x0001); /* PGSEL */ 1648 OUTW(dp, 0x00e4, 0x189c); /* PMDCSR */ 1649 OUTW(dp, 0x00fc, 0x0000); /* TSTDAT */ 1650 OUTW(dp, 0x00f4, 0x5040); /* DSPCFG */ 1651 OUTW(dp, 0x00f8, 0x008c); /* SDCFG */ 1652 OUTW(dp, 0x00cc, 0x0000); /* PGSEL */ 1653 1654 DPRINTF(0, (CE_CONT, 1655 CONS "%s: PHY patched %04x %04x %04x %04x %04x", 1656 dp->name, 1657 INW(dp, 0x00cc), /* PGSEL */ 1658 INW(dp, 0x00e4), /* PMDCSR */ 1659 INW(dp, 0x00fc), /* TSTDAT */ 1660 INW(dp, 0x00f4), /* DSPCFG */ 1661 INW(dp, 0x00f8))); /* SDCFG */ 1662 } else if (((srr ^ SRR_REV_DP83815DVNG) & 0xff00) == 0 || 1663 ((srr ^ SRR_REV_DP83816AVNG) & 0xff00) == 0) { 1664 /* 1665 * Additional packets for later chipset 1666 */ 1667 OUTW(dp, 0x00cc, 0x0001); /* PGSEL */ 1668 OUTW(dp, 0x00e4, 0x189c); /* PMDCSR */ 1669 OUTW(dp, 0x00cc, 0x0000); /* PGSEL */ 1670 1671 DPRINTF(0, (CE_CONT, 1672 CONS "%s: PHY patched %04x %04x", 1673 dp->name, 1674 INW(dp, 0x00cc), /* PGSEL */ 1675 INW(dp, 0x00e4))); /* PMDCSR */ 1676 } 1677 1678 return (gem_mii_config_default(dp)); 1679 } 1680 1681 static int 1682 sfe_mii_probe_dp83815(struct gem_dev *dp) 1683 { 1684 uint32_t val; 1685 1686 /* try external phy first */ 1687 DPRINTF(0, (CE_CONT, CONS "%s: %s: trying external phy", 1688 dp->name, __func__)); 1689 dp->mii_phy_addr = 0; 1690 dp->gc.gc_mii_sync = &sfe_mii_sync_sis900; 1691 dp->gc.gc_mii_read = &sfe_mii_read_sis900; 1692 dp->gc.gc_mii_write = &sfe_mii_write_sis900; 1693 1694 val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG); 1695 OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS); 1696 1697 if (gem_mii_probe_default(dp) == GEM_SUCCESS) { 1698 return (GEM_SUCCESS); 1699 } 1700 1701 /* switch to internal phy */ 1702 DPRINTF(0, (CE_CONT, CONS "%s: %s: switching to internal phy", 1703 dp->name, __func__)); 1704 dp->mii_phy_addr = -1; 1705 dp->gc.gc_mii_sync = &sfe_mii_sync_dp83815; 1706 dp->gc.gc_mii_read = &sfe_mii_read_dp83815; 1707 dp->gc.gc_mii_write = &sfe_mii_write_dp83815; 1708 1709 val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG); 1710 OUTL(dp, CFG, val | CFG_PAUSE_ADV | CFG_PHY_RST); 1711 drv_usecwait(100); /* keep to assert RST bit for a while */ 1712 OUTL(dp, CFG, val | CFG_PAUSE_ADV); 1713 1714 /* wait for PHY reset */ 1715 delay(drv_usectohz(10000)); 1716 1717 return (gem_mii_probe_default(dp)); 1718 } 1719 1720 static int 1721 sfe_mii_init_dp83815(struct gem_dev *dp) 1722 { 1723 uint32_t val; 1724 1725 val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG); 1726 1727 if (dp->mii_phy_addr == -1) { 1728 /* select internal phy */ 1729 OUTL(dp, CFG, val | CFG_PAUSE_ADV); 1730 } else { 1731 /* select external phy */ 1732 OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS); 1733 } 1734 1735 return (GEM_SUCCESS); 1736 } 1737 1738 /* 1739 * MII routines for SiS900 1740 */ 1741 #define MDIO_DELAY(dp) {(void) INL(dp, MEAR); (void) INL(dp, MEAR); } 1742 static void 1743 sfe_mii_sync_sis900(struct gem_dev *dp) 1744 { 1745 int i; 1746 1747 /* send 32 ONE's to make MII line idle */ 1748 for (i = 0; i < 32; i++) { 1749 OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO); 1750 MDIO_DELAY(dp); 1751 OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO | MEAR_MDC); 1752 MDIO_DELAY(dp); 1753 } 1754 } 1755 1756 static int 1757 sfe_mii_config_sis900(struct gem_dev *dp) 1758 { 1759 struct sfe_dev *lp = dp->private; 1760 1761 /* Do chip depend setup */ 1762 if ((dp->mii_phy_id & PHY_MASK) == PHY_ICS1893) { 1763 /* workaround for ICS1893 PHY */ 1764 gem_mii_write(dp, 0x0018, 0xD200); 1765 } 1766 1767 if (lp->revid == SIS630E_900_REV) { 1768 /* 1769 * SiS 630E has bugs on default values 1770 * of PHY registers 1771 */ 1772 gem_mii_write(dp, MII_AN_ADVERT, 0x05e1); 1773 gem_mii_write(dp, MII_CONFIG1, 0x0022); 1774 gem_mii_write(dp, MII_CONFIG2, 0xff00); 1775 gem_mii_write(dp, MII_MASK, 0xffc0); 1776 } 1777 sfe_set_eq_sis630(dp); 1778 1779 return (gem_mii_config_default(dp)); 1780 } 1781 1782 static uint16_t 1783 sfe_mii_read_sis900(struct gem_dev *dp, uint_t reg) 1784 { 1785 uint32_t cmd; 1786 uint16_t ret; 1787 int i; 1788 uint32_t data; 1789 1790 cmd = MII_READ_CMD(dp->mii_phy_addr, reg); 1791 1792 for (i = 31; i >= 18; i--) { 1793 data = ((cmd >> i) & 1) << MEAR_MDIO_SHIFT; 1794 OUTL(dp, MEAR, data | MEAR_MDDIR); 1795 MDIO_DELAY(dp); 1796 OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC); 1797 MDIO_DELAY(dp); 1798 } 1799 1800 /* turn around cycle */ 1801 OUTL(dp, MEAR, 0); 1802 MDIO_DELAY(dp); 1803 1804 /* get response from PHY */ 1805 OUTL(dp, MEAR, MEAR_MDC); 1806 MDIO_DELAY(dp); 1807 1808 OUTL(dp, MEAR, 0); 1809 #if DEBUG_LEBEL > 0 1810 (void) INL(dp, MEAR); /* delay */ 1811 if (INL(dp, MEAR) & MEAR_MDIO) { 1812 cmn_err(CE_WARN, "%s: PHY@%d not responded", 1813 dp->name, dp->mii_phy_addr); 1814 } 1815 #else 1816 MDIO_DELAY(dp); 1817 #endif 1818 /* terminate response cycle */ 1819 OUTL(dp, MEAR, MEAR_MDC); 1820 MDIO_DELAY(dp); 1821 1822 ret = 0; /* to avoid lint errors */ 1823 for (i = 16; i > 0; i--) { 1824 OUTL(dp, MEAR, 0); 1825 (void) INL(dp, MEAR); /* delay */ 1826 ret = (ret << 1) | ((INL(dp, MEAR) >> MEAR_MDIO_SHIFT) & 1); 1827 OUTL(dp, MEAR, MEAR_MDC); 1828 MDIO_DELAY(dp); 1829 } 1830 1831 /* send two idle(Z) bits to terminate the read cycle */ 1832 for (i = 0; i < 2; i++) { 1833 OUTL(dp, MEAR, 0); 1834 MDIO_DELAY(dp); 1835 OUTL(dp, MEAR, MEAR_MDC); 1836 MDIO_DELAY(dp); 1837 } 1838 1839 return (ret); 1840 } 1841 1842 static void 1843 sfe_mii_write_sis900(struct gem_dev *dp, uint_t reg, uint16_t val) 1844 { 1845 uint32_t cmd; 1846 int i; 1847 uint32_t data; 1848 1849 cmd = MII_WRITE_CMD(dp->mii_phy_addr, reg, val); 1850 1851 for (i = 31; i >= 0; i--) { 1852 data = ((cmd >> i) & 1) << MEAR_MDIO_SHIFT; 1853 OUTL(dp, MEAR, data | MEAR_MDDIR); 1854 MDIO_DELAY(dp); 1855 OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC); 1856 MDIO_DELAY(dp); 1857 } 1858 1859 /* send two idle(Z) bits to terminate the write cycle. */ 1860 for (i = 0; i < 2; i++) { 1861 OUTL(dp, MEAR, 0); 1862 MDIO_DELAY(dp); 1863 OUTL(dp, MEAR, MEAR_MDC); 1864 MDIO_DELAY(dp); 1865 } 1866 } 1867 #undef MDIO_DELAY 1868 1869 static void 1870 sfe_set_eq_sis630(struct gem_dev *dp) 1871 { 1872 uint16_t reg14h; 1873 uint16_t eq_value; 1874 uint16_t max_value; 1875 uint16_t min_value; 1876 int i; 1877 uint8_t rev; 1878 struct sfe_dev *lp = dp->private; 1879 1880 rev = lp->revid; 1881 1882 if (!(rev == SIS630E_900_REV || rev == SIS630EA1_900_REV || 1883 rev == SIS630A_900_REV || rev == SIS630ET_900_REV)) { 1884 /* it doesn't have a internal PHY */ 1885 return; 1886 } 1887 1888 if (dp->mii_state == MII_STATE_LINKUP) { 1889 reg14h = gem_mii_read(dp, MII_RESV); 1890 gem_mii_write(dp, MII_RESV, (0x2200 | reg14h) & 0xBFFF); 1891 1892 eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3; 1893 max_value = min_value = eq_value; 1894 for (i = 1; i < 10; i++) { 1895 eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3; 1896 max_value = max(eq_value, max_value); 1897 min_value = min(eq_value, min_value); 1898 } 1899 1900 /* for 630E, rule to determine the equalizer value */ 1901 if (rev == SIS630E_900_REV || rev == SIS630EA1_900_REV || 1902 rev == SIS630ET_900_REV) { 1903 if (max_value < 5) { 1904 eq_value = max_value; 1905 } else if (5 <= max_value && max_value < 15) { 1906 eq_value = 1907 max(max_value + 1, 1908 min_value + 2); 1909 } else if (15 <= max_value) { 1910 eq_value = 1911 max(max_value + 5, 1912 min_value + 6); 1913 } 1914 } 1915 /* for 630B0&B1, rule to determine the equalizer value */ 1916 else 1917 if (rev == SIS630A_900_REV && 1918 (lp->bridge_revid == SIS630B0 || 1919 lp->bridge_revid == SIS630B1)) { 1920 1921 if (max_value == 0) { 1922 eq_value = 3; 1923 } else { 1924 eq_value = (max_value + min_value + 1)/2; 1925 } 1926 } 1927 /* write equalizer value and setting */ 1928 reg14h = gem_mii_read(dp, MII_RESV) & ~0x02f8; 1929 reg14h |= 0x6000 | (eq_value << 3); 1930 gem_mii_write(dp, MII_RESV, reg14h); 1931 } else { 1932 reg14h = (gem_mii_read(dp, MII_RESV) & ~0x4000) | 0x2000; 1933 if (rev == SIS630A_900_REV && 1934 (lp->bridge_revid == SIS630B0 || 1935 lp->bridge_revid == SIS630B1)) { 1936 1937 reg14h |= 0x0200; 1938 } 1939 gem_mii_write(dp, MII_RESV, reg14h); 1940 } 1941 } 1942 1943 /* ======================================================== */ 1944 /* 1945 * OS depend (device driver) routine 1946 */ 1947 /* ======================================================== */ 1948 static void 1949 sfe_chipinfo_init_sis900(struct gem_dev *dp) 1950 { 1951 int rev; 1952 struct sfe_dev *lp = (struct sfe_dev *)dp->private; 1953 1954 rev = lp->revid; 1955 1956 if (rev == SIS630E_900_REV /* 0x81 */) { 1957 /* sis630E */ 1958 lp->get_mac_addr = &sfe_get_mac_addr_sis630e; 1959 } else if (rev > 0x81 && rev <= 0x90) { 1960 /* 630S, 630EA1, 630ET, 635A */ 1961 lp->get_mac_addr = &sfe_get_mac_addr_sis635; 1962 } else if (rev == SIS962_900_REV /* 0x91 */) { 1963 /* sis962 or later */ 1964 lp->get_mac_addr = &sfe_get_mac_addr_sis962; 1965 } else { 1966 /* sis900 */ 1967 lp->get_mac_addr = &sfe_get_mac_addr_sis900; 1968 } 1969 1970 lp->bridge_revid = 0; 1971 1972 if (rev == SIS630E_900_REV || rev == SIS630EA1_900_REV || 1973 rev == SIS630A_900_REV || rev == SIS630ET_900_REV) { 1974 /* 1975 * read host bridge revision 1976 */ 1977 dev_info_t *bridge; 1978 ddi_acc_handle_t bridge_handle; 1979 1980 if ((bridge = sfe_search_pci_dev(0x1039, 0x630)) == NULL) { 1981 cmn_err(CE_WARN, 1982 "%s: cannot find host bridge (pci1039,630)", 1983 dp->name); 1984 return; 1985 } 1986 1987 if (pci_config_setup(bridge, &bridge_handle) != DDI_SUCCESS) { 1988 cmn_err(CE_WARN, "%s: pci_config_setup failed", 1989 dp->name); 1990 return; 1991 } 1992 1993 lp->bridge_revid = 1994 pci_config_get8(bridge_handle, PCI_CONF_REVID); 1995 pci_config_teardown(&bridge_handle); 1996 } 1997 } 1998 1999 static int 2000 sfe_attach_chip(struct gem_dev *dp) 2001 { 2002 struct sfe_dev *lp = (struct sfe_dev *)dp->private; 2003 2004 DPRINTF(4, (CE_CONT, CONS "!%s: %s called", dp->name, __func__)); 2005 2006 /* setup chip-depend get_mac_address function */ 2007 if (lp->chip->chip_type == CHIPTYPE_SIS900) { 2008 sfe_chipinfo_init_sis900(dp); 2009 } else { 2010 lp->get_mac_addr = &sfe_get_mac_addr_dp83815; 2011 } 2012 2013 /* read MAC address */ 2014 if (!(lp->get_mac_addr)(dp)) { 2015 cmn_err(CE_WARN, 2016 "!%s: %s: failed to get factory mac address" 2017 " please specify a mac address in sfe.conf", 2018 dp->name, __func__); 2019 return (GEM_FAILURE); 2020 } 2021 2022 if (lp->chip->chip_type == CHIPTYPE_DP83815) { 2023 dp->mii_phy_addr = -1; /* no need to scan PHY */ 2024 dp->misc_flag |= GEM_VLAN_SOFT; 2025 dp->txthr += 4; /* VTAG_SIZE */ 2026 } 2027 dp->txthr = min(dp->txthr, TXFIFOSIZE - 2); 2028 2029 return (GEM_SUCCESS); 2030 } 2031 2032 static int 2033 sfeattach(dev_info_t *dip, ddi_attach_cmd_t cmd) 2034 { 2035 int unit; 2036 const char *drv_name; 2037 int i; 2038 ddi_acc_handle_t conf_handle; 2039 uint16_t vid; 2040 uint16_t did; 2041 uint8_t rev; 2042 #ifdef DEBUG_LEVEL 2043 uint32_t iline; 2044 uint8_t latim; 2045 #endif 2046 struct chip_info *p; 2047 struct gem_dev *dp; 2048 struct sfe_dev *lp; 2049 caddr_t base; 2050 ddi_acc_handle_t regs_ha; 2051 struct gem_conf *gcp; 2052 2053 unit = ddi_get_instance(dip); 2054 drv_name = ddi_driver_name(dip); 2055 2056 DPRINTF(3, (CE_CONT, CONS "%s%d: sfeattach: called", drv_name, unit)); 2057 2058 /* 2059 * Common codes after power-up 2060 */ 2061 if (pci_config_setup(dip, &conf_handle) != DDI_SUCCESS) { 2062 cmn_err(CE_WARN, "%s%d: ddi_regs_map_setup failed", 2063 drv_name, unit); 2064 goto err; 2065 } 2066 2067 vid = pci_config_get16(conf_handle, PCI_CONF_VENID); 2068 did = pci_config_get16(conf_handle, PCI_CONF_DEVID); 2069 rev = pci_config_get16(conf_handle, PCI_CONF_REVID); 2070 #ifdef DEBUG_LEVEL 2071 iline = pci_config_get32(conf_handle, PCI_CONF_ILINE); 2072 latim = pci_config_get8(conf_handle, PCI_CONF_LATENCY_TIMER); 2073 #endif 2074 #ifdef DEBUG_BUILT_IN_SIS900 2075 rev = SIS630E_900_REV; 2076 #endif 2077 for (i = 0, p = sfe_chiptbl; i < CHIPTABLESIZE; i++, p++) { 2078 if (p->venid == vid && p->devid == did) { 2079 /* found */ 2080 goto chip_found; 2081 } 2082 } 2083 2084 /* Not found */ 2085 cmn_err(CE_WARN, 2086 "%s%d: sfe_attach: wrong PCI venid/devid (0x%x, 0x%x)", 2087 drv_name, unit, vid, did); 2088 pci_config_teardown(&conf_handle); 2089 goto err; 2090 2091 chip_found: 2092 pci_config_put16(conf_handle, PCI_CONF_COMM, 2093 PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME | 2094 pci_config_get16(conf_handle, PCI_CONF_COMM)); 2095 2096 /* ensure D0 mode */ 2097 (void) gem_pci_set_power_state(dip, conf_handle, PCI_PMCSR_D0); 2098 2099 pci_config_teardown(&conf_handle); 2100 2101 switch (cmd) { 2102 case DDI_RESUME: 2103 return (gem_resume(dip)); 2104 2105 case DDI_ATTACH: 2106 2107 DPRINTF(0, (CE_CONT, 2108 CONS "%s%d: ilr 0x%08x, latency_timer:0x%02x", 2109 drv_name, unit, iline, latim)); 2110 2111 /* 2112 * Map in the device registers. 2113 */ 2114 if (gem_pci_regs_map_setup(dip, 2115 (sfe_use_pcimemspace && p->chip_type == CHIPTYPE_DP83815) 2116 ? PCI_ADDR_MEM32 : PCI_ADDR_IO, PCI_ADDR_MASK, 2117 &sfe_dev_attr, &base, ®s_ha) != DDI_SUCCESS) { 2118 cmn_err(CE_WARN, 2119 "%s%d: ddi_regs_map_setup failed", 2120 drv_name, unit); 2121 goto err; 2122 } 2123 2124 /* 2125 * construct gem configuration 2126 */ 2127 gcp = kmem_zalloc(sizeof (*gcp), KM_SLEEP); 2128 2129 /* name */ 2130 (void) sprintf(gcp->gc_name, "%s%d", drv_name, unit); 2131 2132 /* consistency on tx and rx */ 2133 gcp->gc_tx_buf_align = sizeof (uint8_t) - 1; 2134 gcp->gc_tx_max_frags = MAXTXFRAGS; 2135 gcp->gc_tx_max_descs_per_pkt = gcp->gc_tx_max_frags; 2136 gcp->gc_tx_desc_unit_shift = 4; /* 16 byte */ 2137 gcp->gc_tx_buf_size = TX_BUF_SIZE; 2138 gcp->gc_tx_buf_limit = gcp->gc_tx_buf_size; 2139 gcp->gc_tx_ring_size = TX_RING_SIZE; 2140 gcp->gc_tx_ring_limit = gcp->gc_tx_ring_size; 2141 gcp->gc_tx_auto_pad = B_TRUE; 2142 gcp->gc_tx_copy_thresh = sfe_tx_copy_thresh; 2143 gcp->gc_tx_desc_write_oo = B_TRUE; 2144 2145 gcp->gc_rx_buf_align = sizeof (uint8_t) - 1; 2146 gcp->gc_rx_max_frags = MAXRXFRAGS; 2147 gcp->gc_rx_desc_unit_shift = 4; 2148 gcp->gc_rx_ring_size = RX_RING_SIZE; 2149 gcp->gc_rx_buf_max = RX_BUF_SIZE; 2150 gcp->gc_rx_copy_thresh = sfe_rx_copy_thresh; 2151 2152 /* map attributes */ 2153 gcp->gc_dev_attr = sfe_dev_attr; 2154 gcp->gc_buf_attr = sfe_buf_attr; 2155 gcp->gc_desc_attr = sfe_buf_attr; 2156 2157 /* dma attributes */ 2158 gcp->gc_dma_attr_desc = sfe_dma_attr_desc; 2159 2160 gcp->gc_dma_attr_txbuf = sfe_dma_attr_buf; 2161 gcp->gc_dma_attr_txbuf.dma_attr_align = gcp->gc_tx_buf_align+1; 2162 gcp->gc_dma_attr_txbuf.dma_attr_sgllen = gcp->gc_tx_max_frags; 2163 2164 gcp->gc_dma_attr_rxbuf = sfe_dma_attr_buf; 2165 gcp->gc_dma_attr_rxbuf.dma_attr_align = gcp->gc_rx_buf_align+1; 2166 gcp->gc_dma_attr_rxbuf.dma_attr_sgllen = gcp->gc_rx_max_frags; 2167 2168 /* time out parameters */ 2169 gcp->gc_tx_timeout = 3*ONESEC; 2170 gcp->gc_tx_timeout_interval = ONESEC; 2171 if (p->chip_type == CHIPTYPE_DP83815) { 2172 /* workaround for tx hang */ 2173 gcp->gc_tx_timeout_interval = ONESEC/20; /* 50mS */ 2174 } 2175 2176 /* MII timeout parameters */ 2177 gcp->gc_mii_link_watch_interval = ONESEC; 2178 gcp->gc_mii_an_watch_interval = ONESEC/5; 2179 gcp->gc_mii_reset_timeout = MII_RESET_TIMEOUT; /* 1 sec */ 2180 gcp->gc_mii_an_timeout = MII_AN_TIMEOUT; /* 5 sec */ 2181 gcp->gc_mii_an_wait = 0; 2182 gcp->gc_mii_linkdown_timeout = MII_LINKDOWN_TIMEOUT; 2183 2184 /* setting for general PHY */ 2185 gcp->gc_mii_an_delay = 0; 2186 gcp->gc_mii_linkdown_action = MII_ACTION_RSA; 2187 gcp->gc_mii_linkdown_timeout_action = MII_ACTION_RESET; 2188 gcp->gc_mii_dont_reset = B_FALSE; 2189 2190 2191 /* I/O methods */ 2192 2193 /* mac operation */ 2194 gcp->gc_attach_chip = &sfe_attach_chip; 2195 if (p->chip_type == CHIPTYPE_DP83815) { 2196 gcp->gc_reset_chip = &sfe_reset_chip_dp83815; 2197 } else { 2198 gcp->gc_reset_chip = &sfe_reset_chip_sis900; 2199 } 2200 gcp->gc_init_chip = &sfe_init_chip; 2201 gcp->gc_start_chip = &sfe_start_chip; 2202 gcp->gc_stop_chip = &sfe_stop_chip; 2203 #ifdef USE_MULTICAST_HASHTBL 2204 gcp->gc_multicast_hash = &sfe_mcast_hash; 2205 #endif 2206 if (p->chip_type == CHIPTYPE_DP83815) { 2207 gcp->gc_set_rx_filter = &sfe_set_rx_filter_dp83815; 2208 } else { 2209 gcp->gc_set_rx_filter = &sfe_set_rx_filter_sis900; 2210 } 2211 gcp->gc_set_media = &sfe_set_media; 2212 gcp->gc_get_stats = &sfe_get_stats; 2213 gcp->gc_interrupt = &sfe_interrupt; 2214 2215 /* descriptor operation */ 2216 gcp->gc_tx_desc_write = &sfe_tx_desc_write; 2217 gcp->gc_tx_start = &sfe_tx_start; 2218 gcp->gc_rx_desc_write = &sfe_rx_desc_write; 2219 gcp->gc_rx_start = NULL; 2220 2221 gcp->gc_tx_desc_stat = &sfe_tx_desc_stat; 2222 gcp->gc_rx_desc_stat = &sfe_rx_desc_stat; 2223 gcp->gc_tx_desc_init = &sfe_tx_desc_init; 2224 gcp->gc_rx_desc_init = &sfe_rx_desc_init; 2225 gcp->gc_tx_desc_clean = &sfe_tx_desc_clean; 2226 gcp->gc_rx_desc_clean = &sfe_rx_desc_clean; 2227 2228 /* mii operations */ 2229 if (p->chip_type == CHIPTYPE_DP83815) { 2230 gcp->gc_mii_probe = &sfe_mii_probe_dp83815; 2231 gcp->gc_mii_init = &sfe_mii_init_dp83815; 2232 gcp->gc_mii_config = &sfe_mii_config_dp83815; 2233 gcp->gc_mii_sync = &sfe_mii_sync_dp83815; 2234 gcp->gc_mii_read = &sfe_mii_read_dp83815; 2235 gcp->gc_mii_write = &sfe_mii_write_dp83815; 2236 gcp->gc_mii_tune_phy = NULL; 2237 gcp->gc_flow_control = FLOW_CONTROL_NONE; 2238 } else { 2239 gcp->gc_mii_probe = &gem_mii_probe_default; 2240 gcp->gc_mii_init = NULL; 2241 gcp->gc_mii_config = &sfe_mii_config_sis900; 2242 gcp->gc_mii_sync = &sfe_mii_sync_sis900; 2243 gcp->gc_mii_read = &sfe_mii_read_sis900; 2244 gcp->gc_mii_write = &sfe_mii_write_sis900; 2245 gcp->gc_mii_tune_phy = &sfe_set_eq_sis630; 2246 gcp->gc_flow_control = FLOW_CONTROL_RX_PAUSE; 2247 } 2248 2249 lp = kmem_zalloc(sizeof (*lp), KM_SLEEP); 2250 lp->chip = p; 2251 lp->revid = rev; 2252 lp->our_intr_bits = 0; 2253 lp->isr_pended = 0; 2254 2255 cmn_err(CE_CONT, CONS "%s%d: chip:%s rev:0x%02x", 2256 drv_name, unit, p->chip_name, rev); 2257 2258 dp = gem_do_attach(dip, 0, gcp, base, ®s_ha, 2259 lp, sizeof (*lp)); 2260 kmem_free(gcp, sizeof (*gcp)); 2261 2262 if (dp == NULL) { 2263 goto err_freelp; 2264 } 2265 2266 return (DDI_SUCCESS); 2267 2268 err_freelp: 2269 kmem_free(lp, sizeof (struct sfe_dev)); 2270 err: 2271 return (DDI_FAILURE); 2272 } 2273 return (DDI_FAILURE); 2274 } 2275 2276 static int 2277 sfedetach(dev_info_t *dip, ddi_detach_cmd_t cmd) 2278 { 2279 switch (cmd) { 2280 case DDI_SUSPEND: 2281 return (gem_suspend(dip)); 2282 2283 case DDI_DETACH: 2284 return (gem_do_detach(dip)); 2285 } 2286 return (DDI_FAILURE); 2287 } 2288 2289 /* ======================================================== */ 2290 /* 2291 * OS depend (loadable streams driver) routine 2292 */ 2293 /* ======================================================== */ 2294 DDI_DEFINE_STREAM_OPS(sfe_ops, nulldev, nulldev, sfeattach, sfedetach, 2295 nodev, NULL, D_MP, NULL); 2296 2297 static struct modldrv modldrv = { 2298 &mod_driverops, /* Type of module. This one is a driver */ 2299 ident, 2300 &sfe_ops, /* driver ops */ 2301 }; 2302 2303 static struct modlinkage modlinkage = { 2304 MODREV_1, &modldrv, NULL 2305 }; 2306 2307 /* ======================================================== */ 2308 /* 2309 * Loadable module support 2310 */ 2311 /* ======================================================== */ 2312 int 2313 _init(void) 2314 { 2315 int status; 2316 2317 DPRINTF(2, (CE_CONT, CONS "sfe: _init: called")); 2318 gem_mod_init(&sfe_ops, "sfe"); 2319 status = mod_install(&modlinkage); 2320 if (status != DDI_SUCCESS) { 2321 gem_mod_fini(&sfe_ops); 2322 } 2323 return (status); 2324 } 2325 2326 /* 2327 * _fini : done 2328 */ 2329 int 2330 _fini(void) 2331 { 2332 int status; 2333 2334 DPRINTF(2, (CE_CONT, CONS "sfe: _fini: called")); 2335 status = mod_remove(&modlinkage); 2336 if (status == DDI_SUCCESS) { 2337 gem_mod_fini(&sfe_ops); 2338 } 2339 return (status); 2340 } 2341 2342 int 2343 _info(struct modinfo *modinfop) 2344 { 2345 return (mod_info(&modlinkage, modinfop)); 2346 } 2347