/*-
 * Copyright (c) 2005 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * SiS 900/SiS 7016 fast ethernet PCI NIC driver.  Datasheets are
 * available from http://www.sis.com.tw.
 *
 * This driver also supports the NatSemi DP83815.  Datasheets are
 * available from http://www.national.com.
 *
 * Written by Bill Paul <wpaul@ee.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */
/*
 * The SiS 900 is a fairly simple chip.  It uses bus master DMA with
 * simple TX and RX descriptors of 3 longwords in size.  The receiver
 * has a single perfect filter entry for the station address and a
 * 128-bit multicast hash table.  The SiS 900 has a built-in MII-based
 * transceiver while the 7016 requires an external transceiver chip.
 * Both chips offer the standard bit-bang MII interface as well as
 * an enhanced PHY interface which simplifies accessing MII registers.
 *
 * The only downside to this chipset is that RX descriptors must be
 * longword aligned.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#define SIS_USEIOSPACE

#include <dev/sis/if_sisreg.h>

MODULE_DEPEND(sis, pci, 1, 1, 1);
MODULE_DEPEND(sis, ether, 1, 1, 1);
MODULE_DEPEND(sis, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define	SIS_LOCK(_sc)		mtx_lock(&(_sc)->sis_mtx)
#define	SIS_UNLOCK(_sc)		mtx_unlock(&(_sc)->sis_mtx)
#define	SIS_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sis_mtx, MA_OWNED)

/*
 * register space access macros
 */
#define CSR_WRITE_4(sc, reg, val)	bus_write_4(sc->sis_res[0], reg, val)

#define CSR_READ_4(sc, reg)		bus_read_4(sc->sis_res[0], reg)

#define CSR_READ_2(sc, reg)		bus_read_2(sc->sis_res[0], reg)

/*
 * Various supported device vendors/types and their names.
 */
static struct sis_type sis_devs[] = {
	{ SIS_VENDORID, SIS_DEVICEID_900, "SiS 900 10/100BaseTX" },
	{ SIS_VENDORID, SIS_DEVICEID_7016, "SiS 7016 10/100BaseTX" },
	{ NS_VENDORID, NS_DEVICEID_DP83815, "NatSemi DP8381[56] 10/100BaseTX" },
	{ 0, 0, NULL }
};

static int sis_detach(device_t);
static void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int sis_ifmedia_upd(struct ifnet *);
static void sis_init(void *);
static void sis_initl(struct sis_softc *);
static void sis_intr(void *);
static int sis_ioctl(struct ifnet *, u_long, caddr_t);
static int sis_newbuf(struct sis_softc *, struct sis_desc *, struct mbuf *);
static void sis_start(struct ifnet *);
static void sis_startl(struct ifnet *);
static void sis_stop(struct sis_softc *);
static void sis_watchdog(struct sis_softc *);


static struct resource_spec sis_res_spec[] = {
#ifdef SIS_USEIOSPACE
	{ SYS_RES_IOPORT,	SIS_PCI_LOIO,	RF_ACTIVE},
#else
	{ SYS_RES_MEMORY,	SIS_PCI_LOMEM,	RF_ACTIVE},
#endif
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE},
	{ -1, 0 }
};

#define SIS_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define SIS_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)					\
	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)

static void
sis_dma_map_desc_next(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct sis_desc	*r;

	r = arg;
	r->sis_next = segs->ds_addr;
}

static void
sis_dma_map_desc_ptr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct sis_desc	*r;

	r = arg;
	r->sis_ptr = segs->ds_addr;
}
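
/*
 * bus_dmamap_load() callback used when loading a whole descriptor ring:
 * the ring always fits in a single DMA segment here, so we simply record
 * its bus address (later programmed into SIS_RX_LISTPTR/SIS_TX_LISTPTR).
 */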
static void
sis_dma_map_ring(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *p;

	p = arg;
	*p = segs->ds_addr;
}

/*
 * Routine to reverse the bits in a word.  Stolen almost
 * verbatim from /usr/games/fortune.
 */
static uint16_t
sis_reverse(uint16_t n)
{
	n = ((n >> 1) & 0x5555) | ((n << 1) & 0xaaaa);
	n = ((n >> 2) & 0x3333) | ((n << 2) & 0xcccc);
	n = ((n >> 4) & 0x0f0f) | ((n << 4) & 0xf0f0);
	n = ((n >> 8) & 0x00ff) | ((n << 8) & 0xff00);

	return(n);
}

static void
sis_delay(struct sis_softc *sc)
{
	int idx;

	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, SIS_CSR);
}

static void
sis_eeprom_idle(struct sis_softc *sc)
{
	int i;

	SIO_SET(SIS_EECTL_CSEL);
	sis_delay(sc);
	SIO_SET(SIS_EECTL_CLK);
	sis_delay(sc);

	for (i = 0; i < 25; i++) {
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
	}

	SIO_CLR(SIS_EECTL_CLK);
	sis_delay(sc);
	SIO_CLR(SIS_EECTL_CSEL);
	sis_delay(sc);
	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
}
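
/*
 * A note on the EEPROM access below: each command is the read opcode
 * (SIS_EECMD_READ) ORed with the word address, shifted out MSB first as
 * an 11-bit frame (sis_eeprom_putbyte() starts clocking at bit 0x400).
 */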
/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
sis_eeprom_putbyte(struct sis_softc *sc, int addr)
{
	int d, i;

	d = addr | SIS_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(SIS_EECTL_DIN);
		} else {
			SIO_CLR(SIS_EECTL_DIN);
		}
		sis_delay(sc);
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
sis_eeprom_getword(struct sis_softc *sc, int addr, uint16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/* Force EEPROM to idle state. */
	sis_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	sis_delay(sc);
	SIO_CLR(SIS_EECTL_CLK);
	sis_delay(sc);
	SIO_SET(SIS_EECTL_CSEL);
	sis_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	sis_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
		if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
			word |= i;
		sis_delay(sc);
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	sis_eeprom_idle(sc);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
sis_read_eeprom(struct sis_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int i;
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		sis_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
}

#if defined(__i386__) || defined(__amd64__)
static device_t
sis_find_bridge(device_t dev)
{
	devclass_t pci_devclass;
	device_t *pci_devices;
	int pci_count = 0;
	device_t *pci_children;
	int pci_childcount = 0;
	device_t *busp, *childp;
	device_t child = NULL;
	int i, j;

	if ((pci_devclass = devclass_find("pci")) == NULL)
		return(NULL);

	devclass_get_devices(pci_devclass, &pci_devices, &pci_count);

	for (i = 0, busp = pci_devices; i < pci_count; i++, busp++) {
		if (device_get_children(*busp, &pci_children, &pci_childcount))
			continue;
		for (j = 0, childp = pci_children;
		    j < pci_childcount; j++, childp++) {
			if (pci_get_vendor(*childp) == SIS_VENDORID &&
			    pci_get_device(*childp) == 0x0008) {
				child = *childp;
				free(pci_children, M_TEMP);
				goto done;
			}
		}
		free(pci_children, M_TEMP);
	}

done:
	free(pci_devices, M_TEMP);
	return(child);
}

static void
sis_read_cmos(struct sis_softc *sc, device_t dev, caddr_t dest, int off, int cnt)
{
	device_t bridge;
	u_int8_t reg;
	int i;
	bus_space_tag_t btag;

	bridge = sis_find_bridge(dev);
	if (bridge == NULL)
		return;
	reg = pci_read_config(bridge, 0x48, 1);
	pci_write_config(bridge, 0x48, reg|0x40, 1);

	/* XXX */
#if defined(__i386__)
	btag = I386_BUS_SPACE_IO;
#elif defined(__amd64__)
	btag = AMD64_BUS_SPACE_IO;
#endif

	for (i = 0; i < cnt; i++) {
		bus_space_write_1(btag, 0x0, 0x70, i + off);
		*(dest + i) = bus_space_read_1(btag, 0x0, 0x71);
	}

	pci_write_config(bridge, 0x48, reg & ~0x40, 1);
	return;
}

static void
sis_read_mac(struct sis_softc *sc, device_t dev, caddr_t dest)
{
	u_int32_t filtsave, csrsave;

	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);
	csrsave = CSR_READ_4(sc, SIS_CSR);

	CSR_WRITE_4(sc, SIS_CSR, SIS_CSR_RELOAD | filtsave);
	CSR_WRITE_4(sc, SIS_CSR, 0);

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave & ~SIS_RXFILTCTL_ENABLE);

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
	((u_int16_t *)dest)[0] = CSR_READ_2(sc, SIS_RXFILT_DATA);
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
	((u_int16_t *)dest)[1] = CSR_READ_2(sc, SIS_RXFILT_DATA);
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
	((u_int16_t *)dest)[2] = CSR_READ_2(sc, SIS_RXFILT_DATA);

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);
	CSR_WRITE_4(sc, SIS_CSR, csrsave);
	return;
}
#endif

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
sis_mii_sync(struct sis_softc *sc)
{
	int i;

	SIO_SET(SIS_MII_DIR|SIS_MII_DATA);

	for (i = 0; i < 32; i++) {
		SIO_SET(SIS_MII_CLK);
		DELAY(1);
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
	}
}
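
/*
 * The bit-bang MII helpers below clock out a standard IEEE 802.3 clause 22
 * management frame: a 2-bit start delimiter, 2-bit opcode, 5-bit PHY
 * address, 5-bit register address, a turnaround, and 16 data bits.
 */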
/*
 * Clock a series of bits through the MII.
 */
static void
sis_mii_send(struct sis_softc *sc, uint32_t bits, int cnt)
{
	int i;

	SIO_CLR(SIS_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(SIS_MII_DATA);
		} else {
			SIO_CLR(SIS_MII_DATA);
		}
		DELAY(1);
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
		SIO_SET(SIS_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int
sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
{
	int i, ack;

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = SIS_MII_STARTDELIM;
	frame->mii_opcode = SIS_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(SIS_MII_DIR);

	sis_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	sis_mii_send(sc, frame->mii_stdelim, 2);
	sis_mii_send(sc, frame->mii_opcode, 2);
	sis_mii_send(sc, frame->mii_phyaddr, 5);
	sis_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
	DELAY(1);
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(SIS_MII_DIR);

	/* Check for ack */
	SIO_CLR(SIS_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits.  If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(SIS_MII_CLK);
			DELAY(1);
			SIO_SET(SIS_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(SIS_MII_CLK);
		DELAY(1);
	}

fail:

	SIO_CLR(SIS_MII_CLK);
	DELAY(1);
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
{

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = SIS_MII_STARTDELIM;
	frame->mii_opcode = SIS_MII_WRITEOP;
	frame->mii_turnaround = SIS_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(SIS_MII_DIR);

	sis_mii_sync(sc);

	sis_mii_send(sc, frame->mii_stdelim, 2);
	sis_mii_send(sc, frame->mii_opcode, 2);
	sis_mii_send(sc, frame->mii_phyaddr, 5);
	sis_mii_send(sc, frame->mii_regaddr, 5);
	sis_mii_send(sc, frame->mii_turnaround, 2);
	sis_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(SIS_MII_CLK);
	DELAY(1);
	SIO_CLR(SIS_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(SIS_MII_DIR);

	return(0);
}

static int
sis_miibus_readreg(device_t dev, int phy, int reg)
{
	struct sis_softc *sc;
	struct sis_mii_frame frame;

	sc = device_get_softc(dev);

	if (sc->sis_type == SIS_TYPE_83815) {
		if (phy != 0)
			return(0);
		/*
		 * The NatSemi chip can take a while after
		 * a reset to come ready, during which the BMSR
		 * returns a value of 0.  This is *never* supposed
		 * to happen: some of the BMSR bits are meant to
		 * be hardwired in the on position, and this can
		 * confuse the miibus code a bit during the probe
		 * and attach phase.  So we make an effort to check
		 * for this condition and wait for it to clear.
		 */
		if (!CSR_READ_4(sc, NS_BMSR))
			DELAY(1000);
		return CSR_READ_4(sc, NS_BMCR + (reg * 4));
	}

	/*
	 * Chipsets < SIS_635 seem not to be able to read/write
	 * through mdio.  Use the enhanced PHY access register
	 * instead for them.
	 */
	if (sc->sis_type == SIS_TYPE_900 &&
	    sc->sis_rev < SIS_REV_635) {
		int i, val = 0;

		if (phy != 0)
			return(0);

		CSR_WRITE_4(sc, SIS_PHYCTL,
		    (phy << 11) | (reg << 6) | SIS_PHYOP_READ);
		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);

		for (i = 0; i < SIS_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
				break;
		}

		if (i == SIS_TIMEOUT) {
			device_printf(sc->sis_dev, "PHY failed to come ready\n");
			return(0);
		}

		val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;

		if (val == 0xFFFF)
			return(0);

		return(val);
	} else {
		bzero((char *)&frame, sizeof(frame));

		frame.mii_phyaddr = phy;
		frame.mii_regaddr = reg;
		sis_mii_readreg(sc, &frame);

		return(frame.mii_data);
	}
}

static int
sis_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct sis_softc *sc;
	struct sis_mii_frame frame;

	sc = device_get_softc(dev);

	if (sc->sis_type == SIS_TYPE_83815) {
		if (phy != 0)
			return(0);
		CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
		return(0);
	}

	/*
	 * Chipsets < SIS_635 seem not to be able to read/write
	 * through mdio.  Use the enhanced PHY access register
	 * instead for them.
	 */
	if (sc->sis_type == SIS_TYPE_900 &&
	    sc->sis_rev < SIS_REV_635) {
		int i;

		if (phy != 0)
			return(0);

		CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
		    (reg << 6) | SIS_PHYOP_WRITE);
		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);

		for (i = 0; i < SIS_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
				break;
		}

		if (i == SIS_TIMEOUT)
			device_printf(sc->sis_dev, "PHY failed to come ready\n");
	} else {
		bzero((char *)&frame, sizeof(frame));

		frame.mii_phyaddr = phy;
		frame.mii_regaddr = reg;
		frame.mii_data = data;
		sis_mii_writereg(sc, &frame);
	}
	return(0);
}

static void
sis_miibus_statchg(device_t dev)
{
	struct sis_softc *sc;

	sc = device_get_softc(dev);
	SIS_LOCK_ASSERT(sc);
	sis_initl(sc);
}
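
/*
 * The multicast hash is the top bits of the big-endian CRC-32 of the
 * address: 9 bits for the DP83815's 512-bit table, 8 bits for the 256-bit
 * table on SiS 900B/635 and newer parts, and 7 bits for the older
 * 128-bit table.
 */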
static uint32_t
sis_mchash(struct sis_softc *sc, const uint8_t *addr)
{
	uint32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);

	/*
	 * return the filter bit position
	 *
	 * The NatSemi chip has a 512-bit filter, which is
	 * different from the SiS, so we special-case it.
	 */
	if (sc->sis_type == SIS_TYPE_83815)
		return (crc >> 23);
	else if (sc->sis_rev >= SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)
		return (crc >> 24);
	else
		return (crc >> 25);
}

static void
sis_setmulti_ns(struct sis_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u_int32_t h = 0, i, filtsave;
	int bit, index;

	ifp = sc->sis_ifp;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		SIS_CLRBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);
		return;
	}

	/*
	 * We have to explicitly enable the multicast hash table
	 * on the NatSemi chip if we want to use it, which we do.
	 */
	SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
	SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);

	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);

	/* first, zot all the existing hash bits */
	for (i = 0; i < 32; i++) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i*2));
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
	}

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = sis_mchash(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		index = h >> 3;
		bit = h & 0x1F;
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);
		if (bit > 0xF)
			bit -= 0x10;
		SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));
	}
	IF_ADDR_UNLOCK(ifp);

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);

	return;
}
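
/*
 * On the SiS parts the hash table lives in the receive filter address
 * space: 16 16-bit words (256 bits) on 900B/635 and newer, 8 words
 * (128 bits) on older revisions.  If there are more multicast groups
 * than hash words we simply punt and accept all multicast frames.
 */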
static void
sis_setmulti_sis(struct sis_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u_int32_t h, i, n, ctl;
	u_int16_t hashes[16];

	ifp = sc->sis_ifp;

	/* hash table size */
	if (sc->sis_rev >= SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)
		n = 16;
	else
		n = 8;

	ctl = CSR_READ_4(sc, SIS_RXFILT_CTL) & SIS_RXFILTCTL_ENABLE;

	if (ifp->if_flags & IFF_BROADCAST)
		ctl |= SIS_RXFILTCTL_BROAD;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		ctl |= SIS_RXFILTCTL_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			ctl |= SIS_RXFILTCTL_BROAD|SIS_RXFILTCTL_ALLPHYS;
		for (i = 0; i < n; i++)
			hashes[i] = ~0;
	} else {
		for (i = 0; i < n; i++)
			hashes[i] = 0;
		i = 0;
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = sis_mchash(sc,
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			hashes[h >> 4] |= 1 << (h & 0xf);
			i++;
		}
		IF_ADDR_UNLOCK(ifp);
		if (i > n) {
			ctl |= SIS_RXFILTCTL_ALLMULTI;
			for (i = 0; i < n; i++)
				hashes[i] = ~0;
		}
	}

	for (i = 0; i < n; i++) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
	}

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, ctl);
}

static void
sis_reset(struct sis_softc *sc)
{
	int i;

	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);

	for (i = 0; i < SIS_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
			break;
	}

	if (i == SIS_TIMEOUT)
		device_printf(sc->sis_dev, "reset never completed\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NatSemi chip, make sure to clear
	 * PME mode.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
		CSR_WRITE_4(sc, NS_CLKRUN, 0);
	}

	return;
}

/*
 * Probe for an SiS chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
sis_probe(device_t dev)
{
	struct sis_type *t;

	t = sis_devs;

	while(t->sis_name != NULL) {
		if ((pci_get_vendor(dev) == t->sis_vid) &&
		    (pci_get_device(dev) == t->sis_did)) {
			device_set_desc(dev, t->sis_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
sis_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	struct sis_softc *sc;
	struct ifnet *ifp;
	int error = 0, waittime = 0;

	waittime = 0;
	sc = device_get_softc(dev);

	sc->sis_dev = dev;

	mtx_init(&sc->sis_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sis_stat_ch, &sc->sis_mtx, 0);

	if (pci_get_device(dev) == SIS_DEVICEID_900)
		sc->sis_type = SIS_TYPE_900;
	if (pci_get_device(dev) == SIS_DEVICEID_7016)
		sc->sis_type = SIS_TYPE_7016;
	if (pci_get_vendor(dev) == NS_VENDORID)
		sc->sis_type = SIS_TYPE_83815;

	sc->sis_rev = pci_read_config(dev, PCIR_REVID, 1);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	error = bus_alloc_resources(dev, sis_res_spec, sc->sis_res);
	if (error) {
		device_printf(dev, "couldn't allocate resources\n");
		goto fail;
	}

	/* Reset the adapter. */
	sis_reset(sc);

	if (sc->sis_type == SIS_TYPE_900 &&
	    (sc->sis_rev == SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)) {
		SIO_SET(SIS_CFG_RND_CNT);
		SIO_SET(SIS_CFG_PERR_DETECT);
	}

	/*
	 * Get station address from the EEPROM.
	 */
	switch (pci_get_vendor(dev)) {
	case NS_VENDORID:
		sc->sis_srr = CSR_READ_4(sc, NS_SRR);

		/* We can't update the device description, so spew */
		if (sc->sis_srr == NS_SRR_15C)
			device_printf(dev, "Silicon Revision: DP83815C\n");
		else if (sc->sis_srr == NS_SRR_15D)
			device_printf(dev, "Silicon Revision: DP83815D\n");
		else if (sc->sis_srr == NS_SRR_16A)
			device_printf(dev, "Silicon Revision: DP83816A\n");
		else
			device_printf(dev, "Silicon Revision %x\n", sc->sis_srr);

		/*
		 * Reading the MAC address out of the EEPROM on
		 * the NatSemi chip takes a bit more work than
		 * you'd expect.  The address spans 4 16-bit words,
		 * with the first word containing only a single bit.
		 * You have to shift everything over one bit to
		 * get it aligned properly.  Also, the bits are
		 * stored backwards (the LSB is really the MSB,
		 * and so on) so you have to reverse them in order
		 * to get the MAC address into the form we want.
		 * Why? Who the hell knows.
		 */
		{
			u_int16_t tmp[4];

			sis_read_eeprom(sc, (caddr_t)&tmp,
			    NS_EE_NODEADDR, 4, 0);

			/* Shift everything over one bit. */
			tmp[3] = tmp[3] >> 1;
			tmp[3] |= tmp[2] << 15;
			tmp[2] = tmp[2] >> 1;
			tmp[2] |= tmp[1] << 15;
			tmp[1] = tmp[1] >> 1;
			tmp[1] |= tmp[0] << 15;

			/* Now reverse all the bits. */
			tmp[3] = sis_reverse(tmp[3]);
			tmp[2] = sis_reverse(tmp[2]);
			tmp[1] = sis_reverse(tmp[1]);

			bcopy((char *)&tmp[1], eaddr, ETHER_ADDR_LEN);
		}
		break;
	case SIS_VENDORID:
	default:
#if defined(__i386__) || defined(__amd64__)
		/*
		 * If this is a SiS 630E chipset with an embedded
		 * SiS 900 controller, we have to read the MAC address
		 * from the APC CMOS RAM.  Our method for doing this
		 * is very ugly since we have to reach out and grab
		 * ahold of hardware for which we cannot properly
		 * allocate resources.  This code is only compiled on
		 * the i386 architecture since the SiS 630E chipset
		 * is for x86 motherboards only.  Note that there are
		 * a lot of magic numbers in this hack.  These are
		 * taken from SiS's Linux driver.  I'd like to replace
		 * them with proper symbolic definitions, but that
		 * requires some datasheets that I don't have access
		 * to at the moment.
		 */
		if (sc->sis_rev == SIS_REV_630S ||
		    sc->sis_rev == SIS_REV_630E ||
		    sc->sis_rev == SIS_REV_630EA1)
			sis_read_cmos(sc, dev, (caddr_t)&eaddr, 0x9, 6);

		else if (sc->sis_rev == SIS_REV_635 ||
		    sc->sis_rev == SIS_REV_630ET)
			sis_read_mac(sc, dev, (caddr_t)&eaddr);
		else if (sc->sis_rev == SIS_REV_96x) {
			/*
			 * Allow EEPROM access from the LAN block.  The
			 * EEPROM is shared between a 1394 controller and
			 * the NIC, so each time we access it we need to
			 * request it by setting SIS_EECMD_REQ.
			 */
			SIO_SET(SIS_EECMD_REQ);
			for (waittime = 0; waittime < SIS_TIMEOUT;
			    waittime++) {
				/* Force EEPROM to idle state. */
				sis_eeprom_idle(sc);
				if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECMD_GNT) {
					sis_read_eeprom(sc, (caddr_t)&eaddr,
					    SIS_EE_NODEADDR, 3, 0);
					break;
				}
				DELAY(1);
			}
			/*
			 * Drive SIS_EECTL_CLK high so that another
			 * master can operate on the I2C bus.
			 */
			SIO_SET(SIS_EECTL_CLK);
			/* Refuse EEPROM access by LAN */
			SIO_SET(SIS_EECMD_DONE);
		} else
#endif
			sis_read_eeprom(sc, (caddr_t)&eaddr,
			    SIS_EE_NODEADDR, 3, 0);
		break;
	}

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define SIS_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MAXBSIZE, SIS_NSEG_NEW,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->sis_parent_tag);
	if (error)
		goto fail;

	/*
	 * Now allocate a tag for the DMA descriptor lists and a chunk
	 * of DMA-able memory based on the tag.  Also obtain the physical
	 * addresses of the RX and TX ring, which we'll need later.
	 * All of our lists are allocated as a contiguous block
	 * of memory.
	 */
	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    SIS_RX_LIST_SZ, 1,			/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    busdma_lock_mutex,			/* lockfunc */
	    &Giant,				/* lockarg */
	    &sc->sis_rx_tag);
	if (error)
		goto fail;

	error = bus_dmamem_alloc(sc->sis_rx_tag,
	    (void **)&sc->sis_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sis_rx_dmamap);

	if (error) {
		device_printf(dev, "no memory for rx list buffers!\n");
		bus_dma_tag_destroy(sc->sis_rx_tag);
		sc->sis_rx_tag = NULL;
		goto fail;
	}

	error = bus_dmamap_load(sc->sis_rx_tag,
	    sc->sis_rx_dmamap, &(sc->sis_rx_list[0]),
	    sizeof(struct sis_desc), sis_dma_map_ring,
	    &sc->sis_rx_paddr, 0);

	if (error) {
		device_printf(dev, "cannot get address of the rx ring!\n");
		bus_dmamem_free(sc->sis_rx_tag,
		    sc->sis_rx_list, sc->sis_rx_dmamap);
		bus_dma_tag_destroy(sc->sis_rx_tag);
		sc->sis_rx_tag = NULL;
		goto fail;
	}

	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    SIS_TX_LIST_SZ, 1,			/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    busdma_lock_mutex,			/* lockfunc */
	    &Giant,				/* lockarg */
	    &sc->sis_tx_tag);
	if (error)
		goto fail;

	error = bus_dmamem_alloc(sc->sis_tx_tag,
	    (void **)&sc->sis_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sis_tx_dmamap);

	if (error) {
		device_printf(dev, "no memory for tx list buffers!\n");
		bus_dma_tag_destroy(sc->sis_tx_tag);
		sc->sis_tx_tag = NULL;
		goto fail;
	}

	error = bus_dmamap_load(sc->sis_tx_tag,
	    sc->sis_tx_dmamap, &(sc->sis_tx_list[0]),
	    sizeof(struct sis_desc), sis_dma_map_ring,
	    &sc->sis_tx_paddr, 0);

	if (error) {
		device_printf(dev, "cannot get address of the tx ring!\n");
		bus_dmamem_free(sc->sis_tx_tag,
		    sc->sis_tx_list, sc->sis_tx_dmamap);
		bus_dma_tag_destroy(sc->sis_tx_tag);
		sc->sis_tx_tag = NULL;
		goto fail;
	}

	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    busdma_lock_mutex,			/* lockfunc */
	    &Giant,				/* lockarg */
	    &sc->sis_tag);
	if (error)
		goto fail;
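
	/*
	 * sis_tag is the tag used for the per-packet maps created in
	 * sis_newbuf() and sis_encap(); each map covers at most one
	 * mbuf cluster (MCLBYTES) in a single segment.
	 */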

	/*
	 * The physical addresses of the RX and TX rings were obtained
	 * above; sis_initl() loads them into the chip.
	 */

	ifp = sc->sis_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sis_ioctl;
	ifp->if_start = sis_start;
	ifp->if_init = sis_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, SIS_TX_LIST_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = SIS_TX_LIST_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Do MII setup.
	 */
	if (mii_phy_probe(dev, &sc->sis_miibus,
	    sis_ifmedia_upd, sis_ifmedia_sts)) {
		device_printf(dev, "MII without any PHY!\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->sis_res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, sis_intr, sc, &sc->sis_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		sis_detach(dev);

	return(error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sis_detach(device_t dev)
{
	struct sis_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->sis_mtx), ("sis mutex not initialized"));
	ifp = sc->sis_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded. */
	if (device_is_attached(dev)) {
		SIS_LOCK(sc);
		sis_reset(sc);
		sis_stop(sc);
		SIS_UNLOCK(sc);
		callout_drain(&sc->sis_stat_ch);
		ether_ifdetach(ifp);
	}
	if (sc->sis_miibus)
		device_delete_child(dev, sc->sis_miibus);
	bus_generic_detach(dev);

	if (sc->sis_intrhand)
		bus_teardown_intr(dev, sc->sis_res[1], sc->sis_intrhand);
	bus_release_resources(dev, sis_res_spec, sc->sis_res);

	if (ifp)
		if_free(ifp);

	if (sc->sis_rx_tag) {
		bus_dmamap_unload(sc->sis_rx_tag,
		    sc->sis_rx_dmamap);
		bus_dmamem_free(sc->sis_rx_tag,
		    sc->sis_rx_list, sc->sis_rx_dmamap);
		bus_dma_tag_destroy(sc->sis_rx_tag);
	}
	if (sc->sis_tx_tag) {
		bus_dmamap_unload(sc->sis_tx_tag,
		    sc->sis_tx_dmamap);
		bus_dmamem_free(sc->sis_tx_tag,
		    sc->sis_tx_list, sc->sis_tx_dmamap);
		bus_dma_tag_destroy(sc->sis_tx_tag);
	}
	if (sc->sis_parent_tag)
		bus_dma_tag_destroy(sc->sis_parent_tag);
	if (sc->sis_tag)
		bus_dma_tag_destroy(sc->sis_tag);

	mtx_destroy(&sc->sis_mtx);

	return(0);
}

/*
 * Initialize the TX and RX descriptors and allocate mbufs for them.  Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
sis_ring_init(struct sis_softc *sc)
{
	int i, error;
	struct sis_desc *dp;

	dp = &sc->sis_tx_list[0];
	for (i = 0; i < SIS_TX_LIST_CNT; i++, dp++) {
		if (i == (SIS_TX_LIST_CNT - 1))
			dp->sis_nextdesc = &sc->sis_tx_list[0];
		else
			dp->sis_nextdesc = dp + 1;
		bus_dmamap_load(sc->sis_tx_tag,
		    sc->sis_tx_dmamap,
		    dp->sis_nextdesc, sizeof(struct sis_desc),
		    sis_dma_map_desc_next, dp, 0);
		dp->sis_mbuf = NULL;
		dp->sis_ptr = 0;
		dp->sis_ctl = 0;
	}

	sc->sis_tx_prod = sc->sis_tx_cons = sc->sis_tx_cnt = 0;

	bus_dmamap_sync(sc->sis_tx_tag,
	    sc->sis_tx_dmamap, BUS_DMASYNC_PREWRITE);

	dp = &sc->sis_rx_list[0];
	for (i = 0; i < SIS_RX_LIST_CNT; i++, dp++) {
		error = sis_newbuf(sc, dp, NULL);
		if (error)
			return(error);
		if (i == (SIS_RX_LIST_CNT - 1))
			dp->sis_nextdesc = &sc->sis_rx_list[0];
		else
			dp->sis_nextdesc = dp + 1;
		bus_dmamap_load(sc->sis_rx_tag,
		    sc->sis_rx_dmamap,
		    dp->sis_nextdesc, sizeof(struct sis_desc),
		    sis_dma_map_desc_next, dp, 0);
	}

	bus_dmamap_sync(sc->sis_rx_tag,
	    sc->sis_rx_dmamap, BUS_DMASYNC_PREWRITE);

	sc->sis_rx_pdsc = &sc->sis_rx_list[0];

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
sis_newbuf(struct sis_softc *sc, struct sis_desc *c, struct mbuf *m)
{

	if (c == NULL)
		return(EINVAL);

	if (m == NULL) {
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return(ENOBUFS);
	} else
		m->m_data = m->m_ext.ext_buf;

	c->sis_mbuf = m;
	c->sis_ctl = SIS_RXLEN;

	bus_dmamap_create(sc->sis_tag, 0, &c->sis_map);
	bus_dmamap_load(sc->sis_tag, c->sis_map,
	    mtod(m, void *), MCLBYTES,
	    sis_dma_map_desc_ptr, c, 0);
	bus_dmamap_sync(sc->sis_tag, c->sis_map, BUS_DMASYNC_PREREAD);

	return(0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
sis_rxeof(struct sis_softc *sc)
{
	struct mbuf *m, *m0;
	struct ifnet *ifp;
	struct sis_desc *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	SIS_LOCK_ASSERT(sc);

	ifp = sc->sis_ifp;

	for(cur_rx = sc->sis_rx_pdsc; SIS_OWNDESC(cur_rx);
	    cur_rx = cur_rx->sis_nextdesc) {

#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		rxstat = cur_rx->sis_rxstat;
		bus_dmamap_sync(sc->sis_tag,
		    cur_rx->sis_map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sis_tag, cur_rx->sis_map);
		bus_dmamap_destroy(sc->sis_tag, cur_rx->sis_map);
		m = cur_rx->sis_mbuf;
		cur_rx->sis_mbuf = NULL;
		total_len = SIS_RXBYTES(cur_rx);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if ((ifp->if_capenable & IFCAP_VLAN_MTU) != 0 &&
		    total_len <= (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN -
		    ETHER_CRC_LEN))
			rxstat &= ~SIS_RXSTAT_GIANT;
		if (SIS_RXSTAT_ERROR(rxstat) != 0) {
			ifp->if_ierrors++;
			if (rxstat & SIS_RXSTAT_COLL)
				ifp->if_collisions++;
			sis_newbuf(sc, cur_rx, m);
			continue;
		}

		/* No errors; receive the packet. */
#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * On architectures without alignment problems we try to
		 * allocate a new buffer for the receive ring, and pass up
		 * the one where the packet is already, saving the expensive
		 * copy done in m_devget().
		 * If we are on an architecture with alignment problems, or
		 * if the allocation fails, then use m_devget and leave the
		 * existing buffer in the receive ring.
		 */
		if (sis_newbuf(sc, cur_rx, NULL) == 0)
			m->m_pkthdr.len = m->m_len = total_len;
		else
#endif
		{
			m0 = m_devget(mtod(m, char *), total_len,
			    ETHER_ALIGN, ifp, NULL);
			sis_newbuf(sc, cur_rx, m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		SIS_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		SIS_LOCK(sc);
	}

	sc->sis_rx_pdsc = cur_rx;
}
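
/*
 * Recover from an RX overrun or error: drain whatever the chip has
 * already delivered, then reinitialize it via sis_initl(), which also
 * resets and restarts the RX ring.
 */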
static void
sis_rxeoc(struct sis_softc *sc)
{

	SIS_LOCK_ASSERT(sc);
	sis_rxeof(sc);
	sis_initl(sc);
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */

static void
sis_txeof(struct sis_softc *sc)
{
	struct ifnet *ifp;
	u_int32_t idx;

	SIS_LOCK_ASSERT(sc);
	ifp = sc->sis_ifp;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (idx = sc->sis_tx_cons; sc->sis_tx_cnt > 0;
	    sc->sis_tx_cnt--, SIS_INC(idx, SIS_TX_LIST_CNT)) {
		struct sis_desc *cur_tx = &sc->sis_tx_list[idx];

		if (SIS_OWNDESC(cur_tx))
			break;

		if (cur_tx->sis_ctl & SIS_CMDSTS_MORE)
			continue;

		if (!(cur_tx->sis_ctl & SIS_CMDSTS_PKT_OK)) {
			ifp->if_oerrors++;
			if (cur_tx->sis_txstat & SIS_TXSTAT_EXCESSCOLLS)
				ifp->if_collisions++;
			if (cur_tx->sis_txstat & SIS_TXSTAT_OUTOFWINCOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions +=
		    (cur_tx->sis_txstat & SIS_TXSTAT_COLLCNT) >> 16;

		ifp->if_opackets++;
		if (cur_tx->sis_mbuf != NULL) {
			m_freem(cur_tx->sis_mbuf);
			cur_tx->sis_mbuf = NULL;
			bus_dmamap_unload(sc->sis_tag, cur_tx->sis_map);
			bus_dmamap_destroy(sc->sis_tag, cur_tx->sis_map);
		}
	}

	if (idx != sc->sis_tx_cons) {
		/* we freed up some buffers */
		sc->sis_tx_cons = idx;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	sc->sis_watchdog_timer = (sc->sis_tx_cnt == 0) ? 0 : 5;

	return;
}

static void
sis_tick(void *xsc)
{
	struct sis_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;

	sc = xsc;
	SIS_LOCK_ASSERT(sc);
	sc->in_tick = 1;
	ifp = sc->sis_ifp;

	mii = device_get_softc(sc->sis_miibus);
	mii_tick(mii);

	sis_watchdog(sc);

	if (!sc->sis_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->sis_link++;
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			sis_startl(ifp);
	}

	callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc);
	sc->in_tick = 0;
}

#ifdef DEVICE_POLLING
static poll_handler_t sis_poll;

static void
sis_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct sis_softc *sc = ifp->if_softc;

	SIS_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		SIS_UNLOCK(sc);
		return;
	}

	/*
	 * On the sis, reading the status register also clears it.
	 * So before returning to intr mode we must make sure that all
	 * possible pending sources of interrupts have been served.
	 * In practice this means run to completion the *eof routines,
	 * and then call the interrupt routine.
	 */
	sc->rxcycles = count;
	sis_rxeof(sc);
	sis_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		sis_startl(ifp);

	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
		u_int32_t status;

		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, SIS_ISR);

		if (status & (SIS_ISR_RX_ERR|SIS_ISR_RX_OFLOW))
			sis_rxeoc(sc);

		if (status & (SIS_ISR_RX_IDLE))
			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

		if (status & SIS_ISR_SYSERR) {
			sis_reset(sc);
			sis_initl(sc);
		}
	}

	SIS_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

static void
sis_intr(void *arg)
{
	struct sis_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;

	sc = arg;
	ifp = sc->sis_ifp;

	if (sc->sis_stopped)	/* Most likely shared interrupt */
		return;

	SIS_LOCK(sc);
#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		SIS_UNLOCK(sc);
		return;
	}
#endif

	/* Disable interrupts. */
	CSR_WRITE_4(sc, SIS_IER, 0);

	for (;;) {
		SIS_LOCK_ASSERT(sc);
		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, SIS_ISR);

		if ((status & SIS_INTRS) == 0)
			break;

		if (status &
		    (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR |
		    SIS_ISR_TX_OK | SIS_ISR_TX_IDLE))
			sis_txeof(sc);

		if (status & (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK |
		    SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE))
			sis_rxeof(sc);

		if (status & SIS_ISR_RX_OFLOW)
			sis_rxeoc(sc);

		if (status & (SIS_ISR_RX_IDLE))
			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

		if (status & SIS_ISR_SYSERR) {
			sis_reset(sc);
			sis_initl(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, SIS_IER, 1);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		sis_startl(ifp);

	SIS_UNLOCK(sc);
}
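
/*
 * Note on descriptor ownership in sis_encap() below: the OWN bit is set
 * on every fragment except the first as the chain is built, and only set
 * on the first descriptor once the whole chain is ready, so the chip
 * never sees a partially constructed packet.
 */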
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
sis_encap(struct sis_softc *sc, struct mbuf **m_head, uint32_t *txidx)
{
	struct sis_desc *f = NULL;
	struct mbuf *m;
	int frag, cur, cnt = 0, chainlen = 0;

	/*
	 * If there's no way we can send any packets, return now.
	 */
	if (SIS_TX_LIST_CNT - sc->sis_tx_cnt < 2)
		return (ENOBUFS);

	/*
	 * Count the number of frags in this chain to see if
	 * we need to m_defrag.  Since the descriptor list is shared
	 * by all packets, we'll m_defrag long chains so that they
	 * do not use up the entire list, even if they would fit.
	 */

	for (m = *m_head; m != NULL; m = m->m_next)
		chainlen++;

	if ((chainlen > SIS_TX_LIST_CNT / 4) ||
	    ((SIS_TX_LIST_CNT - (chainlen + sc->sis_tx_cnt)) < 2)) {
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		*m_head = m;
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers.  Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	cur = frag = *txidx;

	for (m = *m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if ((SIS_TX_LIST_CNT -
			    (sc->sis_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc->sis_tx_list[frag];
			f->sis_ctl = SIS_CMDSTS_MORE | m->m_len;
			bus_dmamap_create(sc->sis_tag, 0, &f->sis_map);
			bus_dmamap_load(sc->sis_tag, f->sis_map,
			    mtod(m, void *), m->m_len,
			    sis_dma_map_desc_ptr, f, 0);
			bus_dmamap_sync(sc->sis_tag,
			    f->sis_map, BUS_DMASYNC_PREREAD);
			if (cnt != 0)
				f->sis_ctl |= SIS_CMDSTS_OWN;
			cur = frag;
			SIS_INC(frag, SIS_TX_LIST_CNT);
			cnt++;
		}
	}

	if (m != NULL)
		return(ENOBUFS);

	sc->sis_tx_list[cur].sis_mbuf = *m_head;
	sc->sis_tx_list[cur].sis_ctl &= ~SIS_CMDSTS_MORE;
	sc->sis_tx_list[*txidx].sis_ctl |= SIS_CMDSTS_OWN;
	sc->sis_tx_cnt += cnt;
	*txidx = frag;

	return(0);
}

/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists.  We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

static void
sis_start(struct ifnet *ifp)
{
	struct sis_softc *sc;

	sc = ifp->if_softc;
	SIS_LOCK(sc);
	sis_startl(ifp);
	SIS_UNLOCK(sc);
}

static void
sis_startl(struct ifnet *ifp)
{
	struct sis_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t idx, queued = 0;

	sc = ifp->if_softc;

	SIS_LOCK_ASSERT(sc);

	if (!sc->sis_link)
		return;

	idx = sc->sis_tx_prod;

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while(sc->sis_tx_list[idx].sis_mbuf == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (sis_encap(sc, &m_head, &idx)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);

	}

	if (queued) {
		/* Transmit */
		sc->sis_tx_prod = idx;
		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->sis_watchdog_timer = 5;
	}
}

static void
sis_init(void *xsc)
{
	struct sis_softc *sc = xsc;

	SIS_LOCK(sc);
	sis_initl(sc);
	SIS_UNLOCK(sc);
}

static void
sis_initl(struct sis_softc *sc)
{
	struct ifnet *ifp = sc->sis_ifp;
	struct mii_data *mii;

	SIS_LOCK_ASSERT(sc);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	sis_stop(sc);
	sc->sis_stopped = 0;

#ifdef notyet
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
		/*
		 * Configure 400usec of interrupt holdoff.  This is based
		 * on empirical tests on a Soekris 4801.
		 */
		CSR_WRITE_4(sc, NS_IHR, 0x100 | 4);
	}
#endif

	mii = device_get_softc(sc->sis_miibus);

	/* Set MAC address */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[0]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[1]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[2]);
	} else {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[0]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[1]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[2]);
	}

	/* Init circular TX/RX lists. */
	if (sis_ring_init(sc) != 0) {
		device_printf(sc->sis_dev,
		    "initialization failed: no memory for rx buffers\n");
		sis_stop(sc);
		return;
	}

	/*
	 * Short Cable Receive Errors (MP21.E)
	 * also: Page 78 of the DP83815 data sheet (September 2002 version)
	 * recommends the following register settings "for optimum
	 * performance" for rev 15C parts.  Set this also for 15D parts as
	 * they require it in practice.
	 */
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
		/* set val for c2 */
		CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
		/* load/kill c2 */
		CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
		/* raise SD off, from 4 to c */
		CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
	}


	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_ARP);
		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_PERFECT);
	}

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
	} else {
		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
	} else {
		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
	}

	/*
	 * Load the multicast filter.
	 */
	if (sc->sis_type == SIS_TYPE_83815)
		sis_setmulti_ns(sc);
	else
		sis_setmulti_sis(sc);

	/* Turn the receive filter on */
	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, SIS_RX_LISTPTR, sc->sis_rx_paddr);
	CSR_WRITE_4(sc, SIS_TX_LISTPTR, sc->sis_tx_paddr);

	/*
	 * SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
	 * the PCI bus.  When this bit is set, the Max DMA Burst Size
	 * for TX/RX DMA should be no larger than 16 double words.
	 */
	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN) {
		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
	} else {
		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);
	}

	/* Accept Long Packets for VLAN support */
	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);

	/* Set TX configuration */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) {
		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
	} else {
		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
	}

	/* Set full/half duplex mode. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		SIS_SETBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	} else {
		SIS_CLRBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	}

	if (sc->sis_type == SIS_TYPE_83816) {
		/*
		 * MPII03.D: Half Duplex Excessive Collisions.
		 * Also page 49 in 83816 manual
		 */
		SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D);
	}

	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A &&
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
		uint32_t reg;

		/*
		 * Short Cable Receive Errors (MP21.E)
		 */
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
		reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff;
		CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000);
		DELAY(100000);
		reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff;
		if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) {
			device_printf(sc->sis_dev,
			    "Applying short cable fix (reg=%x)\n", reg);
			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8);
			SIS_SETBIT(sc, NS_PHY_DSPCFG, 0x20);
		}
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
	}

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
#ifdef DEVICE_POLLING
	/*
	 * ... only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_4(sc, SIS_IER, 0);
	else
#endif
	CSR_WRITE_4(sc, SIS_IER, 1);

	/* Enable receiver and transmitter. */
	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

#ifdef notdef
	mii_mediachg(mii);
#endif

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (!sc->in_tick)
		callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc);
}
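
/*
 * Clearing sc->sis_link below forces sis_tick() to re-detect the link
 * after the PHY reset/renegotiation and restart the transmit queue once
 * the link comes back up.
 */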
/*
 * Set media options.
 */
static int
sis_ifmedia_upd(struct ifnet *ifp)
{
	struct sis_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	SIS_LOCK(sc);
	mii = device_get_softc(sc->sis_miibus);
	sc->sis_link = 0;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);
	SIS_UNLOCK(sc);

	return(0);
}

/*
 * Report current media status.
 */
static void
sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sis_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	SIS_LOCK(sc);
	mii = device_get_softc(sc->sis_miibus);
	mii_pollstat(mii);
	SIS_UNLOCK(sc);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sis_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	switch(command) {
	case SIOCSIFFLAGS:
		SIS_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			sis_initl(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			sis_stop(sc);
		}
		SIS_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		SIS_LOCK(sc);
		if (sc->sis_type == SIS_TYPE_83815)
			sis_setmulti_ns(sc);
		else
			sis_setmulti_sis(sc);
		SIS_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->sis_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		/* ok, disable interrupts */
#ifdef DEVICE_POLLING
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(sis_poll, ifp);
			if (error)
				return(error);
			SIS_LOCK(sc);
			/* Disable interrupts */
			CSR_WRITE_4(sc, SIS_IER, 0);
			ifp->if_capenable |= IFCAP_POLLING;
			SIS_UNLOCK(sc);
			return (error);

		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			SIS_LOCK(sc);
			CSR_WRITE_4(sc, SIS_IER, 1);
			ifp->if_capenable &= ~IFCAP_POLLING;
			SIS_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}

static void
sis_watchdog(struct sis_softc *sc)
{

	SIS_LOCK_ASSERT(sc);
	if (sc->sis_stopped) {
		SIS_UNLOCK(sc);
		return;
	}

	if (sc->sis_watchdog_timer == 0 || --sc->sis_watchdog_timer > 0)
		return;

	device_printf(sc->sis_dev, "watchdog timeout\n");
	sc->sis_ifp->if_oerrors++;

	sis_stop(sc);
	sis_reset(sc);
	sis_initl(sc);

	if (!IFQ_DRV_IS_EMPTY(&sc->sis_ifp->if_snd))
		sis_startl(sc->sis_ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
sis_stop(struct sis_softc *sc)
{
	int i;
	struct ifnet *ifp;
	struct sis_desc *dp;

	if (sc->sis_stopped)
		return;
	SIS_LOCK_ASSERT(sc);
	ifp = sc->sis_ifp;
	sc->sis_watchdog_timer = 0;

	callout_stop(&sc->sis_stat_ch);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	CSR_WRITE_4(sc, SIS_IER, 0);
	CSR_WRITE_4(sc, SIS_IMR, 0);
	CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
	DELAY(1000);
	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);

	sc->sis_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	dp = &sc->sis_rx_list[0];
	for (i = 0; i < SIS_RX_LIST_CNT; i++, dp++) {
		if (dp->sis_mbuf == NULL)
			continue;
		bus_dmamap_unload(sc->sis_tag, dp->sis_map);
		bus_dmamap_destroy(sc->sis_tag, dp->sis_map);
		m_freem(dp->sis_mbuf);
		dp->sis_mbuf = NULL;
	}
	bzero(sc->sis_rx_list, SIS_RX_LIST_SZ);

	/*
	 * Free the TX list buffers.
	 */
	dp = &sc->sis_tx_list[0];
	for (i = 0; i < SIS_TX_LIST_CNT; i++, dp++) {
		if (dp->sis_mbuf == NULL)
			continue;
		bus_dmamap_unload(sc->sis_tag, dp->sis_map);
		bus_dmamap_destroy(sc->sis_tag, dp->sis_map);
		m_freem(dp->sis_mbuf);
		dp->sis_mbuf = NULL;
	}

	bzero(sc->sis_tx_list, SIS_TX_LIST_SZ);

	sc->sis_stopped = 1;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
sis_shutdown(device_t dev)
{
	struct sis_softc *sc;

	sc = device_get_softc(dev);
	SIS_LOCK(sc);
	sis_reset(sc);
	sis_stop(sc);
	SIS_UNLOCK(sc);
	return (0);
}

static device_method_t sis_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sis_probe),
	DEVMETHOD(device_attach,	sis_attach),
	DEVMETHOD(device_detach,	sis_detach),
	DEVMETHOD(device_shutdown,	sis_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sis_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sis_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sis_miibus_statchg),

	{ 0, 0 }
};

static driver_t sis_driver = {
	"sis",
	sis_methods,
	sizeof(struct sis_softc)
};

static devclass_t sis_devclass;

DRIVER_MODULE(sis, pci, sis_driver, sis_devclass, 0, 0);
DRIVER_MODULE(miibus, sis, miibus_driver, miibus_devclass, 0, 0);