/*-
 * Copyright (c) 2005 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
 * available from http://www.sis.com.tw.
 *
 * This driver also supports the NatSemi DP83815. Datasheets are
 * available from http://www.national.com.
 *
 * Written by Bill Paul <wpaul@ee.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */
/*
 * The SiS 900 is a fairly simple chip. It uses bus master DMA with
 * simple TX and RX descriptors of 3 longwords in size. The receiver
 * has a single perfect filter entry for the station address and a
 * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
 * transceiver while the 7016 requires an external transceiver chip.
 * Both chips offer the standard bit-bang MII interface as well as
 * an enhanced PHY interface which simplifies accessing MII registers.
 *
 * The only downside to this chipset is that RX descriptors must be
 * longword aligned.
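 *
 * As the rest of this file uses them, the three longwords of a descriptor
 * are a link to the next descriptor (sis_next), a command/status word
 * (sis_ctl, also read back as sis_rxstat/sis_txstat once the chip hands the
 * descriptor back) and a buffer pointer (sis_ptr); the remaining members of
 * struct sis_desc are software-only bookkeeping.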
59 */ 60 61 #ifdef HAVE_KERNEL_OPTION_HEADERS 62 #include "opt_device_polling.h" 63 #endif 64 65 #include <sys/param.h> 66 #include <sys/systm.h> 67 #include <sys/sockio.h> 68 #include <sys/mbuf.h> 69 #include <sys/malloc.h> 70 #include <sys/kernel.h> 71 #include <sys/module.h> 72 #include <sys/socket.h> 73 74 #include <net/if.h> 75 #include <net/if_arp.h> 76 #include <net/ethernet.h> 77 #include <net/if_dl.h> 78 #include <net/if_media.h> 79 #include <net/if_types.h> 80 #include <net/if_vlan_var.h> 81 82 #include <net/bpf.h> 83 84 #include <machine/bus.h> 85 #include <machine/resource.h> 86 #include <sys/bus.h> 87 #include <sys/rman.h> 88 89 #include <dev/mii/mii.h> 90 #include <dev/mii/miivar.h> 91 92 #include <dev/pci/pcireg.h> 93 #include <dev/pci/pcivar.h> 94 95 #define SIS_USEIOSPACE 96 97 #include <dev/sis/if_sisreg.h> 98 99 MODULE_DEPEND(sis, pci, 1, 1, 1); 100 MODULE_DEPEND(sis, ether, 1, 1, 1); 101 MODULE_DEPEND(sis, miibus, 1, 1, 1); 102 103 /* "device miibus" required. See GENERIC if you get errors here. */ 104 #include "miibus_if.h" 105 106 #define SIS_LOCK(_sc) mtx_lock(&(_sc)->sis_mtx) 107 #define SIS_UNLOCK(_sc) mtx_unlock(&(_sc)->sis_mtx) 108 #define SIS_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sis_mtx, MA_OWNED) 109 110 /* 111 * register space access macros 112 */ 113 #define CSR_WRITE_4(sc, reg, val) bus_write_4(sc->sis_res[0], reg, val) 114 115 #define CSR_READ_4(sc, reg) bus_read_4(sc->sis_res[0], reg) 116 117 #define CSR_READ_2(sc, reg) bus_read_2(sc->sis_res[0], reg) 118 119 /* 120 * Various supported device vendors/types and their names. 121 */ 122 static struct sis_type sis_devs[] = { 123 { SIS_VENDORID, SIS_DEVICEID_900, "SiS 900 10/100BaseTX" }, 124 { SIS_VENDORID, SIS_DEVICEID_7016, "SiS 7016 10/100BaseTX" }, 125 { NS_VENDORID, NS_DEVICEID_DP83815, "NatSemi DP8381[56] 10/100BaseTX" }, 126 { 0, 0, NULL } 127 }; 128 129 static int sis_detach(device_t); 130 static void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *); 131 static int sis_ifmedia_upd(struct ifnet *); 132 static void sis_init(void *); 133 static void sis_initl(struct sis_softc *); 134 static void sis_intr(void *); 135 static int sis_ioctl(struct ifnet *, u_long, caddr_t); 136 static int sis_newbuf(struct sis_softc *, struct sis_desc *, struct mbuf *); 137 static void sis_start(struct ifnet *); 138 static void sis_startl(struct ifnet *); 139 static void sis_stop(struct sis_softc *); 140 static void sis_watchdog(struct sis_softc *); 141 142 143 static struct resource_spec sis_res_spec[] = { 144 #ifdef SIS_USEIOSPACE 145 { SYS_RES_IOPORT, SIS_PCI_LOIO, RF_ACTIVE}, 146 #else 147 { SYS_RES_MEMORY, SIS_PCI_LOMEM, RF_ACTIVE}, 148 #endif 149 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE}, 150 { -1, 0 } 151 }; 152 153 #define SIS_SETBIT(sc, reg, x) \ 154 CSR_WRITE_4(sc, reg, \ 155 CSR_READ_4(sc, reg) | (x)) 156 157 #define SIS_CLRBIT(sc, reg, x) \ 158 CSR_WRITE_4(sc, reg, \ 159 CSR_READ_4(sc, reg) & ~(x)) 160 161 #define SIO_SET(x) \ 162 CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x) 163 164 #define SIO_CLR(x) \ 165 CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x) 166 167 static void 168 sis_dma_map_desc_next(void *arg, bus_dma_segment_t *segs, int nseg, int error) 169 { 170 struct sis_desc *r; 171 172 r = arg; 173 r->sis_next = segs->ds_addr; 174 } 175 176 static void 177 sis_dma_map_desc_ptr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 178 { 179 struct sis_desc *r; 180 181 r = arg; 182 r->sis_ptr = segs->ds_addr; 183 } 184 185 static void 186 sis_dma_map_ring(void *arg, 
bus_dma_segment_t *segs, int nseg, int error) 187 { 188 u_int32_t *p; 189 190 p = arg; 191 *p = segs->ds_addr; 192 } 193 194 /* 195 * Routine to reverse the bits in a word. Stolen almost 196 * verbatim from /usr/games/fortune. 197 */ 198 static uint16_t 199 sis_reverse(uint16_t n) 200 { 201 n = ((n >> 1) & 0x5555) | ((n << 1) & 0xaaaa); 202 n = ((n >> 2) & 0x3333) | ((n << 2) & 0xcccc); 203 n = ((n >> 4) & 0x0f0f) | ((n << 4) & 0xf0f0); 204 n = ((n >> 8) & 0x00ff) | ((n << 8) & 0xff00); 205 206 return(n); 207 } 208 209 static void 210 sis_delay(struct sis_softc *sc) 211 { 212 int idx; 213 214 for (idx = (300 / 33) + 1; idx > 0; idx--) 215 CSR_READ_4(sc, SIS_CSR); 216 } 217 218 static void 219 sis_eeprom_idle(struct sis_softc *sc) 220 { 221 int i; 222 223 SIO_SET(SIS_EECTL_CSEL); 224 sis_delay(sc); 225 SIO_SET(SIS_EECTL_CLK); 226 sis_delay(sc); 227 228 for (i = 0; i < 25; i++) { 229 SIO_CLR(SIS_EECTL_CLK); 230 sis_delay(sc); 231 SIO_SET(SIS_EECTL_CLK); 232 sis_delay(sc); 233 } 234 235 SIO_CLR(SIS_EECTL_CLK); 236 sis_delay(sc); 237 SIO_CLR(SIS_EECTL_CSEL); 238 sis_delay(sc); 239 CSR_WRITE_4(sc, SIS_EECTL, 0x00000000); 240 } 241 242 /* 243 * Send a read command and address to the EEPROM, check for ACK. 244 */ 245 static void 246 sis_eeprom_putbyte(struct sis_softc *sc, int addr) 247 { 248 int d, i; 249 250 d = addr | SIS_EECMD_READ; 251 252 /* 253 * Feed in each bit and stobe the clock. 254 */ 255 for (i = 0x400; i; i >>= 1) { 256 if (d & i) { 257 SIO_SET(SIS_EECTL_DIN); 258 } else { 259 SIO_CLR(SIS_EECTL_DIN); 260 } 261 sis_delay(sc); 262 SIO_SET(SIS_EECTL_CLK); 263 sis_delay(sc); 264 SIO_CLR(SIS_EECTL_CLK); 265 sis_delay(sc); 266 } 267 } 268 269 /* 270 * Read a word of data stored in the EEPROM at address 'addr.' 271 */ 272 static void 273 sis_eeprom_getword(struct sis_softc *sc, int addr, uint16_t *dest) 274 { 275 int i; 276 u_int16_t word = 0; 277 278 /* Force EEPROM to idle state. */ 279 sis_eeprom_idle(sc); 280 281 /* Enter EEPROM access mode. */ 282 sis_delay(sc); 283 SIO_CLR(SIS_EECTL_CLK); 284 sis_delay(sc); 285 SIO_SET(SIS_EECTL_CSEL); 286 sis_delay(sc); 287 288 /* 289 * Send address of word we want to read. 290 */ 291 sis_eeprom_putbyte(sc, addr); 292 293 /* 294 * Start reading bits from EEPROM. 295 */ 296 for (i = 0x8000; i; i >>= 1) { 297 SIO_SET(SIS_EECTL_CLK); 298 sis_delay(sc); 299 if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT) 300 word |= i; 301 sis_delay(sc); 302 SIO_CLR(SIS_EECTL_CLK); 303 sis_delay(sc); 304 } 305 306 /* Turn off EEPROM access mode. */ 307 sis_eeprom_idle(sc); 308 309 *dest = word; 310 } 311 312 /* 313 * Read a sequence of words from the EEPROM. 
314 */ 315 static void 316 sis_read_eeprom(struct sis_softc *sc, caddr_t dest, int off, int cnt, int swap) 317 { 318 int i; 319 u_int16_t word = 0, *ptr; 320 321 for (i = 0; i < cnt; i++) { 322 sis_eeprom_getword(sc, off + i, &word); 323 ptr = (u_int16_t *)(dest + (i * 2)); 324 if (swap) 325 *ptr = ntohs(word); 326 else 327 *ptr = word; 328 } 329 } 330 331 #if defined(__i386__) || defined(__amd64__) 332 static device_t 333 sis_find_bridge(device_t dev) 334 { 335 devclass_t pci_devclass; 336 device_t *pci_devices; 337 int pci_count = 0; 338 device_t *pci_children; 339 int pci_childcount = 0; 340 device_t *busp, *childp; 341 device_t child = NULL; 342 int i, j; 343 344 if ((pci_devclass = devclass_find("pci")) == NULL) 345 return(NULL); 346 347 devclass_get_devices(pci_devclass, &pci_devices, &pci_count); 348 349 for (i = 0, busp = pci_devices; i < pci_count; i++, busp++) { 350 if (device_get_children(*busp, &pci_children, &pci_childcount)) 351 continue; 352 for (j = 0, childp = pci_children; 353 j < pci_childcount; j++, childp++) { 354 if (pci_get_vendor(*childp) == SIS_VENDORID && 355 pci_get_device(*childp) == 0x0008) { 356 child = *childp; 357 free(pci_children, M_TEMP); 358 goto done; 359 } 360 } 361 free(pci_children, M_TEMP); 362 } 363 364 done: 365 free(pci_devices, M_TEMP); 366 return(child); 367 } 368 369 static void 370 sis_read_cmos(struct sis_softc *sc, device_t dev, caddr_t dest, int off, int cnt) 371 { 372 device_t bridge; 373 u_int8_t reg; 374 int i; 375 bus_space_tag_t btag; 376 377 bridge = sis_find_bridge(dev); 378 if (bridge == NULL) 379 return; 380 reg = pci_read_config(bridge, 0x48, 1); 381 pci_write_config(bridge, 0x48, reg|0x40, 1); 382 383 /* XXX */ 384 #if defined(__i386__) 385 btag = I386_BUS_SPACE_IO; 386 #elif defined(__amd64__) 387 btag = AMD64_BUS_SPACE_IO; 388 #endif 389 390 for (i = 0; i < cnt; i++) { 391 bus_space_write_1(btag, 0x0, 0x70, i + off); 392 *(dest + i) = bus_space_read_1(btag, 0x0, 0x71); 393 } 394 395 pci_write_config(bridge, 0x48, reg & ~0x40, 1); 396 return; 397 } 398 399 static void 400 sis_read_mac(struct sis_softc *sc, device_t dev, caddr_t dest) 401 { 402 u_int32_t filtsave, csrsave; 403 404 filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL); 405 csrsave = CSR_READ_4(sc, SIS_CSR); 406 407 CSR_WRITE_4(sc, SIS_CSR, SIS_CSR_RELOAD | filtsave); 408 CSR_WRITE_4(sc, SIS_CSR, 0); 409 410 CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave & ~SIS_RXFILTCTL_ENABLE); 411 412 CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0); 413 ((u_int16_t *)dest)[0] = CSR_READ_2(sc, SIS_RXFILT_DATA); 414 CSR_WRITE_4(sc, SIS_RXFILT_CTL,SIS_FILTADDR_PAR1); 415 ((u_int16_t *)dest)[1] = CSR_READ_2(sc, SIS_RXFILT_DATA); 416 CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2); 417 ((u_int16_t *)dest)[2] = CSR_READ_2(sc, SIS_RXFILT_DATA); 418 419 CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave); 420 CSR_WRITE_4(sc, SIS_CSR, csrsave); 421 return; 422 } 423 #endif 424 425 /* 426 * Sync the PHYs by setting data bit and strobing the clock 32 times. 427 */ 428 static void 429 sis_mii_sync(struct sis_softc *sc) 430 { 431 int i; 432 433 SIO_SET(SIS_MII_DIR|SIS_MII_DATA); 434 435 for (i = 0; i < 32; i++) { 436 SIO_SET(SIS_MII_CLK); 437 DELAY(1); 438 SIO_CLR(SIS_MII_CLK); 439 DELAY(1); 440 } 441 } 442 443 /* 444 * Clock a series of bits through the MII. 
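 * The bits are clocked out MSB-first; sis_mii_readreg() and
 * sis_mii_writereg() below rely on this to shift out the start delimiter,
 * opcode, PHY address and register address (and, for writes, the
 * turnaround and data bits) of a management frame.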
445 */ 446 static void 447 sis_mii_send(struct sis_softc *sc, uint32_t bits, int cnt) 448 { 449 int i; 450 451 SIO_CLR(SIS_MII_CLK); 452 453 for (i = (0x1 << (cnt - 1)); i; i >>= 1) { 454 if (bits & i) { 455 SIO_SET(SIS_MII_DATA); 456 } else { 457 SIO_CLR(SIS_MII_DATA); 458 } 459 DELAY(1); 460 SIO_CLR(SIS_MII_CLK); 461 DELAY(1); 462 SIO_SET(SIS_MII_CLK); 463 } 464 } 465 466 /* 467 * Read an PHY register through the MII. 468 */ 469 static int 470 sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame) 471 { 472 int i, ack; 473 474 /* 475 * Set up frame for RX. 476 */ 477 frame->mii_stdelim = SIS_MII_STARTDELIM; 478 frame->mii_opcode = SIS_MII_READOP; 479 frame->mii_turnaround = 0; 480 frame->mii_data = 0; 481 482 /* 483 * Turn on data xmit. 484 */ 485 SIO_SET(SIS_MII_DIR); 486 487 sis_mii_sync(sc); 488 489 /* 490 * Send command/address info. 491 */ 492 sis_mii_send(sc, frame->mii_stdelim, 2); 493 sis_mii_send(sc, frame->mii_opcode, 2); 494 sis_mii_send(sc, frame->mii_phyaddr, 5); 495 sis_mii_send(sc, frame->mii_regaddr, 5); 496 497 /* Idle bit */ 498 SIO_CLR((SIS_MII_CLK|SIS_MII_DATA)); 499 DELAY(1); 500 SIO_SET(SIS_MII_CLK); 501 DELAY(1); 502 503 /* Turn off xmit. */ 504 SIO_CLR(SIS_MII_DIR); 505 506 /* Check for ack */ 507 SIO_CLR(SIS_MII_CLK); 508 DELAY(1); 509 ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA; 510 SIO_SET(SIS_MII_CLK); 511 DELAY(1); 512 513 /* 514 * Now try reading data bits. If the ack failed, we still 515 * need to clock through 16 cycles to keep the PHY(s) in sync. 516 */ 517 if (ack) { 518 for(i = 0; i < 16; i++) { 519 SIO_CLR(SIS_MII_CLK); 520 DELAY(1); 521 SIO_SET(SIS_MII_CLK); 522 DELAY(1); 523 } 524 goto fail; 525 } 526 527 for (i = 0x8000; i; i >>= 1) { 528 SIO_CLR(SIS_MII_CLK); 529 DELAY(1); 530 if (!ack) { 531 if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA) 532 frame->mii_data |= i; 533 DELAY(1); 534 } 535 SIO_SET(SIS_MII_CLK); 536 DELAY(1); 537 } 538 539 fail: 540 541 SIO_CLR(SIS_MII_CLK); 542 DELAY(1); 543 SIO_SET(SIS_MII_CLK); 544 DELAY(1); 545 546 if (ack) 547 return(1); 548 return(0); 549 } 550 551 /* 552 * Write to a PHY register through the MII. 553 */ 554 static int 555 sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame) 556 { 557 558 /* 559 * Set up frame for TX. 560 */ 561 562 frame->mii_stdelim = SIS_MII_STARTDELIM; 563 frame->mii_opcode = SIS_MII_WRITEOP; 564 frame->mii_turnaround = SIS_MII_TURNAROUND; 565 566 /* 567 * Turn on data output. 568 */ 569 SIO_SET(SIS_MII_DIR); 570 571 sis_mii_sync(sc); 572 573 sis_mii_send(sc, frame->mii_stdelim, 2); 574 sis_mii_send(sc, frame->mii_opcode, 2); 575 sis_mii_send(sc, frame->mii_phyaddr, 5); 576 sis_mii_send(sc, frame->mii_regaddr, 5); 577 sis_mii_send(sc, frame->mii_turnaround, 2); 578 sis_mii_send(sc, frame->mii_data, 16); 579 580 /* Idle bit. */ 581 SIO_SET(SIS_MII_CLK); 582 DELAY(1); 583 SIO_CLR(SIS_MII_CLK); 584 DELAY(1); 585 586 /* 587 * Turn off xmit. 588 */ 589 SIO_CLR(SIS_MII_DIR); 590 591 return(0); 592 } 593 594 static int 595 sis_miibus_readreg(device_t dev, int phy, int reg) 596 { 597 struct sis_softc *sc; 598 struct sis_mii_frame frame; 599 600 sc = device_get_softc(dev); 601 602 if (sc->sis_type == SIS_TYPE_83815) { 603 if (phy != 0) 604 return(0); 605 /* 606 * The NatSemi chip can take a while after 607 * a reset to come ready, during which the BMSR 608 * returns a value of 0. 
This is *never* supposed 609 * to happen: some of the BMSR bits are meant to 610 * be hardwired in the on position, and this can 611 * confuse the miibus code a bit during the probe 612 * and attach phase. So we make an effort to check 613 * for this condition and wait for it to clear. 614 */ 615 if (!CSR_READ_4(sc, NS_BMSR)) 616 DELAY(1000); 617 return CSR_READ_4(sc, NS_BMCR + (reg * 4)); 618 } 619 620 /* 621 * Chipsets < SIS_635 seem not to be able to read/write 622 * through mdio. Use the enhanced PHY access register 623 * again for them. 624 */ 625 if (sc->sis_type == SIS_TYPE_900 && 626 sc->sis_rev < SIS_REV_635) { 627 int i, val = 0; 628 629 if (phy != 0) 630 return(0); 631 632 CSR_WRITE_4(sc, SIS_PHYCTL, 633 (phy << 11) | (reg << 6) | SIS_PHYOP_READ); 634 SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS); 635 636 for (i = 0; i < SIS_TIMEOUT; i++) { 637 if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS)) 638 break; 639 } 640 641 if (i == SIS_TIMEOUT) { 642 device_printf(sc->sis_dev, "PHY failed to come ready\n"); 643 return(0); 644 } 645 646 val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF; 647 648 if (val == 0xFFFF) 649 return(0); 650 651 return(val); 652 } else { 653 bzero((char *)&frame, sizeof(frame)); 654 655 frame.mii_phyaddr = phy; 656 frame.mii_regaddr = reg; 657 sis_mii_readreg(sc, &frame); 658 659 return(frame.mii_data); 660 } 661 } 662 663 static int 664 sis_miibus_writereg(device_t dev, int phy, int reg, int data) 665 { 666 struct sis_softc *sc; 667 struct sis_mii_frame frame; 668 669 sc = device_get_softc(dev); 670 671 if (sc->sis_type == SIS_TYPE_83815) { 672 if (phy != 0) 673 return(0); 674 CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data); 675 return(0); 676 } 677 678 /* 679 * Chipsets < SIS_635 seem not to be able to read/write 680 * through mdio. Use the enhanced PHY access register 681 * again for them. 682 */ 683 if (sc->sis_type == SIS_TYPE_900 && 684 sc->sis_rev < SIS_REV_635) { 685 int i; 686 687 if (phy != 0) 688 return(0); 689 690 CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) | 691 (reg << 6) | SIS_PHYOP_WRITE); 692 SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS); 693 694 for (i = 0; i < SIS_TIMEOUT; i++) { 695 if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS)) 696 break; 697 } 698 699 if (i == SIS_TIMEOUT) 700 device_printf(sc->sis_dev, "PHY failed to come ready\n"); 701 } else { 702 bzero((char *)&frame, sizeof(frame)); 703 704 frame.mii_phyaddr = phy; 705 frame.mii_regaddr = reg; 706 frame.mii_data = data; 707 sis_mii_writereg(sc, &frame); 708 } 709 return(0); 710 } 711 712 static void 713 sis_miibus_statchg(device_t dev) 714 { 715 struct sis_softc *sc; 716 717 sc = device_get_softc(dev); 718 SIS_LOCK_ASSERT(sc); 719 sis_initl(sc); 720 } 721 722 static uint32_t 723 sis_mchash(struct sis_softc *sc, const uint8_t *addr) 724 { 725 uint32_t crc; 726 727 /* Compute CRC for the address value. */ 728 crc = ether_crc32_be(addr, ETHER_ADDR_LEN); 729 730 /* 731 * return the filter bit position 732 * 733 * The NatSemi chip has a 512-bit filter, which is 734 * different than the SiS, so we special-case it. 
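	 * For example, with the 128-bit SiS filter the 7-bit hash 0x53
	 * selects hashes[5] (0x53 >> 4) and bit 3 (0x53 & 0xf) in
	 * sis_setmulti_sis() below.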
735 */ 736 if (sc->sis_type == SIS_TYPE_83815) 737 return (crc >> 23); 738 else if (sc->sis_rev >= SIS_REV_635 || 739 sc->sis_rev == SIS_REV_900B) 740 return (crc >> 24); 741 else 742 return (crc >> 25); 743 } 744 745 static void 746 sis_setmulti_ns(struct sis_softc *sc) 747 { 748 struct ifnet *ifp; 749 struct ifmultiaddr *ifma; 750 u_int32_t h = 0, i, filtsave; 751 int bit, index; 752 753 ifp = sc->sis_ifp; 754 755 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 756 SIS_CLRBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH); 757 SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI); 758 return; 759 } 760 761 /* 762 * We have to explicitly enable the multicast hash table 763 * on the NatSemi chip if we want to use it, which we do. 764 */ 765 SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH); 766 SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI); 767 768 filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL); 769 770 /* first, zot all the existing hash bits */ 771 for (i = 0; i < 32; i++) { 772 CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i*2)); 773 CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0); 774 } 775 776 if_maddr_rlock(ifp); 777 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 778 if (ifma->ifma_addr->sa_family != AF_LINK) 779 continue; 780 h = sis_mchash(sc, 781 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 782 index = h >> 3; 783 bit = h & 0x1F; 784 CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index); 785 if (bit > 0xF) 786 bit -= 0x10; 787 SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit)); 788 } 789 if_maddr_runlock(ifp); 790 791 CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave); 792 793 return; 794 } 795 796 static void 797 sis_setmulti_sis(struct sis_softc *sc) 798 { 799 struct ifnet *ifp; 800 struct ifmultiaddr *ifma; 801 u_int32_t h, i, n, ctl; 802 u_int16_t hashes[16]; 803 804 ifp = sc->sis_ifp; 805 806 /* hash table size */ 807 if (sc->sis_rev >= SIS_REV_635 || 808 sc->sis_rev == SIS_REV_900B) 809 n = 16; 810 else 811 n = 8; 812 813 ctl = CSR_READ_4(sc, SIS_RXFILT_CTL) & SIS_RXFILTCTL_ENABLE; 814 815 if (ifp->if_flags & IFF_BROADCAST) 816 ctl |= SIS_RXFILTCTL_BROAD; 817 818 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 819 ctl |= SIS_RXFILTCTL_ALLMULTI; 820 if (ifp->if_flags & IFF_PROMISC) 821 ctl |= SIS_RXFILTCTL_BROAD|SIS_RXFILTCTL_ALLPHYS; 822 for (i = 0; i < n; i++) 823 hashes[i] = ~0; 824 } else { 825 for (i = 0; i < n; i++) 826 hashes[i] = 0; 827 i = 0; 828 if_maddr_rlock(ifp); 829 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 830 if (ifma->ifma_addr->sa_family != AF_LINK) 831 continue; 832 h = sis_mchash(sc, 833 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 834 hashes[h >> 4] |= 1 << (h & 0xf); 835 i++; 836 } 837 if_maddr_runlock(ifp); 838 if (i > n) { 839 ctl |= SIS_RXFILTCTL_ALLMULTI; 840 for (i = 0; i < n; i++) 841 hashes[i] = ~0; 842 } 843 } 844 845 for (i = 0; i < n; i++) { 846 CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16); 847 CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]); 848 } 849 850 CSR_WRITE_4(sc, SIS_RXFILT_CTL, ctl); 851 } 852 853 static void 854 sis_reset(struct sis_softc *sc) 855 { 856 int i; 857 858 SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET); 859 860 for (i = 0; i < SIS_TIMEOUT; i++) { 861 if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET)) 862 break; 863 } 864 865 if (i == SIS_TIMEOUT) 866 device_printf(sc->sis_dev, "reset never completed\n"); 867 868 /* Wait a little while for the chip to get its brains in order. */ 869 DELAY(1000); 870 871 /* 872 * If this is a NetSemi chip, make sure to clear 873 * PME mode. 
874 */ 875 if (sc->sis_type == SIS_TYPE_83815) { 876 CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS); 877 CSR_WRITE_4(sc, NS_CLKRUN, 0); 878 } 879 880 return; 881 } 882 883 /* 884 * Probe for an SiS chip. Check the PCI vendor and device 885 * IDs against our list and return a device name if we find a match. 886 */ 887 static int 888 sis_probe(device_t dev) 889 { 890 struct sis_type *t; 891 892 t = sis_devs; 893 894 while(t->sis_name != NULL) { 895 if ((pci_get_vendor(dev) == t->sis_vid) && 896 (pci_get_device(dev) == t->sis_did)) { 897 device_set_desc(dev, t->sis_name); 898 return (BUS_PROBE_DEFAULT); 899 } 900 t++; 901 } 902 903 return(ENXIO); 904 } 905 906 /* 907 * Attach the interface. Allocate softc structures, do ifmedia 908 * setup and ethernet/BPF attach. 909 */ 910 static int 911 sis_attach(device_t dev) 912 { 913 u_char eaddr[ETHER_ADDR_LEN]; 914 struct sis_softc *sc; 915 struct ifnet *ifp; 916 int error = 0, waittime = 0; 917 918 waittime = 0; 919 sc = device_get_softc(dev); 920 921 sc->sis_dev = dev; 922 923 mtx_init(&sc->sis_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 924 MTX_DEF); 925 callout_init_mtx(&sc->sis_stat_ch, &sc->sis_mtx, 0); 926 927 if (pci_get_device(dev) == SIS_DEVICEID_900) 928 sc->sis_type = SIS_TYPE_900; 929 if (pci_get_device(dev) == SIS_DEVICEID_7016) 930 sc->sis_type = SIS_TYPE_7016; 931 if (pci_get_vendor(dev) == NS_VENDORID) 932 sc->sis_type = SIS_TYPE_83815; 933 934 sc->sis_rev = pci_read_config(dev, PCIR_REVID, 1); 935 /* 936 * Map control/status registers. 937 */ 938 pci_enable_busmaster(dev); 939 940 error = bus_alloc_resources(dev, sis_res_spec, sc->sis_res); 941 if (error) { 942 device_printf(dev, "couldn't allocate resources\n"); 943 goto fail; 944 } 945 946 /* Reset the adapter. */ 947 sis_reset(sc); 948 949 if (sc->sis_type == SIS_TYPE_900 && 950 (sc->sis_rev == SIS_REV_635 || 951 sc->sis_rev == SIS_REV_900B)) { 952 SIO_SET(SIS_CFG_RND_CNT); 953 SIO_SET(SIS_CFG_PERR_DETECT); 954 } 955 956 /* 957 * Get station address from the EEPROM. 958 */ 959 switch (pci_get_vendor(dev)) { 960 case NS_VENDORID: 961 sc->sis_srr = CSR_READ_4(sc, NS_SRR); 962 963 /* We can't update the device description, so spew */ 964 if (sc->sis_srr == NS_SRR_15C) 965 device_printf(dev, "Silicon Revision: DP83815C\n"); 966 else if (sc->sis_srr == NS_SRR_15D) 967 device_printf(dev, "Silicon Revision: DP83815D\n"); 968 else if (sc->sis_srr == NS_SRR_16A) 969 device_printf(dev, "Silicon Revision: DP83816A\n"); 970 else 971 device_printf(dev, "Silicon Revision %x\n", sc->sis_srr); 972 973 /* 974 * Reading the MAC address out of the EEPROM on 975 * the NatSemi chip takes a bit more work than 976 * you'd expect. The address spans 4 16-bit words, 977 * with the first word containing only a single bit. 978 * You have to shift everything over one bit to 979 * get it aligned properly. Also, the bits are 980 * stored backwards (the LSB is really the MSB, 981 * and so on) so you have to reverse them in order 982 * to get the MAC address into the form we want. 983 * Why? Who the hell knows. 984 */ 985 { 986 u_int16_t tmp[4]; 987 988 sis_read_eeprom(sc, (caddr_t)&tmp, 989 NS_EE_NODEADDR, 4, 0); 990 991 /* Shift everything over one bit. */ 992 tmp[3] = tmp[3] >> 1; 993 tmp[3] |= tmp[2] << 15; 994 tmp[2] = tmp[2] >> 1; 995 tmp[2] |= tmp[1] << 15; 996 tmp[1] = tmp[1] >> 1; 997 tmp[1] |= tmp[0] << 15; 998 999 /* Now reverse all the bits. 
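			 * (sis_reverse() swaps bit 0 with bit 15, bit 1 with
			 * bit 14, and so on; e.g. sis_reverse(0x0001) returns
			 * 0x8000.)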
			 */
			tmp[3] = sis_reverse(tmp[3]);
			tmp[2] = sis_reverse(tmp[2]);
			tmp[1] = sis_reverse(tmp[1]);

			bcopy((char *)&tmp[1], eaddr, ETHER_ADDR_LEN);
		}
		break;
	case SIS_VENDORID:
	default:
#if defined(__i386__) || defined(__amd64__)
		/*
		 * If this is a SiS 630E chipset with an embedded
		 * SiS 900 controller, we have to read the MAC address
		 * from the APC CMOS RAM. Our method for doing this
		 * is very ugly since we have to reach out and grab
		 * ahold of hardware for which we cannot properly
		 * allocate resources. This code is only compiled on
		 * the i386 and amd64 architectures since the SiS 630E
		 * chipset is for x86 motherboards only. Note that there
		 * are a lot of magic numbers in this hack. These are
		 * taken from SiS's Linux driver. I'd like to replace
		 * them with proper symbolic definitions, but that
		 * requires some datasheets that I don't have access
		 * to at the moment.
		 */
		if (sc->sis_rev == SIS_REV_630S ||
		    sc->sis_rev == SIS_REV_630E ||
		    sc->sis_rev == SIS_REV_630EA1)
			sis_read_cmos(sc, dev, (caddr_t)&eaddr, 0x9, 6);

		else if (sc->sis_rev == SIS_REV_635 ||
		    sc->sis_rev == SIS_REV_630ET)
			sis_read_mac(sc, dev, (caddr_t)&eaddr);
		else if (sc->sis_rev == SIS_REV_96x) {
			/*
			 * Request EEPROM access for the LAN controller.
			 * The EEPROM is shared between a 1394 controller
			 * and the NIC, and each time we access it we need
			 * to set SIS_EECMD_REQ.
			 */
			SIO_SET(SIS_EECMD_REQ);
			for (waittime = 0; waittime < SIS_TIMEOUT;
			    waittime++) {
				/* Force EEPROM to idle state. */
				sis_eeprom_idle(sc);
				if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECMD_GNT) {
					sis_read_eeprom(sc, (caddr_t)&eaddr,
					    SIS_EE_NODEADDR, 3, 0);
					break;
				}
				DELAY(1);
			}
			/*
			 * Set SIS_EECTL_CLK to high, so another master
			 * can operate on the i2c bus.
			 */
			SIO_SET(SIS_EECTL_CLK);
			/* Relinquish EEPROM access for the LAN controller. */
			SIO_SET(SIS_EECMD_DONE);
		} else
#endif
			sis_read_eeprom(sc, (caddr_t)&eaddr,
			    SIS_EE_NODEADDR, 3, 0);
		break;
	}

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define SIS_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MAXBSIZE, SIS_NSEG_NEW,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sis_parent_tag);
	if (error)
		goto fail;

	/*
	 * Now allocate a tag for the DMA descriptor lists and a chunk
	 * of DMA-able memory based on the tag. Also obtain the physical
	 * addresses of the RX and TX ring, which we'll need later.
	 * All of our lists are allocated as a contiguous block
	 * of memory.
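	 * Each ring is backed by a single bus_dmamem_alloc() allocation,
	 * loaded once with sis_dma_map_ring() so that its base bus address
	 * lands in sis_rx_paddr/sis_tx_paddr for programming SIS_RX_LISTPTR
	 * and SIS_TX_LISTPTR in sis_initl().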
1087 */ 1088 error = bus_dma_tag_create(sc->sis_parent_tag, /* parent */ 1089 1, 0, /* alignment, boundary */ 1090 BUS_SPACE_MAXADDR, /* lowaddr */ 1091 BUS_SPACE_MAXADDR, /* highaddr */ 1092 NULL, NULL, /* filter, filterarg */ 1093 SIS_RX_LIST_SZ, 1, /* maxsize,nsegments */ 1094 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1095 0, /* flags */ 1096 busdma_lock_mutex, /* lockfunc */ 1097 &Giant, /* lockarg */ 1098 &sc->sis_rx_tag); 1099 if (error) 1100 goto fail; 1101 1102 error = bus_dmamem_alloc(sc->sis_rx_tag, 1103 (void **)&sc->sis_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 1104 &sc->sis_rx_dmamap); 1105 1106 if (error) { 1107 device_printf(dev, "no memory for rx list buffers!\n"); 1108 bus_dma_tag_destroy(sc->sis_rx_tag); 1109 sc->sis_rx_tag = NULL; 1110 goto fail; 1111 } 1112 1113 error = bus_dmamap_load(sc->sis_rx_tag, 1114 sc->sis_rx_dmamap, &(sc->sis_rx_list[0]), 1115 sizeof(struct sis_desc), sis_dma_map_ring, 1116 &sc->sis_rx_paddr, 0); 1117 1118 if (error) { 1119 device_printf(dev, "cannot get address of the rx ring!\n"); 1120 bus_dmamem_free(sc->sis_rx_tag, 1121 sc->sis_rx_list, sc->sis_rx_dmamap); 1122 bus_dma_tag_destroy(sc->sis_rx_tag); 1123 sc->sis_rx_tag = NULL; 1124 goto fail; 1125 } 1126 1127 error = bus_dma_tag_create(sc->sis_parent_tag, /* parent */ 1128 1, 0, /* alignment, boundary */ 1129 BUS_SPACE_MAXADDR, /* lowaddr */ 1130 BUS_SPACE_MAXADDR, /* highaddr */ 1131 NULL, NULL, /* filter, filterarg */ 1132 SIS_TX_LIST_SZ, 1, /* maxsize,nsegments */ 1133 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1134 0, /* flags */ 1135 busdma_lock_mutex, /* lockfunc */ 1136 &Giant, /* lockarg */ 1137 &sc->sis_tx_tag); 1138 if (error) 1139 goto fail; 1140 1141 error = bus_dmamem_alloc(sc->sis_tx_tag, 1142 (void **)&sc->sis_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 1143 &sc->sis_tx_dmamap); 1144 1145 if (error) { 1146 device_printf(dev, "no memory for tx list buffers!\n"); 1147 bus_dma_tag_destroy(sc->sis_tx_tag); 1148 sc->sis_tx_tag = NULL; 1149 goto fail; 1150 } 1151 1152 error = bus_dmamap_load(sc->sis_tx_tag, 1153 sc->sis_tx_dmamap, &(sc->sis_tx_list[0]), 1154 sizeof(struct sis_desc), sis_dma_map_ring, 1155 &sc->sis_tx_paddr, 0); 1156 1157 if (error) { 1158 device_printf(dev, "cannot get address of the tx ring!\n"); 1159 bus_dmamem_free(sc->sis_tx_tag, 1160 sc->sis_tx_list, sc->sis_tx_dmamap); 1161 bus_dma_tag_destroy(sc->sis_tx_tag); 1162 sc->sis_tx_tag = NULL; 1163 goto fail; 1164 } 1165 1166 error = bus_dma_tag_create(sc->sis_parent_tag, /* parent */ 1167 1, 0, /* alignment, boundary */ 1168 BUS_SPACE_MAXADDR, /* lowaddr */ 1169 BUS_SPACE_MAXADDR, /* highaddr */ 1170 NULL, NULL, /* filter, filterarg */ 1171 MCLBYTES, 1, /* maxsize,nsegments */ 1172 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1173 0, /* flags */ 1174 busdma_lock_mutex, /* lockfunc */ 1175 &Giant, /* lockarg */ 1176 &sc->sis_tag); 1177 if (error) 1178 goto fail; 1179 1180 /* 1181 * Obtain the physical addresses of the RX and TX 1182 * rings which we'll need later in the init routine. 
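	 * (They were in fact already recorded in sis_rx_paddr and
	 * sis_tx_paddr by sis_dma_map_ring() when the ring maps were
	 * loaded above.)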
1183 */ 1184 1185 ifp = sc->sis_ifp = if_alloc(IFT_ETHER); 1186 if (ifp == NULL) { 1187 device_printf(dev, "can not if_alloc()\n"); 1188 error = ENOSPC; 1189 goto fail; 1190 } 1191 ifp->if_softc = sc; 1192 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1193 ifp->if_mtu = ETHERMTU; 1194 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1195 ifp->if_ioctl = sis_ioctl; 1196 ifp->if_start = sis_start; 1197 ifp->if_init = sis_init; 1198 IFQ_SET_MAXLEN(&ifp->if_snd, SIS_TX_LIST_CNT - 1); 1199 ifp->if_snd.ifq_drv_maxlen = SIS_TX_LIST_CNT - 1; 1200 IFQ_SET_READY(&ifp->if_snd); 1201 1202 /* 1203 * Do MII setup. 1204 */ 1205 if (mii_phy_probe(dev, &sc->sis_miibus, 1206 sis_ifmedia_upd, sis_ifmedia_sts)) { 1207 device_printf(dev, "MII without any PHY!\n"); 1208 error = ENXIO; 1209 goto fail; 1210 } 1211 1212 /* 1213 * Call MI attach routine. 1214 */ 1215 ether_ifattach(ifp, eaddr); 1216 1217 /* 1218 * Tell the upper layer(s) we support long frames. 1219 */ 1220 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1221 ifp->if_capabilities |= IFCAP_VLAN_MTU; 1222 ifp->if_capenable = ifp->if_capabilities; 1223 #ifdef DEVICE_POLLING 1224 ifp->if_capabilities |= IFCAP_POLLING; 1225 #endif 1226 1227 /* Hook interrupt last to avoid having to lock softc */ 1228 error = bus_setup_intr(dev, sc->sis_res[1], INTR_TYPE_NET | INTR_MPSAFE, 1229 NULL, sis_intr, sc, &sc->sis_intrhand); 1230 1231 if (error) { 1232 device_printf(dev, "couldn't set up irq\n"); 1233 ether_ifdetach(ifp); 1234 goto fail; 1235 } 1236 1237 fail: 1238 if (error) 1239 sis_detach(dev); 1240 1241 return(error); 1242 } 1243 1244 /* 1245 * Shutdown hardware and free up resources. This can be called any 1246 * time after the mutex has been initialized. It is called in both 1247 * the error case in attach and the normal detach case so it needs 1248 * to be careful about only freeing resources that have actually been 1249 * allocated. 1250 */ 1251 static int 1252 sis_detach(device_t dev) 1253 { 1254 struct sis_softc *sc; 1255 struct ifnet *ifp; 1256 1257 sc = device_get_softc(dev); 1258 KASSERT(mtx_initialized(&sc->sis_mtx), ("sis mutex not initialized")); 1259 ifp = sc->sis_ifp; 1260 1261 #ifdef DEVICE_POLLING 1262 if (ifp->if_capenable & IFCAP_POLLING) 1263 ether_poll_deregister(ifp); 1264 #endif 1265 1266 /* These should only be active if attach succeeded. 
*/ 1267 if (device_is_attached(dev)) { 1268 SIS_LOCK(sc); 1269 sis_reset(sc); 1270 sis_stop(sc); 1271 SIS_UNLOCK(sc); 1272 callout_drain(&sc->sis_stat_ch); 1273 ether_ifdetach(ifp); 1274 } 1275 if (sc->sis_miibus) 1276 device_delete_child(dev, sc->sis_miibus); 1277 bus_generic_detach(dev); 1278 1279 if (sc->sis_intrhand) 1280 bus_teardown_intr(dev, sc->sis_res[1], sc->sis_intrhand); 1281 bus_release_resources(dev, sis_res_spec, sc->sis_res); 1282 1283 if (ifp) 1284 if_free(ifp); 1285 1286 if (sc->sis_rx_tag) { 1287 bus_dmamap_unload(sc->sis_rx_tag, 1288 sc->sis_rx_dmamap); 1289 bus_dmamem_free(sc->sis_rx_tag, 1290 sc->sis_rx_list, sc->sis_rx_dmamap); 1291 bus_dma_tag_destroy(sc->sis_rx_tag); 1292 } 1293 if (sc->sis_tx_tag) { 1294 bus_dmamap_unload(sc->sis_tx_tag, 1295 sc->sis_tx_dmamap); 1296 bus_dmamem_free(sc->sis_tx_tag, 1297 sc->sis_tx_list, sc->sis_tx_dmamap); 1298 bus_dma_tag_destroy(sc->sis_tx_tag); 1299 } 1300 if (sc->sis_parent_tag) 1301 bus_dma_tag_destroy(sc->sis_parent_tag); 1302 if (sc->sis_tag) 1303 bus_dma_tag_destroy(sc->sis_tag); 1304 1305 mtx_destroy(&sc->sis_mtx); 1306 1307 return(0); 1308 } 1309 1310 /* 1311 * Initialize the TX and RX descriptors and allocate mbufs for them. Note that 1312 * we arrange the descriptors in a closed ring, so that the last descriptor 1313 * points back to the first. 1314 */ 1315 static int 1316 sis_ring_init(struct sis_softc *sc) 1317 { 1318 int i, error; 1319 struct sis_desc *dp; 1320 1321 dp = &sc->sis_tx_list[0]; 1322 for (i = 0; i < SIS_TX_LIST_CNT; i++, dp++) { 1323 if (i == (SIS_TX_LIST_CNT - 1)) 1324 dp->sis_nextdesc = &sc->sis_tx_list[0]; 1325 else 1326 dp->sis_nextdesc = dp + 1; 1327 bus_dmamap_load(sc->sis_tx_tag, 1328 sc->sis_tx_dmamap, 1329 dp->sis_nextdesc, sizeof(struct sis_desc), 1330 sis_dma_map_desc_next, dp, 0); 1331 dp->sis_mbuf = NULL; 1332 dp->sis_ptr = 0; 1333 dp->sis_ctl = 0; 1334 } 1335 1336 sc->sis_tx_prod = sc->sis_tx_cons = sc->sis_tx_cnt = 0; 1337 1338 bus_dmamap_sync(sc->sis_tx_tag, 1339 sc->sis_tx_dmamap, BUS_DMASYNC_PREWRITE); 1340 1341 dp = &sc->sis_rx_list[0]; 1342 for (i = 0; i < SIS_RX_LIST_CNT; i++, dp++) { 1343 error = sis_newbuf(sc, dp, NULL); 1344 if (error) 1345 return(error); 1346 if (i == (SIS_RX_LIST_CNT - 1)) 1347 dp->sis_nextdesc = &sc->sis_rx_list[0]; 1348 else 1349 dp->sis_nextdesc = dp + 1; 1350 bus_dmamap_load(sc->sis_rx_tag, 1351 sc->sis_rx_dmamap, 1352 dp->sis_nextdesc, sizeof(struct sis_desc), 1353 sis_dma_map_desc_next, dp, 0); 1354 } 1355 1356 bus_dmamap_sync(sc->sis_rx_tag, 1357 sc->sis_rx_dmamap, BUS_DMASYNC_PREWRITE); 1358 1359 sc->sis_rx_pdsc = &sc->sis_rx_list[0]; 1360 1361 return(0); 1362 } 1363 1364 /* 1365 * Initialize an RX descriptor and attach an MBUF cluster. 1366 */ 1367 static int 1368 sis_newbuf(struct sis_softc *sc, struct sis_desc *c, struct mbuf *m) 1369 { 1370 1371 if (c == NULL) 1372 return(EINVAL); 1373 1374 if (m == NULL) { 1375 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1376 if (m == NULL) 1377 return(ENOBUFS); 1378 } else 1379 m->m_data = m->m_ext.ext_buf; 1380 1381 c->sis_mbuf = m; 1382 c->sis_ctl = SIS_RXLEN; 1383 1384 bus_dmamap_create(sc->sis_tag, 0, &c->sis_map); 1385 bus_dmamap_load(sc->sis_tag, c->sis_map, 1386 mtod(m, void *), MCLBYTES, 1387 sis_dma_map_desc_ptr, c, 0); 1388 bus_dmamap_sync(sc->sis_tag, c->sis_map, BUS_DMASYNC_PREREAD); 1389 1390 return(0); 1391 } 1392 1393 /* 1394 * A frame has been uploaded: pass the resulting mbuf chain up to 1395 * the higher level protocols. 
1396 */ 1397 static int 1398 sis_rxeof(struct sis_softc *sc) 1399 { 1400 struct mbuf *m, *m0; 1401 struct ifnet *ifp; 1402 struct sis_desc *cur_rx; 1403 int total_len = 0, rx_npkts = 0; 1404 u_int32_t rxstat; 1405 1406 SIS_LOCK_ASSERT(sc); 1407 1408 ifp = sc->sis_ifp; 1409 1410 for(cur_rx = sc->sis_rx_pdsc; SIS_OWNDESC(cur_rx); 1411 cur_rx = cur_rx->sis_nextdesc) { 1412 1413 #ifdef DEVICE_POLLING 1414 if (ifp->if_capenable & IFCAP_POLLING) { 1415 if (sc->rxcycles <= 0) 1416 break; 1417 sc->rxcycles--; 1418 } 1419 #endif 1420 rxstat = cur_rx->sis_rxstat; 1421 bus_dmamap_sync(sc->sis_tag, 1422 cur_rx->sis_map, BUS_DMASYNC_POSTWRITE); 1423 bus_dmamap_unload(sc->sis_tag, cur_rx->sis_map); 1424 bus_dmamap_destroy(sc->sis_tag, cur_rx->sis_map); 1425 m = cur_rx->sis_mbuf; 1426 cur_rx->sis_mbuf = NULL; 1427 total_len = SIS_RXBYTES(cur_rx); 1428 1429 /* 1430 * If an error occurs, update stats, clear the 1431 * status word and leave the mbuf cluster in place: 1432 * it should simply get re-used next time this descriptor 1433 * comes up in the ring. 1434 */ 1435 if ((ifp->if_capenable & IFCAP_VLAN_MTU) != 0 && 1436 total_len <= (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN - 1437 ETHER_CRC_LEN)) 1438 rxstat &= ~SIS_RXSTAT_GIANT; 1439 if (SIS_RXSTAT_ERROR(rxstat) != 0) { 1440 ifp->if_ierrors++; 1441 if (rxstat & SIS_RXSTAT_COLL) 1442 ifp->if_collisions++; 1443 sis_newbuf(sc, cur_rx, m); 1444 continue; 1445 } 1446 1447 /* No errors; receive the packet. */ 1448 #ifdef __NO_STRICT_ALIGNMENT 1449 /* 1450 * On architectures without alignment problems we try to 1451 * allocate a new buffer for the receive ring, and pass up 1452 * the one where the packet is already, saving the expensive 1453 * copy done in m_devget(). 1454 * If we are on an architecture with alignment problems, or 1455 * if the allocation fails, then use m_devget and leave the 1456 * existing buffer in the receive ring. 1457 */ 1458 if (sis_newbuf(sc, cur_rx, NULL) == 0) 1459 m->m_pkthdr.len = m->m_len = total_len; 1460 else 1461 #endif 1462 { 1463 m0 = m_devget(mtod(m, char *), total_len, 1464 ETHER_ALIGN, ifp, NULL); 1465 sis_newbuf(sc, cur_rx, m); 1466 if (m0 == NULL) { 1467 ifp->if_ierrors++; 1468 continue; 1469 } 1470 m = m0; 1471 } 1472 1473 ifp->if_ipackets++; 1474 m->m_pkthdr.rcvif = ifp; 1475 1476 SIS_UNLOCK(sc); 1477 (*ifp->if_input)(ifp, m); 1478 SIS_LOCK(sc); 1479 rx_npkts++; 1480 } 1481 1482 sc->sis_rx_pdsc = cur_rx; 1483 return (rx_npkts); 1484 } 1485 1486 /* 1487 * A frame was downloaded to the chip. It's safe for us to clean up 1488 * the list buffers. 1489 */ 1490 1491 static void 1492 sis_txeof(struct sis_softc *sc) 1493 { 1494 struct ifnet *ifp; 1495 u_int32_t idx; 1496 1497 SIS_LOCK_ASSERT(sc); 1498 ifp = sc->sis_ifp; 1499 1500 /* 1501 * Go through our tx list and free mbufs for those 1502 * frames that have been transmitted. 
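	 * The scan stops at the first descriptor still owned by the chip
	 * (SIS_OWNDESC()); descriptors with SIS_CMDSTS_MORE set are
	 * continuation fragments of a multi-fragment frame and carry no
	 * mbuf of their own.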
1503 */ 1504 for (idx = sc->sis_tx_cons; sc->sis_tx_cnt > 0; 1505 sc->sis_tx_cnt--, SIS_INC(idx, SIS_TX_LIST_CNT) ) { 1506 struct sis_desc *cur_tx = &sc->sis_tx_list[idx]; 1507 1508 if (SIS_OWNDESC(cur_tx)) 1509 break; 1510 1511 if (cur_tx->sis_ctl & SIS_CMDSTS_MORE) 1512 continue; 1513 1514 if (!(cur_tx->sis_ctl & SIS_CMDSTS_PKT_OK)) { 1515 ifp->if_oerrors++; 1516 if (cur_tx->sis_txstat & SIS_TXSTAT_EXCESSCOLLS) 1517 ifp->if_collisions++; 1518 if (cur_tx->sis_txstat & SIS_TXSTAT_OUTOFWINCOLL) 1519 ifp->if_collisions++; 1520 } 1521 1522 ifp->if_collisions += 1523 (cur_tx->sis_txstat & SIS_TXSTAT_COLLCNT) >> 16; 1524 1525 ifp->if_opackets++; 1526 if (cur_tx->sis_mbuf != NULL) { 1527 m_freem(cur_tx->sis_mbuf); 1528 cur_tx->sis_mbuf = NULL; 1529 bus_dmamap_unload(sc->sis_tag, cur_tx->sis_map); 1530 bus_dmamap_destroy(sc->sis_tag, cur_tx->sis_map); 1531 } 1532 } 1533 1534 if (idx != sc->sis_tx_cons) { 1535 /* we freed up some buffers */ 1536 sc->sis_tx_cons = idx; 1537 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1538 } 1539 1540 sc->sis_watchdog_timer = (sc->sis_tx_cnt == 0) ? 0 : 5; 1541 1542 return; 1543 } 1544 1545 static void 1546 sis_tick(void *xsc) 1547 { 1548 struct sis_softc *sc; 1549 struct mii_data *mii; 1550 struct ifnet *ifp; 1551 1552 sc = xsc; 1553 SIS_LOCK_ASSERT(sc); 1554 sc->in_tick = 1; 1555 ifp = sc->sis_ifp; 1556 1557 mii = device_get_softc(sc->sis_miibus); 1558 mii_tick(mii); 1559 1560 sis_watchdog(sc); 1561 1562 if (!sc->sis_link && mii->mii_media_status & IFM_ACTIVE && 1563 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1564 sc->sis_link++; 1565 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1566 sis_startl(ifp); 1567 } 1568 1569 callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc); 1570 sc->in_tick = 0; 1571 } 1572 1573 #ifdef DEVICE_POLLING 1574 static poll_handler_t sis_poll; 1575 1576 static int 1577 sis_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1578 { 1579 struct sis_softc *sc = ifp->if_softc; 1580 int rx_npkts = 0; 1581 1582 SIS_LOCK(sc); 1583 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 1584 SIS_UNLOCK(sc); 1585 return (rx_npkts); 1586 } 1587 1588 /* 1589 * On the sis, reading the status register also clears it. 1590 * So before returning to intr mode we must make sure that all 1591 * possible pending sources of interrupts have been served. 1592 * In practice this means run to completion the *eof routines, 1593 * and then call the interrupt routine 1594 */ 1595 sc->rxcycles = count; 1596 rx_npkts = sis_rxeof(sc); 1597 sis_txeof(sc); 1598 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1599 sis_startl(ifp); 1600 1601 if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) { 1602 u_int32_t status; 1603 1604 /* Reading the ISR register clears all interrupts. */ 1605 status = CSR_READ_4(sc, SIS_ISR); 1606 1607 if (status & (SIS_ISR_RX_ERR|SIS_ISR_RX_OFLOW)) 1608 ifp->if_ierrors++; 1609 1610 if (status & (SIS_ISR_RX_IDLE)) 1611 SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE); 1612 1613 if (status & SIS_ISR_SYSERR) { 1614 sis_reset(sc); 1615 sis_initl(sc); 1616 } 1617 } 1618 1619 SIS_UNLOCK(sc); 1620 return (rx_npkts); 1621 } 1622 #endif /* DEVICE_POLLING */ 1623 1624 static void 1625 sis_intr(void *arg) 1626 { 1627 struct sis_softc *sc; 1628 struct ifnet *ifp; 1629 u_int32_t status; 1630 1631 sc = arg; 1632 ifp = sc->sis_ifp; 1633 1634 if (sc->sis_stopped) /* Most likely shared interrupt */ 1635 return; 1636 1637 SIS_LOCK(sc); 1638 #ifdef DEVICE_POLLING 1639 if (ifp->if_capenable & IFCAP_POLLING) { 1640 SIS_UNLOCK(sc); 1641 return; 1642 } 1643 #endif 1644 1645 /* Disable interrupts. 
*/ 1646 CSR_WRITE_4(sc, SIS_IER, 0); 1647 1648 for (;;) { 1649 SIS_LOCK_ASSERT(sc); 1650 /* Reading the ISR register clears all interrupts. */ 1651 status = CSR_READ_4(sc, SIS_ISR); 1652 1653 if ((status & SIS_INTRS) == 0) 1654 break; 1655 1656 if (status & 1657 (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR | 1658 SIS_ISR_TX_OK | SIS_ISR_TX_IDLE) ) 1659 sis_txeof(sc); 1660 1661 if (status & (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK | 1662 SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE)) 1663 sis_rxeof(sc); 1664 1665 if (status & SIS_ISR_RX_OFLOW) 1666 ifp->if_ierrors++; 1667 1668 if (status & (SIS_ISR_RX_IDLE)) 1669 SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE); 1670 1671 if (status & SIS_ISR_SYSERR) { 1672 sis_reset(sc); 1673 sis_initl(sc); 1674 } 1675 } 1676 1677 /* Re-enable interrupts. */ 1678 CSR_WRITE_4(sc, SIS_IER, 1); 1679 1680 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1681 sis_startl(ifp); 1682 1683 SIS_UNLOCK(sc); 1684 } 1685 1686 /* 1687 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1688 * pointers to the fragment pointers. 1689 */ 1690 static int 1691 sis_encap(struct sis_softc *sc, struct mbuf **m_head, uint32_t *txidx) 1692 { 1693 struct sis_desc *f = NULL; 1694 struct mbuf *m; 1695 int frag, cur, cnt = 0, chainlen = 0; 1696 1697 /* 1698 * If there's no way we can send any packets, return now. 1699 */ 1700 if (SIS_TX_LIST_CNT - sc->sis_tx_cnt < 2) 1701 return (ENOBUFS); 1702 1703 /* 1704 * Count the number of frags in this chain to see if 1705 * we need to m_defrag. Since the descriptor list is shared 1706 * by all packets, we'll m_defrag long chains so that they 1707 * do not use up the entire list, even if they would fit. 1708 */ 1709 1710 for (m = *m_head; m != NULL; m = m->m_next) 1711 chainlen++; 1712 1713 if ((chainlen > SIS_TX_LIST_CNT / 4) || 1714 ((SIS_TX_LIST_CNT - (chainlen + sc->sis_tx_cnt)) < 2)) { 1715 m = m_defrag(*m_head, M_DONTWAIT); 1716 if (m == NULL) 1717 return (ENOBUFS); 1718 *m_head = m; 1719 } 1720 1721 /* 1722 * Start packing the mbufs in this chain into 1723 * the fragment pointers. Stop when we run out 1724 * of fragments or hit the end of the mbuf chain. 1725 */ 1726 cur = frag = *txidx; 1727 1728 for (m = *m_head; m != NULL; m = m->m_next) { 1729 if (m->m_len != 0) { 1730 if ((SIS_TX_LIST_CNT - 1731 (sc->sis_tx_cnt + cnt)) < 2) 1732 return(ENOBUFS); 1733 f = &sc->sis_tx_list[frag]; 1734 f->sis_ctl = SIS_CMDSTS_MORE | m->m_len; 1735 bus_dmamap_create(sc->sis_tag, 0, &f->sis_map); 1736 bus_dmamap_load(sc->sis_tag, f->sis_map, 1737 mtod(m, void *), m->m_len, 1738 sis_dma_map_desc_ptr, f, 0); 1739 bus_dmamap_sync(sc->sis_tag, 1740 f->sis_map, BUS_DMASYNC_PREREAD); 1741 if (cnt != 0) 1742 f->sis_ctl |= SIS_CMDSTS_OWN; 1743 cur = frag; 1744 SIS_INC(frag, SIS_TX_LIST_CNT); 1745 cnt++; 1746 } 1747 } 1748 1749 if (m != NULL) 1750 return(ENOBUFS); 1751 1752 sc->sis_tx_list[cur].sis_mbuf = *m_head; 1753 sc->sis_tx_list[cur].sis_ctl &= ~SIS_CMDSTS_MORE; 1754 sc->sis_tx_list[*txidx].sis_ctl |= SIS_CMDSTS_OWN; 1755 sc->sis_tx_cnt += cnt; 1756 *txidx = frag; 1757 1758 return(0); 1759 } 1760 1761 /* 1762 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1763 * to the mbuf data regions directly in the transmit lists. We also save a 1764 * copy of the pointers since the transmit list fragment pointers are 1765 * physical addresses. 
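 *
 * sis_start() is the unlocked ifnet entry point; the actual queue drain
 * happens in sis_startl(), which expects the driver mutex to be held and
 * is also called from the interrupt, tick and watchdog paths.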
1766 */ 1767 1768 static void 1769 sis_start(struct ifnet *ifp) 1770 { 1771 struct sis_softc *sc; 1772 1773 sc = ifp->if_softc; 1774 SIS_LOCK(sc); 1775 sis_startl(ifp); 1776 SIS_UNLOCK(sc); 1777 } 1778 1779 static void 1780 sis_startl(struct ifnet *ifp) 1781 { 1782 struct sis_softc *sc; 1783 struct mbuf *m_head = NULL; 1784 u_int32_t idx, queued = 0; 1785 1786 sc = ifp->if_softc; 1787 1788 SIS_LOCK_ASSERT(sc); 1789 1790 if (!sc->sis_link) 1791 return; 1792 1793 idx = sc->sis_tx_prod; 1794 1795 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) 1796 return; 1797 1798 while(sc->sis_tx_list[idx].sis_mbuf == NULL) { 1799 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1800 if (m_head == NULL) 1801 break; 1802 1803 if (sis_encap(sc, &m_head, &idx)) { 1804 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1805 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1806 break; 1807 } 1808 1809 queued++; 1810 1811 /* 1812 * If there's a BPF listener, bounce a copy of this frame 1813 * to him. 1814 */ 1815 BPF_MTAP(ifp, m_head); 1816 1817 } 1818 1819 if (queued) { 1820 /* Transmit */ 1821 sc->sis_tx_prod = idx; 1822 SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE); 1823 1824 /* 1825 * Set a timeout in case the chip goes out to lunch. 1826 */ 1827 sc->sis_watchdog_timer = 5; 1828 } 1829 } 1830 1831 static void 1832 sis_init(void *xsc) 1833 { 1834 struct sis_softc *sc = xsc; 1835 1836 SIS_LOCK(sc); 1837 sis_initl(sc); 1838 SIS_UNLOCK(sc); 1839 } 1840 1841 static void 1842 sis_initl(struct sis_softc *sc) 1843 { 1844 struct ifnet *ifp = sc->sis_ifp; 1845 struct mii_data *mii; 1846 1847 SIS_LOCK_ASSERT(sc); 1848 1849 /* 1850 * Cancel pending I/O and free all RX/TX buffers. 1851 */ 1852 sis_stop(sc); 1853 sc->sis_stopped = 0; 1854 1855 #ifdef notyet 1856 if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) { 1857 /* 1858 * Configure 400usec of interrupt holdoff. This is based 1859 * on emperical tests on a Soekris 4801. 1860 */ 1861 CSR_WRITE_4(sc, NS_IHR, 0x100 | 4); 1862 } 1863 #endif 1864 1865 mii = device_get_softc(sc->sis_miibus); 1866 1867 /* Set MAC address */ 1868 if (sc->sis_type == SIS_TYPE_83815) { 1869 CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0); 1870 CSR_WRITE_4(sc, SIS_RXFILT_DATA, 1871 ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[0]); 1872 CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1); 1873 CSR_WRITE_4(sc, SIS_RXFILT_DATA, 1874 ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[1]); 1875 CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2); 1876 CSR_WRITE_4(sc, SIS_RXFILT_DATA, 1877 ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[2]); 1878 } else { 1879 CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0); 1880 CSR_WRITE_4(sc, SIS_RXFILT_DATA, 1881 ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[0]); 1882 CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1); 1883 CSR_WRITE_4(sc, SIS_RXFILT_DATA, 1884 ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[1]); 1885 CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2); 1886 CSR_WRITE_4(sc, SIS_RXFILT_DATA, 1887 ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[2]); 1888 } 1889 1890 /* Init circular TX/RX lists. */ 1891 if (sis_ring_init(sc) != 0) { 1892 device_printf(sc->sis_dev, 1893 "initialization failed: no memory for rx buffers\n"); 1894 sis_stop(sc); 1895 return; 1896 } 1897 1898 /* 1899 * Short Cable Receive Errors (MP21.E) 1900 * also: Page 78 of the DP83815 data sheet (september 2002 version) 1901 * recommends the following register settings "for optimum 1902 * performance." for rev 15C. Set this also for 15D parts as 1903 * they require it in practice. 
1904 */ 1905 if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) { 1906 CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001); 1907 CSR_WRITE_4(sc, NS_PHY_CR, 0x189C); 1908 /* set val for c2 */ 1909 CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000); 1910 /* load/kill c2 */ 1911 CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040); 1912 /* rais SD off, from 4 to c */ 1913 CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C); 1914 CSR_WRITE_4(sc, NS_PHY_PAGE, 0); 1915 } 1916 1917 1918 /* 1919 * For the NatSemi chip, we have to explicitly enable the 1920 * reception of ARP frames, as well as turn on the 'perfect 1921 * match' filter where we store the station address, otherwise 1922 * we won't receive unicasts meant for this host. 1923 */ 1924 if (sc->sis_type == SIS_TYPE_83815) { 1925 SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_ARP); 1926 SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_PERFECT); 1927 } 1928 1929 /* If we want promiscuous mode, set the allframes bit. */ 1930 if (ifp->if_flags & IFF_PROMISC) { 1931 SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS); 1932 } else { 1933 SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS); 1934 } 1935 1936 /* 1937 * Set the capture broadcast bit to capture broadcast frames. 1938 */ 1939 if (ifp->if_flags & IFF_BROADCAST) { 1940 SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD); 1941 } else { 1942 SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD); 1943 } 1944 1945 /* 1946 * Load the multicast filter. 1947 */ 1948 if (sc->sis_type == SIS_TYPE_83815) 1949 sis_setmulti_ns(sc); 1950 else 1951 sis_setmulti_sis(sc); 1952 1953 /* Turn the receive filter on */ 1954 SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE); 1955 1956 /* 1957 * Load the address of the RX and TX lists. 1958 */ 1959 CSR_WRITE_4(sc, SIS_RX_LISTPTR, sc->sis_rx_paddr); 1960 CSR_WRITE_4(sc, SIS_TX_LISTPTR, sc->sis_tx_paddr); 1961 1962 /* SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of 1963 * the PCI bus. When this bit is set, the Max DMA Burst Size 1964 * for TX/RX DMA should be no larger than 16 double words. 1965 */ 1966 if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN) { 1967 CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64); 1968 } else { 1969 CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256); 1970 } 1971 1972 /* Accept Long Packets for VLAN support */ 1973 SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER); 1974 1975 /* Set TX configuration */ 1976 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) { 1977 CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10); 1978 } else { 1979 CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100); 1980 } 1981 1982 /* Set full/half duplex mode. */ 1983 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 1984 SIS_SETBIT(sc, SIS_TX_CFG, 1985 (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR)); 1986 SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS); 1987 } else { 1988 SIS_CLRBIT(sc, SIS_TX_CFG, 1989 (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR)); 1990 SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS); 1991 } 1992 1993 if (sc->sis_type == SIS_TYPE_83816) { 1994 /* 1995 * MPII03.D: Half Duplex Excessive Collisions. 
1996 * Also page 49 in 83816 manual 1997 */ 1998 SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D); 1999 } 2000 2001 if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A && 2002 IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) { 2003 uint32_t reg; 2004 2005 /* 2006 * Short Cable Receive Errors (MP21.E) 2007 */ 2008 CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001); 2009 reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff; 2010 CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000); 2011 DELAY(100); 2012 reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff; 2013 if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) { 2014 device_printf(sc->sis_dev, 2015 "Applying short cable fix (reg=%x)\n", reg); 2016 CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8); 2017 SIS_SETBIT(sc, NS_PHY_DSPCFG, 0x20); 2018 } 2019 CSR_WRITE_4(sc, NS_PHY_PAGE, 0); 2020 } 2021 2022 /* 2023 * Enable interrupts. 2024 */ 2025 CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS); 2026 #ifdef DEVICE_POLLING 2027 /* 2028 * ... only enable interrupts if we are not polling, make sure 2029 * they are off otherwise. 2030 */ 2031 if (ifp->if_capenable & IFCAP_POLLING) 2032 CSR_WRITE_4(sc, SIS_IER, 0); 2033 else 2034 #endif 2035 CSR_WRITE_4(sc, SIS_IER, 1); 2036 2037 /* Enable receiver and transmitter. */ 2038 SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE); 2039 SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE); 2040 2041 #ifdef notdef 2042 mii_mediachg(mii); 2043 #endif 2044 2045 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2046 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2047 2048 if (!sc->in_tick) 2049 callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc); 2050 } 2051 2052 /* 2053 * Set media options. 2054 */ 2055 static int 2056 sis_ifmedia_upd(struct ifnet *ifp) 2057 { 2058 struct sis_softc *sc; 2059 struct mii_data *mii; 2060 2061 sc = ifp->if_softc; 2062 2063 SIS_LOCK(sc); 2064 mii = device_get_softc(sc->sis_miibus); 2065 sc->sis_link = 0; 2066 if (mii->mii_instance) { 2067 struct mii_softc *miisc; 2068 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2069 mii_phy_reset(miisc); 2070 } 2071 mii_mediachg(mii); 2072 SIS_UNLOCK(sc); 2073 2074 return(0); 2075 } 2076 2077 /* 2078 * Report current media status. 
2079 */ 2080 static void 2081 sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2082 { 2083 struct sis_softc *sc; 2084 struct mii_data *mii; 2085 2086 sc = ifp->if_softc; 2087 2088 SIS_LOCK(sc); 2089 mii = device_get_softc(sc->sis_miibus); 2090 mii_pollstat(mii); 2091 SIS_UNLOCK(sc); 2092 ifmr->ifm_active = mii->mii_media_active; 2093 ifmr->ifm_status = mii->mii_media_status; 2094 } 2095 2096 static int 2097 sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2098 { 2099 struct sis_softc *sc = ifp->if_softc; 2100 struct ifreq *ifr = (struct ifreq *) data; 2101 struct mii_data *mii; 2102 int error = 0; 2103 2104 switch(command) { 2105 case SIOCSIFFLAGS: 2106 SIS_LOCK(sc); 2107 if (ifp->if_flags & IFF_UP) { 2108 sis_initl(sc); 2109 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2110 sis_stop(sc); 2111 } 2112 SIS_UNLOCK(sc); 2113 error = 0; 2114 break; 2115 case SIOCADDMULTI: 2116 case SIOCDELMULTI: 2117 SIS_LOCK(sc); 2118 if (sc->sis_type == SIS_TYPE_83815) 2119 sis_setmulti_ns(sc); 2120 else 2121 sis_setmulti_sis(sc); 2122 SIS_UNLOCK(sc); 2123 error = 0; 2124 break; 2125 case SIOCGIFMEDIA: 2126 case SIOCSIFMEDIA: 2127 mii = device_get_softc(sc->sis_miibus); 2128 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2129 break; 2130 case SIOCSIFCAP: 2131 /* ok, disable interrupts */ 2132 #ifdef DEVICE_POLLING 2133 if (ifr->ifr_reqcap & IFCAP_POLLING && 2134 !(ifp->if_capenable & IFCAP_POLLING)) { 2135 error = ether_poll_register(sis_poll, ifp); 2136 if (error) 2137 return(error); 2138 SIS_LOCK(sc); 2139 /* Disable interrupts */ 2140 CSR_WRITE_4(sc, SIS_IER, 0); 2141 ifp->if_capenable |= IFCAP_POLLING; 2142 SIS_UNLOCK(sc); 2143 return (error); 2144 2145 } 2146 if (!(ifr->ifr_reqcap & IFCAP_POLLING) && 2147 ifp->if_capenable & IFCAP_POLLING) { 2148 error = ether_poll_deregister(ifp); 2149 /* Enable interrupts. */ 2150 SIS_LOCK(sc); 2151 CSR_WRITE_4(sc, SIS_IER, 1); 2152 ifp->if_capenable &= ~IFCAP_POLLING; 2153 SIS_UNLOCK(sc); 2154 return (error); 2155 } 2156 #endif /* DEVICE_POLLING */ 2157 break; 2158 default: 2159 error = ether_ioctl(ifp, command, data); 2160 break; 2161 } 2162 2163 return(error); 2164 } 2165 2166 static void 2167 sis_watchdog(struct sis_softc *sc) 2168 { 2169 2170 SIS_LOCK_ASSERT(sc); 2171 if (sc->sis_stopped) { 2172 SIS_UNLOCK(sc); 2173 return; 2174 } 2175 2176 if (sc->sis_watchdog_timer == 0 || --sc->sis_watchdog_timer >0) 2177 return; 2178 2179 device_printf(sc->sis_dev, "watchdog timeout\n"); 2180 sc->sis_ifp->if_oerrors++; 2181 2182 sis_stop(sc); 2183 sis_reset(sc); 2184 sis_initl(sc); 2185 2186 if (!IFQ_DRV_IS_EMPTY(&sc->sis_ifp->if_snd)) 2187 sis_startl(sc->sis_ifp); 2188 } 2189 2190 /* 2191 * Stop the adapter and free any mbufs allocated to the 2192 * RX and TX lists. 2193 */ 2194 static void 2195 sis_stop(struct sis_softc *sc) 2196 { 2197 int i; 2198 struct ifnet *ifp; 2199 struct sis_desc *dp; 2200 2201 if (sc->sis_stopped) 2202 return; 2203 SIS_LOCK_ASSERT(sc); 2204 ifp = sc->sis_ifp; 2205 sc->sis_watchdog_timer = 0; 2206 2207 callout_stop(&sc->sis_stat_ch); 2208 2209 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2210 CSR_WRITE_4(sc, SIS_IER, 0); 2211 CSR_WRITE_4(sc, SIS_IMR, 0); 2212 CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */ 2213 SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE); 2214 DELAY(1000); 2215 CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0); 2216 CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0); 2217 2218 sc->sis_link = 0; 2219 2220 /* 2221 * Free data in the RX lists. 
2222 */ 2223 dp = &sc->sis_rx_list[0]; 2224 for (i = 0; i < SIS_RX_LIST_CNT; i++, dp++) { 2225 if (dp->sis_mbuf == NULL) 2226 continue; 2227 bus_dmamap_unload(sc->sis_tag, dp->sis_map); 2228 bus_dmamap_destroy(sc->sis_tag, dp->sis_map); 2229 m_freem(dp->sis_mbuf); 2230 dp->sis_mbuf = NULL; 2231 } 2232 bzero(sc->sis_rx_list, SIS_RX_LIST_SZ); 2233 2234 /* 2235 * Free the TX list buffers. 2236 */ 2237 dp = &sc->sis_tx_list[0]; 2238 for (i = 0; i < SIS_TX_LIST_CNT; i++, dp++) { 2239 if (dp->sis_mbuf == NULL) 2240 continue; 2241 bus_dmamap_unload(sc->sis_tag, dp->sis_map); 2242 bus_dmamap_destroy(sc->sis_tag, dp->sis_map); 2243 m_freem(dp->sis_mbuf); 2244 dp->sis_mbuf = NULL; 2245 } 2246 2247 bzero(sc->sis_tx_list, SIS_TX_LIST_SZ); 2248 2249 sc->sis_stopped = 1; 2250 } 2251 2252 /* 2253 * Stop all chip I/O so that the kernel's probe routines don't 2254 * get confused by errant DMAs when rebooting. 2255 */ 2256 static int 2257 sis_shutdown(device_t dev) 2258 { 2259 struct sis_softc *sc; 2260 2261 sc = device_get_softc(dev); 2262 SIS_LOCK(sc); 2263 sis_reset(sc); 2264 sis_stop(sc); 2265 SIS_UNLOCK(sc); 2266 return (0); 2267 } 2268 2269 static device_method_t sis_methods[] = { 2270 /* Device interface */ 2271 DEVMETHOD(device_probe, sis_probe), 2272 DEVMETHOD(device_attach, sis_attach), 2273 DEVMETHOD(device_detach, sis_detach), 2274 DEVMETHOD(device_shutdown, sis_shutdown), 2275 2276 /* bus interface */ 2277 DEVMETHOD(bus_print_child, bus_generic_print_child), 2278 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 2279 2280 /* MII interface */ 2281 DEVMETHOD(miibus_readreg, sis_miibus_readreg), 2282 DEVMETHOD(miibus_writereg, sis_miibus_writereg), 2283 DEVMETHOD(miibus_statchg, sis_miibus_statchg), 2284 2285 { 0, 0 } 2286 }; 2287 2288 static driver_t sis_driver = { 2289 "sis", 2290 sis_methods, 2291 sizeof(struct sis_softc) 2292 }; 2293 2294 static devclass_t sis_devclass; 2295 2296 DRIVER_MODULE(sis, pci, sis_driver, sis_devclass, 0, 0); 2297 DRIVER_MODULE(miibus, sis, miibus_driver, miibus_devclass, 0, 0); 2298