/*-
 * Copyright (c) 2005 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
 * available from http://www.sis.com.tw.
 *
 * This driver also supports the NatSemi DP83815. Datasheets are
 * available from http://www.national.com.
 *
 * Written by Bill Paul <wpaul@ee.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */
/*
 * The SiS 900 is a fairly simple chip. It uses bus master DMA with
 * simple TX and RX descriptors of 3 longwords in size. The receiver
 * has a single perfect filter entry for the station address and a
 * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
 * transceiver while the 7016 requires an external transceiver chip.
 * Both chips offer the standard bit-bang MII interface as well as
 * an enhanced PHY interface which simplifies accessing MII registers.
 *
 * The only downside to this chipset is that RX descriptors must be
 * longword aligned.
 */
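/*
 * A rough sketch of the descriptor bookkeeping used throughout this file
 * (the authoritative layout is struct sis_desc in if_sisreg.h): the three
 * hardware longwords mentioned above show up here as sis_next (link to the
 * next descriptor), sis_ctl (command/status word carrying the OWN bit and
 * buffer length; the RX and TX paths read status back through sis_rxstat
 * and sis_txstat, presumably views of the same word), and sis_ptr (bus
 * address of the data buffer). The structure also carries software-only
 * state used below: sis_mbuf, sis_map and sis_nextdesc.
 */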

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#define SIS_USEIOSPACE

#include <dev/sis/if_sisreg.h>

MODULE_DEPEND(sis, pci, 1, 1, 1);
MODULE_DEPEND(sis, ether, 1, 1, 1);
MODULE_DEPEND(sis, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define	SIS_LOCK(_sc)		mtx_lock(&(_sc)->sis_mtx)
#define	SIS_UNLOCK(_sc)		mtx_unlock(&(_sc)->sis_mtx)
#define	SIS_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sis_mtx, MA_OWNED)

/*
 * register space access macros
 */
#define	CSR_WRITE_4(sc, reg, val)	bus_write_4(sc->sis_res[0], reg, val)

#define	CSR_READ_4(sc, reg)		bus_read_4(sc->sis_res[0], reg)

#define	CSR_READ_2(sc, reg)		bus_read_2(sc->sis_res[0], reg)

/*
 * Various supported device vendors/types and their names.
 */
static struct sis_type sis_devs[] = {
	{ SIS_VENDORID, SIS_DEVICEID_900, "SiS 900 10/100BaseTX" },
	{ SIS_VENDORID, SIS_DEVICEID_7016, "SiS 7016 10/100BaseTX" },
	{ NS_VENDORID, NS_DEVICEID_DP83815, "NatSemi DP8381[56] 10/100BaseTX" },
	{ 0, 0, NULL }
};

static int sis_detach(device_t);
static void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int sis_ifmedia_upd(struct ifnet *);
static void sis_init(void *);
static void sis_initl(struct sis_softc *);
static void sis_intr(void *);
static int sis_ioctl(struct ifnet *, u_long, caddr_t);
static int sis_newbuf(struct sis_softc *, struct sis_desc *, struct mbuf *);
static void sis_start(struct ifnet *);
static void sis_startl(struct ifnet *);
static void sis_stop(struct sis_softc *);
static void sis_watchdog(struct sis_softc *);

static struct resource_spec sis_res_spec[] = {
#ifdef SIS_USEIOSPACE
	{ SYS_RES_IOPORT,	SIS_PCI_LOIO,	RF_ACTIVE},
#else
	{ SYS_RES_MEMORY,	SIS_PCI_LOMEM,	RF_ACTIVE},
#endif
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE},
	{ -1, 0 }
};

#define SIS_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define SIS_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)					\
	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)

static void
sis_dma_map_desc_next(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct sis_desc	*r;

	r = arg;
	r->sis_next = segs->ds_addr;
}

static void
sis_dma_map_desc_ptr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct sis_desc	*r;

	r = arg;
	r->sis_ptr = segs->ds_addr;
}
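/*
 * The functions above and below are bus_dmamap_load() callbacks: busdma
 * calls them once a mapping has been established, and they simply record
 * the resulting bus address (segs->ds_addr) in whatever location the caller
 * passed via 'arg' -- a descriptor field, or for sis_dma_map_ring() the
 * softc variable that is later programmed into SIS_RX_LISTPTR/SIS_TX_LISTPTR.
 */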
static void
sis_dma_map_ring(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *p;

	p = arg;
	*p = segs->ds_addr;
}

/*
 * Routine to reverse the bits in a word. Stolen almost
 * verbatim from /usr/games/fortune.
 */
static uint16_t
sis_reverse(uint16_t n)
{
	n = ((n >> 1) & 0x5555) | ((n << 1) & 0xaaaa);
	n = ((n >> 2) & 0x3333) | ((n << 2) & 0xcccc);
	n = ((n >> 4) & 0x0f0f) | ((n << 4) & 0xf0f0);
	n = ((n >> 8) & 0x00ff) | ((n << 8) & 0xff00);

	return (n);
}

static void
sis_delay(struct sis_softc *sc)
{
	int idx;

	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, SIS_CSR);
}

static void
sis_eeprom_idle(struct sis_softc *sc)
{
	int i;

	SIO_SET(SIS_EECTL_CSEL);
	sis_delay(sc);
	SIO_SET(SIS_EECTL_CLK);
	sis_delay(sc);

	for (i = 0; i < 25; i++) {
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
	}

	SIO_CLR(SIS_EECTL_CLK);
	sis_delay(sc);
	SIO_CLR(SIS_EECTL_CSEL);
	sis_delay(sc);
	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
sis_eeprom_putbyte(struct sis_softc *sc, int addr)
{
	int d, i;

	d = addr | SIS_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(SIS_EECTL_DIN);
		} else {
			SIO_CLR(SIS_EECTL_DIN);
		}
		sis_delay(sc);
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
sis_eeprom_getword(struct sis_softc *sc, int addr, uint16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/* Force EEPROM to idle state. */
	sis_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	sis_delay(sc);
	SIO_CLR(SIS_EECTL_CLK);
	sis_delay(sc);
	SIO_SET(SIS_EECTL_CSEL);
	sis_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	sis_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
		if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
			word |= i;
		sis_delay(sc);
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	sis_eeprom_idle(sc);

	*dest = word;
}
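/*
 * sis_read_eeprom() below simply loops over sis_eeprom_getword() for
 * consecutive word addresses. For example, sis_attach() fetches the 3-word
 * (6-byte) station address with:
 *
 *	sis_read_eeprom(sc, (caddr_t)&eaddr, SIS_EE_NODEADDR, 3, 0);
 */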
/*
 * Read a sequence of words from the EEPROM.
 */
static void
sis_read_eeprom(struct sis_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int i;
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		sis_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
}

#if defined(__i386__) || defined(__amd64__)
static device_t
sis_find_bridge(device_t dev)
{
	devclass_t pci_devclass;
	device_t *pci_devices;
	int pci_count = 0;
	device_t *pci_children;
	int pci_childcount = 0;
	device_t *busp, *childp;
	device_t child = NULL;
	int i, j;

	if ((pci_devclass = devclass_find("pci")) == NULL)
		return (NULL);

	devclass_get_devices(pci_devclass, &pci_devices, &pci_count);

	for (i = 0, busp = pci_devices; i < pci_count; i++, busp++) {
		pci_childcount = 0;
		device_get_children(*busp, &pci_children, &pci_childcount);
		for (j = 0, childp = pci_children;
		    j < pci_childcount; j++, childp++) {
			if (pci_get_vendor(*childp) == SIS_VENDORID &&
			    pci_get_device(*childp) == 0x0008) {
				child = *childp;
				goto done;
			}
		}
	}

done:
	free(pci_devices, M_TEMP);
	free(pci_children, M_TEMP);
	return (child);
}

static void
sis_read_cmos(struct sis_softc *sc, device_t dev, caddr_t dest, int off, int cnt)
{
	device_t bridge;
	u_int8_t reg;
	int i;
	bus_space_tag_t btag;

	bridge = sis_find_bridge(dev);
	if (bridge == NULL)
		return;
	reg = pci_read_config(bridge, 0x48, 1);
	pci_write_config(bridge, 0x48, reg|0x40, 1);

	/* XXX */
#if defined(__i386__)
	btag = I386_BUS_SPACE_IO;
#elif defined(__amd64__)
	btag = AMD64_BUS_SPACE_IO;
#endif

	for (i = 0; i < cnt; i++) {
		bus_space_write_1(btag, 0x0, 0x70, i + off);
		*(dest + i) = bus_space_read_1(btag, 0x0, 0x71);
	}

	pci_write_config(bridge, 0x48, reg & ~0x40, 1);
	return;
}

static void
sis_read_mac(struct sis_softc *sc, device_t dev, caddr_t dest)
{
	u_int32_t filtsave, csrsave;

	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);
	csrsave = CSR_READ_4(sc, SIS_CSR);

	CSR_WRITE_4(sc, SIS_CSR, SIS_CSR_RELOAD | filtsave);
	CSR_WRITE_4(sc, SIS_CSR, 0);

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave & ~SIS_RXFILTCTL_ENABLE);

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
	((u_int16_t *)dest)[0] = CSR_READ_2(sc, SIS_RXFILT_DATA);
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
	((u_int16_t *)dest)[1] = CSR_READ_2(sc, SIS_RXFILT_DATA);
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
	((u_int16_t *)dest)[2] = CSR_READ_2(sc, SIS_RXFILT_DATA);

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);
	CSR_WRITE_4(sc, SIS_CSR, csrsave);
	return;
}
#endif

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
sis_mii_sync(struct sis_softc *sc)
{
	int i;

	SIO_SET(SIS_MII_DIR|SIS_MII_DATA);

	for (i = 0; i < 32; i++) {
		SIO_SET(SIS_MII_CLK);
		DELAY(1);
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
	}
}
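/*
 * A bit-banged MII management frame, as assembled by sis_mii_readreg() and
 * sis_mii_writereg() below, follows the standard layout: a 2-bit start
 * delimiter, 2-bit opcode (read/write), 5-bit PHY address, 5-bit register
 * address, 2-bit turnaround and 16 data bits, shifted out MSB first by
 * sis_mii_send().
 */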
/*
 * Clock a series of bits through the MII.
 */
static void
sis_mii_send(struct sis_softc *sc, uint32_t bits, int cnt)
{
	int i;

	SIO_CLR(SIS_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(SIS_MII_DATA);
		} else {
			SIO_CLR(SIS_MII_DATA);
		}
		DELAY(1);
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
		SIO_SET(SIS_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int
sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
{
	int i, ack;

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = SIS_MII_STARTDELIM;
	frame->mii_opcode = SIS_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(SIS_MII_DIR);

	sis_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	sis_mii_send(sc, frame->mii_stdelim, 2);
	sis_mii_send(sc, frame->mii_opcode, 2);
	sis_mii_send(sc, frame->mii_phyaddr, 5);
	sis_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
	DELAY(1);
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(SIS_MII_DIR);

	/* Check for ack */
	SIO_CLR(SIS_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			SIO_CLR(SIS_MII_CLK);
			DELAY(1);
			SIO_SET(SIS_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(SIS_MII_CLK);
		DELAY(1);
	}

fail:

	SIO_CLR(SIS_MII_CLK);
	DELAY(1);
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	if (ack)
		return (1);
	return (0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
{

	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = SIS_MII_STARTDELIM;
	frame->mii_opcode = SIS_MII_WRITEOP;
	frame->mii_turnaround = SIS_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(SIS_MII_DIR);

	sis_mii_sync(sc);

	sis_mii_send(sc, frame->mii_stdelim, 2);
	sis_mii_send(sc, frame->mii_opcode, 2);
	sis_mii_send(sc, frame->mii_phyaddr, 5);
	sis_mii_send(sc, frame->mii_regaddr, 5);
	sis_mii_send(sc, frame->mii_turnaround, 2);
	sis_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(SIS_MII_CLK);
	DELAY(1);
	SIO_CLR(SIS_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(SIS_MII_DIR);

	return (0);
}

static int
sis_miibus_readreg(device_t dev, int phy, int reg)
{
	struct sis_softc *sc;
	struct sis_mii_frame frame;

	sc = device_get_softc(dev);

	if (sc->sis_type == SIS_TYPE_83815) {
		if (phy != 0)
			return (0);
		/*
		 * The NatSemi chip can take a while after
		 * a reset to come ready, during which the BMSR
		 * returns a value of 0.
		 * This is *never* supposed to happen: some of
		 * the BMSR bits are meant to be hardwired in the
		 * on position, and this can confuse the miibus
		 * code a bit during the probe and attach phase.
		 * So we make an effort to check for this condition
		 * and wait for it to clear.
		 */
		if (!CSR_READ_4(sc, NS_BMSR))
			DELAY(1000);
		return CSR_READ_4(sc, NS_BMCR + (reg * 4));
	}

	/*
	 * Chipsets < SIS_635 seem not to be able to read/write
	 * through mdio. Use the enhanced PHY access register
	 * again for them.
	 */
	if (sc->sis_type == SIS_TYPE_900 &&
	    sc->sis_rev < SIS_REV_635) {
		int i, val = 0;

		if (phy != 0)
			return (0);

		CSR_WRITE_4(sc, SIS_PHYCTL,
		    (phy << 11) | (reg << 6) | SIS_PHYOP_READ);
		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);

		for (i = 0; i < SIS_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
				break;
		}

		if (i == SIS_TIMEOUT) {
			device_printf(sc->sis_dev, "PHY failed to come ready\n");
			return (0);
		}

		val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;

		if (val == 0xFFFF)
			return (0);

		return (val);
	} else {
		bzero((char *)&frame, sizeof(frame));

		frame.mii_phyaddr = phy;
		frame.mii_regaddr = reg;
		sis_mii_readreg(sc, &frame);

		return (frame.mii_data);
	}
}

static int
sis_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct sis_softc *sc;
	struct sis_mii_frame frame;

	sc = device_get_softc(dev);

	if (sc->sis_type == SIS_TYPE_83815) {
		if (phy != 0)
			return (0);
		CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
		return (0);
	}

	/*
	 * Chipsets < SIS_635 seem not to be able to read/write
	 * through mdio. Use the enhanced PHY access register
	 * again for them.
	 */
	if (sc->sis_type == SIS_TYPE_900 &&
	    sc->sis_rev < SIS_REV_635) {
		int i;

		if (phy != 0)
			return (0);

		CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
		    (reg << 6) | SIS_PHYOP_WRITE);
		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);

		for (i = 0; i < SIS_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
				break;
		}

		if (i == SIS_TIMEOUT)
			device_printf(sc->sis_dev, "PHY failed to come ready\n");
	} else {
		bzero((char *)&frame, sizeof(frame));

		frame.mii_phyaddr = phy;
		frame.mii_regaddr = reg;
		frame.mii_data = data;
		sis_mii_writereg(sc, &frame);
	}
	return (0);
}

static void
sis_miibus_statchg(device_t dev)
{
	struct sis_softc *sc;

	sc = device_get_softc(dev);
	SIS_LOCK_ASSERT(sc);
	sis_initl(sc);
}

static uint32_t
sis_mchash(struct sis_softc *sc, const uint8_t *addr)
{
	uint32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);

	/*
	 * return the filter bit position
	 *
	 * The NatSemi chip has a 512-bit filter, which is
	 * different than the SiS, so we special-case it.
	 */
	if (sc->sis_type == SIS_TYPE_83815)
		return (crc >> 23);
	else if (sc->sis_rev >= SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)
		return (crc >> 24);
	else
		return (crc >> 25);
}
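/*
 * The value returned above is a bit index into the chip's multicast hash
 * table: 9 bits (512 entries) for the DP83815, 8 or 7 bits (256 or 128
 * entries) for the SiS parts. sis_setmulti_ns() and sis_setmulti_sis()
 * below turn that index into a 16-bit filter-memory word offset and a bit
 * mask within that word.
 */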
static void
sis_setmulti_ns(struct sis_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u_int32_t h = 0, i, filtsave;
	int bit, index;

	ifp = sc->sis_ifp;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		SIS_CLRBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);
		return;
	}

	/*
	 * We have to explicitly enable the multicast hash table
	 * on the NatSemi chip if we want to use it, which we do.
	 */
	SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
	SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);

	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);

	/* first, zot all the existing hash bits */
	for (i = 0; i < 32; i++) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i*2));
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
	}

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = sis_mchash(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		index = h >> 3;
		bit = h & 0x1F;
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);
		if (bit > 0xF)
			bit -= 0x10;
		SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));
	}
	IF_ADDR_UNLOCK(ifp);

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);

	return;
}

static void
sis_setmulti_sis(struct sis_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u_int32_t h, i, n, ctl;
	u_int16_t hashes[16];

	ifp = sc->sis_ifp;

	/* hash table size */
	if (sc->sis_rev >= SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)
		n = 16;
	else
		n = 8;

	ctl = CSR_READ_4(sc, SIS_RXFILT_CTL) & SIS_RXFILTCTL_ENABLE;

	if (ifp->if_flags & IFF_BROADCAST)
		ctl |= SIS_RXFILTCTL_BROAD;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		ctl |= SIS_RXFILTCTL_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			ctl |= SIS_RXFILTCTL_BROAD|SIS_RXFILTCTL_ALLPHYS;
		for (i = 0; i < n; i++)
			hashes[i] = ~0;
	} else {
		for (i = 0; i < n; i++)
			hashes[i] = 0;
		i = 0;
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = sis_mchash(sc,
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			hashes[h >> 4] |= 1 << (h & 0xf);
			i++;
		}
		IF_ADDR_UNLOCK(ifp);
		if (i > n) {
			ctl |= SIS_RXFILTCTL_ALLMULTI;
			for (i = 0; i < n; i++)
				hashes[i] = ~0;
		}
	}

	for (i = 0; i < n; i++) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
	}

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, ctl);
}

static void
sis_reset(struct sis_softc *sc)
{
	int i;

	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);

	for (i = 0; i < SIS_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
			break;
	}

	if (i == SIS_TIMEOUT)
		device_printf(sc->sis_dev, "reset never completed\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NatSemi chip, make sure to clear
	 * PME mode.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
		CSR_WRITE_4(sc, NS_CLKRUN, 0);
	}

	return;
}

/*
 * Probe for an SiS chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
sis_probe(device_t dev)
{
	struct sis_type *t;

	t = sis_devs;

	while (t->sis_name != NULL) {
		if ((pci_get_vendor(dev) == t->sis_vid) &&
		    (pci_get_device(dev) == t->sis_did)) {
			device_set_desc(dev, t->sis_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
sis_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	struct sis_softc *sc;
	struct ifnet *ifp;
	int error = 0, waittime = 0;

	waittime = 0;
	sc = device_get_softc(dev);

	sc->sis_dev = dev;

	mtx_init(&sc->sis_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sis_stat_ch, &sc->sis_mtx, 0);

	if (pci_get_device(dev) == SIS_DEVICEID_900)
		sc->sis_type = SIS_TYPE_900;
	if (pci_get_device(dev) == SIS_DEVICEID_7016)
		sc->sis_type = SIS_TYPE_7016;
	if (pci_get_vendor(dev) == NS_VENDORID)
		sc->sis_type = SIS_TYPE_83815;

	sc->sis_rev = pci_read_config(dev, PCIR_REVID, 1);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	error = bus_alloc_resources(dev, sis_res_spec, sc->sis_res);
	if (error) {
		device_printf(dev, "couldn't allocate resources\n");
		goto fail;
	}

	/* Reset the adapter. */
	sis_reset(sc);

	if (sc->sis_type == SIS_TYPE_900 &&
	    (sc->sis_rev == SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)) {
		SIO_SET(SIS_CFG_RND_CNT);
		SIO_SET(SIS_CFG_PERR_DETECT);
	}

	/*
	 * Get station address from the EEPROM.
	 */
	switch (pci_get_vendor(dev)) {
	case NS_VENDORID:
		sc->sis_srr = CSR_READ_4(sc, NS_SRR);

		/* We can't update the device description, so spew */
		if (sc->sis_srr == NS_SRR_15C)
			device_printf(dev, "Silicon Revision: DP83815C\n");
		else if (sc->sis_srr == NS_SRR_15D)
			device_printf(dev, "Silicon Revision: DP83815D\n");
		else if (sc->sis_srr == NS_SRR_16A)
			device_printf(dev, "Silicon Revision: DP83816A\n");
		else
			device_printf(dev, "Silicon Revision %x\n", sc->sis_srr);

		/*
		 * Reading the MAC address out of the EEPROM on
		 * the NatSemi chip takes a bit more work than
		 * you'd expect. The address spans 4 16-bit words,
		 * with the first word containing only a single bit.
		 * You have to shift everything over one bit to
		 * get it aligned properly. Also, the bits are
		 * stored backwards (the LSB is really the MSB,
		 * and so on) so you have to reverse them in order
		 * to get the MAC address into the form we want.
		 * Why? Who the hell knows.
		 */
		{
			u_int16_t tmp[4];

			sis_read_eeprom(sc, (caddr_t)&tmp,
			    NS_EE_NODEADDR, 4, 0);

			/* Shift everything over one bit. */
			tmp[3] = tmp[3] >> 1;
			tmp[3] |= tmp[2] << 15;
			tmp[2] = tmp[2] >> 1;
			tmp[2] |= tmp[1] << 15;
			tmp[1] = tmp[1] >> 1;
			tmp[1] |= tmp[0] << 15;

			/* Now reverse all the bits. */
			tmp[3] = sis_reverse(tmp[3]);
			tmp[2] = sis_reverse(tmp[2]);
			tmp[1] = sis_reverse(tmp[1]);

			bcopy((char *)&tmp[1], eaddr, ETHER_ADDR_LEN);
		}
		break;
	case SIS_VENDORID:
	default:
#if defined(__i386__) || defined(__amd64__)
		/*
		 * If this is a SiS 630E chipset with an embedded
		 * SiS 900 controller, we have to read the MAC address
		 * from the APC CMOS RAM. Our method for doing this
		 * is very ugly since we have to reach out and grab
		 * ahold of hardware for which we cannot properly
		 * allocate resources. This code is only compiled on
		 * the i386 architecture since the SiS 630E chipset
		 * is for x86 motherboards only. Note that there are
		 * a lot of magic numbers in this hack. These are
		 * taken from SiS's Linux driver. I'd like to replace
		 * them with proper symbolic definitions, but that
		 * requires some datasheets that I don't have access
		 * to at the moment.
		 */
		if (sc->sis_rev == SIS_REV_630S ||
		    sc->sis_rev == SIS_REV_630E ||
		    sc->sis_rev == SIS_REV_630EA1)
			sis_read_cmos(sc, dev, (caddr_t)&eaddr, 0x9, 6);

		else if (sc->sis_rev == SIS_REV_635 ||
		    sc->sis_rev == SIS_REV_630ET)
			sis_read_mac(sc, dev, (caddr_t)&eaddr);
		else if (sc->sis_rev == SIS_REV_96x) {
			/*
			 * Allow the LAN to read the EEPROM. It is shared
			 * between a 1394 controller and the NIC, and each
			 * time we access it we need to set SIS_EECMD_REQ.
			 */
			SIO_SET(SIS_EECMD_REQ);
			for (waittime = 0; waittime < SIS_TIMEOUT;
			    waittime++) {
				/* Force EEPROM to idle state. */
				sis_eeprom_idle(sc);
				if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECMD_GNT) {
					sis_read_eeprom(sc, (caddr_t)&eaddr,
					    SIS_EE_NODEADDR, 3, 0);
					break;
				}
				DELAY(1);
			}
			/*
			 * Set SIS_EECTL_CLK to high, so another master
			 * can operate on the I2C bus.
			 */
			SIO_SET(SIS_EECTL_CLK);
			/* Refuse EEPROM access by LAN */
			SIO_SET(SIS_EECMD_DONE);
		} else
#endif
			sis_read_eeprom(sc, (caddr_t)&eaddr,
			    SIS_EE_NODEADDR, 3, 0);
		break;
	}

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define SIS_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MAXBSIZE, SIS_NSEG_NEW,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sis_parent_tag);
	if (error)
		goto fail;
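	/*
	 * Each of the allocations that follow uses the same busdma sequence:
	 * create a child tag under sis_parent_tag, obtain DMA-able memory
	 * for the ring with bus_dmamem_alloc(), then bus_dmamap_load() the
	 * ring using sis_dma_map_ring() as the callback to capture its bus
	 * address in sis_rx_paddr/sis_tx_paddr for later use by sis_initl().
	 */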
	/*
	 * Now allocate a tag for the DMA descriptor lists and a chunk
	 * of DMA-able memory based on the tag. Also obtain the physical
	 * addresses of the RX and TX ring, which we'll need later.
	 * All of our lists are allocated as a contiguous block
	 * of memory.
	 */
	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SIS_RX_LIST_SZ, 1,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    busdma_lock_mutex,		/* lockfunc */
	    &Giant,			/* lockarg */
	    &sc->sis_rx_tag);
	if (error)
		goto fail;

	error = bus_dmamem_alloc(sc->sis_rx_tag,
	    (void **)&sc->sis_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sis_rx_dmamap);

	if (error) {
		device_printf(dev, "no memory for rx list buffers!\n");
		bus_dma_tag_destroy(sc->sis_rx_tag);
		sc->sis_rx_tag = NULL;
		goto fail;
	}

	error = bus_dmamap_load(sc->sis_rx_tag,
	    sc->sis_rx_dmamap, &(sc->sis_rx_list[0]),
	    sizeof(struct sis_desc), sis_dma_map_ring,
	    &sc->sis_rx_paddr, 0);

	if (error) {
		device_printf(dev, "cannot get address of the rx ring!\n");
		bus_dmamem_free(sc->sis_rx_tag,
		    sc->sis_rx_list, sc->sis_rx_dmamap);
		bus_dma_tag_destroy(sc->sis_rx_tag);
		sc->sis_rx_tag = NULL;
		goto fail;
	}

	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SIS_TX_LIST_SZ, 1,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    busdma_lock_mutex,		/* lockfunc */
	    &Giant,			/* lockarg */
	    &sc->sis_tx_tag);
	if (error)
		goto fail;

	error = bus_dmamem_alloc(sc->sis_tx_tag,
	    (void **)&sc->sis_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sis_tx_dmamap);

	if (error) {
		device_printf(dev, "no memory for tx list buffers!\n");
		bus_dma_tag_destroy(sc->sis_tx_tag);
		sc->sis_tx_tag = NULL;
		goto fail;
	}

	error = bus_dmamap_load(sc->sis_tx_tag,
	    sc->sis_tx_dmamap, &(sc->sis_tx_list[0]),
	    sizeof(struct sis_desc), sis_dma_map_ring,
	    &sc->sis_tx_paddr, 0);

	if (error) {
		device_printf(dev, "cannot get address of the tx ring!\n");
		bus_dmamem_free(sc->sis_tx_tag,
		    sc->sis_tx_list, sc->sis_tx_dmamap);
		bus_dma_tag_destroy(sc->sis_tx_tag);
		sc->sis_tx_tag = NULL;
		goto fail;
	}

	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    busdma_lock_mutex,		/* lockfunc */
	    &Giant,			/* lockarg */
	    &sc->sis_tag);
	if (error)
		goto fail;

	/*
	 * Obtain the physical addresses of the RX and TX
	 * rings which we'll need later in the init routine.
	 */
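	/*
	 * Note that the last tag created above, sc->sis_tag, is not for a
	 * ring but for the individual mbuf buffers: sis_newbuf() and
	 * sis_encap() create and load a map from it for every receive
	 * cluster and transmit fragment.
	 */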

	ifp = sc->sis_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sis_ioctl;
	ifp->if_start = sis_start;
	ifp->if_init = sis_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, SIS_TX_LIST_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = SIS_TX_LIST_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Do MII setup.
	 */
	if (mii_phy_probe(dev, &sc->sis_miibus,
	    sis_ifmedia_upd, sis_ifmedia_sts)) {
		device_printf(dev, "MII without any PHY!\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->sis_res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, sis_intr, sc, &sc->sis_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		sis_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sis_detach(device_t dev)
{
	struct sis_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->sis_mtx), ("sis mutex not initialized"));
	ifp = sc->sis_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded. */
	if (device_is_attached(dev)) {
		SIS_LOCK(sc);
		sis_reset(sc);
		sis_stop(sc);
		SIS_UNLOCK(sc);
		callout_drain(&sc->sis_stat_ch);
		ether_ifdetach(ifp);
	}
	if (sc->sis_miibus)
		device_delete_child(dev, sc->sis_miibus);
	bus_generic_detach(dev);

	if (sc->sis_intrhand)
		bus_teardown_intr(dev, sc->sis_res[1], sc->sis_intrhand);
	bus_release_resources(dev, sis_res_spec, sc->sis_res);

	if (ifp)
		if_free(ifp);

	if (sc->sis_rx_tag) {
		bus_dmamap_unload(sc->sis_rx_tag,
		    sc->sis_rx_dmamap);
		bus_dmamem_free(sc->sis_rx_tag,
		    sc->sis_rx_list, sc->sis_rx_dmamap);
		bus_dma_tag_destroy(sc->sis_rx_tag);
	}
	if (sc->sis_tx_tag) {
		bus_dmamap_unload(sc->sis_tx_tag,
		    sc->sis_tx_dmamap);
		bus_dmamem_free(sc->sis_tx_tag,
		    sc->sis_tx_list, sc->sis_tx_dmamap);
		bus_dma_tag_destroy(sc->sis_tx_tag);
	}
	if (sc->sis_parent_tag)
		bus_dma_tag_destroy(sc->sis_parent_tag);
	if (sc->sis_tag)
		bus_dma_tag_destroy(sc->sis_tag);

	mtx_destroy(&sc->sis_mtx);

	return (0);
}

/*
 * Initialize the TX and RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
sis_ring_init(struct sis_softc *sc)
{
	int i, error;
	struct sis_desc *dp;

	dp = &sc->sis_tx_list[0];
	for (i = 0; i < SIS_TX_LIST_CNT; i++, dp++) {
		if (i == (SIS_TX_LIST_CNT - 1))
			dp->sis_nextdesc = &sc->sis_tx_list[0];
		else
			dp->sis_nextdesc = dp + 1;
		bus_dmamap_load(sc->sis_tx_tag,
		    sc->sis_tx_dmamap,
		    dp->sis_nextdesc, sizeof(struct sis_desc),
		    sis_dma_map_desc_next, dp, 0);
		dp->sis_mbuf = NULL;
		dp->sis_ptr = 0;
		dp->sis_ctl = 0;
	}

	sc->sis_tx_prod = sc->sis_tx_cons = sc->sis_tx_cnt = 0;

	bus_dmamap_sync(sc->sis_tx_tag,
	    sc->sis_tx_dmamap, BUS_DMASYNC_PREWRITE);

	dp = &sc->sis_rx_list[0];
	for (i = 0; i < SIS_RX_LIST_CNT; i++, dp++) {
		error = sis_newbuf(sc, dp, NULL);
		if (error)
			return (error);
		if (i == (SIS_RX_LIST_CNT - 1))
			dp->sis_nextdesc = &sc->sis_rx_list[0];
		else
			dp->sis_nextdesc = dp + 1;
		bus_dmamap_load(sc->sis_rx_tag,
		    sc->sis_rx_dmamap,
		    dp->sis_nextdesc, sizeof(struct sis_desc),
		    sis_dma_map_desc_next, dp, 0);
	}

	bus_dmamap_sync(sc->sis_rx_tag,
	    sc->sis_rx_dmamap, BUS_DMASYNC_PREWRITE);

	sc->sis_rx_pdsc = &sc->sis_rx_list[0];

	return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
sis_newbuf(struct sis_softc *sc, struct sis_desc *c, struct mbuf *m)
{

	if (c == NULL)
		return (EINVAL);

	if (m == NULL) {
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (ENOBUFS);
	} else
		m->m_data = m->m_ext.ext_buf;

	c->sis_mbuf = m;
	c->sis_ctl = SIS_RXLEN;

	bus_dmamap_create(sc->sis_tag, 0, &c->sis_map);
	bus_dmamap_load(sc->sis_tag, c->sis_map,
	    mtod(m, void *), MCLBYTES,
	    sis_dma_map_desc_ptr, c, 0);
	bus_dmamap_sync(sc->sis_tag, c->sis_map, BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
sis_rxeof(struct sis_softc *sc)
{
	struct mbuf *m, *m0;
	struct ifnet *ifp;
	struct sis_desc *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	SIS_LOCK_ASSERT(sc);

	ifp = sc->sis_ifp;

	for (cur_rx = sc->sis_rx_pdsc; SIS_OWNDESC(cur_rx);
	    cur_rx = cur_rx->sis_nextdesc) {

#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		rxstat = cur_rx->sis_rxstat;
		bus_dmamap_sync(sc->sis_tag,
		    cur_rx->sis_map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sis_tag, cur_rx->sis_map);
		bus_dmamap_destroy(sc->sis_tag, cur_rx->sis_map);
		m = cur_rx->sis_mbuf;
		cur_rx->sis_mbuf = NULL;
		total_len = SIS_RXBYTES(cur_rx);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (!(rxstat & SIS_CMDSTS_PKT_OK)) {
			ifp->if_ierrors++;
			if (rxstat & SIS_RXSTAT_COLL)
				ifp->if_collisions++;
			sis_newbuf(sc, cur_rx, m);
			continue;
		}

		/* No errors; receive the packet. */
#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * On architectures without alignment problems we try to
		 * allocate a new buffer for the receive ring, and pass up
		 * the one where the packet is already, saving the expensive
		 * copy done in m_devget().
		 * If we are on an architecture with alignment problems, or
		 * if the allocation fails, then use m_devget and leave the
		 * existing buffer in the receive ring.
		 */
		if (sis_newbuf(sc, cur_rx, NULL) == 0)
			m->m_pkthdr.len = m->m_len = total_len;
		else
#endif
		{
			m0 = m_devget(mtod(m, char *), total_len,
			    ETHER_ALIGN, ifp, NULL);
			sis_newbuf(sc, cur_rx, m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		SIS_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		SIS_LOCK(sc);
	}

	sc->sis_rx_pdsc = cur_rx;
}

static void
sis_rxeoc(struct sis_softc *sc)
{

	SIS_LOCK_ASSERT(sc);
	sis_rxeof(sc);
	sis_initl(sc);
}
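/*
 * TX completion works off the descriptor ownership bit: sis_txeof() below
 * walks forward from sis_tx_cons and stops at the first descriptor the chip
 * still owns (SIS_OWNDESC). Descriptors still flagged SIS_CMDSTS_MORE belong
 * to a frame whose last fragment has not been reached yet, so the mbuf free
 * and the error/collision accounting happen only on the final descriptor of
 * each frame.
 */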

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

static void
sis_txeof(struct sis_softc *sc)
{
	struct ifnet *ifp;
	u_int32_t idx;

	SIS_LOCK_ASSERT(sc);
	ifp = sc->sis_ifp;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (idx = sc->sis_tx_cons; sc->sis_tx_cnt > 0;
	    sc->sis_tx_cnt--, SIS_INC(idx, SIS_TX_LIST_CNT)) {
		struct sis_desc *cur_tx = &sc->sis_tx_list[idx];

		if (SIS_OWNDESC(cur_tx))
			break;

		if (cur_tx->sis_ctl & SIS_CMDSTS_MORE)
			continue;

		if (!(cur_tx->sis_ctl & SIS_CMDSTS_PKT_OK)) {
			ifp->if_oerrors++;
			if (cur_tx->sis_txstat & SIS_TXSTAT_EXCESSCOLLS)
				ifp->if_collisions++;
			if (cur_tx->sis_txstat & SIS_TXSTAT_OUTOFWINCOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions +=
		    (cur_tx->sis_txstat & SIS_TXSTAT_COLLCNT) >> 16;

		ifp->if_opackets++;
		if (cur_tx->sis_mbuf != NULL) {
			m_freem(cur_tx->sis_mbuf);
			cur_tx->sis_mbuf = NULL;
			bus_dmamap_unload(sc->sis_tag, cur_tx->sis_map);
			bus_dmamap_destroy(sc->sis_tag, cur_tx->sis_map);
		}
	}

	if (idx != sc->sis_tx_cons) {
		/* we freed up some buffers */
		sc->sis_tx_cons = idx;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	sc->sis_watchdog_timer = (sc->sis_tx_cnt == 0) ? 0 : 5;

	return;
}

static void
sis_tick(void *xsc)
{
	struct sis_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;

	sc = xsc;
	SIS_LOCK_ASSERT(sc);
	sc->in_tick = 1;
	ifp = sc->sis_ifp;

	mii = device_get_softc(sc->sis_miibus);
	mii_tick(mii);

	sis_watchdog(sc);

	if (!sc->sis_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->sis_link++;
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			sis_startl(ifp);
	}

	callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc);
	sc->in_tick = 0;
}

#ifdef DEVICE_POLLING
static poll_handler_t sis_poll;

static void
sis_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct sis_softc *sc = ifp->if_softc;

	SIS_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		SIS_UNLOCK(sc);
		return;
	}

	/*
	 * On the sis, reading the status register also clears it.
	 * So before returning to intr mode we must make sure that all
	 * possible pending sources of interrupts have been served.
	 * In practice this means run to completion the *eof routines,
	 * and then call the interrupt routine.
	 */
	sc->rxcycles = count;
	sis_rxeof(sc);
	sis_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		sis_startl(ifp);

	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
		u_int32_t status;

		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, SIS_ISR);

		if (status & (SIS_ISR_RX_ERR|SIS_ISR_RX_OFLOW))
			sis_rxeoc(sc);

		if (status & (SIS_ISR_RX_IDLE))
			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

		if (status & SIS_ISR_SYSERR) {
			sis_reset(sc);
			sis_initl(sc);
		}
	}

	SIS_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

static void
sis_intr(void *arg)
{
	struct sis_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;

	sc = arg;
	ifp = sc->sis_ifp;

	if (sc->sis_stopped)	/* Most likely shared interrupt */
		return;

	SIS_LOCK(sc);
#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		SIS_UNLOCK(sc);
		return;
	}
#endif

	/* Disable interrupts. */
	CSR_WRITE_4(sc, SIS_IER, 0);

	for (;;) {
		SIS_LOCK_ASSERT(sc);
		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, SIS_ISR);

		if ((status & SIS_INTRS) == 0)
			break;

		if (status &
		    (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR |
		    SIS_ISR_TX_OK | SIS_ISR_TX_IDLE))
			sis_txeof(sc);

		if (status & (SIS_ISR_RX_DESC_OK|SIS_ISR_RX_OK|SIS_ISR_RX_IDLE))
			sis_rxeof(sc);

		if (status & (SIS_ISR_RX_ERR | SIS_ISR_RX_OFLOW))
			sis_rxeoc(sc);

		if (status & (SIS_ISR_RX_IDLE))
			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

		if (status & SIS_ISR_SYSERR) {
			sis_reset(sc);
			sis_initl(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, SIS_IER, 1);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		sis_startl(ifp);

	SIS_UNLOCK(sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
sis_encap(struct sis_softc *sc, struct mbuf **m_head, uint32_t *txidx)
{
	struct sis_desc *f = NULL;
	struct mbuf *m;
	int frag, cur, cnt = 0, chainlen = 0;

	/*
	 * If there's no way we can send any packets, return now.
	 */
	if (SIS_TX_LIST_CNT - sc->sis_tx_cnt < 2)
		return (ENOBUFS);

	/*
	 * Count the number of frags in this chain to see if
	 * we need to m_defrag. Since the descriptor list is shared
	 * by all packets, we'll m_defrag long chains so that they
	 * do not use up the entire list, even if they would fit.
	 */
	for (m = *m_head; m != NULL; m = m->m_next)
		chainlen++;

	if ((chainlen > SIS_TX_LIST_CNT / 4) ||
	    ((SIS_TX_LIST_CNT - (chainlen + sc->sis_tx_cnt)) < 2)) {
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		*m_head = m;
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	cur = frag = *txidx;

	for (m = *m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if ((SIS_TX_LIST_CNT -
			    (sc->sis_tx_cnt + cnt)) < 2)
				return (ENOBUFS);
			f = &sc->sis_tx_list[frag];
			f->sis_ctl = SIS_CMDSTS_MORE | m->m_len;
			bus_dmamap_create(sc->sis_tag, 0, &f->sis_map);
			bus_dmamap_load(sc->sis_tag, f->sis_map,
			    mtod(m, void *), m->m_len,
			    sis_dma_map_desc_ptr, f, 0);
			bus_dmamap_sync(sc->sis_tag,
			    f->sis_map, BUS_DMASYNC_PREREAD);
			if (cnt != 0)
				f->sis_ctl |= SIS_CMDSTS_OWN;
			cur = frag;
			SIS_INC(frag, SIS_TX_LIST_CNT);
			cnt++;
		}
	}

	if (m != NULL)
		return (ENOBUFS);

	sc->sis_tx_list[cur].sis_mbuf = *m_head;
	sc->sis_tx_list[cur].sis_ctl &= ~SIS_CMDSTS_MORE;
	sc->sis_tx_list[*txidx].sis_ctl |= SIS_CMDSTS_OWN;
	sc->sis_tx_cnt += cnt;
	*txidx = frag;

	return (0);
}
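/*
 * Note the ownership handoff in sis_encap() above: every fragment after the
 * first gets SIS_CMDSTS_OWN as it is filled in, but the first descriptor's
 * OWN bit is set only once the whole chain is ready, so the chip cannot
 * start on a half-built frame. sis_startl() then nudges the transmitter
 * with SIS_CSR_TX_ENABLE.
 */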
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

static void
sis_start(struct ifnet *ifp)
{
	struct sis_softc *sc;

	sc = ifp->if_softc;
	SIS_LOCK(sc);
	sis_startl(ifp);
	SIS_UNLOCK(sc);
}

static void
sis_startl(struct ifnet *ifp)
{
	struct sis_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t idx, queued = 0;

	sc = ifp->if_softc;

	SIS_LOCK_ASSERT(sc);

	if (!sc->sis_link)
		return;

	idx = sc->sis_tx_prod;

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->sis_tx_list[idx].sis_mbuf == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (sis_encap(sc, &m_head, &idx)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (queued) {
		/* Transmit */
		sc->sis_tx_prod = idx;
		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->sis_watchdog_timer = 5;
	}
}

static void
sis_init(void *xsc)
{
	struct sis_softc *sc = xsc;

	SIS_LOCK(sc);
	sis_initl(sc);
	SIS_UNLOCK(sc);
}

static void
sis_initl(struct sis_softc *sc)
{
	struct ifnet *ifp = sc->sis_ifp;
	struct mii_data *mii;

	SIS_LOCK_ASSERT(sc);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	sis_stop(sc);
	sc->sis_stopped = 0;

#ifdef notyet
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
		/*
		 * Configure 400usec of interrupt holdoff.  This is based
		 * on empirical tests on a Soekris 4801.
		 */
		CSR_WRITE_4(sc, NS_IHR, 0x100 | 4);
	}
#endif

	mii = device_get_softc(sc->sis_miibus);

	/* Set MAC address */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[0]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[1]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[2]);
	} else {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[0]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[1]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[2]);
	}

	/* Init circular TX/RX lists. */
	if (sis_ring_init(sc) != 0) {
		device_printf(sc->sis_dev,
		    "initialization failed: no memory for rx buffers\n");
		sis_stop(sc);
		return;
	}

	/*
	 * Short Cable Receive Errors (MP21.E)
	 * also: Page 78 of the DP83815 data sheet (september 2002 version)
	 * recommends the following register settings "for optimum
	 * performance" for rev 15C. Set this also for 15D parts as
	 * they require it in practice.
	 */
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
		/* set val for c2 */
		CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
		/* load/kill c2 */
		CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
		/* raise SD off, from 4 to c */
		CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
	}

	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_ARP);
		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_PERFECT);
	}

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
	} else {
		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
	} else {
		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
	}

	/*
	 * Load the multicast filter.
	 */
	if (sc->sis_type == SIS_TYPE_83815)
		sis_setmulti_ns(sc);
	else
		sis_setmulti_sis(sc);

	/* Turn the receive filter on */
	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, SIS_RX_LISTPTR, sc->sis_rx_paddr);
	CSR_WRITE_4(sc, SIS_TX_LISTPTR, sc->sis_tx_paddr);

	/*
	 * SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
	 * the PCI bus. When this bit is set, the Max DMA Burst Size
	 * for TX/RX DMA should be no larger than 16 double words.
	 */
	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN) {
		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
	} else {
		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);
	}

	/* Accept Long Packets for VLAN support */
	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);

	/* Set TX configuration */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) {
		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
	} else {
		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
	}

	/* Set full/half duplex mode. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		SIS_SETBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	} else {
		SIS_CLRBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	}

	if (sc->sis_type == SIS_TYPE_83816) {
		/*
		 * MPII03.D: Half Duplex Excessive Collisions.
		 * Also page 49 in 83816 manual
		 */
		SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D);
	}

	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A &&
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
		uint32_t reg;

		/*
		 * Short Cable Receive Errors (MP21.E)
		 */
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
		reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff;
		CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000);
		DELAY(100000);
		reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff;
		if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) {
			device_printf(sc->sis_dev,
			    "Applying short cable fix (reg=%x)\n", reg);
			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8);
			SIS_SETBIT(sc, NS_PHY_DSPCFG, 0x20);
		}
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
	}

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
#ifdef DEVICE_POLLING
	/*
	 * ... only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_4(sc, SIS_IER, 0);
	else
#endif
	CSR_WRITE_4(sc, SIS_IER, 1);

	/* Enable receiver and transmitter. */
	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

#ifdef notdef
	mii_mediachg(mii);
#endif

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (!sc->in_tick)
		callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc);
}

/*
 * Set media options.
 */
static int
sis_ifmedia_upd(struct ifnet *ifp)
{
	struct sis_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	SIS_LOCK(sc);
	mii = device_get_softc(sc->sis_miibus);
	sc->sis_link = 0;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);
	SIS_UNLOCK(sc);

	return (0);
}

/*
 * Report current media status.
 */
static void
sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sis_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	SIS_LOCK(sc);
	mii = device_get_softc(sc->sis_miibus);
	mii_pollstat(mii);
	SIS_UNLOCK(sc);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sis_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		SIS_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			sis_initl(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			sis_stop(sc);
		}
		SIS_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		SIS_LOCK(sc);
		if (sc->sis_type == SIS_TYPE_83815)
			sis_setmulti_ns(sc);
		else
			sis_setmulti_sis(sc);
		SIS_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->sis_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		/* ok, disable interrupts */
#ifdef DEVICE_POLLING
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(sis_poll, ifp);
			if (error)
				return (error);
			SIS_LOCK(sc);
			/* Disable interrupts */
			CSR_WRITE_4(sc, SIS_IER, 0);
			ifp->if_capenable |= IFCAP_POLLING;
			SIS_UNLOCK(sc);
			return (error);

		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			SIS_LOCK(sc);
			CSR_WRITE_4(sc, SIS_IER, 1);
			ifp->if_capenable &= ~IFCAP_POLLING;
			SIS_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
sis_watchdog(struct sis_softc *sc)
{

	SIS_LOCK_ASSERT(sc);
	if (sc->sis_stopped) {
		SIS_UNLOCK(sc);
		return;
	}

	if (sc->sis_watchdog_timer == 0 || --sc->sis_watchdog_timer > 0)
		return;

	device_printf(sc->sis_dev, "watchdog timeout\n");
	sc->sis_ifp->if_oerrors++;

	sis_stop(sc);
	sis_reset(sc);
	sis_initl(sc);

	if (!IFQ_DRV_IS_EMPTY(&sc->sis_ifp->if_snd))
		sis_startl(sc->sis_ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
sis_stop(struct sis_softc *sc)
{
	int i;
	struct ifnet *ifp;
	struct sis_desc *dp;

	if (sc->sis_stopped)
		return;
	SIS_LOCK_ASSERT(sc);
	ifp = sc->sis_ifp;
	sc->sis_watchdog_timer = 0;

	callout_stop(&sc->sis_stat_ch);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	CSR_WRITE_4(sc, SIS_IER, 0);
	CSR_WRITE_4(sc, SIS_IMR, 0);
	CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
	DELAY(1000);
	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);

	sc->sis_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	dp = &sc->sis_rx_list[0];
	for (i = 0; i < SIS_RX_LIST_CNT; i++, dp++) {
		if (dp->sis_mbuf == NULL)
			continue;
		bus_dmamap_unload(sc->sis_tag, dp->sis_map);
		bus_dmamap_destroy(sc->sis_tag, dp->sis_map);
		m_freem(dp->sis_mbuf);
		dp->sis_mbuf = NULL;
	}
	bzero(sc->sis_rx_list, SIS_RX_LIST_SZ);

	/*
	 * Free the TX list buffers.
	 */
	dp = &sc->sis_tx_list[0];
	for (i = 0; i < SIS_TX_LIST_CNT; i++, dp++) {
		if (dp->sis_mbuf == NULL)
			continue;
		bus_dmamap_unload(sc->sis_tag, dp->sis_map);
		bus_dmamap_destroy(sc->sis_tag, dp->sis_map);
		m_freem(dp->sis_mbuf);
		dp->sis_mbuf = NULL;
	}

	bzero(sc->sis_tx_list, SIS_TX_LIST_SZ);

	sc->sis_stopped = 1;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
sis_shutdown(device_t dev)
{
	struct sis_softc *sc;

	sc = device_get_softc(dev);
	SIS_LOCK(sc);
	sis_reset(sc);
	sis_stop(sc);
	SIS_UNLOCK(sc);
}

static device_method_t sis_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sis_probe),
	DEVMETHOD(device_attach,	sis_attach),
	DEVMETHOD(device_detach,	sis_detach),
	DEVMETHOD(device_shutdown,	sis_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sis_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sis_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sis_miibus_statchg),

	{ 0, 0 }
};

static driver_t sis_driver = {
	"sis",
	sis_methods,
	sizeof(struct sis_softc)
};

static devclass_t sis_devclass;

DRIVER_MODULE(sis, pci, sis_driver, sis_devclass, 0, 0);
DRIVER_MODULE(miibus, sis, miibus_driver, miibus_devclass, 0, 0);