/*-
 * Copyright (c) 2005 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
 * available from http://www.sis.com.tw.
 *
 * This driver also supports the NatSemi DP83815. Datasheets are
 * available from http://www.national.com.
 *
 * Written by Bill Paul <wpaul@ee.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */
/*
 * The SiS 900 is a fairly simple chip. It uses bus master DMA with
 * simple TX and RX descriptors of 3 longwords in size. The receiver
 * has a single perfect filter entry for the station address and a
 * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
 * transceiver while the 7016 requires an external transceiver chip.
 * Both chips offer the standard bit-bang MII interface as well as
 * an enhanced PHY interface which simplifies accessing MII registers.
 *
 * The only downside to this chipset is that RX descriptors must be
 * longword aligned.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#define SIS_USEIOSPACE

#include <dev/sis/if_sisreg.h>

MODULE_DEPEND(sis, pci, 1, 1, 1);
MODULE_DEPEND(sis, ether, 1, 1, 1);
MODULE_DEPEND(sis, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define	SIS_LOCK(_sc)		mtx_lock(&(_sc)->sis_mtx)
#define	SIS_UNLOCK(_sc)		mtx_unlock(&(_sc)->sis_mtx)
#define	SIS_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sis_mtx, MA_OWNED)

/*
 * Register space access macros.
 */
#define	CSR_WRITE_4(sc, reg, val)	bus_write_4(sc->sis_res[0], reg, val)

#define	CSR_READ_4(sc, reg)		bus_read_4(sc->sis_res[0], reg)

#define	CSR_READ_2(sc, reg)		bus_read_2(sc->sis_res[0], reg)

/*
 * Various supported device vendors/types and their names.
 */
static struct sis_type sis_devs[] = {
	{ SIS_VENDORID, SIS_DEVICEID_900, "SiS 900 10/100BaseTX" },
	{ SIS_VENDORID, SIS_DEVICEID_7016, "SiS 7016 10/100BaseTX" },
	{ NS_VENDORID, NS_DEVICEID_DP83815, "NatSemi DP8381[56] 10/100BaseTX" },
	{ 0, 0, NULL }
};

static int sis_detach(device_t);
static void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int sis_ifmedia_upd(struct ifnet *);
static void sis_init(void *);
static void sis_initl(struct sis_softc *);
static void sis_intr(void *);
static int sis_ioctl(struct ifnet *, u_long, caddr_t);
static int sis_newbuf(struct sis_softc *, struct sis_desc *, struct mbuf *);
static void sis_start(struct ifnet *);
static void sis_startl(struct ifnet *);
static void sis_stop(struct sis_softc *);
static void sis_watchdog(struct sis_softc *);

static struct resource_spec sis_res_spec[] = {
#ifdef SIS_USEIOSPACE
	{ SYS_RES_IOPORT,	SIS_PCI_LOIO,	RF_ACTIVE },
#else
	{ SYS_RES_MEMORY,	SIS_PCI_LOMEM,	RF_ACTIVE },
#endif
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

#define	SIS_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define	SIS_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

#define	SIO_SET(x)					\
	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)

#define	SIO_CLR(x)					\
	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)

static void
sis_dma_map_desc_next(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct sis_desc *r;

	r = arg;
	r->sis_next = segs->ds_addr;
}

static void
sis_dma_map_desc_ptr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct sis_desc *r;

	r = arg;
	r->sis_ptr = segs->ds_addr;
}

static void
sis_dma_map_ring(void *arg,
    bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *p;

	p = arg;
	*p = segs->ds_addr;
}

/*
 * Routine to reverse the bits in a word. Stolen almost
 * verbatim from /usr/games/fortune.
 */
static uint16_t
sis_reverse(uint16_t n)
{
	n = ((n >> 1) & 0x5555) | ((n << 1) & 0xaaaa);
	n = ((n >> 2) & 0x3333) | ((n << 2) & 0xcccc);
	n = ((n >> 4) & 0x0f0f) | ((n << 4) & 0xf0f0);
	n = ((n >> 8) & 0x00ff) | ((n << 8) & 0xff00);

	return (n);
}

static void
sis_delay(struct sis_softc *sc)
{
	int idx;

	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, SIS_CSR);
}

static void
sis_eeprom_idle(struct sis_softc *sc)
{
	int i;

	SIO_SET(SIS_EECTL_CSEL);
	sis_delay(sc);
	SIO_SET(SIS_EECTL_CLK);
	sis_delay(sc);

	for (i = 0; i < 25; i++) {
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
	}

	SIO_CLR(SIS_EECTL_CLK);
	sis_delay(sc);
	SIO_CLR(SIS_EECTL_CSEL);
	sis_delay(sc);
	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
sis_eeprom_putbyte(struct sis_softc *sc, int addr)
{
	int d, i;

	d = addr | SIS_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(SIS_EECTL_DIN);
		} else {
			SIO_CLR(SIS_EECTL_DIN);
		}
		sis_delay(sc);
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
sis_eeprom_getword(struct sis_softc *sc, int addr, uint16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/* Force EEPROM to idle state. */
	sis_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	sis_delay(sc);
	SIO_CLR(SIS_EECTL_CLK);
	sis_delay(sc);
	SIO_SET(SIS_EECTL_CSEL);
	sis_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	sis_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
		if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
			word |= i;
		sis_delay(sc);
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	sis_eeprom_idle(sc);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
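 * When 'swap' is non-zero, each word read is passed through ntohs()
 * before it is stored at 'dest'.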
 */
static void
sis_read_eeprom(struct sis_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int i;
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		sis_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
}

#if defined(__i386__) || defined(__amd64__)
static device_t
sis_find_bridge(device_t dev)
{
	devclass_t pci_devclass;
	device_t *pci_devices;
	int pci_count = 0;
	device_t *pci_children;
	int pci_childcount = 0;
	device_t *busp, *childp;
	device_t child = NULL;
	int i, j;

	if ((pci_devclass = devclass_find("pci")) == NULL)
		return (NULL);

	devclass_get_devices(pci_devclass, &pci_devices, &pci_count);

	for (i = 0, busp = pci_devices; i < pci_count; i++, busp++) {
		if (device_get_children(*busp, &pci_children, &pci_childcount))
			continue;
		for (j = 0, childp = pci_children;
		    j < pci_childcount; j++, childp++) {
			if (pci_get_vendor(*childp) == SIS_VENDORID &&
			    pci_get_device(*childp) == 0x0008) {
				child = *childp;
				free(pci_children, M_TEMP);
				goto done;
			}
		}
		free(pci_children, M_TEMP);
	}

done:
	free(pci_devices, M_TEMP);
	return (child);
}

static void
sis_read_cmos(struct sis_softc *sc, device_t dev, caddr_t dest, int off, int cnt)
{
	device_t bridge;
	u_int8_t reg;
	int i;
	bus_space_tag_t btag;

	bridge = sis_find_bridge(dev);
	if (bridge == NULL)
		return;
	reg = pci_read_config(bridge, 0x48, 1);
	pci_write_config(bridge, 0x48, reg | 0x40, 1);

	/* XXX */
#if defined(__i386__)
	btag = I386_BUS_SPACE_IO;
#elif defined(__amd64__)
	btag = AMD64_BUS_SPACE_IO;
#endif

	for (i = 0; i < cnt; i++) {
		bus_space_write_1(btag, 0x0, 0x70, i + off);
		*(dest + i) = bus_space_read_1(btag, 0x0, 0x71);
	}

	pci_write_config(bridge, 0x48, reg & ~0x40, 1);
	return;
}

static void
sis_read_mac(struct sis_softc *sc, device_t dev, caddr_t dest)
{
	u_int32_t filtsave, csrsave;

	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);
	csrsave = CSR_READ_4(sc, SIS_CSR);

	CSR_WRITE_4(sc, SIS_CSR, SIS_CSR_RELOAD | filtsave);
	CSR_WRITE_4(sc, SIS_CSR, 0);

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave & ~SIS_RXFILTCTL_ENABLE);

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
	((u_int16_t *)dest)[0] = CSR_READ_2(sc, SIS_RXFILT_DATA);
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
	((u_int16_t *)dest)[1] = CSR_READ_2(sc, SIS_RXFILT_DATA);
	CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
	((u_int16_t *)dest)[2] = CSR_READ_2(sc, SIS_RXFILT_DATA);

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);
	CSR_WRITE_4(sc, SIS_CSR, csrsave);
	return;
}
#endif

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
sis_mii_sync(struct sis_softc *sc)
{
	int i;

	SIO_SET(SIS_MII_DIR | SIS_MII_DATA);

	for (i = 0; i < 32; i++) {
		SIO_SET(SIS_MII_CLK);
		DELAY(1);
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
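 * The bits are shifted out MSB first on SIS_MII_DATA, with SIS_MII_CLK
 * cycled once for each bit.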
 */
static void
sis_mii_send(struct sis_softc *sc, uint32_t bits, int cnt)
{
	int i;

	SIO_CLR(SIS_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(SIS_MII_DATA);
		} else {
			SIO_CLR(SIS_MII_DATA);
		}
		DELAY(1);
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
		SIO_SET(SIS_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int
sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
{
	int i, ack;

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = SIS_MII_STARTDELIM;
	frame->mii_opcode = SIS_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(SIS_MII_DIR);

	sis_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	sis_mii_send(sc, frame->mii_stdelim, 2);
	sis_mii_send(sc, frame->mii_opcode, 2);
	sis_mii_send(sc, frame->mii_phyaddr, 5);
	sis_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((SIS_MII_CLK | SIS_MII_DATA));
	DELAY(1);
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(SIS_MII_DIR);

	/* Check for ack */
	SIO_CLR(SIS_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			SIO_CLR(SIS_MII_CLK);
			DELAY(1);
			SIO_SET(SIS_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(SIS_MII_CLK);
		DELAY(1);
	}

fail:

	SIO_CLR(SIS_MII_CLK);
	DELAY(1);
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	if (ack)
		return (1);
	return (0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
{

	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = SIS_MII_STARTDELIM;
	frame->mii_opcode = SIS_MII_WRITEOP;
	frame->mii_turnaround = SIS_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(SIS_MII_DIR);

	sis_mii_sync(sc);

	sis_mii_send(sc, frame->mii_stdelim, 2);
	sis_mii_send(sc, frame->mii_opcode, 2);
	sis_mii_send(sc, frame->mii_phyaddr, 5);
	sis_mii_send(sc, frame->mii_regaddr, 5);
	sis_mii_send(sc, frame->mii_turnaround, 2);
	sis_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(SIS_MII_CLK);
	DELAY(1);
	SIO_CLR(SIS_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(SIS_MII_DIR);

	return (0);
}

static int
sis_miibus_readreg(device_t dev, int phy, int reg)
{
	struct sis_softc *sc;
	struct sis_mii_frame frame;

	sc = device_get_softc(dev);

	if (sc->sis_type == SIS_TYPE_83815) {
		if (phy != 0)
			return (0);
		/*
		 * The NatSemi chip can take a while after
		 * a reset to come ready, during which the BMSR
		 * returns a value of 0.
		 * This is *never* supposed
		 * to happen: some of the BMSR bits are meant to
		 * be hardwired in the on position, and this can
		 * confuse the miibus code a bit during the probe
		 * and attach phase. So we make an effort to check
		 * for this condition and wait for it to clear.
		 */
		if (!CSR_READ_4(sc, NS_BMSR))
			DELAY(1000);
		return CSR_READ_4(sc, NS_BMCR + (reg * 4));
	}

	/*
	 * Chipsets < SIS_635 seem not to be able to read/write
	 * through mdio. Use the enhanced PHY access register
	 * again for them.
	 */
	if (sc->sis_type == SIS_TYPE_900 &&
	    sc->sis_rev < SIS_REV_635) {
		int i, val = 0;

		if (phy != 0)
			return (0);

		CSR_WRITE_4(sc, SIS_PHYCTL,
		    (phy << 11) | (reg << 6) | SIS_PHYOP_READ);
		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);

		for (i = 0; i < SIS_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
				break;
		}

		if (i == SIS_TIMEOUT) {
			device_printf(sc->sis_dev, "PHY failed to come ready\n");
			return (0);
		}

		val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF;

		if (val == 0xFFFF)
			return (0);

		return (val);
	} else {
		bzero((char *)&frame, sizeof(frame));

		frame.mii_phyaddr = phy;
		frame.mii_regaddr = reg;
		sis_mii_readreg(sc, &frame);

		return (frame.mii_data);
	}
}

static int
sis_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct sis_softc *sc;
	struct sis_mii_frame frame;

	sc = device_get_softc(dev);

	if (sc->sis_type == SIS_TYPE_83815) {
		if (phy != 0)
			return (0);
		CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data);
		return (0);
	}

	/*
	 * Chipsets < SIS_635 seem not to be able to read/write
	 * through mdio. Use the enhanced PHY access register
	 * again for them.
	 */
	if (sc->sis_type == SIS_TYPE_900 &&
	    sc->sis_rev < SIS_REV_635) {
		int i;

		if (phy != 0)
			return (0);

		CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) |
		    (reg << 6) | SIS_PHYOP_WRITE);
		SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS);

		for (i = 0; i < SIS_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS))
				break;
		}

		if (i == SIS_TIMEOUT)
			device_printf(sc->sis_dev, "PHY failed to come ready\n");
	} else {
		bzero((char *)&frame, sizeof(frame));

		frame.mii_phyaddr = phy;
		frame.mii_regaddr = reg;
		frame.mii_data = data;
		sis_mii_writereg(sc, &frame);
	}
	return (0);
}

static void
sis_miibus_statchg(device_t dev)
{
	struct sis_softc *sc;

	sc = device_get_softc(dev);
	SIS_LOCK_ASSERT(sc);
	sis_initl(sc);
}

static uint32_t
sis_mchash(struct sis_softc *sc, const uint8_t *addr)
{
	uint32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);

	/*
	 * Return the filter bit position.
	 *
	 * The NatSemi chip has a 512-bit filter, which is
	 * different from the SiS, so we special-case it.
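	 * The DP83815 uses the top 9 bits of the CRC (512 bins), SiS
	 * revisions 635 and 900B use the top 8 bits (256 bins) and
	 * older SiS parts use the top 7 bits (128 bins).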
	 */
	if (sc->sis_type == SIS_TYPE_83815)
		return (crc >> 23);
	else if (sc->sis_rev >= SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)
		return (crc >> 24);
	else
		return (crc >> 25);
}

static void
sis_setmulti_ns(struct sis_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u_int32_t h = 0, i, filtsave;
	int bit, index;

	ifp = sc->sis_ifp;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		SIS_CLRBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);
		return;
	}

	/*
	 * We have to explicitly enable the multicast hash table
	 * on the NatSemi chip if we want to use it, which we do.
	 */
	SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
	SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);

	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);

	/* first, zot all the existing hash bits */
	for (i = 0; i < 32; i++) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i * 2));
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
	}

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = sis_mchash(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		index = h >> 3;
		bit = h & 0x1F;
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);
		if (bit > 0xF)
			bit -= 0x10;
		SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));
	}
	if_maddr_runlock(ifp);

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);

	return;
}

static void
sis_setmulti_sis(struct sis_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u_int32_t h, i, n, ctl;
	u_int16_t hashes[16];

	ifp = sc->sis_ifp;

	/* hash table size */
	if (sc->sis_rev >= SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)
		n = 16;
	else
		n = 8;

	ctl = CSR_READ_4(sc, SIS_RXFILT_CTL) & SIS_RXFILTCTL_ENABLE;

	if (ifp->if_flags & IFF_BROADCAST)
		ctl |= SIS_RXFILTCTL_BROAD;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		ctl |= SIS_RXFILTCTL_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			ctl |= SIS_RXFILTCTL_BROAD | SIS_RXFILTCTL_ALLPHYS;
		for (i = 0; i < n; i++)
			hashes[i] = ~0;
	} else {
		for (i = 0; i < n; i++)
			hashes[i] = 0;
		i = 0;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = sis_mchash(sc,
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			hashes[h >> 4] |= 1 << (h & 0xf);
			i++;
		}
		if_maddr_runlock(ifp);
		if (i > n) {
			ctl |= SIS_RXFILTCTL_ALLMULTI;
			for (i = 0; i < n; i++)
				hashes[i] = ~0;
		}
	}

	for (i = 0; i < n; i++) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
	}

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, ctl);
}

static void
sis_reset(struct sis_softc *sc)
{
	int i;

	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);

	for (i = 0; i < SIS_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
			break;
	}

	if (i == SIS_TIMEOUT)
		device_printf(sc->sis_dev, "reset never completed\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NatSemi chip, make sure to clear
	 * PME mode.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
		CSR_WRITE_4(sc, NS_CLKRUN, 0);
	}

	return;
}

/*
 * Probe for an SiS chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
sis_probe(device_t dev)
{
	struct sis_type *t;

	t = sis_devs;

	while (t->sis_name != NULL) {
		if ((pci_get_vendor(dev) == t->sis_vid) &&
		    (pci_get_device(dev) == t->sis_did)) {
			device_set_desc(dev, t->sis_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
sis_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	struct sis_softc *sc;
	struct ifnet *ifp;
	int error = 0, waittime = 0;

	waittime = 0;
	sc = device_get_softc(dev);

	sc->sis_dev = dev;

	mtx_init(&sc->sis_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sis_stat_ch, &sc->sis_mtx, 0);

	if (pci_get_device(dev) == SIS_DEVICEID_900)
		sc->sis_type = SIS_TYPE_900;
	if (pci_get_device(dev) == SIS_DEVICEID_7016)
		sc->sis_type = SIS_TYPE_7016;
	if (pci_get_vendor(dev) == NS_VENDORID)
		sc->sis_type = SIS_TYPE_83815;

	sc->sis_rev = pci_read_config(dev, PCIR_REVID, 1);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	error = bus_alloc_resources(dev, sis_res_spec, sc->sis_res);
	if (error) {
		device_printf(dev, "couldn't allocate resources\n");
		goto fail;
	}

	/* Reset the adapter. */
	sis_reset(sc);

	if (sc->sis_type == SIS_TYPE_900 &&
	    (sc->sis_rev == SIS_REV_635 ||
	    sc->sis_rev == SIS_REV_900B)) {
		SIO_SET(SIS_CFG_RND_CNT);
		SIO_SET(SIS_CFG_PERR_DETECT);
	}

	/*
	 * Get station address from the EEPROM.
	 */
	switch (pci_get_vendor(dev)) {
	case NS_VENDORID:
		sc->sis_srr = CSR_READ_4(sc, NS_SRR);

		/* We can't update the device description, so spew */
		if (sc->sis_srr == NS_SRR_15C)
			device_printf(dev, "Silicon Revision: DP83815C\n");
		else if (sc->sis_srr == NS_SRR_15D)
			device_printf(dev, "Silicon Revision: DP83815D\n");
		else if (sc->sis_srr == NS_SRR_16A)
			device_printf(dev, "Silicon Revision: DP83816A\n");
		else
			device_printf(dev, "Silicon Revision %x\n", sc->sis_srr);

		/*
		 * Reading the MAC address out of the EEPROM on
		 * the NatSemi chip takes a bit more work than
		 * you'd expect. The address spans 4 16-bit words,
		 * with the first word containing only a single bit.
		 * You have to shift everything over one bit to
		 * get it aligned properly. Also, the bits are
		 * stored backwards (the LSB is really the MSB,
		 * and so on) so you have to reverse them in order
		 * to get the MAC address into the form we want.
		 * Why? Who the hell knows.
		 */
		{
			u_int16_t tmp[4];

			sis_read_eeprom(sc, (caddr_t)&tmp,
			    NS_EE_NODEADDR, 4, 0);

			/* Shift everything over one bit. */
			tmp[3] = tmp[3] >> 1;
			tmp[3] |= tmp[2] << 15;
			tmp[2] = tmp[2] >> 1;
			tmp[2] |= tmp[1] << 15;
			tmp[1] = tmp[1] >> 1;
			tmp[1] |= tmp[0] << 15;

			/* Now reverse all the bits.
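			 * After the reversal, tmp[1]..tmp[3] hold the
			 * station address that is copied into eaddr below.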
			 */
			tmp[3] = sis_reverse(tmp[3]);
			tmp[2] = sis_reverse(tmp[2]);
			tmp[1] = sis_reverse(tmp[1]);

			bcopy((char *)&tmp[1], eaddr, ETHER_ADDR_LEN);
		}
		break;
	case SIS_VENDORID:
	default:
#if defined(__i386__) || defined(__amd64__)
		/*
		 * If this is a SiS 630E chipset with an embedded
		 * SiS 900 controller, we have to read the MAC address
		 * from the APC CMOS RAM. Our method for doing this
		 * is very ugly since we have to reach out and grab
		 * ahold of hardware for which we cannot properly
		 * allocate resources. This code is only compiled on
		 * the i386 architecture since the SiS 630E chipset
		 * is for x86 motherboards only. Note that there are
		 * a lot of magic numbers in this hack. These are
		 * taken from SiS's Linux driver. I'd like to replace
		 * them with proper symbolic definitions, but that
		 * requires some datasheets that I don't have access
		 * to at the moment.
		 */
		if (sc->sis_rev == SIS_REV_630S ||
		    sc->sis_rev == SIS_REV_630E ||
		    sc->sis_rev == SIS_REV_630EA1)
			sis_read_cmos(sc, dev, (caddr_t)&eaddr, 0x9, 6);
		else if (sc->sis_rev == SIS_REV_635 ||
		    sc->sis_rev == SIS_REV_630ET)
			sis_read_mac(sc, dev, (caddr_t)&eaddr);
		else if (sc->sis_rev == SIS_REV_96x) {
			/*
			 * Allow the LAN controller to read the EEPROM.
			 * The EEPROM is shared between a 1394 controller
			 * and the NIC, and each time we access it we need
			 * to set SIS_EECMD_REQ.
			 */
			SIO_SET(SIS_EECMD_REQ);
			for (waittime = 0; waittime < SIS_TIMEOUT;
			    waittime++) {
				/* Force EEPROM to idle state. */
				sis_eeprom_idle(sc);
				if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECMD_GNT) {
					sis_read_eeprom(sc, (caddr_t)&eaddr,
					    SIS_EE_NODEADDR, 3, 0);
					break;
				}
				DELAY(1);
			}
			/*
			 * Set SIS_EECTL_CLK to high, so another master
			 * can operate on the i2c bus.
			 */
			SIO_SET(SIS_EECTL_CLK);
			/* Refuse EEPROM access by LAN */
			SIO_SET(SIS_EECMD_DONE);
		} else
#endif
			sis_read_eeprom(sc, (caddr_t)&eaddr,
			    SIS_EE_NODEADDR, 3, 0);
		break;
	}

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define SIS_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MAXBSIZE, SIS_NSEG_NEW,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sis_parent_tag);
	if (error)
		goto fail;

	/*
	 * Now allocate a tag for the DMA descriptor lists and a chunk
	 * of DMA-able memory based on the tag. Also obtain the physical
	 * addresses of the RX and TX ring, which we'll need later.
	 * All of our lists are allocated as a contiguous block
	 * of memory.
	 */
	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SIS_RX_LIST_SZ, 1,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    busdma_lock_mutex,		/* lockfunc */
	    &Giant,			/* lockarg */
	    &sc->sis_rx_tag);
	if (error)
		goto fail;

	error = bus_dmamem_alloc(sc->sis_rx_tag,
	    (void **)&sc->sis_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sis_rx_dmamap);

	if (error) {
		device_printf(dev, "no memory for rx list buffers!\n");
		bus_dma_tag_destroy(sc->sis_rx_tag);
		sc->sis_rx_tag = NULL;
		goto fail;
	}

	error = bus_dmamap_load(sc->sis_rx_tag,
	    sc->sis_rx_dmamap, &(sc->sis_rx_list[0]),
	    sizeof(struct sis_desc), sis_dma_map_ring,
	    &sc->sis_rx_paddr, 0);

	if (error) {
		device_printf(dev, "cannot get address of the rx ring!\n");
		bus_dmamem_free(sc->sis_rx_tag,
		    sc->sis_rx_list, sc->sis_rx_dmamap);
		bus_dma_tag_destroy(sc->sis_rx_tag);
		sc->sis_rx_tag = NULL;
		goto fail;
	}

	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SIS_TX_LIST_SZ, 1,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    busdma_lock_mutex,		/* lockfunc */
	    &Giant,			/* lockarg */
	    &sc->sis_tx_tag);
	if (error)
		goto fail;

	error = bus_dmamem_alloc(sc->sis_tx_tag,
	    (void **)&sc->sis_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sis_tx_dmamap);

	if (error) {
		device_printf(dev, "no memory for tx list buffers!\n");
		bus_dma_tag_destroy(sc->sis_tx_tag);
		sc->sis_tx_tag = NULL;
		goto fail;
	}

	error = bus_dmamap_load(sc->sis_tx_tag,
	    sc->sis_tx_dmamap, &(sc->sis_tx_list[0]),
	    sizeof(struct sis_desc), sis_dma_map_ring,
	    &sc->sis_tx_paddr, 0);

	if (error) {
		device_printf(dev, "cannot get address of the tx ring!\n");
		bus_dmamem_free(sc->sis_tx_tag,
		    sc->sis_tx_list, sc->sis_tx_dmamap);
		bus_dma_tag_destroy(sc->sis_tx_tag);
		sc->sis_tx_tag = NULL;
		goto fail;
	}

	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    busdma_lock_mutex,		/* lockfunc */
	    &Giant,			/* lockarg */
	    &sc->sis_tag);
	if (error)
		goto fail;

	/*
	 * Obtain the physical addresses of the RX and TX
	 * rings which we'll need later in the init routine.
	 */

	ifp = sc->sis_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sis_ioctl;
	ifp->if_start = sis_start;
	ifp->if_init = sis_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, SIS_TX_LIST_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = SIS_TX_LIST_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Do MII setup.
	 */
	if (mii_phy_probe(dev, &sc->sis_miibus,
	    sis_ifmedia_upd, sis_ifmedia_sts)) {
		device_printf(dev, "MII without any PHY!\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->sis_res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, sis_intr, sc, &sc->sis_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		sis_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sis_detach(device_t dev)
{
	struct sis_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->sis_mtx), ("sis mutex not initialized"));
	ifp = sc->sis_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded.
	 */
	if (device_is_attached(dev)) {
		SIS_LOCK(sc);
		sis_reset(sc);
		sis_stop(sc);
		SIS_UNLOCK(sc);
		callout_drain(&sc->sis_stat_ch);
		ether_ifdetach(ifp);
	}
	if (sc->sis_miibus)
		device_delete_child(dev, sc->sis_miibus);
	bus_generic_detach(dev);

	if (sc->sis_intrhand)
		bus_teardown_intr(dev, sc->sis_res[1], sc->sis_intrhand);
	bus_release_resources(dev, sis_res_spec, sc->sis_res);

	if (ifp)
		if_free(ifp);

	if (sc->sis_rx_tag) {
		bus_dmamap_unload(sc->sis_rx_tag,
		    sc->sis_rx_dmamap);
		bus_dmamem_free(sc->sis_rx_tag,
		    sc->sis_rx_list, sc->sis_rx_dmamap);
		bus_dma_tag_destroy(sc->sis_rx_tag);
	}
	if (sc->sis_tx_tag) {
		bus_dmamap_unload(sc->sis_tx_tag,
		    sc->sis_tx_dmamap);
		bus_dmamem_free(sc->sis_tx_tag,
		    sc->sis_tx_list, sc->sis_tx_dmamap);
		bus_dma_tag_destroy(sc->sis_tx_tag);
	}
	if (sc->sis_parent_tag)
		bus_dma_tag_destroy(sc->sis_parent_tag);
	if (sc->sis_tag)
		bus_dma_tag_destroy(sc->sis_tag);

	mtx_destroy(&sc->sis_mtx);

	return (0);
}

/*
 * Initialize the TX and RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
sis_ring_init(struct sis_softc *sc)
{
	int i, error;
	struct sis_desc *dp;

	dp = &sc->sis_tx_list[0];
	for (i = 0; i < SIS_TX_LIST_CNT; i++, dp++) {
		if (i == (SIS_TX_LIST_CNT - 1))
			dp->sis_nextdesc = &sc->sis_tx_list[0];
		else
			dp->sis_nextdesc = dp + 1;
		bus_dmamap_load(sc->sis_tx_tag,
		    sc->sis_tx_dmamap,
		    dp->sis_nextdesc, sizeof(struct sis_desc),
		    sis_dma_map_desc_next, dp, 0);
		dp->sis_mbuf = NULL;
		dp->sis_ptr = 0;
		dp->sis_ctl = 0;
	}

	sc->sis_tx_prod = sc->sis_tx_cons = sc->sis_tx_cnt = 0;

	bus_dmamap_sync(sc->sis_tx_tag,
	    sc->sis_tx_dmamap, BUS_DMASYNC_PREWRITE);

	dp = &sc->sis_rx_list[0];
	for (i = 0; i < SIS_RX_LIST_CNT; i++, dp++) {
		error = sis_newbuf(sc, dp, NULL);
		if (error)
			return (error);
		if (i == (SIS_RX_LIST_CNT - 1))
			dp->sis_nextdesc = &sc->sis_rx_list[0];
		else
			dp->sis_nextdesc = dp + 1;
		bus_dmamap_load(sc->sis_rx_tag,
		    sc->sis_rx_dmamap,
		    dp->sis_nextdesc, sizeof(struct sis_desc),
		    sis_dma_map_desc_next, dp, 0);
	}

	bus_dmamap_sync(sc->sis_rx_tag,
	    sc->sis_rx_dmamap, BUS_DMASYNC_PREWRITE);

	sc->sis_rx_pdsc = &sc->sis_rx_list[0];

	return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
sis_newbuf(struct sis_softc *sc, struct sis_desc *c, struct mbuf *m)
{

	if (c == NULL)
		return (EINVAL);

	if (m == NULL) {
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (ENOBUFS);
	} else
		m->m_data = m->m_ext.ext_buf;

	c->sis_mbuf = m;
	c->sis_ctl = SIS_RXLEN;

	bus_dmamap_create(sc->sis_tag, 0, &c->sis_map);
	bus_dmamap_load(sc->sis_tag, c->sis_map,
	    mtod(m, void *), MCLBYTES,
	    sis_dma_map_desc_ptr, c, 0);
	bus_dmamap_sync(sc->sis_tag, c->sis_map, BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static int
sis_rxeof(struct sis_softc *sc)
{
	struct mbuf *m, *m0;
	struct ifnet *ifp;
	struct sis_desc *cur_rx;
	int total_len = 0, rx_npkts = 0;
	u_int32_t rxstat;

	SIS_LOCK_ASSERT(sc);

	ifp = sc->sis_ifp;

	for (cur_rx = sc->sis_rx_pdsc; SIS_OWNDESC(cur_rx);
	    cur_rx = cur_rx->sis_nextdesc) {

#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		rxstat = cur_rx->sis_rxstat;
		bus_dmamap_sync(sc->sis_tag,
		    cur_rx->sis_map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sis_tag, cur_rx->sis_map);
		bus_dmamap_destroy(sc->sis_tag, cur_rx->sis_map);
		m = cur_rx->sis_mbuf;
		cur_rx->sis_mbuf = NULL;
		total_len = SIS_RXBYTES(cur_rx);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if ((ifp->if_capenable & IFCAP_VLAN_MTU) != 0 &&
		    total_len <= (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN -
		    ETHER_CRC_LEN))
			rxstat &= ~SIS_RXSTAT_GIANT;
		if (SIS_RXSTAT_ERROR(rxstat) != 0) {
			ifp->if_ierrors++;
			if (rxstat & SIS_RXSTAT_COLL)
				ifp->if_collisions++;
			sis_newbuf(sc, cur_rx, m);
			continue;
		}

		/* No errors; receive the packet. */
#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * On architectures without alignment problems we try to
		 * allocate a new buffer for the receive ring, and pass up
		 * the one where the packet is already, saving the expensive
		 * copy done in m_devget().
		 * If we are on an architecture with alignment problems, or
		 * if the allocation fails, then use m_devget and leave the
		 * existing buffer in the receive ring.
		 */
		if (sis_newbuf(sc, cur_rx, NULL) == 0)
			m->m_pkthdr.len = m->m_len = total_len;
		else
#endif
		{
			m0 = m_devget(mtod(m, char *), total_len,
			    ETHER_ALIGN, ifp, NULL);
			sis_newbuf(sc, cur_rx, m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		SIS_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		SIS_LOCK(sc);
		rx_npkts++;
	}

	sc->sis_rx_pdsc = cur_rx;
	return (rx_npkts);
}

static void
sis_rxeoc(struct sis_softc *sc)
{

	SIS_LOCK_ASSERT(sc);
	sis_rxeof(sc);
	sis_initl(sc);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

static void
sis_txeof(struct sis_softc *sc)
{
	struct ifnet *ifp;
	u_int32_t idx;

	SIS_LOCK_ASSERT(sc);
	ifp = sc->sis_ifp;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
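	 * Descriptors with SIS_CMDSTS_MORE set are intermediate fragments
	 * of a multi-fragment frame; only the last fragment carries the
	 * completion status, so they are skipped below.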
	 */
	for (idx = sc->sis_tx_cons; sc->sis_tx_cnt > 0;
	    sc->sis_tx_cnt--, SIS_INC(idx, SIS_TX_LIST_CNT)) {
		struct sis_desc *cur_tx = &sc->sis_tx_list[idx];

		if (SIS_OWNDESC(cur_tx))
			break;

		if (cur_tx->sis_ctl & SIS_CMDSTS_MORE)
			continue;

		if (!(cur_tx->sis_ctl & SIS_CMDSTS_PKT_OK)) {
			ifp->if_oerrors++;
			if (cur_tx->sis_txstat & SIS_TXSTAT_EXCESSCOLLS)
				ifp->if_collisions++;
			if (cur_tx->sis_txstat & SIS_TXSTAT_OUTOFWINCOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions +=
		    (cur_tx->sis_txstat & SIS_TXSTAT_COLLCNT) >> 16;

		ifp->if_opackets++;
		if (cur_tx->sis_mbuf != NULL) {
			m_freem(cur_tx->sis_mbuf);
			cur_tx->sis_mbuf = NULL;
			bus_dmamap_unload(sc->sis_tag, cur_tx->sis_map);
			bus_dmamap_destroy(sc->sis_tag, cur_tx->sis_map);
		}
	}

	if (idx != sc->sis_tx_cons) {
		/* we freed up some buffers */
		sc->sis_tx_cons = idx;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	sc->sis_watchdog_timer = (sc->sis_tx_cnt == 0) ? 0 : 5;

	return;
}

static void
sis_tick(void *xsc)
{
	struct sis_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;

	sc = xsc;
	SIS_LOCK_ASSERT(sc);
	sc->in_tick = 1;
	ifp = sc->sis_ifp;

	mii = device_get_softc(sc->sis_miibus);
	mii_tick(mii);

	sis_watchdog(sc);

	if (!sc->sis_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->sis_link++;
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			sis_startl(ifp);
	}

	callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc);
	sc->in_tick = 0;
}

#ifdef DEVICE_POLLING
static poll_handler_t sis_poll;

static int
sis_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct sis_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	SIS_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		SIS_UNLOCK(sc);
		return (rx_npkts);
	}

	/*
	 * On the sis, reading the status register also clears it.
	 * So before returning to intr mode we must make sure that all
	 * possible pending sources of interrupts have been served.
	 * In practice this means running the *eof routines to completion
	 * and then calling the interrupt routine.
	 */
	sc->rxcycles = count;
	rx_npkts = sis_rxeof(sc);
	sis_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		sis_startl(ifp);

	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
		u_int32_t status;

		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, SIS_ISR);

		if (status & (SIS_ISR_RX_ERR | SIS_ISR_RX_OFLOW))
			sis_rxeoc(sc);

		if (status & (SIS_ISR_RX_IDLE))
			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

		if (status & SIS_ISR_SYSERR) {
			sis_reset(sc);
			sis_initl(sc);
		}
	}

	SIS_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static void
sis_intr(void *arg)
{
	struct sis_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;

	sc = arg;
	ifp = sc->sis_ifp;

	if (sc->sis_stopped)		/* Most likely shared interrupt */
		return;

	SIS_LOCK(sc);
#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		SIS_UNLOCK(sc);
		return;
	}
#endif

	/* Disable interrupts.
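	 * SIS_IER acts as the global interrupt enable; it is written back
	 * to 1 further down once all pending sources have been handled.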
	 */
	CSR_WRITE_4(sc, SIS_IER, 0);

	for (;;) {
		SIS_LOCK_ASSERT(sc);
		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, SIS_ISR);

		if ((status & SIS_INTRS) == 0)
			break;

		if (status &
		    (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR |
		    SIS_ISR_TX_OK | SIS_ISR_TX_IDLE))
			sis_txeof(sc);

		if (status & (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK |
		    SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE))
			sis_rxeof(sc);

		if (status & SIS_ISR_RX_OFLOW)
			sis_rxeoc(sc);

		if (status & (SIS_ISR_RX_IDLE))
			SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

		if (status & SIS_ISR_SYSERR) {
			sis_reset(sc);
			sis_initl(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, SIS_IER, 1);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		sis_startl(ifp);

	SIS_UNLOCK(sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
sis_encap(struct sis_softc *sc, struct mbuf **m_head, uint32_t *txidx)
{
	struct sis_desc *f = NULL;
	struct mbuf *m;
	int frag, cur, cnt = 0, chainlen = 0;

	/*
	 * If there's no way we can send any packets, return now.
	 */
	if (SIS_TX_LIST_CNT - sc->sis_tx_cnt < 2)
		return (ENOBUFS);

	/*
	 * Count the number of frags in this chain to see if
	 * we need to m_defrag. Since the descriptor list is shared
	 * by all packets, we'll m_defrag long chains so that they
	 * do not use up the entire list, even if they would fit.
	 */
	for (m = *m_head; m != NULL; m = m->m_next)
		chainlen++;

	if ((chainlen > SIS_TX_LIST_CNT / 4) ||
	    ((SIS_TX_LIST_CNT - (chainlen + sc->sis_tx_cnt)) < 2)) {
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		*m_head = m;
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	cur = frag = *txidx;

	for (m = *m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if ((SIS_TX_LIST_CNT -
			    (sc->sis_tx_cnt + cnt)) < 2)
				return (ENOBUFS);
			f = &sc->sis_tx_list[frag];
			f->sis_ctl = SIS_CMDSTS_MORE | m->m_len;
			bus_dmamap_create(sc->sis_tag, 0, &f->sis_map);
			bus_dmamap_load(sc->sis_tag, f->sis_map,
			    mtod(m, void *), m->m_len,
			    sis_dma_map_desc_ptr, f, 0);
			bus_dmamap_sync(sc->sis_tag,
			    f->sis_map, BUS_DMASYNC_PREREAD);
			if (cnt != 0)
				f->sis_ctl |= SIS_CMDSTS_OWN;
			cur = frag;
			SIS_INC(frag, SIS_TX_LIST_CNT);
			cnt++;
		}
	}

	if (m != NULL)
		return (ENOBUFS);

	sc->sis_tx_list[cur].sis_mbuf = *m_head;
	sc->sis_tx_list[cur].sis_ctl &= ~SIS_CMDSTS_MORE;
	sc->sis_tx_list[*txidx].sis_ctl |= SIS_CMDSTS_OWN;
	sc->sis_tx_cnt += cnt;
	*txidx = frag;

	return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
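 *
 * sis_start() is the ifnet entry point and only acquires the softc lock;
 * sis_startl() does the actual work with the lock already held.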
 */

static void
sis_start(struct ifnet *ifp)
{
	struct sis_softc *sc;

	sc = ifp->if_softc;
	SIS_LOCK(sc);
	sis_startl(ifp);
	SIS_UNLOCK(sc);
}

static void
sis_startl(struct ifnet *ifp)
{
	struct sis_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t idx, queued = 0;

	sc = ifp->if_softc;

	SIS_LOCK_ASSERT(sc);

	if (!sc->sis_link)
		return;

	idx = sc->sis_tx_prod;

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->sis_tx_list[idx].sis_mbuf == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (sis_encap(sc, &m_head, &idx)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (queued) {
		/* Transmit */
		sc->sis_tx_prod = idx;
		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->sis_watchdog_timer = 5;
	}
}

static void
sis_init(void *xsc)
{
	struct sis_softc *sc = xsc;

	SIS_LOCK(sc);
	sis_initl(sc);
	SIS_UNLOCK(sc);
}

static void
sis_initl(struct sis_softc *sc)
{
	struct ifnet *ifp = sc->sis_ifp;
	struct mii_data *mii;

	SIS_LOCK_ASSERT(sc);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	sis_stop(sc);
	sc->sis_stopped = 0;

#ifdef notyet
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
		/*
		 * Configure 400usec of interrupt holdoff. This is based
		 * on empirical tests on a Soekris 4801.
		 */
		CSR_WRITE_4(sc, NS_IHR, 0x100 | 4);
	}
#endif

	mii = device_get_softc(sc->sis_miibus);

	/* Set MAC address */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[0]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[1]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[2]);
	} else {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[0]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[1]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[2]);
	}

	/* Init circular TX/RX lists. */
	if (sis_ring_init(sc) != 0) {
		device_printf(sc->sis_dev,
		    "initialization failed: no memory for rx buffers\n");
		sis_stop(sc);
		return;
	}

	/*
	 * Short Cable Receive Errors (MP21.E)
	 * also: Page 78 of the DP83815 data sheet (September 2002 version)
	 * recommends the following register settings "for optimum
	 * performance." for rev 15C. Set this also for 15D parts as
	 * they require it in practice.
	 */
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
		/* set val for c2 */
		CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
		/* load/kill c2 */
		CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
		/* raise SD off, from 4 to c */
		CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
	}

	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_ARP);
		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_PERFECT);
	}

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
	} else {
		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
	} else {
		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
	}

	/*
	 * Load the multicast filter.
	 */
	if (sc->sis_type == SIS_TYPE_83815)
		sis_setmulti_ns(sc);
	else
		sis_setmulti_sis(sc);

	/* Turn the receive filter on */
	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, SIS_RX_LISTPTR, sc->sis_rx_paddr);
	CSR_WRITE_4(sc, SIS_TX_LISTPTR, sc->sis_tx_paddr);

	/*
	 * SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of
	 * the PCI bus. When this bit is set, the Max DMA Burst Size
	 * for TX/RX DMA should be no larger than 16 double words.
	 */
	if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN) {
		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64);
	} else {
		CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256);
	}

	/* Accept Long Packets for VLAN support */
	SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER);

	/* Set TX configuration */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) {
		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
	} else {
		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
	}

	/* Set full/half duplex mode. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		SIS_SETBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	} else {
		SIS_CLRBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT | SIS_TXCFG_IGN_CARR));
		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	}

	if (sc->sis_type == SIS_TYPE_83816) {
		/*
		 * MPII03.D: Half Duplex Excessive Collisions.
		 * Also page 49 in 83816 manual
		 */
		SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D);
	}

	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A &&
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
		uint32_t reg;

		/*
		 * Short Cable Receive Errors (MP21.E)
		 */
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
		reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff;
		CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000);
		DELAY(100000);
		reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff;
		if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) {
			device_printf(sc->sis_dev,
			    "Applying short cable fix (reg=%x)\n", reg);
			CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8);
			SIS_SETBIT(sc, NS_PHY_DSPCFG, 0x20);
		}
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
	}

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
#ifdef DEVICE_POLLING
	/*
	 * ... only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_4(sc, SIS_IER, 0);
	else
#endif
	CSR_WRITE_4(sc, SIS_IER, 1);

	/* Enable receiver and transmitter. */
	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

#ifdef notdef
	mii_mediachg(mii);
#endif

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (!sc->in_tick)
		callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc);
}

/*
 * Set media options.
 */
static int
sis_ifmedia_upd(struct ifnet *ifp)
{
	struct sis_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	SIS_LOCK(sc);
	mii = device_get_softc(sc->sis_miibus);
	sc->sis_link = 0;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);
	SIS_UNLOCK(sc);

	return (0);
}

/*
 * Report current media status.
 */
static void
sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sis_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	SIS_LOCK(sc);
	mii = device_get_softc(sc->sis_miibus);
	mii_pollstat(mii);
	SIS_UNLOCK(sc);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sis_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		SIS_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			sis_initl(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			sis_stop(sc);
		}
		SIS_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		SIS_LOCK(sc);
		if (sc->sis_type == SIS_TYPE_83815)
			sis_setmulti_ns(sc);
		else
			sis_setmulti_sis(sc);
		SIS_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->sis_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		/* ok, disable interrupts */
#ifdef DEVICE_POLLING
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(sis_poll, ifp);
			if (error)
				return (error);
			SIS_LOCK(sc);
			/* Disable interrupts */
			CSR_WRITE_4(sc, SIS_IER, 0);
			ifp->if_capenable |= IFCAP_POLLING;
			SIS_UNLOCK(sc);
			return (error);
		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			SIS_LOCK(sc);
			CSR_WRITE_4(sc, SIS_IER, 1);
			ifp->if_capenable &= ~IFCAP_POLLING;
			SIS_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
sis_watchdog(struct sis_softc *sc)
{

	SIS_LOCK_ASSERT(sc);
	if (sc->sis_stopped) {
		SIS_UNLOCK(sc);
		return;
	}

	if (sc->sis_watchdog_timer == 0 || --sc->sis_watchdog_timer > 0)
		return;

	device_printf(sc->sis_dev, "watchdog timeout\n");
	sc->sis_ifp->if_oerrors++;

	sis_stop(sc);
	sis_reset(sc);
	sis_initl(sc);

	if (!IFQ_DRV_IS_EMPTY(&sc->sis_ifp->if_snd))
		sis_startl(sc->sis_ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
sis_stop(struct sis_softc *sc)
{
	int i;
	struct ifnet *ifp;
	struct sis_desc *dp;

	if (sc->sis_stopped)
		return;
	SIS_LOCK_ASSERT(sc);
	ifp = sc->sis_ifp;
	sc->sis_watchdog_timer = 0;

	callout_stop(&sc->sis_stat_ch);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	CSR_WRITE_4(sc, SIS_IER, 0);
	CSR_WRITE_4(sc, SIS_IMR, 0);
	CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE | SIS_CSR_RX_DISABLE);
	DELAY(1000);
	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);

	sc->sis_link = 0;

	/*
	 * Free data in the RX lists.
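	 * The per-mbuf DMA maps unloaded and destroyed here were
	 * created in sis_newbuf().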
	 */
	dp = &sc->sis_rx_list[0];
	for (i = 0; i < SIS_RX_LIST_CNT; i++, dp++) {
		if (dp->sis_mbuf == NULL)
			continue;
		bus_dmamap_unload(sc->sis_tag, dp->sis_map);
		bus_dmamap_destroy(sc->sis_tag, dp->sis_map);
		m_freem(dp->sis_mbuf);
		dp->sis_mbuf = NULL;
	}
	bzero(sc->sis_rx_list, SIS_RX_LIST_SZ);

	/*
	 * Free the TX list buffers.
	 */
	dp = &sc->sis_tx_list[0];
	for (i = 0; i < SIS_TX_LIST_CNT; i++, dp++) {
		if (dp->sis_mbuf == NULL)
			continue;
		bus_dmamap_unload(sc->sis_tag, dp->sis_map);
		bus_dmamap_destroy(sc->sis_tag, dp->sis_map);
		m_freem(dp->sis_mbuf);
		dp->sis_mbuf = NULL;
	}

	bzero(sc->sis_tx_list, SIS_TX_LIST_SZ);

	sc->sis_stopped = 1;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
sis_shutdown(device_t dev)
{
	struct sis_softc *sc;

	sc = device_get_softc(dev);
	SIS_LOCK(sc);
	sis_reset(sc);
	sis_stop(sc);
	SIS_UNLOCK(sc);
	return (0);
}

static device_method_t sis_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sis_probe),
	DEVMETHOD(device_attach,	sis_attach),
	DEVMETHOD(device_detach,	sis_detach),
	DEVMETHOD(device_shutdown,	sis_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sis_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sis_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sis_miibus_statchg),

	{ 0, 0 }
};

static driver_t sis_driver = {
	"sis",
	sis_methods,
	sizeof(struct sis_softc)
};

static devclass_t sis_devclass;

DRIVER_MODULE(sis, pci, sis_driver, sis_devclass, 0, 0);
DRIVER_MODULE(miibus, sis, miibus_driver, miibus_devclass, 0, 0);