/*
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ee.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
 * series chips and several workalikes including the following:
 *
 * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
 * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
 * Lite-On 82c168/82c169 PNIC (www.litecom.com)
 * ASIX Electronics AX88140A (www.asix.com.tw)
 * ASIX Electronics AX88141 (www.asix.com.tw)
 * ADMtek AL981 (www.admtek.com.tw)
 * ADMtek AN985 (www.admtek.com.tw)
 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
 * Accton EN1217 (www.accton.com)
 * Xircom X3201 (www.xircom.com)
 * Abocom FE2500
 * Conexant LANfinity (www.conexant.com)
 *
 * Datasheets for the 21143 are available at developer.intel.com.
 * Datasheets for the clone parts can be found at their respective sites.
 * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
 * The PNIC II is essentially a Macronix 98715A chip; the only difference
 * worth noting is that its multicast hash table is only 128 bits wide
 * instead of 512.
 *
 * Written by Bill Paul <wpaul@ee.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The Intel 21143 is the successor to the DEC 21140. It is basically
 * the same as the 21140 but with a few new features. The 21143 supports
 * four kinds of media attachments:
 *
 * o MII port, for 10Mbps and 100Mbps support and NWAY
 *   autonegotiation provided by an external PHY.
 * o SYM port, for symbol mode 100Mbps support.
 * o 10baseT port.
 * o AUI/BNC port.
 *
 * The 100Mbps SYM port and 10baseT port can be used together in
 * combination with the internal NWAY support to create a 10/100
 * autosensing configuration.
 *
 * Note that not all tulip workalikes are handled in this driver: we only
 * deal with those which are relatively well behaved. The Winbond is
 * handled separately due to its different register offsets and the
 * special handling needed for its various bugs. The PNIC is handled
 * here, but I'm not thrilled about it.
 *
 * All of the workalike chips use some form of MII transceiver support
 * with the exception of the Macronix chips, which also have a SYM port.
 * The ASIX AX88140A is also documented to have a SYM port, but all
 * the cards I've seen use an MII transceiver, probably because the
 * AX88140A doesn't support internal NWAY.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

#define DC_USEIOSPACE
#ifdef __alpha__
#define SRM_MEDIA
#endif

#include <pci/if_dcreg.h>

MODULE_DEPEND(dc, miibus, 1, 1, 1);

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#ifndef lint
static const char rcsid[] =
	"$FreeBSD$";
#endif

/*
 * Various supported device vendors/types and their names.
 */
static struct dc_type dc_devs[] = {
	{ DC_VENDORID_DEC, DC_DEVICEID_21143,
		"Intel 21143 10/100BaseTX" },
	{ DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009,
		"Davicom DM9009 10/100BaseTX" },
	{ DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100,
		"Davicom DM9100 10/100BaseTX" },
	{ DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102,
		"Davicom DM9102 10/100BaseTX" },
	{ DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102,
		"Davicom DM9102A 10/100BaseTX" },
	{ DC_VENDORID_ADMTEK, DC_DEVICEID_AL981,
		"ADMtek AL981 10/100BaseTX" },
	{ DC_VENDORID_ADMTEK, DC_DEVICEID_AN985,
		"ADMtek AN985 10/100BaseTX" },
	{ DC_VENDORID_ASIX, DC_DEVICEID_AX88140A,
		"ASIX AX88140A 10/100BaseTX" },
	{ DC_VENDORID_ASIX, DC_DEVICEID_AX88140A,
		"ASIX AX88141 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_98713,
		"Macronix 98713 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_98713,
		"Macronix 98713A 10/100BaseTX" },
	{ DC_VENDORID_CP, DC_DEVICEID_98713_CP,
		"Compex RL100-TX 10/100BaseTX" },
	{ DC_VENDORID_CP, DC_DEVICEID_98713_CP,
		"Compex RL100-TX 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_987x5,
		"Macronix 98715/98715A 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_987x5,
		"Macronix 98715AEC-C 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_987x5,
		"Macronix 98725 10/100BaseTX" },
	{ DC_VENDORID_MX, DC_DEVICEID_98727,
		"Macronix 98727/98732 10/100BaseTX" },
	{ DC_VENDORID_LO, DC_DEVICEID_82C115,
		"LC82C115 PNIC II 10/100BaseTX" },
	{ DC_VENDORID_LO, DC_DEVICEID_82C168,
		"82c168 PNIC 10/100BaseTX" },
	{ DC_VENDORID_LO, DC_DEVICEID_82C168,
		"82c169 PNIC 10/100BaseTX" },
	{ DC_VENDORID_ACCTON, DC_DEVICEID_EN1217,
		"Accton EN1217 10/100BaseTX" },
	{ DC_VENDORID_ACCTON, DC_DEVICEID_EN2242,
		"Accton EN2242 MiniPCI 10/100BaseTX" },
	{ DC_VENDORID_XIRCOM, DC_DEVICEID_X3201,
		"Xircom X3201 10/100BaseTX" },
	{ DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500,
		"Abocom FE2500 10/100BaseTX" },
	{ DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112,
		"Conexant LANfinity MiniPCI 10/100BaseTX" },
	{ DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX,
		"Hawking CB102 CardBus 10/100" },
	{ 0, 0, NULL }
};

static int dc_probe (device_t);
static int dc_attach (device_t);
static int dc_detach (device_t);
static int dc_suspend (device_t);
static int dc_resume (device_t);
static void dc_acpi (device_t);
static struct dc_type *dc_devtype (device_t);
static int dc_newbuf (struct dc_softc *, int, struct mbuf *);
static int dc_encap (struct dc_softc *, struct mbuf *, u_int32_t *);
static int dc_coal (struct dc_softc *, struct mbuf **);
static void dc_pnic_rx_bug_war (struct dc_softc *, int);
static int dc_rx_resync (struct dc_softc *);
static void dc_rxeof (struct dc_softc *);
static void dc_txeof (struct dc_softc *);
static void dc_tick (void *);
static void dc_tx_underrun (struct dc_softc *);
static void dc_intr (void *);
static void dc_start (struct ifnet *);
static int dc_ioctl (struct ifnet *, u_long, caddr_t);
static void dc_init (void *);
static void dc_stop (struct dc_softc *);
static void dc_watchdog (struct ifnet *);
static void dc_shutdown (device_t);
static int dc_ifmedia_upd (struct ifnet *);
static void dc_ifmedia_sts (struct ifnet *, struct ifmediareq *);

static void dc_delay (struct dc_softc *);
static void dc_eeprom_idle (struct dc_softc *);
static void dc_eeprom_putbyte (struct dc_softc *, int);
static void dc_eeprom_getword (struct dc_softc *, int, u_int16_t *);
static void dc_eeprom_getword_pnic
			(struct dc_softc *, int, u_int16_t *);
static void dc_eeprom_getword_xircom
			(struct dc_softc *, int, u_int16_t *);
static void dc_eeprom_width (struct dc_softc *);
static void dc_read_eeprom (struct dc_softc *, caddr_t, int, int, int);

static void dc_mii_writebit (struct dc_softc *, int);
static int dc_mii_readbit (struct dc_softc *);
static void dc_mii_sync (struct dc_softc *);
static void dc_mii_send (struct dc_softc *, u_int32_t, int);
static int dc_mii_readreg (struct dc_softc *, struct dc_mii_frame *);
static int dc_mii_writereg (struct dc_softc *, struct dc_mii_frame *);
static int dc_miibus_readreg (device_t, int, int);
static int dc_miibus_writereg (device_t, int, int, int);
static void dc_miibus_statchg (device_t);
static void dc_miibus_mediainit (device_t);

static void dc_setcfg (struct dc_softc *, int);
static u_int32_t dc_crc_le (struct dc_softc *, caddr_t);
static u_int32_t dc_crc_be (caddr_t);
static void dc_setfilt_21143 (struct dc_softc *);
static void dc_setfilt_asix (struct dc_softc *);
static void dc_setfilt_admtek (struct dc_softc *);
static void dc_setfilt_xircom (struct dc_softc *);

static void dc_setfilt (struct dc_softc *);

static void dc_reset (struct dc_softc *);
static int dc_list_rx_init (struct dc_softc *);
static int dc_list_tx_init (struct dc_softc *);

static void dc_read_srom (struct dc_softc *, int);
static void dc_parse_21143_srom (struct dc_softc *);
static void dc_decode_leaf_sia (struct dc_softc *, struct dc_eblock_sia *);
static void dc_decode_leaf_mii (struct dc_softc *, struct dc_eblock_mii *);
static void dc_decode_leaf_sym (struct dc_softc *, struct dc_eblock_sym *);
static void dc_apply_fixup (struct dc_softc *, int);

#ifdef DC_USEIOSPACE
#define DC_RES			SYS_RES_IOPORT
#define DC_RID			DC_PCI_CFBIO
#else
#define DC_RES			SYS_RES_MEMORY
#define DC_RID			DC_PCI_CFBMA
#endif

static device_method_t dc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		dc_probe),
	DEVMETHOD(device_attach,	dc_attach),
	DEVMETHOD(device_detach,	dc_detach),
	DEVMETHOD(device_suspend,	dc_suspend),
	DEVMETHOD(device_resume,	dc_resume),
	DEVMETHOD(device_shutdown,	dc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	dc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	dc_miibus_writereg),
	DEVMETHOD(miibus_statchg,	dc_miibus_statchg),
	DEVMETHOD(miibus_mediainit,	dc_miibus_mediainit),

	{ 0, 0 }
};

static driver_t dc_driver = {
	"dc",
	dc_methods,
	sizeof(struct dc_softc)
};

static devclass_t dc_devclass;
#ifdef __i386__
static int dc_quick = 1;
SYSCTL_INT(_hw, OID_AUTO, dc_quick, CTLFLAG_RW,
	&dc_quick, 0, "do not mdevget in dc driver");
#endif

DRIVER_MODULE(if_dc, cardbus, dc_driver, dc_devclass, 0, 0);
DRIVER_MODULE(if_dc, pci, dc_driver, dc_devclass, 0, 0);
DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, 0, 0);

#define DC_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define DC_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

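/*
 * DC_SETBIT()/DC_CLRBIT() are plain read-modify-write helpers for the
 * chip's CSR space.  For example, DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON)
 * expands to roughly:
 *
 *	CSR_WRITE_4(sc, DC_NETCFG,
 *	    CSR_READ_4(sc, DC_NETCFG) | DC_NETCFG_RX_ON);
 *
 * They are not atomic with respect to other CSR accesses, so any
 * serialization is presumably up to the caller.
 */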
#define SIO_SET(x)	DC_SETBIT(sc, DC_SIO, (x))
#define SIO_CLR(x)	DC_CLRBIT(sc, DC_SIO, (x))

#define IS_MPSAFE	0

static void
dc_delay(sc)
	struct dc_softc *sc;
{
	int idx;

	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, DC_BUSCTL);
}

static void
dc_eeprom_width(sc)
	struct dc_softc *sc;
{
	int i;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	for (i = 3; i--;) {
		if (6 & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	for (i = 1; i <= 12; i++) {
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
			dc_delay(sc);
			break;
		}
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	if (i < 4 || i > 12)
		sc->dc_romwidth = 6;
	else
		sc->dc_romwidth = i;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);
}

static void
dc_eeprom_idle(sc)
	struct dc_softc *sc;
{
	register int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	for (i = 0; i < 25; i++) {
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);
	CSR_WRITE_4(sc, DC_SIO, 0x00000000);

	return;
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
dc_eeprom_putbyte(sc, addr)
	struct dc_softc *sc;
	int addr;
{
	register int d, i;

	d = DC_EECMD_READ >> 6;
	for (i = 3; i--; ) {
		if (d & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = sc->dc_romwidth; i--;) {
		if (addr & (1 << i)) {
			SIO_SET(DC_SIO_EE_DATAIN);
		} else {
			SIO_CLR(DC_SIO_EE_DATAIN);
		}
		dc_delay(sc);
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	return;
}

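/*
 * Taken together, dc_eeprom_putbyte() and dc_eeprom_getword() below
 * bit-bang a conventional MicroWire (93C46-style) serial EEPROM read:
 * a 3-bit read opcode and a dc_romwidth-bit word address are clocked
 * out on EE_DATAIN, MSB first, and then 16 data bits are clocked back
 * in on EE_DATAOUT.  The address width (typically 6 or 8 bits) is what
 * dc_eeprom_width() above tries to discover empirically.
 */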
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The PNIC 82c168/82c169 has its own non-standard way to read
 * the EEPROM.
 */
static void
dc_eeprom_getword_pnic(sc, addr, dest)
	struct dc_softc *sc;
	int addr;
	u_int16_t *dest;
{
	register int i;
	u_int32_t r;

	CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr);

	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(1);
		r = CSR_READ_4(sc, DC_SIO);
		if (!(r & DC_PN_SIOCTL_BUSY)) {
			*dest = (u_int16_t)(r & 0xFFFF);
			return;
		}
	}

	return;
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The Xircom X3201 has its own non-standard way to read
 * the EEPROM, too.
 */
static void
dc_eeprom_getword_xircom(sc, addr, dest)
	struct dc_softc *sc;
	int addr;
	u_int16_t *dest;
{
	SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);

	addr *= 2;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff;
	addr += 1;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8;

	SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
	return;
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
dc_eeprom_getword(sc, addr, dest)
	struct dc_softc *sc;
	int addr;
	u_int16_t *dest;
{
	register int i;
	u_int16_t word = 0;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	dc_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
			word |= i;
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	*dest = word;

	return;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
dc_read_eeprom(sc, dest, off, cnt, swap)
	struct dc_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
	int swap;
{
	int i;
	u_int16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		if (DC_IS_PNIC(sc))
			dc_eeprom_getword_pnic(sc, off + i, &word);
		else if (DC_IS_XIRCOM(sc))
			dc_eeprom_getword_xircom(sc, off + i, &word);
		else
			dc_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return;
}

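/*
 * The MII management frames built by dc_mii_readreg()/dc_mii_writereg()
 * below follow the usual IEEE 802.3 MDIO format: a 32-bit preamble
 * (dc_mii_sync()), a 2-bit start delimiter, a 2-bit opcode, a 5-bit PHY
 * address and a 5-bit register address, followed by a turnaround and 16
 * data bits.  Writes drive the whole frame through dc_mii_send(), while
 * reads turn the bus around after the register address and sample the
 * data one bit at a time with dc_mii_readbit().
 */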
/*
 * The following two routines are taken from the Macronix 98713
 * Application Notes pp.19-21.
 */
/*
 * Write a bit to the MII bus.
 */
static void
dc_mii_writebit(sc, bit)
	struct dc_softc *sc;
	int bit;
{
	if (bit)
		CSR_WRITE_4(sc, DC_SIO,
		    DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT);
	else
		CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);

	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);

	return;
}

/*
 * Read a bit from the MII bus.
 */
static int
dc_mii_readbit(sc)
	struct dc_softc *sc;
{
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR);
	CSR_READ_4(sc, DC_SIO);
	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
		return(1);

	return(0);
}

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
dc_mii_sync(sc)
	struct dc_softc *sc;
{
	register int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);

	for (i = 0; i < 32; i++)
		dc_mii_writebit(sc, 1);

	return;
}

/*
 * Clock a series of bits through the MII.
 */
static void
dc_mii_send(sc, bits, cnt)
	struct dc_softc *sc;
	u_int32_t bits;
	int cnt;
{
	int i;

	for (i = (0x1 << (cnt - 1)); i; i >>= 1)
		dc_mii_writebit(sc, bits & i);
}

/*
 * Read a PHY register through the MII.
 */
static int
dc_mii_readreg(sc, frame)
	struct dc_softc *sc;
	struct dc_mii_frame *frame;

{
	int i, ack;

	DC_LOCK(sc);

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);

#ifdef notdef
	/* Idle bit */
	dc_mii_writebit(sc, 1);
	dc_mii_writebit(sc, 0);
#endif

	/* Check for ack */
	ack = dc_mii_readbit(sc);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			dc_mii_readbit(sc);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		if (!ack) {
			if (dc_mii_readbit(sc))
				frame->mii_data |= i;
		}
	}

fail:

	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	DC_UNLOCK(sc);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
dc_mii_writereg(sc, frame)
	struct dc_softc *sc;
	struct dc_mii_frame *frame;

{
	DC_LOCK(sc);
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_WRITEOP;
	frame->mii_turnaround = DC_MII_TURNAROUND;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);
	dc_mii_send(sc, frame->mii_turnaround, 2);
	dc_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	DC_UNLOCK(sc);

	return(0);
}

static int
dc_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct dc_mii_frame frame;
	struct dc_softc *sc;
	int i, rval, phy_reg = 0;

	sc = device_get_softc(dev);
	bzero((char *)&frame, sizeof(frame));

	/*
	 * Note: both the AL981 and AN985 have internal PHYs,
	 * however the AL981 provides direct access to the PHY
	 * registers while the AN985 uses a serial MII interface.
	 * The AN985's MII interface is also buggy in that you
	 * can read from any MII address (0 to 31), but only address 1
	 * behaves normally. To deal with both cases, we pretend
	 * that the PHY is at MII address 1.
	 */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return(0);

	/*
	 * Note: the ukphy probes of the RS7112 report a PHY at
	 * MII address 0 (possibly HomePNA?) and 1 (ethernet),
	 * so we only respond to the correct one.
	 */
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return(0);

	if (sc->dc_pmode != DC_PMODE_MII) {
		if (phy == (MII_NPHY - 1)) {
			switch(reg) {
			case MII_BMSR:
				/*
				 * Fake something to make the probe
				 * code think there's a PHY here.
				 */
				return(BMSR_MEDIAMASK);
				break;
			case MII_PHYIDR1:
				if (DC_IS_PNIC(sc))
					return(DC_VENDORID_LO);
				return(DC_VENDORID_DEC);
				break;
			case MII_PHYIDR2:
				if (DC_IS_PNIC(sc))
					return(DC_DEVICEID_82C168);
				return(DC_DEVICEID_21143);
				break;
			default:
				return(0);
				break;
			}
		} else
			return(0);
	}

	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
		    (phy << 23) | (reg << 18));
		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(1);
			rval = CSR_READ_4(sc, DC_PN_MII);
			if (!(rval & DC_PN_MII_BUSY)) {
				rval &= 0xFFFF;
				return(rval == 0xFFFF ? 0 : rval);
			}
		}
		return(0);
	}

	if (DC_IS_COMET(sc)) {
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("dc%d: phy_read: bad phy register %x\n",
			    sc->dc_unit, reg);
			return(0);
			break;
		}

		rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;

		if (rval == 0xFFFF)
			return(0);
		return(rval);
	}

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_readreg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return(frame.mii_data);
}

static int
dc_miibus_writereg(dev, phy, reg, data)
	device_t dev;
	int phy, reg, data;
{
	struct dc_softc *sc;
	struct dc_mii_frame frame;
	int i, phy_reg = 0;

	sc = device_get_softc(dev);
	bzero((char *)&frame, sizeof(frame));

	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return(0);

	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return(0);

	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
		    (phy << 23) | (reg << 10) | data);
		for (i = 0; i < DC_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
				break;
		}
		return(0);
	}

	if (DC_IS_COMET(sc)) {
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("dc%d: phy_write: bad phy register %x\n",
			    sc->dc_unit, reg);
			return(0);
			break;
		}

		CSR_WRITE_4(sc, phy_reg, data);
		return(0);
	}

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_writereg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return(0);
}

static void
dc_miibus_statchg(dev)
	device_t dev;
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = device_get_softc(dev);
	if (DC_IS_ADMTEK(sc))
		return;

	mii = device_get_softc(sc->dc_miibus);
	ifm = &mii->mii_media;
	if (DC_IS_DAVICOM(sc) &&
	    IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
		dc_setcfg(sc, ifm->ifm_media);
		sc->dc_if_media = ifm->ifm_media;
	} else {
		dc_setcfg(sc, mii->mii_media_active);
		sc->dc_if_media = mii->mii_media_active;
	}

	return;
}

/*
 * Special support for DM9102A cards with HomePNA PHYs. Note:
 * with the Davicom DM9102A/DM9801 eval board that I have, it seems
 * to be impossible to talk to the management interface of the DM9801
 * PHY (its MDIO pin is not connected to anything). Consequently,
 * the driver has to just 'know' about the additional mode and deal
 * with it itself. *sigh*
 */
static void
dc_miibus_mediainit(dev)
	device_t dev;
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;
	int rev;

	rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->dc_miibus);
	ifm = &mii->mii_media;

	if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A)
		ifmedia_add(ifm, IFM_ETHER|IFM_HPNA_1, 0, NULL);

	return;
}

#define DC_POLY		0xEDB88320
#define DC_BITS_512	9
#define DC_BITS_128	7
#define DC_BITS_64	6

static u_int32_t
dc_crc_le(sc, addr)
	struct dc_softc *sc;
	caddr_t addr;
{
	u_int32_t idx, bit, data, crc;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? DC_POLY : 0);
	}

	/*
	 * The hash table on the PNIC II and the MX98715AEC-C/D/E
	 * chips is only 128 bits wide.
	 */
	if (sc->dc_flags & DC_128BIT_HASH)
		return (crc & ((1 << DC_BITS_128) - 1));

	/* The hash table on the MX98715BEC is only 64 bits wide. */
	if (sc->dc_flags & DC_64BIT_HASH)
		return (crc & ((1 << DC_BITS_64) - 1));

	/* Xircom's hash filtering table is different (read: weird) */
	/* Xircom uses the LEAST significant bits */
	if (DC_IS_XIRCOM(sc)) {
		if ((crc & 0x180) == 0x180)
			return (crc & 0x0F) + (crc & 0x70)*3 + (14 << 4);
		else
			return (crc & 0x1F) + ((crc >> 1) & 0xF0)*3 + (12 << 4);
	}

	return (crc & ((1 << DC_BITS_512) - 1));
}

/*
 * Calculate CRC of a multicast group address, return the lower 6 bits.
 */
static u_int32_t
dc_crc_be(addr)
	caddr_t addr;
{
	u_int32_t crc, carry;
	int i, j;
	u_int8_t c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* return the filter bit position */
	return((crc >> 26) & 0x0000003F);
}

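/*
 * How the two CRC routines above are used: for the chips programmed with
 * a setup frame (dc_setfilt_21143() below), dc_crc_le() yields an index
 * into the 512-bit (or 128/64-bit) hash table, and the bit is set with
 * sp[h >> 4] |= 1 << (h & 0xF).  For the ADMtek and ASIX parts,
 * dc_crc_be() returns the top six bits of the big-endian CRC, which pick
 * one bit out of the pair of 32-bit multicast hash registers (MAR0/MAR1).
 */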
/*
 * 21143-style RX filter setup routine. Filter programming is done by
 * downloading a special setup frame into the TX engine. 21143, Macronix,
 * PNIC, PNIC II and Davicom chips are programmed this way.
 *
 * We always program the chip using 'hash perfect' mode, i.e. one perfect
 * address (our node address) and a 512-bit hash filter for multicast
 * frames. We also sneak the broadcast address into the hash filter since
 * we need that too.
 */
static void
dc_setfilt_21143(sc)
	struct dc_softc *sc;
{
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;
	int i;

	ifp = &sc->arpcom.ac_if;

	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = (u_int32_t *)&sc->dc_cdata.dc_sbuf;
	bzero((char *)sp, DC_SFRAME_LEN);

	sframe->dc_data = vtophys(&sc->dc_cdata.dc_sbuf);
	sframe->dc_ctl = DC_SFRAME_LEN | DC_TXCTL_SETUP | DC_TXCTL_TLINK |
	    DC_FILTER_HASHPERF | DC_TXCTL_FINT;

	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)&sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_crc_le(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		sp[h >> 4] |= 1 << (h & 0xF);
	}

	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
		sp[h >> 4] |= 1 << (h & 0xF);
	}

	/* Set our MAC address */
	sp[39] = ((u_int16_t *)sc->arpcom.ac_enaddr)[0];
	sp[40] = ((u_int16_t *)sc->arpcom.ac_enaddr)[1];
	sp[41] = ((u_int16_t *)sc->arpcom.ac_enaddr)[2];

	sframe->dc_status = DC_TXSTAT_OWN;
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * The PNIC takes an exceedingly long time to process its
	 * setup frame; wait 10ms after posting the setup frame
	 * before proceeding, just so it has time to swallow its
	 * medicine.
	 */
	DELAY(10000);

	ifp->if_timer = 5;

	return;
}

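/*
 * A rough sketch of the 'hash perfect' setup frame layout that the
 * routine above relies on: the frame is treated as an array of
 * little-endian longwords, the low 16 bits of the first 32 longwords
 * hold the 512-bit multicast hash table (hence sp[h >> 4] and h & 0xF),
 * and the station address occupies 16 bits in each of longwords 39, 40
 * and 41, which is why the MAC address is stored into sp[39]..sp[41].
 */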
static void
dc_setfilt_admtek(sc)
	struct dc_softc *sc;
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct ifmultiaddr *ifma;

	ifp = &sc->arpcom.ac_if;

	/* Init our MAC address */
	CSR_WRITE_4(sc, DC_AL_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, DC_AL_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AL_MAR0, 0);
	CSR_WRITE_4(sc, DC_AL_MAR1, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
		return;

	/* now program new ones */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_crc_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}

	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);

	return;
}

static void
dc_setfilt_asix(sc)
	struct dc_softc *sc;
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct ifmultiaddr *ifma;

	ifp = &sc->arpcom.ac_if;

	/* Init our MAC address */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/*
	 * The ASIX chip has a special bit to enable reception
	 * of broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
		return;

	/* now program new ones */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_crc_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}

	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);

	return;
}

static void
dc_setfilt_xircom(sc)
	struct dc_softc *sc;
{
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;
	int i;

	ifp = &sc->arpcom.ac_if;
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = (u_int32_t *)&sc->dc_cdata.dc_sbuf;
	bzero((char *)sp, DC_SFRAME_LEN);

	sframe->dc_data = vtophys(&sc->dc_cdata.dc_sbuf);
	sframe->dc_ctl = DC_SFRAME_LEN | DC_TXCTL_SETUP | DC_TXCTL_TLINK |
	    DC_FILTER_HASHPERF | DC_TXCTL_FINT;

	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)&sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_crc_le(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		sp[h >> 4] |= 1 << (h & 0xF);
	}

	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
		sp[h >> 4] |= 1 << (h & 0xF);
	}

	/* Set our MAC address */
	sp[0] = ((u_int16_t *)sc->arpcom.ac_enaddr)[0];
	sp[1] = ((u_int16_t *)sc->arpcom.ac_enaddr)[1];
	sp[2] = ((u_int16_t *)sc->arpcom.ac_enaddr)[2];

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	ifp->if_flags |= IFF_RUNNING;
	sframe->dc_status = DC_TXSTAT_OWN;
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * wait some time...
	 */
	DELAY(1000);

	ifp->if_timer = 5;

	return;
}

static void
dc_setfilt(sc)
	struct dc_softc *sc;
{
	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);

	if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);

	if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);

	if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);

	return;
}

/*
 * In order to fiddle with the
 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle state.
 */
static void
dc_setcfg(sc, media)
	struct dc_softc *sc;
	int media;
{
	int i, restart = 0;
	u_int32_t isr;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE &&
			    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT)
			printf("dc%d: failed to force tx and "
			    "rx to idle state\n", sc->dc_unit);
	}

	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
				/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX|IFM_FDX : IFM_100_TX);
		}
	}

	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			/* there's a write enable bit here that reads as 1 */
			if (DC_IS_INTEL(sc)) {
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T|IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);

	return;
}

static void
dc_reset(sc)
	struct dc_softc *sc;
{
	register int i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_CONEXANT(sc) ||
	    DC_IS_XIRCOM(sc) || DC_IS_INTEL(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		printf("dc%d: reset never completed!\n", sc->dc_unit);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	return;
}

static struct dc_type *
dc_devtype(dev)
	device_t dev;
{
	struct dc_type *t;
	u_int32_t rev;

	t = dc_devs;

	while (t->dc_name != NULL) {
		if ((pci_get_vendor(dev) == t->dc_vid) &&
		    (pci_get_device(dev) == t->dc_did)) {
			/* Check the PCI revision */
			rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF;
			if (t->dc_did == DC_DEVICEID_98713 &&
			    rev >= DC_REVISION_98713A)
				t++;
			if (t->dc_did == DC_DEVICEID_98713_CP &&
			    rev >= DC_REVISION_98713A)
				t++;
			if (t->dc_did == DC_DEVICEID_987x5 &&
			    rev >= DC_REVISION_98715AEC_C)
				t++;
			if (t->dc_did == DC_DEVICEID_987x5 &&
			    rev >= DC_REVISION_98725)
				t++;
			if (t->dc_did == DC_DEVICEID_AX88140A &&
			    rev >= DC_REVISION_88141)
				t++;
			if (t->dc_did == DC_DEVICEID_82C168 &&
			    rev >= DC_REVISION_82C169)
				t++;
			if (t->dc_did == DC_DEVICEID_DM9102 &&
			    rev >= DC_REVISION_DM9102A)
				t++;
			return(t);
		}
		t++;
	}

	return(NULL);
}

/*
 * Probe for a 21143 or clone chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 * We do a little bit of extra work to identify the exact type of
 * chip. The MX98713 and MX98713A have the same PCI vendor/device ID,
 * but different revision IDs. The same is true for 98715/98715A
 * chips and the 98725, as well as the ASIX and ADMtek chips. In some
 * cases, the exact chip revision affects driver behavior.
 */
static int
dc_probe(dev)
	device_t dev;
{
	struct dc_type *t;

	t = dc_devtype(dev);

	if (t != NULL) {
		device_set_desc(dev, t->dc_name);
		return(0);
	}

	return(ENXIO);
}

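/*
 * dc_acpi() below deals with chips that the firmware or a previous
 * driver has left in a low power state: moving them back to D0 may not
 * preserve the BARs or the interrupt line register, so the routine saves
 * those PCI config words, flips the power state, and then writes the
 * saved values back.
 */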
static void
dc_acpi(dev)
	device_t dev;
{
	int unit;

	unit = device_get_unit(dev);

	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		u_int32_t iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, DC_PCI_CFBIO, 4);
		membase = pci_read_config(dev, DC_PCI_CFBMA, 4);
		irq = pci_read_config(dev, DC_PCI_CFIT, 4);

		/* Reset the power state. */
		printf("dc%d: chip is in D%d power mode "
		    "-- setting to D0\n", unit,
		    pci_get_powerstate(dev));
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, DC_PCI_CFBIO, iobase, 4);
		pci_write_config(dev, DC_PCI_CFBMA, membase, 4);
		pci_write_config(dev, DC_PCI_CFIT, irq, 4);
	}

	return;
}

static void
dc_apply_fixup(sc, media)
	struct dc_softc *sc;
	int media;
{
	struct dc_mediainfo *m;
	u_int8_t *p;
	int i;
	u_int32_t reg;

	m = sc->dc_mi;

	while (m != NULL) {
		if (m->dc_media == media)
			break;
		m = m->dc_next;
	}

	if (m == NULL)
		return;

	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}

	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}

	return;
}

static void
dc_decode_leaf_sia(sc, l)
	struct dc_softc *sc;
	struct dc_eblock_sia *l;
{
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
	bzero(m, sizeof(struct dc_mediainfo));
	if (l->dc_sia_code == DC_SIA_CODE_10BT)
		m->dc_media = IFM_10_T;

	if (l->dc_sia_code == DC_SIA_CODE_10BT_FDX)
		m->dc_media = IFM_10_T|IFM_FDX;

	if (l->dc_sia_code == DC_SIA_CODE_10B2)
		m->dc_media = IFM_10_2;

	if (l->dc_sia_code == DC_SIA_CODE_10B5)
		m->dc_media = IFM_10_5;

	m->dc_gp_len = 2;
	m->dc_gp_ptr = (u_int8_t *)&l->dc_sia_gpio_ctl;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SIA;

	return;
}

static void
dc_decode_leaf_sym(sc, l)
	struct dc_softc *sc;
	struct dc_eblock_sym *l;
{
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
	bzero(m, sizeof(struct dc_mediainfo));
	if (l->dc_sym_code == DC_SYM_CODE_100BT)
		m->dc_media = IFM_100_TX;

	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
		m->dc_media = IFM_100_TX|IFM_FDX;

	m->dc_gp_len = 2;
	m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SYM;

	return;
}

static void
dc_decode_leaf_mii(sc, l)
	struct dc_softc *sc;
	struct dc_eblock_mii *l;
{
	u_int8_t *p;
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
	bzero(m, sizeof(struct dc_mediainfo));
	/* We abuse IFM_AUTO to represent MII. */
	m->dc_media = IFM_AUTO;
	m->dc_gp_len = l->dc_gpr_len;

	p = (u_int8_t *)l;
	p += sizeof(struct dc_eblock_mii);
	m->dc_gp_ptr = p;
	p += 2 * l->dc_gpr_len;
	m->dc_reset_len = *p;
	p++;
	m->dc_reset_ptr = p;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	return;
}

static void
dc_read_srom(sc, bits)
	struct dc_softc *sc;
	int bits;
{
	int size;

	size = 2 << bits;
	sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT);
	dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0);
}

static void
dc_parse_21143_srom(sc)
	struct dc_softc *sc;
{
	struct dc_leaf_hdr *lhdr;
	struct dc_eblock_hdr *hdr;
	int i, loff;
	char *ptr;

	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		switch(hdr->dc_type) {
		case DC_EBLOCK_MII:
			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
			break;
		case DC_EBLOCK_SIA:
			dc_decode_leaf_sia(sc, (struct dc_eblock_sia *)hdr);
			break;
		case DC_EBLOCK_SYM:
			dc_decode_leaf_sym(sc, (struct dc_eblock_sym *)hdr);
			break;
		default:
			/* Don't care. Yet. */
			break;
		}
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	return;
}

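/*
 * A rough sketch of the SROM layout that dc_parse_21143_srom() above
 * relies on: byte 27 of the SROM holds the offset of the info leaf for
 * the first port, the leaf header is followed by dc_mcnt media blocks,
 * and each block begins with a type byte (MII/SIA/SYM) plus a length
 * field whose low seven bits tell the parser how far to advance to the
 * next block.
 */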
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
dc_attach(dev)
	device_t dev;
{
	int tmp = 0;
	u_char eaddr[ETHER_ADDR_LEN];
	u_int32_t command;
	struct dc_softc *sc;
	struct ifnet *ifp;
	u_int32_t revision;
	int unit, error = 0, rid, mac_offset;
	u_int8_t *mac;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	bzero(sc, sizeof(struct dc_softc));

	mtx_init(&sc->dc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);

	/*
	 * Handle power management nonsense.
	 */
	dc_acpi(dev);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_IOPORT);
	pci_enable_io(dev, SYS_RES_MEMORY);
	command = pci_read_config(dev, PCIR_COMMAND, 4);

#ifdef DC_USEIOSPACE
	if (!(command & PCIM_CMD_PORTEN)) {
		printf("dc%d: failed to enable I/O ports!\n", unit);
		error = ENXIO;
		goto fail;
	}
#else
	if (!(command & PCIM_CMD_MEMEN)) {
		printf("dc%d: failed to enable memory mapping!\n", unit);
		error = ENXIO;
		goto fail;
	}
#endif

	rid = DC_RID;
	sc->dc_res = bus_alloc_resource(dev, DC_RES, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->dc_res == NULL) {
		printf("dc%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->dc_btag = rman_get_bustag(sc->dc_res);
	sc->dc_bhandle = rman_get_bushandle(sc->dc_res);

	/* Need this info to decide on a chip type. */
	sc->dc_info = dc_devtype(dev);
	revision = pci_read_config(dev, DC_PCI_CFRV, 4) & 0x000000FF;

	switch(sc->dc_info->dc_did) {
	case DC_DEVICEID_21143:
		sc->dc_type = DC_TYPE_21143;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		/* Save EEPROM contents so we can parse them later. */
		dc_eeprom_width(sc);
		dc_read_srom(sc, sc->dc_romwidth);
		break;
	case DC_DEVICEID_DM9009:
	case DC_DEVICEID_DM9100:
	case DC_DEVICEID_DM9102:
		sc->dc_type = DC_TYPE_DM9102;
		sc->dc_flags |= DC_TX_COALESCE|DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_TX_STORENFWD;
		sc->dc_pmode = DC_PMODE_MII;
		/* Increase the latency timer value. */
		command = pci_read_config(dev, DC_PCI_CFLT, 4);
		command &= 0xFFFF00FF;
		command |= 0x00008000;
		pci_write_config(dev, DC_PCI_CFLT, command, 4);
		break;
	case DC_DEVICEID_AL981:
		sc->dc_type = DC_TYPE_AL981;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		dc_eeprom_width(sc);
		dc_read_srom(sc, sc->dc_romwidth);
		break;
	case DC_DEVICEID_AN985:
	case DC_DEVICEID_FE2500:
	case DC_DEVICEID_EN2242:
	case DC_DEVICEID_HAWKING_PN672TX:
		sc->dc_type = DC_TYPE_AN985;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		dc_eeprom_width(sc);
		dc_read_srom(sc, sc->dc_romwidth);
		break;
	case DC_DEVICEID_98713:
	case DC_DEVICEID_98713_CP:
		if (revision < DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713;
		}
		if (revision >= DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713A;
			sc->dc_flags |= DC_21143_NWAY;
		}
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		break;
	case DC_DEVICEID_987x5:
	case DC_DEVICEID_EN1217:
		/*
		 * Macronix MX98715AEC-C/D/E parts have only a
		 * 128-bit hash table. We need to deal with these
		 * in the same manner as the PNIC II so that we
		 * get the right number of bits out of the
		 * CRC routine.
		 */
		if (revision >= DC_REVISION_98715AEC_C &&
		    revision < DC_REVISION_98725)
			sc->dc_flags |= DC_128BIT_HASH;
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY;
		break;
	case DC_DEVICEID_98727:
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY;
		break;
	case DC_DEVICEID_82C115:
		sc->dc_type = DC_TYPE_PNICII;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR|DC_128BIT_HASH;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY;
		break;
	case DC_DEVICEID_82C168:
		sc->dc_type = DC_TYPE_PNIC;
		sc->dc_flags |= DC_TX_STORENFWD|DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_PNIC_RX_BUG_WAR;
		sc->dc_pnic_rx_buf = malloc(DC_RXLEN * 5, M_DEVBUF, M_NOWAIT);
		if (revision < DC_REVISION_82C169)
			sc->dc_pmode = DC_PMODE_SYM;
		break;
	case DC_DEVICEID_AX88140A:
		sc->dc_type = DC_TYPE_ASIX;
		sc->dc_flags |= DC_TX_USE_TX_INTR|DC_TX_INTR_FIRSTFRAG;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVICEID_X3201:
		sc->dc_type = DC_TYPE_XIRCOM;
		sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE |
		    DC_TX_ALIGN;
		/*
		 * We don't actually need to coalesce, but we're doing
		 * it to obtain a double word aligned buffer.
		 * The DC_TX_COALESCE flag is required.
2059 */ 2060 sc->dc_pmode = DC_PMODE_MII; 2061 break; 2062 case DC_DEVICEID_RS7112: 2063 sc->dc_type = DC_TYPE_CONEXANT; 2064 sc->dc_flags |= DC_TX_INTR_ALWAYS; 2065 sc->dc_flags |= DC_REDUCED_MII_POLL; 2066 sc->dc_pmode = DC_PMODE_MII; 2067 dc_eeprom_width(sc); 2068 dc_read_srom(sc, sc->dc_romwidth); 2069 break; 2070 default: 2071 printf("dc%d: unknown device: %x\n", sc->dc_unit, 2072 sc->dc_info->dc_did); 2073 break; 2074 } 2075 2076 /* Save the cache line size. */ 2077 if (DC_IS_DAVICOM(sc)) 2078 sc->dc_cachesize = 0; 2079 else 2080 sc->dc_cachesize = pci_read_config(dev, 2081 DC_PCI_CFLT, 4) & 0xFF; 2082 2083 /* Reset the adapter. */ 2084 dc_reset(sc); 2085 2086 /* Take 21143 out of snooze mode */ 2087 if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) { 2088 command = pci_read_config(dev, DC_PCI_CFDD, 4); 2089 command &= ~(DC_CFDD_SNOOZE_MODE|DC_CFDD_SLEEP_MODE); 2090 pci_write_config(dev, DC_PCI_CFDD, command, 4); 2091 } 2092 2093 /* 2094 * Try to learn something about the supported media. 2095 * We know that ASIX and ADMtek and Davicom devices 2096 * will *always* be using MII media, so that's a no-brainer. 2097 * The tricky ones are the Macronix/PNIC II and the 2098 * Intel 21143. 2099 */ 2100 if (DC_IS_INTEL(sc)) 2101 dc_parse_21143_srom(sc); 2102 else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { 2103 if (sc->dc_type == DC_TYPE_98713) 2104 sc->dc_pmode = DC_PMODE_MII; 2105 else 2106 sc->dc_pmode = DC_PMODE_SYM; 2107 } else if (!sc->dc_pmode) 2108 sc->dc_pmode = DC_PMODE_MII; 2109 2110 /* 2111 * Get station address from the EEPROM. 2112 */ 2113 switch(sc->dc_type) { 2114 case DC_TYPE_98713: 2115 case DC_TYPE_98713A: 2116 case DC_TYPE_987x5: 2117 case DC_TYPE_PNICII: 2118 dc_read_eeprom(sc, (caddr_t)&mac_offset, 2119 (DC_EE_NODEADDR_OFFSET / 2), 1, 0); 2120 dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0); 2121 break; 2122 case DC_TYPE_PNIC: 2123 dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1); 2124 break; 2125 case DC_TYPE_DM9102: 2126 case DC_TYPE_21143: 2127 case DC_TYPE_ASIX: 2128 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); 2129 break; 2130 case DC_TYPE_AL981: 2131 case DC_TYPE_AN985: 2132 bcopy(&sc->dc_srom[DC_AL_EE_NODEADDR], (caddr_t)&eaddr, 2133 ETHER_ADDR_LEN); 2134 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_AL_EE_NODEADDR, 3, 0); 2135 break; 2136 case DC_TYPE_CONEXANT: 2137 bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR, &eaddr, 6); 2138 break; 2139 case DC_TYPE_XIRCOM: 2140 /* The MAC comes from the CIS */ 2141 mac = pci_get_ether(dev); 2142 if (!mac) { 2143 device_printf(dev, "No station address in CIS!\n"); 2144 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); 2145 error = ENXIO; 2146 goto fail; 2147 } 2148 bcopy(mac, eaddr, ETHER_ADDR_LEN); 2149 break; 2150 default: 2151 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); 2152 break; 2153 } 2154 2155 /* 2156 * A 21143 or clone chip was detected. Inform the world. 
2157 */ 2158 printf("dc%d: Ethernet address: %6D\n", unit, eaddr, ":"); 2159 2160 sc->dc_unit = unit; 2161 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); 2162 2163 sc->dc_ldata = contigmalloc(sizeof(struct dc_list_data), M_DEVBUF, 2164 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 2165 2166 if (sc->dc_ldata == NULL) { 2167 printf("dc%d: no memory for list buffers!\n", unit); 2168 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); 2169 error = ENXIO; 2170 goto fail; 2171 } 2172 2173 bzero(sc->dc_ldata, sizeof(struct dc_list_data)); 2174 2175 ifp = &sc->arpcom.ac_if; 2176 ifp->if_softc = sc; 2177 ifp->if_unit = unit; 2178 ifp->if_name = "dc"; 2179 /* XXX: bleah, MTU gets overwritten in ether_ifattach() */ 2180 ifp->if_mtu = ETHERMTU; 2181 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2182 ifp->if_ioctl = dc_ioctl; 2183 ifp->if_output = ether_output; 2184 ifp->if_start = dc_start; 2185 ifp->if_watchdog = dc_watchdog; 2186 ifp->if_init = dc_init; 2187 ifp->if_baudrate = 10000000; 2188 ifp->if_snd.ifq_maxlen = DC_TX_LIST_CNT - 1; 2189 2190 /* 2191 * Do MII setup. If this is a 21143, check for a PHY on the 2192 * MII bus after applying any necessary fixups to twiddle the 2193 * GPIO bits. If we don't end up finding a PHY, restore the 2194 * old selection (SIA only or SIA/SYM) and attach the dcphy 2195 * driver instead. 2196 */ 2197 if (DC_IS_INTEL(sc)) { 2198 dc_apply_fixup(sc, IFM_AUTO); 2199 tmp = sc->dc_pmode; 2200 sc->dc_pmode = DC_PMODE_MII; 2201 } 2202 2203 error = mii_phy_probe(dev, &sc->dc_miibus, 2204 dc_ifmedia_upd, dc_ifmedia_sts); 2205 2206 if (error && DC_IS_INTEL(sc)) { 2207 sc->dc_pmode = tmp; 2208 if (sc->dc_pmode != DC_PMODE_SIA) 2209 sc->dc_pmode = DC_PMODE_SYM; 2210 sc->dc_flags |= DC_21143_NWAY; 2211 mii_phy_probe(dev, &sc->dc_miibus, 2212 dc_ifmedia_upd, dc_ifmedia_sts); 2213 /* 2214 * For non-MII cards, we need to have the 21143 2215 * drive the LEDs. Except there are some systems 2216 * like the NEC VersaPro NoteBook PC which have no 2217 * LEDs, and twiddling these bits has adverse effects 2218 * on them. (I.e. you suddenly can't get a link.) 2219 */ 2220 if (pci_read_config(dev, DC_PCI_CSID, 4) != 0x80281033) 2221 sc->dc_flags |= DC_TULIP_LEDS; 2222 error = 0; 2223 } 2224 2225 if (error) { 2226 printf("dc%d: MII without any PHY!\n", sc->dc_unit); 2227 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); 2228 error = ENXIO; 2229 goto fail; 2230 } 2231 2232 if (DC_IS_XIRCOM(sc)) { 2233 /* 2234 * setup General Purpose Port mode and data so the tulip 2235 * can talk to the MII. 2236 */ 2237 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | 2238 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2239 DELAY(10); 2240 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | 2241 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2242 DELAY(10); 2243 } 2244 2245 if (DC_IS_ADMTEK(sc)) { 2246 /* 2247 * Set automatic TX underrun recovery for the ADMtek chips 2248 */ 2249 DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR); 2250 } 2251 2252 /* 2253 * Tell the upper layer(s) we support long frames. 
2254 */ 2255 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2256 ifp->if_capabilities |= IFCAP_VLAN_MTU; 2257 2258 callout_init(&sc->dc_stat_ch, IS_MPSAFE); 2259 2260 #ifdef SRM_MEDIA 2261 sc->dc_srm_media = 0; 2262 2263 /* Remember the SRM console media setting */ 2264 if (DC_IS_INTEL(sc)) { 2265 command = pci_read_config(dev, DC_PCI_CFDD, 4); 2266 command &= ~(DC_CFDD_SNOOZE_MODE|DC_CFDD_SLEEP_MODE); 2267 switch ((command >> 8) & 0xff) { 2268 case 3: 2269 sc->dc_srm_media = IFM_10_T; 2270 break; 2271 case 4: 2272 sc->dc_srm_media = IFM_10_T | IFM_FDX; 2273 break; 2274 case 5: 2275 sc->dc_srm_media = IFM_100_TX; 2276 break; 2277 case 6: 2278 sc->dc_srm_media = IFM_100_TX | IFM_FDX; 2279 break; 2280 } 2281 if (sc->dc_srm_media) 2282 sc->dc_srm_media |= IFM_ACTIVE | IFM_ETHER; 2283 } 2284 #endif 2285 2286 /* 2287 * Call MI attach routine. 2288 */ 2289 ether_ifattach(ifp, eaddr); 2290 2291 /* Allocate interrupt */ 2292 rid = 0; 2293 sc->dc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 2294 RF_SHAREABLE | RF_ACTIVE); 2295 2296 if (sc->dc_irq == NULL) { 2297 printf("dc%d: couldn't map interrupt\n", unit); 2298 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); 2299 error = ENXIO; 2300 goto fail; 2301 } 2302 2303 error = bus_setup_intr(dev, sc->dc_irq, INTR_TYPE_NET | 2304 (IS_MPSAFE ? INTR_MPSAFE : 0), 2305 dc_intr, sc, &sc->dc_intrhand); 2306 2307 if (error) { 2308 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq); 2309 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); 2310 printf("dc%d: couldn't set up irq\n", unit); 2311 } 2312 2313 fail: 2314 if (error != 0) 2315 mtx_destroy(&sc->dc_mtx); 2316 return (error); 2317 } 2318 2319 static int 2320 dc_detach(dev) 2321 device_t dev; 2322 { 2323 struct dc_softc *sc; 2324 struct ifnet *ifp; 2325 struct dc_mediainfo *m; 2326 2327 sc = device_get_softc(dev); 2328 2329 DC_LOCK(sc); 2330 2331 ifp = &sc->arpcom.ac_if; 2332 2333 dc_stop(sc); 2334 ether_ifdetach(ifp); 2335 2336 bus_generic_detach(dev); 2337 device_delete_child(dev, sc->dc_miibus); 2338 2339 bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand); 2340 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq); 2341 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); 2342 2343 contigfree(sc->dc_ldata, sizeof(struct dc_list_data), M_DEVBUF); 2344 if (sc->dc_pnic_rx_buf != NULL) 2345 free(sc->dc_pnic_rx_buf, M_DEVBUF); 2346 2347 while(sc->dc_mi != NULL) { 2348 m = sc->dc_mi->dc_next; 2349 free(sc->dc_mi, M_DEVBUF); 2350 sc->dc_mi = m; 2351 } 2352 free(sc->dc_srom, M_DEVBUF); 2353 2354 DC_UNLOCK(sc); 2355 mtx_destroy(&sc->dc_mtx); 2356 2357 return(0); 2358 } 2359 2360 /* 2361 * Initialize the transmit descriptors. 2362 */ 2363 static int 2364 dc_list_tx_init(sc) 2365 struct dc_softc *sc; 2366 { 2367 struct dc_chain_data *cd; 2368 struct dc_list_data *ld; 2369 int i, nexti; 2370 2371 cd = &sc->dc_cdata; 2372 ld = sc->dc_ldata; 2373 for (i = 0; i < DC_TX_LIST_CNT; i++) { 2374 nexti = (i == (DC_TX_LIST_CNT - 1)) ? 0 : i+1; 2375 ld->dc_tx_list[i].dc_next = vtophys(&ld->dc_tx_list[nexti]); 2376 cd->dc_tx_chain[i] = NULL; 2377 ld->dc_tx_list[i].dc_data = 0; 2378 ld->dc_tx_list[i].dc_ctl = 0; 2379 } 2380 2381 cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0; 2382 2383 return(0); 2384 } 2385 2386 2387 /* 2388 * Initialize the RX descriptors and allocate mbufs for them. Note that 2389 * we arrange the descriptors in a closed ring, so that the last descriptor 2390 * points back to the first. 
*/ 2392 static int 2393 dc_list_rx_init(sc) 2394 struct dc_softc *sc; 2395 { 2396 struct dc_chain_data *cd; 2397 struct dc_list_data *ld; 2398 int i, nexti; 2399 2400 cd = &sc->dc_cdata; 2401 ld = sc->dc_ldata; 2402 2403 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2404 if (dc_newbuf(sc, i, NULL) == ENOBUFS) 2405 return(ENOBUFS); 2406 nexti = (i == (DC_RX_LIST_CNT - 1)) ? 0 : i+1; 2407 ld->dc_rx_list[i].dc_next = vtophys(&ld->dc_rx_list[nexti]); 2408 } 2409 2410 cd->dc_rx_prod = 0; 2411 2412 return(0); 2413 } 2414 2415 /* 2416 * Initialize an RX descriptor and attach an MBUF cluster. 2417 */ 2418 static int 2419 dc_newbuf(sc, i, m) 2420 struct dc_softc *sc; 2421 int i; 2422 struct mbuf *m; 2423 { 2424 struct mbuf *m_new = NULL; 2425 struct dc_desc *c; 2426 2427 c = &sc->dc_ldata->dc_rx_list[i]; 2428 2429 if (m == NULL) { 2430 MGETHDR(m_new, M_NOWAIT, MT_DATA); 2431 if (m_new == NULL) 2432 return(ENOBUFS); 2433 2434 MCLGET(m_new, M_NOWAIT); 2435 if (!(m_new->m_flags & M_EXT)) { 2436 m_freem(m_new); 2437 return(ENOBUFS); 2438 } 2439 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 2440 } else { 2441 m_new = m; 2442 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 2443 m_new->m_data = m_new->m_ext.ext_buf; 2444 } 2445 2446 m_adj(m_new, sizeof(u_int64_t)); 2447 2448 /* 2449 * If this is a PNIC chip, zero the buffer. This is part 2450 * of the workaround for the receive bug in the 82c168 and 2451 * 82c169 chips. 2452 */ 2453 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) 2454 bzero((char *)mtod(m_new, char *), m_new->m_len); 2455 2456 sc->dc_cdata.dc_rx_chain[i] = m_new; 2457 c->dc_data = vtophys(mtod(m_new, caddr_t)); 2458 c->dc_ctl = DC_RXCTL_RLINK | DC_RXLEN; 2459 c->dc_status = DC_RXSTAT_OWN; 2460 2461 return(0); 2462 } 2463 2464 /* 2465 * Grrrrr. 2466 * The PNIC chip has a terrible bug in it that manifests itself during 2467 * periods of heavy activity. The exact mode of failure is difficult to 2468 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it 2469 * will happen on slow machines. The bug is that sometimes instead of 2470 * uploading one complete frame during reception, it uploads what looks 2471 * like the entire contents of its FIFO memory. The frame we want is at 2472 * the end of the whole mess, but we never know exactly how much data has 2473 * been uploaded, so salvaging the frame is hard. 2474 * 2475 * There is only one way to do it reliably, and it's disgusting. 2476 * Here's what we know: 2477 * 2478 * - We know there will always be somewhere between one and three extra 2479 * descriptors uploaded. 2480 * 2481 * - We know the desired received frame will always be at the end of the 2482 * total data upload. 2483 * 2484 * - We know the size of the desired received frame because it will be 2485 * provided in the length field of the status word in the last descriptor. 2486 * 2487 * Here's what we do: 2488 * 2489 * - When we allocate buffers for the receive ring, we bzero() them. 2490 * This means that we know that the buffer contents should be all 2491 * zeros, except for data uploaded by the chip. 2492 * 2493 * - We also force the PNIC chip to upload frames that include the 2494 * ethernet CRC at the end. 2495 * 2496 * - We gather all of the bogus frame data into a single buffer. 2497 * 2498 * - We then position a pointer at the end of this buffer and scan 2499 * backwards until we encounter the first non-zero byte of data. 2500 * This is the end of the received frame.
We know we will encounter 2501 * some data at the end of the frame because the CRC will always be 2502 * there, so even if the sender transmits a packet of all zeros, 2503 * we won't be fooled. 2504 * 2505 * - We know the size of the actual received frame, so we subtract 2506 * that value from the current pointer location. This brings us 2507 * to the start of the actual received packet. 2508 * 2509 * - We copy this into an mbuf and pass it on, along with the actual 2510 * frame length. 2511 * 2512 * The performance hit is tremendous, but it beats dropping frames all 2513 * the time. 2514 */ 2515 2516 #define DC_WHOLEFRAME (DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG) 2517 static void 2518 dc_pnic_rx_bug_war(sc, idx) 2519 struct dc_softc *sc; 2520 int idx; 2521 { 2522 struct dc_desc *cur_rx; 2523 struct dc_desc *c = NULL; 2524 struct mbuf *m = NULL; 2525 unsigned char *ptr; 2526 int i, total_len; 2527 u_int32_t rxstat = 0; 2528 2529 i = sc->dc_pnic_rx_bug_save; 2530 cur_rx = &sc->dc_ldata->dc_rx_list[idx]; 2531 ptr = sc->dc_pnic_rx_buf; 2532 bzero(ptr, DC_RXLEN * 5); 2533 2534 /* Copy all the bytes from the bogus buffers. */ 2535 while (1) { 2536 c = &sc->dc_ldata->dc_rx_list[i]; 2537 rxstat = c->dc_status; 2538 m = sc->dc_cdata.dc_rx_chain[i]; 2539 bcopy(mtod(m, char *), ptr, DC_RXLEN); 2540 ptr += DC_RXLEN; 2541 /* If this is the last buffer, break out. */ 2542 if (i == idx || rxstat & DC_RXSTAT_LASTFRAG) 2543 break; 2544 dc_newbuf(sc, i, m); 2545 DC_INC(i, DC_RX_LIST_CNT); 2546 } 2547 2548 /* Find the length of the actual receive frame. */ 2549 total_len = DC_RXBYTES(rxstat); 2550 2551 /* Scan backwards until we hit a non-zero byte. */ 2552 while(*ptr == 0x00) 2553 ptr--; 2554 2555 /* Round off. */ 2556 if ((uintptr_t)(ptr) & 0x3) 2557 ptr -= 1; 2558 2559 /* Now find the start of the frame. */ 2560 ptr -= total_len; 2561 if (ptr < sc->dc_pnic_rx_buf) 2562 ptr = sc->dc_pnic_rx_buf; 2563 2564 /* 2565 * Now copy the salvaged frame to the last mbuf and fake up 2566 * the status word to make it look like a successful 2567 * frame reception. 2568 */ 2569 dc_newbuf(sc, i, m); 2570 bcopy(ptr, mtod(m, char *), total_len); 2571 cur_rx->dc_status = rxstat | DC_RXSTAT_FIRSTFRAG; 2572 2573 return; 2574 } 2575 2576 /* 2577 * This routine searches the RX ring for dirty descriptors in the 2578 * event that the rxeof routine falls out of sync with the chip's 2579 * current descriptor pointer. This may happen sometimes as a result 2580 * of a "no RX buffer available" condition that happens when the chip 2581 * consumes all of the RX buffers before the driver has a chance to 2582 * process the RX ring. This routine may need to be called more than 2583 * once to bring the driver back in sync with the chip; however, we 2584 * should still be getting RX DONE interrupts to drive the search 2585 * for new packets in the RX ring, so we should catch up eventually. 2586 */ 2587 static int 2588 dc_rx_resync(sc) 2589 struct dc_softc *sc; 2590 { 2591 int i, pos; 2592 struct dc_desc *cur_rx; 2593 2594 pos = sc->dc_cdata.dc_rx_prod; 2595 2596 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2597 cur_rx = &sc->dc_ldata->dc_rx_list[pos]; 2598 if (!(cur_rx->dc_status & DC_RXSTAT_OWN)) 2599 break; 2600 DC_INC(pos, DC_RX_LIST_CNT); 2601 } 2602 2603 /* If the ring really is empty, then just return. */ 2604 if (i == DC_RX_LIST_CNT) 2605 return(0); 2606 2607 /* We've fallen behind the chip: catch it.
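 * Advance dc_rx_prod to the first completed descriptor we found and return
 * EAGAIN so the caller re-runs dc_rxeof() from that point.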
*/ 2608 sc->dc_cdata.dc_rx_prod = pos; 2609 2610 return(EAGAIN); 2611 } 2612 2613 /* 2614 * A frame has been uploaded: pass the resulting mbuf chain up to 2615 * the higher level protocols. 2616 */ 2617 static void 2618 dc_rxeof(sc) 2619 struct dc_softc *sc; 2620 { 2621 struct mbuf *m; 2622 struct ifnet *ifp; 2623 struct dc_desc *cur_rx; 2624 int i, total_len = 0; 2625 u_int32_t rxstat; 2626 2627 ifp = &sc->arpcom.ac_if; 2628 i = sc->dc_cdata.dc_rx_prod; 2629 2630 while(!(sc->dc_ldata->dc_rx_list[i].dc_status & DC_RXSTAT_OWN)) { 2631 2632 #ifdef DEVICE_POLLING 2633 if (ifp->if_flags & IFF_POLLING) { 2634 if (sc->rxcycles <= 0) 2635 break; 2636 sc->rxcycles--; 2637 } 2638 #endif /* DEVICE_POLLING */ 2639 cur_rx = &sc->dc_ldata->dc_rx_list[i]; 2640 rxstat = cur_rx->dc_status; 2641 m = sc->dc_cdata.dc_rx_chain[i]; 2642 total_len = DC_RXBYTES(rxstat); 2643 2644 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) { 2645 if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) { 2646 if (rxstat & DC_RXSTAT_FIRSTFRAG) 2647 sc->dc_pnic_rx_bug_save = i; 2648 if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) { 2649 DC_INC(i, DC_RX_LIST_CNT); 2650 continue; 2651 } 2652 dc_pnic_rx_bug_war(sc, i); 2653 rxstat = cur_rx->dc_status; 2654 total_len = DC_RXBYTES(rxstat); 2655 } 2656 } 2657 2658 sc->dc_cdata.dc_rx_chain[i] = NULL; 2659 2660 /* 2661 * If an error occurs, update stats, clear the 2662 * status word and leave the mbuf cluster in place: 2663 * it should simply get re-used next time this descriptor 2664 * comes up in the ring. However, don't report long 2665 * frames as errors since they could be vlans 2666 */ 2667 if ((rxstat & DC_RXSTAT_RXERR)){ 2668 if (!(rxstat & DC_RXSTAT_GIANT) || 2669 (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE | 2670 DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN | 2671 DC_RXSTAT_RUNT | DC_RXSTAT_DE))) { 2672 ifp->if_ierrors++; 2673 if (rxstat & DC_RXSTAT_COLLSEEN) 2674 ifp->if_collisions++; 2675 dc_newbuf(sc, i, m); 2676 if (rxstat & DC_RXSTAT_CRCERR) { 2677 DC_INC(i, DC_RX_LIST_CNT); 2678 continue; 2679 } else { 2680 dc_init(sc); 2681 return; 2682 } 2683 } 2684 } 2685 2686 /* No errors; receive the packet. */ 2687 total_len -= ETHER_CRC_LEN; 2688 #ifdef __i386__ 2689 /* 2690 * On the x86 we do not have alignment problems, so try to 2691 * allocate a new buffer for the receive ring, and pass up 2692 * the one where the packet is already, saving the expensive 2693 * copy done in m_devget(). 2694 * If we are on an architecture with alignment problems, or 2695 * if the allocation fails, then use m_devget and leave the 2696 * existing buffer in the receive ring. 2697 */ 2698 if (dc_quick && dc_newbuf(sc, i, NULL) == 0) { 2699 m->m_pkthdr.rcvif = ifp; 2700 m->m_pkthdr.len = m->m_len = total_len; 2701 DC_INC(i, DC_RX_LIST_CNT); 2702 } else 2703 #endif 2704 { 2705 struct mbuf *m0; 2706 2707 m0 = m_devget(mtod(m, char *), total_len, 2708 ETHER_ALIGN, ifp, NULL); 2709 dc_newbuf(sc, i, m); 2710 DC_INC(i, DC_RX_LIST_CNT); 2711 if (m0 == NULL) { 2712 ifp->if_ierrors++; 2713 continue; 2714 } 2715 m = m0; 2716 } 2717 2718 ifp->if_ipackets++; 2719 (*ifp->if_input)(ifp, m); 2720 } 2721 2722 sc->dc_cdata.dc_rx_prod = i; 2723 } 2724 2725 /* 2726 * A frame was downloaded to the chip. It's safe for us to clean up 2727 * the list buffers. 
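 * ('Downloaded' here means the chip has DMA'd a queued frame out of host
 * memory for transmission, so its mbufs can be freed.)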
2728 */ 2729 2730 static void 2731 dc_txeof(sc) 2732 struct dc_softc *sc; 2733 { 2734 struct dc_desc *cur_tx = NULL; 2735 struct ifnet *ifp; 2736 int idx; 2737 2738 ifp = &sc->arpcom.ac_if; 2739 2740 /* 2741 * Go through our tx list and free mbufs for those 2742 * frames that have been transmitted. 2743 */ 2744 idx = sc->dc_cdata.dc_tx_cons; 2745 while(idx != sc->dc_cdata.dc_tx_prod) { 2746 u_int32_t txstat; 2747 2748 cur_tx = &sc->dc_ldata->dc_tx_list[idx]; 2749 txstat = cur_tx->dc_status; 2750 2751 if (txstat & DC_TXSTAT_OWN) 2752 break; 2753 2754 if (!(cur_tx->dc_ctl & DC_TXCTL_LASTFRAG) || 2755 cur_tx->dc_ctl & DC_TXCTL_SETUP) { 2756 if (cur_tx->dc_ctl & DC_TXCTL_SETUP) { 2757 /* 2758 * Yes, the PNIC is so brain damaged 2759 * that it will sometimes generate a TX 2760 * underrun error while DMAing the RX 2761 * filter setup frame. If we detect this, 2762 * we have to send the setup frame again, 2763 * or else the filter won't be programmed 2764 * correctly. 2765 */ 2766 if (DC_IS_PNIC(sc)) { 2767 if (txstat & DC_TXSTAT_ERRSUM) 2768 dc_setfilt(sc); 2769 } 2770 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2771 } 2772 sc->dc_cdata.dc_tx_cnt--; 2773 DC_INC(idx, DC_TX_LIST_CNT); 2774 continue; 2775 } 2776 2777 if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) { 2778 /* 2779 * XXX: Why does my Xircom taunt me so? 2780 * For some reason it likes setting the CARRLOST flag 2781 * even when the carrier is there. wtf?!? 2782 * Who knows, but Conexant chips have the 2783 * same problem. Maybe they took lessons 2784 * from Xircom. 2785 */ 2786 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2787 sc->dc_pmode == DC_PMODE_MII && 2788 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM| 2789 DC_TXSTAT_NOCARRIER))) 2790 txstat &= ~DC_TXSTAT_ERRSUM; 2791 } else { 2792 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2793 sc->dc_pmode == DC_PMODE_MII && 2794 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM| 2795 DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST))) 2796 txstat &= ~DC_TXSTAT_ERRSUM; 2797 } 2798 2799 if (txstat & DC_TXSTAT_ERRSUM) { 2800 ifp->if_oerrors++; 2801 if (txstat & DC_TXSTAT_EXCESSCOLL) 2802 ifp->if_collisions++; 2803 if (txstat & DC_TXSTAT_LATECOLL) 2804 ifp->if_collisions++; 2805 if (!(txstat & DC_TXSTAT_UNDERRUN)) { 2806 dc_init(sc); 2807 return; 2808 } 2809 } 2810 2811 ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3; 2812 2813 ifp->if_opackets++; 2814 if (sc->dc_cdata.dc_tx_chain[idx] != NULL) { 2815 m_freem(sc->dc_cdata.dc_tx_chain[idx]); 2816 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2817 } 2818 2819 sc->dc_cdata.dc_tx_cnt--; 2820 DC_INC(idx, DC_TX_LIST_CNT); 2821 } 2822 2823 if (idx != sc->dc_cdata.dc_tx_cons) { 2824 /* some buffers have been freed */ 2825 sc->dc_cdata.dc_tx_cons = idx; 2826 ifp->if_flags &= ~IFF_OACTIVE; 2827 } 2828 ifp->if_timer = (sc->dc_cdata.dc_tx_cnt == 0) ? 
0 : 5; 2829 2830 return; 2831 } 2832 2833 static void 2834 dc_tick(xsc) 2835 void *xsc; 2836 { 2837 struct dc_softc *sc; 2838 struct mii_data *mii; 2839 struct ifnet *ifp; 2840 u_int32_t r; 2841 2842 sc = xsc; 2843 DC_LOCK(sc); 2844 ifp = &sc->arpcom.ac_if; 2845 mii = device_get_softc(sc->dc_miibus); 2846 2847 if (sc->dc_flags & DC_REDUCED_MII_POLL) { 2848 if (sc->dc_flags & DC_21143_NWAY) { 2849 r = CSR_READ_4(sc, DC_10BTSTAT); 2850 if (IFM_SUBTYPE(mii->mii_media_active) == 2851 IFM_100_TX && (r & DC_TSTAT_LS100)) { 2852 sc->dc_link = 0; 2853 mii_mediachg(mii); 2854 } 2855 if (IFM_SUBTYPE(mii->mii_media_active) == 2856 IFM_10_T && (r & DC_TSTAT_LS10)) { 2857 sc->dc_link = 0; 2858 mii_mediachg(mii); 2859 } 2860 if (sc->dc_link == 0) 2861 mii_tick(mii); 2862 } else { 2863 r = CSR_READ_4(sc, DC_ISR); 2864 if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT && 2865 sc->dc_cdata.dc_tx_cnt == 0) { 2866 mii_tick(mii); 2867 if (!(mii->mii_media_status & IFM_ACTIVE)) 2868 sc->dc_link = 0; 2869 } 2870 } 2871 } else 2872 mii_tick(mii); 2873 2874 /* 2875 * When the init routine completes, we expect to be able to send 2876 * packets right away, and in fact the network code will send a 2877 * gratuitous ARP the moment the init routine marks the interface 2878 * as running. However, even though the MAC may have been initialized, 2879 * there may be a delay of a few seconds before the PHY completes 2880 * autonegotiation and the link is brought up. Any transmissions 2881 * made during that delay will be lost. Dealing with this is tricky: 2882 * we can't just pause in the init routine while waiting for the 2883 * PHY to come ready since that would bring the whole system to 2884 * a screeching halt for several seconds. 2885 * 2886 * What we do here is prevent the TX start routine from sending 2887 * any packets until a link has been established. After the 2888 * interface has been initialized, the tick routine will poll 2889 * the state of the PHY until the IFM_ACTIVE flag is set. Until 2890 * that time, packets will stay in the send queue, and once the 2891 * link comes up, they will be flushed out to the wire. 2892 */ 2893 if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE && 2894 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2895 sc->dc_link++; 2896 if (ifp->if_snd.ifq_head != NULL) 2897 dc_start(ifp); 2898 } 2899 2900 if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link) 2901 callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc); 2902 else 2903 callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc); 2904 2905 DC_UNLOCK(sc); 2906 2907 return; 2908 } 2909 2910 /* 2911 * A transmit underrun has occurred. Back off the transmit threshold, 2912 * or switch to store and forward mode if we have to. 2913 */ 2914 static void 2915 dc_tx_underrun(sc) 2916 struct dc_softc *sc; 2917 { 2918 u_int32_t isr; 2919 int i; 2920 2921 if (DC_IS_DAVICOM(sc)) 2922 dc_init(sc); 2923 2924 if (DC_IS_INTEL(sc)) { 2925 /* 2926 * The real 21143 requires that the transmitter be idle 2927 * in order to change the transmit threshold or store 2928 * and forward state. 
2929 */ 2930 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2931 2932 for (i = 0; i < DC_TIMEOUT; i++) { 2933 isr = CSR_READ_4(sc, DC_ISR); 2934 if (isr & DC_ISR_TX_IDLE) 2935 break; 2936 DELAY(10); 2937 } 2938 if (i == DC_TIMEOUT) { 2939 printf("dc%d: failed to force tx to idle state\n", 2940 sc->dc_unit); 2941 dc_init(sc); 2942 } 2943 } 2944 2945 printf("dc%d: TX underrun -- ", sc->dc_unit); 2946 sc->dc_txthresh += DC_TXTHRESH_INC; 2947 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 2948 printf("using store and forward mode\n"); 2949 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2950 } else { 2951 printf("increasing TX threshold\n"); 2952 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 2953 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 2954 } 2955 2956 if (DC_IS_INTEL(sc)) 2957 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2958 2959 return; 2960 } 2961 2962 #ifdef DEVICE_POLLING 2963 static poll_handler_t dc_poll; 2964 2965 static void 2966 dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 2967 { 2968 struct dc_softc *sc = ifp->if_softc; 2969 2970 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */ 2971 /* Re-enable interrupts. */ 2972 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 2973 return; 2974 } 2975 sc->rxcycles = count; 2976 dc_rxeof(sc); 2977 dc_txeof(sc); 2978 if (ifp->if_snd.ifq_head != NULL && !(ifp->if_flags & IFF_OACTIVE)) 2979 dc_start(ifp); 2980 2981 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 2982 u_int32_t status; 2983 2984 status = CSR_READ_4(sc, DC_ISR); 2985 status &= (DC_ISR_RX_WATDOGTIMEO|DC_ISR_RX_NOBUF| 2986 DC_ISR_TX_NOBUF|DC_ISR_TX_IDLE|DC_ISR_TX_UNDERRUN| 2987 DC_ISR_BUS_ERR); 2988 if (!status) 2989 return; 2990 /* ack what we have */ 2991 CSR_WRITE_4(sc, DC_ISR, status); 2992 2993 if (status & (DC_ISR_RX_WATDOGTIMEO|DC_ISR_RX_NOBUF)) { 2994 u_int32_t r = CSR_READ_4(sc, DC_FRAMESDISCARDED); 2995 ifp->if_ierrors += (r & 0xffff) + ((r >> 17) & 0x7ff); 2996 2997 if (dc_rx_resync(sc)) 2998 dc_rxeof(sc); 2999 } 3000 /* restart transmit unit if necessary */ 3001 if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt) 3002 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 3003 3004 if (status & DC_ISR_TX_UNDERRUN) 3005 dc_tx_underrun(sc); 3006 3007 if (status & DC_ISR_BUS_ERR) { 3008 printf("dc_poll: dc%d bus error\n", sc->dc_unit); 3009 dc_reset(sc); 3010 dc_init(sc); 3011 } 3012 } 3013 } 3014 #endif /* DEVICE_POLLING */ 3015 3016 static void 3017 dc_intr(arg) 3018 void *arg; 3019 { 3020 struct dc_softc *sc; 3021 struct ifnet *ifp; 3022 u_int32_t status; 3023 3024 sc = arg; 3025 3026 if (sc->suspended) { 3027 return; 3028 } 3029 3030 if ((CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0) 3031 return; 3032 3033 DC_LOCK(sc); 3034 ifp = &sc->arpcom.ac_if; 3035 #ifdef DEVICE_POLLING 3036 if (ifp->if_flags & IFF_POLLING) 3037 goto done; 3038 if (ether_poll_register(dc_poll, ifp)) { /* ok, disable interrupts */ 3039 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3040 goto done; 3041 } 3042 #endif /* DEVICE_POLLING */ 3043 3044 /* Suppress unwanted interrupts */ 3045 if (!(ifp->if_flags & IFF_UP)) { 3046 if (CSR_READ_4(sc, DC_ISR) & DC_INTRS) 3047 dc_stop(sc); 3048 DC_UNLOCK(sc); 3049 return; 3050 } 3051 3052 /* Disable interrupts. 
*/ 3053 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3054 3055 while(((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) 3056 && status != 0xFFFFFFFF) { 3057 3058 CSR_WRITE_4(sc, DC_ISR, status); 3059 3060 if (status & DC_ISR_RX_OK) { 3061 int curpkts; 3062 curpkts = ifp->if_ipackets; 3063 dc_rxeof(sc); 3064 if (curpkts == ifp->if_ipackets) { 3065 while(dc_rx_resync(sc)) 3066 dc_rxeof(sc); 3067 } 3068 } 3069 3070 if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF)) 3071 dc_txeof(sc); 3072 3073 if (status & DC_ISR_TX_IDLE) { 3074 dc_txeof(sc); 3075 if (sc->dc_cdata.dc_tx_cnt) { 3076 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3077 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 3078 } 3079 } 3080 3081 if (status & DC_ISR_TX_UNDERRUN) 3082 dc_tx_underrun(sc); 3083 3084 if ((status & DC_ISR_RX_WATDOGTIMEO) 3085 || (status & DC_ISR_RX_NOBUF)) { 3086 int curpkts; 3087 curpkts = ifp->if_ipackets; 3088 dc_rxeof(sc); 3089 if (curpkts == ifp->if_ipackets) { 3090 while(dc_rx_resync(sc)) 3091 dc_rxeof(sc); 3092 } 3093 } 3094 3095 if (status & DC_ISR_BUS_ERR) { 3096 dc_reset(sc); 3097 dc_init(sc); 3098 } 3099 } 3100 3101 /* Re-enable interrupts. */ 3102 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 3103 3104 if (ifp->if_snd.ifq_head != NULL) 3105 dc_start(ifp); 3106 3107 #ifdef DEVICE_POLLING 3108 done: 3109 #endif /* DEVICE_POLLING */ 3110 3111 DC_UNLOCK(sc); 3112 3113 return; 3114 } 3115 3116 /* 3117 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 3118 * pointers to the fragment pointers. 3119 */ 3120 static int 3121 dc_encap(sc, m_head, txidx) 3122 struct dc_softc *sc; 3123 struct mbuf *m_head; 3124 u_int32_t *txidx; 3125 { 3126 struct dc_desc *f = NULL; 3127 struct mbuf *m; 3128 int frag, cur, cnt = 0; 3129 3130 /* 3131 * Start packing the mbufs in this chain into 3132 * the fragment pointers. Stop when we run out 3133 * of fragments or hit the end of the mbuf chain. 3134 */ 3135 m = m_head; 3136 cur = frag = *txidx; 3137 3138 for (m = m_head; m != NULL; m = m->m_next) { 3139 if (m->m_len != 0) { 3140 if (sc->dc_flags & DC_TX_ADMTEK_WAR) { 3141 if (*txidx != sc->dc_cdata.dc_tx_prod && 3142 frag == (DC_TX_LIST_CNT - 1)) 3143 return(ENOBUFS); 3144 } 3145 if ((DC_TX_LIST_CNT - 3146 (sc->dc_cdata.dc_tx_cnt + cnt)) < 5) 3147 return(ENOBUFS); 3148 3149 f = &sc->dc_ldata->dc_tx_list[frag]; 3150 f->dc_ctl = DC_TXCTL_TLINK | m->m_len; 3151 if (cnt == 0) { 3152 f->dc_status = 0; 3153 f->dc_ctl |= DC_TXCTL_FIRSTFRAG; 3154 } else 3155 f->dc_status = DC_TXSTAT_OWN; 3156 f->dc_data = vtophys(mtod(m, vm_offset_t)); 3157 cur = frag; 3158 DC_INC(frag, DC_TX_LIST_CNT); 3159 cnt++; 3160 } 3161 } 3162 3163 if (m != NULL) 3164 return(ENOBUFS); 3165 3166 sc->dc_cdata.dc_tx_cnt += cnt; 3167 sc->dc_cdata.dc_tx_chain[cur] = m_head; 3168 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_LASTFRAG; 3169 if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG) 3170 sc->dc_ldata->dc_tx_list[*txidx].dc_ctl |= DC_TXCTL_FINT; 3171 if (sc->dc_flags & DC_TX_INTR_ALWAYS) 3172 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT; 3173 if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64) 3174 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT; 3175 sc->dc_ldata->dc_tx_list[*txidx].dc_status = DC_TXSTAT_OWN; 3176 *txidx = frag; 3177 3178 return(0); 3179 } 3180 3181 /* 3182 * Coalesce an mbuf chain into a single mbuf cluster buffer. 3183 * Needed for some really badly behaved chips that just can't 3184 * do scatter/gather correctly. 
3185 */ 3186 static int 3187 dc_coal(sc, m_head) 3188 struct dc_softc *sc; 3189 struct mbuf **m_head; 3190 { 3191 struct mbuf *m_new, *m; 3192 3193 m = *m_head; 3194 MGETHDR(m_new, M_NOWAIT, MT_DATA); 3195 if (m_new == NULL) 3196 return(ENOBUFS); 3197 if (m->m_pkthdr.len > MHLEN) { 3198 MCLGET(m_new, M_NOWAIT); 3199 if (!(m_new->m_flags & M_EXT)) { 3200 m_freem(m_new); 3201 return(ENOBUFS); 3202 } 3203 } 3204 m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t)); 3205 m_new->m_pkthdr.len = m_new->m_len = m->m_pkthdr.len; 3206 m_freem(m); 3207 *m_head = m_new; 3208 3209 return(0); 3210 } 3211 3212 /* 3213 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3214 * to the mbuf data regions directly in the transmit lists. We also save a 3215 * copy of the pointers since the transmit list fragment pointers are 3216 * physical addresses. 3217 */ 3218 3219 static void 3220 dc_start(ifp) 3221 struct ifnet *ifp; 3222 { 3223 struct dc_softc *sc; 3224 struct mbuf *m_head = NULL; 3225 int idx; 3226 3227 sc = ifp->if_softc; 3228 3229 DC_LOCK(sc); 3230 3231 if (!sc->dc_link && ifp->if_snd.ifq_len < 10) { 3232 DC_UNLOCK(sc); 3233 return; 3234 } 3235 3236 if (ifp->if_flags & IFF_OACTIVE) { 3237 DC_UNLOCK(sc); 3238 return; 3239 } 3240 3241 idx = sc->dc_cdata.dc_tx_prod; 3242 3243 while(sc->dc_cdata.dc_tx_chain[idx] == NULL) { 3244 IF_DEQUEUE(&ifp->if_snd, m_head); 3245 if (m_head == NULL) 3246 break; 3247 3248 if (sc->dc_flags & DC_TX_COALESCE && 3249 (m_head->m_next != NULL || 3250 sc->dc_flags & DC_TX_ALIGN)) { 3251 if (dc_coal(sc, &m_head)) { 3252 IF_PREPEND(&ifp->if_snd, m_head); 3253 ifp->if_flags |= IFF_OACTIVE; 3254 break; 3255 } 3256 } 3257 3258 if (dc_encap(sc, m_head, &idx)) { 3259 IF_PREPEND(&ifp->if_snd, m_head); 3260 ifp->if_flags |= IFF_OACTIVE; 3261 break; 3262 } 3263 3264 /* 3265 * If there's a BPF listener, bounce a copy of this frame 3266 * to him. 3267 */ 3268 BPF_MTAP(ifp, m_head); 3269 3270 if (sc->dc_flags & DC_TX_ONE) { 3271 ifp->if_flags |= IFF_OACTIVE; 3272 break; 3273 } 3274 } 3275 3276 /* Transmit */ 3277 sc->dc_cdata.dc_tx_prod = idx; 3278 if (!(sc->dc_flags & DC_TX_POLL)) 3279 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 3280 3281 /* 3282 * Set a timeout in case the chip goes out to lunch. 3283 */ 3284 ifp->if_timer = 5; 3285 3286 DC_UNLOCK(sc); 3287 3288 return; 3289 } 3290 3291 static void 3292 dc_init(xsc) 3293 void *xsc; 3294 { 3295 struct dc_softc *sc = xsc; 3296 struct ifnet *ifp = &sc->arpcom.ac_if; 3297 struct mii_data *mii; 3298 3299 DC_LOCK(sc); 3300 3301 mii = device_get_softc(sc->dc_miibus); 3302 3303 /* 3304 * Cancel pending I/O and free all RX/TX buffers. 3305 */ 3306 dc_stop(sc); 3307 dc_reset(sc); 3308 3309 /* 3310 * Set cache alignment and burst length. 3311 */ 3312 if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc)) 3313 CSR_WRITE_4(sc, DC_BUSCTL, 0); 3314 else 3315 CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE); 3316 /* 3317 * Evenly share the bus between receive and transmit process. 
3318 */ 3319 if (DC_IS_INTEL(sc)) 3320 DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION); 3321 if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) { 3322 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA); 3323 } else { 3324 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG); 3325 } 3326 if (sc->dc_flags & DC_TX_POLL) 3327 DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1); 3328 switch(sc->dc_cachesize) { 3329 case 32: 3330 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG); 3331 break; 3332 case 16: 3333 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG); 3334 break; 3335 case 8: 3336 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG); 3337 break; 3338 case 0: 3339 default: 3340 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE); 3341 break; 3342 } 3343 3344 if (sc->dc_flags & DC_TX_STORENFWD) 3345 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3346 else { 3347 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 3348 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3349 } else { 3350 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3351 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 3352 } 3353 } 3354 3355 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC); 3356 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF); 3357 3358 if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { 3359 /* 3360 * The app notes for the 98713 and 98715A say that 3361 * in order to have the chips operate properly, a magic 3362 * number must be written to CSR16. Macronix does not 3363 * document the meaning of these bits so there's no way 3364 * to know exactly what they do. The 98713 has a magic 3365 * number all its own; the rest all use a different one. 3366 */ 3367 DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000); 3368 if (sc->dc_type == DC_TYPE_98713) 3369 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713); 3370 else 3371 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715); 3372 } 3373 3374 if (DC_IS_XIRCOM(sc)) { 3375 /* 3376 * setup General Purpose Port mode and data so the tulip 3377 * can talk to the MII. 3378 */ 3379 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | 3380 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 3381 DELAY(10); 3382 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | 3383 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 3384 DELAY(10); 3385 } 3386 3387 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 3388 DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN); 3389 3390 /* Init circular RX list. */ 3391 if (dc_list_rx_init(sc) == ENOBUFS) { 3392 printf("dc%d: initialization failed: no " 3393 "memory for rx buffers\n", sc->dc_unit); 3394 dc_stop(sc); 3395 DC_UNLOCK(sc); 3396 return; 3397 } 3398 3399 /* 3400 * Init tx descriptors. 3401 */ 3402 dc_list_tx_init(sc); 3403 3404 /* 3405 * Load the address of the RX list. 3406 */ 3407 CSR_WRITE_4(sc, DC_RXADDR, vtophys(&sc->dc_ldata->dc_rx_list[0])); 3408 CSR_WRITE_4(sc, DC_TXADDR, vtophys(&sc->dc_ldata->dc_tx_list[0])); 3409 3410 /* 3411 * Enable interrupts. 3412 */ 3413 #ifdef DEVICE_POLLING 3414 /* 3415 * ... but only if we are not polling, and make sure they are off in 3416 * the case of polling. Some cards (e.g. fxp) turn interrupts on 3417 * after a reset. 3418 */ 3419 if (ifp->if_flags & IFF_POLLING) 3420 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3421 else 3422 #endif 3423 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 3424 CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF); 3425 3426 /* Enable transmitter. */ 3427 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3428 3429 /* 3430 * If this is an Intel 21143 and we're not using the 3431 * MII port, program the LED control pins so we get 3432 * link and activity indications. 
3433 */ 3434 if (sc->dc_flags & DC_TULIP_LEDS) { 3435 CSR_WRITE_4(sc, DC_WATCHDOG, 3436 DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY); 3437 CSR_WRITE_4(sc, DC_WATCHDOG, 0); 3438 } 3439 3440 /* 3441 * Load the RX/multicast filter. We do this sort of late 3442 * because the filter programming scheme on the 21143 and 3443 * some clones requires DMAing a setup frame via the TX 3444 * engine, and we need the transmitter enabled for that. 3445 */ 3446 dc_setfilt(sc); 3447 3448 /* Enable receiver. */ 3449 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); 3450 CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF); 3451 3452 mii_mediachg(mii); 3453 dc_setcfg(sc, sc->dc_if_media); 3454 3455 ifp->if_flags |= IFF_RUNNING; 3456 ifp->if_flags &= ~IFF_OACTIVE; 3457 3458 /* Don't start the ticker if this is a homePNA link. */ 3459 if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1) 3460 sc->dc_link = 1; 3461 else { 3462 if (sc->dc_flags & DC_21143_NWAY) 3463 callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc); 3464 else 3465 callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc); 3466 } 3467 3468 #ifdef SRM_MEDIA 3469 if(sc->dc_srm_media) { 3470 struct ifreq ifr; 3471 3472 ifr.ifr_media = sc->dc_srm_media; 3473 ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA); 3474 sc->dc_srm_media = 0; 3475 } 3476 #endif 3477 DC_UNLOCK(sc); 3478 return; 3479 } 3480 3481 /* 3482 * Set media options. 3483 */ 3484 static int 3485 dc_ifmedia_upd(ifp) 3486 struct ifnet *ifp; 3487 { 3488 struct dc_softc *sc; 3489 struct mii_data *mii; 3490 struct ifmedia *ifm; 3491 3492 sc = ifp->if_softc; 3493 mii = device_get_softc(sc->dc_miibus); 3494 mii_mediachg(mii); 3495 ifm = &mii->mii_media; 3496 3497 if (DC_IS_DAVICOM(sc) && 3498 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) 3499 dc_setcfg(sc, ifm->ifm_media); 3500 else 3501 sc->dc_link = 0; 3502 3503 return(0); 3504 } 3505 3506 /* 3507 * Report current media status. 
3508 */ 3509 static void 3510 dc_ifmedia_sts(ifp, ifmr) 3511 struct ifnet *ifp; 3512 struct ifmediareq *ifmr; 3513 { 3514 struct dc_softc *sc; 3515 struct mii_data *mii; 3516 struct ifmedia *ifm; 3517 3518 sc = ifp->if_softc; 3519 mii = device_get_softc(sc->dc_miibus); 3520 mii_pollstat(mii); 3521 ifm = &mii->mii_media; 3522 if (DC_IS_DAVICOM(sc)) { 3523 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { 3524 ifmr->ifm_active = ifm->ifm_media; 3525 ifmr->ifm_status = 0; 3526 return; 3527 } 3528 } 3529 ifmr->ifm_active = mii->mii_media_active; 3530 ifmr->ifm_status = mii->mii_media_status; 3531 3532 return; 3533 } 3534 3535 static int 3536 dc_ioctl(ifp, command, data) 3537 struct ifnet *ifp; 3538 u_long command; 3539 caddr_t data; 3540 { 3541 struct dc_softc *sc = ifp->if_softc; 3542 struct ifreq *ifr = (struct ifreq *) data; 3543 struct mii_data *mii; 3544 int error = 0; 3545 3546 DC_LOCK(sc); 3547 3548 switch(command) { 3549 case SIOCSIFFLAGS: 3550 if (ifp->if_flags & IFF_UP) { 3551 int need_setfilt = (ifp->if_flags ^ sc->dc_if_flags) & 3552 (IFF_PROMISC | IFF_ALLMULTI); 3553 3554 if (ifp->if_flags & IFF_RUNNING) { 3555 if (need_setfilt) 3556 dc_setfilt(sc); 3557 } else { 3558 sc->dc_txthresh = 0; 3559 dc_init(sc); 3560 } 3561 } else { 3562 if (ifp->if_flags & IFF_RUNNING) 3563 dc_stop(sc); 3564 } 3565 sc->dc_if_flags = ifp->if_flags; 3566 error = 0; 3567 break; 3568 case SIOCADDMULTI: 3569 case SIOCDELMULTI: 3570 dc_setfilt(sc); 3571 error = 0; 3572 break; 3573 case SIOCGIFMEDIA: 3574 case SIOCSIFMEDIA: 3575 mii = device_get_softc(sc->dc_miibus); 3576 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 3577 #ifdef SRM_MEDIA 3578 if (sc->dc_srm_media) 3579 sc->dc_srm_media = 0; 3580 #endif 3581 break; 3582 default: 3583 error = ether_ioctl(ifp, command, data); 3584 break; 3585 } 3586 3587 DC_UNLOCK(sc); 3588 3589 return(error); 3590 } 3591 3592 static void 3593 dc_watchdog(ifp) 3594 struct ifnet *ifp; 3595 { 3596 struct dc_softc *sc; 3597 3598 sc = ifp->if_softc; 3599 3600 DC_LOCK(sc); 3601 3602 ifp->if_oerrors++; 3603 printf("dc%d: watchdog timeout\n", sc->dc_unit); 3604 3605 dc_stop(sc); 3606 dc_reset(sc); 3607 dc_init(sc); 3608 3609 if (ifp->if_snd.ifq_head != NULL) 3610 dc_start(ifp); 3611 3612 DC_UNLOCK(sc); 3613 3614 return; 3615 } 3616 3617 /* 3618 * Stop the adapter and free any mbufs allocated to the 3619 * RX and TX lists. 3620 */ 3621 static void 3622 dc_stop(sc) 3623 struct dc_softc *sc; 3624 { 3625 register int i; 3626 struct ifnet *ifp; 3627 3628 DC_LOCK(sc); 3629 3630 ifp = &sc->arpcom.ac_if; 3631 ifp->if_timer = 0; 3632 3633 callout_stop(&sc->dc_stat_ch); 3634 3635 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3636 #ifdef DEVICE_POLLING 3637 ether_poll_deregister(ifp); 3638 #endif 3639 3640 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON)); 3641 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3642 CSR_WRITE_4(sc, DC_TXADDR, 0x00000000); 3643 CSR_WRITE_4(sc, DC_RXADDR, 0x00000000); 3644 sc->dc_link = 0; 3645 3646 /* 3647 * Free data in the RX lists. 3648 */ 3649 for (i = 0; i < DC_RX_LIST_CNT; i++) { 3650 if (sc->dc_cdata.dc_rx_chain[i] != NULL) { 3651 m_freem(sc->dc_cdata.dc_rx_chain[i]); 3652 sc->dc_cdata.dc_rx_chain[i] = NULL; 3653 } 3654 } 3655 bzero((char *)&sc->dc_ldata->dc_rx_list, 3656 sizeof(sc->dc_ldata->dc_rx_list)); 3657 3658 /* 3659 * Free the TX list buffers. 
3660 */ 3661 for (i = 0; i < DC_TX_LIST_CNT; i++) { 3662 if (sc->dc_cdata.dc_tx_chain[i] != NULL) { 3663 if (sc->dc_ldata->dc_tx_list[i].dc_ctl & 3664 DC_TXCTL_SETUP) { 3665 sc->dc_cdata.dc_tx_chain[i] = NULL; 3666 continue; 3667 } 3668 m_freem(sc->dc_cdata.dc_tx_chain[i]); 3669 sc->dc_cdata.dc_tx_chain[i] = NULL; 3670 } 3671 } 3672 3673 bzero((char *)&sc->dc_ldata->dc_tx_list, 3674 sizeof(sc->dc_ldata->dc_tx_list)); 3675 3676 DC_UNLOCK(sc); 3677 3678 return; 3679 } 3680 3681 /* 3682 * Device suspend routine. Stop the interface and save some PCI 3683 * settings in case the BIOS doesn't restore them properly on 3684 * resume. 3685 */ 3686 static int 3687 dc_suspend(dev) 3688 device_t dev; 3689 { 3690 register int i; 3691 int s; 3692 struct dc_softc *sc; 3693 3694 s = splimp(); 3695 3696 sc = device_get_softc(dev); 3697 3698 dc_stop(sc); 3699 3700 for (i = 0; i < 5; i++) 3701 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4); 3702 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); 3703 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); 3704 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 3705 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); 3706 3707 sc->suspended = 1; 3708 3709 splx(s); 3710 return (0); 3711 } 3712 3713 /* 3714 * Device resume routine. Restore some PCI settings in case the BIOS 3715 * doesn't, re-enable busmastering, and restart the interface if 3716 * appropriate. 3717 */ 3718 static int 3719 dc_resume(dev) 3720 device_t dev; 3721 { 3722 register int i; 3723 int s; 3724 struct dc_softc *sc; 3725 struct ifnet *ifp; 3726 3727 s = splimp(); 3728 3729 sc = device_get_softc(dev); 3730 ifp = &sc->arpcom.ac_if; 3731 3732 dc_acpi(dev); 3733 3734 /* better way to do this? */ 3735 for (i = 0; i < 5; i++) 3736 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4); 3737 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); 3738 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); 3739 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); 3740 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); 3741 3742 /* reenable busmastering */ 3743 pci_enable_busmaster(dev); 3744 pci_enable_io(dev, DC_RES); 3745 3746 /* reinitialize interface if necessary */ 3747 if (ifp->if_flags & IFF_UP) 3748 dc_init(sc); 3749 3750 sc->suspended = 0; 3751 3752 splx(s); 3753 return (0); 3754 } 3755 3756 /* 3757 * Stop all chip I/O so that the kernel's probe routines don't 3758 * get confused by errant DMAs when rebooting. 3759 */ 3760 static void 3761 dc_shutdown(dev) 3762 device_t dev; 3763 { 3764 struct dc_softc *sc; 3765 3766 sc = device_get_softc(dev); 3767 3768 dc_stop(sc); 3769 3770 return; 3771 } 3772