/*
 * Copyright (c) 1997, 1998, 1999
 *      Bill Paul <wpaul@ee.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
 * series chips and several workalikes including the following:
 *
 * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
 * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
 * Lite-On 82c168/82c169 PNIC (www.litecom.com)
 * ASIX Electronics AX88140A (www.asix.com.tw)
 * ASIX Electronics AX88141 (www.asix.com.tw)
 * ADMtek AL981 (www.admtek.com.tw)
 * ADMtek AN985 (www.admtek.com.tw)
 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
 * Accton EN1217 (www.accton.com)
 * Xircom X3201 (www.xircom.com)
 * Abocom FE2500
 * Conexant LANfinity (www.conexant.com)
 *
 * Datasheets for the 21143 are available at developer.intel.com.
 * Datasheets for the clone parts can be found at their respective sites.
 * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
 * The PNIC II is essentially a Macronix 98715A chip; the only difference
 * worth noting is that its multicast hash table is only 128 bits wide
 * instead of 512.
 *
 * Written by Bill Paul <wpaul@ee.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The Intel 21143 is the successor to the DEC 21140. It is basically
 * the same as the 21140 but with a few new features. The 21143 supports
 * four kinds of media attachments:
 *
 * o MII port, for 10Mbps and 100Mbps support and NWAY
 *   autonegotiation provided by an external PHY.
 * o SYM port, for symbol mode 100Mbps support.
 * o 10baseT port.
 * o AUI/BNC port.
74 * 75 * The 100Mbps SYM port and 10baseT port can be used together in 76 * combination with the internal NWAY support to create a 10/100 77 * autosensing configuration. 78 * 79 * Note that not all tulip workalikes are handled in this driver: we only 80 * deal with those which are relatively well behaved. The Winbond is 81 * handled separately due to its different register offsets and the 82 * special handling needed for its various bugs. The PNIC is handled 83 * here, but I'm not thrilled about it. 84 * 85 * All of the workalike chips use some form of MII transceiver support 86 * with the exception of the Macronix chips, which also have a SYM port. 87 * The ASIX AX88140A is also documented to have a SYM port, but all 88 * the cards I've seen use an MII transceiver, probably because the 89 * AX88140A doesn't support internal NWAY. 90 */ 91 92 #include <sys/param.h> 93 #include <sys/systm.h> 94 #include <sys/sockio.h> 95 #include <sys/mbuf.h> 96 #include <sys/malloc.h> 97 #include <sys/kernel.h> 98 #include <sys/socket.h> 99 #include <sys/sysctl.h> 100 101 #include <net/if.h> 102 #include <net/if_arp.h> 103 #include <net/ethernet.h> 104 #include <net/if_dl.h> 105 #include <net/if_media.h> 106 #include <net/if_types.h> 107 #include <net/if_vlan_var.h> 108 109 #include <net/bpf.h> 110 111 #include <vm/vm.h> /* for vtophys */ 112 #include <vm/pmap.h> /* for vtophys */ 113 #include <machine/bus_pio.h> 114 #include <machine/bus_memio.h> 115 #include <machine/bus.h> 116 #include <machine/resource.h> 117 #include <sys/bus.h> 118 #include <sys/rman.h> 119 120 #include <dev/mii/mii.h> 121 #include <dev/mii/miivar.h> 122 123 #include <pci/pcireg.h> 124 #include <pci/pcivar.h> 125 126 #define DC_USEIOSPACE 127 #ifdef __alpha__ 128 #define SRM_MEDIA 129 #endif 130 131 #include <pci/if_dcreg.h> 132 133 MODULE_DEPEND(dc, miibus, 1, 1, 1); 134 135 /* "controller miibus0" required. See GENERIC if you get errors here. */ 136 #include "miibus_if.h" 137 138 #ifndef lint 139 static const char rcsid[] = 140 "$FreeBSD$"; 141 #endif 142 143 /* 144 * Various supported device vendors/types and their names. 
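 *
 * Several entries below appear twice on purpose: dc_devtype() advances
 * to the following table entry when the PCI revision ID indicates the
 * newer variant of the same device ID (e.g. 98713 -> 98713A,
 * AX88140A -> AX88141, 82c168 -> 82c169, DM9102 -> DM9102A), so the
 * apparent duplicates are not a mistake.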
145 */ 146 static struct dc_type dc_devs[] = { 147 { DC_VENDORID_DEC, DC_DEVICEID_21143, 148 "Intel 21143 10/100BaseTX" }, 149 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009, 150 "Davicom DM9009 10/100BaseTX" }, 151 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100, 152 "Davicom DM9100 10/100BaseTX" }, 153 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, 154 "Davicom DM9102 10/100BaseTX" }, 155 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, 156 "Davicom DM9102A 10/100BaseTX" }, 157 { DC_VENDORID_ADMTEK, DC_DEVICEID_AL981, 158 "ADMtek AL981 10/100BaseTX" }, 159 { DC_VENDORID_ADMTEK, DC_DEVICEID_AN985, 160 "ADMtek AN985 10/100BaseTX" }, 161 { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, 162 "ASIX AX88140A 10/100BaseTX" }, 163 { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, 164 "ASIX AX88141 10/100BaseTX" }, 165 { DC_VENDORID_MX, DC_DEVICEID_98713, 166 "Macronix 98713 10/100BaseTX" }, 167 { DC_VENDORID_MX, DC_DEVICEID_98713, 168 "Macronix 98713A 10/100BaseTX" }, 169 { DC_VENDORID_CP, DC_DEVICEID_98713_CP, 170 "Compex RL100-TX 10/100BaseTX" }, 171 { DC_VENDORID_CP, DC_DEVICEID_98713_CP, 172 "Compex RL100-TX 10/100BaseTX" }, 173 { DC_VENDORID_MX, DC_DEVICEID_987x5, 174 "Macronix 98715/98715A 10/100BaseTX" }, 175 { DC_VENDORID_MX, DC_DEVICEID_987x5, 176 "Macronix 98715AEC-C 10/100BaseTX" }, 177 { DC_VENDORID_MX, DC_DEVICEID_987x5, 178 "Macronix 98725 10/100BaseTX" }, 179 { DC_VENDORID_MX, DC_DEVICEID_98727, 180 "Macronix 98727/98732 10/100BaseTX" }, 181 { DC_VENDORID_LO, DC_DEVICEID_82C115, 182 "LC82C115 PNIC II 10/100BaseTX" }, 183 { DC_VENDORID_LO, DC_DEVICEID_82C168, 184 "82c168 PNIC 10/100BaseTX" }, 185 { DC_VENDORID_LO, DC_DEVICEID_82C168, 186 "82c169 PNIC 10/100BaseTX" }, 187 { DC_VENDORID_ACCTON, DC_DEVICEID_EN1217, 188 "Accton EN1217 10/100BaseTX" }, 189 { DC_VENDORID_ACCTON, DC_DEVICEID_EN2242, 190 "Accton EN2242 MiniPCI 10/100BaseTX" }, 191 { DC_VENDORID_XIRCOM, DC_DEVICEID_X3201, 192 "Xircom X3201 10/100BaseTX" }, 193 { DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500, 194 "Abocom FE2500 10/100BaseTX" }, 195 { DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112, 196 "Conexant LANfinity MiniPCI 10/100BaseTX" }, 197 { DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX, 198 "Hawking CB102 CardBus 10/100" }, 199 { 0, 0, NULL } 200 }; 201 202 static int dc_probe (device_t); 203 static int dc_attach (device_t); 204 static int dc_detach (device_t); 205 static int dc_suspend (device_t); 206 static int dc_resume (device_t); 207 static void dc_acpi (device_t); 208 static struct dc_type *dc_devtype (device_t); 209 static int dc_newbuf (struct dc_softc *, int, struct mbuf *); 210 static int dc_encap (struct dc_softc *, struct mbuf *, u_int32_t *); 211 static int dc_coal (struct dc_softc *, struct mbuf **); 212 static void dc_pnic_rx_bug_war (struct dc_softc *, int); 213 static int dc_rx_resync (struct dc_softc *); 214 static void dc_rxeof (struct dc_softc *); 215 static void dc_txeof (struct dc_softc *); 216 static void dc_tick (void *); 217 static void dc_tx_underrun (struct dc_softc *); 218 static void dc_intr (void *); 219 static void dc_start (struct ifnet *); 220 static int dc_ioctl (struct ifnet *, u_long, caddr_t); 221 static void dc_init (void *); 222 static void dc_stop (struct dc_softc *); 223 static void dc_watchdog (struct ifnet *); 224 static void dc_shutdown (device_t); 225 static int dc_ifmedia_upd (struct ifnet *); 226 static void dc_ifmedia_sts (struct ifnet *, struct ifmediareq *); 227 228 static void dc_delay (struct dc_softc *); 229 static void dc_eeprom_idle (struct dc_softc *); 230 static void dc_eeprom_putbyte (struct dc_softc *, 
int); 231 static void dc_eeprom_getword (struct dc_softc *, int, u_int16_t *); 232 static void dc_eeprom_getword_pnic 233 (struct dc_softc *, int, u_int16_t *); 234 static void dc_eeprom_getword_xircom 235 (struct dc_softc *, int, u_int16_t *); 236 static void dc_eeprom_width (struct dc_softc *); 237 static void dc_read_eeprom (struct dc_softc *, caddr_t, int, int, int); 238 239 static void dc_mii_writebit (struct dc_softc *, int); 240 static int dc_mii_readbit (struct dc_softc *); 241 static void dc_mii_sync (struct dc_softc *); 242 static void dc_mii_send (struct dc_softc *, u_int32_t, int); 243 static int dc_mii_readreg (struct dc_softc *, struct dc_mii_frame *); 244 static int dc_mii_writereg (struct dc_softc *, struct dc_mii_frame *); 245 static int dc_miibus_readreg (device_t, int, int); 246 static int dc_miibus_writereg (device_t, int, int, int); 247 static void dc_miibus_statchg (device_t); 248 static void dc_miibus_mediainit (device_t); 249 250 static void dc_setcfg (struct dc_softc *, int); 251 static u_int32_t dc_crc_le (struct dc_softc *, caddr_t); 252 static u_int32_t dc_crc_be (caddr_t); 253 static void dc_setfilt_21143 (struct dc_softc *); 254 static void dc_setfilt_asix (struct dc_softc *); 255 static void dc_setfilt_admtek (struct dc_softc *); 256 static void dc_setfilt_xircom (struct dc_softc *); 257 258 static void dc_setfilt (struct dc_softc *); 259 260 static void dc_reset (struct dc_softc *); 261 static int dc_list_rx_init (struct dc_softc *); 262 static int dc_list_tx_init (struct dc_softc *); 263 264 static void dc_read_srom (struct dc_softc *, int); 265 static void dc_parse_21143_srom (struct dc_softc *); 266 static void dc_decode_leaf_sia (struct dc_softc *, struct dc_eblock_sia *); 267 static void dc_decode_leaf_mii (struct dc_softc *, struct dc_eblock_mii *); 268 static void dc_decode_leaf_sym (struct dc_softc *, struct dc_eblock_sym *); 269 static void dc_apply_fixup (struct dc_softc *, int); 270 271 #ifdef DC_USEIOSPACE 272 #define DC_RES SYS_RES_IOPORT 273 #define DC_RID DC_PCI_CFBIO 274 #else 275 #define DC_RES SYS_RES_MEMORY 276 #define DC_RID DC_PCI_CFBMA 277 #endif 278 279 static device_method_t dc_methods[] = { 280 /* Device interface */ 281 DEVMETHOD(device_probe, dc_probe), 282 DEVMETHOD(device_attach, dc_attach), 283 DEVMETHOD(device_detach, dc_detach), 284 DEVMETHOD(device_suspend, dc_suspend), 285 DEVMETHOD(device_resume, dc_resume), 286 DEVMETHOD(device_shutdown, dc_shutdown), 287 288 /* bus interface */ 289 DEVMETHOD(bus_print_child, bus_generic_print_child), 290 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 291 292 /* MII interface */ 293 DEVMETHOD(miibus_readreg, dc_miibus_readreg), 294 DEVMETHOD(miibus_writereg, dc_miibus_writereg), 295 DEVMETHOD(miibus_statchg, dc_miibus_statchg), 296 DEVMETHOD(miibus_mediainit, dc_miibus_mediainit), 297 298 { 0, 0 } 299 }; 300 301 static driver_t dc_driver = { 302 "dc", 303 dc_methods, 304 sizeof(struct dc_softc) 305 }; 306 307 static devclass_t dc_devclass; 308 #ifdef __i386__ 309 static int dc_quick=1; 310 SYSCTL_INT(_hw, OID_AUTO, dc_quick, CTLFLAG_RW, 311 &dc_quick,0,"do not mdevget in dc driver"); 312 #endif 313 314 DRIVER_MODULE(if_dc, cardbus, dc_driver, dc_devclass, 0, 0); 315 DRIVER_MODULE(if_dc, pci, dc_driver, dc_devclass, 0, 0); 316 DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, 0, 0); 317 318 #define DC_SETBIT(sc, reg, x) \ 319 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) 320 321 #define DC_CLRBIT(sc, reg, x) \ 322 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) 323 324 
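/*
 * DC_SETBIT()/DC_CLRBIT() above (and the SIO_SET()/SIO_CLR() shorthands
 * below) are simple read-modify-write wrappers around the CSR access
 * macros.  As a usage sketch only -- dc_example_strobe_eeclk() is a
 * made-up name and is not part of the driver -- strobing the EEPROM
 * clock line in the serial I/O register looks like this:
 */
#ifdef notdef
static void
dc_example_strobe_eeclk(struct dc_softc *sc)
{
        DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);   /* raise the EEPROM clock */
        dc_delay(sc);                           /* let the line settle */
        DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);   /* and drop it again */
        dc_delay(sc);
}
#endif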
#define SIO_SET(x) DC_SETBIT(sc, DC_SIO, (x)) 325 #define SIO_CLR(x) DC_CLRBIT(sc, DC_SIO, (x)) 326 327 #define IS_MPSAFE 0 328 329 static void 330 dc_delay(sc) 331 struct dc_softc *sc; 332 { 333 int idx; 334 335 for (idx = (300 / 33) + 1; idx > 0; idx--) 336 CSR_READ_4(sc, DC_BUSCTL); 337 } 338 339 static void 340 dc_eeprom_width(sc) 341 struct dc_softc *sc; 342 { 343 int i; 344 345 /* Force EEPROM to idle state. */ 346 dc_eeprom_idle(sc); 347 348 /* Enter EEPROM access mode. */ 349 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 350 dc_delay(sc); 351 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 352 dc_delay(sc); 353 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 354 dc_delay(sc); 355 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 356 dc_delay(sc); 357 358 for (i = 3; i--;) { 359 if (6 & (1 << i)) 360 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 361 else 362 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 363 dc_delay(sc); 364 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 365 dc_delay(sc); 366 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 367 dc_delay(sc); 368 } 369 370 for (i = 1; i <= 12; i++) { 371 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 372 dc_delay(sc); 373 if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) { 374 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 375 dc_delay(sc); 376 break; 377 } 378 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 379 dc_delay(sc); 380 } 381 382 /* Turn off EEPROM access mode. */ 383 dc_eeprom_idle(sc); 384 385 if (i < 4 || i > 12) 386 sc->dc_romwidth = 6; 387 else 388 sc->dc_romwidth = i; 389 390 /* Enter EEPROM access mode. */ 391 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 392 dc_delay(sc); 393 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 394 dc_delay(sc); 395 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 396 dc_delay(sc); 397 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 398 dc_delay(sc); 399 400 /* Turn off EEPROM access mode. */ 401 dc_eeprom_idle(sc); 402 } 403 404 static void 405 dc_eeprom_idle(sc) 406 struct dc_softc *sc; 407 { 408 register int i; 409 410 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 411 dc_delay(sc); 412 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 413 dc_delay(sc); 414 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 415 dc_delay(sc); 416 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 417 dc_delay(sc); 418 419 for (i = 0; i < 25; i++) { 420 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 421 dc_delay(sc); 422 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 423 dc_delay(sc); 424 } 425 426 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 427 dc_delay(sc); 428 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS); 429 dc_delay(sc); 430 CSR_WRITE_4(sc, DC_SIO, 0x00000000); 431 432 return; 433 } 434 435 /* 436 * Send a read command and address to the EEPROM, check for ACK. 437 */ 438 static void 439 dc_eeprom_putbyte(sc, addr) 440 struct dc_softc *sc; 441 int addr; 442 { 443 register int d, i; 444 445 d = DC_EECMD_READ >> 6; 446 for (i = 3; i--; ) { 447 if (d & (1 << i)) 448 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 449 else 450 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 451 dc_delay(sc); 452 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 453 dc_delay(sc); 454 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 455 dc_delay(sc); 456 } 457 458 /* 459 * Feed in each bit and strobe the clock. 460 */ 461 for (i = sc->dc_romwidth; i--;) { 462 if (addr & (1 << i)) { 463 SIO_SET(DC_SIO_EE_DATAIN); 464 } else { 465 SIO_CLR(DC_SIO_EE_DATAIN); 466 } 467 dc_delay(sc); 468 SIO_SET(DC_SIO_EE_CLK); 469 dc_delay(sc); 470 SIO_CLR(DC_SIO_EE_CLK); 471 dc_delay(sc); 472 } 473 474 return; 475 } 476 477 /* 478 * Read a word of data stored in the EEPROM at address 'addr.' 479 * The PNIC 82c168/82c169 has its own non-standard way to read 480 * the EEPROM. 
481 */ 482 static void 483 dc_eeprom_getword_pnic(sc, addr, dest) 484 struct dc_softc *sc; 485 int addr; 486 u_int16_t *dest; 487 { 488 register int i; 489 u_int32_t r; 490 491 CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr); 492 493 for (i = 0; i < DC_TIMEOUT; i++) { 494 DELAY(1); 495 r = CSR_READ_4(sc, DC_SIO); 496 if (!(r & DC_PN_SIOCTL_BUSY)) { 497 *dest = (u_int16_t)(r & 0xFFFF); 498 return; 499 } 500 } 501 502 return; 503 } 504 505 /* 506 * Read a word of data stored in the EEPROM at address 'addr.' 507 * The Xircom X3201 has its own non-standard way to read 508 * the EEPROM, too. 509 */ 510 static void 511 dc_eeprom_getword_xircom(sc, addr, dest) 512 struct dc_softc *sc; 513 int addr; 514 u_int16_t *dest; 515 { 516 SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); 517 518 addr *= 2; 519 CSR_WRITE_4(sc, DC_ROM, addr | 0x160); 520 *dest = (u_int16_t)CSR_READ_4(sc, DC_SIO)&0xff; 521 addr += 1; 522 CSR_WRITE_4(sc, DC_ROM, addr | 0x160); 523 *dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO)&0xff) << 8; 524 525 SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); 526 return; 527 } 528 529 /* 530 * Read a word of data stored in the EEPROM at address 'addr.' 531 */ 532 static void 533 dc_eeprom_getword(sc, addr, dest) 534 struct dc_softc *sc; 535 int addr; 536 u_int16_t *dest; 537 { 538 register int i; 539 u_int16_t word = 0; 540 541 /* Force EEPROM to idle state. */ 542 dc_eeprom_idle(sc); 543 544 /* Enter EEPROM access mode. */ 545 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 546 dc_delay(sc); 547 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 548 dc_delay(sc); 549 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 550 dc_delay(sc); 551 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 552 dc_delay(sc); 553 554 /* 555 * Send address of word we want to read. 556 */ 557 dc_eeprom_putbyte(sc, addr); 558 559 /* 560 * Start reading bits from EEPROM. 561 */ 562 for (i = 0x8000; i; i >>= 1) { 563 SIO_SET(DC_SIO_EE_CLK); 564 dc_delay(sc); 565 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT) 566 word |= i; 567 dc_delay(sc); 568 SIO_CLR(DC_SIO_EE_CLK); 569 dc_delay(sc); 570 } 571 572 /* Turn off EEPROM access mode. */ 573 dc_eeprom_idle(sc); 574 575 *dest = word; 576 577 return; 578 } 579 580 /* 581 * Read a sequence of words from the EEPROM. 582 */ 583 static void 584 dc_read_eeprom(sc, dest, off, cnt, swap) 585 struct dc_softc *sc; 586 caddr_t dest; 587 int off; 588 int cnt; 589 int swap; 590 { 591 int i; 592 u_int16_t word = 0, *ptr; 593 594 for (i = 0; i < cnt; i++) { 595 if (DC_IS_PNIC(sc)) 596 dc_eeprom_getword_pnic(sc, off + i, &word); 597 else if (DC_IS_XIRCOM(sc)) 598 dc_eeprom_getword_xircom(sc, off + i, &word); 599 else 600 dc_eeprom_getword(sc, off + i, &word); 601 ptr = (u_int16_t *)(dest + (i * 2)); 602 if (swap) 603 *ptr = ntohs(word); 604 else 605 *ptr = word; 606 } 607 608 return; 609 } 610 611 /* 612 * The following two routines are taken from the Macronix 98713 613 * Application Notes pp.19-21. 614 */ 615 /* 616 * Write a bit to the MII bus. 617 */ 618 static void 619 dc_mii_writebit(sc, bit) 620 struct dc_softc *sc; 621 int bit; 622 { 623 if (bit) 624 CSR_WRITE_4(sc, DC_SIO, 625 DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT); 626 else 627 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); 628 629 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK); 630 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK); 631 632 return; 633 } 634 635 /* 636 * Read a bit from the MII bus. 
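 *
 * (Informal note, summarizing dc_mii_readreg()/dc_mii_writereg() below
 * rather than the MII spec: the bit-level helpers here are clocked into
 * complete management frames of the form
 *
 *      <start:2><opcode:2><phy addr:5><reg addr:5>[<turnaround:2>]<data:16>
 *
 * which matches the field widths passed to dc_mii_send() further down.)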
637 */ 638 static int 639 dc_mii_readbit(sc) 640 struct dc_softc *sc; 641 { 642 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR); 643 CSR_READ_4(sc, DC_SIO); 644 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK); 645 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK); 646 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN) 647 return(1); 648 649 return(0); 650 } 651 652 /* 653 * Sync the PHYs by setting data bit and strobing the clock 32 times. 654 */ 655 static void 656 dc_mii_sync(sc) 657 struct dc_softc *sc; 658 { 659 register int i; 660 661 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); 662 663 for (i = 0; i < 32; i++) 664 dc_mii_writebit(sc, 1); 665 666 return; 667 } 668 669 /* 670 * Clock a series of bits through the MII. 671 */ 672 static void 673 dc_mii_send(sc, bits, cnt) 674 struct dc_softc *sc; 675 u_int32_t bits; 676 int cnt; 677 { 678 int i; 679 680 for (i = (0x1 << (cnt - 1)); i; i >>= 1) 681 dc_mii_writebit(sc, bits & i); 682 } 683 684 /* 685 * Read an PHY register through the MII. 686 */ 687 static int 688 dc_mii_readreg(sc, frame) 689 struct dc_softc *sc; 690 struct dc_mii_frame *frame; 691 692 { 693 int i, ack; 694 695 DC_LOCK(sc); 696 697 /* 698 * Set up frame for RX. 699 */ 700 frame->mii_stdelim = DC_MII_STARTDELIM; 701 frame->mii_opcode = DC_MII_READOP; 702 frame->mii_turnaround = 0; 703 frame->mii_data = 0; 704 705 /* 706 * Sync the PHYs. 707 */ 708 dc_mii_sync(sc); 709 710 /* 711 * Send command/address info. 712 */ 713 dc_mii_send(sc, frame->mii_stdelim, 2); 714 dc_mii_send(sc, frame->mii_opcode, 2); 715 dc_mii_send(sc, frame->mii_phyaddr, 5); 716 dc_mii_send(sc, frame->mii_regaddr, 5); 717 718 #ifdef notdef 719 /* Idle bit */ 720 dc_mii_writebit(sc, 1); 721 dc_mii_writebit(sc, 0); 722 #endif 723 724 /* Check for ack */ 725 ack = dc_mii_readbit(sc); 726 727 /* 728 * Now try reading data bits. If the ack failed, we still 729 * need to clock through 16 cycles to keep the PHY(s) in sync. 730 */ 731 if (ack) { 732 for(i = 0; i < 16; i++) { 733 dc_mii_readbit(sc); 734 } 735 goto fail; 736 } 737 738 for (i = 0x8000; i; i >>= 1) { 739 if (!ack) { 740 if (dc_mii_readbit(sc)) 741 frame->mii_data |= i; 742 } 743 } 744 745 fail: 746 747 dc_mii_writebit(sc, 0); 748 dc_mii_writebit(sc, 0); 749 750 DC_UNLOCK(sc); 751 752 if (ack) 753 return(1); 754 return(0); 755 } 756 757 /* 758 * Write to a PHY register through the MII. 759 */ 760 static int 761 dc_mii_writereg(sc, frame) 762 struct dc_softc *sc; 763 struct dc_mii_frame *frame; 764 765 { 766 DC_LOCK(sc); 767 /* 768 * Set up frame for TX. 769 */ 770 771 frame->mii_stdelim = DC_MII_STARTDELIM; 772 frame->mii_opcode = DC_MII_WRITEOP; 773 frame->mii_turnaround = DC_MII_TURNAROUND; 774 775 /* 776 * Sync the PHYs. 777 */ 778 dc_mii_sync(sc); 779 780 dc_mii_send(sc, frame->mii_stdelim, 2); 781 dc_mii_send(sc, frame->mii_opcode, 2); 782 dc_mii_send(sc, frame->mii_phyaddr, 5); 783 dc_mii_send(sc, frame->mii_regaddr, 5); 784 dc_mii_send(sc, frame->mii_turnaround, 2); 785 dc_mii_send(sc, frame->mii_data, 16); 786 787 /* Idle bit. 
 */
        dc_mii_writebit(sc, 0);
        dc_mii_writebit(sc, 0);

        DC_UNLOCK(sc);

        return(0);
}

static int
dc_miibus_readreg(dev, phy, reg)
        device_t                dev;
        int                     phy, reg;
{
        struct dc_mii_frame     frame;
        struct dc_softc         *sc;
        int                     i, rval, phy_reg = 0;

        sc = device_get_softc(dev);
        bzero((char *)&frame, sizeof(frame));

        /*
         * Note: both the AL981 and AN985 have internal PHYs;
         * however, the AL981 provides direct access to the PHY
         * registers while the AN985 uses a serial MII interface.
         * The AN985's MII interface is also buggy in that you
         * can read from any MII address (0 to 31), but only address 1
         * behaves normally. To deal with both cases, we pretend
         * that the PHY is at MII address 1.
         */
        if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
                return(0);

        /*
         * Note: the ukphy probes of the RS7112 report a PHY at
         * MII address 0 (possibly HomePNA?) and 1 (ethernet),
         * so we only respond to the correct one.
         */
        if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
                return(0);

        if (sc->dc_pmode != DC_PMODE_MII) {
                if (phy == (MII_NPHY - 1)) {
                        switch(reg) {
                        case MII_BMSR:
                        /*
                         * Fake something to make the probe
                         * code think there's a PHY here.
                         */
                                return(BMSR_MEDIAMASK);
                                break;
                        case MII_PHYIDR1:
                                if (DC_IS_PNIC(sc))
                                        return(DC_VENDORID_LO);
                                return(DC_VENDORID_DEC);
                                break;
                        case MII_PHYIDR2:
                                if (DC_IS_PNIC(sc))
                                        return(DC_DEVICEID_82C168);
                                return(DC_DEVICEID_21143);
                                break;
                        default:
                                return(0);
                                break;
                        }
                } else
                        return(0);
        }

        if (DC_IS_PNIC(sc)) {
                CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
                    (phy << 23) | (reg << 18));
                for (i = 0; i < DC_TIMEOUT; i++) {
                        DELAY(1);
                        rval = CSR_READ_4(sc, DC_PN_MII);
                        if (!(rval & DC_PN_MII_BUSY)) {
                                rval &= 0xFFFF;
                                return(rval == 0xFFFF ?
0 : rval); 865 } 866 } 867 return(0); 868 } 869 870 if (DC_IS_COMET(sc)) { 871 switch(reg) { 872 case MII_BMCR: 873 phy_reg = DC_AL_BMCR; 874 break; 875 case MII_BMSR: 876 phy_reg = DC_AL_BMSR; 877 break; 878 case MII_PHYIDR1: 879 phy_reg = DC_AL_VENID; 880 break; 881 case MII_PHYIDR2: 882 phy_reg = DC_AL_DEVID; 883 break; 884 case MII_ANAR: 885 phy_reg = DC_AL_ANAR; 886 break; 887 case MII_ANLPAR: 888 phy_reg = DC_AL_LPAR; 889 break; 890 case MII_ANER: 891 phy_reg = DC_AL_ANER; 892 break; 893 default: 894 printf("dc%d: phy_read: bad phy register %x\n", 895 sc->dc_unit, reg); 896 return(0); 897 break; 898 } 899 900 rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF; 901 902 if (rval == 0xFFFF) 903 return(0); 904 return(rval); 905 } 906 907 frame.mii_phyaddr = phy; 908 frame.mii_regaddr = reg; 909 if (sc->dc_type == DC_TYPE_98713) { 910 phy_reg = CSR_READ_4(sc, DC_NETCFG); 911 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); 912 } 913 dc_mii_readreg(sc, &frame); 914 if (sc->dc_type == DC_TYPE_98713) 915 CSR_WRITE_4(sc, DC_NETCFG, phy_reg); 916 917 return(frame.mii_data); 918 } 919 920 static int 921 dc_miibus_writereg(dev, phy, reg, data) 922 device_t dev; 923 int phy, reg, data; 924 { 925 struct dc_softc *sc; 926 struct dc_mii_frame frame; 927 int i, phy_reg = 0; 928 929 sc = device_get_softc(dev); 930 bzero((char *)&frame, sizeof(frame)); 931 932 if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) 933 return(0); 934 935 if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR) 936 return(0); 937 938 if (DC_IS_PNIC(sc)) { 939 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE | 940 (phy << 23) | (reg << 10) | data); 941 for (i = 0; i < DC_TIMEOUT; i++) { 942 if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY)) 943 break; 944 } 945 return(0); 946 } 947 948 if (DC_IS_COMET(sc)) { 949 switch(reg) { 950 case MII_BMCR: 951 phy_reg = DC_AL_BMCR; 952 break; 953 case MII_BMSR: 954 phy_reg = DC_AL_BMSR; 955 break; 956 case MII_PHYIDR1: 957 phy_reg = DC_AL_VENID; 958 break; 959 case MII_PHYIDR2: 960 phy_reg = DC_AL_DEVID; 961 break; 962 case MII_ANAR: 963 phy_reg = DC_AL_ANAR; 964 break; 965 case MII_ANLPAR: 966 phy_reg = DC_AL_LPAR; 967 break; 968 case MII_ANER: 969 phy_reg = DC_AL_ANER; 970 break; 971 default: 972 printf("dc%d: phy_write: bad phy register %x\n", 973 sc->dc_unit, reg); 974 return(0); 975 break; 976 } 977 978 CSR_WRITE_4(sc, phy_reg, data); 979 return(0); 980 } 981 982 frame.mii_phyaddr = phy; 983 frame.mii_regaddr = reg; 984 frame.mii_data = data; 985 986 if (sc->dc_type == DC_TYPE_98713) { 987 phy_reg = CSR_READ_4(sc, DC_NETCFG); 988 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); 989 } 990 dc_mii_writereg(sc, &frame); 991 if (sc->dc_type == DC_TYPE_98713) 992 CSR_WRITE_4(sc, DC_NETCFG, phy_reg); 993 994 return(0); 995 } 996 997 static void 998 dc_miibus_statchg(dev) 999 device_t dev; 1000 { 1001 struct dc_softc *sc; 1002 struct mii_data *mii; 1003 struct ifmedia *ifm; 1004 1005 sc = device_get_softc(dev); 1006 if (DC_IS_ADMTEK(sc)) 1007 return; 1008 1009 mii = device_get_softc(sc->dc_miibus); 1010 ifm = &mii->mii_media; 1011 if (DC_IS_DAVICOM(sc) && 1012 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { 1013 dc_setcfg(sc, ifm->ifm_media); 1014 sc->dc_if_media = ifm->ifm_media; 1015 } else { 1016 dc_setcfg(sc, mii->mii_media_active); 1017 sc->dc_if_media = mii->mii_media_active; 1018 } 1019 1020 return; 1021 } 1022 1023 /* 1024 * Special support for DM9102A cards with HomePNA PHYs. 
Note: 1025 * with the Davicom DM9102A/DM9801 eval board that I have, it seems 1026 * to be impossible to talk to the management interface of the DM9801 1027 * PHY (its MDIO pin is not connected to anything). Consequently, 1028 * the driver has to just 'know' about the additional mode and deal 1029 * with it itself. *sigh* 1030 */ 1031 static void 1032 dc_miibus_mediainit(dev) 1033 device_t dev; 1034 { 1035 struct dc_softc *sc; 1036 struct mii_data *mii; 1037 struct ifmedia *ifm; 1038 int rev; 1039 1040 rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF; 1041 1042 sc = device_get_softc(dev); 1043 mii = device_get_softc(sc->dc_miibus); 1044 ifm = &mii->mii_media; 1045 1046 if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A) 1047 ifmedia_add(ifm, IFM_ETHER|IFM_HPNA_1, 0, NULL); 1048 1049 return; 1050 } 1051 1052 #define DC_POLY 0xEDB88320 1053 #define DC_BITS_512 9 1054 #define DC_BITS_128 7 1055 #define DC_BITS_64 6 1056 1057 static u_int32_t 1058 dc_crc_le(sc, addr) 1059 struct dc_softc *sc; 1060 caddr_t addr; 1061 { 1062 u_int32_t idx, bit, data, crc; 1063 1064 /* Compute CRC for the address value. */ 1065 crc = 0xFFFFFFFF; /* initial value */ 1066 1067 for (idx = 0; idx < 6; idx++) { 1068 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) 1069 crc = (crc >> 1) ^ (((crc ^ data) & 1) ? DC_POLY : 0); 1070 } 1071 1072 /* 1073 * The hash table on the PNIC II and the MX98715AEC-C/D/E 1074 * chips is only 128 bits wide. 1075 */ 1076 if (sc->dc_flags & DC_128BIT_HASH) 1077 return (crc & ((1 << DC_BITS_128) - 1)); 1078 1079 /* The hash table on the MX98715BEC is only 64 bits wide. */ 1080 if (sc->dc_flags & DC_64BIT_HASH) 1081 return (crc & ((1 << DC_BITS_64) - 1)); 1082 1083 /* Xircom's hash filtering table is different (read: weird) */ 1084 /* Xircom uses the LEAST significant bits */ 1085 if (DC_IS_XIRCOM(sc)) { 1086 if ((crc & 0x180) == 0x180) 1087 return (crc & 0x0F) + (crc & 0x70)*3 + (14 << 4); 1088 else 1089 return (crc & 0x1F) + ((crc>>1) & 0xF0)*3 + (12 << 4); 1090 } 1091 1092 return (crc & ((1 << DC_BITS_512) - 1)); 1093 } 1094 1095 /* 1096 * Calculate CRC of a multicast group address, return the lower 6 bits. 1097 */ 1098 static u_int32_t 1099 dc_crc_be(addr) 1100 caddr_t addr; 1101 { 1102 u_int32_t crc, carry; 1103 int i, j; 1104 u_int8_t c; 1105 1106 /* Compute CRC for the address value. */ 1107 crc = 0xFFFFFFFF; /* initial value */ 1108 1109 for (i = 0; i < 6; i++) { 1110 c = *(addr + i); 1111 for (j = 0; j < 8; j++) { 1112 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); 1113 crc <<= 1; 1114 c >>= 1; 1115 if (carry) 1116 crc = (crc ^ 0x04c11db6) | carry; 1117 } 1118 } 1119 1120 /* return the filter bit position */ 1121 return((crc >> 26) & 0x0000003F); 1122 } 1123 1124 /* 1125 * 21143-style RX filter setup routine. Filter programming is done by 1126 * downloading a special setup frame into the TX engine. 21143, Macronix, 1127 * PNIC, PNIC II and Davicom chips are programmed this way. 1128 * 1129 * We always program the chip using 'hash perfect' mode, i.e. one perfect 1130 * address (our node address) and a 512-bit hash filter for multicast 1131 * frames. We also sneak the broadcast address into the hash filter since 1132 * we need that too. 
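 *
 * As a rough sketch of the layout built below (a restatement of the code,
 * not of the 21143 databook): the 512-bit hash lives in the low 16 bits
 * of the first 32 longwords of the setup buffer, and the station address
 * is dropped into longwords 39-41:
 *
 *      h = dc_crc_le(sc, addr);         hash index in the range 0..511
 *      sp[h >> 4] |= 1 << (h & 0xF);    set the corresponding filter bit
 *      sp[39..41] = the three 16-bit words of our MAC address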
 */
static void
dc_setfilt_21143(sc)
        struct dc_softc         *sc;
{
        struct dc_desc          *sframe;
        u_int32_t               h, *sp;
        struct ifmultiaddr      *ifma;
        struct ifnet            *ifp;
        int                     i;

        ifp = &sc->arpcom.ac_if;

        i = sc->dc_cdata.dc_tx_prod;
        DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
        sc->dc_cdata.dc_tx_cnt++;
        sframe = &sc->dc_ldata->dc_tx_list[i];
        sp = (u_int32_t *)&sc->dc_cdata.dc_sbuf;
        bzero((char *)sp, DC_SFRAME_LEN);

        sframe->dc_data = vtophys(&sc->dc_cdata.dc_sbuf);
        sframe->dc_ctl = DC_SFRAME_LEN | DC_TXCTL_SETUP | DC_TXCTL_TLINK |
            DC_FILTER_HASHPERF | DC_TXCTL_FINT;

        sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)&sc->dc_cdata.dc_sbuf;

        /* If we want promiscuous mode, set the allframes bit. */
        if (ifp->if_flags & IFF_PROMISC)
                DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
        else
                DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

        if (ifp->if_flags & IFF_ALLMULTI)
                DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
        else
                DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                h = dc_crc_le(sc,
                    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
                sp[h >> 4] |= 1 << (h & 0xF);
        }

        if (ifp->if_flags & IFF_BROADCAST) {
                h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
                sp[h >> 4] |= 1 << (h & 0xF);
        }

        /* Set our MAC address */
        sp[39] = ((u_int16_t *)sc->arpcom.ac_enaddr)[0];
        sp[40] = ((u_int16_t *)sc->arpcom.ac_enaddr)[1];
        sp[41] = ((u_int16_t *)sc->arpcom.ac_enaddr)[2];

        sframe->dc_status = DC_TXSTAT_OWN;
        CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

        /*
         * The PNIC takes an exceedingly long time to process its
         * setup frame; wait 10ms after posting the setup frame
         * before proceeding, just so it has time to swallow its
         * medicine.
         */
        DELAY(10000);

        ifp->if_timer = 5;

        return;
}

static void
dc_setfilt_admtek(sc)
        struct dc_softc         *sc;
{
        struct ifnet            *ifp;
        int                     h = 0;
        u_int32_t               hashes[2] = { 0, 0 };
        struct ifmultiaddr      *ifma;

        ifp = &sc->arpcom.ac_if;

        /* Init our MAC address */
        CSR_WRITE_4(sc, DC_AL_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
        CSR_WRITE_4(sc, DC_AL_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));

        /* If we want promiscuous mode, set the allframes bit. */
        if (ifp->if_flags & IFF_PROMISC)
                DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
        else
                DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

        if (ifp->if_flags & IFF_ALLMULTI)
                DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
        else
                DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

        /* first, zot all the existing hash bits */
        CSR_WRITE_4(sc, DC_AL_MAR0, 0);
        CSR_WRITE_4(sc, DC_AL_MAR1, 0);

        /*
         * If we're already in promisc or allmulti mode, we
         * don't have to bother programming the multicast filter.
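 *
 * (Otherwise, each multicast address is folded into the 64-bit MAR
 * filter by the loop that follows:
 *
 *      h = dc_crc_be(addr);             6-bit index in the range 0..63
 *      h < 32 ? set bit h of MAR0 : set bit (h - 32) of MAR1
 *
 * This is just a restatement of the code below.)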
1237 */ 1238 if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) 1239 return; 1240 1241 /* now program new ones */ 1242 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1243 if (ifma->ifma_addr->sa_family != AF_LINK) 1244 continue; 1245 h = dc_crc_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 1246 if (h < 32) 1247 hashes[0] |= (1 << h); 1248 else 1249 hashes[1] |= (1 << (h - 32)); 1250 } 1251 1252 CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]); 1253 CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]); 1254 1255 return; 1256 } 1257 1258 static void 1259 dc_setfilt_asix(sc) 1260 struct dc_softc *sc; 1261 { 1262 struct ifnet *ifp; 1263 int h = 0; 1264 u_int32_t hashes[2] = { 0, 0 }; 1265 struct ifmultiaddr *ifma; 1266 1267 ifp = &sc->arpcom.ac_if; 1268 1269 /* Init our MAC address */ 1270 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0); 1271 CSR_WRITE_4(sc, DC_AX_FILTDATA, 1272 *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); 1273 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1); 1274 CSR_WRITE_4(sc, DC_AX_FILTDATA, 1275 *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); 1276 1277 /* If we want promiscuous mode, set the allframes bit. */ 1278 if (ifp->if_flags & IFF_PROMISC) 1279 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1280 else 1281 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1282 1283 if (ifp->if_flags & IFF_ALLMULTI) 1284 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1285 else 1286 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1287 1288 /* 1289 * The ASIX chip has a special bit to enable reception 1290 * of broadcast frames. 1291 */ 1292 if (ifp->if_flags & IFF_BROADCAST) 1293 DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); 1294 else 1295 DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); 1296 1297 /* first, zot all the existing hash bits */ 1298 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); 1299 CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); 1300 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); 1301 CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); 1302 1303 /* 1304 * If we're already in promisc or allmulti mode, we 1305 * don't have to bother programming the multicast filter. 
 */
        if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
                return;

        /* now program new ones */
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                h = dc_crc_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
                if (h < 32)
                        hashes[0] |= (1 << h);
                else
                        hashes[1] |= (1 << (h - 32));
        }

        CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
        CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
        CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
        CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);

        return;
}

static void
dc_setfilt_xircom(sc)
        struct dc_softc         *sc;
{
        struct dc_desc          *sframe;
        u_int32_t               h, *sp;
        struct ifmultiaddr      *ifma;
        struct ifnet            *ifp;
        int                     i;

        ifp = &sc->arpcom.ac_if;
        DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

        i = sc->dc_cdata.dc_tx_prod;
        DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
        sc->dc_cdata.dc_tx_cnt++;
        sframe = &sc->dc_ldata->dc_tx_list[i];
        sp = (u_int32_t *)&sc->dc_cdata.dc_sbuf;
        bzero((char *)sp, DC_SFRAME_LEN);

        sframe->dc_data = vtophys(&sc->dc_cdata.dc_sbuf);
        sframe->dc_ctl = DC_SFRAME_LEN | DC_TXCTL_SETUP | DC_TXCTL_TLINK |
            DC_FILTER_HASHPERF | DC_TXCTL_FINT;

        sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)&sc->dc_cdata.dc_sbuf;

        /* If we want promiscuous mode, set the allframes bit. */
        if (ifp->if_flags & IFF_PROMISC)
                DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
        else
                DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

        if (ifp->if_flags & IFF_ALLMULTI)
                DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
        else
                DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                h = dc_crc_le(sc,
                    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
                sp[h >> 4] |= 1 << (h & 0xF);
        }

        if (ifp->if_flags & IFF_BROADCAST) {
                h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
                sp[h >> 4] |= 1 << (h & 0xF);
        }

        /* Set our MAC address */
        sp[0] = ((u_int16_t *)sc->arpcom.ac_enaddr)[0];
        sp[1] = ((u_int16_t *)sc->arpcom.ac_enaddr)[1];
        sp[2] = ((u_int16_t *)sc->arpcom.ac_enaddr)[2];

        DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
        DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
        ifp->if_flags |= IFF_RUNNING;
        sframe->dc_status = DC_TXSTAT_OWN;
        CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

        /*
         * wait some time...
         */
        DELAY(1000);

        ifp->if_timer = 5;

        return;
}

static void
dc_setfilt(sc)
        struct dc_softc         *sc;
{
        if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
            DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
                dc_setfilt_21143(sc);

        if (DC_IS_ASIX(sc))
                dc_setfilt_asix(sc);

        if (DC_IS_ADMTEK(sc))
                dc_setfilt_admtek(sc);

        if (DC_IS_XIRCOM(sc))
                dc_setfilt_xircom(sc);

        return;
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
 * netconfig register, we first have to put the transmit and/or receive
 * logic in the idle state.
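 *
 * A condensed sketch of the sequence dc_setcfg() follows (an informal
 * summary of the code below, not of the databook):
 *
 *      DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);
 *      poll DC_ISR until TX is idle and RX is stopped or waiting;
 *      adjust SPEEDSEL/PORTSEL/PCS/SCRAMBLER/FULLDUPLEX as needed;
 *      DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);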
1424 */ 1425 static void 1426 dc_setcfg(sc, media) 1427 struct dc_softc *sc; 1428 int media; 1429 { 1430 int i, restart = 0; 1431 u_int32_t isr; 1432 1433 if (IFM_SUBTYPE(media) == IFM_NONE) 1434 return; 1435 1436 if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) { 1437 restart = 1; 1438 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)); 1439 1440 for (i = 0; i < DC_TIMEOUT; i++) { 1441 isr = CSR_READ_4(sc, DC_ISR); 1442 if (isr & DC_ISR_TX_IDLE && 1443 ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED || 1444 (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT)) 1445 break; 1446 DELAY(10); 1447 } 1448 1449 if (i == DC_TIMEOUT) 1450 printf("dc%d: failed to force tx and " 1451 "rx to idle state\n", sc->dc_unit); 1452 } 1453 1454 if (IFM_SUBTYPE(media) == IFM_100_TX) { 1455 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL); 1456 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT); 1457 if (sc->dc_pmode == DC_PMODE_MII) { 1458 int watchdogreg; 1459 1460 if (DC_IS_INTEL(sc)) { 1461 /* there's a write enable bit here that reads as 1 */ 1462 watchdogreg = CSR_READ_4(sc, DC_WATCHDOG); 1463 watchdogreg &= ~DC_WDOG_CTLWREN; 1464 watchdogreg |= DC_WDOG_JABBERDIS; 1465 CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg); 1466 } else { 1467 DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS); 1468 } 1469 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS| 1470 DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER)); 1471 if (sc->dc_type == DC_TYPE_98713) 1472 DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS| 1473 DC_NETCFG_SCRAMBLER)); 1474 if (!DC_IS_DAVICOM(sc)) 1475 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1476 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1477 if (DC_IS_INTEL(sc)) 1478 dc_apply_fixup(sc, IFM_AUTO); 1479 } else { 1480 if (DC_IS_PNIC(sc)) { 1481 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL); 1482 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP); 1483 DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL); 1484 } 1485 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1486 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1487 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER); 1488 if (DC_IS_INTEL(sc)) 1489 dc_apply_fixup(sc, 1490 (media & IFM_GMASK) == IFM_FDX ? 
1491 IFM_100_TX|IFM_FDX : IFM_100_TX); 1492 } 1493 } 1494 1495 if (IFM_SUBTYPE(media) == IFM_10_T) { 1496 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL); 1497 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT); 1498 if (sc->dc_pmode == DC_PMODE_MII) { 1499 int watchdogreg; 1500 1501 /* there's a write enable bit here that reads as 1 */ 1502 if (DC_IS_INTEL(sc)) { 1503 watchdogreg = CSR_READ_4(sc, DC_WATCHDOG); 1504 watchdogreg &= ~DC_WDOG_CTLWREN; 1505 watchdogreg |= DC_WDOG_JABBERDIS; 1506 CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg); 1507 } else { 1508 DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS); 1509 } 1510 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS| 1511 DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER)); 1512 if (sc->dc_type == DC_TYPE_98713) 1513 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1514 if (!DC_IS_DAVICOM(sc)) 1515 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1516 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1517 if (DC_IS_INTEL(sc)) 1518 dc_apply_fixup(sc, IFM_AUTO); 1519 } else { 1520 if (DC_IS_PNIC(sc)) { 1521 DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL); 1522 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP); 1523 DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL); 1524 } 1525 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1526 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1527 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER); 1528 if (DC_IS_INTEL(sc)) { 1529 DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET); 1530 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1531 if ((media & IFM_GMASK) == IFM_FDX) 1532 DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D); 1533 else 1534 DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F); 1535 DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET); 1536 DC_CLRBIT(sc, DC_10BTCTRL, 1537 DC_TCTL_AUTONEGENBL); 1538 dc_apply_fixup(sc, 1539 (media & IFM_GMASK) == IFM_FDX ? 1540 IFM_10_T|IFM_FDX : IFM_10_T); 1541 DELAY(20000); 1542 } 1543 } 1544 } 1545 1546 /* 1547 * If this is a Davicom DM9102A card with a DM9801 HomePNA 1548 * PHY and we want HomePNA mode, set the portsel bit to turn 1549 * on the external MII port. 1550 */ 1551 if (DC_IS_DAVICOM(sc)) { 1552 if (IFM_SUBTYPE(media) == IFM_HPNA_1) { 1553 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1554 sc->dc_link = 1; 1555 } else { 1556 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1557 } 1558 } 1559 1560 if (DC_IS_ADMTEK(sc)) 1561 DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR); 1562 1563 if ((media & IFM_GMASK) == IFM_FDX) { 1564 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX); 1565 if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc)) 1566 DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX); 1567 } else { 1568 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX); 1569 if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc)) 1570 DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX); 1571 } 1572 1573 if (restart) 1574 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON); 1575 1576 return; 1577 } 1578 1579 static void 1580 dc_reset(sc) 1581 struct dc_softc *sc; 1582 { 1583 register int i; 1584 1585 DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET); 1586 1587 for (i = 0; i < DC_TIMEOUT; i++) { 1588 DELAY(10); 1589 if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET)) 1590 break; 1591 } 1592 1593 if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_CONEXANT(sc) || 1594 DC_IS_XIRCOM(sc) || DC_IS_INTEL(sc)) { 1595 DELAY(10000); 1596 DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET); 1597 i = 0; 1598 } 1599 1600 if (i == DC_TIMEOUT) 1601 printf("dc%d: reset never completed!\n", sc->dc_unit); 1602 1603 /* Wait a little while for the chip to get its brains in order. 
*/ 1604 DELAY(1000); 1605 1606 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 1607 CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000); 1608 CSR_WRITE_4(sc, DC_NETCFG, 0x00000000); 1609 1610 /* 1611 * Bring the SIA out of reset. In some cases, it looks 1612 * like failing to unreset the SIA soon enough gets it 1613 * into a state where it will never come out of reset 1614 * until we reset the whole chip again. 1615 */ 1616 if (DC_IS_INTEL(sc)) { 1617 DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET); 1618 CSR_WRITE_4(sc, DC_10BTCTRL, 0); 1619 CSR_WRITE_4(sc, DC_WATCHDOG, 0); 1620 } 1621 1622 return; 1623 } 1624 1625 static struct dc_type * 1626 dc_devtype(dev) 1627 device_t dev; 1628 { 1629 struct dc_type *t; 1630 u_int32_t rev; 1631 1632 t = dc_devs; 1633 1634 while(t->dc_name != NULL) { 1635 if ((pci_get_vendor(dev) == t->dc_vid) && 1636 (pci_get_device(dev) == t->dc_did)) { 1637 /* Check the PCI revision */ 1638 rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF; 1639 if (t->dc_did == DC_DEVICEID_98713 && 1640 rev >= DC_REVISION_98713A) 1641 t++; 1642 if (t->dc_did == DC_DEVICEID_98713_CP && 1643 rev >= DC_REVISION_98713A) 1644 t++; 1645 if (t->dc_did == DC_DEVICEID_987x5 && 1646 rev >= DC_REVISION_98715AEC_C) 1647 t++; 1648 if (t->dc_did == DC_DEVICEID_987x5 && 1649 rev >= DC_REVISION_98725) 1650 t++; 1651 if (t->dc_did == DC_DEVICEID_AX88140A && 1652 rev >= DC_REVISION_88141) 1653 t++; 1654 if (t->dc_did == DC_DEVICEID_82C168 && 1655 rev >= DC_REVISION_82C169) 1656 t++; 1657 if (t->dc_did == DC_DEVICEID_DM9102 && 1658 rev >= DC_REVISION_DM9102A) 1659 t++; 1660 return(t); 1661 } 1662 t++; 1663 } 1664 1665 return(NULL); 1666 } 1667 1668 /* 1669 * Probe for a 21143 or clone chip. Check the PCI vendor and device 1670 * IDs against our list and return a device name if we find a match. 1671 * We do a little bit of extra work to identify the exact type of 1672 * chip. The MX98713 and MX98713A have the same PCI vendor/device ID, 1673 * but different revision IDs. The same is true for 98715/98715A 1674 * chips and the 98725, as well as the ASIX and ADMtek chips. In some 1675 * cases, the exact chip revision affects driver behavior. 1676 */ 1677 static int 1678 dc_probe(dev) 1679 device_t dev; 1680 { 1681 struct dc_type *t; 1682 1683 t = dc_devtype(dev); 1684 1685 if (t != NULL) { 1686 device_set_desc(dev, t->dc_name); 1687 return(0); 1688 } 1689 1690 return(ENXIO); 1691 } 1692 1693 static void 1694 dc_acpi(dev) 1695 device_t dev; 1696 { 1697 int unit; 1698 1699 unit = device_get_unit(dev); 1700 1701 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1702 u_int32_t iobase, membase, irq; 1703 1704 /* Save important PCI config data. */ 1705 iobase = pci_read_config(dev, DC_PCI_CFBIO, 4); 1706 membase = pci_read_config(dev, DC_PCI_CFBMA, 4); 1707 irq = pci_read_config(dev, DC_PCI_CFIT, 4); 1708 1709 /* Reset the power state. */ 1710 printf("dc%d: chip is in D%d power mode " 1711 "-- setting to D0\n", unit, 1712 pci_get_powerstate(dev)); 1713 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1714 1715 /* Restore PCI config data. 
*/ 1716 pci_write_config(dev, DC_PCI_CFBIO, iobase, 4); 1717 pci_write_config(dev, DC_PCI_CFBMA, membase, 4); 1718 pci_write_config(dev, DC_PCI_CFIT, irq, 4); 1719 } 1720 1721 return; 1722 } 1723 1724 static void 1725 dc_apply_fixup(sc, media) 1726 struct dc_softc *sc; 1727 int media; 1728 { 1729 struct dc_mediainfo *m; 1730 u_int8_t *p; 1731 int i; 1732 u_int32_t reg; 1733 1734 m = sc->dc_mi; 1735 1736 while (m != NULL) { 1737 if (m->dc_media == media) 1738 break; 1739 m = m->dc_next; 1740 } 1741 1742 if (m == NULL) 1743 return; 1744 1745 for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) { 1746 reg = (p[0] | (p[1] << 8)) << 16; 1747 CSR_WRITE_4(sc, DC_WATCHDOG, reg); 1748 } 1749 1750 for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) { 1751 reg = (p[0] | (p[1] << 8)) << 16; 1752 CSR_WRITE_4(sc, DC_WATCHDOG, reg); 1753 } 1754 1755 return; 1756 } 1757 1758 static void 1759 dc_decode_leaf_sia(sc, l) 1760 struct dc_softc *sc; 1761 struct dc_eblock_sia *l; 1762 { 1763 struct dc_mediainfo *m; 1764 1765 m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT); 1766 bzero(m, sizeof(struct dc_mediainfo)); 1767 if (l->dc_sia_code == DC_SIA_CODE_10BT) 1768 m->dc_media = IFM_10_T; 1769 1770 if (l->dc_sia_code == DC_SIA_CODE_10BT_FDX) 1771 m->dc_media = IFM_10_T|IFM_FDX; 1772 1773 if (l->dc_sia_code == DC_SIA_CODE_10B2) 1774 m->dc_media = IFM_10_2; 1775 1776 if (l->dc_sia_code == DC_SIA_CODE_10B5) 1777 m->dc_media = IFM_10_5; 1778 1779 m->dc_gp_len = 2; 1780 m->dc_gp_ptr = (u_int8_t *)&l->dc_sia_gpio_ctl; 1781 1782 m->dc_next = sc->dc_mi; 1783 sc->dc_mi = m; 1784 1785 sc->dc_pmode = DC_PMODE_SIA; 1786 1787 return; 1788 } 1789 1790 static void 1791 dc_decode_leaf_sym(sc, l) 1792 struct dc_softc *sc; 1793 struct dc_eblock_sym *l; 1794 { 1795 struct dc_mediainfo *m; 1796 1797 m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT); 1798 bzero(m, sizeof(struct dc_mediainfo)); 1799 if (l->dc_sym_code == DC_SYM_CODE_100BT) 1800 m->dc_media = IFM_100_TX; 1801 1802 if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX) 1803 m->dc_media = IFM_100_TX|IFM_FDX; 1804 1805 m->dc_gp_len = 2; 1806 m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl; 1807 1808 m->dc_next = sc->dc_mi; 1809 sc->dc_mi = m; 1810 1811 sc->dc_pmode = DC_PMODE_SYM; 1812 1813 return; 1814 } 1815 1816 static void 1817 dc_decode_leaf_mii(sc, l) 1818 struct dc_softc *sc; 1819 struct dc_eblock_mii *l; 1820 { 1821 u_int8_t *p; 1822 struct dc_mediainfo *m; 1823 1824 m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT); 1825 bzero(m, sizeof(struct dc_mediainfo)); 1826 /* We abuse IFM_AUTO to represent MII. 
*/ 1827 m->dc_media = IFM_AUTO; 1828 m->dc_gp_len = l->dc_gpr_len; 1829 1830 p = (u_int8_t *)l; 1831 p += sizeof(struct dc_eblock_mii); 1832 m->dc_gp_ptr = p; 1833 p += 2 * l->dc_gpr_len; 1834 m->dc_reset_len = *p; 1835 p++; 1836 m->dc_reset_ptr = p; 1837 1838 m->dc_next = sc->dc_mi; 1839 sc->dc_mi = m; 1840 1841 return; 1842 } 1843 1844 static void 1845 dc_read_srom(sc, bits) 1846 struct dc_softc *sc; 1847 int bits; 1848 { 1849 int size; 1850 1851 size = 2 << bits; 1852 sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT); 1853 dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0); 1854 } 1855 1856 static void 1857 dc_parse_21143_srom(sc) 1858 struct dc_softc *sc; 1859 { 1860 struct dc_leaf_hdr *lhdr; 1861 struct dc_eblock_hdr *hdr; 1862 int i, loff; 1863 char *ptr; 1864 1865 loff = sc->dc_srom[27]; 1866 lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]); 1867 1868 ptr = (char *)lhdr; 1869 ptr += sizeof(struct dc_leaf_hdr) - 1; 1870 for (i = 0; i < lhdr->dc_mcnt; i++) { 1871 hdr = (struct dc_eblock_hdr *)ptr; 1872 switch(hdr->dc_type) { 1873 case DC_EBLOCK_MII: 1874 dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr); 1875 break; 1876 case DC_EBLOCK_SIA: 1877 dc_decode_leaf_sia(sc, (struct dc_eblock_sia *)hdr); 1878 break; 1879 case DC_EBLOCK_SYM: 1880 dc_decode_leaf_sym(sc, (struct dc_eblock_sym *)hdr); 1881 break; 1882 default: 1883 /* Don't care. Yet. */ 1884 break; 1885 } 1886 ptr += (hdr->dc_len & 0x7F); 1887 ptr++; 1888 } 1889 1890 return; 1891 } 1892 1893 /* 1894 * Attach the interface. Allocate softc structures, do ifmedia 1895 * setup and ethernet/BPF attach. 1896 */ 1897 static int 1898 dc_attach(dev) 1899 device_t dev; 1900 { 1901 int tmp = 0; 1902 u_char eaddr[ETHER_ADDR_LEN]; 1903 u_int32_t command; 1904 struct dc_softc *sc; 1905 struct ifnet *ifp; 1906 u_int32_t revision; 1907 int unit, error = 0, rid, mac_offset; 1908 u_int8_t *mac; 1909 1910 sc = device_get_softc(dev); 1911 unit = device_get_unit(dev); 1912 bzero(sc, sizeof(struct dc_softc)); 1913 1914 mtx_init(&sc->dc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1915 MTX_DEF | MTX_RECURSE); 1916 1917 /* 1918 * Handle power management nonsense. 1919 */ 1920 dc_acpi(dev); 1921 1922 /* 1923 * Map control/status registers. 1924 */ 1925 pci_enable_busmaster(dev); 1926 pci_enable_io(dev, SYS_RES_IOPORT); 1927 pci_enable_io(dev, SYS_RES_MEMORY); 1928 command = pci_read_config(dev, PCIR_COMMAND, 4); 1929 1930 #ifdef DC_USEIOSPACE 1931 if (!(command & PCIM_CMD_PORTEN)) { 1932 printf("dc%d: failed to enable I/O ports!\n", unit); 1933 error = ENXIO; 1934 goto fail; 1935 } 1936 #else 1937 if (!(command & PCIM_CMD_MEMEN)) { 1938 printf("dc%d: failed to enable memory mapping!\n", unit); 1939 error = ENXIO; 1940 goto fail; 1941 } 1942 #endif 1943 1944 rid = DC_RID; 1945 sc->dc_res = bus_alloc_resource(dev, DC_RES, &rid, 1946 0, ~0, 1, RF_ACTIVE); 1947 1948 if (sc->dc_res == NULL) { 1949 printf("dc%d: couldn't map ports/memory\n", unit); 1950 error = ENXIO; 1951 goto fail; 1952 } 1953 1954 sc->dc_btag = rman_get_bustag(sc->dc_res); 1955 sc->dc_bhandle = rman_get_bushandle(sc->dc_res); 1956 1957 /* Need this info to decide on a chip type. */ 1958 sc->dc_info = dc_devtype(dev); 1959 revision = pci_read_config(dev, DC_PCI_CFRV, 4) & 0x000000FF; 1960 1961 switch(sc->dc_info->dc_did) { 1962 case DC_DEVICEID_21143: 1963 sc->dc_type = DC_TYPE_21143; 1964 sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR; 1965 sc->dc_flags |= DC_REDUCED_MII_POLL; 1966 /* Save EEPROM contents so we can parse them later. 
*/ 1967 dc_eeprom_width(sc); 1968 dc_read_srom(sc, sc->dc_romwidth); 1969 break; 1970 case DC_DEVICEID_DM9009: 1971 case DC_DEVICEID_DM9100: 1972 case DC_DEVICEID_DM9102: 1973 sc->dc_type = DC_TYPE_DM9102; 1974 sc->dc_flags |= DC_TX_COALESCE|DC_TX_INTR_ALWAYS; 1975 sc->dc_flags |= DC_REDUCED_MII_POLL|DC_TX_STORENFWD; 1976 sc->dc_pmode = DC_PMODE_MII; 1977 /* Increase the latency timer value. */ 1978 command = pci_read_config(dev, DC_PCI_CFLT, 4); 1979 command &= 0xFFFF00FF; 1980 command |= 0x00008000; 1981 pci_write_config(dev, DC_PCI_CFLT, command, 4); 1982 break; 1983 case DC_DEVICEID_AL981: 1984 sc->dc_type = DC_TYPE_AL981; 1985 sc->dc_flags |= DC_TX_USE_TX_INTR; 1986 sc->dc_flags |= DC_TX_ADMTEK_WAR; 1987 sc->dc_pmode = DC_PMODE_MII; 1988 dc_eeprom_width(sc); 1989 dc_read_srom(sc, sc->dc_romwidth); 1990 break; 1991 case DC_DEVICEID_AN985: 1992 case DC_DEVICEID_FE2500: 1993 case DC_DEVICEID_EN2242: 1994 case DC_DEVICEID_HAWKING_PN672TX: 1995 sc->dc_type = DC_TYPE_AN985; 1996 sc->dc_flags |= DC_TX_USE_TX_INTR; 1997 sc->dc_flags |= DC_TX_ADMTEK_WAR; 1998 sc->dc_pmode = DC_PMODE_MII; 1999 dc_eeprom_width(sc); 2000 dc_read_srom(sc, sc->dc_romwidth); 2001 break; 2002 case DC_DEVICEID_98713: 2003 case DC_DEVICEID_98713_CP: 2004 if (revision < DC_REVISION_98713A) { 2005 sc->dc_type = DC_TYPE_98713; 2006 } 2007 if (revision >= DC_REVISION_98713A) { 2008 sc->dc_type = DC_TYPE_98713A; 2009 sc->dc_flags |= DC_21143_NWAY; 2010 } 2011 sc->dc_flags |= DC_REDUCED_MII_POLL; 2012 sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR; 2013 break; 2014 case DC_DEVICEID_987x5: 2015 case DC_DEVICEID_EN1217: 2016 /* 2017 * Macronix MX98715AEC-C/D/E parts have only a 2018 * 128-bit hash table. We need to deal with these 2019 * in the same manner as the PNIC II so that we 2020 * get the right number of bits out of the 2021 * CRC routine. 2022 */ 2023 if (revision >= DC_REVISION_98715AEC_C && 2024 revision < DC_REVISION_98725) 2025 sc->dc_flags |= DC_128BIT_HASH; 2026 sc->dc_type = DC_TYPE_987x5; 2027 sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR; 2028 sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY; 2029 break; 2030 case DC_DEVICEID_98727: 2031 sc->dc_type = DC_TYPE_987x5; 2032 sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR; 2033 sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY; 2034 break; 2035 case DC_DEVICEID_82C115: 2036 sc->dc_type = DC_TYPE_PNICII; 2037 sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR|DC_128BIT_HASH; 2038 sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY; 2039 break; 2040 case DC_DEVICEID_82C168: 2041 sc->dc_type = DC_TYPE_PNIC; 2042 sc->dc_flags |= DC_TX_STORENFWD|DC_TX_INTR_ALWAYS; 2043 sc->dc_flags |= DC_PNIC_RX_BUG_WAR; 2044 sc->dc_pnic_rx_buf = malloc(DC_RXLEN * 5, M_DEVBUF, M_NOWAIT); 2045 if (revision < DC_REVISION_82C169) 2046 sc->dc_pmode = DC_PMODE_SYM; 2047 break; 2048 case DC_DEVICEID_AX88140A: 2049 sc->dc_type = DC_TYPE_ASIX; 2050 sc->dc_flags |= DC_TX_USE_TX_INTR|DC_TX_INTR_FIRSTFRAG; 2051 sc->dc_flags |= DC_REDUCED_MII_POLL; 2052 sc->dc_pmode = DC_PMODE_MII; 2053 break; 2054 case DC_DEVICEID_X3201: 2055 sc->dc_type = DC_TYPE_XIRCOM; 2056 sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE | 2057 DC_TX_ALIGN; 2058 /* 2059 * We don't actually need to coalesce, but we're doing 2060 * it to obtain a double word aligned buffer. 2061 * The DC_TX_COALESCE flag is required. 
2062 */ 2063 sc->dc_pmode = DC_PMODE_MII; 2064 break; 2065 case DC_DEVICEID_RS7112: 2066 sc->dc_type = DC_TYPE_CONEXANT; 2067 sc->dc_flags |= DC_TX_INTR_ALWAYS; 2068 sc->dc_flags |= DC_REDUCED_MII_POLL; 2069 sc->dc_pmode = DC_PMODE_MII; 2070 dc_eeprom_width(sc); 2071 dc_read_srom(sc, sc->dc_romwidth); 2072 break; 2073 default: 2074 printf("dc%d: unknown device: %x\n", sc->dc_unit, 2075 sc->dc_info->dc_did); 2076 break; 2077 } 2078 2079 /* Save the cache line size. */ 2080 if (DC_IS_DAVICOM(sc)) 2081 sc->dc_cachesize = 0; 2082 else 2083 sc->dc_cachesize = pci_read_config(dev, 2084 DC_PCI_CFLT, 4) & 0xFF; 2085 2086 /* Reset the adapter. */ 2087 dc_reset(sc); 2088 2089 /* Take 21143 out of snooze mode */ 2090 if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) { 2091 command = pci_read_config(dev, DC_PCI_CFDD, 4); 2092 command &= ~(DC_CFDD_SNOOZE_MODE|DC_CFDD_SLEEP_MODE); 2093 pci_write_config(dev, DC_PCI_CFDD, command, 4); 2094 } 2095 2096 /* 2097 * Try to learn something about the supported media. 2098 * We know that ASIX and ADMtek and Davicom devices 2099 * will *always* be using MII media, so that's a no-brainer. 2100 * The tricky ones are the Macronix/PNIC II and the 2101 * Intel 21143. 2102 */ 2103 if (DC_IS_INTEL(sc)) 2104 dc_parse_21143_srom(sc); 2105 else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { 2106 if (sc->dc_type == DC_TYPE_98713) 2107 sc->dc_pmode = DC_PMODE_MII; 2108 else 2109 sc->dc_pmode = DC_PMODE_SYM; 2110 } else if (!sc->dc_pmode) 2111 sc->dc_pmode = DC_PMODE_MII; 2112 2113 /* 2114 * Get station address from the EEPROM. 2115 */ 2116 switch(sc->dc_type) { 2117 case DC_TYPE_98713: 2118 case DC_TYPE_98713A: 2119 case DC_TYPE_987x5: 2120 case DC_TYPE_PNICII: 2121 dc_read_eeprom(sc, (caddr_t)&mac_offset, 2122 (DC_EE_NODEADDR_OFFSET / 2), 1, 0); 2123 dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0); 2124 break; 2125 case DC_TYPE_PNIC: 2126 dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1); 2127 break; 2128 case DC_TYPE_DM9102: 2129 case DC_TYPE_21143: 2130 case DC_TYPE_ASIX: 2131 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); 2132 break; 2133 case DC_TYPE_AL981: 2134 case DC_TYPE_AN985: 2135 bcopy(&sc->dc_srom[DC_AL_EE_NODEADDR], (caddr_t)&eaddr, 2136 ETHER_ADDR_LEN); 2137 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_AL_EE_NODEADDR, 3, 0); 2138 break; 2139 case DC_TYPE_CONEXANT: 2140 bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR, &eaddr, 6); 2141 break; 2142 case DC_TYPE_XIRCOM: 2143 /* The MAC comes from the CIS */ 2144 mac = pci_get_ether(dev); 2145 if (!mac) { 2146 device_printf(dev, "No station address in CIS!\n"); 2147 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); 2148 error = ENXIO; 2149 goto fail; 2150 } 2151 bcopy(mac, eaddr, ETHER_ADDR_LEN); 2152 break; 2153 default: 2154 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); 2155 break; 2156 } 2157 2158 /* 2159 * A 21143 or clone chip was detected. Inform the world. 
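 * (The %6D format used below is the kernel printf byte-dump conversion: it
 * prints the six address bytes in hex, separated by the ":" string passed as
 * the extra argument.)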
2160 */ 2161 printf("dc%d: Ethernet address: %6D\n", unit, eaddr, ":"); 2162 2163 sc->dc_unit = unit; 2164 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); 2165 2166 sc->dc_ldata = contigmalloc(sizeof(struct dc_list_data), M_DEVBUF, 2167 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 2168 2169 if (sc->dc_ldata == NULL) { 2170 printf("dc%d: no memory for list buffers!\n", unit); 2171 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); 2172 error = ENXIO; 2173 goto fail; 2174 } 2175 2176 bzero(sc->dc_ldata, sizeof(struct dc_list_data)); 2177 2178 ifp = &sc->arpcom.ac_if; 2179 ifp->if_softc = sc; 2180 ifp->if_unit = unit; 2181 ifp->if_name = "dc"; 2182 /* XXX: bleah, MTU gets overwritten in ether_ifattach() */ 2183 ifp->if_mtu = ETHERMTU; 2184 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2185 ifp->if_ioctl = dc_ioctl; 2186 ifp->if_output = ether_output; 2187 ifp->if_start = dc_start; 2188 ifp->if_watchdog = dc_watchdog; 2189 ifp->if_init = dc_init; 2190 ifp->if_baudrate = 10000000; 2191 ifp->if_snd.ifq_maxlen = DC_TX_LIST_CNT - 1; 2192 2193 /* 2194 * Do MII setup. If this is a 21143, check for a PHY on the 2195 * MII bus after applying any necessary fixups to twiddle the 2196 * GPIO bits. If we don't end up finding a PHY, restore the 2197 * old selection (SIA only or SIA/SYM) and attach the dcphy 2198 * driver instead. 2199 */ 2200 if (DC_IS_INTEL(sc)) { 2201 dc_apply_fixup(sc, IFM_AUTO); 2202 tmp = sc->dc_pmode; 2203 sc->dc_pmode = DC_PMODE_MII; 2204 } 2205 2206 error = mii_phy_probe(dev, &sc->dc_miibus, 2207 dc_ifmedia_upd, dc_ifmedia_sts); 2208 2209 if (error && DC_IS_INTEL(sc)) { 2210 sc->dc_pmode = tmp; 2211 if (sc->dc_pmode != DC_PMODE_SIA) 2212 sc->dc_pmode = DC_PMODE_SYM; 2213 sc->dc_flags |= DC_21143_NWAY; 2214 mii_phy_probe(dev, &sc->dc_miibus, 2215 dc_ifmedia_upd, dc_ifmedia_sts); 2216 /* 2217 * For non-MII cards, we need to have the 21143 2218 * drive the LEDs. Except there are some systems 2219 * like the NEC VersaPro NoteBook PC which have no 2220 * LEDs, and twiddling these bits has adverse effects 2221 * on them. (I.e. you suddenly can't get a link.) 2222 */ 2223 if (pci_read_config(dev, DC_PCI_CSID, 4) != 0x80281033) 2224 sc->dc_flags |= DC_TULIP_LEDS; 2225 error = 0; 2226 } 2227 2228 if (error) { 2229 printf("dc%d: MII without any PHY!\n", sc->dc_unit); 2230 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); 2231 error = ENXIO; 2232 goto fail; 2233 } 2234 2235 if (DC_IS_XIRCOM(sc)) { 2236 /* 2237 * setup General Purpose Port mode and data so the tulip 2238 * can talk to the MII. 2239 */ 2240 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | 2241 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2242 DELAY(10); 2243 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | 2244 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2245 DELAY(10); 2246 } 2247 2248 /* 2249 * Tell the upper layer(s) we support long frames. 
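 * Recording an 802.1Q-sized header in if_data.ifi_hdrlen and advertising
 * IFCAP_VLAN_MTU lets the stack accept full-sized frames that carry a vlan
 * tag instead of treating them as oversize.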
2250 */ 2251 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2252 ifp->if_capabilities |= IFCAP_VLAN_MTU; 2253 2254 callout_init(&sc->dc_stat_ch, IS_MPSAFE); 2255 2256 #ifdef SRM_MEDIA 2257 sc->dc_srm_media = 0; 2258 2259 /* Remember the SRM console media setting */ 2260 if (DC_IS_INTEL(sc)) { 2261 command = pci_read_config(dev, DC_PCI_CFDD, 4); 2262 command &= ~(DC_CFDD_SNOOZE_MODE|DC_CFDD_SLEEP_MODE); 2263 switch ((command >> 8) & 0xff) { 2264 case 3: 2265 sc->dc_srm_media = IFM_10_T; 2266 break; 2267 case 4: 2268 sc->dc_srm_media = IFM_10_T | IFM_FDX; 2269 break; 2270 case 5: 2271 sc->dc_srm_media = IFM_100_TX; 2272 break; 2273 case 6: 2274 sc->dc_srm_media = IFM_100_TX | IFM_FDX; 2275 break; 2276 } 2277 if (sc->dc_srm_media) 2278 sc->dc_srm_media |= IFM_ACTIVE | IFM_ETHER; 2279 } 2280 #endif 2281 2282 /* 2283 * Call MI attach routine. 2284 */ 2285 ether_ifattach(ifp, eaddr); 2286 2287 /* Allocate interrupt */ 2288 rid = 0; 2289 sc->dc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 2290 RF_SHAREABLE | RF_ACTIVE); 2291 2292 if (sc->dc_irq == NULL) { 2293 printf("dc%d: couldn't map interrupt\n", unit); 2294 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); 2295 error = ENXIO; 2296 goto fail; 2297 } 2298 2299 error = bus_setup_intr(dev, sc->dc_irq, INTR_TYPE_NET | 2300 (IS_MPSAFE ? INTR_MPSAFE : 0), 2301 dc_intr, sc, &sc->dc_intrhand); 2302 2303 if (error) { 2304 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq); 2305 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); 2306 printf("dc%d: couldn't set up irq\n", unit); 2307 } 2308 2309 fail: 2310 if (error != 0) 2311 mtx_destroy(&sc->dc_mtx); 2312 return (error); 2313 } 2314 2315 static int 2316 dc_detach(dev) 2317 device_t dev; 2318 { 2319 struct dc_softc *sc; 2320 struct ifnet *ifp; 2321 struct dc_mediainfo *m; 2322 2323 sc = device_get_softc(dev); 2324 2325 DC_LOCK(sc); 2326 2327 ifp = &sc->arpcom.ac_if; 2328 2329 dc_stop(sc); 2330 ether_ifdetach(ifp); 2331 2332 bus_generic_detach(dev); 2333 device_delete_child(dev, sc->dc_miibus); 2334 2335 bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand); 2336 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq); 2337 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); 2338 2339 contigfree(sc->dc_ldata, sizeof(struct dc_list_data), M_DEVBUF); 2340 if (sc->dc_pnic_rx_buf != NULL) 2341 free(sc->dc_pnic_rx_buf, M_DEVBUF); 2342 2343 while(sc->dc_mi != NULL) { 2344 m = sc->dc_mi->dc_next; 2345 free(sc->dc_mi, M_DEVBUF); 2346 sc->dc_mi = m; 2347 } 2348 free(sc->dc_srom, M_DEVBUF); 2349 2350 DC_UNLOCK(sc); 2351 mtx_destroy(&sc->dc_mtx); 2352 2353 return(0); 2354 } 2355 2356 /* 2357 * Initialize the transmit descriptors. 2358 */ 2359 static int 2360 dc_list_tx_init(sc) 2361 struct dc_softc *sc; 2362 { 2363 struct dc_chain_data *cd; 2364 struct dc_list_data *ld; 2365 int i, nexti; 2366 2367 cd = &sc->dc_cdata; 2368 ld = sc->dc_ldata; 2369 for (i = 0; i < DC_TX_LIST_CNT; i++) { 2370 nexti = (i == (DC_TX_LIST_CNT - 1)) ? 0 : i+1; 2371 ld->dc_tx_list[i].dc_next = vtophys(&ld->dc_tx_list[nexti]); 2372 cd->dc_tx_chain[i] = NULL; 2373 ld->dc_tx_list[i].dc_data = 0; 2374 ld->dc_tx_list[i].dc_ctl = 0; 2375 } 2376 2377 cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0; 2378 2379 return(0); 2380 } 2381 2382 2383 /* 2384 * Initialize the RX descriptors and allocate mbufs for them. Note that 2385 * we arrange the descriptors in a closed ring, so that the last descriptor 2386 * points back to the first. 
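 * That is, descriptor i is linked to descriptor (i + 1) % DC_RX_LIST_CNT
 * through its dc_next physical pointer, just as dc_list_tx_init() above does
 * for the TX ring.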
2387 */ 2388 static int 2389 dc_list_rx_init(sc) 2390 struct dc_softc *sc; 2391 { 2392 struct dc_chain_data *cd; 2393 struct dc_list_data *ld; 2394 int i, nexti; 2395 2396 cd = &sc->dc_cdata; 2397 ld = sc->dc_ldata; 2398 2399 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2400 if (dc_newbuf(sc, i, NULL) == ENOBUFS) 2401 return(ENOBUFS); 2402 nexti = (i == (DC_RX_LIST_CNT - 1)) ? 0 : i+1; 2403 ld->dc_rx_list[i].dc_next = vtophys(&ld->dc_rx_list[nexti]); 2404 } 2405 2406 cd->dc_rx_prod = 0; 2407 2408 return(0); 2409 } 2410 2411 /* 2412 * Initialize an RX descriptor and attach an MBUF cluster. 2413 */ 2414 static int 2415 dc_newbuf(sc, i, m) 2416 struct dc_softc *sc; 2417 int i; 2418 struct mbuf *m; 2419 { 2420 struct mbuf *m_new = NULL; 2421 struct dc_desc *c; 2422 2423 c = &sc->dc_ldata->dc_rx_list[i]; 2424 2425 if (m == NULL) { 2426 MGETHDR(m_new, M_NOWAIT, MT_DATA); 2427 if (m_new == NULL) 2428 return(ENOBUFS); 2429 2430 MCLGET(m_new, M_NOWAIT); 2431 if (!(m_new->m_flags & M_EXT)) { 2432 m_freem(m_new); 2433 return(ENOBUFS); 2434 } 2435 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 2436 } else { 2437 m_new = m; 2438 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 2439 m_new->m_data = m_new->m_ext.ext_buf; 2440 } 2441 2442 m_adj(m_new, sizeof(u_int64_t)); 2443 2444 /* 2445 * If this is a PNIC chip, zero the buffer. This is part 2446 * of the workaround for the receive bug in the 82c168 and 2447 * 82c169 chips. 2448 */ 2449 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) 2450 bzero((char *)mtod(m_new, char *), m_new->m_len); 2451 2452 sc->dc_cdata.dc_rx_chain[i] = m_new; 2453 c->dc_data = vtophys(mtod(m_new, caddr_t)); 2454 c->dc_ctl = DC_RXCTL_RLINK | DC_RXLEN; 2455 c->dc_status = DC_RXSTAT_OWN; 2456 2457 return(0); 2458 } 2459 2460 /* 2461 * Grrrrr. 2462 * The PNIC chip has a terrible bug in it that manifests itself during 2463 * periods of heavy activity. The exact mode of failure is difficult to 2464 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it 2465 * will happen on slow machines. The bug is that sometimes instead of 2466 * uploading one complete frame during reception, it uploads what looks 2467 * like the entire contents of its FIFO memory. The frame we want is at 2468 * the end of the whole mess, but we never know exactly how much data has 2469 * been uploaded, so salvaging the frame is hard. 2470 * 2471 * There is only one way to do it reliably, and it's disgusting. 2472 * Here's what we know: 2473 * 2474 * - We know there will always be somewhere between one and three extra 2475 * descriptors uploaded. 2476 * 2477 * - We know the desired received frame will always be at the end of the 2478 * total data upload. 2479 * 2480 * - We know the size of the desired received frame because it will be 2481 * provided in the length field of the status word in the last descriptor. 2482 * 2483 * Here's what we do: 2484 * 2485 * - When we allocate buffers for the receive ring, we bzero() them. 2486 * This means that we know that the buffer contents should be all 2487 * zeros, except for data uploaded by the chip. 2488 * 2489 * - We also force the PNIC chip to upload frames that include the 2490 * ethernet CRC at the end. 2491 * 2492 * - We gather all of the bogus frame data into a single buffer. 2493 * 2494 * - We then position a pointer at the end of this buffer and scan 2495 * backwards until we encounter the first non-zero byte of data. 2496 * This is the end of the received frame.
We know we will encounter 2497 * some data at the end of the frame because the CRC will always be 2498 * there, so even if the sender transmits a packet of all zeros, 2499 * we won't be fooled. 2500 * 2501 * - We know the size of the actual received frame, so we subtract 2502 * that value from the current pointer location. This brings us 2503 * to the start of the actual received packet. 2504 * 2505 * - We copy this into an mbuf and pass it on, along with the actual 2506 * frame length. 2507 * 2508 * The performance hit is tremendous, but it beats dropping frames all 2509 * the time. 2510 */ 2511 2512 #define DC_WHOLEFRAME (DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG) 2513 static void 2514 dc_pnic_rx_bug_war(sc, idx) 2515 struct dc_softc *sc; 2516 int idx; 2517 { 2518 struct dc_desc *cur_rx; 2519 struct dc_desc *c = NULL; 2520 struct mbuf *m = NULL; 2521 unsigned char *ptr; 2522 int i, total_len; 2523 u_int32_t rxstat = 0; 2524 2525 i = sc->dc_pnic_rx_bug_save; 2526 cur_rx = &sc->dc_ldata->dc_rx_list[idx]; 2527 ptr = sc->dc_pnic_rx_buf; 2528 bzero(ptr, DC_RXLEN * 5); 2529 2530 /* Copy all the bytes from the bogus buffers. */ 2531 while (1) { 2532 c = &sc->dc_ldata->dc_rx_list[i]; 2533 rxstat = c->dc_status; 2534 m = sc->dc_cdata.dc_rx_chain[i]; 2535 bcopy(mtod(m, char *), ptr, DC_RXLEN); 2536 ptr += DC_RXLEN; 2537 /* If this is the last buffer, break out. */ 2538 if (i == idx || rxstat & DC_RXSTAT_LASTFRAG) 2539 break; 2540 dc_newbuf(sc, i, m); 2541 DC_INC(i, DC_RX_LIST_CNT); 2542 } 2543 2544 /* Find the length of the actual receive frame. */ 2545 total_len = DC_RXBYTES(rxstat); 2546 2547 /* Scan backwards until we hit a non-zero byte. */ 2548 while(*ptr == 0x00) 2549 ptr--; 2550 2551 /* Round off. */ 2552 if ((uintptr_t)(ptr) & 0x3) 2553 ptr -= 1; 2554 2555 /* Now find the start of the frame. */ 2556 ptr -= total_len; 2557 if (ptr < sc->dc_pnic_rx_buf) 2558 ptr = sc->dc_pnic_rx_buf; 2559 2560 /* 2561 * Now copy the salvaged frame to the last mbuf and fake up 2562 * the status word to make it look like a successful 2563 * frame reception. 2564 */ 2565 dc_newbuf(sc, i, m); 2566 bcopy(ptr, mtod(m, char *), total_len); 2567 cur_rx->dc_status = rxstat | DC_RXSTAT_FIRSTFRAG; 2568 2569 return; 2570 } 2571 2572 /* 2573 * This routine searches the RX ring for dirty descriptors in the 2574 * event that the rxeof routine falls out of sync with the chip's 2575 * current descriptor pointer. This may happen sometimes as a result 2576 * of a "no RX buffer available" condition that happens when the chip 2577 * consumes all of the RX buffers before the driver has a chance to 2578 * process the RX ring. This routine may need to be called more than 2579 * once to bring the driver back in sync with the chip; however, we 2580 * should still be getting RX DONE interrupts to drive the search 2581 * for new packets in the RX ring, so we should catch up eventually. 2582 */ 2583 static int 2584 dc_rx_resync(sc) 2585 struct dc_softc *sc; 2586 { 2587 int i, pos; 2588 struct dc_desc *cur_rx; 2589 2590 pos = sc->dc_cdata.dc_rx_prod; 2591 2592 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2593 cur_rx = &sc->dc_ldata->dc_rx_list[pos]; 2594 if (!(cur_rx->dc_status & DC_RXSTAT_OWN)) 2595 break; 2596 DC_INC(pos, DC_RX_LIST_CNT); 2597 } 2598 2599 /* If the ring really is empty, then just return. */ 2600 if (i == DC_RX_LIST_CNT) 2601 return(0); 2602 2603 /* We've fallen behind the chip: catch it.
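 * Advance the producer index to the first descriptor the chip has handed
 * back and return EAGAIN so the caller knows to run dc_rxeof() again from
 * that point.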
*/ 2604 sc->dc_cdata.dc_rx_prod = pos; 2605 2606 return(EAGAIN); 2607 } 2608 2609 /* 2610 * A frame has been uploaded: pass the resulting mbuf chain up to 2611 * the higher level protocols. 2612 */ 2613 static void 2614 dc_rxeof(sc) 2615 struct dc_softc *sc; 2616 { 2617 struct mbuf *m; 2618 struct ifnet *ifp; 2619 struct dc_desc *cur_rx; 2620 int i, total_len = 0; 2621 u_int32_t rxstat; 2622 2623 ifp = &sc->arpcom.ac_if; 2624 i = sc->dc_cdata.dc_rx_prod; 2625 2626 while(!(sc->dc_ldata->dc_rx_list[i].dc_status & DC_RXSTAT_OWN)) { 2627 2628 #ifdef DEVICE_POLLING 2629 if (ifp->if_flags & IFF_POLLING) { 2630 if (sc->rxcycles <= 0) 2631 break; 2632 sc->rxcycles--; 2633 } 2634 #endif /* DEVICE_POLLING */ 2635 cur_rx = &sc->dc_ldata->dc_rx_list[i]; 2636 rxstat = cur_rx->dc_status; 2637 m = sc->dc_cdata.dc_rx_chain[i]; 2638 total_len = DC_RXBYTES(rxstat); 2639 2640 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) { 2641 if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) { 2642 if (rxstat & DC_RXSTAT_FIRSTFRAG) 2643 sc->dc_pnic_rx_bug_save = i; 2644 if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) { 2645 DC_INC(i, DC_RX_LIST_CNT); 2646 continue; 2647 } 2648 dc_pnic_rx_bug_war(sc, i); 2649 rxstat = cur_rx->dc_status; 2650 total_len = DC_RXBYTES(rxstat); 2651 } 2652 } 2653 2654 sc->dc_cdata.dc_rx_chain[i] = NULL; 2655 2656 /* 2657 * If an error occurs, update stats, clear the 2658 * status word and leave the mbuf cluster in place: 2659 * it should simply get re-used next time this descriptor 2660 * comes up in the ring. However, don't report long 2661 * frames as errors since they could be vlans 2662 */ 2663 if ((rxstat & DC_RXSTAT_RXERR)){ 2664 if (!(rxstat & DC_RXSTAT_GIANT) || 2665 (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE | 2666 DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN | 2667 DC_RXSTAT_RUNT | DC_RXSTAT_DE))) { 2668 ifp->if_ierrors++; 2669 if (rxstat & DC_RXSTAT_COLLSEEN) 2670 ifp->if_collisions++; 2671 dc_newbuf(sc, i, m); 2672 if (rxstat & DC_RXSTAT_CRCERR) { 2673 DC_INC(i, DC_RX_LIST_CNT); 2674 continue; 2675 } else { 2676 dc_init(sc); 2677 return; 2678 } 2679 } 2680 } 2681 2682 /* No errors; receive the packet. */ 2683 total_len -= ETHER_CRC_LEN; 2684 #ifdef __i386__ 2685 /* 2686 * On the x86 we do not have alignment problems, so try to 2687 * allocate a new buffer for the receive ring, and pass up 2688 * the one where the packet is already, saving the expensive 2689 * copy done in m_devget(). 2690 * If we are on an architecture with alignment problems, or 2691 * if the allocation fails, then use m_devget and leave the 2692 * existing buffer in the receive ring. 2693 */ 2694 if (dc_quick && dc_newbuf(sc, i, NULL) == 0) { 2695 m->m_pkthdr.rcvif = ifp; 2696 m->m_pkthdr.len = m->m_len = total_len; 2697 DC_INC(i, DC_RX_LIST_CNT); 2698 } else 2699 #endif 2700 { 2701 struct mbuf *m0; 2702 2703 m0 = m_devget(mtod(m, char *), total_len, 2704 ETHER_ALIGN, ifp, NULL); 2705 dc_newbuf(sc, i, m); 2706 DC_INC(i, DC_RX_LIST_CNT); 2707 if (m0 == NULL) { 2708 ifp->if_ierrors++; 2709 continue; 2710 } 2711 m = m0; 2712 } 2713 2714 ifp->if_ipackets++; 2715 (*ifp->if_input)(ifp, m); 2716 } 2717 2718 sc->dc_cdata.dc_rx_prod = i; 2719 } 2720 2721 /* 2722 * A frame was downloaded to the chip. It's safe for us to clean up 2723 * the list buffers. 
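 * We walk the ring from dc_tx_cons toward dc_tx_prod and stop at the first
 * descriptor whose OWN bit is still set; setup-frame descriptors are
 * accounted for but carry no mbuf that needs freeing.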
2724 */ 2725 2726 static void 2727 dc_txeof(sc) 2728 struct dc_softc *sc; 2729 { 2730 struct dc_desc *cur_tx = NULL; 2731 struct ifnet *ifp; 2732 int idx; 2733 2734 ifp = &sc->arpcom.ac_if; 2735 2736 /* 2737 * Go through our tx list and free mbufs for those 2738 * frames that have been transmitted. 2739 */ 2740 idx = sc->dc_cdata.dc_tx_cons; 2741 while(idx != sc->dc_cdata.dc_tx_prod) { 2742 u_int32_t txstat; 2743 2744 cur_tx = &sc->dc_ldata->dc_tx_list[idx]; 2745 txstat = cur_tx->dc_status; 2746 2747 if (txstat & DC_TXSTAT_OWN) 2748 break; 2749 2750 if (!(cur_tx->dc_ctl & DC_TXCTL_LASTFRAG) || 2751 cur_tx->dc_ctl & DC_TXCTL_SETUP) { 2752 if (cur_tx->dc_ctl & DC_TXCTL_SETUP) { 2753 /* 2754 * Yes, the PNIC is so brain damaged 2755 * that it will sometimes generate a TX 2756 * underrun error while DMAing the RX 2757 * filter setup frame. If we detect this, 2758 * we have to send the setup frame again, 2759 * or else the filter won't be programmed 2760 * correctly. 2761 */ 2762 if (DC_IS_PNIC(sc)) { 2763 if (txstat & DC_TXSTAT_ERRSUM) 2764 dc_setfilt(sc); 2765 } 2766 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2767 } 2768 sc->dc_cdata.dc_tx_cnt--; 2769 DC_INC(idx, DC_TX_LIST_CNT); 2770 continue; 2771 } 2772 2773 if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) { 2774 /* 2775 * XXX: Why does my Xircom taunt me so? 2776 * For some reason it likes setting the CARRLOST flag 2777 * even when the carrier is there. wtf?!? 2778 * Who knows, but Conexant chips have the 2779 * same problem. Maybe they took lessons 2780 * from Xircom. 2781 */ 2782 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2783 sc->dc_pmode == DC_PMODE_MII && 2784 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM| 2785 DC_TXSTAT_NOCARRIER))) 2786 txstat &= ~DC_TXSTAT_ERRSUM; 2787 } else { 2788 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2789 sc->dc_pmode == DC_PMODE_MII && 2790 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM| 2791 DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST))) 2792 txstat &= ~DC_TXSTAT_ERRSUM; 2793 } 2794 2795 if (txstat & DC_TXSTAT_ERRSUM) { 2796 ifp->if_oerrors++; 2797 if (txstat & DC_TXSTAT_EXCESSCOLL) 2798 ifp->if_collisions++; 2799 if (txstat & DC_TXSTAT_LATECOLL) 2800 ifp->if_collisions++; 2801 if (!(txstat & DC_TXSTAT_UNDERRUN)) { 2802 dc_init(sc); 2803 return; 2804 } 2805 } 2806 2807 ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3; 2808 2809 ifp->if_opackets++; 2810 if (sc->dc_cdata.dc_tx_chain[idx] != NULL) { 2811 m_freem(sc->dc_cdata.dc_tx_chain[idx]); 2812 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2813 } 2814 2815 sc->dc_cdata.dc_tx_cnt--; 2816 DC_INC(idx, DC_TX_LIST_CNT); 2817 } 2818 2819 if (idx != sc->dc_cdata.dc_tx_cons) { 2820 /* some buffers have been freed */ 2821 sc->dc_cdata.dc_tx_cons = idx; 2822 ifp->if_flags &= ~IFF_OACTIVE; 2823 } 2824 ifp->if_timer = (sc->dc_cdata.dc_tx_cnt == 0) ? 
0 : 5; 2825 2826 return; 2827 } 2828 2829 static void 2830 dc_tick(xsc) 2831 void *xsc; 2832 { 2833 struct dc_softc *sc; 2834 struct mii_data *mii; 2835 struct ifnet *ifp; 2836 u_int32_t r; 2837 2838 sc = xsc; 2839 DC_LOCK(sc); 2840 ifp = &sc->arpcom.ac_if; 2841 mii = device_get_softc(sc->dc_miibus); 2842 2843 if (sc->dc_flags & DC_REDUCED_MII_POLL) { 2844 if (sc->dc_flags & DC_21143_NWAY) { 2845 r = CSR_READ_4(sc, DC_10BTSTAT); 2846 if (IFM_SUBTYPE(mii->mii_media_active) == 2847 IFM_100_TX && (r & DC_TSTAT_LS100)) { 2848 sc->dc_link = 0; 2849 mii_mediachg(mii); 2850 } 2851 if (IFM_SUBTYPE(mii->mii_media_active) == 2852 IFM_10_T && (r & DC_TSTAT_LS10)) { 2853 sc->dc_link = 0; 2854 mii_mediachg(mii); 2855 } 2856 if (sc->dc_link == 0) 2857 mii_tick(mii); 2858 } else { 2859 r = CSR_READ_4(sc, DC_ISR); 2860 if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT && 2861 sc->dc_cdata.dc_tx_cnt == 0) 2862 mii_tick(mii); 2863 if (!(mii->mii_media_status & IFM_ACTIVE)) 2864 sc->dc_link = 0; 2865 } 2866 } else 2867 mii_tick(mii); 2868 2869 /* 2870 * When the init routine completes, we expect to be able to send 2871 * packets right away, and in fact the network code will send a 2872 * gratuitous ARP the moment the init routine marks the interface 2873 * as running. However, even though the MAC may have been initialized, 2874 * there may be a delay of a few seconds before the PHY completes 2875 * autonegotiation and the link is brought up. Any transmissions 2876 * made during that delay will be lost. Dealing with this is tricky: 2877 * we can't just pause in the init routine while waiting for the 2878 * PHY to come ready since that would bring the whole system to 2879 * a screeching halt for several seconds. 2880 * 2881 * What we do here is prevent the TX start routine from sending 2882 * any packets until a link has been established. After the 2883 * interface has been initialized, the tick routine will poll 2884 * the state of the PHY until the IFM_ACTIVE flag is set. Until 2885 * that time, packets will stay in the send queue, and once the 2886 * link comes up, they will be flushed out to the wire. 2887 */ 2888 if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE && 2889 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2890 sc->dc_link++; 2891 if (ifp->if_snd.ifq_head != NULL) 2892 dc_start(ifp); 2893 } 2894 2895 if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link) 2896 callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc); 2897 else 2898 callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc); 2899 2900 DC_UNLOCK(sc); 2901 2902 return; 2903 } 2904 2905 /* 2906 * A transmit underrun has occurred. Back off the transmit threshold, 2907 * or switch to store and forward mode if we have to. 2908 */ 2909 static void 2910 dc_tx_underrun(sc) 2911 struct dc_softc *sc; 2912 { 2913 u_int32_t isr; 2914 int i; 2915 2916 if (DC_IS_DAVICOM(sc)) 2917 dc_init(sc); 2918 2919 if (DC_IS_INTEL(sc)) { 2920 /* 2921 * The real 21143 requires that the transmitter be idle 2922 * in order to change the transmit threshold or store 2923 * and forward state. 
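 * To that end, the code below clears DC_NETCFG_TX_ON and polls DC_ISR for
 * DC_ISR_TX_IDLE (up to DC_TIMEOUT attempts, 10us apart) before touching the
 * threshold bits, then turns the transmitter back on once they are updated.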
2924 */ 2925 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2926 2927 for (i = 0; i < DC_TIMEOUT; i++) { 2928 isr = CSR_READ_4(sc, DC_ISR); 2929 if (isr & DC_ISR_TX_IDLE) 2930 break; 2931 DELAY(10); 2932 } 2933 if (i == DC_TIMEOUT) { 2934 printf("dc%d: failed to force tx to idle state\n", 2935 sc->dc_unit); 2936 dc_init(sc); 2937 } 2938 } 2939 2940 printf("dc%d: TX underrun -- ", sc->dc_unit); 2941 sc->dc_txthresh += DC_TXTHRESH_INC; 2942 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 2943 printf("using store and forward mode\n"); 2944 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2945 } else { 2946 printf("increasing TX threshold\n"); 2947 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 2948 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 2949 } 2950 2951 if (DC_IS_INTEL(sc)) 2952 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2953 2954 return; 2955 } 2956 2957 #ifdef DEVICE_POLLING 2958 static poll_handler_t dc_poll; 2959 2960 static void 2961 dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 2962 { 2963 struct dc_softc *sc = ifp->if_softc; 2964 2965 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */ 2966 /* Re-enable interrupts. */ 2967 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 2968 return; 2969 } 2970 sc->rxcycles = count; 2971 dc_rxeof(sc); 2972 dc_txeof(sc); 2973 if (ifp->if_snd.ifq_head != NULL && !(ifp->if_flags & IFF_OACTIVE)) 2974 dc_start(ifp); 2975 2976 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 2977 u_int32_t status; 2978 2979 status = CSR_READ_4(sc, DC_ISR); 2980 status &= (DC_ISR_RX_WATDOGTIMEO|DC_ISR_RX_NOBUF| 2981 DC_ISR_TX_NOBUF|DC_ISR_TX_IDLE|DC_ISR_TX_UNDERRUN| 2982 DC_ISR_BUS_ERR); 2983 if (!status) 2984 return; 2985 /* ack what we have */ 2986 CSR_WRITE_4(sc, DC_ISR, status); 2987 2988 if (status & (DC_ISR_RX_WATDOGTIMEO|DC_ISR_RX_NOBUF)) { 2989 u_int32_t r = CSR_READ_4(sc, DC_FRAMESDISCARDED); 2990 ifp->if_ierrors += (r & 0xffff) + ((r >> 17) & 0x7ff); 2991 2992 if (dc_rx_resync(sc)) 2993 dc_rxeof(sc); 2994 } 2995 /* restart transmit unit if necessary */ 2996 if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt) 2997 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 2998 2999 if (status & DC_ISR_TX_UNDERRUN) 3000 dc_tx_underrun(sc); 3001 3002 if (status & DC_ISR_BUS_ERR) { 3003 printf("dc_poll: dc%d bus error\n", sc->dc_unit); 3004 dc_reset(sc); 3005 dc_init(sc); 3006 } 3007 } 3008 } 3009 #endif /* DEVICE_POLLING */ 3010 3011 static void 3012 dc_intr(arg) 3013 void *arg; 3014 { 3015 struct dc_softc *sc; 3016 struct ifnet *ifp; 3017 u_int32_t status; 3018 3019 sc = arg; 3020 3021 if (sc->suspended) { 3022 return; 3023 } 3024 3025 if ((CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0) 3026 return; 3027 3028 DC_LOCK(sc); 3029 ifp = &sc->arpcom.ac_if; 3030 #ifdef DEVICE_POLLING 3031 if (ifp->if_flags & IFF_POLLING) 3032 goto done; 3033 if (ether_poll_register(dc_poll, ifp)) { /* ok, disable interrupts */ 3034 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3035 goto done; 3036 } 3037 #endif /* DEVICE_POLLING */ 3038 3039 /* Suppress unwanted interrupts */ 3040 if (!(ifp->if_flags & IFF_UP)) { 3041 if (CSR_READ_4(sc, DC_ISR) & DC_INTRS) 3042 dc_stop(sc); 3043 DC_UNLOCK(sc); 3044 return; 3045 } 3046 3047 /* Disable interrupts. 
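 * Masking DC_IMR keeps the chip from raising further interrupts while the
 * loop below drains and acknowledges DC_ISR; the mask is restored to
 * DC_INTRS once the loop is done.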
*/ 3048 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3049 3050 while(((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) 3051 && status != 0xFFFFFFFF) { 3052 3053 CSR_WRITE_4(sc, DC_ISR, status); 3054 3055 if (status & DC_ISR_RX_OK) { 3056 int curpkts; 3057 curpkts = ifp->if_ipackets; 3058 dc_rxeof(sc); 3059 if (curpkts == ifp->if_ipackets) { 3060 while(dc_rx_resync(sc)) 3061 dc_rxeof(sc); 3062 } 3063 } 3064 3065 if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF)) 3066 dc_txeof(sc); 3067 3068 if (status & DC_ISR_TX_IDLE) { 3069 dc_txeof(sc); 3070 if (sc->dc_cdata.dc_tx_cnt) { 3071 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3072 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 3073 } 3074 } 3075 3076 if (status & DC_ISR_TX_UNDERRUN) 3077 dc_tx_underrun(sc); 3078 3079 if ((status & DC_ISR_RX_WATDOGTIMEO) 3080 || (status & DC_ISR_RX_NOBUF)) { 3081 int curpkts; 3082 curpkts = ifp->if_ipackets; 3083 dc_rxeof(sc); 3084 if (curpkts == ifp->if_ipackets) { 3085 while(dc_rx_resync(sc)) 3086 dc_rxeof(sc); 3087 } 3088 } 3089 3090 if (status & DC_ISR_BUS_ERR) { 3091 dc_reset(sc); 3092 dc_init(sc); 3093 } 3094 } 3095 3096 /* Re-enable interrupts. */ 3097 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 3098 3099 if (ifp->if_snd.ifq_head != NULL) 3100 dc_start(ifp); 3101 3102 #ifdef DEVICE_POLLING 3103 done: 3104 #endif /* DEVICE_POLLING */ 3105 3106 DC_UNLOCK(sc); 3107 3108 return; 3109 } 3110 3111 /* 3112 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 3113 * pointers to the fragment pointers. 3114 */ 3115 static int 3116 dc_encap(sc, m_head, txidx) 3117 struct dc_softc *sc; 3118 struct mbuf *m_head; 3119 u_int32_t *txidx; 3120 { 3121 struct dc_desc *f = NULL; 3122 struct mbuf *m; 3123 int frag, cur, cnt = 0; 3124 3125 /* 3126 * Start packing the mbufs in this chain into 3127 * the fragment pointers. Stop when we run out 3128 * of fragments or hit the end of the mbuf chain. 3129 */ 3130 m = m_head; 3131 cur = frag = *txidx; 3132 3133 for (m = m_head; m != NULL; m = m->m_next) { 3134 if (m->m_len != 0) { 3135 if (sc->dc_flags & DC_TX_ADMTEK_WAR) { 3136 if (*txidx != sc->dc_cdata.dc_tx_prod && 3137 frag == (DC_TX_LIST_CNT - 1)) 3138 return(ENOBUFS); 3139 } 3140 if ((DC_TX_LIST_CNT - 3141 (sc->dc_cdata.dc_tx_cnt + cnt)) < 5) 3142 return(ENOBUFS); 3143 3144 f = &sc->dc_ldata->dc_tx_list[frag]; 3145 f->dc_ctl = DC_TXCTL_TLINK | m->m_len; 3146 if (cnt == 0) { 3147 f->dc_status = 0; 3148 f->dc_ctl |= DC_TXCTL_FIRSTFRAG; 3149 } else 3150 f->dc_status = DC_TXSTAT_OWN; 3151 f->dc_data = vtophys(mtod(m, vm_offset_t)); 3152 cur = frag; 3153 DC_INC(frag, DC_TX_LIST_CNT); 3154 cnt++; 3155 } 3156 } 3157 3158 if (m != NULL) 3159 return(ENOBUFS); 3160 3161 sc->dc_cdata.dc_tx_cnt += cnt; 3162 sc->dc_cdata.dc_tx_chain[cur] = m_head; 3163 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_LASTFRAG; 3164 if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG) 3165 sc->dc_ldata->dc_tx_list[*txidx].dc_ctl |= DC_TXCTL_FINT; 3166 if (sc->dc_flags & DC_TX_INTR_ALWAYS) 3167 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT; 3168 if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64) 3169 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT; 3170 sc->dc_ldata->dc_tx_list[*txidx].dc_status = DC_TXSTAT_OWN; 3171 *txidx = frag; 3172 3173 return(0); 3174 } 3175 3176 /* 3177 * Coalesce an mbuf chain into a single mbuf cluster buffer. 3178 * Needed for some really badly behaved chips that just can't 3179 * do scatter/gather correctly. 
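 * The chain is m_copydata()'d into a single packet header mbuf (plus a
 * cluster when the frame is larger than MHLEN) and the original chain is
 * freed; on ENOBUFS the caller simply requeues the untouched packet.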
3180 */ 3181 static int 3182 dc_coal(sc, m_head) 3183 struct dc_softc *sc; 3184 struct mbuf **m_head; 3185 { 3186 struct mbuf *m_new, *m; 3187 3188 m = *m_head; 3189 MGETHDR(m_new, M_NOWAIT, MT_DATA); 3190 if (m_new == NULL) 3191 return(ENOBUFS); 3192 if (m->m_pkthdr.len > MHLEN) { 3193 MCLGET(m_new, M_NOWAIT); 3194 if (!(m_new->m_flags & M_EXT)) { 3195 m_freem(m_new); 3196 return(ENOBUFS); 3197 } 3198 } 3199 m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t)); 3200 m_new->m_pkthdr.len = m_new->m_len = m->m_pkthdr.len; 3201 m_freem(m); 3202 *m_head = m_new; 3203 3204 return(0); 3205 } 3206 3207 /* 3208 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3209 * to the mbuf data regions directly in the transmit lists. We also save a 3210 * copy of the pointers since the transmit list fragment pointers are 3211 * physical addresses. 3212 */ 3213 3214 static void 3215 dc_start(ifp) 3216 struct ifnet *ifp; 3217 { 3218 struct dc_softc *sc; 3219 struct mbuf *m_head = NULL; 3220 int idx; 3221 3222 sc = ifp->if_softc; 3223 3224 DC_LOCK(sc); 3225 3226 if (!sc->dc_link && ifp->if_snd.ifq_len < 10) { 3227 DC_UNLOCK(sc); 3228 return; 3229 } 3230 3231 if (ifp->if_flags & IFF_OACTIVE) { 3232 DC_UNLOCK(sc); 3233 return; 3234 } 3235 3236 idx = sc->dc_cdata.dc_tx_prod; 3237 3238 while(sc->dc_cdata.dc_tx_chain[idx] == NULL) { 3239 IF_DEQUEUE(&ifp->if_snd, m_head); 3240 if (m_head == NULL) 3241 break; 3242 3243 if (sc->dc_flags & DC_TX_COALESCE && 3244 (m_head->m_next != NULL || 3245 sc->dc_flags & DC_TX_ALIGN)) { 3246 if (dc_coal(sc, &m_head)) { 3247 IF_PREPEND(&ifp->if_snd, m_head); 3248 ifp->if_flags |= IFF_OACTIVE; 3249 break; 3250 } 3251 } 3252 3253 if (dc_encap(sc, m_head, &idx)) { 3254 IF_PREPEND(&ifp->if_snd, m_head); 3255 ifp->if_flags |= IFF_OACTIVE; 3256 break; 3257 } 3258 3259 /* 3260 * If there's a BPF listener, bounce a copy of this frame 3261 * to him. 3262 */ 3263 BPF_MTAP(ifp, m_head); 3264 3265 if (sc->dc_flags & DC_TX_ONE) { 3266 ifp->if_flags |= IFF_OACTIVE; 3267 break; 3268 } 3269 } 3270 3271 /* Transmit */ 3272 sc->dc_cdata.dc_tx_prod = idx; 3273 if (!(sc->dc_flags & DC_TX_POLL)) 3274 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 3275 3276 /* 3277 * Set a timeout in case the chip goes out to lunch. 3278 */ 3279 ifp->if_timer = 5; 3280 3281 DC_UNLOCK(sc); 3282 3283 return; 3284 } 3285 3286 static void 3287 dc_init(xsc) 3288 void *xsc; 3289 { 3290 struct dc_softc *sc = xsc; 3291 struct ifnet *ifp = &sc->arpcom.ac_if; 3292 struct mii_data *mii; 3293 3294 DC_LOCK(sc); 3295 3296 mii = device_get_softc(sc->dc_miibus); 3297 3298 /* 3299 * Cancel pending I/O and free all RX/TX buffers. 3300 */ 3301 dc_stop(sc); 3302 dc_reset(sc); 3303 3304 /* 3305 * Set cache alignment and burst length. 3306 */ 3307 if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc)) 3308 CSR_WRITE_4(sc, DC_BUSCTL, 0); 3309 else 3310 CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE); 3311 /* 3312 * Evenly share the bus between receive and transmit process. 
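 * For the 21143 this means setting the arbitration bit in the bus control
 * register; the burst length and cache alignment values programmed below
 * are likewise chosen per chip family.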
3313 */ 3314 if (DC_IS_INTEL(sc)) 3315 DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION); 3316 if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) { 3317 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA); 3318 } else { 3319 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG); 3320 } 3321 if (sc->dc_flags & DC_TX_POLL) 3322 DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1); 3323 switch(sc->dc_cachesize) { 3324 case 32: 3325 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG); 3326 break; 3327 case 16: 3328 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG); 3329 break; 3330 case 8: 3331 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG); 3332 break; 3333 case 0: 3334 default: 3335 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE); 3336 break; 3337 } 3338 3339 if (sc->dc_flags & DC_TX_STORENFWD) 3340 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3341 else { 3342 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 3343 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3344 } else { 3345 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3346 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 3347 } 3348 } 3349 3350 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC); 3351 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF); 3352 3353 if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { 3354 /* 3355 * The app notes for the 98713 and 98715A say that 3356 * in order to have the chips operate properly, a magic 3357 * number must be written to CSR16. Macronix does not 3358 * document the meaning of these bits so there's no way 3359 * to know exactly what they do. The 98713 has a magic 3360 * number all its own; the rest all use a different one. 3361 */ 3362 DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000); 3363 if (sc->dc_type == DC_TYPE_98713) 3364 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713); 3365 else 3366 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715); 3367 } 3368 3369 if (DC_IS_XIRCOM(sc)) { 3370 /* 3371 * setup General Purpose Port mode and data so the tulip 3372 * can talk to the MII. 3373 */ 3374 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | 3375 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 3376 DELAY(10); 3377 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | 3378 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 3379 DELAY(10); 3380 } 3381 3382 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 3383 DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN); 3384 3385 /* Init circular RX list. */ 3386 if (dc_list_rx_init(sc) == ENOBUFS) { 3387 printf("dc%d: initialization failed: no " 3388 "memory for rx buffers\n", sc->dc_unit); 3389 dc_stop(sc); 3390 DC_UNLOCK(sc); 3391 return; 3392 } 3393 3394 /* 3395 * Init tx descriptors. 3396 */ 3397 dc_list_tx_init(sc); 3398 3399 /* 3400 * Load the address of the RX list. 3401 */ 3402 CSR_WRITE_4(sc, DC_RXADDR, vtophys(&sc->dc_ldata->dc_rx_list[0])); 3403 CSR_WRITE_4(sc, DC_TXADDR, vtophys(&sc->dc_ldata->dc_tx_list[0])); 3404 3405 /* 3406 * Enable interrupts. 3407 */ 3408 #ifdef DEVICE_POLLING 3409 /* 3410 * ... but only if we are not polling, and make sure they are off in 3411 * the case of polling. Some cards (e.g. fxp) turn interrupts on 3412 * after a reset. 3413 */ 3414 if (ifp->if_flags & IFF_POLLING) 3415 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3416 else 3417 #endif 3418 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 3419 CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF); 3420 3421 /* Enable transmitter. */ 3422 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3423 3424 /* 3425 * If this is an Intel 21143 and we're not using the 3426 * MII port, program the LED control pins so we get 3427 * link and activity indications. 
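 * The DC_TULIP_LEDS flag is only set in dc_attach() when we fall back to the
 * internal NWAY/dcphy path and the board is not a known LED-less design such
 * as the NEC VersaPro, where twiddling these bits breaks the link.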
3428 */ 3429 if (sc->dc_flags & DC_TULIP_LEDS) { 3430 CSR_WRITE_4(sc, DC_WATCHDOG, 3431 DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY); 3432 CSR_WRITE_4(sc, DC_WATCHDOG, 0); 3433 } 3434 3435 /* 3436 * Load the RX/multicast filter. We do this sort of late 3437 * because the filter programming scheme on the 21143 and 3438 * some clones requires DMAing a setup frame via the TX 3439 * engine, and we need the transmitter enabled for that. 3440 */ 3441 dc_setfilt(sc); 3442 3443 /* Enable receiver. */ 3444 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); 3445 CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF); 3446 3447 mii_mediachg(mii); 3448 dc_setcfg(sc, sc->dc_if_media); 3449 3450 ifp->if_flags |= IFF_RUNNING; 3451 ifp->if_flags &= ~IFF_OACTIVE; 3452 3453 /* Don't start the ticker if this is a homePNA link. */ 3454 if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1) 3455 sc->dc_link = 1; 3456 else { 3457 if (sc->dc_flags & DC_21143_NWAY) 3458 callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc); 3459 else 3460 callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc); 3461 } 3462 3463 #ifdef SRM_MEDIA 3464 if(sc->dc_srm_media) { 3465 struct ifreq ifr; 3466 3467 ifr.ifr_media = sc->dc_srm_media; 3468 ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA); 3469 sc->dc_srm_media = 0; 3470 } 3471 #endif 3472 DC_UNLOCK(sc); 3473 return; 3474 } 3475 3476 /* 3477 * Set media options. 3478 */ 3479 static int 3480 dc_ifmedia_upd(ifp) 3481 struct ifnet *ifp; 3482 { 3483 struct dc_softc *sc; 3484 struct mii_data *mii; 3485 struct ifmedia *ifm; 3486 3487 sc = ifp->if_softc; 3488 mii = device_get_softc(sc->dc_miibus); 3489 mii_mediachg(mii); 3490 ifm = &mii->mii_media; 3491 3492 if (DC_IS_DAVICOM(sc) && 3493 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) 3494 dc_setcfg(sc, ifm->ifm_media); 3495 else 3496 sc->dc_link = 0; 3497 3498 return(0); 3499 } 3500 3501 /* 3502 * Report current media status. 
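 * For Davicom chips configured for HomePNA, the active media is reported
 * straight from the selected ifmedia word with no status flags; everything
 * else comes from mii_pollstat().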
3503 */ 3504 static void 3505 dc_ifmedia_sts(ifp, ifmr) 3506 struct ifnet *ifp; 3507 struct ifmediareq *ifmr; 3508 { 3509 struct dc_softc *sc; 3510 struct mii_data *mii; 3511 struct ifmedia *ifm; 3512 3513 sc = ifp->if_softc; 3514 mii = device_get_softc(sc->dc_miibus); 3515 mii_pollstat(mii); 3516 ifm = &mii->mii_media; 3517 if (DC_IS_DAVICOM(sc)) { 3518 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { 3519 ifmr->ifm_active = ifm->ifm_media; 3520 ifmr->ifm_status = 0; 3521 return; 3522 } 3523 } 3524 ifmr->ifm_active = mii->mii_media_active; 3525 ifmr->ifm_status = mii->mii_media_status; 3526 3527 return; 3528 } 3529 3530 static int 3531 dc_ioctl(ifp, command, data) 3532 struct ifnet *ifp; 3533 u_long command; 3534 caddr_t data; 3535 { 3536 struct dc_softc *sc = ifp->if_softc; 3537 struct ifreq *ifr = (struct ifreq *) data; 3538 struct mii_data *mii; 3539 int error = 0; 3540 3541 DC_LOCK(sc); 3542 3543 switch(command) { 3544 case SIOCSIFFLAGS: 3545 if (ifp->if_flags & IFF_UP) { 3546 int need_setfilt = (ifp->if_flags ^ sc->dc_if_flags) & 3547 (IFF_PROMISC | IFF_ALLMULTI); 3548 3549 if (ifp->if_flags & IFF_RUNNING) { 3550 if (need_setfilt) 3551 dc_setfilt(sc); 3552 } else { 3553 sc->dc_txthresh = 0; 3554 dc_init(sc); 3555 } 3556 } else { 3557 if (ifp->if_flags & IFF_RUNNING) 3558 dc_stop(sc); 3559 } 3560 sc->dc_if_flags = ifp->if_flags; 3561 error = 0; 3562 break; 3563 case SIOCADDMULTI: 3564 case SIOCDELMULTI: 3565 dc_setfilt(sc); 3566 error = 0; 3567 break; 3568 case SIOCGIFMEDIA: 3569 case SIOCSIFMEDIA: 3570 mii = device_get_softc(sc->dc_miibus); 3571 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 3572 #ifdef SRM_MEDIA 3573 if (sc->dc_srm_media) 3574 sc->dc_srm_media = 0; 3575 #endif 3576 break; 3577 default: 3578 error = ether_ioctl(ifp, command, data); 3579 break; 3580 } 3581 3582 DC_UNLOCK(sc); 3583 3584 return(error); 3585 } 3586 3587 static void 3588 dc_watchdog(ifp) 3589 struct ifnet *ifp; 3590 { 3591 struct dc_softc *sc; 3592 3593 sc = ifp->if_softc; 3594 3595 DC_LOCK(sc); 3596 3597 ifp->if_oerrors++; 3598 printf("dc%d: watchdog timeout\n", sc->dc_unit); 3599 3600 dc_stop(sc); 3601 dc_reset(sc); 3602 dc_init(sc); 3603 3604 if (ifp->if_snd.ifq_head != NULL) 3605 dc_start(ifp); 3606 3607 DC_UNLOCK(sc); 3608 3609 return; 3610 } 3611 3612 /* 3613 * Stop the adapter and free any mbufs allocated to the 3614 * RX and TX lists. 3615 */ 3616 static void 3617 dc_stop(sc) 3618 struct dc_softc *sc; 3619 { 3620 register int i; 3621 struct ifnet *ifp; 3622 3623 DC_LOCK(sc); 3624 3625 ifp = &sc->arpcom.ac_if; 3626 ifp->if_timer = 0; 3627 3628 callout_stop(&sc->dc_stat_ch); 3629 3630 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3631 #ifdef DEVICE_POLLING 3632 ether_poll_deregister(ifp); 3633 #endif 3634 3635 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON)); 3636 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3637 CSR_WRITE_4(sc, DC_TXADDR, 0x00000000); 3638 CSR_WRITE_4(sc, DC_RXADDR, 0x00000000); 3639 sc->dc_link = 0; 3640 3641 /* 3642 * Free data in the RX lists. 3643 */ 3644 for (i = 0; i < DC_RX_LIST_CNT; i++) { 3645 if (sc->dc_cdata.dc_rx_chain[i] != NULL) { 3646 m_freem(sc->dc_cdata.dc_rx_chain[i]); 3647 sc->dc_cdata.dc_rx_chain[i] = NULL; 3648 } 3649 } 3650 bzero((char *)&sc->dc_ldata->dc_rx_list, 3651 sizeof(sc->dc_ldata->dc_rx_list)); 3652 3653 /* 3654 * Free the TX list buffers. 
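 * Descriptors that held a setup frame have no mbuf attached, so for those
 * only the chain pointer is cleared.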
3655 */ 3656 for (i = 0; i < DC_TX_LIST_CNT; i++) { 3657 if (sc->dc_cdata.dc_tx_chain[i] != NULL) { 3658 if (sc->dc_ldata->dc_tx_list[i].dc_ctl & 3659 DC_TXCTL_SETUP) { 3660 sc->dc_cdata.dc_tx_chain[i] = NULL; 3661 continue; 3662 } 3663 m_freem(sc->dc_cdata.dc_tx_chain[i]); 3664 sc->dc_cdata.dc_tx_chain[i] = NULL; 3665 } 3666 } 3667 3668 bzero((char *)&sc->dc_ldata->dc_tx_list, 3669 sizeof(sc->dc_ldata->dc_tx_list)); 3670 3671 DC_UNLOCK(sc); 3672 3673 return; 3674 } 3675 3676 /* 3677 * Device suspend routine. Stop the interface and save some PCI 3678 * settings in case the BIOS doesn't restore them properly on 3679 * resume. 3680 */ 3681 static int 3682 dc_suspend(dev) 3683 device_t dev; 3684 { 3685 register int i; 3686 int s; 3687 struct dc_softc *sc; 3688 3689 s = splimp(); 3690 3691 sc = device_get_softc(dev); 3692 3693 dc_stop(sc); 3694 3695 for (i = 0; i < 5; i++) 3696 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4); 3697 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); 3698 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); 3699 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 3700 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); 3701 3702 sc->suspended = 1; 3703 3704 splx(s); 3705 return (0); 3706 } 3707 3708 /* 3709 * Device resume routine. Restore some PCI settings in case the BIOS 3710 * doesn't, re-enable busmastering, and restart the interface if 3711 * appropriate. 3712 */ 3713 static int 3714 dc_resume(dev) 3715 device_t dev; 3716 { 3717 register int i; 3718 int s; 3719 struct dc_softc *sc; 3720 struct ifnet *ifp; 3721 3722 s = splimp(); 3723 3724 sc = device_get_softc(dev); 3725 ifp = &sc->arpcom.ac_if; 3726 3727 dc_acpi(dev); 3728 3729 /* better way to do this? */ 3730 for (i = 0; i < 5; i++) 3731 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4); 3732 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); 3733 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); 3734 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); 3735 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); 3736 3737 /* reenable busmastering */ 3738 pci_enable_busmaster(dev); 3739 pci_enable_io(dev, DC_RES); 3740 3741 /* reinitialize interface if necessary */ 3742 if (ifp->if_flags & IFF_UP) 3743 dc_init(sc); 3744 3745 sc->suspended = 0; 3746 3747 splx(s); 3748 return (0); 3749 } 3750 3751 /* 3752 * Stop all chip I/O so that the kernel's probe routines don't 3753 * get confused by errant DMAs when rebooting. 3754 */ 3755 static void 3756 dc_shutdown(dev) 3757 device_t dev; 3758 { 3759 struct dc_softc *sc; 3760 3761 sc = device_get_softc(dev); 3762 3763 dc_stop(sc); 3764 3765 return; 3766 } 3767