1 /* 2 * Copyright (c) 1997, 1998, 1999 3 * Bill Paul <wpaul@ee.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143 35 * series chips and several workalikes including the following: 36 * 37 * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com) 38 * Macronix/Lite-On 82c115 PNIC II (www.macronix.com) 39 * Lite-On 82c168/82c169 PNIC (www.litecom.com) 40 * ASIX Electronics AX88140A (www.asix.com.tw) 41 * ASIX Electronics AX88141 (www.asix.com.tw) 42 * ADMtek AL981 (www.admtek.com.tw) 43 * ADMtek AN985 (www.admtek.com.tw) 44 * Netgear FA511 (www.netgear.com) Appears to be rebadged ADMTek AN985 45 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com) 46 * Accton EN1217 (www.accton.com) 47 * Xircom X3201 (www.xircom.com) 48 * Abocom FE2500 49 * Conexant LANfinity (www.conexant.com) 50 * 3Com OfficeConnect 10/100B 3CSOHO100B (www.3com.com) 51 * 52 * Datasheets for the 21143 are available at developer.intel.com. 53 * Datasheets for the clone parts can be found at their respective sites. 54 * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.) 55 * The PNIC II is essentially a Macronix 98715A chip; the only difference 56 * worth noting is that its multicast hash table is only 128 bits wide 57 * instead of 512. 58 * 59 * Written by Bill Paul <wpaul@ee.columbia.edu> 60 * Electrical Engineering Department 61 * Columbia University, New York City 62 */ 63 64 /* 65 * The Intel 21143 is the successor to the DEC 21140. It is basically 66 * the same as the 21140 but with a few new features. The 21143 supports 67 * three kinds of media attachments: 68 * 69 * o MII port, for 10Mbps and 100Mbps support and NWAY 70 * autonegotiation provided by an external PHY. 71 * o SYM port, for symbol mode 100Mbps support. 72 * o 10baseT port. 
73 * o AUI/BNC port. 74 * 75 * The 100Mbps SYM port and 10baseT port can be used together in 76 * combination with the internal NWAY support to create a 10/100 77 * autosensing configuration. 78 * 79 * Note that not all tulip workalikes are handled in this driver: we only 80 * deal with those which are relatively well behaved. The Winbond is 81 * handled separately due to its different register offsets and the 82 * special handling needed for its various bugs. The PNIC is handled 83 * here, but I'm not thrilled about it. 84 * 85 * All of the workalike chips use some form of MII transceiver support 86 * with the exception of the Macronix chips, which also have a SYM port. 87 * The ASIX AX88140A is also documented to have a SYM port, but all 88 * the cards I've seen use an MII transceiver, probably because the 89 * AX88140A doesn't support internal NWAY. 90 */ 91 92 #include <sys/cdefs.h> 93 __FBSDID("$FreeBSD$"); 94 95 #include <sys/param.h> 96 #include <sys/endian.h> 97 #include <sys/systm.h> 98 #include <sys/sockio.h> 99 #include <sys/mbuf.h> 100 #include <sys/malloc.h> 101 #include <sys/kernel.h> 102 #include <sys/socket.h> 103 #include <sys/sysctl.h> 104 105 #include <net/if.h> 106 #include <net/if_arp.h> 107 #include <net/ethernet.h> 108 #include <net/if_dl.h> 109 #include <net/if_media.h> 110 #include <net/if_types.h> 111 #include <net/if_vlan_var.h> 112 113 #include <net/bpf.h> 114 115 #include <machine/bus_pio.h> 116 #include <machine/bus_memio.h> 117 #include <machine/bus.h> 118 #include <machine/resource.h> 119 #include <sys/bus.h> 120 #include <sys/rman.h> 121 122 #include <dev/mii/mii.h> 123 #include <dev/mii/miivar.h> 124 125 #include <dev/pci/pcireg.h> 126 #include <dev/pci/pcivar.h> 127 128 #define DC_USEIOSPACE 129 #ifdef __alpha__ 130 #define SRM_MEDIA 131 #endif 132 133 #include <pci/if_dcreg.h> 134 135 MODULE_DEPEND(dc, pci, 1, 1, 1); 136 MODULE_DEPEND(dc, ether, 1, 1, 1); 137 MODULE_DEPEND(dc, miibus, 1, 1, 1); 138 139 /* "controller miibus0" required. See GENERIC if you get errors here. */ 140 #include "miibus_if.h" 141 142 /* 143 * Various supported device vendors/types and their names. 
144 */ 145 static struct dc_type dc_devs[] = { 146 { DC_VENDORID_DEC, DC_DEVICEID_21143, 147 "Intel 21143 10/100BaseTX" }, 148 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009, 149 "Davicom DM9009 10/100BaseTX" }, 150 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100, 151 "Davicom DM9100 10/100BaseTX" }, 152 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, 153 "Davicom DM9102 10/100BaseTX" }, 154 { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, 155 "Davicom DM9102A 10/100BaseTX" }, 156 { DC_VENDORID_ADMTEK, DC_DEVICEID_AL981, 157 "ADMtek AL981 10/100BaseTX" }, 158 { DC_VENDORID_ADMTEK, DC_DEVICEID_AN985, 159 "ADMtek AN985 10/100BaseTX" }, 160 { DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511, 161 "ADMtek ADM9511 10/100BaseTX" }, 162 { DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513, 163 "ADMtek ADM9513 10/100BaseTX" }, 164 { DC_VENDORID_ADMTEK, DC_DEVICEID_FA511, 165 "Netgear FA511 10/100BaseTX" }, 166 { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, 167 "ASIX AX88140A 10/100BaseTX" }, 168 { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, 169 "ASIX AX88141 10/100BaseTX" }, 170 { DC_VENDORID_MX, DC_DEVICEID_98713, 171 "Macronix 98713 10/100BaseTX" }, 172 { DC_VENDORID_MX, DC_DEVICEID_98713, 173 "Macronix 98713A 10/100BaseTX" }, 174 { DC_VENDORID_CP, DC_DEVICEID_98713_CP, 175 "Compex RL100-TX 10/100BaseTX" }, 176 { DC_VENDORID_CP, DC_DEVICEID_98713_CP, 177 "Compex RL100-TX 10/100BaseTX" }, 178 { DC_VENDORID_MX, DC_DEVICEID_987x5, 179 "Macronix 98715/98715A 10/100BaseTX" }, 180 { DC_VENDORID_MX, DC_DEVICEID_987x5, 181 "Macronix 98715AEC-C 10/100BaseTX" }, 182 { DC_VENDORID_MX, DC_DEVICEID_987x5, 183 "Macronix 98725 10/100BaseTX" }, 184 { DC_VENDORID_MX, DC_DEVICEID_98727, 185 "Macronix 98727/98732 10/100BaseTX" }, 186 { DC_VENDORID_LO, DC_DEVICEID_82C115, 187 "LC82C115 PNIC II 10/100BaseTX" }, 188 { DC_VENDORID_LO, DC_DEVICEID_82C168, 189 "82c168 PNIC 10/100BaseTX" }, 190 { DC_VENDORID_LO, DC_DEVICEID_82C168, 191 "82c169 PNIC 10/100BaseTX" }, 192 { DC_VENDORID_ACCTON, DC_DEVICEID_EN1217, 193 "Accton EN1217 10/100BaseTX" }, 194 { DC_VENDORID_ACCTON, DC_DEVICEID_EN2242, 195 "Accton EN2242 MiniPCI 10/100BaseTX" }, 196 { DC_VENDORID_XIRCOM, DC_DEVICEID_X3201, 197 "Xircom X3201 10/100BaseTX" }, 198 { DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500, 199 "Abocom FE2500 10/100BaseTX" }, 200 { DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112, 201 "Conexant LANfinity MiniPCI 10/100BaseTX" }, 202 { DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX, 203 "Hawking CB102 CardBus 10/100" }, 204 { DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T, 205 "PlaneX FNW-3602-T CardBus 10/100" }, 206 { DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB, 207 "3Com OfficeConnect 10/100B" }, 208 { DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120, 209 "Microsoft MN-120 CardBus 10/100" }, 210 { DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130, 211 "Microsoft MN-130 10/100" }, 212 { DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130_FAKE, 213 "Microsoft MN-130 10/100" }, 214 { 0, 0, NULL } 215 }; 216 217 static int dc_probe (device_t); 218 static int dc_attach (device_t); 219 static int dc_detach (device_t); 220 static int dc_suspend (device_t); 221 static int dc_resume (device_t); 222 #ifndef BURN_BRIDGES 223 static void dc_acpi (device_t); 224 #endif 225 static struct dc_type *dc_devtype (device_t); 226 static int dc_newbuf (struct dc_softc *, int, int); 227 static int dc_encap (struct dc_softc *, struct mbuf *); 228 static void dc_pnic_rx_bug_war (struct dc_softc *, int); 229 static int dc_rx_resync (struct dc_softc *); 230 static void dc_rxeof (struct dc_softc *); 231 static void dc_txeof (struct dc_softc *); 232 static void dc_tick 
(void *); 233 static void dc_tx_underrun (struct dc_softc *); 234 static void dc_intr (void *); 235 static void dc_start (struct ifnet *); 236 static int dc_ioctl (struct ifnet *, u_long, caddr_t); 237 static void dc_init (void *); 238 static void dc_stop (struct dc_softc *); 239 static void dc_watchdog (struct ifnet *); 240 static void dc_shutdown (device_t); 241 static int dc_ifmedia_upd (struct ifnet *); 242 static void dc_ifmedia_sts (struct ifnet *, struct ifmediareq *); 243 244 static void dc_delay (struct dc_softc *); 245 static void dc_eeprom_idle (struct dc_softc *); 246 static void dc_eeprom_putbyte (struct dc_softc *, int); 247 static void dc_eeprom_getword (struct dc_softc *, int, u_int16_t *); 248 static void dc_eeprom_getword_pnic 249 (struct dc_softc *, int, u_int16_t *); 250 static void dc_eeprom_getword_xircom 251 (struct dc_softc *, int, u_int16_t *); 252 static void dc_eeprom_width (struct dc_softc *); 253 static void dc_read_eeprom (struct dc_softc *, caddr_t, int, int, int); 254 255 static void dc_mii_writebit (struct dc_softc *, int); 256 static int dc_mii_readbit (struct dc_softc *); 257 static void dc_mii_sync (struct dc_softc *); 258 static void dc_mii_send (struct dc_softc *, u_int32_t, int); 259 static int dc_mii_readreg (struct dc_softc *, struct dc_mii_frame *); 260 static int dc_mii_writereg (struct dc_softc *, struct dc_mii_frame *); 261 static int dc_miibus_readreg (device_t, int, int); 262 static int dc_miibus_writereg (device_t, int, int, int); 263 static void dc_miibus_statchg (device_t); 264 static void dc_miibus_mediainit (device_t); 265 266 static void dc_setcfg (struct dc_softc *, int); 267 static u_int32_t dc_crc_le (struct dc_softc *, caddr_t); 268 static u_int32_t dc_crc_be (caddr_t); 269 static void dc_setfilt_21143 (struct dc_softc *); 270 static void dc_setfilt_asix (struct dc_softc *); 271 static void dc_setfilt_admtek (struct dc_softc *); 272 static void dc_setfilt_xircom (struct dc_softc *); 273 274 static void dc_setfilt (struct dc_softc *); 275 276 static void dc_reset (struct dc_softc *); 277 static int dc_list_rx_init (struct dc_softc *); 278 static int dc_list_tx_init (struct dc_softc *); 279 280 static void dc_read_srom (struct dc_softc *, int); 281 static void dc_parse_21143_srom (struct dc_softc *); 282 static void dc_decode_leaf_sia (struct dc_softc *, struct dc_eblock_sia *); 283 static void dc_decode_leaf_mii (struct dc_softc *, struct dc_eblock_mii *); 284 static void dc_decode_leaf_sym (struct dc_softc *, struct dc_eblock_sym *); 285 static void dc_apply_fixup (struct dc_softc *, int); 286 287 static void dc_dma_map_txbuf (void *, bus_dma_segment_t *, int, bus_size_t, 288 int); 289 static void dc_dma_map_rxbuf (void *, bus_dma_segment_t *, int, bus_size_t, 290 int); 291 292 #ifdef DC_USEIOSPACE 293 #define DC_RES SYS_RES_IOPORT 294 #define DC_RID DC_PCI_CFBIO 295 #else 296 #define DC_RES SYS_RES_MEMORY 297 #define DC_RID DC_PCI_CFBMA 298 #endif 299 300 static device_method_t dc_methods[] = { 301 /* Device interface */ 302 DEVMETHOD(device_probe, dc_probe), 303 DEVMETHOD(device_attach, dc_attach), 304 DEVMETHOD(device_detach, dc_detach), 305 DEVMETHOD(device_suspend, dc_suspend), 306 DEVMETHOD(device_resume, dc_resume), 307 DEVMETHOD(device_shutdown, dc_shutdown), 308 309 /* bus interface */ 310 DEVMETHOD(bus_print_child, bus_generic_print_child), 311 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 312 313 /* MII interface */ 314 DEVMETHOD(miibus_readreg, dc_miibus_readreg), 315 DEVMETHOD(miibus_writereg, 
dc_miibus_writereg), 316 DEVMETHOD(miibus_statchg, dc_miibus_statchg), 317 DEVMETHOD(miibus_mediainit, dc_miibus_mediainit), 318 319 { 0, 0 } 320 }; 321 322 static driver_t dc_driver = { 323 "dc", 324 dc_methods, 325 sizeof(struct dc_softc) 326 }; 327 328 static devclass_t dc_devclass; 329 #ifdef __i386__ 330 static int dc_quick = 1; 331 SYSCTL_INT(_hw, OID_AUTO, dc_quick, CTLFLAG_RW, &dc_quick, 0, 332 "do not mdevget in dc driver"); 333 #endif 334 335 DRIVER_MODULE(dc, cardbus, dc_driver, dc_devclass, 0, 0); 336 DRIVER_MODULE(dc, pci, dc_driver, dc_devclass, 0, 0); 337 DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, 0, 0); 338 339 #define DC_SETBIT(sc, reg, x) \ 340 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) 341 342 #define DC_CLRBIT(sc, reg, x) \ 343 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) 344 345 #define SIO_SET(x) DC_SETBIT(sc, DC_SIO, (x)) 346 #define SIO_CLR(x) DC_CLRBIT(sc, DC_SIO, (x)) 347 348 #define IS_MPSAFE 0 349 350 static void 351 dc_delay(struct dc_softc *sc) 352 { 353 int idx; 354 355 for (idx = (300 / 33) + 1; idx > 0; idx--) 356 CSR_READ_4(sc, DC_BUSCTL); 357 } 358 359 static void 360 dc_eeprom_width(struct dc_softc *sc) 361 { 362 int i; 363 364 /* Force EEPROM to idle state. */ 365 dc_eeprom_idle(sc); 366 367 /* Enter EEPROM access mode. */ 368 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 369 dc_delay(sc); 370 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 371 dc_delay(sc); 372 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 373 dc_delay(sc); 374 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 375 dc_delay(sc); 376 377 for (i = 3; i--;) { 378 if (6 & (1 << i)) 379 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 380 else 381 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 382 dc_delay(sc); 383 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 384 dc_delay(sc); 385 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 386 dc_delay(sc); 387 } 388 389 for (i = 1; i <= 12; i++) { 390 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 391 dc_delay(sc); 392 if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) { 393 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 394 dc_delay(sc); 395 break; 396 } 397 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 398 dc_delay(sc); 399 } 400 401 /* Turn off EEPROM access mode. */ 402 dc_eeprom_idle(sc); 403 404 if (i < 4 || i > 12) 405 sc->dc_romwidth = 6; 406 else 407 sc->dc_romwidth = i; 408 409 /* Enter EEPROM access mode. */ 410 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 411 dc_delay(sc); 412 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 413 dc_delay(sc); 414 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 415 dc_delay(sc); 416 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 417 dc_delay(sc); 418 419 /* Turn off EEPROM access mode. */ 420 dc_eeprom_idle(sc); 421 } 422 423 static void 424 dc_eeprom_idle(struct dc_softc *sc) 425 { 426 int i; 427 428 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 429 dc_delay(sc); 430 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 431 dc_delay(sc); 432 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 433 dc_delay(sc); 434 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 435 dc_delay(sc); 436 437 for (i = 0; i < 25; i++) { 438 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 439 dc_delay(sc); 440 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 441 dc_delay(sc); 442 } 443 444 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 445 dc_delay(sc); 446 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS); 447 dc_delay(sc); 448 CSR_WRITE_4(sc, DC_SIO, 0x00000000); 449 } 450 451 /* 452 * Send a read command and address to the EEPROM, check for ACK. 
453 */ 454 static void 455 dc_eeprom_putbyte(struct dc_softc *sc, int addr) 456 { 457 int d, i; 458 459 d = DC_EECMD_READ >> 6; 460 for (i = 3; i--; ) { 461 if (d & (1 << i)) 462 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 463 else 464 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 465 dc_delay(sc); 466 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 467 dc_delay(sc); 468 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 469 dc_delay(sc); 470 } 471 472 /* 473 * Feed in each bit and strobe the clock. 474 */ 475 for (i = sc->dc_romwidth; i--;) { 476 if (addr & (1 << i)) { 477 SIO_SET(DC_SIO_EE_DATAIN); 478 } else { 479 SIO_CLR(DC_SIO_EE_DATAIN); 480 } 481 dc_delay(sc); 482 SIO_SET(DC_SIO_EE_CLK); 483 dc_delay(sc); 484 SIO_CLR(DC_SIO_EE_CLK); 485 dc_delay(sc); 486 } 487 } 488 489 /* 490 * Read a word of data stored in the EEPROM at address 'addr.' 491 * The PNIC 82c168/82c169 has its own non-standard way to read 492 * the EEPROM. 493 */ 494 static void 495 dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest) 496 { 497 int i; 498 u_int32_t r; 499 500 CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ | addr); 501 502 for (i = 0; i < DC_TIMEOUT; i++) { 503 DELAY(1); 504 r = CSR_READ_4(sc, DC_SIO); 505 if (!(r & DC_PN_SIOCTL_BUSY)) { 506 *dest = (u_int16_t)(r & 0xFFFF); 507 return; 508 } 509 } 510 } 511 512 /* 513 * Read a word of data stored in the EEPROM at address 'addr.' 514 * The Xircom X3201 has its own non-standard way to read 515 * the EEPROM, too. 516 */ 517 static void 518 dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest) 519 { 520 521 SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); 522 523 addr *= 2; 524 CSR_WRITE_4(sc, DC_ROM, addr | 0x160); 525 *dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff; 526 addr += 1; 527 CSR_WRITE_4(sc, DC_ROM, addr | 0x160); 528 *dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8; 529 530 SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); 531 } 532 533 /* 534 * Read a word of data stored in the EEPROM at address 'addr.' 535 */ 536 static void 537 dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest) 538 { 539 int i; 540 u_int16_t word = 0; 541 542 /* Force EEPROM to idle state. */ 543 dc_eeprom_idle(sc); 544 545 /* Enter EEPROM access mode. */ 546 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 547 dc_delay(sc); 548 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 549 dc_delay(sc); 550 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 551 dc_delay(sc); 552 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 553 dc_delay(sc); 554 555 /* 556 * Send address of word we want to read. 557 */ 558 dc_eeprom_putbyte(sc, addr); 559 560 /* 561 * Start reading bits from EEPROM. 562 */ 563 for (i = 0x8000; i; i >>= 1) { 564 SIO_SET(DC_SIO_EE_CLK); 565 dc_delay(sc); 566 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT) 567 word |= i; 568 dc_delay(sc); 569 SIO_CLR(DC_SIO_EE_CLK); 570 dc_delay(sc); 571 } 572 573 /* Turn off EEPROM access mode. */ 574 dc_eeprom_idle(sc); 575 576 *dest = word; 577 } 578 579 /* 580 * Read a sequence of words from the EEPROM. 
581 */ 582 static void 583 dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt, int swap) 584 { 585 int i; 586 u_int16_t word = 0, *ptr; 587 588 for (i = 0; i < cnt; i++) { 589 if (DC_IS_PNIC(sc)) 590 dc_eeprom_getword_pnic(sc, off + i, &word); 591 else if (DC_IS_XIRCOM(sc)) 592 dc_eeprom_getword_xircom(sc, off + i, &word); 593 else 594 dc_eeprom_getword(sc, off + i, &word); 595 ptr = (u_int16_t *)(dest + (i * 2)); 596 if (swap) 597 *ptr = ntohs(word); 598 else 599 *ptr = word; 600 } 601 } 602 603 /* 604 * The following two routines are taken from the Macronix 98713 605 * Application Notes pp.19-21. 606 */ 607 /* 608 * Write a bit to the MII bus. 609 */ 610 static void 611 dc_mii_writebit(struct dc_softc *sc, int bit) 612 { 613 614 if (bit) 615 CSR_WRITE_4(sc, DC_SIO, 616 DC_SIO_ROMCTL_WRITE | DC_SIO_MII_DATAOUT); 617 else 618 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); 619 620 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK); 621 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK); 622 } 623 624 /* 625 * Read a bit from the MII bus. 626 */ 627 static int 628 dc_mii_readbit(struct dc_softc *sc) 629 { 630 631 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ | DC_SIO_MII_DIR); 632 CSR_READ_4(sc, DC_SIO); 633 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK); 634 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK); 635 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN) 636 return (1); 637 638 return (0); 639 } 640 641 /* 642 * Sync the PHYs by setting data bit and strobing the clock 32 times. 643 */ 644 static void 645 dc_mii_sync(struct dc_softc *sc) 646 { 647 int i; 648 649 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); 650 651 for (i = 0; i < 32; i++) 652 dc_mii_writebit(sc, 1); 653 } 654 655 /* 656 * Clock a series of bits through the MII. 657 */ 658 static void 659 dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt) 660 { 661 int i; 662 663 for (i = (0x1 << (cnt - 1)); i; i >>= 1) 664 dc_mii_writebit(sc, bits & i); 665 } 666 667 /* 668 * Read an PHY register through the MII. 669 */ 670 static int 671 dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame) 672 { 673 int i, ack; 674 675 DC_LOCK(sc); 676 677 /* 678 * Set up frame for RX. 679 */ 680 frame->mii_stdelim = DC_MII_STARTDELIM; 681 frame->mii_opcode = DC_MII_READOP; 682 frame->mii_turnaround = 0; 683 frame->mii_data = 0; 684 685 /* 686 * Sync the PHYs. 687 */ 688 dc_mii_sync(sc); 689 690 /* 691 * Send command/address info. 692 */ 693 dc_mii_send(sc, frame->mii_stdelim, 2); 694 dc_mii_send(sc, frame->mii_opcode, 2); 695 dc_mii_send(sc, frame->mii_phyaddr, 5); 696 dc_mii_send(sc, frame->mii_regaddr, 5); 697 698 #ifdef notdef 699 /* Idle bit */ 700 dc_mii_writebit(sc, 1); 701 dc_mii_writebit(sc, 0); 702 #endif 703 704 /* Check for ack. */ 705 ack = dc_mii_readbit(sc); 706 707 /* 708 * Now try reading data bits. If the ack failed, we still 709 * need to clock through 16 cycles to keep the PHY(s) in sync. 710 */ 711 if (ack) { 712 for (i = 0; i < 16; i++) 713 dc_mii_readbit(sc); 714 goto fail; 715 } 716 717 for (i = 0x8000; i; i >>= 1) { 718 if (!ack) { 719 if (dc_mii_readbit(sc)) 720 frame->mii_data |= i; 721 } 722 } 723 724 fail: 725 726 dc_mii_writebit(sc, 0); 727 dc_mii_writebit(sc, 0); 728 729 DC_UNLOCK(sc); 730 731 if (ack) 732 return (1); 733 return (0); 734 } 735 736 /* 737 * Write to a PHY register through the MII. 738 */ 739 static int 740 dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame) 741 { 742 743 DC_LOCK(sc); 744 /* 745 * Set up frame for TX. 
746 */ 747 748 frame->mii_stdelim = DC_MII_STARTDELIM; 749 frame->mii_opcode = DC_MII_WRITEOP; 750 frame->mii_turnaround = DC_MII_TURNAROUND; 751 752 /* 753 * Sync the PHYs. 754 */ 755 dc_mii_sync(sc); 756 757 dc_mii_send(sc, frame->mii_stdelim, 2); 758 dc_mii_send(sc, frame->mii_opcode, 2); 759 dc_mii_send(sc, frame->mii_phyaddr, 5); 760 dc_mii_send(sc, frame->mii_regaddr, 5); 761 dc_mii_send(sc, frame->mii_turnaround, 2); 762 dc_mii_send(sc, frame->mii_data, 16); 763 764 /* Idle bit. */ 765 dc_mii_writebit(sc, 0); 766 dc_mii_writebit(sc, 0); 767 768 DC_UNLOCK(sc); 769 770 return (0); 771 } 772 773 static int 774 dc_miibus_readreg(device_t dev, int phy, int reg) 775 { 776 struct dc_mii_frame frame; 777 struct dc_softc *sc; 778 int i, rval, phy_reg = 0; 779 780 sc = device_get_softc(dev); 781 bzero(&frame, sizeof(frame)); 782 783 /* 784 * Note: both the AL981 and AN985 have internal PHYs, 785 * however the AL981 provides direct access to the PHY 786 * registers while the AN985 uses a serial MII interface. 787 * The AN985's MII interface is also buggy in that you 788 * can read from any MII address (0 to 31), but only address 1 789 * behaves normally. To deal with both cases, we pretend 790 * that the PHY is at MII address 1. 791 */ 792 if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) 793 return (0); 794 795 /* 796 * Note: the ukphy probes of the RS7112 report a PHY at 797 * MII address 0 (possibly HomePNA?) and 1 (ethernet) 798 * so we only respond to correct one. 799 */ 800 if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR) 801 return (0); 802 803 if (sc->dc_pmode != DC_PMODE_MII) { 804 if (phy == (MII_NPHY - 1)) { 805 switch (reg) { 806 case MII_BMSR: 807 /* 808 * Fake something to make the probe 809 * code think there's a PHY here. 810 */ 811 return (BMSR_MEDIAMASK); 812 break; 813 case MII_PHYIDR1: 814 if (DC_IS_PNIC(sc)) 815 return (DC_VENDORID_LO); 816 return (DC_VENDORID_DEC); 817 break; 818 case MII_PHYIDR2: 819 if (DC_IS_PNIC(sc)) 820 return (DC_DEVICEID_82C168); 821 return (DC_DEVICEID_21143); 822 break; 823 default: 824 return (0); 825 break; 826 } 827 } else 828 return (0); 829 } 830 831 if (DC_IS_PNIC(sc)) { 832 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ | 833 (phy << 23) | (reg << 18)); 834 for (i = 0; i < DC_TIMEOUT; i++) { 835 DELAY(1); 836 rval = CSR_READ_4(sc, DC_PN_MII); 837 if (!(rval & DC_PN_MII_BUSY)) { 838 rval &= 0xFFFF; 839 return (rval == 0xFFFF ? 
0 : rval); 840 } 841 } 842 return (0); 843 } 844 845 if (DC_IS_COMET(sc)) { 846 switch (reg) { 847 case MII_BMCR: 848 phy_reg = DC_AL_BMCR; 849 break; 850 case MII_BMSR: 851 phy_reg = DC_AL_BMSR; 852 break; 853 case MII_PHYIDR1: 854 phy_reg = DC_AL_VENID; 855 break; 856 case MII_PHYIDR2: 857 phy_reg = DC_AL_DEVID; 858 break; 859 case MII_ANAR: 860 phy_reg = DC_AL_ANAR; 861 break; 862 case MII_ANLPAR: 863 phy_reg = DC_AL_LPAR; 864 break; 865 case MII_ANER: 866 phy_reg = DC_AL_ANER; 867 break; 868 default: 869 printf("dc%d: phy_read: bad phy register %x\n", 870 sc->dc_unit, reg); 871 return (0); 872 break; 873 } 874 875 rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF; 876 877 if (rval == 0xFFFF) 878 return (0); 879 return (rval); 880 } 881 882 frame.mii_phyaddr = phy; 883 frame.mii_regaddr = reg; 884 if (sc->dc_type == DC_TYPE_98713) { 885 phy_reg = CSR_READ_4(sc, DC_NETCFG); 886 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); 887 } 888 dc_mii_readreg(sc, &frame); 889 if (sc->dc_type == DC_TYPE_98713) 890 CSR_WRITE_4(sc, DC_NETCFG, phy_reg); 891 892 return (frame.mii_data); 893 } 894 895 static int 896 dc_miibus_writereg(device_t dev, int phy, int reg, int data) 897 { 898 struct dc_softc *sc; 899 struct dc_mii_frame frame; 900 int i, phy_reg = 0; 901 902 sc = device_get_softc(dev); 903 bzero(&frame, sizeof(frame)); 904 905 if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) 906 return (0); 907 908 if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR) 909 return (0); 910 911 if (DC_IS_PNIC(sc)) { 912 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE | 913 (phy << 23) | (reg << 10) | data); 914 for (i = 0; i < DC_TIMEOUT; i++) { 915 if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY)) 916 break; 917 } 918 return (0); 919 } 920 921 if (DC_IS_COMET(sc)) { 922 switch (reg) { 923 case MII_BMCR: 924 phy_reg = DC_AL_BMCR; 925 break; 926 case MII_BMSR: 927 phy_reg = DC_AL_BMSR; 928 break; 929 case MII_PHYIDR1: 930 phy_reg = DC_AL_VENID; 931 break; 932 case MII_PHYIDR2: 933 phy_reg = DC_AL_DEVID; 934 break; 935 case MII_ANAR: 936 phy_reg = DC_AL_ANAR; 937 break; 938 case MII_ANLPAR: 939 phy_reg = DC_AL_LPAR; 940 break; 941 case MII_ANER: 942 phy_reg = DC_AL_ANER; 943 break; 944 default: 945 printf("dc%d: phy_write: bad phy register %x\n", 946 sc->dc_unit, reg); 947 return (0); 948 break; 949 } 950 951 CSR_WRITE_4(sc, phy_reg, data); 952 return (0); 953 } 954 955 frame.mii_phyaddr = phy; 956 frame.mii_regaddr = reg; 957 frame.mii_data = data; 958 959 if (sc->dc_type == DC_TYPE_98713) { 960 phy_reg = CSR_READ_4(sc, DC_NETCFG); 961 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); 962 } 963 dc_mii_writereg(sc, &frame); 964 if (sc->dc_type == DC_TYPE_98713) 965 CSR_WRITE_4(sc, DC_NETCFG, phy_reg); 966 967 return (0); 968 } 969 970 static void 971 dc_miibus_statchg(device_t dev) 972 { 973 struct dc_softc *sc; 974 struct mii_data *mii; 975 struct ifmedia *ifm; 976 977 sc = device_get_softc(dev); 978 if (DC_IS_ADMTEK(sc)) 979 return; 980 981 mii = device_get_softc(sc->dc_miibus); 982 ifm = &mii->mii_media; 983 if (DC_IS_DAVICOM(sc) && 984 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { 985 dc_setcfg(sc, ifm->ifm_media); 986 sc->dc_if_media = ifm->ifm_media; 987 } else { 988 dc_setcfg(sc, mii->mii_media_active); 989 sc->dc_if_media = mii->mii_media_active; 990 } 991 } 992 993 /* 994 * Special support for DM9102A cards with HomePNA PHYs. 
Note: 995 * with the Davicom DM9102A/DM9801 eval board that I have, it seems 996 * to be impossible to talk to the management interface of the DM9801 997 * PHY (its MDIO pin is not connected to anything). Consequently, 998 * the driver has to just 'know' about the additional mode and deal 999 * with it itself. *sigh* 1000 */ 1001 static void 1002 dc_miibus_mediainit(device_t dev) 1003 { 1004 struct dc_softc *sc; 1005 struct mii_data *mii; 1006 struct ifmedia *ifm; 1007 int rev; 1008 1009 rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF; 1010 1011 sc = device_get_softc(dev); 1012 mii = device_get_softc(sc->dc_miibus); 1013 ifm = &mii->mii_media; 1014 1015 if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A) 1016 ifmedia_add(ifm, IFM_ETHER | IFM_HPNA_1, 0, NULL); 1017 } 1018 1019 #define DC_POLY 0xEDB88320 1020 #define DC_BITS_512 9 1021 #define DC_BITS_128 7 1022 #define DC_BITS_64 6 1023 1024 static u_int32_t 1025 dc_crc_le(struct dc_softc *sc, caddr_t addr) 1026 { 1027 u_int32_t idx, bit, data, crc; 1028 1029 /* Compute CRC for the address value. */ 1030 crc = 0xFFFFFFFF; /* initial value */ 1031 1032 for (idx = 0; idx < 6; idx++) { 1033 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) 1034 crc = (crc >> 1) ^ (((crc ^ data) & 1) ? DC_POLY : 0); 1035 } 1036 1037 /* 1038 * The hash table on the PNIC II and the MX98715AEC-C/D/E 1039 * chips is only 128 bits wide. 1040 */ 1041 if (sc->dc_flags & DC_128BIT_HASH) 1042 return (crc & ((1 << DC_BITS_128) - 1)); 1043 1044 /* The hash table on the MX98715BEC is only 64 bits wide. */ 1045 if (sc->dc_flags & DC_64BIT_HASH) 1046 return (crc & ((1 << DC_BITS_64) - 1)); 1047 1048 /* Xircom's hash filtering table is different (read: weird) */ 1049 /* Xircom uses the LEAST significant bits */ 1050 if (DC_IS_XIRCOM(sc)) { 1051 if ((crc & 0x180) == 0x180) 1052 return ((crc & 0x0F) + (crc & 0x70) * 3 + (14 << 4)); 1053 else 1054 return ((crc & 0x1F) + ((crc >> 1) & 0xF0) * 3 + 1055 (12 << 4)); 1056 } 1057 1058 return (crc & ((1 << DC_BITS_512) - 1)); 1059 } 1060 1061 /* 1062 * Calculate CRC of a multicast group address, return the lower 6 bits. 1063 */ 1064 static u_int32_t 1065 dc_crc_be(caddr_t addr) 1066 { 1067 u_int32_t crc, carry; 1068 int i, j; 1069 u_int8_t c; 1070 1071 /* Compute CRC for the address value. */ 1072 crc = 0xFFFFFFFF; /* initial value */ 1073 1074 for (i = 0; i < 6; i++) { 1075 c = *(addr + i); 1076 for (j = 0; j < 8; j++) { 1077 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); 1078 crc <<= 1; 1079 c >>= 1; 1080 if (carry) 1081 crc = (crc ^ 0x04c11db6) | carry; 1082 } 1083 } 1084 1085 /* Return the filter bit position. */ 1086 return ((crc >> 26) & 0x0000003F); 1087 } 1088 1089 /* 1090 * 21143-style RX filter setup routine. Filter programming is done by 1091 * downloading a special setup frame into the TX engine. 21143, Macronix, 1092 * PNIC, PNIC II and Davicom chips are programmed this way. 1093 * 1094 * We always program the chip using 'hash perfect' mode, i.e. one perfect 1095 * address (our node address) and a 512-bit hash filter for multicast 1096 * frames. We also sneak the broadcast address into the hash filter since 1097 * we need that too. 
1098 */ 1099 static void 1100 dc_setfilt_21143(struct dc_softc *sc) 1101 { 1102 struct dc_desc *sframe; 1103 u_int32_t h, *sp; 1104 struct ifmultiaddr *ifma; 1105 struct ifnet *ifp; 1106 int i; 1107 1108 ifp = &sc->arpcom.ac_if; 1109 1110 i = sc->dc_cdata.dc_tx_prod; 1111 DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); 1112 sc->dc_cdata.dc_tx_cnt++; 1113 sframe = &sc->dc_ldata->dc_tx_list[i]; 1114 sp = sc->dc_cdata.dc_sbuf; 1115 bzero(sp, DC_SFRAME_LEN); 1116 1117 sframe->dc_data = htole32(sc->dc_saddr); 1118 sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP | 1119 DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT); 1120 1121 sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf; 1122 1123 /* If we want promiscuous mode, set the allframes bit. */ 1124 if (ifp->if_flags & IFF_PROMISC) 1125 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1126 else 1127 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1128 1129 if (ifp->if_flags & IFF_ALLMULTI) 1130 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1131 else 1132 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1133 1134 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1135 if (ifma->ifma_addr->sa_family != AF_LINK) 1136 continue; 1137 h = dc_crc_le(sc, 1138 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 1139 sp[h >> 4] |= htole32(1 << (h & 0xF)); 1140 } 1141 1142 if (ifp->if_flags & IFF_BROADCAST) { 1143 h = dc_crc_le(sc, (caddr_t)ifp->if_broadcastaddr); 1144 sp[h >> 4] |= htole32(1 << (h & 0xF)); 1145 } 1146 1147 /* Set our MAC address */ 1148 sp[39] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[0]); 1149 sp[40] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[1]); 1150 sp[41] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[2]); 1151 1152 sframe->dc_status = htole32(DC_TXSTAT_OWN); 1153 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 1154 1155 /* 1156 * The PNIC takes an exceedingly long time to process its 1157 * setup frame; wait 10ms after posting the setup frame 1158 * before proceeding, just so it has time to swallow its 1159 * medicine. 1160 */ 1161 DELAY(10000); 1162 1163 ifp->if_timer = 5; 1164 } 1165 1166 static void 1167 dc_setfilt_admtek(struct dc_softc *sc) 1168 { 1169 struct ifnet *ifp; 1170 struct ifmultiaddr *ifma; 1171 int h = 0; 1172 u_int32_t hashes[2] = { 0, 0 }; 1173 1174 ifp = &sc->arpcom.ac_if; 1175 1176 /* Init our MAC address. */ 1177 CSR_WRITE_4(sc, DC_AL_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); 1178 CSR_WRITE_4(sc, DC_AL_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); 1179 1180 /* If we want promiscuous mode, set the allframes bit. */ 1181 if (ifp->if_flags & IFF_PROMISC) 1182 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1183 else 1184 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1185 1186 if (ifp->if_flags & IFF_ALLMULTI) 1187 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1188 else 1189 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1190 1191 /* First, zot all the existing hash bits. */ 1192 CSR_WRITE_4(sc, DC_AL_MAR0, 0); 1193 CSR_WRITE_4(sc, DC_AL_MAR1, 0); 1194 1195 /* 1196 * If we're already in promisc or allmulti mode, we 1197 * don't have to bother programming the multicast filter. 1198 */ 1199 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) 1200 return; 1201 1202 /* Now program new ones. 
*/ 1203 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1204 if (ifma->ifma_addr->sa_family != AF_LINK) 1205 continue; 1206 if (DC_IS_CENTAUR(sc)) 1207 h = dc_crc_le(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 1208 else 1209 h = dc_crc_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 1210 if (h < 32) 1211 hashes[0] |= (1 << h); 1212 else 1213 hashes[1] |= (1 << (h - 32)); 1214 } 1215 1216 CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]); 1217 CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]); 1218 } 1219 1220 static void 1221 dc_setfilt_asix(struct dc_softc *sc) 1222 { 1223 struct ifnet *ifp; 1224 struct ifmultiaddr *ifma; 1225 int h = 0; 1226 u_int32_t hashes[2] = { 0, 0 }; 1227 1228 ifp = &sc->arpcom.ac_if; 1229 1230 /* Init our MAC address */ 1231 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0); 1232 CSR_WRITE_4(sc, DC_AX_FILTDATA, 1233 *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); 1234 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1); 1235 CSR_WRITE_4(sc, DC_AX_FILTDATA, 1236 *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); 1237 1238 /* If we want promiscuous mode, set the allframes bit. */ 1239 if (ifp->if_flags & IFF_PROMISC) 1240 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1241 else 1242 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1243 1244 if (ifp->if_flags & IFF_ALLMULTI) 1245 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1246 else 1247 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1248 1249 /* 1250 * The ASIX chip has a special bit to enable reception 1251 * of broadcast frames. 1252 */ 1253 if (ifp->if_flags & IFF_BROADCAST) 1254 DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); 1255 else 1256 DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); 1257 1258 /* first, zot all the existing hash bits */ 1259 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); 1260 CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); 1261 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); 1262 CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); 1263 1264 /* 1265 * If we're already in promisc or allmulti mode, we 1266 * don't have to bother programming the multicast filter. 1267 */ 1268 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) 1269 return; 1270 1271 /* now program new ones */ 1272 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1273 if (ifma->ifma_addr->sa_family != AF_LINK) 1274 continue; 1275 h = dc_crc_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 1276 if (h < 32) 1277 hashes[0] |= (1 << h); 1278 else 1279 hashes[1] |= (1 << (h - 32)); 1280 } 1281 1282 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); 1283 CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]); 1284 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); 1285 CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]); 1286 } 1287 1288 static void 1289 dc_setfilt_xircom(struct dc_softc *sc) 1290 { 1291 struct ifnet *ifp; 1292 struct ifmultiaddr *ifma; 1293 struct dc_desc *sframe; 1294 u_int32_t h, *sp; 1295 int i; 1296 1297 ifp = &sc->arpcom.ac_if; 1298 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)); 1299 1300 i = sc->dc_cdata.dc_tx_prod; 1301 DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); 1302 sc->dc_cdata.dc_tx_cnt++; 1303 sframe = &sc->dc_ldata->dc_tx_list[i]; 1304 sp = sc->dc_cdata.dc_sbuf; 1305 bzero(sp, DC_SFRAME_LEN); 1306 1307 sframe->dc_data = htole32(sc->dc_saddr); 1308 sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP | 1309 DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT); 1310 1311 sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf; 1312 1313 /* If we want promiscuous mode, set the allframes bit. 
*/ 1314 if (ifp->if_flags & IFF_PROMISC) 1315 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1316 else 1317 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1318 1319 if (ifp->if_flags & IFF_ALLMULTI) 1320 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1321 else 1322 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1323 1324 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1325 if (ifma->ifma_addr->sa_family != AF_LINK) 1326 continue; 1327 h = dc_crc_le(sc, 1328 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 1329 sp[h >> 4] |= htole32(1 << (h & 0xF)); 1330 } 1331 1332 if (ifp->if_flags & IFF_BROADCAST) { 1333 h = dc_crc_le(sc, (caddr_t)ifp->if_broadcastaddr); 1334 sp[h >> 4] |= htole32(1 << (h & 0xF)); 1335 } 1336 1337 /* Set our MAC address */ 1338 sp[0] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[0]); 1339 sp[1] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[1]); 1340 sp[2] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[2]); 1341 1342 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 1343 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); 1344 ifp->if_flags |= IFF_RUNNING; 1345 sframe->dc_status = htole32(DC_TXSTAT_OWN); 1346 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 1347 1348 /* 1349 * Wait some time... 1350 */ 1351 DELAY(1000); 1352 1353 ifp->if_timer = 5; 1354 } 1355 1356 static void 1357 dc_setfilt(struct dc_softc *sc) 1358 { 1359 1360 if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) || 1361 DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc)) 1362 dc_setfilt_21143(sc); 1363 1364 if (DC_IS_ASIX(sc)) 1365 dc_setfilt_asix(sc); 1366 1367 if (DC_IS_ADMTEK(sc)) 1368 dc_setfilt_admtek(sc); 1369 1370 if (DC_IS_XIRCOM(sc)) 1371 dc_setfilt_xircom(sc); 1372 } 1373 1374 /* 1375 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in 1376 * the netconfig register, we first have to put the transmit and/or 1377 * receive logic in the idle state. 1378 */ 1379 static void 1380 dc_setcfg(struct dc_softc *sc, int media) 1381 { 1382 int i, restart = 0, watchdogreg; 1383 u_int32_t isr; 1384 1385 if (IFM_SUBTYPE(media) == IFM_NONE) 1386 return; 1387 1388 if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)) { 1389 restart = 1; 1390 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)); 1391 1392 for (i = 0; i < DC_TIMEOUT; i++) { 1393 isr = CSR_READ_4(sc, DC_ISR); 1394 if (isr & DC_ISR_TX_IDLE && 1395 ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED || 1396 (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT)) 1397 break; 1398 DELAY(10); 1399 } 1400 1401 if (i == DC_TIMEOUT) 1402 printf("dc%d: failed to force tx and " 1403 "rx to idle state\n", sc->dc_unit); 1404 } 1405 1406 if (IFM_SUBTYPE(media) == IFM_100_TX) { 1407 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL); 1408 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT); 1409 if (sc->dc_pmode == DC_PMODE_MII) { 1410 if (DC_IS_INTEL(sc)) { 1411 /* There's a write enable bit here that reads as 1. 
*/ 1412 watchdogreg = CSR_READ_4(sc, DC_WATCHDOG); 1413 watchdogreg &= ~DC_WDOG_CTLWREN; 1414 watchdogreg |= DC_WDOG_JABBERDIS; 1415 CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg); 1416 } else { 1417 DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS); 1418 } 1419 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS | 1420 DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER)); 1421 if (sc->dc_type == DC_TYPE_98713) 1422 DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS | 1423 DC_NETCFG_SCRAMBLER)); 1424 if (!DC_IS_DAVICOM(sc)) 1425 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1426 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1427 if (DC_IS_INTEL(sc)) 1428 dc_apply_fixup(sc, IFM_AUTO); 1429 } else { 1430 if (DC_IS_PNIC(sc)) { 1431 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL); 1432 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP); 1433 DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL); 1434 } 1435 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1436 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1437 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER); 1438 if (DC_IS_INTEL(sc)) 1439 dc_apply_fixup(sc, 1440 (media & IFM_GMASK) == IFM_FDX ? 1441 IFM_100_TX | IFM_FDX : IFM_100_TX); 1442 } 1443 } 1444 1445 if (IFM_SUBTYPE(media) == IFM_10_T) { 1446 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL); 1447 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT); 1448 if (sc->dc_pmode == DC_PMODE_MII) { 1449 /* There's a write enable bit here that reads as 1. */ 1450 if (DC_IS_INTEL(sc)) { 1451 watchdogreg = CSR_READ_4(sc, DC_WATCHDOG); 1452 watchdogreg &= ~DC_WDOG_CTLWREN; 1453 watchdogreg |= DC_WDOG_JABBERDIS; 1454 CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg); 1455 } else { 1456 DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS); 1457 } 1458 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS | 1459 DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER)); 1460 if (sc->dc_type == DC_TYPE_98713) 1461 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1462 if (!DC_IS_DAVICOM(sc)) 1463 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1464 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1465 if (DC_IS_INTEL(sc)) 1466 dc_apply_fixup(sc, IFM_AUTO); 1467 } else { 1468 if (DC_IS_PNIC(sc)) { 1469 DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL); 1470 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP); 1471 DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL); 1472 } 1473 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1474 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1475 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER); 1476 if (DC_IS_INTEL(sc)) { 1477 DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET); 1478 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1479 if ((media & IFM_GMASK) == IFM_FDX) 1480 DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D); 1481 else 1482 DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F); 1483 DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET); 1484 DC_CLRBIT(sc, DC_10BTCTRL, 1485 DC_TCTL_AUTONEGENBL); 1486 dc_apply_fixup(sc, 1487 (media & IFM_GMASK) == IFM_FDX ? 1488 IFM_10_T | IFM_FDX : IFM_10_T); 1489 DELAY(20000); 1490 } 1491 } 1492 } 1493 1494 /* 1495 * If this is a Davicom DM9102A card with a DM9801 HomePNA 1496 * PHY and we want HomePNA mode, set the portsel bit to turn 1497 * on the external MII port. 
1498 */ 1499 if (DC_IS_DAVICOM(sc)) { 1500 if (IFM_SUBTYPE(media) == IFM_HPNA_1) { 1501 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1502 sc->dc_link = 1; 1503 } else { 1504 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1505 } 1506 } 1507 1508 if ((media & IFM_GMASK) == IFM_FDX) { 1509 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX); 1510 if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc)) 1511 DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX); 1512 } else { 1513 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX); 1514 if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc)) 1515 DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX); 1516 } 1517 1518 if (restart) 1519 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON | DC_NETCFG_RX_ON); 1520 } 1521 1522 static void 1523 dc_reset(struct dc_softc *sc) 1524 { 1525 int i; 1526 1527 DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET); 1528 1529 for (i = 0; i < DC_TIMEOUT; i++) { 1530 DELAY(10); 1531 if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET)) 1532 break; 1533 } 1534 1535 if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_CONEXANT(sc) || 1536 DC_IS_XIRCOM(sc) || DC_IS_INTEL(sc)) { 1537 DELAY(10000); 1538 DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET); 1539 i = 0; 1540 } 1541 1542 if (i == DC_TIMEOUT) 1543 printf("dc%d: reset never completed!\n", sc->dc_unit); 1544 1545 /* Wait a little while for the chip to get its brains in order. */ 1546 DELAY(1000); 1547 1548 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 1549 CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000); 1550 CSR_WRITE_4(sc, DC_NETCFG, 0x00000000); 1551 1552 /* 1553 * Bring the SIA out of reset. In some cases, it looks 1554 * like failing to unreset the SIA soon enough gets it 1555 * into a state where it will never come out of reset 1556 * until we reset the whole chip again. 1557 */ 1558 if (DC_IS_INTEL(sc)) { 1559 DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET); 1560 CSR_WRITE_4(sc, DC_10BTCTRL, 0); 1561 CSR_WRITE_4(sc, DC_WATCHDOG, 0); 1562 } 1563 } 1564 1565 static struct dc_type * 1566 dc_devtype(device_t dev) 1567 { 1568 struct dc_type *t; 1569 u_int32_t rev; 1570 1571 t = dc_devs; 1572 1573 while (t->dc_name != NULL) { 1574 if ((pci_get_vendor(dev) == t->dc_vid) && 1575 (pci_get_device(dev) == t->dc_did)) { 1576 /* Check the PCI revision */ 1577 rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF; 1578 if (t->dc_did == DC_DEVICEID_98713 && 1579 rev >= DC_REVISION_98713A) 1580 t++; 1581 if (t->dc_did == DC_DEVICEID_98713_CP && 1582 rev >= DC_REVISION_98713A) 1583 t++; 1584 if (t->dc_did == DC_DEVICEID_987x5 && 1585 rev >= DC_REVISION_98715AEC_C) 1586 t++; 1587 if (t->dc_did == DC_DEVICEID_987x5 && 1588 rev >= DC_REVISION_98725) 1589 t++; 1590 if (t->dc_did == DC_DEVICEID_AX88140A && 1591 rev >= DC_REVISION_88141) 1592 t++; 1593 if (t->dc_did == DC_DEVICEID_82C168 && 1594 rev >= DC_REVISION_82C169) 1595 t++; 1596 if (t->dc_did == DC_DEVICEID_DM9102 && 1597 rev >= DC_REVISION_DM9102A) 1598 t++; 1599 /* 1600 * The Microsoft MN-130 has a device ID of 0x0002, 1601 * which happens to be the same as the PNIC 82c168. 1602 * To keep dc_attach() from getting confused, we 1603 * pretend its ID is something different. 1604 * XXX: ideally, dc_attach() should be checking 1605 * vendorid+deviceid together to avoid such 1606 * collisions. 1607 */ 1608 if (t->dc_vid == DC_VENDORID_MICROSOFT && 1609 t->dc_did == DC_DEVICEID_MSMN130) 1610 t++; 1611 return (t); 1612 } 1613 t++; 1614 } 1615 1616 return (NULL); 1617 } 1618 1619 /* 1620 * Probe for a 21143 or clone chip. Check the PCI vendor and device 1621 * IDs against our list and return a device name if we find a match. 
1622 * We do a little bit of extra work to identify the exact type of 1623 * chip. The MX98713 and MX98713A have the same PCI vendor/device ID, 1624 * but different revision IDs. The same is true for 98715/98715A 1625 * chips and the 98725, as well as the ASIX and ADMtek chips. In some 1626 * cases, the exact chip revision affects driver behavior. 1627 */ 1628 static int 1629 dc_probe(device_t dev) 1630 { 1631 struct dc_type *t; 1632 1633 t = dc_devtype(dev); 1634 1635 if (t != NULL) { 1636 device_set_desc(dev, t->dc_name); 1637 return (0); 1638 } 1639 1640 return (ENXIO); 1641 } 1642 1643 #ifndef BURN_BRIDGES 1644 static void 1645 dc_acpi(device_t dev) 1646 { 1647 int unit; 1648 u_int32_t iobase, membase, irq; 1649 1650 unit = device_get_unit(dev); 1651 1652 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1653 /* Save important PCI config data. */ 1654 iobase = pci_read_config(dev, DC_PCI_CFBIO, 4); 1655 membase = pci_read_config(dev, DC_PCI_CFBMA, 4); 1656 irq = pci_read_config(dev, DC_PCI_CFIT, 4); 1657 1658 /* Reset the power state. */ 1659 printf("dc%d: chip is in D%d power mode " 1660 "-- setting to D0\n", unit, 1661 pci_get_powerstate(dev)); 1662 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1663 1664 /* Restore PCI config data. */ 1665 pci_write_config(dev, DC_PCI_CFBIO, iobase, 4); 1666 pci_write_config(dev, DC_PCI_CFBMA, membase, 4); 1667 pci_write_config(dev, DC_PCI_CFIT, irq, 4); 1668 } 1669 } 1670 #endif 1671 1672 static void 1673 dc_apply_fixup(struct dc_softc *sc, int media) 1674 { 1675 struct dc_mediainfo *m; 1676 u_int8_t *p; 1677 int i; 1678 u_int32_t reg; 1679 1680 m = sc->dc_mi; 1681 1682 while (m != NULL) { 1683 if (m->dc_media == media) 1684 break; 1685 m = m->dc_next; 1686 } 1687 1688 if (m == NULL) 1689 return; 1690 1691 for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) { 1692 reg = (p[0] | (p[1] << 8)) << 16; 1693 CSR_WRITE_4(sc, DC_WATCHDOG, reg); 1694 } 1695 1696 for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) { 1697 reg = (p[0] | (p[1] << 8)) << 16; 1698 CSR_WRITE_4(sc, DC_WATCHDOG, reg); 1699 } 1700 } 1701 1702 static void 1703 dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l) 1704 { 1705 struct dc_mediainfo *m; 1706 1707 m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO); 1708 if (l->dc_sia_code == DC_SIA_CODE_10BT) 1709 m->dc_media = IFM_10_T; 1710 1711 if (l->dc_sia_code == DC_SIA_CODE_10BT_FDX) 1712 m->dc_media = IFM_10_T | IFM_FDX; 1713 1714 if (l->dc_sia_code == DC_SIA_CODE_10B2) 1715 m->dc_media = IFM_10_2; 1716 1717 if (l->dc_sia_code == DC_SIA_CODE_10B5) 1718 m->dc_media = IFM_10_5; 1719 1720 m->dc_gp_len = 2; 1721 m->dc_gp_ptr = (u_int8_t *)&l->dc_sia_gpio_ctl; 1722 1723 m->dc_next = sc->dc_mi; 1724 sc->dc_mi = m; 1725 1726 sc->dc_pmode = DC_PMODE_SIA; 1727 } 1728 1729 static void 1730 dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l) 1731 { 1732 struct dc_mediainfo *m; 1733 1734 m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO); 1735 if (l->dc_sym_code == DC_SYM_CODE_100BT) 1736 m->dc_media = IFM_100_TX; 1737 1738 if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX) 1739 m->dc_media = IFM_100_TX | IFM_FDX; 1740 1741 m->dc_gp_len = 2; 1742 m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl; 1743 1744 m->dc_next = sc->dc_mi; 1745 sc->dc_mi = m; 1746 1747 sc->dc_pmode = DC_PMODE_SYM; 1748 } 1749 1750 static void 1751 dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l) 1752 { 1753 struct dc_mediainfo *m; 1754 u_int8_t *p; 1755 1756 m = malloc(sizeof(struct 
dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO); 1757 /* We abuse IFM_AUTO to represent MII. */ 1758 m->dc_media = IFM_AUTO; 1759 m->dc_gp_len = l->dc_gpr_len; 1760 1761 p = (u_int8_t *)l; 1762 p += sizeof(struct dc_eblock_mii); 1763 m->dc_gp_ptr = p; 1764 p += 2 * l->dc_gpr_len; 1765 m->dc_reset_len = *p; 1766 p++; 1767 m->dc_reset_ptr = p; 1768 1769 m->dc_next = sc->dc_mi; 1770 sc->dc_mi = m; 1771 } 1772 1773 static void 1774 dc_read_srom(struct dc_softc *sc, int bits) 1775 { 1776 int size; 1777 1778 size = 2 << bits; 1779 sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT); 1780 dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0); 1781 } 1782 1783 static void 1784 dc_parse_21143_srom(struct dc_softc *sc) 1785 { 1786 struct dc_leaf_hdr *lhdr; 1787 struct dc_eblock_hdr *hdr; 1788 int have_mii, i, loff; 1789 char *ptr; 1790 1791 have_mii = 0; 1792 loff = sc->dc_srom[27]; 1793 lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]); 1794 1795 ptr = (char *)lhdr; 1796 ptr += sizeof(struct dc_leaf_hdr) - 1; 1797 /* 1798 * Look if we got a MII media block. 1799 */ 1800 for (i = 0; i < lhdr->dc_mcnt; i++) { 1801 hdr = (struct dc_eblock_hdr *)ptr; 1802 if (hdr->dc_type == DC_EBLOCK_MII) 1803 have_mii++; 1804 1805 ptr += (hdr->dc_len & 0x7F); 1806 ptr++; 1807 } 1808 1809 /* 1810 * Do the same thing again. Only use SIA and SYM media 1811 * blocks if no MII media block is available. 1812 */ 1813 ptr = (char *)lhdr; 1814 ptr += sizeof(struct dc_leaf_hdr) - 1; 1815 for (i = 0; i < lhdr->dc_mcnt; i++) { 1816 hdr = (struct dc_eblock_hdr *)ptr; 1817 switch (hdr->dc_type) { 1818 case DC_EBLOCK_MII: 1819 dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr); 1820 break; 1821 case DC_EBLOCK_SIA: 1822 if (! have_mii) 1823 dc_decode_leaf_sia(sc, 1824 (struct dc_eblock_sia *)hdr); 1825 break; 1826 case DC_EBLOCK_SYM: 1827 if (! have_mii) 1828 dc_decode_leaf_sym(sc, 1829 (struct dc_eblock_sym *)hdr); 1830 break; 1831 default: 1832 /* Don't care. Yet. */ 1833 break; 1834 } 1835 ptr += (hdr->dc_len & 0x7F); 1836 ptr++; 1837 } 1838 } 1839 1840 static void 1841 dc_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1842 { 1843 u_int32_t *paddr; 1844 1845 KASSERT(nseg == 1, ("wrong number of segments, should be 1")); 1846 paddr = arg; 1847 *paddr = segs->ds_addr; 1848 } 1849 1850 /* 1851 * Attach the interface. Allocate softc structures, do ifmedia 1852 * setup and ethernet/BPF attach. 1853 */ 1854 static int 1855 dc_attach(device_t dev) 1856 { 1857 int tmp = 0; 1858 u_char eaddr[ETHER_ADDR_LEN]; 1859 u_int32_t command; 1860 struct dc_softc *sc; 1861 struct ifnet *ifp; 1862 u_int32_t revision; 1863 int unit, error = 0, rid, mac_offset; 1864 int i; 1865 u_int8_t *mac; 1866 1867 sc = device_get_softc(dev); 1868 unit = device_get_unit(dev); 1869 1870 mtx_init(&sc->dc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1871 MTX_DEF | MTX_RECURSE); 1872 #ifndef BURN_BRIDGES 1873 /* 1874 * Handle power management nonsense. 1875 */ 1876 dc_acpi(dev); 1877 #endif 1878 /* 1879 * Map control/status registers. 1880 */ 1881 pci_enable_busmaster(dev); 1882 1883 rid = DC_RID; 1884 sc->dc_res = bus_alloc_resource(dev, DC_RES, &rid, 1885 0, ~0, 1, RF_ACTIVE); 1886 1887 if (sc->dc_res == NULL) { 1888 printf("dc%d: couldn't map ports/memory\n", unit); 1889 error = ENXIO; 1890 goto fail; 1891 } 1892 1893 sc->dc_btag = rman_get_bustag(sc->dc_res); 1894 sc->dc_bhandle = rman_get_bushandle(sc->dc_res); 1895 1896 /* Allocate interrupt. 
*/ 1897 rid = 0; 1898 sc->dc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 1899 RF_SHAREABLE | RF_ACTIVE); 1900 1901 if (sc->dc_irq == NULL) { 1902 printf("dc%d: couldn't map interrupt\n", unit); 1903 error = ENXIO; 1904 goto fail; 1905 } 1906 1907 /* Need this info to decide on a chip type. */ 1908 sc->dc_info = dc_devtype(dev); 1909 revision = pci_read_config(dev, DC_PCI_CFRV, 4) & 0x000000FF; 1910 1911 /* Get the eeprom width, but PNIC and XIRCOM have diff eeprom */ 1912 if (sc->dc_info->dc_did != DC_DEVICEID_82C168 && 1913 sc->dc_info->dc_did != DC_DEVICEID_X3201) 1914 dc_eeprom_width(sc); 1915 1916 switch (sc->dc_info->dc_did) { 1917 case DC_DEVICEID_21143: 1918 sc->dc_type = DC_TYPE_21143; 1919 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; 1920 sc->dc_flags |= DC_REDUCED_MII_POLL; 1921 /* Save EEPROM contents so we can parse them later. */ 1922 dc_read_srom(sc, sc->dc_romwidth); 1923 break; 1924 case DC_DEVICEID_DM9009: 1925 case DC_DEVICEID_DM9100: 1926 case DC_DEVICEID_DM9102: 1927 sc->dc_type = DC_TYPE_DM9102; 1928 sc->dc_flags |= DC_TX_COALESCE | DC_TX_INTR_ALWAYS; 1929 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_TX_STORENFWD; 1930 sc->dc_flags |= DC_TX_ALIGN; 1931 sc->dc_pmode = DC_PMODE_MII; 1932 /* Increase the latency timer value. */ 1933 command = pci_read_config(dev, DC_PCI_CFLT, 4); 1934 command &= 0xFFFF00FF; 1935 command |= 0x00008000; 1936 pci_write_config(dev, DC_PCI_CFLT, command, 4); 1937 break; 1938 case DC_DEVICEID_AL981: 1939 sc->dc_type = DC_TYPE_AL981; 1940 sc->dc_flags |= DC_TX_USE_TX_INTR; 1941 sc->dc_flags |= DC_TX_ADMTEK_WAR; 1942 sc->dc_pmode = DC_PMODE_MII; 1943 dc_read_srom(sc, sc->dc_romwidth); 1944 break; 1945 case DC_DEVICEID_AN985: 1946 case DC_DEVICEID_ADM9511: 1947 case DC_DEVICEID_ADM9513: 1948 case DC_DEVICEID_FA511: 1949 case DC_DEVICEID_FE2500: 1950 case DC_DEVICEID_EN2242: 1951 case DC_DEVICEID_HAWKING_PN672TX: 1952 case DC_DEVICEID_3CSOHOB: 1953 case DC_DEVICEID_MSMN120: 1954 case DC_DEVICEID_MSMN130_FAKE: /* XXX avoid collision with PNIC*/ 1955 sc->dc_type = DC_TYPE_AN985; 1956 sc->dc_flags |= DC_64BIT_HASH; 1957 sc->dc_flags |= DC_TX_USE_TX_INTR; 1958 sc->dc_flags |= DC_TX_ADMTEK_WAR; 1959 sc->dc_pmode = DC_PMODE_MII; 1960 /* Don't read SROM for - auto-loaded on reset */ 1961 break; 1962 case DC_DEVICEID_98713: 1963 case DC_DEVICEID_98713_CP: 1964 if (revision < DC_REVISION_98713A) { 1965 sc->dc_type = DC_TYPE_98713; 1966 } 1967 if (revision >= DC_REVISION_98713A) { 1968 sc->dc_type = DC_TYPE_98713A; 1969 sc->dc_flags |= DC_21143_NWAY; 1970 } 1971 sc->dc_flags |= DC_REDUCED_MII_POLL; 1972 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; 1973 break; 1974 case DC_DEVICEID_987x5: 1975 case DC_DEVICEID_EN1217: 1976 /* 1977 * Macronix MX98715AEC-C/D/E parts have only a 1978 * 128-bit hash table. We need to deal with these 1979 * in the same manner as the PNIC II so that we 1980 * get the right number of bits out of the 1981 * CRC routine. 
1982 */ 1983 if (revision >= DC_REVISION_98715AEC_C && 1984 revision < DC_REVISION_98725) 1985 sc->dc_flags |= DC_128BIT_HASH; 1986 sc->dc_type = DC_TYPE_987x5; 1987 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; 1988 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY; 1989 break; 1990 case DC_DEVICEID_98727: 1991 sc->dc_type = DC_TYPE_987x5; 1992 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; 1993 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY; 1994 break; 1995 case DC_DEVICEID_82C115: 1996 sc->dc_type = DC_TYPE_PNICII; 1997 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR | DC_128BIT_HASH; 1998 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY; 1999 break; 2000 case DC_DEVICEID_82C168: 2001 sc->dc_type = DC_TYPE_PNIC; 2002 sc->dc_flags |= DC_TX_STORENFWD | DC_TX_INTR_ALWAYS; 2003 sc->dc_flags |= DC_PNIC_RX_BUG_WAR; 2004 sc->dc_pnic_rx_buf = malloc(DC_RXLEN * 5, M_DEVBUF, M_NOWAIT); 2005 if (revision < DC_REVISION_82C169) 2006 sc->dc_pmode = DC_PMODE_SYM; 2007 break; 2008 case DC_DEVICEID_AX88140A: 2009 sc->dc_type = DC_TYPE_ASIX; 2010 sc->dc_flags |= DC_TX_USE_TX_INTR | DC_TX_INTR_FIRSTFRAG; 2011 sc->dc_flags |= DC_REDUCED_MII_POLL; 2012 sc->dc_pmode = DC_PMODE_MII; 2013 break; 2014 case DC_DEVICEID_X3201: 2015 sc->dc_type = DC_TYPE_XIRCOM; 2016 sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE | 2017 DC_TX_ALIGN; 2018 /* 2019 * We don't actually need to coalesce, but we're doing 2020 * it to obtain a double word aligned buffer. 2021 * The DC_TX_COALESCE flag is required. 2022 */ 2023 sc->dc_pmode = DC_PMODE_MII; 2024 break; 2025 case DC_DEVICEID_RS7112: 2026 sc->dc_type = DC_TYPE_CONEXANT; 2027 sc->dc_flags |= DC_TX_INTR_ALWAYS; 2028 sc->dc_flags |= DC_REDUCED_MII_POLL; 2029 sc->dc_pmode = DC_PMODE_MII; 2030 dc_read_srom(sc, sc->dc_romwidth); 2031 break; 2032 default: 2033 printf("dc%d: unknown device: %x\n", sc->dc_unit, 2034 sc->dc_info->dc_did); 2035 break; 2036 } 2037 2038 /* Save the cache line size. */ 2039 if (DC_IS_DAVICOM(sc)) 2040 sc->dc_cachesize = 0; 2041 else 2042 sc->dc_cachesize = pci_read_config(dev, 2043 DC_PCI_CFLT, 4) & 0xFF; 2044 2045 /* Reset the adapter. */ 2046 dc_reset(sc); 2047 2048 /* Take 21143 out of snooze mode */ 2049 if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) { 2050 command = pci_read_config(dev, DC_PCI_CFDD, 4); 2051 command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE); 2052 pci_write_config(dev, DC_PCI_CFDD, command, 4); 2053 } 2054 2055 /* 2056 * Try to learn something about the supported media. 2057 * We know that ASIX and ADMtek and Davicom devices 2058 * will *always* be using MII media, so that's a no-brainer. 2059 * The tricky ones are the Macronix/PNIC II and the 2060 * Intel 21143. 2061 */ 2062 if (DC_IS_INTEL(sc)) 2063 dc_parse_21143_srom(sc); 2064 else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { 2065 if (sc->dc_type == DC_TYPE_98713) 2066 sc->dc_pmode = DC_PMODE_MII; 2067 else 2068 sc->dc_pmode = DC_PMODE_SYM; 2069 } else if (!sc->dc_pmode) 2070 sc->dc_pmode = DC_PMODE_MII; 2071 2072 /* 2073 * Get station address from the EEPROM. 
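 * Each supported chip family stores it in a slightly different place, hence the switch below.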
2074 */ 2075 switch(sc->dc_type) { 2076 case DC_TYPE_98713: 2077 case DC_TYPE_98713A: 2078 case DC_TYPE_987x5: 2079 case DC_TYPE_PNICII: 2080 dc_read_eeprom(sc, (caddr_t)&mac_offset, 2081 (DC_EE_NODEADDR_OFFSET / 2), 1, 0); 2082 dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0); 2083 break; 2084 case DC_TYPE_PNIC: 2085 dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1); 2086 break; 2087 case DC_TYPE_DM9102: 2088 case DC_TYPE_21143: 2089 case DC_TYPE_ASIX: 2090 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); 2091 break; 2092 case DC_TYPE_AL981: 2093 case DC_TYPE_AN985: 2094 *(u_int32_t *)(&eaddr[0]) = CSR_READ_4(sc, DC_AL_PAR0); 2095 *(u_int16_t *)(&eaddr[4]) = CSR_READ_4(sc, DC_AL_PAR1); 2096 break; 2097 case DC_TYPE_CONEXANT: 2098 bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR, &eaddr, 2099 ETHER_ADDR_LEN); 2100 break; 2101 case DC_TYPE_XIRCOM: 2102 /* The MAC comes from the CIS. */ 2103 mac = pci_get_ether(dev); 2104 if (!mac) { 2105 device_printf(dev, "No station address in CIS!\n"); 2106 error = ENXIO; 2107 goto fail; 2108 } 2109 bcopy(mac, eaddr, ETHER_ADDR_LEN); 2110 break; 2111 default: 2112 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); 2113 break; 2114 } 2115 2116 /* 2117 * A 21143 or clone chip was detected. Inform the world. 2118 */ 2119 printf("dc%d: Ethernet address: %6D\n", unit, eaddr, ":"); 2120 2121 sc->dc_unit = unit; 2122 bcopy(eaddr, &sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); 2123 2124 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */ 2125 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, 2126 BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct dc_list_data), 1, 2127 sizeof(struct dc_list_data), 0, NULL, NULL, &sc->dc_ltag); 2128 if (error) { 2129 printf("dc%d: failed to allocate busdma tag\n", unit); 2130 error = ENXIO; 2131 goto fail; 2132 } 2133 error = bus_dmamem_alloc(sc->dc_ltag, (void **)&sc->dc_ldata, 2134 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->dc_lmap); 2135 if (error) { 2136 printf("dc%d: failed to allocate DMA safe memory\n", unit); 2137 error = ENXIO; 2138 goto fail; 2139 } 2140 error = bus_dmamap_load(sc->dc_ltag, sc->dc_lmap, sc->dc_ldata, 2141 sizeof(struct dc_list_data), dc_dma_map_addr, &sc->dc_laddr, 2142 BUS_DMA_NOWAIT); 2143 if (error) { 2144 printf("dc%d: cannot get address of the descriptors\n", unit); 2145 error = ENXIO; 2146 goto fail; 2147 } 2148 2149 /* 2150 * Allocate a busdma tag and DMA safe memory for the multicast 2151 * setup frame. 2152 */ 2153 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, 2154 BUS_SPACE_MAXADDR, NULL, NULL, DC_SFRAME_LEN + DC_MIN_FRAMELEN, 1, 2155 DC_SFRAME_LEN + DC_MIN_FRAMELEN, 0, NULL, NULL, &sc->dc_stag); 2156 if (error) { 2157 printf("dc%d: failed to allocate busdma tag\n", unit); 2158 error = ENXIO; 2159 goto fail; 2160 } 2161 error = bus_dmamem_alloc(sc->dc_stag, (void **)&sc->dc_cdata.dc_sbuf, 2162 BUS_DMA_NOWAIT, &sc->dc_smap); 2163 if (error) { 2164 printf("dc%d: failed to allocate DMA safe memory\n", unit); 2165 error = ENXIO; 2166 goto fail; 2167 } 2168 error = bus_dmamap_load(sc->dc_stag, sc->dc_smap, sc->dc_cdata.dc_sbuf, 2169 DC_SFRAME_LEN, dc_dma_map_addr, &sc->dc_saddr, BUS_DMA_NOWAIT); 2170 if (error) { 2171 printf("dc%d: cannot get address of the descriptors\n", unit); 2172 error = ENXIO; 2173 goto fail; 2174 } 2175 2176 /* Allocate a busdma tag for mbufs. 
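   The same tag is shared by the TX and RX maps; a TX mbuf chain may span up to DC_TX_LIST_CNT segments of at most MCLBYTES each.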
*/ 2177 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, 2178 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * DC_TX_LIST_CNT, 2179 DC_TX_LIST_CNT, MCLBYTES, 0, NULL, NULL, &sc->dc_mtag); 2180 if (error) { 2181 printf("dc%d: failed to allocate busdma tag\n", unit); 2182 error = ENXIO; 2183 goto fail; 2184 } 2185 2186 /* Create the TX/RX busdma maps. */ 2187 for (i = 0; i < DC_TX_LIST_CNT; i++) { 2188 error = bus_dmamap_create(sc->dc_mtag, 0, 2189 &sc->dc_cdata.dc_tx_map[i]); 2190 if (error) { 2191 printf("dc%d: failed to init TX ring\n", unit); 2192 error = ENXIO; 2193 goto fail; 2194 } 2195 } 2196 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2197 error = bus_dmamap_create(sc->dc_mtag, 0, 2198 &sc->dc_cdata.dc_rx_map[i]); 2199 if (error) { 2200 printf("dc%d: failed to init RX ring\n", unit); 2201 error = ENXIO; 2202 goto fail; 2203 } 2204 } 2205 error = bus_dmamap_create(sc->dc_mtag, 0, &sc->dc_sparemap); 2206 if (error) { 2207 printf("dc%d: failed to init RX ring\n", unit); 2208 error = ENXIO; 2209 goto fail; 2210 } 2211 2212 ifp = &sc->arpcom.ac_if; 2213 ifp->if_softc = sc; 2214 ifp->if_unit = unit; 2215 ifp->if_name = "dc"; 2216 /* XXX: bleah, MTU gets overwritten in ether_ifattach() */ 2217 ifp->if_mtu = ETHERMTU; 2218 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2219 ifp->if_ioctl = dc_ioctl; 2220 ifp->if_start = dc_start; 2221 ifp->if_watchdog = dc_watchdog; 2222 ifp->if_init = dc_init; 2223 ifp->if_baudrate = 10000000; 2224 ifp->if_snd.ifq_maxlen = DC_TX_LIST_CNT - 1; 2225 2226 /* 2227 * Do MII setup. If this is a 21143, check for a PHY on the 2228 * MII bus after applying any necessary fixups to twiddle the 2229 * GPIO bits. If we don't end up finding a PHY, restore the 2230 * old selection (SIA only or SIA/SYM) and attach the dcphy 2231 * driver instead. 2232 */ 2233 if (DC_IS_INTEL(sc)) { 2234 dc_apply_fixup(sc, IFM_AUTO); 2235 tmp = sc->dc_pmode; 2236 sc->dc_pmode = DC_PMODE_MII; 2237 } 2238 2239 error = mii_phy_probe(dev, &sc->dc_miibus, 2240 dc_ifmedia_upd, dc_ifmedia_sts); 2241 2242 if (error && DC_IS_INTEL(sc)) { 2243 sc->dc_pmode = tmp; 2244 if (sc->dc_pmode != DC_PMODE_SIA) 2245 sc->dc_pmode = DC_PMODE_SYM; 2246 sc->dc_flags |= DC_21143_NWAY; 2247 mii_phy_probe(dev, &sc->dc_miibus, 2248 dc_ifmedia_upd, dc_ifmedia_sts); 2249 /* 2250 * For non-MII cards, we need to have the 21143 2251 * drive the LEDs. Except there are some systems 2252 * like the NEC VersaPro NoteBook PC which have no 2253 * LEDs, and twiddling these bits has adverse effects 2254 * on them. (I.e. you suddenly can't get a link.) 2255 */ 2256 if (pci_read_config(dev, DC_PCI_CSID, 4) != 0x80281033) 2257 sc->dc_flags |= DC_TULIP_LEDS; 2258 error = 0; 2259 } 2260 2261 if (error) { 2262 printf("dc%d: MII without any PHY!\n", sc->dc_unit); 2263 goto fail; 2264 } 2265 2266 if (DC_IS_XIRCOM(sc)) { 2267 /* 2268 * setup General Purpose Port mode and data so the tulip 2269 * can talk to the MII. 2270 */ 2271 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | 2272 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2273 DELAY(10); 2274 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | 2275 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2276 DELAY(10); 2277 } 2278 2279 if (DC_IS_ADMTEK(sc)) { 2280 /* 2281 * Set automatic TX underrun recovery for the ADMtek chips 2282 */ 2283 DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR); 2284 } 2285 2286 /* 2287 * Tell the upper layer(s) we support long frames. 
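 * (i.e. 802.1Q VLAN-tagged frames, which carry an extra four-byte tag header).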
2288 */ 2289 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2290 ifp->if_capabilities |= IFCAP_VLAN_MTU; 2291 2292 callout_init(&sc->dc_stat_ch, IS_MPSAFE ? CALLOUT_MPSAFE : 0); 2293 2294 #ifdef SRM_MEDIA 2295 sc->dc_srm_media = 0; 2296 2297 /* Remember the SRM console media setting */ 2298 if (DC_IS_INTEL(sc)) { 2299 command = pci_read_config(dev, DC_PCI_CFDD, 4); 2300 command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE); 2301 switch ((command >> 8) & 0xff) { 2302 case 3: 2303 sc->dc_srm_media = IFM_10_T; 2304 break; 2305 case 4: 2306 sc->dc_srm_media = IFM_10_T | IFM_FDX; 2307 break; 2308 case 5: 2309 sc->dc_srm_media = IFM_100_TX; 2310 break; 2311 case 6: 2312 sc->dc_srm_media = IFM_100_TX | IFM_FDX; 2313 break; 2314 } 2315 if (sc->dc_srm_media) 2316 sc->dc_srm_media |= IFM_ACTIVE | IFM_ETHER; 2317 } 2318 #endif 2319 2320 /* 2321 * Call MI attach routine. 2322 */ 2323 ether_ifattach(ifp, eaddr); 2324 2325 /* Hook interrupt last to avoid having to lock softc */ 2326 error = bus_setup_intr(dev, sc->dc_irq, INTR_TYPE_NET | 2327 (IS_MPSAFE ? INTR_MPSAFE : 0), 2328 dc_intr, sc, &sc->dc_intrhand); 2329 2330 if (error) { 2331 printf("dc%d: couldn't set up irq\n", unit); 2332 ether_ifdetach(ifp); 2333 goto fail; 2334 } 2335 2336 fail: 2337 if (error) 2338 dc_detach(dev); 2339 return (error); 2340 } 2341 2342 /* 2343 * Shutdown hardware and free up resources. This can be called any 2344 * time after the mutex has been initialized. It is called in both 2345 * the error case in attach and the normal detach case so it needs 2346 * to be careful about only freeing resources that have actually been 2347 * allocated. 2348 */ 2349 static int 2350 dc_detach(device_t dev) 2351 { 2352 struct dc_softc *sc; 2353 struct ifnet *ifp; 2354 struct dc_mediainfo *m; 2355 int i; 2356 2357 sc = device_get_softc(dev); 2358 KASSERT(mtx_initialized(&sc->dc_mtx), ("dc mutex not initialized")); 2359 DC_LOCK(sc); 2360 2361 ifp = &sc->arpcom.ac_if; 2362 2363 /* These should only be active if attach succeeded */ 2364 if (device_is_attached(dev)) { 2365 dc_stop(sc); 2366 ether_ifdetach(ifp); 2367 } 2368 if (sc->dc_miibus) 2369 device_delete_child(dev, sc->dc_miibus); 2370 bus_generic_detach(dev); 2371 2372 if (sc->dc_intrhand) 2373 bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand); 2374 if (sc->dc_irq) 2375 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq); 2376 if (sc->dc_res) 2377 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); 2378 2379 if (sc->dc_cdata.dc_sbuf != NULL) 2380 bus_dmamem_free(sc->dc_stag, sc->dc_cdata.dc_sbuf, sc->dc_smap); 2381 if (sc->dc_ldata != NULL) 2382 bus_dmamem_free(sc->dc_ltag, sc->dc_ldata, sc->dc_lmap); 2383 for (i = 0; i < DC_TX_LIST_CNT; i++) 2384 bus_dmamap_destroy(sc->dc_mtag, sc->dc_cdata.dc_tx_map[i]); 2385 for (i = 0; i < DC_RX_LIST_CNT; i++) 2386 bus_dmamap_destroy(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i]); 2387 bus_dmamap_destroy(sc->dc_mtag, sc->dc_sparemap); 2388 if (sc->dc_stag) 2389 bus_dma_tag_destroy(sc->dc_stag); 2390 if (sc->dc_mtag) 2391 bus_dma_tag_destroy(sc->dc_mtag); 2392 if (sc->dc_ltag) 2393 bus_dma_tag_destroy(sc->dc_ltag); 2394 2395 free(sc->dc_pnic_rx_buf, M_DEVBUF); 2396 2397 while (sc->dc_mi != NULL) { 2398 m = sc->dc_mi->dc_next; 2399 free(sc->dc_mi, M_DEVBUF); 2400 sc->dc_mi = m; 2401 } 2402 free(sc->dc_srom, M_DEVBUF); 2403 2404 DC_UNLOCK(sc); 2405 mtx_destroy(&sc->dc_mtx); 2406 2407 return (0); 2408 } 2409 2410 /* 2411 * Initialize the transmit descriptors. 
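 * The descriptors are linked into a ring through their dc_next fields; no mbufs are attached until dc_encap() queues a frame.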
2412 */ 2413 static int 2414 dc_list_tx_init(struct dc_softc *sc) 2415 { 2416 struct dc_chain_data *cd; 2417 struct dc_list_data *ld; 2418 int i, nexti; 2419 2420 cd = &sc->dc_cdata; 2421 ld = sc->dc_ldata; 2422 for (i = 0; i < DC_TX_LIST_CNT; i++) { 2423 if (i == DC_TX_LIST_CNT - 1) 2424 nexti = 0; 2425 else 2426 nexti = i + 1; 2427 ld->dc_tx_list[i].dc_next = htole32(DC_TXDESC(sc, nexti)); 2428 cd->dc_tx_chain[i] = NULL; 2429 ld->dc_tx_list[i].dc_data = 0; 2430 ld->dc_tx_list[i].dc_ctl = 0; 2431 } 2432 2433 cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0; 2434 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, 2435 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2436 return (0); 2437 } 2438 2439 2440 /* 2441 * Initialize the RX descriptors and allocate mbufs for them. Note that 2442 * we arrange the descriptors in a closed ring, so that the last descriptor 2443 * points back to the first. 2444 */ 2445 static int 2446 dc_list_rx_init(struct dc_softc *sc) 2447 { 2448 struct dc_chain_data *cd; 2449 struct dc_list_data *ld; 2450 int i, nexti; 2451 2452 cd = &sc->dc_cdata; 2453 ld = sc->dc_ldata; 2454 2455 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2456 if (dc_newbuf(sc, i, 1) != 0) 2457 return (ENOBUFS); 2458 if (i == DC_RX_LIST_CNT - 1) 2459 nexti = 0; 2460 else 2461 nexti = i + 1; 2462 ld->dc_rx_list[i].dc_next = htole32(DC_RXDESC(sc, nexti)); 2463 } 2464 2465 cd->dc_rx_prod = 0; 2466 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, 2467 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2468 return (0); 2469 } 2470 2471 static void 2472 dc_dma_map_rxbuf(arg, segs, nseg, mapsize, error) 2473 void *arg; 2474 bus_dma_segment_t *segs; 2475 int nseg; 2476 bus_size_t mapsize; 2477 int error; 2478 { 2479 struct dc_softc *sc; 2480 struct dc_desc *c; 2481 2482 sc = arg; 2483 c = &sc->dc_ldata->dc_rx_list[sc->dc_cdata.dc_rx_cur]; 2484 if (error) { 2485 sc->dc_cdata.dc_rx_err = error; 2486 return; 2487 } 2488 2489 KASSERT(nseg == 1, ("wrong number of segments, should be 1")); 2490 sc->dc_cdata.dc_rx_err = 0; 2491 c->dc_data = htole32(segs->ds_addr); 2492 } 2493 2494 /* 2495 * Initialize an RX descriptor and attach an MBUF cluster. 2496 */ 2497 static int 2498 dc_newbuf(struct dc_softc *sc, int i, int alloc) 2499 { 2500 struct mbuf *m_new; 2501 bus_dmamap_t tmp; 2502 int error; 2503 2504 if (alloc) { 2505 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2506 if (m_new == NULL) 2507 return (ENOBUFS); 2508 } else { 2509 m_new = sc->dc_cdata.dc_rx_chain[i]; 2510 m_new->m_data = m_new->m_ext.ext_buf; 2511 } 2512 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 2513 m_adj(m_new, sizeof(u_int64_t)); 2514 2515 /* 2516 * If this is a PNIC chip, zero the buffer. This is part 2517 * of the workaround for the receive bug in the 82c168 and 2518 * 82c169 chips. 2519 */ 2520 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) 2521 bzero(mtod(m_new, char *), m_new->m_len); 2522 2523 /* No need to remap the mbuf if we're reusing it. 
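   Its DMA map still holds the mapping established when the buffer was first loaded.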
*/ 2524 if (alloc) { 2525 sc->dc_cdata.dc_rx_cur = i; 2526 error = bus_dmamap_load_mbuf(sc->dc_mtag, sc->dc_sparemap, 2527 m_new, dc_dma_map_rxbuf, sc, 0); 2528 if (error) { 2529 m_freem(m_new); 2530 return (error); 2531 } 2532 if (sc->dc_cdata.dc_rx_err != 0) { 2533 m_freem(m_new); 2534 return (sc->dc_cdata.dc_rx_err); 2535 } 2536 bus_dmamap_unload(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i]); 2537 tmp = sc->dc_cdata.dc_rx_map[i]; 2538 sc->dc_cdata.dc_rx_map[i] = sc->dc_sparemap; 2539 sc->dc_sparemap = tmp; 2540 sc->dc_cdata.dc_rx_chain[i] = m_new; 2541 } 2542 2543 sc->dc_ldata->dc_rx_list[i].dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN); 2544 sc->dc_ldata->dc_rx_list[i].dc_status = htole32(DC_RXSTAT_OWN); 2545 bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i], 2546 BUS_DMASYNC_PREREAD); 2547 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, 2548 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2549 return (0); 2550 } 2551 2552 /* 2553 * Grrrrr. 2554 * The PNIC chip has a terrible bug in it that manifests itself during 2555 * periods of heavy activity. The exact mode of failure is difficult to 2556 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it 2557 * will happen on slow machines. The bug is that sometimes instead of 2558 * uploading one complete frame during reception, it uploads what looks 2559 * like the entire contents of its FIFO memory. The frame we want is at 2560 * the end of the whole mess, but we never know exactly how much data has 2561 * been uploaded, so salvaging the frame is hard. 2562 * 2563 * There is only one way to do it reliably, and it's disgusting. 2564 * Here's what we know: 2565 * 2566 * - We know there will always be somewhere between one and three extra 2567 * descriptors uploaded. 2568 * 2569 * - We know the desired received frame will always be at the end of the 2570 * total data upload. 2571 * 2572 * - We know the size of the desired received frame because it will be 2573 * provided in the length field of the status word in the last descriptor. 2574 * 2575 * Here's what we do: 2576 * 2577 * - When we allocate buffers for the receive ring, we bzero() them. 2578 * This means that we know that the buffer contents should be all 2579 * zeros, except for data uploaded by the chip. 2580 * 2581 * - We also force the PNIC chip to upload frames that include the 2582 * ethernet CRC at the end. 2583 * 2584 * - We gather all of the bogus frame data into a single buffer. 2585 * 2586 * - We then position a pointer at the end of this buffer and scan 2587 * backwards until we encounter the first non-zero byte of data. 2588 * This is the end of the received frame. We know we will encounter 2589 * some data at the end of the frame because the CRC will always be 2590 * there, so even if the sender transmits a packet of all zeros, 2591 * we won't be fooled. 2592 * 2593 * - We know the size of the actual received frame, so we subtract 2594 * that value from the current pointer location. This brings us 2595 * to the start of the actual received packet. 2596 * 2597 * - We copy this into an mbuf and pass it on, along with the actual 2598 * frame length. 2599 * 2600 * The performance hit is tremendous, but it beats dropping frames all 2601 * the time.
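 * (The workaround is only enabled when DC_PNIC_RX_BUG_WAR is set, i.e. on the 82c168/82c169 PNIC parts.)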
2602 */ 2603 2604 #define DC_WHOLEFRAME (DC_RXSTAT_FIRSTFRAG | DC_RXSTAT_LASTFRAG) 2605 static void 2606 dc_pnic_rx_bug_war(struct dc_softc *sc, int idx) 2607 { 2608 struct dc_desc *cur_rx; 2609 struct dc_desc *c = NULL; 2610 struct mbuf *m = NULL; 2611 unsigned char *ptr; 2612 int i, total_len; 2613 u_int32_t rxstat = 0; 2614 2615 i = sc->dc_pnic_rx_bug_save; 2616 cur_rx = &sc->dc_ldata->dc_rx_list[idx]; 2617 ptr = sc->dc_pnic_rx_buf; 2618 bzero(ptr, DC_RXLEN * 5); 2619 2620 /* Copy all the bytes from the bogus buffers. */ 2621 while (1) { 2622 c = &sc->dc_ldata->dc_rx_list[i]; 2623 rxstat = le32toh(c->dc_status); 2624 m = sc->dc_cdata.dc_rx_chain[i]; 2625 bcopy(mtod(m, char *), ptr, DC_RXLEN); 2626 ptr += DC_RXLEN; 2627 /* If this is the last buffer, break out. */ 2628 if (i == idx || rxstat & DC_RXSTAT_LASTFRAG) 2629 break; 2630 dc_newbuf(sc, i, 0); 2631 DC_INC(i, DC_RX_LIST_CNT); 2632 } 2633 2634 /* Find the length of the actual receive frame. */ 2635 total_len = DC_RXBYTES(rxstat); 2636 2637 /* Scan backwards until we hit a non-zero byte. */ 2638 while (*ptr == 0x00) 2639 ptr--; 2640 2641 /* Round off. */ 2642 if ((uintptr_t)(ptr) & 0x3) 2643 ptr -= 1; 2644 2645 /* Now find the start of the frame. */ 2646 ptr -= total_len; 2647 if (ptr < sc->dc_pnic_rx_buf) 2648 ptr = sc->dc_pnic_rx_buf; 2649 2650 /* 2651 * Now copy the salvaged frame to the last mbuf and fake up 2652 * the status word to make it look like a successful 2653 * frame reception. 2654 */ 2655 dc_newbuf(sc, i, 0); 2656 bcopy(ptr, mtod(m, char *), total_len); 2657 cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG); 2658 } 2659 2660 /* 2661 * This routine searches the RX ring for dirty descriptors in the 2662 * event that the rxeof routine falls out of sync with the chip's 2663 * current descriptor pointer. This may happen sometimes as a result 2664 * of a "no RX buffer available" condition that happens when the chip 2665 * consumes all of the RX buffers before the driver has a chance to 2666 * process the RX ring. This routine may need to be called more than 2667 * once to bring the driver back in sync with the chip, however we 2668 * should still be getting RX DONE interrupts to drive the search 2669 * for new packets in the RX ring, so we should catch up eventually. 2670 */ 2671 static int 2672 dc_rx_resync(struct dc_softc *sc) 2673 { 2674 struct dc_desc *cur_rx; 2675 int i, pos; 2676 2677 pos = sc->dc_cdata.dc_rx_prod; 2678 2679 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2680 cur_rx = &sc->dc_ldata->dc_rx_list[pos]; 2681 if (!(le32toh(cur_rx->dc_status) & DC_RXSTAT_OWN)) 2682 break; 2683 DC_INC(pos, DC_RX_LIST_CNT); 2684 } 2685 2686 /* If the ring really is empty, then just return. */ 2687 if (i == DC_RX_LIST_CNT) 2688 return (0); 2689 2690 /* We've fallen behing the chip: catch it. */ 2691 sc->dc_cdata.dc_rx_prod = pos; 2692 2693 return (EAGAIN); 2694 } 2695 2696 /* 2697 * A frame has been uploaded: pass the resulting mbuf chain up to 2698 * the higher level protocols. 
2699 */ 2700 static void 2701 dc_rxeof(struct dc_softc *sc) 2702 { 2703 struct mbuf *m; 2704 struct ifnet *ifp; 2705 struct dc_desc *cur_rx; 2706 int i, total_len = 0; 2707 u_int32_t rxstat; 2708 2709 ifp = &sc->arpcom.ac_if; 2710 i = sc->dc_cdata.dc_rx_prod; 2711 2712 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD); 2713 while (!(le32toh(sc->dc_ldata->dc_rx_list[i].dc_status) & 2714 DC_RXSTAT_OWN)) { 2715 #ifdef DEVICE_POLLING 2716 if (ifp->if_flags & IFF_POLLING) { 2717 if (sc->rxcycles <= 0) 2718 break; 2719 sc->rxcycles--; 2720 } 2721 #endif 2722 cur_rx = &sc->dc_ldata->dc_rx_list[i]; 2723 rxstat = le32toh(cur_rx->dc_status); 2724 m = sc->dc_cdata.dc_rx_chain[i]; 2725 bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i], 2726 BUS_DMASYNC_POSTREAD); 2727 total_len = DC_RXBYTES(rxstat); 2728 2729 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) { 2730 if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) { 2731 if (rxstat & DC_RXSTAT_FIRSTFRAG) 2732 sc->dc_pnic_rx_bug_save = i; 2733 if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) { 2734 DC_INC(i, DC_RX_LIST_CNT); 2735 continue; 2736 } 2737 dc_pnic_rx_bug_war(sc, i); 2738 rxstat = le32toh(cur_rx->dc_status); 2739 total_len = DC_RXBYTES(rxstat); 2740 } 2741 } 2742 2743 /* 2744 * If an error occurs, update stats, clear the 2745 * status word and leave the mbuf cluster in place: 2746 * it should simply get re-used next time this descriptor 2747 * comes up in the ring. However, don't report long 2748 * frames as errors since they could be vlans. 2749 */ 2750 if ((rxstat & DC_RXSTAT_RXERR)) { 2751 if (!(rxstat & DC_RXSTAT_GIANT) || 2752 (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE | 2753 DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN | 2754 DC_RXSTAT_RUNT | DC_RXSTAT_DE))) { 2755 ifp->if_ierrors++; 2756 if (rxstat & DC_RXSTAT_COLLSEEN) 2757 ifp->if_collisions++; 2758 dc_newbuf(sc, i, 0); 2759 if (rxstat & DC_RXSTAT_CRCERR) { 2760 DC_INC(i, DC_RX_LIST_CNT); 2761 continue; 2762 } else { 2763 dc_init(sc); 2764 return; 2765 } 2766 } 2767 } 2768 2769 /* No errors; receive the packet. */ 2770 total_len -= ETHER_CRC_LEN; 2771 #ifdef __i386__ 2772 /* 2773 * On the x86 we do not have alignment problems, so try to 2774 * allocate a new buffer for the receive ring, and pass up 2775 * the one where the packet is already, saving the expensive 2776 * copy done in m_devget(). 2777 * If we are on an architecture with alignment problems, or 2778 * if the allocation fails, then use m_devget and leave the 2779 * existing buffer in the receive ring. 2780 */ 2781 if (dc_quick && dc_newbuf(sc, i, 1) == 0) { 2782 m->m_pkthdr.rcvif = ifp; 2783 m->m_pkthdr.len = m->m_len = total_len; 2784 DC_INC(i, DC_RX_LIST_CNT); 2785 } else 2786 #endif 2787 { 2788 struct mbuf *m0; 2789 2790 m0 = m_devget(mtod(m, char *), total_len, 2791 ETHER_ALIGN, ifp, NULL); 2792 dc_newbuf(sc, i, 0); 2793 DC_INC(i, DC_RX_LIST_CNT); 2794 if (m0 == NULL) { 2795 ifp->if_ierrors++; 2796 continue; 2797 } 2798 m = m0; 2799 } 2800 2801 ifp->if_ipackets++; 2802 (*ifp->if_input)(ifp, m); 2803 } 2804 2805 sc->dc_cdata.dc_rx_prod = i; 2806 } 2807 2808 /* 2809 * A frame was downloaded to the chip. It's safe for us to clean up 2810 * the list buffers. 2811 */ 2812 2813 static void 2814 dc_txeof(struct dc_softc *sc) 2815 { 2816 struct dc_desc *cur_tx = NULL; 2817 struct ifnet *ifp; 2818 int idx; 2819 u_int32_t ctl, txstat; 2820 2821 ifp = &sc->arpcom.ac_if; 2822 2823 /* 2824 * Go through our tx list and free mbufs for those 2825 * frames that have been transmitted. 
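 * A descriptor still marked DC_TXSTAT_OWN belongs to the chip, so we stop as soon as we see one.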
2826 */ 2827 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD); 2828 idx = sc->dc_cdata.dc_tx_cons; 2829 while (idx != sc->dc_cdata.dc_tx_prod) { 2830 2831 cur_tx = &sc->dc_ldata->dc_tx_list[idx]; 2832 txstat = le32toh(cur_tx->dc_status); 2833 ctl = le32toh(cur_tx->dc_ctl); 2834 2835 if (txstat & DC_TXSTAT_OWN) 2836 break; 2837 2838 if (!(ctl & DC_TXCTL_FIRSTFRAG) || ctl & DC_TXCTL_SETUP) { 2839 if (ctl & DC_TXCTL_SETUP) { 2840 /* 2841 * Yes, the PNIC is so brain damaged 2842 * that it will sometimes generate a TX 2843 * underrun error while DMAing the RX 2844 * filter setup frame. If we detect this, 2845 * we have to send the setup frame again, 2846 * or else the filter won't be programmed 2847 * correctly. 2848 */ 2849 if (DC_IS_PNIC(sc)) { 2850 if (txstat & DC_TXSTAT_ERRSUM) 2851 dc_setfilt(sc); 2852 } 2853 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2854 } 2855 sc->dc_cdata.dc_tx_cnt--; 2856 DC_INC(idx, DC_TX_LIST_CNT); 2857 continue; 2858 } 2859 2860 if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) { 2861 /* 2862 * XXX: Why does my Xircom taunt me so? 2863 * For some reason it likes setting the CARRLOST flag 2864 * even when the carrier is there. wtf?!? 2865 * Who knows, but Conexant chips have the 2866 * same problem. Maybe they took lessons 2867 * from Xircom. 2868 */ 2869 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2870 sc->dc_pmode == DC_PMODE_MII && 2871 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM | 2872 DC_TXSTAT_NOCARRIER))) 2873 txstat &= ~DC_TXSTAT_ERRSUM; 2874 } else { 2875 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2876 sc->dc_pmode == DC_PMODE_MII && 2877 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM | 2878 DC_TXSTAT_NOCARRIER | DC_TXSTAT_CARRLOST))) 2879 txstat &= ~DC_TXSTAT_ERRSUM; 2880 } 2881 2882 if (txstat & DC_TXSTAT_ERRSUM) { 2883 ifp->if_oerrors++; 2884 if (txstat & DC_TXSTAT_EXCESSCOLL) 2885 ifp->if_collisions++; 2886 if (txstat & DC_TXSTAT_LATECOLL) 2887 ifp->if_collisions++; 2888 if (!(txstat & DC_TXSTAT_UNDERRUN)) { 2889 dc_init(sc); 2890 return; 2891 } 2892 } 2893 2894 ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3; 2895 2896 ifp->if_opackets++; 2897 if (sc->dc_cdata.dc_tx_chain[idx] != NULL) { 2898 bus_dmamap_sync(sc->dc_mtag, 2899 sc->dc_cdata.dc_tx_map[idx], 2900 BUS_DMASYNC_POSTWRITE); 2901 bus_dmamap_unload(sc->dc_mtag, 2902 sc->dc_cdata.dc_tx_map[idx]); 2903 m_freem(sc->dc_cdata.dc_tx_chain[idx]); 2904 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2905 } 2906 2907 sc->dc_cdata.dc_tx_cnt--; 2908 DC_INC(idx, DC_TX_LIST_CNT); 2909 } 2910 2911 if (idx != sc->dc_cdata.dc_tx_cons) { 2912 /* Some buffers have been freed. */ 2913 sc->dc_cdata.dc_tx_cons = idx; 2914 ifp->if_flags &= ~IFF_OACTIVE; 2915 } 2916 ifp->if_timer = (sc->dc_cdata.dc_tx_cnt == 0) ? 
0 : 5; 2917 } 2918 2919 static void 2920 dc_tick(void *xsc) 2921 { 2922 struct dc_softc *sc; 2923 struct mii_data *mii; 2924 struct ifnet *ifp; 2925 u_int32_t r; 2926 2927 sc = xsc; 2928 DC_LOCK(sc); 2929 ifp = &sc->arpcom.ac_if; 2930 mii = device_get_softc(sc->dc_miibus); 2931 2932 if (sc->dc_flags & DC_REDUCED_MII_POLL) { 2933 if (sc->dc_flags & DC_21143_NWAY) { 2934 r = CSR_READ_4(sc, DC_10BTSTAT); 2935 if (IFM_SUBTYPE(mii->mii_media_active) == 2936 IFM_100_TX && (r & DC_TSTAT_LS100)) { 2937 sc->dc_link = 0; 2938 mii_mediachg(mii); 2939 } 2940 if (IFM_SUBTYPE(mii->mii_media_active) == 2941 IFM_10_T && (r & DC_TSTAT_LS10)) { 2942 sc->dc_link = 0; 2943 mii_mediachg(mii); 2944 } 2945 if (sc->dc_link == 0) 2946 mii_tick(mii); 2947 } else { 2948 r = CSR_READ_4(sc, DC_ISR); 2949 if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT && 2950 sc->dc_cdata.dc_tx_cnt == 0) { 2951 mii_tick(mii); 2952 if (!(mii->mii_media_status & IFM_ACTIVE)) 2953 sc->dc_link = 0; 2954 } 2955 } 2956 } else 2957 mii_tick(mii); 2958 2959 /* 2960 * When the init routine completes, we expect to be able to send 2961 * packets right away, and in fact the network code will send a 2962 * gratuitous ARP the moment the init routine marks the interface 2963 * as running. However, even though the MAC may have been initialized, 2964 * there may be a delay of a few seconds before the PHY completes 2965 * autonegotiation and the link is brought up. Any transmissions 2966 * made during that delay will be lost. Dealing with this is tricky: 2967 * we can't just pause in the init routine while waiting for the 2968 * PHY to come ready since that would bring the whole system to 2969 * a screeching halt for several seconds. 2970 * 2971 * What we do here is prevent the TX start routine from sending 2972 * any packets until a link has been established. After the 2973 * interface has been initialized, the tick routine will poll 2974 * the state of the PHY until the IFM_ACTIVE flag is set. Until 2975 * that time, packets will stay in the send queue, and once the 2976 * link comes up, they will be flushed out to the wire. 2977 */ 2978 if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE && 2979 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2980 sc->dc_link++; 2981 if (ifp->if_snd.ifq_head != NULL) 2982 dc_start(ifp); 2983 } 2984 2985 if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link) 2986 callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc); 2987 else 2988 callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc); 2989 2990 DC_UNLOCK(sc); 2991 } 2992 2993 /* 2994 * A transmit underrun has occurred. Back off the transmit threshold, 2995 * or switch to store and forward mode if we have to. 2996 */ 2997 static void 2998 dc_tx_underrun(struct dc_softc *sc) 2999 { 3000 u_int32_t isr; 3001 int i; 3002 3003 if (DC_IS_DAVICOM(sc)) 3004 dc_init(sc); 3005 3006 if (DC_IS_INTEL(sc)) { 3007 /* 3008 * The real 21143 requires that the transmitter be idle 3009 * in order to change the transmit threshold or store 3010 * and forward state. 
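 * We clear DC_NETCFG_TX_ON and poll the ISR for DC_ISR_TX_IDLE before touching the threshold bits.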
3011 */ 3012 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3013 3014 for (i = 0; i < DC_TIMEOUT; i++) { 3015 isr = CSR_READ_4(sc, DC_ISR); 3016 if (isr & DC_ISR_TX_IDLE) 3017 break; 3018 DELAY(10); 3019 } 3020 if (i == DC_TIMEOUT) { 3021 printf("dc%d: failed to force tx to idle state\n", 3022 sc->dc_unit); 3023 dc_init(sc); 3024 } 3025 } 3026 3027 printf("dc%d: TX underrun -- ", sc->dc_unit); 3028 sc->dc_txthresh += DC_TXTHRESH_INC; 3029 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 3030 printf("using store and forward mode\n"); 3031 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3032 } else { 3033 printf("increasing TX threshold\n"); 3034 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 3035 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 3036 } 3037 3038 if (DC_IS_INTEL(sc)) 3039 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3040 } 3041 3042 #ifdef DEVICE_POLLING 3043 static poll_handler_t dc_poll; 3044 3045 static void 3046 dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 3047 { 3048 struct dc_softc *sc = ifp->if_softc; 3049 3050 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */ 3051 /* Re-enable interrupts. */ 3052 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 3053 return; 3054 } 3055 sc->rxcycles = count; 3056 dc_rxeof(sc); 3057 dc_txeof(sc); 3058 if (ifp->if_snd.ifq_head != NULL && !(ifp->if_flags & IFF_OACTIVE)) 3059 dc_start(ifp); 3060 3061 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 3062 u_int32_t status; 3063 3064 status = CSR_READ_4(sc, DC_ISR); 3065 status &= (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF | 3066 DC_ISR_TX_NOBUF | DC_ISR_TX_IDLE | DC_ISR_TX_UNDERRUN | 3067 DC_ISR_BUS_ERR); 3068 if (!status) 3069 return; 3070 /* ack what we have */ 3071 CSR_WRITE_4(sc, DC_ISR, status); 3072 3073 if (status & (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF)) { 3074 u_int32_t r = CSR_READ_4(sc, DC_FRAMESDISCARDED); 3075 ifp->if_ierrors += (r & 0xffff) + ((r >> 17) & 0x7ff); 3076 3077 if (dc_rx_resync(sc)) 3078 dc_rxeof(sc); 3079 } 3080 /* restart transmit unit if necessary */ 3081 if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt) 3082 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 3083 3084 if (status & DC_ISR_TX_UNDERRUN) 3085 dc_tx_underrun(sc); 3086 3087 if (status & DC_ISR_BUS_ERR) { 3088 printf("dc_poll: dc%d bus error\n", sc->dc_unit); 3089 dc_reset(sc); 3090 dc_init(sc); 3091 } 3092 } 3093 } 3094 #endif /* DEVICE_POLLING */ 3095 3096 static void 3097 dc_intr(void *arg) 3098 { 3099 struct dc_softc *sc; 3100 struct ifnet *ifp; 3101 u_int32_t status; 3102 3103 sc = arg; 3104 3105 if (sc->suspended) 3106 return; 3107 3108 if ((CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0) 3109 return; 3110 3111 DC_LOCK(sc); 3112 ifp = &sc->arpcom.ac_if; 3113 #ifdef DEVICE_POLLING 3114 if (ifp->if_flags & IFF_POLLING) 3115 goto done; 3116 if (ether_poll_register(dc_poll, ifp)) { /* ok, disable interrupts */ 3117 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3118 goto done; 3119 } 3120 #endif 3121 3122 /* Suppress unwanted interrupts */ 3123 if (!(ifp->if_flags & IFF_UP)) { 3124 if (CSR_READ_4(sc, DC_ISR) & DC_INTRS) 3125 dc_stop(sc); 3126 DC_UNLOCK(sc); 3127 return; 3128 } 3129 3130 /* Disable interrupts. 
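   They are re-enabled once the status loop below has drained.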
*/ 3131 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3132 3133 while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) 3134 && status != 0xFFFFFFFF) { 3135 3136 CSR_WRITE_4(sc, DC_ISR, status); 3137 3138 if (status & DC_ISR_RX_OK) { 3139 int curpkts; 3140 curpkts = ifp->if_ipackets; 3141 dc_rxeof(sc); 3142 if (curpkts == ifp->if_ipackets) { 3143 while (dc_rx_resync(sc)) 3144 dc_rxeof(sc); 3145 } 3146 } 3147 3148 if (status & (DC_ISR_TX_OK | DC_ISR_TX_NOBUF)) 3149 dc_txeof(sc); 3150 3151 if (status & DC_ISR_TX_IDLE) { 3152 dc_txeof(sc); 3153 if (sc->dc_cdata.dc_tx_cnt) { 3154 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3155 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 3156 } 3157 } 3158 3159 if (status & DC_ISR_TX_UNDERRUN) 3160 dc_tx_underrun(sc); 3161 3162 if ((status & DC_ISR_RX_WATDOGTIMEO) 3163 || (status & DC_ISR_RX_NOBUF)) { 3164 int curpkts; 3165 curpkts = ifp->if_ipackets; 3166 dc_rxeof(sc); 3167 if (curpkts == ifp->if_ipackets) { 3168 while (dc_rx_resync(sc)) 3169 dc_rxeof(sc); 3170 } 3171 } 3172 3173 if (status & DC_ISR_BUS_ERR) { 3174 dc_reset(sc); 3175 dc_init(sc); 3176 } 3177 } 3178 3179 /* Re-enable interrupts. */ 3180 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 3181 3182 if (ifp->if_snd.ifq_head != NULL) 3183 dc_start(ifp); 3184 3185 #ifdef DEVICE_POLLING 3186 done: 3187 #endif 3188 3189 DC_UNLOCK(sc); 3190 } 3191 3192 static void 3193 dc_dma_map_txbuf(arg, segs, nseg, mapsize, error) 3194 void *arg; 3195 bus_dma_segment_t *segs; 3196 int nseg; 3197 bus_size_t mapsize; 3198 int error; 3199 { 3200 struct dc_softc *sc; 3201 struct dc_desc *f; 3202 int cur, first, frag, i; 3203 3204 sc = arg; 3205 if (error) { 3206 sc->dc_cdata.dc_tx_err = error; 3207 return; 3208 } 3209 3210 first = cur = frag = sc->dc_cdata.dc_tx_prod; 3211 for (i = 0; i < nseg; i++) { 3212 if ((sc->dc_flags & DC_TX_ADMTEK_WAR) && 3213 (frag == (DC_TX_LIST_CNT - 1)) && 3214 (first != sc->dc_cdata.dc_tx_first)) { 3215 bus_dmamap_unload(sc->dc_mtag, 3216 sc->dc_cdata.dc_tx_map[first]); 3217 sc->dc_cdata.dc_tx_err = ENOBUFS; 3218 return; 3219 } 3220 3221 f = &sc->dc_ldata->dc_tx_list[frag]; 3222 f->dc_ctl = htole32(DC_TXCTL_TLINK | segs[i].ds_len); 3223 if (i == 0) { 3224 f->dc_status = 0; 3225 f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG); 3226 } else 3227 f->dc_status = htole32(DC_TXSTAT_OWN); 3228 f->dc_data = htole32(segs[i].ds_addr); 3229 cur = frag; 3230 DC_INC(frag, DC_TX_LIST_CNT); 3231 } 3232 3233 sc->dc_cdata.dc_tx_err = 0; 3234 sc->dc_cdata.dc_tx_prod = frag; 3235 sc->dc_cdata.dc_tx_cnt += nseg; 3236 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG); 3237 if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG) 3238 sc->dc_ldata->dc_tx_list[first].dc_ctl |= 3239 htole32(DC_TXCTL_FINT); 3240 if (sc->dc_flags & DC_TX_INTR_ALWAYS) 3241 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT); 3242 if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64) 3243 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT); 3244 sc->dc_ldata->dc_tx_list[first].dc_status = htole32(DC_TXSTAT_OWN); 3245 } 3246 3247 /* 3248 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 3249 * pointers to the fragment pointers. 3250 */ 3251 static int 3252 dc_encap(struct dc_softc *sc, struct mbuf *m_head) 3253 { 3254 struct mbuf *m; 3255 int error, idx, chainlen = 0; 3256 3257 /* 3258 * If there's no way we can send any packets, return now. 
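 * We insist on at least six free descriptors of slack before queueing anything.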
3259 */ 3260 if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt < 6) 3261 return (ENOBUFS); 3262 3263 /* 3264 * Count the number of frags in this chain to see if 3265 * we need to m_defrag. Since the descriptor list is shared 3266 * by all packets, we'll m_defrag long chains so that they 3267 * do not use up the entire list, even if they would fit. 3268 */ 3269 for (m = m_head; m != NULL; m = m->m_next) 3270 chainlen++; 3271 3272 if ((chainlen > DC_TX_LIST_CNT / 4) || 3273 ((DC_TX_LIST_CNT - (chainlen + sc->dc_cdata.dc_tx_cnt)) < 6)) { 3274 m = m_defrag(m_head, M_DONTWAIT); 3275 if (m == NULL) 3276 return (ENOBUFS); 3277 m_head = m; 3278 } 3279 3280 /* 3281 * Start packing the mbufs in this chain into 3282 * the fragment pointers. Stop when we run out 3283 * of fragments or hit the end of the mbuf chain. 3284 */ 3285 idx = sc->dc_cdata.dc_tx_prod; 3286 error = bus_dmamap_load_mbuf(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx], 3287 m_head, dc_dma_map_txbuf, sc, 0); 3288 if (error) 3289 return (error); 3290 if (sc->dc_cdata.dc_tx_err != 0) 3291 return (sc->dc_cdata.dc_tx_err); 3292 sc->dc_cdata.dc_tx_chain[idx] = m_head; 3293 bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx], 3294 BUS_DMASYNC_PREWRITE); 3295 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, 3296 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 3297 return (0); 3298 } 3299 3300 /* 3301 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3302 * to the mbuf data regions directly in the transmit lists. We also save a 3303 * copy of the pointers since the transmit list fragment pointers are 3304 * physical addresses. 3305 */ 3306 3307 static void 3308 dc_start(struct ifnet *ifp) 3309 { 3310 struct dc_softc *sc; 3311 struct mbuf *m_head = NULL, *m; 3312 int idx; 3313 3314 sc = ifp->if_softc; 3315 3316 DC_LOCK(sc); 3317 3318 if (!sc->dc_link && ifp->if_snd.ifq_len < 10) { 3319 DC_UNLOCK(sc); 3320 return; 3321 } 3322 3323 if (ifp->if_flags & IFF_OACTIVE) { 3324 DC_UNLOCK(sc); 3325 return; 3326 } 3327 3328 idx = sc->dc_cdata.dc_tx_first = sc->dc_cdata.dc_tx_prod; 3329 3330 while (sc->dc_cdata.dc_tx_chain[idx] == NULL) { 3331 IF_DEQUEUE(&ifp->if_snd, m_head); 3332 if (m_head == NULL) 3333 break; 3334 3335 if (sc->dc_flags & DC_TX_COALESCE && 3336 (m_head->m_next != NULL || 3337 sc->dc_flags & DC_TX_ALIGN)) { 3338 m = m_defrag(m_head, M_DONTWAIT); 3339 if (m == NULL) { 3340 IF_PREPEND(&ifp->if_snd, m_head); 3341 ifp->if_flags |= IFF_OACTIVE; 3342 break; 3343 } else { 3344 m_head = m; 3345 } 3346 } 3347 3348 if (dc_encap(sc, m_head)) { 3349 IF_PREPEND(&ifp->if_snd, m_head); 3350 ifp->if_flags |= IFF_OACTIVE; 3351 break; 3352 } 3353 idx = sc->dc_cdata.dc_tx_prod; 3354 3355 /* 3356 * If there's a BPF listener, bounce a copy of this frame 3357 * to him. 3358 */ 3359 BPF_MTAP(ifp, m_head); 3360 3361 if (sc->dc_flags & DC_TX_ONE) { 3362 ifp->if_flags |= IFF_OACTIVE; 3363 break; 3364 } 3365 } 3366 3367 /* Transmit */ 3368 if (!(sc->dc_flags & DC_TX_POLL)) 3369 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 3370 3371 /* 3372 * Set a timeout in case the chip goes out to lunch. 3373 */ 3374 ifp->if_timer = 5; 3375 3376 DC_UNLOCK(sc); 3377 } 3378 3379 static void 3380 dc_init(void *xsc) 3381 { 3382 struct dc_softc *sc = xsc; 3383 struct ifnet *ifp = &sc->arpcom.ac_if; 3384 struct mii_data *mii; 3385 3386 DC_LOCK(sc); 3387 3388 mii = device_get_softc(sc->dc_miibus); 3389 3390 /* 3391 * Cancel pending I/O and free all RX/TX buffers. 3392 */ 3393 dc_stop(sc); 3394 dc_reset(sc); 3395 3396 /* 3397 * Set cache alignment and burst length. 
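 * The cache alignment setting is derived from the PCI cache line size saved at attach time.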
3398 */ 3399 if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc)) 3400 CSR_WRITE_4(sc, DC_BUSCTL, 0); 3401 else 3402 CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME | DC_BUSCTL_MRLE); 3403 /* 3404 * Evenly share the bus between receive and transmit process. 3405 */ 3406 if (DC_IS_INTEL(sc)) 3407 DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION); 3408 if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) { 3409 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA); 3410 } else { 3411 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG); 3412 } 3413 if (sc->dc_flags & DC_TX_POLL) 3414 DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1); 3415 switch(sc->dc_cachesize) { 3416 case 32: 3417 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG); 3418 break; 3419 case 16: 3420 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG); 3421 break; 3422 case 8: 3423 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG); 3424 break; 3425 case 0: 3426 default: 3427 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE); 3428 break; 3429 } 3430 3431 if (sc->dc_flags & DC_TX_STORENFWD) 3432 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3433 else { 3434 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 3435 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3436 } else { 3437 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3438 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 3439 } 3440 } 3441 3442 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC); 3443 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF); 3444 3445 if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { 3446 /* 3447 * The app notes for the 98713 and 98715A say that 3448 * in order to have the chips operate properly, a magic 3449 * number must be written to CSR16. Macronix does not 3450 * document the meaning of these bits so there's no way 3451 * to know exactly what they do. The 98713 has a magic 3452 * number all its own; the rest all use a different one. 3453 */ 3454 DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000); 3455 if (sc->dc_type == DC_TYPE_98713) 3456 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713); 3457 else 3458 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715); 3459 } 3460 3461 if (DC_IS_XIRCOM(sc)) { 3462 /* 3463 * setup General Purpose Port mode and data so the tulip 3464 * can talk to the MII. 3465 */ 3466 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | 3467 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 3468 DELAY(10); 3469 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | 3470 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 3471 DELAY(10); 3472 } 3473 3474 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 3475 DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN); 3476 3477 /* Init circular RX list. */ 3478 if (dc_list_rx_init(sc) == ENOBUFS) { 3479 printf("dc%d: initialization failed: no " 3480 "memory for rx buffers\n", sc->dc_unit); 3481 dc_stop(sc); 3482 DC_UNLOCK(sc); 3483 return; 3484 } 3485 3486 /* 3487 * Init TX descriptors. 3488 */ 3489 dc_list_tx_init(sc); 3490 3491 /* 3492 * Load the address of the RX list. 3493 */ 3494 CSR_WRITE_4(sc, DC_RXADDR, DC_RXDESC(sc, 0)); 3495 CSR_WRITE_4(sc, DC_TXADDR, DC_TXDESC(sc, 0)); 3496 3497 /* 3498 * Enable interrupts. 3499 */ 3500 #ifdef DEVICE_POLLING 3501 /* 3502 * ... but only if we are not polling, and make sure they are off in 3503 * the case of polling. Some cards (e.g. fxp) turn interrupts on 3504 * after a reset. 3505 */ 3506 if (ifp->if_flags & IFF_POLLING) 3507 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3508 else 3509 #endif 3510 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 3511 CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF); 3512 3513 /* Enable transmitter. 
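   The receiver is enabled further down, after dc_setfilt() has queued the setup frame.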
*/ 3514 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3515 3516 /* 3517 * If this is an Intel 21143 and we're not using the 3518 * MII port, program the LED control pins so we get 3519 * link and activity indications. 3520 */ 3521 if (sc->dc_flags & DC_TULIP_LEDS) { 3522 CSR_WRITE_4(sc, DC_WATCHDOG, 3523 DC_WDOG_CTLWREN | DC_WDOG_LINK | DC_WDOG_ACTIVITY); 3524 CSR_WRITE_4(sc, DC_WATCHDOG, 0); 3525 } 3526 3527 /* 3528 * Load the RX/multicast filter. We do this sort of late 3529 * because the filter programming scheme on the 21143 and 3530 * some clones requires DMAing a setup frame via the TX 3531 * engine, and we need the transmitter enabled for that. 3532 */ 3533 dc_setfilt(sc); 3534 3535 /* Enable receiver. */ 3536 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); 3537 CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF); 3538 3539 mii_mediachg(mii); 3540 dc_setcfg(sc, sc->dc_if_media); 3541 3542 ifp->if_flags |= IFF_RUNNING; 3543 ifp->if_flags &= ~IFF_OACTIVE; 3544 3545 /* Don't start the ticker if this is a homePNA link. */ 3546 if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1) 3547 sc->dc_link = 1; 3548 else { 3549 if (sc->dc_flags & DC_21143_NWAY) 3550 callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc); 3551 else 3552 callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc); 3553 } 3554 3555 #ifdef SRM_MEDIA 3556 if(sc->dc_srm_media) { 3557 struct ifreq ifr; 3558 3559 ifr.ifr_media = sc->dc_srm_media; 3560 ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA); 3561 sc->dc_srm_media = 0; 3562 } 3563 #endif 3564 DC_UNLOCK(sc); 3565 } 3566 3567 /* 3568 * Set media options. 3569 */ 3570 static int 3571 dc_ifmedia_upd(struct ifnet *ifp) 3572 { 3573 struct dc_softc *sc; 3574 struct mii_data *mii; 3575 struct ifmedia *ifm; 3576 3577 sc = ifp->if_softc; 3578 mii = device_get_softc(sc->dc_miibus); 3579 mii_mediachg(mii); 3580 ifm = &mii->mii_media; 3581 3582 if (DC_IS_DAVICOM(sc) && 3583 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) 3584 dc_setcfg(sc, ifm->ifm_media); 3585 else 3586 sc->dc_link = 0; 3587 3588 return (0); 3589 } 3590 3591 /* 3592 * Report current media status. 
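 * For Davicom HomePNA links the configured media is reported directly instead of the MII status.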
3593 */ 3594 static void 3595 dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3596 { 3597 struct dc_softc *sc; 3598 struct mii_data *mii; 3599 struct ifmedia *ifm; 3600 3601 sc = ifp->if_softc; 3602 mii = device_get_softc(sc->dc_miibus); 3603 mii_pollstat(mii); 3604 ifm = &mii->mii_media; 3605 if (DC_IS_DAVICOM(sc)) { 3606 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { 3607 ifmr->ifm_active = ifm->ifm_media; 3608 ifmr->ifm_status = 0; 3609 return; 3610 } 3611 } 3612 ifmr->ifm_active = mii->mii_media_active; 3613 ifmr->ifm_status = mii->mii_media_status; 3614 } 3615 3616 static int 3617 dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 3618 { 3619 struct dc_softc *sc = ifp->if_softc; 3620 struct ifreq *ifr = (struct ifreq *)data; 3621 struct mii_data *mii; 3622 int error = 0; 3623 3624 DC_LOCK(sc); 3625 3626 switch (command) { 3627 case SIOCSIFFLAGS: 3628 if (ifp->if_flags & IFF_UP) { 3629 int need_setfilt = (ifp->if_flags ^ sc->dc_if_flags) & 3630 (IFF_PROMISC | IFF_ALLMULTI); 3631 3632 if (ifp->if_flags & IFF_RUNNING) { 3633 if (need_setfilt) 3634 dc_setfilt(sc); 3635 } else { 3636 sc->dc_txthresh = 0; 3637 dc_init(sc); 3638 } 3639 } else { 3640 if (ifp->if_flags & IFF_RUNNING) 3641 dc_stop(sc); 3642 } 3643 sc->dc_if_flags = ifp->if_flags; 3644 error = 0; 3645 break; 3646 case SIOCADDMULTI: 3647 case SIOCDELMULTI: 3648 dc_setfilt(sc); 3649 error = 0; 3650 break; 3651 case SIOCGIFMEDIA: 3652 case SIOCSIFMEDIA: 3653 mii = device_get_softc(sc->dc_miibus); 3654 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 3655 #ifdef SRM_MEDIA 3656 if (sc->dc_srm_media) 3657 sc->dc_srm_media = 0; 3658 #endif 3659 break; 3660 default: 3661 error = ether_ioctl(ifp, command, data); 3662 break; 3663 } 3664 3665 DC_UNLOCK(sc); 3666 3667 return (error); 3668 } 3669 3670 static void 3671 dc_watchdog(struct ifnet *ifp) 3672 { 3673 struct dc_softc *sc; 3674 3675 sc = ifp->if_softc; 3676 3677 DC_LOCK(sc); 3678 3679 ifp->if_oerrors++; 3680 printf("dc%d: watchdog timeout\n", sc->dc_unit); 3681 3682 dc_stop(sc); 3683 dc_reset(sc); 3684 dc_init(sc); 3685 3686 if (ifp->if_snd.ifq_head != NULL) 3687 dc_start(ifp); 3688 3689 DC_UNLOCK(sc); 3690 } 3691 3692 /* 3693 * Stop the adapter and free any mbufs allocated to the 3694 * RX and TX lists. 3695 */ 3696 static void 3697 dc_stop(struct dc_softc *sc) 3698 { 3699 struct ifnet *ifp; 3700 struct dc_list_data *ld; 3701 struct dc_chain_data *cd; 3702 int i; 3703 u_int32_t ctl; 3704 3705 DC_LOCK(sc); 3706 3707 ifp = &sc->arpcom.ac_if; 3708 ifp->if_timer = 0; 3709 ld = sc->dc_ldata; 3710 cd = &sc->dc_cdata; 3711 3712 callout_stop(&sc->dc_stat_ch); 3713 3714 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3715 #ifdef DEVICE_POLLING 3716 ether_poll_deregister(ifp); 3717 #endif 3718 3719 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON)); 3720 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3721 CSR_WRITE_4(sc, DC_TXADDR, 0x00000000); 3722 CSR_WRITE_4(sc, DC_RXADDR, 0x00000000); 3723 sc->dc_link = 0; 3724 3725 /* 3726 * Free data in the RX lists. 3727 */ 3728 for (i = 0; i < DC_RX_LIST_CNT; i++) { 3729 if (cd->dc_rx_chain[i] != NULL) { 3730 m_freem(cd->dc_rx_chain[i]); 3731 cd->dc_rx_chain[i] = NULL; 3732 } 3733 } 3734 bzero(&ld->dc_rx_list, sizeof(ld->dc_rx_list)); 3735 3736 /* 3737 * Free the TX list buffers. 
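 * Descriptors used for setup frames or non-first fragments are not unmapped or freed; their chain pointers are simply cleared.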
3738 */ 3739 for (i = 0; i < DC_TX_LIST_CNT; i++) { 3740 if (cd->dc_tx_chain[i] != NULL) { 3741 ctl = le32toh(ld->dc_tx_list[i].dc_ctl); 3742 if ((ctl & DC_TXCTL_SETUP) || 3743 !(ctl & DC_TXCTL_FIRSTFRAG)) { 3744 cd->dc_tx_chain[i] = NULL; 3745 continue; 3746 } 3747 bus_dmamap_unload(sc->dc_mtag, cd->dc_tx_map[i]); 3748 m_freem(cd->dc_tx_chain[i]); 3749 cd->dc_tx_chain[i] = NULL; 3750 } 3751 } 3752 bzero(&ld->dc_tx_list, sizeof(ld->dc_tx_list)); 3753 3754 DC_UNLOCK(sc); 3755 } 3756 3757 /* 3758 * Device suspend routine. Stop the interface and save some PCI 3759 * settings in case the BIOS doesn't restore them properly on 3760 * resume. 3761 */ 3762 static int 3763 dc_suspend(device_t dev) 3764 { 3765 struct dc_softc *sc; 3766 int i, s; 3767 3768 s = splimp(); 3769 3770 sc = device_get_softc(dev); 3771 3772 dc_stop(sc); 3773 3774 for (i = 0; i < 5; i++) 3775 sc->saved_maps[i] = pci_read_config(dev, PCIR_BAR(i), 4); 3776 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); 3777 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); 3778 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 3779 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); 3780 3781 sc->suspended = 1; 3782 3783 splx(s); 3784 return (0); 3785 } 3786 3787 /* 3788 * Device resume routine. Restore some PCI settings in case the BIOS 3789 * doesn't, re-enable busmastering, and restart the interface if 3790 * appropriate. 3791 */ 3792 static int 3793 dc_resume(device_t dev) 3794 { 3795 struct dc_softc *sc; 3796 struct ifnet *ifp; 3797 int i, s; 3798 3799 s = splimp(); 3800 3801 sc = device_get_softc(dev); 3802 ifp = &sc->arpcom.ac_if; 3803 #ifndef BURN_BRIDGES 3804 dc_acpi(dev); 3805 #endif 3806 /* better way to do this? */ 3807 for (i = 0; i < 5; i++) 3808 pci_write_config(dev, PCIR_BAR(i), sc->saved_maps[i], 4); 3809 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); 3810 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); 3811 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); 3812 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); 3813 3814 /* reenable busmastering */ 3815 pci_enable_busmaster(dev); 3816 pci_enable_io(dev, DC_RES); 3817 3818 /* reinitialize interface if necessary */ 3819 if (ifp->if_flags & IFF_UP) 3820 dc_init(sc); 3821 3822 sc->suspended = 0; 3823 3824 splx(s); 3825 return (0); 3826 } 3827 3828 /* 3829 * Stop all chip I/O so that the kernel's probe routines don't 3830 * get confused by errant DMAs when rebooting. 3831 */ 3832 static void 3833 dc_shutdown(device_t dev) 3834 { 3835 struct dc_softc *sc; 3836 3837 sc = device_get_softc(dev); 3838 3839 dc_stop(sc); 3840 } 3841