1 /*- 2 * Copyright (c) 1997, 1998, 1999 3 * Bill Paul <wpaul@ee.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 /* 37 * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143 38 * series chips and several workalikes including the following: 39 * 40 * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com) 41 * Macronix/Lite-On 82c115 PNIC II (www.macronix.com) 42 * Lite-On 82c168/82c169 PNIC (www.litecom.com) 43 * ASIX Electronics AX88140A (www.asix.com.tw) 44 * ASIX Electronics AX88141 (www.asix.com.tw) 45 * ADMtek AL981 (www.admtek.com.tw) 46 * ADMtek AN983 (www.admtek.com.tw) 47 * ADMtek CardBus AN985 (www.admtek.com.tw) 48 * Netgear FA511 (www.netgear.com) Appears to be rebadged ADMTek CardBus AN985 49 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com) 50 * Accton EN1217 (www.accton.com) 51 * Xircom X3201 (www.xircom.com) 52 * Abocom FE2500 53 * Conexant LANfinity (www.conexant.com) 54 * 3Com OfficeConnect 10/100B 3CSOHO100B (www.3com.com) 55 * 56 * Datasheets for the 21143 are available at developer.intel.com. 57 * Datasheets for the clone parts can be found at their respective sites. 58 * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.) 59 * The PNIC II is essentially a Macronix 98715A chip; the only difference 60 * worth noting is that its multicast hash table is only 128 bits wide 61 * instead of 512. 62 * 63 * Written by Bill Paul <wpaul@ee.columbia.edu> 64 * Electrical Engineering Department 65 * Columbia University, New York City 66 */ 67 /* 68 * The Intel 21143 is the successor to the DEC 21140. It is basically 69 * the same as the 21140 but with a few new features. 
The 21143 supports 70 * three kinds of media attachments: 71 * 72 * o MII port, for 10Mbps and 100Mbps support and NWAY 73 * autonegotiation provided by an external PHY. 74 * o SYM port, for symbol mode 100Mbps support. 75 * o 10baseT port. 76 * o AUI/BNC port. 77 * 78 * The 100Mbps SYM port and 10baseT port can be used together in 79 * combination with the internal NWAY support to create a 10/100 80 * autosensing configuration. 81 * 82 * Note that not all tulip workalikes are handled in this driver: we only 83 * deal with those which are relatively well behaved. The Winbond is 84 * handled separately due to its different register offsets and the 85 * special handling needed for its various bugs. The PNIC is handled 86 * here, but I'm not thrilled about it. 87 * 88 * All of the workalike chips use some form of MII transceiver support 89 * with the exception of the Macronix chips, which also have a SYM port. 90 * The ASIX AX88140A is also documented to have a SYM port, but all 91 * the cards I've seen use an MII transceiver, probably because the 92 * AX88140A doesn't support internal NWAY. 93 */ 94 95 #ifdef HAVE_KERNEL_OPTION_HEADERS 96 #include "opt_device_polling.h" 97 #endif 98 99 #include <sys/param.h> 100 #include <sys/endian.h> 101 #include <sys/systm.h> 102 #include <sys/sockio.h> 103 #include <sys/mbuf.h> 104 #include <sys/malloc.h> 105 #include <sys/kernel.h> 106 #include <sys/module.h> 107 #include <sys/socket.h> 108 109 #include <net/if.h> 110 #include <net/if_arp.h> 111 #include <net/ethernet.h> 112 #include <net/if_dl.h> 113 #include <net/if_media.h> 114 #include <net/if_types.h> 115 #include <net/if_vlan_var.h> 116 117 #include <net/bpf.h> 118 119 #include <machine/bus.h> 120 #include <machine/resource.h> 121 #include <sys/bus.h> 122 #include <sys/rman.h> 123 124 #include <dev/mii/mii.h> 125 #include <dev/mii/miivar.h> 126 127 #include <dev/pci/pcireg.h> 128 #include <dev/pci/pcivar.h> 129 130 #define DC_USEIOSPACE 131 132 #include <dev/dc/if_dcreg.h> 133 134 #ifdef __sparc64__ 135 #include <dev/ofw/openfirm.h> 136 #include <machine/ofw_machdep.h> 137 #endif 138 139 MODULE_DEPEND(dc, pci, 1, 1, 1); 140 MODULE_DEPEND(dc, ether, 1, 1, 1); 141 MODULE_DEPEND(dc, miibus, 1, 1, 1); 142 143 /* 144 * "device miibus" is required in kernel config. See GENERIC if you get 145 * errors here. 146 */ 147 #include "miibus_if.h" 148 149 /* 150 * Various supported device vendors/types and their names. 
151 */ 152 static const struct dc_type dc_devs[] = { 153 { DC_DEVID(DC_VENDORID_DEC, DC_DEVICEID_21143), 0, 154 "Intel 21143 10/100BaseTX" }, 155 { DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009), 0, 156 "Davicom DM9009 10/100BaseTX" }, 157 { DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100), 0, 158 "Davicom DM9100 10/100BaseTX" }, 159 { DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102), DC_REVISION_DM9102A, 160 "Davicom DM9102A 10/100BaseTX" }, 161 { DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102), 0, 162 "Davicom DM9102 10/100BaseTX" }, 163 { DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AL981), 0, 164 "ADMtek AL981 10/100BaseTX" }, 165 { DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN983), 0, 166 "ADMtek AN983 10/100BaseTX" }, 167 { DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN985), 0, 168 "ADMtek AN985 CardBus 10/100BaseTX or clone" }, 169 { DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511), 0, 170 "ADMtek ADM9511 10/100BaseTX" }, 171 { DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513), 0, 172 "ADMtek ADM9513 10/100BaseTX" }, 173 { DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A), DC_REVISION_88141, 174 "ASIX AX88141 10/100BaseTX" }, 175 { DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A), 0, 176 "ASIX AX88140A 10/100BaseTX" }, 177 { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713), DC_REVISION_98713A, 178 "Macronix 98713A 10/100BaseTX" }, 179 { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713), 0, 180 "Macronix 98713 10/100BaseTX" }, 181 { DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP), DC_REVISION_98713A, 182 "Compex RL100-TX 10/100BaseTX" }, 183 { DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP), 0, 184 "Compex RL100-TX 10/100BaseTX" }, 185 { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), DC_REVISION_98725, 186 "Macronix 98725 10/100BaseTX" }, 187 { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), DC_REVISION_98715AEC_C, 188 "Macronix 98715AEC-C 10/100BaseTX" }, 189 { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), 0, 190 "Macronix 98715/98715A 10/100BaseTX" }, 191 { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98727), 0, 192 "Macronix 98727/98732 10/100BaseTX" }, 193 { DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C115), 0, 194 "LC82C115 PNIC II 10/100BaseTX" }, 195 { DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168), DC_REVISION_82C169, 196 "82c169 PNIC 10/100BaseTX" }, 197 { DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168), 0, 198 "82c168 PNIC 10/100BaseTX" }, 199 { DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN1217), 0, 200 "Accton EN1217 10/100BaseTX" }, 201 { DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN2242), 0, 202 "Accton EN2242 MiniPCI 10/100BaseTX" }, 203 { DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201), 0, 204 "Xircom X3201 10/100BaseTX" }, 205 { DC_DEVID(DC_VENDORID_DLINK, DC_DEVICEID_DRP32TXD), 0, 206 "Neteasy DRP-32TXD Cardbus 10/100" }, 207 { DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500), 0, 208 "Abocom FE2500 10/100BaseTX" }, 209 { DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500MX), 0, 210 "Abocom FE2500MX 10/100BaseTX" }, 211 { DC_DEVID(DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112), 0, 212 "Conexant LANfinity MiniPCI 10/100BaseTX" }, 213 { DC_DEVID(DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX), 0, 214 "Hawking CB102 CardBus 10/100" }, 215 { DC_DEVID(DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T), 0, 216 "PlaneX FNW-3602-T CardBus 10/100" }, 217 { DC_DEVID(DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB), 0, 218 "3Com OfficeConnect 10/100B" }, 219 { DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120), 0, 220 "Microsoft MN-120 CardBus 10/100" }, 221 { DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130), 0, 222 "Microsoft MN-130 
10/100" }, 223 { DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB08), 0, 224 "Linksys PCMPC200 CardBus 10/100" }, 225 { DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB09), 0, 226 "Linksys PCMPC200 CardBus 10/100" }, 227 { 0, 0, NULL } 228 }; 229 230 static int dc_probe(device_t); 231 static int dc_attach(device_t); 232 static int dc_detach(device_t); 233 static int dc_suspend(device_t); 234 static int dc_resume(device_t); 235 static const struct dc_type *dc_devtype(device_t); 236 static int dc_newbuf(struct dc_softc *, int, int); 237 static int dc_encap(struct dc_softc *, struct mbuf **); 238 static void dc_pnic_rx_bug_war(struct dc_softc *, int); 239 static int dc_rx_resync(struct dc_softc *); 240 static int dc_rxeof(struct dc_softc *); 241 static void dc_txeof(struct dc_softc *); 242 static void dc_tick(void *); 243 static void dc_tx_underrun(struct dc_softc *); 244 static void dc_intr(void *); 245 static void dc_start(struct ifnet *); 246 static void dc_start_locked(struct ifnet *); 247 static int dc_ioctl(struct ifnet *, u_long, caddr_t); 248 static void dc_init(void *); 249 static void dc_init_locked(struct dc_softc *); 250 static void dc_stop(struct dc_softc *); 251 static void dc_watchdog(void *); 252 static int dc_shutdown(device_t); 253 static int dc_ifmedia_upd(struct ifnet *); 254 static void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *); 255 256 static void dc_delay(struct dc_softc *); 257 static void dc_eeprom_idle(struct dc_softc *); 258 static void dc_eeprom_putbyte(struct dc_softc *, int); 259 static void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *); 260 static void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *); 261 static void dc_eeprom_getword_xircom(struct dc_softc *, int, u_int16_t *); 262 static void dc_eeprom_width(struct dc_softc *); 263 static void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int); 264 265 static void dc_mii_writebit(struct dc_softc *, int); 266 static int dc_mii_readbit(struct dc_softc *); 267 static void dc_mii_sync(struct dc_softc *); 268 static void dc_mii_send(struct dc_softc *, u_int32_t, int); 269 static int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *); 270 static int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *); 271 static int dc_miibus_readreg(device_t, int, int); 272 static int dc_miibus_writereg(device_t, int, int, int); 273 static void dc_miibus_statchg(device_t); 274 static void dc_miibus_mediainit(device_t); 275 276 static void dc_setcfg(struct dc_softc *, int); 277 static uint32_t dc_mchash_le(struct dc_softc *, const uint8_t *); 278 static uint32_t dc_mchash_be(const uint8_t *); 279 static void dc_setfilt_21143(struct dc_softc *); 280 static void dc_setfilt_asix(struct dc_softc *); 281 static void dc_setfilt_admtek(struct dc_softc *); 282 static void dc_setfilt_xircom(struct dc_softc *); 283 284 static void dc_setfilt(struct dc_softc *); 285 286 static void dc_reset(struct dc_softc *); 287 static int dc_list_rx_init(struct dc_softc *); 288 static int dc_list_tx_init(struct dc_softc *); 289 290 static void dc_read_srom(struct dc_softc *, int); 291 static void dc_parse_21143_srom(struct dc_softc *); 292 static void dc_decode_leaf_sia(struct dc_softc *, struct dc_eblock_sia *); 293 static void dc_decode_leaf_mii(struct dc_softc *, struct dc_eblock_mii *); 294 static void dc_decode_leaf_sym(struct dc_softc *, struct dc_eblock_sym *); 295 static void dc_apply_fixup(struct dc_softc *, int); 296 static int dc_check_multiport(struct dc_softc *); 297 298 #ifdef 
DC_USEIOSPACE 299 #define DC_RES SYS_RES_IOPORT 300 #define DC_RID DC_PCI_CFBIO 301 #else 302 #define DC_RES SYS_RES_MEMORY 303 #define DC_RID DC_PCI_CFBMA 304 #endif 305 306 static device_method_t dc_methods[] = { 307 /* Device interface */ 308 DEVMETHOD(device_probe, dc_probe), 309 DEVMETHOD(device_attach, dc_attach), 310 DEVMETHOD(device_detach, dc_detach), 311 DEVMETHOD(device_suspend, dc_suspend), 312 DEVMETHOD(device_resume, dc_resume), 313 DEVMETHOD(device_shutdown, dc_shutdown), 314 315 /* bus interface */ 316 DEVMETHOD(bus_print_child, bus_generic_print_child), 317 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 318 319 /* MII interface */ 320 DEVMETHOD(miibus_readreg, dc_miibus_readreg), 321 DEVMETHOD(miibus_writereg, dc_miibus_writereg), 322 DEVMETHOD(miibus_statchg, dc_miibus_statchg), 323 DEVMETHOD(miibus_mediainit, dc_miibus_mediainit), 324 325 { 0, 0 } 326 }; 327 328 static driver_t dc_driver = { 329 "dc", 330 dc_methods, 331 sizeof(struct dc_softc) 332 }; 333 334 static devclass_t dc_devclass; 335 336 DRIVER_MODULE(dc, pci, dc_driver, dc_devclass, 0, 0); 337 DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, 0, 0); 338 339 #define DC_SETBIT(sc, reg, x) \ 340 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) 341 342 #define DC_CLRBIT(sc, reg, x) \ 343 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) 344 345 #define SIO_SET(x) DC_SETBIT(sc, DC_SIO, (x)) 346 #define SIO_CLR(x) DC_CLRBIT(sc, DC_SIO, (x)) 347 348 static void 349 dc_delay(struct dc_softc *sc) 350 { 351 int idx; 352 353 for (idx = (300 / 33) + 1; idx > 0; idx--) 354 CSR_READ_4(sc, DC_BUSCTL); 355 } 356 357 static void 358 dc_eeprom_width(struct dc_softc *sc) 359 { 360 int i; 361 362 /* Force EEPROM to idle state. */ 363 dc_eeprom_idle(sc); 364 365 /* Enter EEPROM access mode. */ 366 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 367 dc_delay(sc); 368 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 369 dc_delay(sc); 370 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 371 dc_delay(sc); 372 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 373 dc_delay(sc); 374 375 for (i = 3; i--;) { 376 if (6 & (1 << i)) 377 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 378 else 379 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 380 dc_delay(sc); 381 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 382 dc_delay(sc); 383 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 384 dc_delay(sc); 385 } 386 387 for (i = 1; i <= 12; i++) { 388 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 389 dc_delay(sc); 390 if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) { 391 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 392 dc_delay(sc); 393 break; 394 } 395 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 396 dc_delay(sc); 397 } 398 399 /* Turn off EEPROM access mode. */ 400 dc_eeprom_idle(sc); 401 402 if (i < 4 || i > 12) 403 sc->dc_romwidth = 6; 404 else 405 sc->dc_romwidth = i; 406 407 /* Enter EEPROM access mode. */ 408 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 409 dc_delay(sc); 410 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 411 dc_delay(sc); 412 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 413 dc_delay(sc); 414 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 415 dc_delay(sc); 416 417 /* Turn off EEPROM access mode. 
*/ 418 dc_eeprom_idle(sc); 419 } 420 421 static void 422 dc_eeprom_idle(struct dc_softc *sc) 423 { 424 int i; 425 426 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 427 dc_delay(sc); 428 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 429 dc_delay(sc); 430 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 431 dc_delay(sc); 432 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 433 dc_delay(sc); 434 435 for (i = 0; i < 25; i++) { 436 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 437 dc_delay(sc); 438 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 439 dc_delay(sc); 440 } 441 442 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 443 dc_delay(sc); 444 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS); 445 dc_delay(sc); 446 CSR_WRITE_4(sc, DC_SIO, 0x00000000); 447 } 448 449 /* 450 * Send a read command and address to the EEPROM, check for ACK. 451 */ 452 static void 453 dc_eeprom_putbyte(struct dc_softc *sc, int addr) 454 { 455 int d, i; 456 457 d = DC_EECMD_READ >> 6; 458 for (i = 3; i--; ) { 459 if (d & (1 << i)) 460 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 461 else 462 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 463 dc_delay(sc); 464 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 465 dc_delay(sc); 466 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 467 dc_delay(sc); 468 } 469 470 /* 471 * Feed in each bit and strobe the clock. 472 */ 473 for (i = sc->dc_romwidth; i--;) { 474 if (addr & (1 << i)) { 475 SIO_SET(DC_SIO_EE_DATAIN); 476 } else { 477 SIO_CLR(DC_SIO_EE_DATAIN); 478 } 479 dc_delay(sc); 480 SIO_SET(DC_SIO_EE_CLK); 481 dc_delay(sc); 482 SIO_CLR(DC_SIO_EE_CLK); 483 dc_delay(sc); 484 } 485 } 486 487 /* 488 * Read a word of data stored in the EEPROM at address 'addr.' 489 * The PNIC 82c168/82c169 has its own non-standard way to read 490 * the EEPROM. 491 */ 492 static void 493 dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest) 494 { 495 int i; 496 u_int32_t r; 497 498 CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ | addr); 499 500 for (i = 0; i < DC_TIMEOUT; i++) { 501 DELAY(1); 502 r = CSR_READ_4(sc, DC_SIO); 503 if (!(r & DC_PN_SIOCTL_BUSY)) { 504 *dest = (u_int16_t)(r & 0xFFFF); 505 return; 506 } 507 } 508 } 509 510 /* 511 * Read a word of data stored in the EEPROM at address 'addr.' 512 * The Xircom X3201 has its own non-standard way to read 513 * the EEPROM, too. 514 */ 515 static void 516 dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest) 517 { 518 519 SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); 520 521 addr *= 2; 522 CSR_WRITE_4(sc, DC_ROM, addr | 0x160); 523 *dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff; 524 addr += 1; 525 CSR_WRITE_4(sc, DC_ROM, addr | 0x160); 526 *dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8; 527 528 SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); 529 } 530 531 /* 532 * Read a word of data stored in the EEPROM at address 'addr.' 533 */ 534 static void 535 dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest) 536 { 537 int i; 538 u_int16_t word = 0; 539 540 /* Force EEPROM to idle state. */ 541 dc_eeprom_idle(sc); 542 543 /* Enter EEPROM access mode. */ 544 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 545 dc_delay(sc); 546 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 547 dc_delay(sc); 548 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 549 dc_delay(sc); 550 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 551 dc_delay(sc); 552 553 /* 554 * Send address of word we want to read. 555 */ 556 dc_eeprom_putbyte(sc, addr); 557 558 /* 559 * Start reading bits from EEPROM. 
560 */ 561 for (i = 0x8000; i; i >>= 1) { 562 SIO_SET(DC_SIO_EE_CLK); 563 dc_delay(sc); 564 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT) 565 word |= i; 566 dc_delay(sc); 567 SIO_CLR(DC_SIO_EE_CLK); 568 dc_delay(sc); 569 } 570 571 /* Turn off EEPROM access mode. */ 572 dc_eeprom_idle(sc); 573 574 *dest = word; 575 } 576 577 /* 578 * Read a sequence of words from the EEPROM. 579 */ 580 static void 581 dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt, int be) 582 { 583 int i; 584 u_int16_t word = 0, *ptr; 585 586 for (i = 0; i < cnt; i++) { 587 if (DC_IS_PNIC(sc)) 588 dc_eeprom_getword_pnic(sc, off + i, &word); 589 else if (DC_IS_XIRCOM(sc)) 590 dc_eeprom_getword_xircom(sc, off + i, &word); 591 else 592 dc_eeprom_getword(sc, off + i, &word); 593 ptr = (u_int16_t *)(dest + (i * 2)); 594 if (be) 595 *ptr = be16toh(word); 596 else 597 *ptr = le16toh(word); 598 } 599 } 600 601 /* 602 * The following two routines are taken from the Macronix 98713 603 * Application Notes pp.19-21. 604 */ 605 /* 606 * Write a bit to the MII bus. 607 */ 608 static void 609 dc_mii_writebit(struct dc_softc *sc, int bit) 610 { 611 uint32_t reg; 612 613 reg = DC_SIO_ROMCTL_WRITE | (bit != 0 ? DC_SIO_MII_DATAOUT : 0); 614 CSR_WRITE_4(sc, DC_SIO, reg); 615 CSR_BARRIER_4(sc, DC_SIO, 616 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 617 DELAY(1); 618 619 CSR_WRITE_4(sc, DC_SIO, reg | DC_SIO_MII_CLK); 620 CSR_BARRIER_4(sc, DC_SIO, 621 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 622 DELAY(1); 623 CSR_WRITE_4(sc, DC_SIO, reg); 624 CSR_BARRIER_4(sc, DC_SIO, 625 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 626 DELAY(1); 627 } 628 629 /* 630 * Read a bit from the MII bus. 631 */ 632 static int 633 dc_mii_readbit(struct dc_softc *sc) 634 { 635 uint32_t reg; 636 637 reg = DC_SIO_ROMCTL_READ | DC_SIO_MII_DIR; 638 CSR_WRITE_4(sc, DC_SIO, reg); 639 CSR_BARRIER_4(sc, DC_SIO, 640 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 641 DELAY(1); 642 (void)CSR_READ_4(sc, DC_SIO); 643 CSR_WRITE_4(sc, DC_SIO, reg | DC_SIO_MII_CLK); 644 CSR_BARRIER_4(sc, DC_SIO, 645 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 646 DELAY(1); 647 CSR_WRITE_4(sc, DC_SIO, reg); 648 CSR_BARRIER_4(sc, DC_SIO, 649 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 650 DELAY(1); 651 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN) 652 return (1); 653 654 return (0); 655 } 656 657 /* 658 * Sync the PHYs by setting data bit and strobing the clock 32 times. 659 */ 660 static void 661 dc_mii_sync(struct dc_softc *sc) 662 { 663 int i; 664 665 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); 666 CSR_BARRIER_4(sc, DC_SIO, 667 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 668 DELAY(1); 669 670 for (i = 0; i < 32; i++) 671 dc_mii_writebit(sc, 1); 672 } 673 674 /* 675 * Clock a series of bits through the MII. 676 */ 677 static void 678 dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt) 679 { 680 int i; 681 682 for (i = (0x1 << (cnt - 1)); i; i >>= 1) 683 dc_mii_writebit(sc, bits & i); 684 } 685 686 /* 687 * Read an PHY register through the MII. 688 */ 689 static int 690 dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame) 691 { 692 int i; 693 694 /* 695 * Set up frame for RX. 696 */ 697 frame->mii_stdelim = DC_MII_STARTDELIM; 698 frame->mii_opcode = DC_MII_READOP; 699 700 /* 701 * Sync the PHYs. 702 */ 703 dc_mii_sync(sc); 704 705 /* 706 * Send command/address info. 
707 */ 708 dc_mii_send(sc, frame->mii_stdelim, 2); 709 dc_mii_send(sc, frame->mii_opcode, 2); 710 dc_mii_send(sc, frame->mii_phyaddr, 5); 711 dc_mii_send(sc, frame->mii_regaddr, 5); 712 713 /* 714 * Now try reading data bits. If the turnaround failed, we still 715 * need to clock through 16 cycles to keep the PHY(s) in sync. 716 */ 717 frame->mii_turnaround = dc_mii_readbit(sc); 718 if (frame->mii_turnaround != 0) { 719 for (i = 0; i < 16; i++) 720 dc_mii_readbit(sc); 721 goto fail; 722 } 723 for (i = 0x8000; i; i >>= 1) { 724 if (dc_mii_readbit(sc)) 725 frame->mii_data |= i; 726 } 727 728 fail: 729 730 /* Clock the idle bits. */ 731 dc_mii_writebit(sc, 0); 732 dc_mii_writebit(sc, 0); 733 734 if (frame->mii_turnaround != 0) 735 return (1); 736 return (0); 737 } 738 739 /* 740 * Write to a PHY register through the MII. 741 */ 742 static int 743 dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame) 744 { 745 746 /* 747 * Set up frame for TX. 748 */ 749 frame->mii_stdelim = DC_MII_STARTDELIM; 750 frame->mii_opcode = DC_MII_WRITEOP; 751 frame->mii_turnaround = DC_MII_TURNAROUND; 752 753 /* 754 * Sync the PHYs. 755 */ 756 dc_mii_sync(sc); 757 758 dc_mii_send(sc, frame->mii_stdelim, 2); 759 dc_mii_send(sc, frame->mii_opcode, 2); 760 dc_mii_send(sc, frame->mii_phyaddr, 5); 761 dc_mii_send(sc, frame->mii_regaddr, 5); 762 dc_mii_send(sc, frame->mii_turnaround, 2); 763 dc_mii_send(sc, frame->mii_data, 16); 764 765 /* Clock the idle bits. */ 766 dc_mii_writebit(sc, 0); 767 dc_mii_writebit(sc, 0); 768 769 return (0); 770 } 771 772 static int 773 dc_miibus_readreg(device_t dev, int phy, int reg) 774 { 775 struct dc_mii_frame frame; 776 struct dc_softc *sc; 777 int i, rval, phy_reg = 0; 778 779 sc = device_get_softc(dev); 780 bzero(&frame, sizeof(frame)); 781 782 if (sc->dc_pmode != DC_PMODE_MII) { 783 if (phy == (MII_NPHY - 1)) { 784 switch (reg) { 785 case MII_BMSR: 786 /* 787 * Fake something to make the probe 788 * code think there's a PHY here. 789 */ 790 return (BMSR_MEDIAMASK); 791 break; 792 case MII_PHYIDR1: 793 if (DC_IS_PNIC(sc)) 794 return (DC_VENDORID_LO); 795 return (DC_VENDORID_DEC); 796 break; 797 case MII_PHYIDR2: 798 if (DC_IS_PNIC(sc)) 799 return (DC_DEVICEID_82C168); 800 return (DC_DEVICEID_21143); 801 break; 802 default: 803 return (0); 804 break; 805 } 806 } else 807 return (0); 808 } 809 810 if (DC_IS_PNIC(sc)) { 811 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ | 812 (phy << 23) | (reg << 18)); 813 for (i = 0; i < DC_TIMEOUT; i++) { 814 DELAY(1); 815 rval = CSR_READ_4(sc, DC_PN_MII); 816 if (!(rval & DC_PN_MII_BUSY)) { 817 rval &= 0xFFFF; 818 return (rval == 0xFFFF ? 
0 : rval); 819 } 820 } 821 return (0); 822 } 823 824 if (DC_IS_COMET(sc)) { 825 switch (reg) { 826 case MII_BMCR: 827 phy_reg = DC_AL_BMCR; 828 break; 829 case MII_BMSR: 830 phy_reg = DC_AL_BMSR; 831 break; 832 case MII_PHYIDR1: 833 phy_reg = DC_AL_VENID; 834 break; 835 case MII_PHYIDR2: 836 phy_reg = DC_AL_DEVID; 837 break; 838 case MII_ANAR: 839 phy_reg = DC_AL_ANAR; 840 break; 841 case MII_ANLPAR: 842 phy_reg = DC_AL_LPAR; 843 break; 844 case MII_ANER: 845 phy_reg = DC_AL_ANER; 846 break; 847 default: 848 device_printf(dev, "phy_read: bad phy register %x\n", 849 reg); 850 return (0); 851 break; 852 } 853 854 rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF; 855 856 if (rval == 0xFFFF) 857 return (0); 858 return (rval); 859 } 860 861 frame.mii_phyaddr = phy; 862 frame.mii_regaddr = reg; 863 if (sc->dc_type == DC_TYPE_98713) { 864 phy_reg = CSR_READ_4(sc, DC_NETCFG); 865 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); 866 } 867 dc_mii_readreg(sc, &frame); 868 if (sc->dc_type == DC_TYPE_98713) 869 CSR_WRITE_4(sc, DC_NETCFG, phy_reg); 870 871 return (frame.mii_data); 872 } 873 874 static int 875 dc_miibus_writereg(device_t dev, int phy, int reg, int data) 876 { 877 struct dc_softc *sc; 878 struct dc_mii_frame frame; 879 int i, phy_reg = 0; 880 881 sc = device_get_softc(dev); 882 bzero(&frame, sizeof(frame)); 883 884 if (DC_IS_PNIC(sc)) { 885 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE | 886 (phy << 23) | (reg << 10) | data); 887 for (i = 0; i < DC_TIMEOUT; i++) { 888 if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY)) 889 break; 890 } 891 return (0); 892 } 893 894 if (DC_IS_COMET(sc)) { 895 switch (reg) { 896 case MII_BMCR: 897 phy_reg = DC_AL_BMCR; 898 break; 899 case MII_BMSR: 900 phy_reg = DC_AL_BMSR; 901 break; 902 case MII_PHYIDR1: 903 phy_reg = DC_AL_VENID; 904 break; 905 case MII_PHYIDR2: 906 phy_reg = DC_AL_DEVID; 907 break; 908 case MII_ANAR: 909 phy_reg = DC_AL_ANAR; 910 break; 911 case MII_ANLPAR: 912 phy_reg = DC_AL_LPAR; 913 break; 914 case MII_ANER: 915 phy_reg = DC_AL_ANER; 916 break; 917 default: 918 device_printf(dev, "phy_write: bad phy register %x\n", 919 reg); 920 return (0); 921 break; 922 } 923 924 CSR_WRITE_4(sc, phy_reg, data); 925 return (0); 926 } 927 928 frame.mii_phyaddr = phy; 929 frame.mii_regaddr = reg; 930 frame.mii_data = data; 931 932 if (sc->dc_type == DC_TYPE_98713) { 933 phy_reg = CSR_READ_4(sc, DC_NETCFG); 934 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); 935 } 936 dc_mii_writereg(sc, &frame); 937 if (sc->dc_type == DC_TYPE_98713) 938 CSR_WRITE_4(sc, DC_NETCFG, phy_reg); 939 940 return (0); 941 } 942 943 static void 944 dc_miibus_statchg(device_t dev) 945 { 946 struct dc_softc *sc; 947 struct mii_data *mii; 948 struct ifmedia *ifm; 949 950 sc = device_get_softc(dev); 951 if (DC_IS_ADMTEK(sc)) 952 return; 953 954 mii = device_get_softc(sc->dc_miibus); 955 ifm = &mii->mii_media; 956 if (DC_IS_DAVICOM(sc) && 957 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { 958 dc_setcfg(sc, ifm->ifm_media); 959 sc->dc_if_media = ifm->ifm_media; 960 } else { 961 dc_setcfg(sc, mii->mii_media_active); 962 sc->dc_if_media = mii->mii_media_active; 963 } 964 } 965 966 /* 967 * Special support for DM9102A cards with HomePNA PHYs. Note: 968 * with the Davicom DM9102A/DM9801 eval board that I have, it seems 969 * to be impossible to talk to the management interface of the DM9801 970 * PHY (its MDIO pin is not connected to anything). Consequently, 971 * the driver has to just 'know' about the additional mode and deal 972 * with it itself. 
*sigh* 973 */ 974 static void 975 dc_miibus_mediainit(device_t dev) 976 { 977 struct dc_softc *sc; 978 struct mii_data *mii; 979 struct ifmedia *ifm; 980 int rev; 981 982 rev = pci_get_revid(dev); 983 984 sc = device_get_softc(dev); 985 mii = device_get_softc(sc->dc_miibus); 986 ifm = &mii->mii_media; 987 988 if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A) 989 ifmedia_add(ifm, IFM_ETHER | IFM_HPNA_1, 0, NULL); 990 } 991 992 #define DC_BITS_512 9 993 #define DC_BITS_128 7 994 #define DC_BITS_64 6 995 996 static uint32_t 997 dc_mchash_le(struct dc_softc *sc, const uint8_t *addr) 998 { 999 uint32_t crc; 1000 1001 /* Compute CRC for the address value. */ 1002 crc = ether_crc32_le(addr, ETHER_ADDR_LEN); 1003 1004 /* 1005 * The hash table on the PNIC II and the MX98715AEC-C/D/E 1006 * chips is only 128 bits wide. 1007 */ 1008 if (sc->dc_flags & DC_128BIT_HASH) 1009 return (crc & ((1 << DC_BITS_128) - 1)); 1010 1011 /* The hash table on the MX98715BEC is only 64 bits wide. */ 1012 if (sc->dc_flags & DC_64BIT_HASH) 1013 return (crc & ((1 << DC_BITS_64) - 1)); 1014 1015 /* Xircom's hash filtering table is different (read: weird) */ 1016 /* Xircom uses the LEAST significant bits */ 1017 if (DC_IS_XIRCOM(sc)) { 1018 if ((crc & 0x180) == 0x180) 1019 return ((crc & 0x0F) + (crc & 0x70) * 3 + (14 << 4)); 1020 else 1021 return ((crc & 0x1F) + ((crc >> 1) & 0xF0) * 3 + 1022 (12 << 4)); 1023 } 1024 1025 return (crc & ((1 << DC_BITS_512) - 1)); 1026 } 1027 1028 /* 1029 * Calculate CRC of a multicast group address, return the lower 6 bits. 1030 */ 1031 static uint32_t 1032 dc_mchash_be(const uint8_t *addr) 1033 { 1034 uint32_t crc; 1035 1036 /* Compute CRC for the address value. */ 1037 crc = ether_crc32_be(addr, ETHER_ADDR_LEN); 1038 1039 /* Return the filter bit position. */ 1040 return ((crc >> 26) & 0x0000003F); 1041 } 1042 1043 /* 1044 * 21143-style RX filter setup routine. Filter programming is done by 1045 * downloading a special setup frame into the TX engine. 21143, Macronix, 1046 * PNIC, PNIC II and Davicom chips are programmed this way. 1047 * 1048 * We always program the chip using 'hash perfect' mode, i.e. one perfect 1049 * address (our node address) and a 512-bit hash filter for multicast 1050 * frames. We also sneak the broadcast address into the hash filter since 1051 * we need that too. 1052 */ 1053 static void 1054 dc_setfilt_21143(struct dc_softc *sc) 1055 { 1056 uint16_t eaddr[(ETHER_ADDR_LEN+1)/2]; 1057 struct dc_desc *sframe; 1058 u_int32_t h, *sp; 1059 struct ifmultiaddr *ifma; 1060 struct ifnet *ifp; 1061 int i; 1062 1063 ifp = sc->dc_ifp; 1064 1065 i = sc->dc_cdata.dc_tx_prod; 1066 DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); 1067 sc->dc_cdata.dc_tx_cnt++; 1068 sframe = &sc->dc_ldata->dc_tx_list[i]; 1069 sp = sc->dc_cdata.dc_sbuf; 1070 bzero(sp, DC_SFRAME_LEN); 1071 1072 sframe->dc_data = htole32(sc->dc_saddr); 1073 sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP | 1074 DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT); 1075 1076 sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf; 1077 1078 /* If we want promiscuous mode, set the allframes bit. 
*/ 1079 if (ifp->if_flags & IFF_PROMISC) 1080 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1081 else 1082 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1083 1084 if (ifp->if_flags & IFF_ALLMULTI) 1085 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1086 else 1087 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1088 1089 if_maddr_rlock(ifp); 1090 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1091 if (ifma->ifma_addr->sa_family != AF_LINK) 1092 continue; 1093 h = dc_mchash_le(sc, 1094 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 1095 sp[h >> 4] |= htole32(1 << (h & 0xF)); 1096 } 1097 if_maddr_runlock(ifp); 1098 1099 if (ifp->if_flags & IFF_BROADCAST) { 1100 h = dc_mchash_le(sc, ifp->if_broadcastaddr); 1101 sp[h >> 4] |= htole32(1 << (h & 0xF)); 1102 } 1103 1104 /* Set our MAC address. */ 1105 bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN); 1106 sp[39] = DC_SP_MAC(eaddr[0]); 1107 sp[40] = DC_SP_MAC(eaddr[1]); 1108 sp[41] = DC_SP_MAC(eaddr[2]); 1109 1110 sframe->dc_status = htole32(DC_TXSTAT_OWN); 1111 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 1112 1113 /* 1114 * The PNIC takes an exceedingly long time to process its 1115 * setup frame; wait 10ms after posting the setup frame 1116 * before proceeding, just so it has time to swallow its 1117 * medicine. 1118 */ 1119 DELAY(10000); 1120 1121 sc->dc_wdog_timer = 5; 1122 } 1123 1124 static void 1125 dc_setfilt_admtek(struct dc_softc *sc) 1126 { 1127 uint8_t eaddr[ETHER_ADDR_LEN]; 1128 struct ifnet *ifp; 1129 struct ifmultiaddr *ifma; 1130 int h = 0; 1131 u_int32_t hashes[2] = { 0, 0 }; 1132 1133 ifp = sc->dc_ifp; 1134 1135 /* Init our MAC address. */ 1136 bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN); 1137 CSR_WRITE_4(sc, DC_AL_PAR0, eaddr[3] << 24 | eaddr[2] << 16 | 1138 eaddr[1] << 8 | eaddr[0]); 1139 CSR_WRITE_4(sc, DC_AL_PAR1, eaddr[5] << 8 | eaddr[4]); 1140 1141 /* If we want promiscuous mode, set the allframes bit. */ 1142 if (ifp->if_flags & IFF_PROMISC) 1143 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1144 else 1145 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1146 1147 if (ifp->if_flags & IFF_ALLMULTI) 1148 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1149 else 1150 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1151 1152 /* First, zot all the existing hash bits. */ 1153 CSR_WRITE_4(sc, DC_AL_MAR0, 0); 1154 CSR_WRITE_4(sc, DC_AL_MAR1, 0); 1155 1156 /* 1157 * If we're already in promisc or allmulti mode, we 1158 * don't have to bother programming the multicast filter. 1159 */ 1160 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) 1161 return; 1162 1163 /* Now program new ones. */ 1164 if_maddr_rlock(ifp); 1165 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1166 if (ifma->ifma_addr->sa_family != AF_LINK) 1167 continue; 1168 if (DC_IS_CENTAUR(sc)) 1169 h = dc_mchash_le(sc, 1170 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 1171 else 1172 h = dc_mchash_be( 1173 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 1174 if (h < 32) 1175 hashes[0] |= (1 << h); 1176 else 1177 hashes[1] |= (1 << (h - 32)); 1178 } 1179 if_maddr_runlock(ifp); 1180 1181 CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]); 1182 CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]); 1183 } 1184 1185 static void 1186 dc_setfilt_asix(struct dc_softc *sc) 1187 { 1188 uint32_t eaddr[(ETHER_ADDR_LEN+3)/4]; 1189 struct ifnet *ifp; 1190 struct ifmultiaddr *ifma; 1191 int h = 0; 1192 u_int32_t hashes[2] = { 0, 0 }; 1193 1194 ifp = sc->dc_ifp; 1195 1196 /* Init our MAC address. 
*/ 1197 bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN); 1198 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0); 1199 CSR_WRITE_4(sc, DC_AX_FILTDATA, eaddr[0]); 1200 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1); 1201 CSR_WRITE_4(sc, DC_AX_FILTDATA, eaddr[1]); 1202 1203 /* If we want promiscuous mode, set the allframes bit. */ 1204 if (ifp->if_flags & IFF_PROMISC) 1205 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1206 else 1207 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1208 1209 if (ifp->if_flags & IFF_ALLMULTI) 1210 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1211 else 1212 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1213 1214 /* 1215 * The ASIX chip has a special bit to enable reception 1216 * of broadcast frames. 1217 */ 1218 if (ifp->if_flags & IFF_BROADCAST) 1219 DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); 1220 else 1221 DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); 1222 1223 /* first, zot all the existing hash bits */ 1224 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); 1225 CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); 1226 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); 1227 CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); 1228 1229 /* 1230 * If we're already in promisc or allmulti mode, we 1231 * don't have to bother programming the multicast filter. 1232 */ 1233 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) 1234 return; 1235 1236 /* now program new ones */ 1237 if_maddr_rlock(ifp); 1238 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1239 if (ifma->ifma_addr->sa_family != AF_LINK) 1240 continue; 1241 h = dc_mchash_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 1242 if (h < 32) 1243 hashes[0] |= (1 << h); 1244 else 1245 hashes[1] |= (1 << (h - 32)); 1246 } 1247 if_maddr_runlock(ifp); 1248 1249 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); 1250 CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]); 1251 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); 1252 CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]); 1253 } 1254 1255 static void 1256 dc_setfilt_xircom(struct dc_softc *sc) 1257 { 1258 uint16_t eaddr[(ETHER_ADDR_LEN+1)/2]; 1259 struct ifnet *ifp; 1260 struct ifmultiaddr *ifma; 1261 struct dc_desc *sframe; 1262 u_int32_t h, *sp; 1263 int i; 1264 1265 ifp = sc->dc_ifp; 1266 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)); 1267 1268 i = sc->dc_cdata.dc_tx_prod; 1269 DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); 1270 sc->dc_cdata.dc_tx_cnt++; 1271 sframe = &sc->dc_ldata->dc_tx_list[i]; 1272 sp = sc->dc_cdata.dc_sbuf; 1273 bzero(sp, DC_SFRAME_LEN); 1274 1275 sframe->dc_data = htole32(sc->dc_saddr); 1276 sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP | 1277 DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT); 1278 1279 sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf; 1280 1281 /* If we want promiscuous mode, set the allframes bit. 
*/ 1282 if (ifp->if_flags & IFF_PROMISC) 1283 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1284 else 1285 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1286 1287 if (ifp->if_flags & IFF_ALLMULTI) 1288 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1289 else 1290 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1291 1292 if_maddr_rlock(ifp); 1293 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1294 if (ifma->ifma_addr->sa_family != AF_LINK) 1295 continue; 1296 h = dc_mchash_le(sc, 1297 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 1298 sp[h >> 4] |= htole32(1 << (h & 0xF)); 1299 } 1300 if_maddr_runlock(ifp); 1301 1302 if (ifp->if_flags & IFF_BROADCAST) { 1303 h = dc_mchash_le(sc, ifp->if_broadcastaddr); 1304 sp[h >> 4] |= htole32(1 << (h & 0xF)); 1305 } 1306 1307 /* Set our MAC address. */ 1308 bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN); 1309 sp[0] = DC_SP_MAC(eaddr[0]); 1310 sp[1] = DC_SP_MAC(eaddr[1]); 1311 sp[2] = DC_SP_MAC(eaddr[2]); 1312 1313 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 1314 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); 1315 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1316 sframe->dc_status = htole32(DC_TXSTAT_OWN); 1317 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 1318 1319 /* 1320 * Wait some time... 1321 */ 1322 DELAY(1000); 1323 1324 sc->dc_wdog_timer = 5; 1325 } 1326 1327 static void 1328 dc_setfilt(struct dc_softc *sc) 1329 { 1330 1331 if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) || 1332 DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc)) 1333 dc_setfilt_21143(sc); 1334 1335 if (DC_IS_ASIX(sc)) 1336 dc_setfilt_asix(sc); 1337 1338 if (DC_IS_ADMTEK(sc)) 1339 dc_setfilt_admtek(sc); 1340 1341 if (DC_IS_XIRCOM(sc)) 1342 dc_setfilt_xircom(sc); 1343 } 1344 1345 /* 1346 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in 1347 * the netconfig register, we first have to put the transmit and/or 1348 * receive logic in the idle state. 1349 */ 1350 static void 1351 dc_setcfg(struct dc_softc *sc, int media) 1352 { 1353 int i, restart = 0, watchdogreg; 1354 u_int32_t isr; 1355 1356 if (IFM_SUBTYPE(media) == IFM_NONE) 1357 return; 1358 1359 if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)) { 1360 restart = 1; 1361 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)); 1362 1363 for (i = 0; i < DC_TIMEOUT; i++) { 1364 isr = CSR_READ_4(sc, DC_ISR); 1365 if (isr & DC_ISR_TX_IDLE && 1366 ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED || 1367 (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT)) 1368 break; 1369 DELAY(10); 1370 } 1371 1372 if (i == DC_TIMEOUT) { 1373 if (!(isr & DC_ISR_TX_IDLE) && !DC_IS_ASIX(sc)) 1374 device_printf(sc->dc_dev, 1375 "%s: failed to force tx to idle state\n", 1376 __func__); 1377 if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED || 1378 (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) && 1379 !DC_HAS_BROKEN_RXSTATE(sc)) 1380 device_printf(sc->dc_dev, 1381 "%s: failed to force rx to idle state\n", 1382 __func__); 1383 } 1384 } 1385 1386 if (IFM_SUBTYPE(media) == IFM_100_TX) { 1387 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL); 1388 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT); 1389 if (sc->dc_pmode == DC_PMODE_MII) { 1390 if (DC_IS_INTEL(sc)) { 1391 /* There's a write enable bit here that reads as 1. 
*/ 1392 watchdogreg = CSR_READ_4(sc, DC_WATCHDOG); 1393 watchdogreg &= ~DC_WDOG_CTLWREN; 1394 watchdogreg |= DC_WDOG_JABBERDIS; 1395 CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg); 1396 } else { 1397 DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS); 1398 } 1399 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS | 1400 DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER)); 1401 if (sc->dc_type == DC_TYPE_98713) 1402 DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS | 1403 DC_NETCFG_SCRAMBLER)); 1404 if (!DC_IS_DAVICOM(sc)) 1405 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1406 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1407 if (DC_IS_INTEL(sc)) 1408 dc_apply_fixup(sc, IFM_AUTO); 1409 } else { 1410 if (DC_IS_PNIC(sc)) { 1411 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL); 1412 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP); 1413 DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL); 1414 } 1415 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1416 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1417 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER); 1418 if (DC_IS_INTEL(sc)) 1419 dc_apply_fixup(sc, 1420 (media & IFM_GMASK) == IFM_FDX ? 1421 IFM_100_TX | IFM_FDX : IFM_100_TX); 1422 } 1423 } 1424 1425 if (IFM_SUBTYPE(media) == IFM_10_T) { 1426 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL); 1427 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT); 1428 if (sc->dc_pmode == DC_PMODE_MII) { 1429 /* There's a write enable bit here that reads as 1. */ 1430 if (DC_IS_INTEL(sc)) { 1431 watchdogreg = CSR_READ_4(sc, DC_WATCHDOG); 1432 watchdogreg &= ~DC_WDOG_CTLWREN; 1433 watchdogreg |= DC_WDOG_JABBERDIS; 1434 CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg); 1435 } else { 1436 DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS); 1437 } 1438 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS | 1439 DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER)); 1440 if (sc->dc_type == DC_TYPE_98713) 1441 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1442 if (!DC_IS_DAVICOM(sc)) 1443 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1444 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1445 if (DC_IS_INTEL(sc)) 1446 dc_apply_fixup(sc, IFM_AUTO); 1447 } else { 1448 if (DC_IS_PNIC(sc)) { 1449 DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL); 1450 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP); 1451 DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL); 1452 } 1453 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1454 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1455 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER); 1456 if (DC_IS_INTEL(sc)) { 1457 DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET); 1458 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1459 if ((media & IFM_GMASK) == IFM_FDX) 1460 DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D); 1461 else 1462 DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F); 1463 DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET); 1464 DC_CLRBIT(sc, DC_10BTCTRL, 1465 DC_TCTL_AUTONEGENBL); 1466 dc_apply_fixup(sc, 1467 (media & IFM_GMASK) == IFM_FDX ? 1468 IFM_10_T | IFM_FDX : IFM_10_T); 1469 DELAY(20000); 1470 } 1471 } 1472 } 1473 1474 /* 1475 * If this is a Davicom DM9102A card with a DM9801 HomePNA 1476 * PHY and we want HomePNA mode, set the portsel bit to turn 1477 * on the external MII port. 
1478 */ 1479 if (DC_IS_DAVICOM(sc)) { 1480 if (IFM_SUBTYPE(media) == IFM_HPNA_1) { 1481 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1482 sc->dc_link = 1; 1483 } else { 1484 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1485 } 1486 } 1487 1488 if ((media & IFM_GMASK) == IFM_FDX) { 1489 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX); 1490 if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc)) 1491 DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX); 1492 } else { 1493 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX); 1494 if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc)) 1495 DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX); 1496 } 1497 1498 if (restart) 1499 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON | DC_NETCFG_RX_ON); 1500 } 1501 1502 static void 1503 dc_reset(struct dc_softc *sc) 1504 { 1505 int i; 1506 1507 DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET); 1508 1509 for (i = 0; i < DC_TIMEOUT; i++) { 1510 DELAY(10); 1511 if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET)) 1512 break; 1513 } 1514 1515 if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_CONEXANT(sc) || 1516 DC_IS_XIRCOM(sc) || DC_IS_INTEL(sc)) { 1517 DELAY(10000); 1518 DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET); 1519 i = 0; 1520 } 1521 1522 if (i == DC_TIMEOUT) 1523 device_printf(sc->dc_dev, "reset never completed!\n"); 1524 1525 /* Wait a little while for the chip to get its brains in order. */ 1526 DELAY(1000); 1527 1528 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 1529 CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000); 1530 CSR_WRITE_4(sc, DC_NETCFG, 0x00000000); 1531 1532 /* 1533 * Bring the SIA out of reset. In some cases, it looks 1534 * like failing to unreset the SIA soon enough gets it 1535 * into a state where it will never come out of reset 1536 * until we reset the whole chip again. 1537 */ 1538 if (DC_IS_INTEL(sc)) { 1539 DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET); 1540 CSR_WRITE_4(sc, DC_10BTCTRL, 0); 1541 CSR_WRITE_4(sc, DC_WATCHDOG, 0); 1542 } 1543 } 1544 1545 static const struct dc_type * 1546 dc_devtype(device_t dev) 1547 { 1548 const struct dc_type *t; 1549 u_int32_t devid; 1550 u_int8_t rev; 1551 1552 t = dc_devs; 1553 devid = pci_get_devid(dev); 1554 rev = pci_get_revid(dev); 1555 1556 while (t->dc_name != NULL) { 1557 if (devid == t->dc_devid && rev >= t->dc_minrev) 1558 return (t); 1559 t++; 1560 } 1561 1562 return (NULL); 1563 } 1564 1565 /* 1566 * Probe for a 21143 or clone chip. Check the PCI vendor and device 1567 * IDs against our list and return a device name if we find a match. 1568 * We do a little bit of extra work to identify the exact type of 1569 * chip. The MX98713 and MX98713A have the same PCI vendor/device ID, 1570 * but different revision IDs. The same is true for 98715/98715A 1571 * chips and the 98725, as well as the ASIX and ADMtek chips. In some 1572 * cases, the exact chip revision affects driver behavior. 
1573 */ 1574 static int 1575 dc_probe(device_t dev) 1576 { 1577 const struct dc_type *t; 1578 1579 t = dc_devtype(dev); 1580 1581 if (t != NULL) { 1582 device_set_desc(dev, t->dc_name); 1583 return (BUS_PROBE_DEFAULT); 1584 } 1585 1586 return (ENXIO); 1587 } 1588 1589 static void 1590 dc_apply_fixup(struct dc_softc *sc, int media) 1591 { 1592 struct dc_mediainfo *m; 1593 u_int8_t *p; 1594 int i; 1595 u_int32_t reg; 1596 1597 m = sc->dc_mi; 1598 1599 while (m != NULL) { 1600 if (m->dc_media == media) 1601 break; 1602 m = m->dc_next; 1603 } 1604 1605 if (m == NULL) 1606 return; 1607 1608 for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) { 1609 reg = (p[0] | (p[1] << 8)) << 16; 1610 CSR_WRITE_4(sc, DC_WATCHDOG, reg); 1611 } 1612 1613 for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) { 1614 reg = (p[0] | (p[1] << 8)) << 16; 1615 CSR_WRITE_4(sc, DC_WATCHDOG, reg); 1616 } 1617 } 1618 1619 static void 1620 dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l) 1621 { 1622 struct dc_mediainfo *m; 1623 1624 m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO); 1625 switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) { 1626 case DC_SIA_CODE_10BT: 1627 m->dc_media = IFM_10_T; 1628 break; 1629 case DC_SIA_CODE_10BT_FDX: 1630 m->dc_media = IFM_10_T | IFM_FDX; 1631 break; 1632 case DC_SIA_CODE_10B2: 1633 m->dc_media = IFM_10_2; 1634 break; 1635 case DC_SIA_CODE_10B5: 1636 m->dc_media = IFM_10_5; 1637 break; 1638 default: 1639 break; 1640 } 1641 1642 /* 1643 * We need to ignore CSR13, CSR14, CSR15 for SIA mode. 1644 * Things apparently already work for cards that do 1645 * supply Media Specific Data. 1646 */ 1647 if (l->dc_sia_code & DC_SIA_CODE_EXT) { 1648 m->dc_gp_len = 2; 1649 m->dc_gp_ptr = 1650 (u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl; 1651 } else { 1652 m->dc_gp_len = 2; 1653 m->dc_gp_ptr = 1654 (u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl; 1655 } 1656 1657 m->dc_next = sc->dc_mi; 1658 sc->dc_mi = m; 1659 1660 sc->dc_pmode = DC_PMODE_SIA; 1661 } 1662 1663 static void 1664 dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l) 1665 { 1666 struct dc_mediainfo *m; 1667 1668 m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO); 1669 if (l->dc_sym_code == DC_SYM_CODE_100BT) 1670 m->dc_media = IFM_100_TX; 1671 1672 if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX) 1673 m->dc_media = IFM_100_TX | IFM_FDX; 1674 1675 m->dc_gp_len = 2; 1676 m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl; 1677 1678 m->dc_next = sc->dc_mi; 1679 sc->dc_mi = m; 1680 1681 sc->dc_pmode = DC_PMODE_SYM; 1682 } 1683 1684 static void 1685 dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l) 1686 { 1687 struct dc_mediainfo *m; 1688 u_int8_t *p; 1689 1690 m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO); 1691 /* We abuse IFM_AUTO to represent MII. 
*/ 1692 m->dc_media = IFM_AUTO; 1693 m->dc_gp_len = l->dc_gpr_len; 1694 1695 p = (u_int8_t *)l; 1696 p += sizeof(struct dc_eblock_mii); 1697 m->dc_gp_ptr = p; 1698 p += 2 * l->dc_gpr_len; 1699 m->dc_reset_len = *p; 1700 p++; 1701 m->dc_reset_ptr = p; 1702 1703 m->dc_next = sc->dc_mi; 1704 sc->dc_mi = m; 1705 } 1706 1707 static void 1708 dc_read_srom(struct dc_softc *sc, int bits) 1709 { 1710 int size; 1711 1712 size = 2 << bits; 1713 sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT); 1714 dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0); 1715 } 1716 1717 static void 1718 dc_parse_21143_srom(struct dc_softc *sc) 1719 { 1720 struct dc_leaf_hdr *lhdr; 1721 struct dc_eblock_hdr *hdr; 1722 int have_mii, i, loff; 1723 char *ptr; 1724 1725 have_mii = 0; 1726 loff = sc->dc_srom[27]; 1727 lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]); 1728 1729 ptr = (char *)lhdr; 1730 ptr += sizeof(struct dc_leaf_hdr) - 1; 1731 /* 1732 * Look if we got a MII media block. 1733 */ 1734 for (i = 0; i < lhdr->dc_mcnt; i++) { 1735 hdr = (struct dc_eblock_hdr *)ptr; 1736 if (hdr->dc_type == DC_EBLOCK_MII) 1737 have_mii++; 1738 1739 ptr += (hdr->dc_len & 0x7F); 1740 ptr++; 1741 } 1742 1743 /* 1744 * Do the same thing again. Only use SIA and SYM media 1745 * blocks if no MII media block is available. 1746 */ 1747 ptr = (char *)lhdr; 1748 ptr += sizeof(struct dc_leaf_hdr) - 1; 1749 for (i = 0; i < lhdr->dc_mcnt; i++) { 1750 hdr = (struct dc_eblock_hdr *)ptr; 1751 switch (hdr->dc_type) { 1752 case DC_EBLOCK_MII: 1753 dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr); 1754 break; 1755 case DC_EBLOCK_SIA: 1756 if (! have_mii) 1757 dc_decode_leaf_sia(sc, 1758 (struct dc_eblock_sia *)hdr); 1759 break; 1760 case DC_EBLOCK_SYM: 1761 if (! have_mii) 1762 dc_decode_leaf_sym(sc, 1763 (struct dc_eblock_sym *)hdr); 1764 break; 1765 default: 1766 /* Don't care. Yet. */ 1767 break; 1768 } 1769 ptr += (hdr->dc_len & 0x7F); 1770 ptr++; 1771 } 1772 } 1773 1774 static void 1775 dc_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1776 { 1777 u_int32_t *paddr; 1778 1779 KASSERT(nseg == 1, 1780 ("%s: wrong number of segments (%d)", __func__, nseg)); 1781 paddr = arg; 1782 *paddr = segs->ds_addr; 1783 } 1784 1785 /* 1786 * Attach the interface. Allocate softc structures, do ifmedia 1787 * setup and ethernet/BPF attach. 1788 */ 1789 static int 1790 dc_attach(device_t dev) 1791 { 1792 uint32_t eaddr[(ETHER_ADDR_LEN+3)/4]; 1793 u_int32_t command; 1794 struct dc_softc *sc; 1795 struct ifnet *ifp; 1796 u_int32_t reg, revision; 1797 int error, i, mac_offset, phy, rid, tmp; 1798 u_int8_t *mac; 1799 1800 sc = device_get_softc(dev); 1801 sc->dc_dev = dev; 1802 1803 mtx_init(&sc->dc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1804 MTX_DEF); 1805 1806 /* 1807 * Map control/status registers. 1808 */ 1809 pci_enable_busmaster(dev); 1810 1811 rid = DC_RID; 1812 sc->dc_res = bus_alloc_resource_any(dev, DC_RES, &rid, RF_ACTIVE); 1813 1814 if (sc->dc_res == NULL) { 1815 device_printf(dev, "couldn't map ports/memory\n"); 1816 error = ENXIO; 1817 goto fail; 1818 } 1819 1820 sc->dc_btag = rman_get_bustag(sc->dc_res); 1821 sc->dc_bhandle = rman_get_bushandle(sc->dc_res); 1822 1823 /* Allocate interrupt. */ 1824 rid = 0; 1825 sc->dc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1826 RF_SHAREABLE | RF_ACTIVE); 1827 1828 if (sc->dc_irq == NULL) { 1829 device_printf(dev, "couldn't map interrupt\n"); 1830 error = ENXIO; 1831 goto fail; 1832 } 1833 1834 /* Need this info to decide on a chip type. 
*/ 1835 sc->dc_info = dc_devtype(dev); 1836 revision = pci_get_revid(dev); 1837 1838 /* Get the eeprom width, but PNIC and XIRCOM have diff eeprom */ 1839 if (sc->dc_info->dc_devid != 1840 DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168) && 1841 sc->dc_info->dc_devid != 1842 DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201)) 1843 dc_eeprom_width(sc); 1844 1845 switch (sc->dc_info->dc_devid) { 1846 case DC_DEVID(DC_VENDORID_DEC, DC_DEVICEID_21143): 1847 sc->dc_type = DC_TYPE_21143; 1848 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; 1849 sc->dc_flags |= DC_REDUCED_MII_POLL; 1850 /* Save EEPROM contents so we can parse them later. */ 1851 dc_read_srom(sc, sc->dc_romwidth); 1852 break; 1853 case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009): 1854 case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100): 1855 case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102): 1856 sc->dc_type = DC_TYPE_DM9102; 1857 sc->dc_flags |= DC_TX_COALESCE | DC_TX_INTR_ALWAYS; 1858 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_TX_STORENFWD; 1859 sc->dc_flags |= DC_TX_ALIGN; 1860 sc->dc_pmode = DC_PMODE_MII; 1861 1862 /* Increase the latency timer value. */ 1863 pci_write_config(dev, PCIR_LATTIMER, 0x80, 1); 1864 break; 1865 case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AL981): 1866 sc->dc_type = DC_TYPE_AL981; 1867 sc->dc_flags |= DC_TX_USE_TX_INTR; 1868 sc->dc_flags |= DC_TX_ADMTEK_WAR; 1869 sc->dc_pmode = DC_PMODE_MII; 1870 dc_read_srom(sc, sc->dc_romwidth); 1871 break; 1872 case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN983): 1873 case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN985): 1874 case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511): 1875 case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513): 1876 case DC_DEVID(DC_VENDORID_DLINK, DC_DEVICEID_DRP32TXD): 1877 case DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500): 1878 case DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500MX): 1879 case DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN2242): 1880 case DC_DEVID(DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX): 1881 case DC_DEVID(DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T): 1882 case DC_DEVID(DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB): 1883 case DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120): 1884 case DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130): 1885 case DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB08): 1886 case DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB09): 1887 sc->dc_type = DC_TYPE_AN983; 1888 sc->dc_flags |= DC_64BIT_HASH; 1889 sc->dc_flags |= DC_TX_USE_TX_INTR; 1890 sc->dc_flags |= DC_TX_ADMTEK_WAR; 1891 sc->dc_pmode = DC_PMODE_MII; 1892 /* Don't read SROM for - auto-loaded on reset */ 1893 break; 1894 case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713): 1895 case DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP): 1896 if (revision < DC_REVISION_98713A) { 1897 sc->dc_type = DC_TYPE_98713; 1898 } 1899 if (revision >= DC_REVISION_98713A) { 1900 sc->dc_type = DC_TYPE_98713A; 1901 sc->dc_flags |= DC_21143_NWAY; 1902 } 1903 sc->dc_flags |= DC_REDUCED_MII_POLL; 1904 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; 1905 break; 1906 case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5): 1907 case DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN1217): 1908 /* 1909 * Macronix MX98715AEC-C/D/E parts have only a 1910 * 128-bit hash table. We need to deal with these 1911 * in the same manner as the PNIC II so that we 1912 * get the right number of bits out of the 1913 * CRC routine. 
1914 */ 1915 if (revision >= DC_REVISION_98715AEC_C && 1916 revision < DC_REVISION_98725) 1917 sc->dc_flags |= DC_128BIT_HASH; 1918 sc->dc_type = DC_TYPE_987x5; 1919 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; 1920 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY; 1921 break; 1922 case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98727): 1923 sc->dc_type = DC_TYPE_987x5; 1924 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; 1925 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY; 1926 break; 1927 case DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C115): 1928 sc->dc_type = DC_TYPE_PNICII; 1929 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR | DC_128BIT_HASH; 1930 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY; 1931 break; 1932 case DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168): 1933 sc->dc_type = DC_TYPE_PNIC; 1934 sc->dc_flags |= DC_TX_STORENFWD | DC_TX_INTR_ALWAYS; 1935 sc->dc_flags |= DC_PNIC_RX_BUG_WAR; 1936 sc->dc_pnic_rx_buf = malloc(DC_RXLEN * 5, M_DEVBUF, M_NOWAIT); 1937 if (revision < DC_REVISION_82C169) 1938 sc->dc_pmode = DC_PMODE_SYM; 1939 break; 1940 case DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A): 1941 sc->dc_type = DC_TYPE_ASIX; 1942 sc->dc_flags |= DC_TX_USE_TX_INTR | DC_TX_INTR_FIRSTFRAG; 1943 sc->dc_flags |= DC_REDUCED_MII_POLL; 1944 sc->dc_pmode = DC_PMODE_MII; 1945 break; 1946 case DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201): 1947 sc->dc_type = DC_TYPE_XIRCOM; 1948 sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE | 1949 DC_TX_ALIGN; 1950 /* 1951 * We don't actually need to coalesce, but we're doing 1952 * it to obtain a double word aligned buffer. 1953 * The DC_TX_COALESCE flag is required. 1954 */ 1955 sc->dc_pmode = DC_PMODE_MII; 1956 break; 1957 case DC_DEVID(DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112): 1958 sc->dc_type = DC_TYPE_CONEXANT; 1959 sc->dc_flags |= DC_TX_INTR_ALWAYS; 1960 sc->dc_flags |= DC_REDUCED_MII_POLL; 1961 sc->dc_pmode = DC_PMODE_MII; 1962 dc_read_srom(sc, sc->dc_romwidth); 1963 break; 1964 default: 1965 device_printf(dev, "unknown device: %x\n", 1966 sc->dc_info->dc_devid); 1967 break; 1968 } 1969 1970 /* Save the cache line size. */ 1971 if (DC_IS_DAVICOM(sc)) 1972 sc->dc_cachesize = 0; 1973 else 1974 sc->dc_cachesize = pci_get_cachelnsz(dev); 1975 1976 /* Reset the adapter. */ 1977 dc_reset(sc); 1978 1979 /* Take 21143 out of snooze mode */ 1980 if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) { 1981 command = pci_read_config(dev, DC_PCI_CFDD, 4); 1982 command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE); 1983 pci_write_config(dev, DC_PCI_CFDD, command, 4); 1984 } 1985 1986 /* 1987 * Try to learn something about the supported media. 1988 * We know that ASIX and ADMtek and Davicom devices 1989 * will *always* be using MII media, so that's a no-brainer. 1990 * The tricky ones are the Macronix/PNIC II and the 1991 * Intel 21143. 1992 */ 1993 if (DC_IS_INTEL(sc)) 1994 dc_parse_21143_srom(sc); 1995 else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { 1996 if (sc->dc_type == DC_TYPE_98713) 1997 sc->dc_pmode = DC_PMODE_MII; 1998 else 1999 sc->dc_pmode = DC_PMODE_SYM; 2000 } else if (!sc->dc_pmode) 2001 sc->dc_pmode = DC_PMODE_MII; 2002 2003 /* 2004 * Get station address from the EEPROM. 
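 * The cases below cover the different places the address can live:
 * a fixed or indirect EEPROM offset, the ADMtek PAR0/PAR1 registers,
 * the saved SROM image (Conexant), the CardBus CIS (Xircom), or the
 * Open Firmware properties for onboard sparc64 Davicom parts. For
 * the register case the byte order follows the shifts used below:
 * if PAR0 read back as, say, 0x44332211, the first four address
 * bytes would be 11:22:33:44.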
2005 */ 2006 switch(sc->dc_type) { 2007 case DC_TYPE_98713: 2008 case DC_TYPE_98713A: 2009 case DC_TYPE_987x5: 2010 case DC_TYPE_PNICII: 2011 dc_read_eeprom(sc, (caddr_t)&mac_offset, 2012 (DC_EE_NODEADDR_OFFSET / 2), 1, 0); 2013 dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0); 2014 break; 2015 case DC_TYPE_PNIC: 2016 dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1); 2017 break; 2018 case DC_TYPE_DM9102: 2019 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); 2020 #ifdef __sparc64__ 2021 /* 2022 * If this is an onboard dc(4) the station address read from 2023 * the EEPROM is all zero and we have to get it from the FCode. 2024 */ 2025 if (eaddr[0] == 0 && (eaddr[1] & ~0xffff) == 0) 2026 OF_getetheraddr(dev, (caddr_t)&eaddr); 2027 #endif 2028 break; 2029 case DC_TYPE_21143: 2030 case DC_TYPE_ASIX: 2031 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); 2032 break; 2033 case DC_TYPE_AL981: 2034 case DC_TYPE_AN983: 2035 reg = CSR_READ_4(sc, DC_AL_PAR0); 2036 mac = (uint8_t *)&eaddr[0]; 2037 mac[0] = (reg >> 0) & 0xff; 2038 mac[1] = (reg >> 8) & 0xff; 2039 mac[2] = (reg >> 16) & 0xff; 2040 mac[3] = (reg >> 24) & 0xff; 2041 reg = CSR_READ_4(sc, DC_AL_PAR1); 2042 mac[4] = (reg >> 0) & 0xff; 2043 mac[5] = (reg >> 8) & 0xff; 2044 break; 2045 case DC_TYPE_CONEXANT: 2046 bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR, &eaddr, 2047 ETHER_ADDR_LEN); 2048 break; 2049 case DC_TYPE_XIRCOM: 2050 /* The MAC comes from the CIS. */ 2051 mac = pci_get_ether(dev); 2052 if (!mac) { 2053 device_printf(dev, "No station address in CIS!\n"); 2054 error = ENXIO; 2055 goto fail; 2056 } 2057 bcopy(mac, eaddr, ETHER_ADDR_LEN); 2058 break; 2059 default: 2060 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); 2061 break; 2062 } 2063 2064 bcopy(eaddr, sc->dc_eaddr, sizeof(eaddr)); 2065 /* 2066 * If we still have invalid station address, see whether we can 2067 * find station address for chip 0. Some multi-port controllers 2068 * just store station address for chip 0 if they have a shared 2069 * SROM. 2070 */ 2071 if ((sc->dc_eaddr[0] == 0 && (sc->dc_eaddr[1] & ~0xffff) == 0) || 2072 (sc->dc_eaddr[0] == 0xffffffff && 2073 (sc->dc_eaddr[1] & 0xffff) == 0xffff)) { 2074 if (dc_check_multiport(sc) == 0) 2075 bcopy(sc->dc_eaddr, eaddr, sizeof(eaddr)); 2076 } 2077 2078 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */ 2079 error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0, 2080 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 2081 sizeof(struct dc_list_data), 1, sizeof(struct dc_list_data), 2082 0, NULL, NULL, &sc->dc_ltag); 2083 if (error) { 2084 device_printf(dev, "failed to allocate busdma tag\n"); 2085 error = ENXIO; 2086 goto fail; 2087 } 2088 error = bus_dmamem_alloc(sc->dc_ltag, (void **)&sc->dc_ldata, 2089 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->dc_lmap); 2090 if (error) { 2091 device_printf(dev, "failed to allocate DMA safe memory\n"); 2092 error = ENXIO; 2093 goto fail; 2094 } 2095 error = bus_dmamap_load(sc->dc_ltag, sc->dc_lmap, sc->dc_ldata, 2096 sizeof(struct dc_list_data), dc_dma_map_addr, &sc->dc_laddr, 2097 BUS_DMA_NOWAIT); 2098 if (error) { 2099 device_printf(dev, "cannot get address of the descriptors\n"); 2100 error = ENXIO; 2101 goto fail; 2102 } 2103 2104 /* 2105 * Allocate a busdma tag and DMA safe memory for the multicast 2106 * setup frame. 
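 * This repeats the same three-step busdma pattern used for the
 * descriptor list above: create a tag sized for the setup frame,
 * allocate DMA-safe memory against it, and load the map with
 * dc_dma_map_addr() as the callback so the single segment's bus
 * address lands in dc_saddr.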
2107 */ 2108 error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0, 2109 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 2110 DC_SFRAME_LEN + DC_MIN_FRAMELEN, 1, DC_SFRAME_LEN + DC_MIN_FRAMELEN, 2111 0, NULL, NULL, &sc->dc_stag); 2112 if (error) { 2113 device_printf(dev, "failed to allocate busdma tag\n"); 2114 error = ENXIO; 2115 goto fail; 2116 } 2117 error = bus_dmamem_alloc(sc->dc_stag, (void **)&sc->dc_cdata.dc_sbuf, 2118 BUS_DMA_NOWAIT, &sc->dc_smap); 2119 if (error) { 2120 device_printf(dev, "failed to allocate DMA safe memory\n"); 2121 error = ENXIO; 2122 goto fail; 2123 } 2124 error = bus_dmamap_load(sc->dc_stag, sc->dc_smap, sc->dc_cdata.dc_sbuf, 2125 DC_SFRAME_LEN, dc_dma_map_addr, &sc->dc_saddr, BUS_DMA_NOWAIT); 2126 if (error) { 2127 device_printf(dev, "cannot get address of the descriptors\n"); 2128 error = ENXIO; 2129 goto fail; 2130 } 2131 2132 /* Allocate a busdma tag for mbufs. */ 2133 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, 2134 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 2135 MCLBYTES * DC_MAXFRAGS, DC_MAXFRAGS, MCLBYTES, 2136 0, NULL, NULL, &sc->dc_mtag); 2137 if (error) { 2138 device_printf(dev, "failed to allocate busdma tag\n"); 2139 error = ENXIO; 2140 goto fail; 2141 } 2142 2143 /* Create the TX/RX busdma maps. */ 2144 for (i = 0; i < DC_TX_LIST_CNT; i++) { 2145 error = bus_dmamap_create(sc->dc_mtag, 0, 2146 &sc->dc_cdata.dc_tx_map[i]); 2147 if (error) { 2148 device_printf(dev, "failed to init TX ring\n"); 2149 error = ENXIO; 2150 goto fail; 2151 } 2152 } 2153 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2154 error = bus_dmamap_create(sc->dc_mtag, 0, 2155 &sc->dc_cdata.dc_rx_map[i]); 2156 if (error) { 2157 device_printf(dev, "failed to init RX ring\n"); 2158 error = ENXIO; 2159 goto fail; 2160 } 2161 } 2162 error = bus_dmamap_create(sc->dc_mtag, 0, &sc->dc_sparemap); 2163 if (error) { 2164 device_printf(dev, "failed to init RX ring\n"); 2165 error = ENXIO; 2166 goto fail; 2167 } 2168 2169 ifp = sc->dc_ifp = if_alloc(IFT_ETHER); 2170 if (ifp == NULL) { 2171 device_printf(dev, "can not if_alloc()\n"); 2172 error = ENOSPC; 2173 goto fail; 2174 } 2175 ifp->if_softc = sc; 2176 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2177 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2178 ifp->if_ioctl = dc_ioctl; 2179 ifp->if_start = dc_start; 2180 ifp->if_init = dc_init; 2181 IFQ_SET_MAXLEN(&ifp->if_snd, DC_TX_LIST_CNT - 1); 2182 ifp->if_snd.ifq_drv_maxlen = DC_TX_LIST_CNT - 1; 2183 IFQ_SET_READY(&ifp->if_snd); 2184 2185 /* 2186 * Do MII setup. If this is a 21143, check for a PHY on the 2187 * MII bus after applying any necessary fixups to twiddle the 2188 * GPIO bits. If we don't end up finding a PHY, restore the 2189 * old selection (SIA only or SIA/SYM) and attach the dcphy 2190 * driver instead. 2191 */ 2192 tmp = 0; 2193 if (DC_IS_INTEL(sc)) { 2194 dc_apply_fixup(sc, IFM_AUTO); 2195 tmp = sc->dc_pmode; 2196 sc->dc_pmode = DC_PMODE_MII; 2197 } 2198 2199 /* 2200 * Setup General Purpose port mode and data so the tulip can talk 2201 * to the MII. This needs to be done before mii_attach so that 2202 * we can actually see them. 
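 * For the Xircom parts this means writing DC_SIAGP twice, first with
 * DC_SIAGP_WRITE_EN set and then without it, with a short delay after
 * each write, presumably to latch the GP pin directions before the
 * data is driven and the MII bus is probed.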
2203 */ 2204 if (DC_IS_XIRCOM(sc)) { 2205 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | 2206 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2207 DELAY(10); 2208 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | 2209 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2210 DELAY(10); 2211 } 2212 2213 phy = MII_PHY_ANY; 2214 /* 2215 * Note: both the AL981 and AN983 have internal PHYs, however the 2216 * AL981 provides direct access to the PHY registers while the AN983 2217 * uses a serial MII interface. The AN983's MII interface is also 2218 * buggy in that you can read from any MII address (0 to 31), but 2219 * only address 1 behaves normally. To deal with both cases, we 2220 * pretend that the PHY is at MII address 1. 2221 */ 2222 if (DC_IS_ADMTEK(sc)) 2223 phy = DC_ADMTEK_PHYADDR; 2224 2225 /* 2226 * Note: the ukphy probes of the RS7112 report a PHY at MII address 2227 * 0 (possibly HomePNA?) and 1 (ethernet) so we only respond to the 2228 * correct one. 2229 */ 2230 if (DC_IS_CONEXANT(sc)) 2231 phy = DC_CONEXANT_PHYADDR; 2232 2233 error = mii_attach(dev, &sc->dc_miibus, ifp, dc_ifmedia_upd, 2234 dc_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); 2235 2236 if (error && DC_IS_INTEL(sc)) { 2237 sc->dc_pmode = tmp; 2238 if (sc->dc_pmode != DC_PMODE_SIA) 2239 sc->dc_pmode = DC_PMODE_SYM; 2240 sc->dc_flags |= DC_21143_NWAY; 2241 mii_attach(dev, &sc->dc_miibus, ifp, dc_ifmedia_upd, 2242 dc_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, 2243 MII_OFFSET_ANY, 0); 2244 /* 2245 * For non-MII cards, we need to have the 21143 2246 * drive the LEDs. Except there are some systems 2247 * like the NEC VersaPro NoteBook PC which have no 2248 * LEDs, and twiddling these bits has adverse effects 2249 * on them. (I.e. you suddenly can't get a link.) 2250 */ 2251 if (!(pci_get_subvendor(dev) == 0x1033 && 2252 pci_get_subdevice(dev) == 0x8028)) 2253 sc->dc_flags |= DC_TULIP_LEDS; 2254 error = 0; 2255 } 2256 2257 if (error) { 2258 device_printf(dev, "attaching PHYs failed\n"); 2259 goto fail; 2260 } 2261 2262 if (DC_IS_ADMTEK(sc)) { 2263 /* 2264 * Set automatic TX underrun recovery for the ADMtek chips 2265 */ 2266 DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR); 2267 } 2268 2269 /* 2270 * Tell the upper layer(s) we support long frames. 2271 */ 2272 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2273 ifp->if_capabilities |= IFCAP_VLAN_MTU; 2274 ifp->if_capenable = ifp->if_capabilities; 2275 #ifdef DEVICE_POLLING 2276 ifp->if_capabilities |= IFCAP_POLLING; 2277 #endif 2278 2279 callout_init_mtx(&sc->dc_stat_ch, &sc->dc_mtx, 0); 2280 callout_init_mtx(&sc->dc_wdog_ch, &sc->dc_mtx, 0); 2281 2282 /* 2283 * Call MI attach routine. 2284 */ 2285 ether_ifattach(ifp, (caddr_t)eaddr); 2286 2287 /* Hook interrupt last to avoid having to lock softc */ 2288 error = bus_setup_intr(dev, sc->dc_irq, INTR_TYPE_NET | INTR_MPSAFE, 2289 NULL, dc_intr, sc, &sc->dc_intrhand); 2290 2291 if (error) { 2292 device_printf(dev, "couldn't set up irq\n"); 2293 ether_ifdetach(ifp); 2294 goto fail; 2295 } 2296 2297 fail: 2298 if (error) 2299 dc_detach(dev); 2300 return (error); 2301 } 2302 2303 /* 2304 * Shutdown hardware and free up resources. This can be called any 2305 * time after the mutex has been initialized. It is called in both 2306 * the error case in attach and the normal detach case so it needs 2307 * to be careful about only freeing resources that have actually been 2308 * allocated. 
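 * That is why nearly every release below is guarded by a check that
 * the corresponding handle was actually set up: attach simply jumps
 * to its fail: label on error and lets this routine unwind whatever
 * does exist.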
2309 */ 2310 static int 2311 dc_detach(device_t dev) 2312 { 2313 struct dc_softc *sc; 2314 struct ifnet *ifp; 2315 struct dc_mediainfo *m; 2316 int i; 2317 2318 sc = device_get_softc(dev); 2319 KASSERT(mtx_initialized(&sc->dc_mtx), ("dc mutex not initialized")); 2320 2321 ifp = sc->dc_ifp; 2322 2323 #ifdef DEVICE_POLLING 2324 if (ifp->if_capenable & IFCAP_POLLING) 2325 ether_poll_deregister(ifp); 2326 #endif 2327 2328 /* These should only be active if attach succeeded */ 2329 if (device_is_attached(dev)) { 2330 DC_LOCK(sc); 2331 dc_stop(sc); 2332 DC_UNLOCK(sc); 2333 callout_drain(&sc->dc_stat_ch); 2334 callout_drain(&sc->dc_wdog_ch); 2335 ether_ifdetach(ifp); 2336 } 2337 if (sc->dc_miibus) 2338 device_delete_child(dev, sc->dc_miibus); 2339 bus_generic_detach(dev); 2340 2341 if (sc->dc_intrhand) 2342 bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand); 2343 if (sc->dc_irq) 2344 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq); 2345 if (sc->dc_res) 2346 bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); 2347 2348 if (ifp) 2349 if_free(ifp); 2350 2351 if (sc->dc_cdata.dc_sbuf != NULL) 2352 bus_dmamem_free(sc->dc_stag, sc->dc_cdata.dc_sbuf, sc->dc_smap); 2353 if (sc->dc_ldata != NULL) 2354 bus_dmamem_free(sc->dc_ltag, sc->dc_ldata, sc->dc_lmap); 2355 if (sc->dc_mtag) { 2356 for (i = 0; i < DC_TX_LIST_CNT; i++) 2357 if (sc->dc_cdata.dc_tx_map[i] != NULL) 2358 bus_dmamap_destroy(sc->dc_mtag, 2359 sc->dc_cdata.dc_tx_map[i]); 2360 for (i = 0; i < DC_RX_LIST_CNT; i++) 2361 if (sc->dc_cdata.dc_rx_map[i] != NULL) 2362 bus_dmamap_destroy(sc->dc_mtag, 2363 sc->dc_cdata.dc_rx_map[i]); 2364 bus_dmamap_destroy(sc->dc_mtag, sc->dc_sparemap); 2365 } 2366 if (sc->dc_stag) 2367 bus_dma_tag_destroy(sc->dc_stag); 2368 if (sc->dc_mtag) 2369 bus_dma_tag_destroy(sc->dc_mtag); 2370 if (sc->dc_ltag) 2371 bus_dma_tag_destroy(sc->dc_ltag); 2372 2373 free(sc->dc_pnic_rx_buf, M_DEVBUF); 2374 2375 while (sc->dc_mi != NULL) { 2376 m = sc->dc_mi->dc_next; 2377 free(sc->dc_mi, M_DEVBUF); 2378 sc->dc_mi = m; 2379 } 2380 free(sc->dc_srom, M_DEVBUF); 2381 2382 mtx_destroy(&sc->dc_mtx); 2383 2384 return (0); 2385 } 2386 2387 /* 2388 * Initialize the transmit descriptors. 2389 */ 2390 static int 2391 dc_list_tx_init(struct dc_softc *sc) 2392 { 2393 struct dc_chain_data *cd; 2394 struct dc_list_data *ld; 2395 int i, nexti; 2396 2397 cd = &sc->dc_cdata; 2398 ld = sc->dc_ldata; 2399 for (i = 0; i < DC_TX_LIST_CNT; i++) { 2400 if (i == DC_TX_LIST_CNT - 1) 2401 nexti = 0; 2402 else 2403 nexti = i + 1; 2404 ld->dc_tx_list[i].dc_next = htole32(DC_TXDESC(sc, nexti)); 2405 cd->dc_tx_chain[i] = NULL; 2406 ld->dc_tx_list[i].dc_data = 0; 2407 ld->dc_tx_list[i].dc_ctl = 0; 2408 } 2409 2410 cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0; 2411 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, 2412 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2413 return (0); 2414 } 2415 2416 2417 /* 2418 * Initialize the RX descriptors and allocate mbufs for them. Note that 2419 * we arrange the descriptors in a closed ring, so that the last descriptor 2420 * points back to the first. 
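 * In other words, descriptor i links to (i + 1) % DC_RX_LIST_CNT; in
 * a hypothetical four-entry ring the links would run 0 -> 1 -> 2 ->
 * 3 -> 0, which is exactly what the nexti computation below produces.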
2421 */ 2422 static int 2423 dc_list_rx_init(struct dc_softc *sc) 2424 { 2425 struct dc_chain_data *cd; 2426 struct dc_list_data *ld; 2427 int i, nexti; 2428 2429 cd = &sc->dc_cdata; 2430 ld = sc->dc_ldata; 2431 2432 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2433 if (dc_newbuf(sc, i, 1) != 0) 2434 return (ENOBUFS); 2435 if (i == DC_RX_LIST_CNT - 1) 2436 nexti = 0; 2437 else 2438 nexti = i + 1; 2439 ld->dc_rx_list[i].dc_next = htole32(DC_RXDESC(sc, nexti)); 2440 } 2441 2442 cd->dc_rx_prod = 0; 2443 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, 2444 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2445 return (0); 2446 } 2447 2448 /* 2449 * Initialize an RX descriptor and attach an MBUF cluster. 2450 */ 2451 static int 2452 dc_newbuf(struct dc_softc *sc, int i, int alloc) 2453 { 2454 struct mbuf *m_new; 2455 bus_dmamap_t tmp; 2456 bus_dma_segment_t segs[1]; 2457 int error, nseg; 2458 2459 if (alloc) { 2460 m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2461 if (m_new == NULL) 2462 return (ENOBUFS); 2463 } else { 2464 m_new = sc->dc_cdata.dc_rx_chain[i]; 2465 m_new->m_data = m_new->m_ext.ext_buf; 2466 } 2467 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 2468 m_adj(m_new, sizeof(u_int64_t)); 2469 2470 /* 2471 * If this is a PNIC chip, zero the buffer. This is part 2472 * of the workaround for the receive bug in the 82c168 and 2473 * 82c169 chips. 2474 */ 2475 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) 2476 bzero(mtod(m_new, char *), m_new->m_len); 2477 2478 /* No need to remap the mbuf if we're reusing it. */ 2479 if (alloc) { 2480 error = bus_dmamap_load_mbuf_sg(sc->dc_mtag, sc->dc_sparemap, 2481 m_new, segs, &nseg, 0); 2482 if (error) { 2483 m_freem(m_new); 2484 return (error); 2485 } 2486 KASSERT(nseg == 1, 2487 ("%s: wrong number of segments (%d)", __func__, nseg)); 2488 sc->dc_ldata->dc_rx_list[i].dc_data = htole32(segs->ds_addr); 2489 bus_dmamap_unload(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i]); 2490 tmp = sc->dc_cdata.dc_rx_map[i]; 2491 sc->dc_cdata.dc_rx_map[i] = sc->dc_sparemap; 2492 sc->dc_sparemap = tmp; 2493 sc->dc_cdata.dc_rx_chain[i] = m_new; 2494 } 2495 2496 sc->dc_ldata->dc_rx_list[i].dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN); 2497 sc->dc_ldata->dc_rx_list[i].dc_status = htole32(DC_RXSTAT_OWN); 2498 bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i], 2499 BUS_DMASYNC_PREREAD); 2500 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, 2501 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2502 return (0); 2503 } 2504 2505 /* 2506 * Grrrrr. 2507 * The PNIC chip has a terrible bug in it that manifests itself during 2508 * periods of heavy activity. The exact mode of failure if difficult to 2509 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it 2510 * will happen on slow machines. The bug is that sometimes instead of 2511 * uploading one complete frame during reception, it uploads what looks 2512 * like the entire contents of its FIFO memory. The frame we want is at 2513 * the end of the whole mess, but we never know exactly how much data has 2514 * been uploaded, so salvaging the frame is hard. 2515 * 2516 * There is only one way to do it reliably, and it's disgusting. 2517 * Here's what we know: 2518 * 2519 * - We know there will always be somewhere between one and three extra 2520 * descriptors uploaded. 2521 * 2522 * - We know the desired received frame will always be at the end of the 2523 * total data upload. 2524 * 2525 * - We know the size of the desired received frame because it will be 2526 * provided in the length field of the status word in the last descriptor. 
2527 * 2528 * Here's what we do: 2529 * 2530 * - When we allocate buffers for the receive ring, we bzero() them. 2531 * This means that we know that the buffer contents should be all 2532 * zeros, except for data uploaded by the chip. 2533 * 2534 * - We also force the PNIC chip to upload frames that include the 2535 * ethernet CRC at the end. 2536 * 2537 * - We gather all of the bogus frame data into a single buffer. 2538 * 2539 * - We then position a pointer at the end of this buffer and scan 2540 * backwards until we encounter the first non-zero byte of data. 2541 * This is the end of the received frame. We know we will encounter 2542 * some data at the end of the frame because the CRC will always be 2543 * there, so even if the sender transmits a packet of all zeros, 2544 * we won't be fooled. 2545 * 2546 * - We know the size of the actual received frame, so we subtract 2547 * that value from the current pointer location. This brings us 2548 * to the start of the actual received packet. 2549 * 2550 * - We copy this into an mbuf and pass it on, along with the actual 2551 * frame length. 2552 * 2553 * The performance hit is tremendous, but it beats dropping frames all 2554 * the time. 2555 */ 2556 2557 #define DC_WHOLEFRAME (DC_RXSTAT_FIRSTFRAG | DC_RXSTAT_LASTFRAG) 2558 static void 2559 dc_pnic_rx_bug_war(struct dc_softc *sc, int idx) 2560 { 2561 struct dc_desc *cur_rx; 2562 struct dc_desc *c = NULL; 2563 struct mbuf *m = NULL; 2564 unsigned char *ptr; 2565 int i, total_len; 2566 u_int32_t rxstat = 0; 2567 2568 i = sc->dc_pnic_rx_bug_save; 2569 cur_rx = &sc->dc_ldata->dc_rx_list[idx]; 2570 ptr = sc->dc_pnic_rx_buf; 2571 bzero(ptr, DC_RXLEN * 5); 2572 2573 /* Copy all the bytes from the bogus buffers. */ 2574 while (1) { 2575 c = &sc->dc_ldata->dc_rx_list[i]; 2576 rxstat = le32toh(c->dc_status); 2577 m = sc->dc_cdata.dc_rx_chain[i]; 2578 bcopy(mtod(m, char *), ptr, DC_RXLEN); 2579 ptr += DC_RXLEN; 2580 /* If this is the last buffer, break out. */ 2581 if (i == idx || rxstat & DC_RXSTAT_LASTFRAG) 2582 break; 2583 dc_newbuf(sc, i, 0); 2584 DC_INC(i, DC_RX_LIST_CNT); 2585 } 2586 2587 /* Find the length of the actual receive frame. */ 2588 total_len = DC_RXBYTES(rxstat); 2589 2590 /* Scan backwards until we hit a non-zero byte. */ 2591 while (*ptr == 0x00) 2592 ptr--; 2593 2594 /* Round off. */ 2595 if ((uintptr_t)(ptr) & 0x3) 2596 ptr -= 1; 2597 2598 /* Now find the start of the frame. */ 2599 ptr -= total_len; 2600 if (ptr < sc->dc_pnic_rx_buf) 2601 ptr = sc->dc_pnic_rx_buf; 2602 2603 /* 2604 * Now copy the salvaged frame to the last mbuf and fake up 2605 * the status word to make it look like a successful 2606 * frame reception. 2607 */ 2608 dc_newbuf(sc, i, 0); 2609 bcopy(ptr, mtod(m, char *), total_len); 2610 cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG); 2611 } 2612 2613 /* 2614 * This routine searches the RX ring for dirty descriptors in the 2615 * event that the rxeof routine falls out of sync with the chip's 2616 * current descriptor pointer. This may happen sometimes as a result 2617 * of a "no RX buffer available" condition that happens when the chip 2618 * consumes all of the RX buffers before the driver has a chance to 2619 * process the RX ring. This routine may need to be called more than 2620 * once to bring the driver back in sync with the chip, however we 2621 * should still be getting RX DONE interrupts to drive the search 2622 * for new packets in the RX ring, so we should catch up eventually. 
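 * The scan below simply starts at dc_rx_prod and walks forward until
 * it finds a descriptor the host owns again (OWN bit clear); if one
 * turns up, dc_rx_prod is moved there and EAGAIN tells the caller to
 * run the RX handler one more time.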
2623 */ 2624 static int 2625 dc_rx_resync(struct dc_softc *sc) 2626 { 2627 struct dc_desc *cur_rx; 2628 int i, pos; 2629 2630 pos = sc->dc_cdata.dc_rx_prod; 2631 2632 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2633 cur_rx = &sc->dc_ldata->dc_rx_list[pos]; 2634 if (!(le32toh(cur_rx->dc_status) & DC_RXSTAT_OWN)) 2635 break; 2636 DC_INC(pos, DC_RX_LIST_CNT); 2637 } 2638 2639 /* If the ring really is empty, then just return. */ 2640 if (i == DC_RX_LIST_CNT) 2641 return (0); 2642 2643 /* We've fallen behing the chip: catch it. */ 2644 sc->dc_cdata.dc_rx_prod = pos; 2645 2646 return (EAGAIN); 2647 } 2648 2649 /* 2650 * A frame has been uploaded: pass the resulting mbuf chain up to 2651 * the higher level protocols. 2652 */ 2653 static int 2654 dc_rxeof(struct dc_softc *sc) 2655 { 2656 struct mbuf *m, *m0; 2657 struct ifnet *ifp; 2658 struct dc_desc *cur_rx; 2659 int i, total_len, rx_npkts; 2660 u_int32_t rxstat; 2661 2662 DC_LOCK_ASSERT(sc); 2663 2664 ifp = sc->dc_ifp; 2665 i = sc->dc_cdata.dc_rx_prod; 2666 total_len = 0; 2667 rx_npkts = 0; 2668 2669 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD); 2670 while (!(le32toh(sc->dc_ldata->dc_rx_list[i].dc_status) & 2671 DC_RXSTAT_OWN)) { 2672 #ifdef DEVICE_POLLING 2673 if (ifp->if_capenable & IFCAP_POLLING) { 2674 if (sc->rxcycles <= 0) 2675 break; 2676 sc->rxcycles--; 2677 } 2678 #endif 2679 cur_rx = &sc->dc_ldata->dc_rx_list[i]; 2680 rxstat = le32toh(cur_rx->dc_status); 2681 m = sc->dc_cdata.dc_rx_chain[i]; 2682 bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i], 2683 BUS_DMASYNC_POSTREAD); 2684 total_len = DC_RXBYTES(rxstat); 2685 2686 if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) { 2687 if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) { 2688 if (rxstat & DC_RXSTAT_FIRSTFRAG) 2689 sc->dc_pnic_rx_bug_save = i; 2690 if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) { 2691 DC_INC(i, DC_RX_LIST_CNT); 2692 continue; 2693 } 2694 dc_pnic_rx_bug_war(sc, i); 2695 rxstat = le32toh(cur_rx->dc_status); 2696 total_len = DC_RXBYTES(rxstat); 2697 } 2698 } 2699 2700 /* 2701 * If an error occurs, update stats, clear the 2702 * status word and leave the mbuf cluster in place: 2703 * it should simply get re-used next time this descriptor 2704 * comes up in the ring. However, don't report long 2705 * frames as errors since they could be vlans. 2706 */ 2707 if ((rxstat & DC_RXSTAT_RXERR)) { 2708 if (!(rxstat & DC_RXSTAT_GIANT) || 2709 (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE | 2710 DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN | 2711 DC_RXSTAT_RUNT | DC_RXSTAT_DE))) { 2712 ifp->if_ierrors++; 2713 if (rxstat & DC_RXSTAT_COLLSEEN) 2714 ifp->if_collisions++; 2715 dc_newbuf(sc, i, 0); 2716 if (rxstat & DC_RXSTAT_CRCERR) { 2717 DC_INC(i, DC_RX_LIST_CNT); 2718 continue; 2719 } else { 2720 dc_init_locked(sc); 2721 return (rx_npkts); 2722 } 2723 } 2724 } 2725 2726 /* No errors; receive the packet. */ 2727 total_len -= ETHER_CRC_LEN; 2728 #ifdef __NO_STRICT_ALIGNMENT 2729 /* 2730 * On architectures without alignment problems we try to 2731 * allocate a new buffer for the receive ring, and pass up 2732 * the one where the packet is already, saving the expensive 2733 * copy done in m_devget(). 2734 * If we are on an architecture with alignment problems, or 2735 * if the allocation fails, then use m_devget and leave the 2736 * existing buffer in the receive ring. 
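 * The ETHER_ALIGN offset passed to m_devget() below starts the copy
 * two bytes into the new mbuf, so the IP header behind the 14-byte
 * Ethernet header ends up 32-bit aligned; that is what makes the
 * copy path the safe choice on strict-alignment machines.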
2737 */ 2738 if (dc_newbuf(sc, i, 1) == 0) { 2739 m->m_pkthdr.rcvif = ifp; 2740 m->m_pkthdr.len = m->m_len = total_len; 2741 DC_INC(i, DC_RX_LIST_CNT); 2742 } else 2743 #endif 2744 { 2745 m0 = m_devget(mtod(m, char *), total_len, 2746 ETHER_ALIGN, ifp, NULL); 2747 dc_newbuf(sc, i, 0); 2748 DC_INC(i, DC_RX_LIST_CNT); 2749 if (m0 == NULL) { 2750 ifp->if_ierrors++; 2751 continue; 2752 } 2753 m = m0; 2754 } 2755 2756 ifp->if_ipackets++; 2757 DC_UNLOCK(sc); 2758 (*ifp->if_input)(ifp, m); 2759 DC_LOCK(sc); 2760 rx_npkts++; 2761 } 2762 2763 sc->dc_cdata.dc_rx_prod = i; 2764 return (rx_npkts); 2765 } 2766 2767 /* 2768 * A frame was downloaded to the chip. It's safe for us to clean up 2769 * the list buffers. 2770 */ 2771 static void 2772 dc_txeof(struct dc_softc *sc) 2773 { 2774 struct dc_desc *cur_tx = NULL; 2775 struct ifnet *ifp; 2776 int idx; 2777 u_int32_t ctl, txstat; 2778 2779 ifp = sc->dc_ifp; 2780 2781 /* 2782 * Go through our tx list and free mbufs for those 2783 * frames that have been transmitted. 2784 */ 2785 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD); 2786 idx = sc->dc_cdata.dc_tx_cons; 2787 while (idx != sc->dc_cdata.dc_tx_prod) { 2788 2789 cur_tx = &sc->dc_ldata->dc_tx_list[idx]; 2790 txstat = le32toh(cur_tx->dc_status); 2791 ctl = le32toh(cur_tx->dc_ctl); 2792 2793 if (txstat & DC_TXSTAT_OWN) 2794 break; 2795 2796 if (!(ctl & DC_TXCTL_LASTFRAG) || ctl & DC_TXCTL_SETUP) { 2797 if (ctl & DC_TXCTL_SETUP) { 2798 /* 2799 * Yes, the PNIC is so brain damaged 2800 * that it will sometimes generate a TX 2801 * underrun error while DMAing the RX 2802 * filter setup frame. If we detect this, 2803 * we have to send the setup frame again, 2804 * or else the filter won't be programmed 2805 * correctly. 2806 */ 2807 if (DC_IS_PNIC(sc)) { 2808 if (txstat & DC_TXSTAT_ERRSUM) 2809 dc_setfilt(sc); 2810 } 2811 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2812 } 2813 sc->dc_cdata.dc_tx_cnt--; 2814 DC_INC(idx, DC_TX_LIST_CNT); 2815 continue; 2816 } 2817 2818 if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) { 2819 /* 2820 * XXX: Why does my Xircom taunt me so? 2821 * For some reason it likes setting the CARRLOST flag 2822 * even when the carrier is there. wtf?!? 2823 * Who knows, but Conexant chips have the 2824 * same problem. Maybe they took lessons 2825 * from Xircom. 
2826 */ 2827 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2828 sc->dc_pmode == DC_PMODE_MII && 2829 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM | 2830 DC_TXSTAT_NOCARRIER))) 2831 txstat &= ~DC_TXSTAT_ERRSUM; 2832 } else { 2833 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2834 sc->dc_pmode == DC_PMODE_MII && 2835 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM | 2836 DC_TXSTAT_NOCARRIER | DC_TXSTAT_CARRLOST))) 2837 txstat &= ~DC_TXSTAT_ERRSUM; 2838 } 2839 2840 if (txstat & DC_TXSTAT_ERRSUM) { 2841 ifp->if_oerrors++; 2842 if (txstat & DC_TXSTAT_EXCESSCOLL) 2843 ifp->if_collisions++; 2844 if (txstat & DC_TXSTAT_LATECOLL) 2845 ifp->if_collisions++; 2846 if (!(txstat & DC_TXSTAT_UNDERRUN)) { 2847 dc_init_locked(sc); 2848 return; 2849 } 2850 } 2851 2852 ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3; 2853 2854 ifp->if_opackets++; 2855 if (sc->dc_cdata.dc_tx_chain[idx] != NULL) { 2856 bus_dmamap_sync(sc->dc_mtag, 2857 sc->dc_cdata.dc_tx_map[idx], 2858 BUS_DMASYNC_POSTWRITE); 2859 bus_dmamap_unload(sc->dc_mtag, 2860 sc->dc_cdata.dc_tx_map[idx]); 2861 m_freem(sc->dc_cdata.dc_tx_chain[idx]); 2862 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2863 } 2864 2865 sc->dc_cdata.dc_tx_cnt--; 2866 DC_INC(idx, DC_TX_LIST_CNT); 2867 } 2868 sc->dc_cdata.dc_tx_cons = idx; 2869 2870 if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt > DC_TX_LIST_RSVD) 2871 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2872 2873 if (sc->dc_cdata.dc_tx_cnt == 0) 2874 sc->dc_wdog_timer = 0; 2875 } 2876 2877 static void 2878 dc_tick(void *xsc) 2879 { 2880 struct dc_softc *sc; 2881 struct mii_data *mii; 2882 struct ifnet *ifp; 2883 u_int32_t r; 2884 2885 sc = xsc; 2886 DC_LOCK_ASSERT(sc); 2887 ifp = sc->dc_ifp; 2888 mii = device_get_softc(sc->dc_miibus); 2889 2890 if (sc->dc_flags & DC_REDUCED_MII_POLL) { 2891 if (sc->dc_flags & DC_21143_NWAY) { 2892 r = CSR_READ_4(sc, DC_10BTSTAT); 2893 if (IFM_SUBTYPE(mii->mii_media_active) == 2894 IFM_100_TX && (r & DC_TSTAT_LS100)) { 2895 sc->dc_link = 0; 2896 mii_mediachg(mii); 2897 } 2898 if (IFM_SUBTYPE(mii->mii_media_active) == 2899 IFM_10_T && (r & DC_TSTAT_LS10)) { 2900 sc->dc_link = 0; 2901 mii_mediachg(mii); 2902 } 2903 if (sc->dc_link == 0) 2904 mii_tick(mii); 2905 } else { 2906 /* 2907 * For NICs which never report DC_RXSTATE_WAIT, we 2908 * have to bite the bullet... 2909 */ 2910 if ((DC_HAS_BROKEN_RXSTATE(sc) || (CSR_READ_4(sc, 2911 DC_ISR) & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) && 2912 sc->dc_cdata.dc_tx_cnt == 0) { 2913 mii_tick(mii); 2914 if (!(mii->mii_media_status & IFM_ACTIVE)) 2915 sc->dc_link = 0; 2916 } 2917 } 2918 } else 2919 mii_tick(mii); 2920 2921 /* 2922 * When the init routine completes, we expect to be able to send 2923 * packets right away, and in fact the network code will send a 2924 * gratuitous ARP the moment the init routine marks the interface 2925 * as running. However, even though the MAC may have been initialized, 2926 * there may be a delay of a few seconds before the PHY completes 2927 * autonegotiation and the link is brought up. Any transmissions 2928 * made during that delay will be lost. Dealing with this is tricky: 2929 * we can't just pause in the init routine while waiting for the 2930 * PHY to come ready since that would bring the whole system to 2931 * a screeching halt for several seconds. 2932 * 2933 * What we do here is prevent the TX start routine from sending 2934 * any packets until a link has been established. After the 2935 * interface has been initialized, the tick routine will poll 2936 * the state of the PHY until the IFM_ACTIVE flag is set. 
Until 2937 * that time, packets will stay in the send queue, and once the 2938 * link comes up, they will be flushed out to the wire. 2939 */ 2940 if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE && 2941 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2942 sc->dc_link++; 2943 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2944 dc_start_locked(ifp); 2945 } 2946 2947 if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link) 2948 callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc); 2949 else 2950 callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc); 2951 } 2952 2953 /* 2954 * A transmit underrun has occurred. Back off the transmit threshold, 2955 * or switch to store and forward mode if we have to. 2956 */ 2957 static void 2958 dc_tx_underrun(struct dc_softc *sc) 2959 { 2960 u_int32_t isr; 2961 int i; 2962 2963 if (DC_IS_DAVICOM(sc)) 2964 dc_init_locked(sc); 2965 2966 if (DC_IS_INTEL(sc)) { 2967 /* 2968 * The real 21143 requires that the transmitter be idle 2969 * in order to change the transmit threshold or store 2970 * and forward state. 2971 */ 2972 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 2973 2974 for (i = 0; i < DC_TIMEOUT; i++) { 2975 isr = CSR_READ_4(sc, DC_ISR); 2976 if (isr & DC_ISR_TX_IDLE) 2977 break; 2978 DELAY(10); 2979 } 2980 if (i == DC_TIMEOUT) { 2981 device_printf(sc->dc_dev, 2982 "%s: failed to force tx to idle state\n", 2983 __func__); 2984 dc_init_locked(sc); 2985 } 2986 } 2987 2988 device_printf(sc->dc_dev, "TX underrun -- "); 2989 sc->dc_txthresh += DC_TXTHRESH_INC; 2990 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 2991 printf("using store and forward mode\n"); 2992 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 2993 } else { 2994 printf("increasing TX threshold\n"); 2995 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 2996 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 2997 } 2998 2999 if (DC_IS_INTEL(sc)) 3000 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3001 } 3002 3003 #ifdef DEVICE_POLLING 3004 static poll_handler_t dc_poll; 3005 3006 static int 3007 dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 3008 { 3009 struct dc_softc *sc = ifp->if_softc; 3010 int rx_npkts = 0; 3011 3012 DC_LOCK(sc); 3013 3014 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3015 DC_UNLOCK(sc); 3016 return (rx_npkts); 3017 } 3018 3019 sc->rxcycles = count; 3020 rx_npkts = dc_rxeof(sc); 3021 dc_txeof(sc); 3022 if (!IFQ_IS_EMPTY(&ifp->if_snd) && 3023 !(ifp->if_drv_flags & IFF_DRV_OACTIVE)) 3024 dc_start_locked(ifp); 3025 3026 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 3027 u_int32_t status; 3028 3029 status = CSR_READ_4(sc, DC_ISR); 3030 status &= (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF | 3031 DC_ISR_TX_NOBUF | DC_ISR_TX_IDLE | DC_ISR_TX_UNDERRUN | 3032 DC_ISR_BUS_ERR); 3033 if (!status) { 3034 DC_UNLOCK(sc); 3035 return (rx_npkts); 3036 } 3037 /* ack what we have */ 3038 CSR_WRITE_4(sc, DC_ISR, status); 3039 3040 if (status & (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF)) { 3041 u_int32_t r = CSR_READ_4(sc, DC_FRAMESDISCARDED); 3042 ifp->if_ierrors += (r & 0xffff) + ((r >> 17) & 0x7ff); 3043 3044 if (dc_rx_resync(sc)) 3045 dc_rxeof(sc); 3046 } 3047 /* restart transmit unit if necessary */ 3048 if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt) 3049 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 3050 3051 if (status & DC_ISR_TX_UNDERRUN) 3052 dc_tx_underrun(sc); 3053 3054 if (status & DC_ISR_BUS_ERR) { 3055 if_printf(ifp, "%s: bus error\n", __func__); 3056 dc_reset(sc); 3057 dc_init_locked(sc); 3058 } 3059 } 3060 DC_UNLOCK(sc); 3061 return (rx_npkts); 3062 } 3063 #endif /* DEVICE_POLLING */ 
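/*
 * Interrupt handler. The loop below reads DC_ISR, acknowledges
 * whatever interesting bits are set by writing them back, and keeps
 * going until no DC_INTRS bits remain or the register reads as all
 * ones (presumably meaning the card has gone away). RX and TX
 * completions, underruns and bus errors are each dispatched to the
 * corresponding helper; interrupts are masked for the duration and
 * re-enabled on the way out.
 */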
3064 3065 static void 3066 dc_intr(void *arg) 3067 { 3068 struct dc_softc *sc; 3069 struct ifnet *ifp; 3070 u_int32_t status; 3071 3072 sc = arg; 3073 3074 if (sc->suspended) 3075 return; 3076 3077 if ((CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0) 3078 return; 3079 3080 DC_LOCK(sc); 3081 ifp = sc->dc_ifp; 3082 #ifdef DEVICE_POLLING 3083 if (ifp->if_capenable & IFCAP_POLLING) { 3084 DC_UNLOCK(sc); 3085 return; 3086 } 3087 #endif 3088 3089 /* Suppress unwanted interrupts */ 3090 if (!(ifp->if_flags & IFF_UP)) { 3091 if (CSR_READ_4(sc, DC_ISR) & DC_INTRS) 3092 dc_stop(sc); 3093 DC_UNLOCK(sc); 3094 return; 3095 } 3096 3097 /* Disable interrupts. */ 3098 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3099 3100 while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) && 3101 status != 0xFFFFFFFF && 3102 (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3103 3104 CSR_WRITE_4(sc, DC_ISR, status); 3105 3106 if (status & DC_ISR_RX_OK) { 3107 int curpkts; 3108 curpkts = ifp->if_ipackets; 3109 dc_rxeof(sc); 3110 if (curpkts == ifp->if_ipackets) { 3111 while (dc_rx_resync(sc)) 3112 dc_rxeof(sc); 3113 } 3114 } 3115 3116 if (status & (DC_ISR_TX_OK | DC_ISR_TX_NOBUF)) 3117 dc_txeof(sc); 3118 3119 if (status & DC_ISR_TX_IDLE) { 3120 dc_txeof(sc); 3121 if (sc->dc_cdata.dc_tx_cnt) { 3122 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3123 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 3124 } 3125 } 3126 3127 if (status & DC_ISR_TX_UNDERRUN) 3128 dc_tx_underrun(sc); 3129 3130 if ((status & DC_ISR_RX_WATDOGTIMEO) 3131 || (status & DC_ISR_RX_NOBUF)) { 3132 int curpkts; 3133 curpkts = ifp->if_ipackets; 3134 dc_rxeof(sc); 3135 if (curpkts == ifp->if_ipackets) { 3136 while (dc_rx_resync(sc)) 3137 dc_rxeof(sc); 3138 } 3139 } 3140 3141 if (status & DC_ISR_BUS_ERR) { 3142 dc_reset(sc); 3143 dc_init_locked(sc); 3144 } 3145 } 3146 3147 /* Re-enable interrupts. */ 3148 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 3149 3150 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3151 dc_start_locked(ifp); 3152 3153 DC_UNLOCK(sc); 3154 } 3155 3156 /* 3157 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 3158 * pointers to the fragment pointers. 3159 */ 3160 static int 3161 dc_encap(struct dc_softc *sc, struct mbuf **m_head) 3162 { 3163 bus_dma_segment_t segs[DC_MAXFRAGS]; 3164 struct dc_desc *f; 3165 struct mbuf *m; 3166 int cur, defragged, error, first, frag, i, idx, nseg; 3167 3168 /* 3169 * If there's no way we can send any packets, return now. 3170 */ 3171 if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt <= DC_TX_LIST_RSVD) 3172 return (ENOBUFS); 3173 3174 m = NULL; 3175 defragged = 0; 3176 if (sc->dc_flags & DC_TX_COALESCE && 3177 ((*m_head)->m_next != NULL || sc->dc_flags & DC_TX_ALIGN)) { 3178 m = m_defrag(*m_head, M_DONTWAIT); 3179 defragged = 1; 3180 } else { 3181 /* 3182 * Count the number of frags in this chain to see if we 3183 * need to m_collapse. Since the descriptor list is shared 3184 * by all packets, we'll m_collapse long chains so that they 3185 * do not use up the entire list, even if they would fit. 
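 * Concretely, the check below collapses any chain with more than
 * DC_TX_LIST_CNT/4 fragments, and any chain that would crowd the
 * ring's DC_TX_LIST_RSVD reserved slots, down to at most DC_MAXFRAGS
 * segments via m_collapse().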
3186 */ 3187 i = 0; 3188 for (m = *m_head; m != NULL; m = m->m_next) 3189 i++; 3190 if (i > DC_TX_LIST_CNT / 4 || 3191 DC_TX_LIST_CNT - i + sc->dc_cdata.dc_tx_cnt <= 3192 DC_TX_LIST_RSVD) { 3193 m = m_collapse(*m_head, M_DONTWAIT, DC_MAXFRAGS); 3194 defragged = 1; 3195 } 3196 } 3197 if (defragged != 0) { 3198 if (m == NULL) { 3199 m_freem(*m_head); 3200 *m_head = NULL; 3201 return (ENOBUFS); 3202 } 3203 *m_head = m; 3204 } 3205 3206 idx = sc->dc_cdata.dc_tx_prod; 3207 error = bus_dmamap_load_mbuf_sg(sc->dc_mtag, 3208 sc->dc_cdata.dc_tx_map[idx], *m_head, segs, &nseg, 0); 3209 if (error == EFBIG) { 3210 if (defragged != 0 || (m = m_collapse(*m_head, M_DONTWAIT, 3211 DC_MAXFRAGS)) == NULL) { 3212 m_freem(*m_head); 3213 *m_head = NULL; 3214 return (defragged != 0 ? error : ENOBUFS); 3215 } 3216 *m_head = m; 3217 error = bus_dmamap_load_mbuf_sg(sc->dc_mtag, 3218 sc->dc_cdata.dc_tx_map[idx], *m_head, segs, &nseg, 0); 3219 if (error != 0) { 3220 m_freem(*m_head); 3221 *m_head = NULL; 3222 return (error); 3223 } 3224 } else if (error != 0) 3225 return (error); 3226 KASSERT(nseg <= DC_MAXFRAGS, 3227 ("%s: wrong number of segments (%d)", __func__, nseg)); 3228 if (nseg == 0) { 3229 m_freem(*m_head); 3230 *m_head = NULL; 3231 return (EIO); 3232 } 3233 3234 first = cur = frag = sc->dc_cdata.dc_tx_prod; 3235 for (i = 0; i < nseg; i++) { 3236 if ((sc->dc_flags & DC_TX_ADMTEK_WAR) && 3237 (frag == (DC_TX_LIST_CNT - 1)) && 3238 (first != sc->dc_cdata.dc_tx_first)) { 3239 bus_dmamap_unload(sc->dc_mtag, 3240 sc->dc_cdata.dc_tx_map[first]); 3241 m_freem(*m_head); 3242 *m_head = NULL; 3243 return (ENOBUFS); 3244 } 3245 3246 f = &sc->dc_ldata->dc_tx_list[frag]; 3247 f->dc_ctl = htole32(DC_TXCTL_TLINK | segs[i].ds_len); 3248 if (i == 0) { 3249 f->dc_status = 0; 3250 f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG); 3251 } else 3252 f->dc_status = htole32(DC_TXSTAT_OWN); 3253 f->dc_data = htole32(segs[i].ds_addr); 3254 cur = frag; 3255 DC_INC(frag, DC_TX_LIST_CNT); 3256 } 3257 3258 sc->dc_cdata.dc_tx_prod = frag; 3259 sc->dc_cdata.dc_tx_cnt += nseg; 3260 sc->dc_cdata.dc_tx_chain[cur] = *m_head; 3261 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG); 3262 if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG) 3263 sc->dc_ldata->dc_tx_list[first].dc_ctl |= 3264 htole32(DC_TXCTL_FINT); 3265 if (sc->dc_flags & DC_TX_INTR_ALWAYS) 3266 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT); 3267 if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64) 3268 sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT); 3269 sc->dc_ldata->dc_tx_list[first].dc_status = htole32(DC_TXSTAT_OWN); 3270 3271 bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx], 3272 BUS_DMASYNC_PREWRITE); 3273 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, 3274 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 3275 return (0); 3276 } 3277 3278 static void 3279 dc_start(struct ifnet *ifp) 3280 { 3281 struct dc_softc *sc; 3282 3283 sc = ifp->if_softc; 3284 DC_LOCK(sc); 3285 dc_start_locked(ifp); 3286 DC_UNLOCK(sc); 3287 } 3288 3289 /* 3290 * Main transmit routine 3291 * To avoid having to do mbuf copies, we put pointers to the mbuf data 3292 * regions directly in the transmit lists. We also save a copy of the 3293 * pointers since the transmit list fragment pointers are physical 3294 * addresses. 
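 * Frames are pulled off the send queue until dc_encap() fails or the
 * ring fills; on failure the mbuf is put back at the head of the
 * queue and IFF_DRV_OACTIVE is set so the stack stops handing us
 * packets. When anything was queued, the TX demand register is
 * kicked (unless DC_TX_POLL is set, in which case the chip is left
 * to poll the list itself) and the watchdog is armed for five
 * seconds.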
3295 */ 3296 static void 3297 dc_start_locked(struct ifnet *ifp) 3298 { 3299 struct dc_softc *sc; 3300 struct mbuf *m_head = NULL; 3301 unsigned int queued = 0; 3302 int idx; 3303 3304 sc = ifp->if_softc; 3305 3306 DC_LOCK_ASSERT(sc); 3307 3308 if (!sc->dc_link && ifp->if_snd.ifq_len < 10) 3309 return; 3310 3311 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) 3312 return; 3313 3314 idx = sc->dc_cdata.dc_tx_first = sc->dc_cdata.dc_tx_prod; 3315 3316 while (sc->dc_cdata.dc_tx_chain[idx] == NULL) { 3317 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 3318 if (m_head == NULL) 3319 break; 3320 3321 if (dc_encap(sc, &m_head)) { 3322 if (m_head == NULL) 3323 break; 3324 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 3325 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3326 break; 3327 } 3328 idx = sc->dc_cdata.dc_tx_prod; 3329 3330 queued++; 3331 /* 3332 * If there's a BPF listener, bounce a copy of this frame 3333 * to him. 3334 */ 3335 BPF_MTAP(ifp, m_head); 3336 3337 if (sc->dc_flags & DC_TX_ONE) { 3338 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3339 break; 3340 } 3341 } 3342 3343 if (queued > 0) { 3344 /* Transmit */ 3345 if (!(sc->dc_flags & DC_TX_POLL)) 3346 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 3347 3348 /* 3349 * Set a timeout in case the chip goes out to lunch. 3350 */ 3351 sc->dc_wdog_timer = 5; 3352 } 3353 } 3354 3355 static void 3356 dc_init(void *xsc) 3357 { 3358 struct dc_softc *sc = xsc; 3359 3360 DC_LOCK(sc); 3361 dc_init_locked(sc); 3362 DC_UNLOCK(sc); 3363 } 3364 3365 static void 3366 dc_init_locked(struct dc_softc *sc) 3367 { 3368 struct ifnet *ifp = sc->dc_ifp; 3369 struct mii_data *mii; 3370 3371 DC_LOCK_ASSERT(sc); 3372 3373 mii = device_get_softc(sc->dc_miibus); 3374 3375 /* 3376 * Cancel pending I/O and free all RX/TX buffers. 3377 */ 3378 dc_stop(sc); 3379 dc_reset(sc); 3380 3381 /* 3382 * Set cache alignment and burst length. 3383 */ 3384 if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc)) 3385 CSR_WRITE_4(sc, DC_BUSCTL, 0); 3386 else 3387 CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME | DC_BUSCTL_MRLE); 3388 /* 3389 * Evenly share the bus between receive and transmit process. 3390 */ 3391 if (DC_IS_INTEL(sc)) 3392 DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION); 3393 if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) { 3394 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA); 3395 } else { 3396 DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG); 3397 } 3398 if (sc->dc_flags & DC_TX_POLL) 3399 DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1); 3400 switch(sc->dc_cachesize) { 3401 case 32: 3402 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG); 3403 break; 3404 case 16: 3405 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG); 3406 break; 3407 case 8: 3408 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG); 3409 break; 3410 case 0: 3411 default: 3412 DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE); 3413 break; 3414 } 3415 3416 if (sc->dc_flags & DC_TX_STORENFWD) 3417 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3418 else { 3419 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 3420 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3421 } else { 3422 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3423 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 3424 } 3425 } 3426 3427 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC); 3428 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF); 3429 3430 if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { 3431 /* 3432 * The app notes for the 98713 and 98715A say that 3433 * in order to have the chips operate properly, a magic 3434 * number must be written to CSR16. 
Macronix does not 3435 * document the meaning of these bits so there's no way 3436 * to know exactly what they do. The 98713 has a magic 3437 * number all its own; the rest all use a different one. 3438 */ 3439 DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000); 3440 if (sc->dc_type == DC_TYPE_98713) 3441 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713); 3442 else 3443 DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715); 3444 } 3445 3446 if (DC_IS_XIRCOM(sc)) { 3447 /* 3448 * setup General Purpose Port mode and data so the tulip 3449 * can talk to the MII. 3450 */ 3451 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | 3452 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 3453 DELAY(10); 3454 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | 3455 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 3456 DELAY(10); 3457 } 3458 3459 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 3460 DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN); 3461 3462 /* Init circular RX list. */ 3463 if (dc_list_rx_init(sc) == ENOBUFS) { 3464 device_printf(sc->dc_dev, 3465 "initialization failed: no memory for rx buffers\n"); 3466 dc_stop(sc); 3467 return; 3468 } 3469 3470 /* 3471 * Init TX descriptors. 3472 */ 3473 dc_list_tx_init(sc); 3474 3475 /* 3476 * Load the address of the RX list. 3477 */ 3478 CSR_WRITE_4(sc, DC_RXADDR, DC_RXDESC(sc, 0)); 3479 CSR_WRITE_4(sc, DC_TXADDR, DC_TXDESC(sc, 0)); 3480 3481 /* 3482 * Enable interrupts. 3483 */ 3484 #ifdef DEVICE_POLLING 3485 /* 3486 * ... but only if we are not polling, and make sure they are off in 3487 * the case of polling. Some cards (e.g. fxp) turn interrupts on 3488 * after a reset. 3489 */ 3490 if (ifp->if_capenable & IFCAP_POLLING) 3491 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3492 else 3493 #endif 3494 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 3495 CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF); 3496 3497 /* Enable transmitter. */ 3498 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3499 3500 /* 3501 * If this is an Intel 21143 and we're not using the 3502 * MII port, program the LED control pins so we get 3503 * link and activity indications. 3504 */ 3505 if (sc->dc_flags & DC_TULIP_LEDS) { 3506 CSR_WRITE_4(sc, DC_WATCHDOG, 3507 DC_WDOG_CTLWREN | DC_WDOG_LINK | DC_WDOG_ACTIVITY); 3508 CSR_WRITE_4(sc, DC_WATCHDOG, 0); 3509 } 3510 3511 /* 3512 * Load the RX/multicast filter. We do this sort of late 3513 * because the filter programming scheme on the 21143 and 3514 * some clones requires DMAing a setup frame via the TX 3515 * engine, and we need the transmitter enabled for that. 3516 */ 3517 dc_setfilt(sc); 3518 3519 /* Enable receiver. */ 3520 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); 3521 CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF); 3522 3523 mii_mediachg(mii); 3524 dc_setcfg(sc, sc->dc_if_media); 3525 3526 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3527 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3528 3529 /* Don't start the ticker if this is a homePNA link. */ 3530 if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1) 3531 sc->dc_link = 1; 3532 else { 3533 if (sc->dc_flags & DC_21143_NWAY) 3534 callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc); 3535 else 3536 callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc); 3537 } 3538 3539 sc->dc_wdog_timer = 0; 3540 callout_reset(&sc->dc_wdog_ch, hz, dc_watchdog, sc); 3541 } 3542 3543 /* 3544 * Set media options. 
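 * For Davicom parts forced to HomePNA the chip is reprogrammed
 * directly via dc_setcfg(); for everything else dc_link is cleared
 * and the PHY and tick machinery bring the link back up.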
3545 */ 3546 static int 3547 dc_ifmedia_upd(struct ifnet *ifp) 3548 { 3549 struct dc_softc *sc; 3550 struct mii_data *mii; 3551 struct ifmedia *ifm; 3552 3553 sc = ifp->if_softc; 3554 mii = device_get_softc(sc->dc_miibus); 3555 DC_LOCK(sc); 3556 mii_mediachg(mii); 3557 ifm = &mii->mii_media; 3558 3559 if (DC_IS_DAVICOM(sc) && 3560 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) 3561 dc_setcfg(sc, ifm->ifm_media); 3562 else 3563 sc->dc_link = 0; 3564 DC_UNLOCK(sc); 3565 3566 return (0); 3567 } 3568 3569 /* 3570 * Report current media status. 3571 */ 3572 static void 3573 dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3574 { 3575 struct dc_softc *sc; 3576 struct mii_data *mii; 3577 struct ifmedia *ifm; 3578 3579 sc = ifp->if_softc; 3580 mii = device_get_softc(sc->dc_miibus); 3581 DC_LOCK(sc); 3582 mii_pollstat(mii); 3583 ifm = &mii->mii_media; 3584 if (DC_IS_DAVICOM(sc)) { 3585 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { 3586 ifmr->ifm_active = ifm->ifm_media; 3587 ifmr->ifm_status = 0; 3588 DC_UNLOCK(sc); 3589 return; 3590 } 3591 } 3592 ifmr->ifm_active = mii->mii_media_active; 3593 ifmr->ifm_status = mii->mii_media_status; 3594 DC_UNLOCK(sc); 3595 } 3596 3597 static int 3598 dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 3599 { 3600 struct dc_softc *sc = ifp->if_softc; 3601 struct ifreq *ifr = (struct ifreq *)data; 3602 struct mii_data *mii; 3603 int error = 0; 3604 3605 switch (command) { 3606 case SIOCSIFFLAGS: 3607 DC_LOCK(sc); 3608 if (ifp->if_flags & IFF_UP) { 3609 int need_setfilt = (ifp->if_flags ^ sc->dc_if_flags) & 3610 (IFF_PROMISC | IFF_ALLMULTI); 3611 3612 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3613 if (need_setfilt) 3614 dc_setfilt(sc); 3615 } else { 3616 sc->dc_txthresh = 0; 3617 dc_init_locked(sc); 3618 } 3619 } else { 3620 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3621 dc_stop(sc); 3622 } 3623 sc->dc_if_flags = ifp->if_flags; 3624 DC_UNLOCK(sc); 3625 error = 0; 3626 break; 3627 case SIOCADDMULTI: 3628 case SIOCDELMULTI: 3629 DC_LOCK(sc); 3630 dc_setfilt(sc); 3631 DC_UNLOCK(sc); 3632 error = 0; 3633 break; 3634 case SIOCGIFMEDIA: 3635 case SIOCSIFMEDIA: 3636 mii = device_get_softc(sc->dc_miibus); 3637 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 3638 break; 3639 case SIOCSIFCAP: 3640 #ifdef DEVICE_POLLING 3641 if (ifr->ifr_reqcap & IFCAP_POLLING && 3642 !(ifp->if_capenable & IFCAP_POLLING)) { 3643 error = ether_poll_register(dc_poll, ifp); 3644 if (error) 3645 return(error); 3646 DC_LOCK(sc); 3647 /* Disable interrupts */ 3648 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3649 ifp->if_capenable |= IFCAP_POLLING; 3650 DC_UNLOCK(sc); 3651 return (error); 3652 } 3653 if (!(ifr->ifr_reqcap & IFCAP_POLLING) && 3654 ifp->if_capenable & IFCAP_POLLING) { 3655 error = ether_poll_deregister(ifp); 3656 /* Enable interrupts. 
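 * again now that polling is being switched off; the mask written
 * below is the same DC_INTRS set that the interrupt handler re-arms
 * on exit.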
*/ 3657 DC_LOCK(sc); 3658 CSR_WRITE_4(sc, DC_IMR, DC_INTRS); 3659 ifp->if_capenable &= ~IFCAP_POLLING; 3660 DC_UNLOCK(sc); 3661 return (error); 3662 } 3663 #endif /* DEVICE_POLLING */ 3664 break; 3665 default: 3666 error = ether_ioctl(ifp, command, data); 3667 break; 3668 } 3669 3670 return (error); 3671 } 3672 3673 static void 3674 dc_watchdog(void *xsc) 3675 { 3676 struct dc_softc *sc = xsc; 3677 struct ifnet *ifp; 3678 3679 DC_LOCK_ASSERT(sc); 3680 3681 if (sc->dc_wdog_timer == 0 || --sc->dc_wdog_timer != 0) { 3682 callout_reset(&sc->dc_wdog_ch, hz, dc_watchdog, sc); 3683 return; 3684 } 3685 3686 ifp = sc->dc_ifp; 3687 ifp->if_oerrors++; 3688 device_printf(sc->dc_dev, "watchdog timeout\n"); 3689 3690 dc_stop(sc); 3691 dc_reset(sc); 3692 dc_init_locked(sc); 3693 3694 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3695 dc_start_locked(ifp); 3696 } 3697 3698 /* 3699 * Stop the adapter and free any mbufs allocated to the 3700 * RX and TX lists. 3701 */ 3702 static void 3703 dc_stop(struct dc_softc *sc) 3704 { 3705 struct ifnet *ifp; 3706 struct dc_list_data *ld; 3707 struct dc_chain_data *cd; 3708 int i; 3709 u_int32_t ctl; 3710 3711 DC_LOCK_ASSERT(sc); 3712 3713 ifp = sc->dc_ifp; 3714 ld = sc->dc_ldata; 3715 cd = &sc->dc_cdata; 3716 3717 callout_stop(&sc->dc_stat_ch); 3718 callout_stop(&sc->dc_wdog_ch); 3719 sc->dc_wdog_timer = 0; 3720 3721 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 3722 3723 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON)); 3724 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 3725 CSR_WRITE_4(sc, DC_TXADDR, 0x00000000); 3726 CSR_WRITE_4(sc, DC_RXADDR, 0x00000000); 3727 sc->dc_link = 0; 3728 3729 /* 3730 * Free data in the RX lists. 3731 */ 3732 for (i = 0; i < DC_RX_LIST_CNT; i++) { 3733 if (cd->dc_rx_chain[i] != NULL) { 3734 m_freem(cd->dc_rx_chain[i]); 3735 cd->dc_rx_chain[i] = NULL; 3736 } 3737 } 3738 bzero(&ld->dc_rx_list, sizeof(ld->dc_rx_list)); 3739 3740 /* 3741 * Free the TX list buffers. 3742 */ 3743 for (i = 0; i < DC_TX_LIST_CNT; i++) { 3744 if (cd->dc_tx_chain[i] != NULL) { 3745 ctl = le32toh(ld->dc_tx_list[i].dc_ctl); 3746 if ((ctl & DC_TXCTL_SETUP) || 3747 !(ctl & DC_TXCTL_LASTFRAG)) { 3748 cd->dc_tx_chain[i] = NULL; 3749 continue; 3750 } 3751 bus_dmamap_unload(sc->dc_mtag, cd->dc_tx_map[i]); 3752 m_freem(cd->dc_tx_chain[i]); 3753 cd->dc_tx_chain[i] = NULL; 3754 } 3755 } 3756 bzero(&ld->dc_tx_list, sizeof(ld->dc_tx_list)); 3757 } 3758 3759 /* 3760 * Device suspend routine. Stop the interface and save some PCI 3761 * settings in case the BIOS doesn't restore them properly on 3762 * resume. 3763 */ 3764 static int 3765 dc_suspend(device_t dev) 3766 { 3767 struct dc_softc *sc; 3768 3769 sc = device_get_softc(dev); 3770 DC_LOCK(sc); 3771 dc_stop(sc); 3772 sc->suspended = 1; 3773 DC_UNLOCK(sc); 3774 3775 return (0); 3776 } 3777 3778 /* 3779 * Device resume routine. Restore some PCI settings in case the BIOS 3780 * doesn't, re-enable busmastering, and restart the interface if 3781 * appropriate. 3782 */ 3783 static int 3784 dc_resume(device_t dev) 3785 { 3786 struct dc_softc *sc; 3787 struct ifnet *ifp; 3788 3789 sc = device_get_softc(dev); 3790 ifp = sc->dc_ifp; 3791 3792 /* reinitialize interface if necessary */ 3793 DC_LOCK(sc); 3794 if (ifp->if_flags & IFF_UP) 3795 dc_init_locked(sc); 3796 3797 sc->suspended = 0; 3798 DC_UNLOCK(sc); 3799 3800 return (0); 3801 } 3802 3803 /* 3804 * Stop all chip I/O so that the kernel's probe routines don't 3805 * get confused by errant DMAs when rebooting. 
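 * dc_stop() masks the interrupt sources, clears the RX/TX enables in
 * DC_NETCFG and zeroes the descriptor base registers, which is
 * enough to quiesce the DMA engines before the reboot proceeds.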
3806 */ 3807 static int 3808 dc_shutdown(device_t dev) 3809 { 3810 struct dc_softc *sc; 3811 3812 sc = device_get_softc(dev); 3813 3814 DC_LOCK(sc); 3815 dc_stop(sc); 3816 DC_UNLOCK(sc); 3817 3818 return (0); 3819 } 3820 3821 static int 3822 dc_check_multiport(struct dc_softc *sc) 3823 { 3824 struct dc_softc *dsc; 3825 devclass_t dc; 3826 device_t child; 3827 uint8_t *eaddr; 3828 int unit; 3829 3830 dc = devclass_find("dc"); 3831 for (unit = 0; unit < devclass_get_maxunit(dc); unit++) { 3832 child = devclass_get_device(dc, unit); 3833 if (child == NULL) 3834 continue; 3835 if (child == sc->dc_dev) 3836 continue; 3837 if (device_get_parent(child) != device_get_parent(sc->dc_dev)) 3838 continue; 3839 if (unit > device_get_unit(sc->dc_dev)) 3840 continue; 3841 dsc = device_get_softc(child); 3842 device_printf(sc->dc_dev, "Using station address of %s as base", 3843 device_get_nameunit(child)); 3844 bcopy(dsc->dc_eaddr, sc->dc_eaddr, ETHER_ADDR_LEN); 3845 eaddr = (uint8_t *)sc->dc_eaddr; 3846 eaddr[5]++; 3847 return (0); 3848 } 3849 return (ENOENT); 3850 } 3851
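/*
 * Note on the multiport fallback above: the base station address is
 * taken from the lowest-numbered sibling dc(4) device on the same
 * parent bus and the last octet is simply incremented, so if, say,
 * dc0 reported the (arbitrary, made-up) address 00:a0:c9:12:34:56, a
 * second port whose SROM supplies no usable address would come up as
 * 00:a0:c9:12:34:57.
 */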