/*-
 * Written by: yen_cw@myson.com.tw
 * Copyright (c) 2002 Myson Technology Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Myson fast ethernet PCI NIC driver, available at: http://www.myson.com.tw/
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#define NBPFILTER	1

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/bpf.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/clock.h>	/* for DELAY */
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include "miibus_if.h"

/*
 * #define MY_USEIOSPACE
 */

static int MY_USEIOSPACE = 1;

#ifdef MY_USEIOSPACE
#define MY_RES		SYS_RES_IOPORT
#define MY_RID		MY_PCI_LOIO
#else
#define MY_RES		SYS_RES_MEMORY
#define MY_RID		MY_PCI_LOMEM
#endif

#include <dev/my/if_myreg.h>

#ifndef lint
static const char rcsid[] =
"$Id: if_my.c,v 1.16 2003/04/15 06:37:25 mdodd Exp $";
#endif

/*
 * Various supported device vendors/types and their names.
 */
struct my_type *my_info_tmp;
static struct my_type my_devs[] = {
	{MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"},
	{MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"},
	{MYSONVENDORID, MTD891ID, "Myson MTD89X Based Giga Ethernet Card"},
	{0, 0, NULL}
};

/*
 * Various supported PHY vendors/types and their names. Note that this driver
 * will work with pretty much any MII-compliant PHY, so failure to positively
 * identify the chip is not a fatal error.
 */
static struct my_type my_phys[] = {
	{MysonPHYID0, MysonPHYID0, "<MYSON MTD981>"},
	{SeeqPHYID0, SeeqPHYID0, "<SEEQ 80225>"},
	{AhdocPHYID0, AhdocPHYID0, "<AHDOC 101>"},
	{MarvellPHYID0, MarvellPHYID0, "<MARVELL 88E1000>"},
	{LevelOnePHYID0, LevelOnePHYID0, "<LevelOne LXT1000>"},
	{0, 0, "<MII-compliant physical interface>"}
};

static int	my_probe(device_t);
static int	my_attach(device_t);
static int	my_detach(device_t);
static int	my_newbuf(struct my_softc *, struct my_chain_onefrag *);
static int	my_encap(struct my_softc *, struct my_chain *, struct mbuf *);
static void	my_rxeof(struct my_softc *);
static void	my_txeof(struct my_softc *);
static void	my_txeoc(struct my_softc *);
static void	my_intr(void *);
static void	my_start(struct ifnet *);
static void	my_start_locked(struct ifnet *);
static int	my_ioctl(struct ifnet *, u_long, caddr_t);
static void	my_init(void *);
static void	my_init_locked(struct my_softc *);
static void	my_stop(struct my_softc *);
static void	my_watchdog(struct ifnet *);
static void	my_shutdown(device_t);
static int	my_ifmedia_upd(struct ifnet *);
static void	my_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static u_int16_t my_phy_readreg(struct my_softc *, int);
static void	my_phy_writereg(struct my_softc *, int, int);
static void	my_autoneg_xmit(struct my_softc *);
static void	my_autoneg_mii(struct my_softc *, int, int);
static void	my_setmode_mii(struct my_softc *, int);
static void	my_getmode_mii(struct my_softc *);
static void	my_setcfg(struct my_softc *, int);
static void	my_setmulti(struct my_softc *);
static void	my_reset(struct my_softc *);
static int	my_list_rx_init(struct my_softc *);
static int	my_list_tx_init(struct my_softc *);
static long	my_send_cmd_to_phy(struct my_softc *, int, int);

#define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
#define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

static device_method_t my_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, my_probe),
	DEVMETHOD(device_attach, my_attach),
	DEVMETHOD(device_detach, my_detach),
	DEVMETHOD(device_shutdown, my_shutdown),

	{0, 0}
};

static driver_t my_driver = {
	"my",
	my_methods,
	sizeof(struct my_softc)
};

static devclass_t my_devclass;

DRIVER_MODULE(my, pci, my_driver, my_devclass, 0, 0);
MODULE_DEPEND(my, pci, 1, 1, 1);
MODULE_DEPEND(my, ether, 1, 1, 1);

static long
my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad)
{
	long miir;
	int i;
	int mask, data;

	MY_LOCK_ASSERT(sc);

	/* enable MII output */
	miir = CSR_READ_4(sc, MY_MANAGEMENT);
	miir &= 0xfffffff0;

	miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO;

	/* send 32 1's preamble */
	for (i = 0; i < 32; i++) {
		/* low MDC; MDO is already high (miir) */
		miir &= ~MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);

		/* high MDC */
		miir |= MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
	}

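	/*
	 * Note (added for clarity, not from the Myson documentation): the 16
	 * bits clocked out below appear to follow the standard MII
	 * management-frame header (ST, OP, PHYAD, REGAD, TA).  Judging from
	 * the shifts used here, the PHY address occupies bits 11-7 and the
	 * register address bits 6-2, while MY_OP_READ/MY_OP_WRITE presumably
	 * carry the remaining start/opcode bits.
	 */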
	/* calculate ST+OP+PHYAD+REGAD+TA */
	data = opcode | (sc->my_phy_addr << 7) | (regad << 2);

	/* sent out */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
		if (mask & data)
			miir |= MY_MASK_MIIR_MII_MDO;

		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
		/* high MDC */
		miir |= MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
		DELAY(30);

		/* next */
		mask >>= 1;
		if (mask == 0x2 && opcode == MY_OP_READ)
			miir &= ~MY_MASK_MIIR_MII_WRITE;
	}

	return miir;
}

static u_int16_t
my_phy_readreg(struct my_softc * sc, int reg)
{
	long miir;
	int mask, data;

	MY_LOCK_ASSERT(sc);

	if (sc->my_info->my_did == MTD803ID)
		data = CSR_READ_2(sc, MY_PHYBASE + reg * 2);
	else {
		miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg);

		/* read data */
		mask = 0x8000;
		data = 0;
		while (mask) {
			/* low MDC */
			miir &= ~MY_MASK_MIIR_MII_MDC;
			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);

			/* read MDI */
			miir = CSR_READ_4(sc, MY_MANAGEMENT);
			if (miir & MY_MASK_MIIR_MII_MDI)
				data |= mask;

			/* high MDC, and wait */
			miir |= MY_MASK_MIIR_MII_MDC;
			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
			DELAY(30);

			/* next */
			mask >>= 1;
		}

		/* low MDC */
		miir &= ~MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
	}

	return (u_int16_t) data;
}

static void
my_phy_writereg(struct my_softc * sc, int reg, int data)
{
	long miir;
	int mask;

	MY_LOCK_ASSERT(sc);

	if (sc->my_info->my_did == MTD803ID)
		CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data);
	else {
		miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg);

		/* write data */
		mask = 0x8000;
		while (mask) {
			/* low MDC, prepare MDO */
			miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
			if (mask & data)
				miir |= MY_MASK_MIIR_MII_MDO;
			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
			DELAY(1);

			/* high MDC */
			miir |= MY_MASK_MIIR_MII_MDC;
			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
			DELAY(1);

			/* next */
			mask >>= 1;
		}

		/* low MDC */
		miir &= ~MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
	}
	return;
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
my_setmulti(struct my_softc * sc)
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = {0, 0};
	struct ifmultiaddr *ifma;
	u_int32_t rxfilt;
	int mcnt = 0;

	MY_LOCK_ASSERT(sc);

	ifp = sc->my_ifp;

	rxfilt = CSR_READ_4(sc, MY_TCRRCR);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= MY_AM;
		CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
		CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF);

		return;
	}
	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, MY_MAR0, 0);
	CSR_WRITE_4(sc, MY_MAR1, 0);

	/* now program new ones */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ~ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}
	IF_ADDR_UNLOCK(ifp);

	if (mcnt)
		rxfilt |= MY_AM;
	else
		rxfilt &= ~MY_AM;
	CSR_WRITE_4(sc, MY_MAR0, hashes[0]);
	CSR_WRITE_4(sc, MY_MAR1, hashes[1]);
	CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
	return;
}

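/*
 * Note on the hash programming above (added for clarity, not from the Myson
 * documentation): the top six bits of the complemented big-endian CRC-32 of
 * each multicast address select one of 64 filter bits; values 0-31 set bits
 * in MY_MAR0 and 32-63 set bits in MY_MAR1, which is why the all-multicast
 * and promiscuous cases simply write 0xFFFFFFFF to both registers.
 */
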
/*
 * Initiate an autonegotiation session.
 */
static void
my_autoneg_xmit(struct my_softc * sc)
{
	u_int16_t phy_sts = 0;

	MY_LOCK_ASSERT(sc);

	my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
	DELAY(500);
	while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET);

	phy_sts = my_phy_readreg(sc, PHY_BMCR);
	phy_sts |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR;
	my_phy_writereg(sc, PHY_BMCR, phy_sts);

	return;
}

/*
 * Invoke autonegotiation on a PHY.
 */
static void
my_autoneg_mii(struct my_softc * sc, int flag, int verbose)
{
	u_int16_t phy_sts = 0, media, advert, ability;
	u_int16_t ability2 = 0;
	struct ifnet *ifp;
	struct ifmedia *ifm;

	MY_LOCK_ASSERT(sc);

	ifm = &sc->ifmedia;
	ifp = sc->my_ifp;

	ifm->ifm_media = IFM_ETHER | IFM_AUTO;

#ifndef FORCE_AUTONEG_TFOUR
	/*
	 * First, see if autoneg is supported. If not, there's no point in
	 * continuing.
	 */
	phy_sts = my_phy_readreg(sc, PHY_BMSR);
	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
		if (verbose)
			if_printf(ifp, "autonegotiation not supported\n");
		ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
		return;
	}
#endif
	switch (flag) {
	case MY_FLAG_FORCEDELAY:
		/*
		 * XXX Never use this option anywhere but in the probe
		 * routine: making the kernel stop dead in its tracks for
		 * five whole seconds after we've gone multi-user is really
		 * bad manners.
		 */
		my_autoneg_xmit(sc);
		DELAY(5000000);
		break;
	case MY_FLAG_SCHEDDELAY:
		/*
		 * Wait for the transmitter to go idle before starting an
		 * autoneg session, otherwise my_start() may clobber our
		 * timeout, and we don't want to allow transmission during an
		 * autoneg session since that can screw it up.
		 */
		if (sc->my_cdata.my_tx_head != NULL) {
			sc->my_want_auto = 1;
			MY_UNLOCK(sc);
			return;
		}
		my_autoneg_xmit(sc);
		ifp->if_timer = 5;
		sc->my_autoneg = 1;
		sc->my_want_auto = 0;
		return;
	case MY_FLAG_DELAYTIMEO:
		ifp->if_timer = 0;
		sc->my_autoneg = 0;
		break;
	default:
		if_printf(ifp, "invalid autoneg flag: %d\n", flag);
		return;
	}

	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
		if (verbose)
			if_printf(ifp, "autoneg complete, ");
		phy_sts = my_phy_readreg(sc, PHY_BMSR);
	} else {
		if (verbose)
			if_printf(ifp, "autoneg not complete, ");
	}

	media = my_phy_readreg(sc, PHY_BMCR);

	/* Link is good. Report modes and set duplex mode. */
	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
		if (verbose)
			if_printf(ifp, "link status good. ");
"); 475 advert = my_phy_readreg(sc, PHY_ANAR); 476 ability = my_phy_readreg(sc, PHY_LPAR); 477 if ((sc->my_pinfo->my_vid == MarvellPHYID0) || 478 (sc->my_pinfo->my_vid == LevelOnePHYID0)) { 479 ability2 = my_phy_readreg(sc, PHY_1000SR); 480 if (ability2 & PHY_1000SR_1000BTXFULL) { 481 advert = 0; 482 ability = 0; 483 /* 484 * this version did not support 1000M, 485 * ifm->ifm_media = 486 * IFM_ETHER|IFM_1000_T|IFM_FDX; 487 */ 488 ifm->ifm_media = 489 IFM_ETHER | IFM_100_TX | IFM_FDX; 490 media &= ~PHY_BMCR_SPEEDSEL; 491 media |= PHY_BMCR_1000; 492 media |= PHY_BMCR_DUPLEX; 493 printf("(full-duplex, 1000Mbps)\n"); 494 } else if (ability2 & PHY_1000SR_1000BTXHALF) { 495 advert = 0; 496 ability = 0; 497 /* 498 * this version did not support 1000M, 499 * ifm->ifm_media = IFM_ETHER|IFM_1000_T; 500 */ 501 ifm->ifm_media = IFM_ETHER | IFM_100_TX; 502 media &= ~PHY_BMCR_SPEEDSEL; 503 media &= ~PHY_BMCR_DUPLEX; 504 media |= PHY_BMCR_1000; 505 printf("(half-duplex, 1000Mbps)\n"); 506 } 507 } 508 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) { 509 ifm->ifm_media = IFM_ETHER | IFM_100_T4; 510 media |= PHY_BMCR_SPEEDSEL; 511 media &= ~PHY_BMCR_DUPLEX; 512 printf("(100baseT4)\n"); 513 } else if (advert & PHY_ANAR_100BTXFULL && 514 ability & PHY_ANAR_100BTXFULL) { 515 ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX; 516 media |= PHY_BMCR_SPEEDSEL; 517 media |= PHY_BMCR_DUPLEX; 518 printf("(full-duplex, 100Mbps)\n"); 519 } else if (advert & PHY_ANAR_100BTXHALF && 520 ability & PHY_ANAR_100BTXHALF) { 521 ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX; 522 media |= PHY_BMCR_SPEEDSEL; 523 media &= ~PHY_BMCR_DUPLEX; 524 printf("(half-duplex, 100Mbps)\n"); 525 } else if (advert & PHY_ANAR_10BTFULL && 526 ability & PHY_ANAR_10BTFULL) { 527 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX; 528 media &= ~PHY_BMCR_SPEEDSEL; 529 media |= PHY_BMCR_DUPLEX; 530 printf("(full-duplex, 10Mbps)\n"); 531 } else if (advert) { 532 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX; 533 media &= ~PHY_BMCR_SPEEDSEL; 534 media &= ~PHY_BMCR_DUPLEX; 535 printf("(half-duplex, 10Mbps)\n"); 536 } 537 media &= ~PHY_BMCR_AUTONEGENBL; 538 539 /* Set ASIC's duplex mode to match the PHY. */ 540 my_phy_writereg(sc, PHY_BMCR, media); 541 my_setcfg(sc, media); 542 } else { 543 if (verbose) 544 if_printf(ifp, "no carrier\n"); 545 } 546 547 my_init_locked(sc); 548 if (sc->my_tx_pend) { 549 sc->my_autoneg = 0; 550 sc->my_tx_pend = 0; 551 my_start_locked(ifp); 552 } 553 return; 554 } 555 556 /* 557 * To get PHY ability. 
 */
static void
my_getmode_mii(struct my_softc * sc)
{
	u_int16_t bmsr;
	struct ifnet *ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	bmsr = my_phy_readreg(sc, PHY_BMSR);
	if (bootverbose)
		if_printf(ifp, "PHY status word: %x\n", bmsr);

	/* fallback */
	sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;

	if (bmsr & PHY_BMSR_10BTHALF) {
		if (bootverbose)
			if_printf(ifp, "10Mbps half-duplex mode supported\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX,
		    0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
	}
	if (bmsr & PHY_BMSR_10BTFULL) {
		if (bootverbose)
			if_printf(ifp, "10Mbps full-duplex mode supported\n");

		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
	}
	if (bmsr & PHY_BMSR_100BTXHALF) {
		if (bootverbose)
			if_printf(ifp, "100Mbps half-duplex mode supported\n");
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX,
		    0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
	}
	if (bmsr & PHY_BMSR_100BTXFULL) {
		if (bootverbose)
			if_printf(ifp, "100Mbps full-duplex mode supported\n");
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
	}
	/* Some also support 100BaseT4. */
	if (bmsr & PHY_BMSR_100BT4) {
		if (bootverbose)
			if_printf(ifp, "100baseT4 mode supported\n");
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4;
#ifdef FORCE_AUTONEG_TFOUR
		if (bootverbose)
			if_printf(ifp, "forcing on autoneg support for BT4\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
#endif
	}
#if 0				/* this version did not support 1000M, */
	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
		if (bootverbose)
			if_printf(ifp, "1000Mbps half-duplex mode supported\n");

		ifp->if_baudrate = 1000000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX,
		    0, NULL);
		if (bootverbose)
			if_printf(ifp, "1000Mbps full-duplex mode supported\n");
		ifp->if_baudrate = 1000000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX;
	}
#endif
	if (bmsr & PHY_BMSR_CANAUTONEG) {
		if (bootverbose)
			if_printf(ifp, "autoneg supported\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
	}
	return;
}

/*
 * Set speed and duplex mode.
 */
static void
my_setmode_mii(struct my_softc * sc, int media)
{
	u_int16_t bmcr;
	struct ifnet *ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	/*
	 * If an autoneg session is in progress, stop it.
	 */
	if (sc->my_autoneg) {
		if_printf(ifp, "canceling autoneg session\n");
		ifp->if_timer = sc->my_autoneg = sc->my_want_auto = 0;
		bmcr = my_phy_readreg(sc, PHY_BMCR);
		bmcr &= ~PHY_BMCR_AUTONEGENBL;
		my_phy_writereg(sc, PHY_BMCR, bmcr);
	}
	if_printf(ifp, "selecting MII, ");
	bmcr = my_phy_readreg(sc, PHY_BMCR);
	bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 |
	    PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK);

#if 0				/* this version did not support 1000M, */
	if (IFM_SUBTYPE(media) == IFM_1000_T) {
		printf("1000Mbps/T4, half-duplex\n");
		bmcr &= ~PHY_BMCR_SPEEDSEL;
		bmcr &= ~PHY_BMCR_DUPLEX;
		bmcr |= PHY_BMCR_1000;
	}
#endif
	if (IFM_SUBTYPE(media) == IFM_100_T4) {
		printf("100Mbps/T4, half-duplex\n");
		bmcr |= PHY_BMCR_SPEEDSEL;
		bmcr &= ~PHY_BMCR_DUPLEX;
	}
	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		printf("100Mbps, ");
		bmcr |= PHY_BMCR_SPEEDSEL;
	}
	if (IFM_SUBTYPE(media) == IFM_10_T) {
		printf("10Mbps, ");
		bmcr &= ~PHY_BMCR_SPEEDSEL;
	}
	if ((media & IFM_GMASK) == IFM_FDX) {
		printf("full duplex\n");
		bmcr |= PHY_BMCR_DUPLEX;
	} else {
		printf("half duplex\n");
		bmcr &= ~PHY_BMCR_DUPLEX;
	}
	my_phy_writereg(sc, PHY_BMCR, bmcr);
	my_setcfg(sc, bmcr);
	return;
}

/*
 * The Myson manual states that in order to fiddle with the 'full-duplex' and
 * '100Mbps' bits in the netconfig register, we first have to put the
 * transmit and/or receive logic in the idle state.
 */
static void
my_setcfg(struct my_softc * sc, int bmcr)
{
	int i, restart = 0;

	MY_LOCK_ASSERT(sc);
	if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) {
		restart = 1;
		MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE));
		for (i = 0; i < MY_TIMEOUT; i++) {
			DELAY(10);
			if (!(CSR_READ_4(sc, MY_TCRRCR) &
			    (MY_TXRUN | MY_RXRUN)))
				break;
		}
		if (i == MY_TIMEOUT)
			if_printf(sc->my_ifp,
			    "failed to force tx and rx to idle \n");
	}
	MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000);
	MY_CLRBIT(sc, MY_TCRRCR, MY_PS10);
	if (bmcr & PHY_BMCR_1000)
		MY_SETBIT(sc, MY_TCRRCR, MY_PS1000);
	else if (!(bmcr & PHY_BMCR_SPEEDSEL))
		MY_SETBIT(sc, MY_TCRRCR, MY_PS10);
	if (bmcr & PHY_BMCR_DUPLEX)
		MY_SETBIT(sc, MY_TCRRCR, MY_FD);
	else
		MY_CLRBIT(sc, MY_TCRRCR, MY_FD);
	if (restart)
		MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE);
	return;
}

static void
my_reset(struct my_softc * sc)
{
	register int i;

	MY_LOCK_ASSERT(sc);
	MY_SETBIT(sc, MY_BCR, MY_SWR);
	for (i = 0; i < MY_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, MY_BCR) & MY_SWR))
			break;
	}
	if (i == MY_TIMEOUT)
		if_printf(sc->my_ifp, "reset never completed!\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
	return;
}

/*
 * Probe for a Myson chip. Check the PCI vendor and device IDs against our
 * list and return a device name if we find a match.
 */
static int
my_probe(device_t dev)
{
	struct my_type *t;

	t = my_devs;
	while (t->my_name != NULL) {
		if ((pci_get_vendor(dev) == t->my_vid) &&
		    (pci_get_device(dev) == t->my_did)) {
			device_set_desc(dev, t->my_name);
			my_info_tmp = t;
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}
	return (ENXIO);
}

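/*
 * Note (added for clarity): this driver hands the chip vtophys() physical
 * addresses of ordinary wired kernel memory for its descriptor lists and
 * packet buffers rather than using bus_dma(9); the list area allocated in
 * my_attach() below is rounded up by hand to an 8-byte boundary before use.
 */
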
/*
 * Attach the interface. Allocate softc structures, do ifmedia setup and
 * ethernet/BPF attach.
 */
static int
my_attach(device_t dev)
{
	int i;
	u_char eaddr[ETHER_ADDR_LEN];
	u_int32_t iobase;
	struct my_softc *sc;
	struct ifnet *ifp;
	int media = IFM_ETHER | IFM_100_TX | IFM_FDX;
	unsigned int round;
	caddr_t roundptr;
	struct my_type *p;
	u_int16_t phy_vid, phy_did, phy_sts = 0;
	int rid, error = 0;

	sc = device_get_softc(dev);
	mtx_init(&sc->my_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	if (my_info_tmp->my_did == MTD800ID) {
		iobase = pci_read_config(dev, MY_PCI_LOIO, 4);
		if (iobase & 0x300)
			MY_USEIOSPACE = 0;
	}

	rid = MY_RID;
	sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE);

	if (sc->my_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto destroy_mutex;
	}
	sc->my_btag = rman_get_bustag(sc->my_res);
	sc->my_bhandle = rman_get_bushandle(sc->my_res);

	rid = 0;
	sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->my_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto release_io;
	}

	sc->my_info = my_info_tmp;

	/* Reset the adapter. */
	MY_LOCK(sc);
	my_reset(sc);
	MY_UNLOCK(sc);

	/*
	 * Get station address
	 */
	for (i = 0; i < ETHER_ADDR_LEN; ++i)
		eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i);

	sc->my_ldata_ptr = malloc(sizeof(struct my_list_data) + 8,
	    M_DEVBUF, M_NOWAIT);
	if (sc->my_ldata_ptr == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto release_irq;
	}
	sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr;
	round = (uintptr_t)sc->my_ldata_ptr & 0xF;
	roundptr = sc->my_ldata_ptr;
	for (i = 0; i < 8; i++) {
		if (round % 8) {
			round++;
			roundptr++;
		} else
			break;
	}
	sc->my_ldata = (struct my_list_data *) roundptr;
	bzero(sc->my_ldata, sizeof(struct my_list_data));

	ifp = sc->my_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto free_ldata;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = my_ioctl;
	ifp->if_start = my_start;
	ifp->if_watchdog = my_watchdog;
	ifp->if_init = my_init;
	ifp->if_baudrate = 10000000;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;

	if (sc->my_info->my_did == MTD803ID)
		sc->my_pinfo = my_phys;
	else {
		if (bootverbose)
			device_printf(dev, "probing for a PHY\n");
		MY_LOCK(sc);
		for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) {
			if (bootverbose)
				device_printf(dev, "checking address: %d\n", i);
			sc->my_phy_addr = i;
			phy_sts = my_phy_readreg(sc, PHY_BMSR);
			if ((phy_sts != 0) && (phy_sts != 0xffff))
				break;
			else
				phy_sts = 0;
		}
		if (phy_sts) {
			phy_vid = my_phy_readreg(sc, PHY_VENID);
			phy_did = my_phy_readreg(sc, PHY_DEVID);
			if (bootverbose) {
				device_printf(dev, "found PHY at address %d, ",
				    sc->my_phy_addr);
				printf("vendor id: %x device id: %x\n",
				    phy_vid, phy_did);
			}
			p = my_phys;
			while (p->my_vid) {
				if (phy_vid == p->my_vid) {
					sc->my_pinfo = p;
					break;
				}
				p++;
			}
			if (sc->my_pinfo == NULL)
				sc->my_pinfo = &my_phys[PHY_UNKNOWN];
			if (bootverbose)
				device_printf(dev, "PHY type: %s\n",
				    sc->my_pinfo->my_name);
		} else {
			MY_UNLOCK(sc);
			device_printf(dev, "MII without any phy!\n");
			error = ENXIO;
			goto free_if;
		}
		MY_UNLOCK(sc);
	}

	/* Do ifmedia setup. */
	ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts);
	MY_LOCK(sc);
	my_getmode_mii(sc);
	my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1);
	media = sc->ifmedia.ifm_media;
	my_stop(sc);
	MY_UNLOCK(sc);
	ifmedia_set(&sc->ifmedia, media);

	ether_ifattach(ifp, eaddr);

	error = bus_setup_intr(dev, sc->my_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    my_intr, sc, &sc->my_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto detach_if;
	}

	return (0);

detach_if:
	ether_ifdetach(ifp);
free_if:
	if_free(ifp);
free_ldata:
	free(sc->my_ldata_ptr, M_DEVBUF);
release_irq:
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
release_io:
	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
destroy_mutex:
	mtx_destroy(&sc->my_mtx);
	return (error);
}

static int
my_detach(device_t dev)
{
	struct my_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	MY_LOCK(sc);
	my_stop(sc);
	MY_UNLOCK(sc);
	bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand);

	ifp = sc->my_ifp;
	ether_ifdetach(ifp);
	if_free(ifp);
	free(sc->my_ldata_ptr, M_DEVBUF);

	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
	mtx_destroy(&sc->my_mtx);
	return (0);
}

/*
 * Initialize the transmit descriptors.
 */
static int
my_list_tx_init(struct my_softc * sc)
{
	struct my_chain_data *cd;
	struct my_list_data *ld;
	int i;

	MY_LOCK_ASSERT(sc);
	cd = &sc->my_cdata;
	ld = sc->my_ldata;
	for (i = 0; i < MY_TX_LIST_CNT; i++) {
		cd->my_tx_chain[i].my_ptr = &ld->my_tx_list[i];
		if (i == (MY_TX_LIST_CNT - 1))
			cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[0];
		else
			cd->my_tx_chain[i].my_nextdesc =
			    &cd->my_tx_chain[i + 1];
	}
	cd->my_tx_free = &cd->my_tx_chain[0];
	cd->my_tx_tail = cd->my_tx_head = NULL;
	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that we
 * arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
my_list_rx_init(struct my_softc * sc)
{
	struct my_chain_data *cd;
	struct my_list_data *ld;
	int i;

	MY_LOCK_ASSERT(sc);
	cd = &sc->my_cdata;
	ld = sc->my_ldata;
	for (i = 0; i < MY_RX_LIST_CNT; i++) {
		cd->my_rx_chain[i].my_ptr =
		    (struct my_desc *) & ld->my_rx_list[i];
		if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS) {
			MY_UNLOCK(sc);
			return (ENOBUFS);
		}
		if (i == (MY_RX_LIST_CNT - 1)) {
			cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0];
			ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]);
		} else {
			cd->my_rx_chain[i].my_nextdesc =
			    &cd->my_rx_chain[i + 1];
			ld->my_rx_list[i].my_next =
			    vtophys(&ld->my_rx_list[i + 1]);
		}
	}
	cd->my_rx_head = &cd->my_rx_chain[0];
	return (0);
}

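/*
 * Note on buffer ownership (added for clarity): a descriptor whose my_status
 * field has MY_OWNByNIC set belongs to the chip.  my_newbuf() below hands a
 * freshly filled RX descriptor back by setting that bit, and my_rxeof() only
 * touches descriptors once the chip has cleared it.
 */
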
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c)
{
	struct mbuf *m_new = NULL;

	MY_LOCK_ASSERT(sc);
	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL) {
		if_printf(sc->my_ifp,
		    "no memory for rx list -- packet dropped!\n");
		return (ENOBUFS);
	}
	MCLGET(m_new, M_DONTWAIT);
	if (!(m_new->m_flags & M_EXT)) {
		if_printf(sc->my_ifp,
		    "no memory for rx list -- packet dropped!\n");
		m_freem(m_new);
		return (ENOBUFS);
	}
	c->my_mbuf = m_new;
	c->my_ptr->my_data = vtophys(mtod(m_new, caddr_t));
	c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift;
	c->my_ptr->my_status = MY_OWNByNIC;
	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to the higher
 * level protocols.
 */
static void
my_rxeof(struct my_softc * sc)
{
	struct ether_header *eh;
	struct mbuf *m;
	struct ifnet *ifp;
	struct my_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status)
	    & MY_OWNByNIC)) {
		cur_rx = sc->my_cdata.my_rx_head;
		sc->my_cdata.my_rx_head = cur_rx->my_nextdesc;

		if (rxstat & MY_ES) {	/* error summary: give up this rx pkt */
			ifp->if_ierrors++;
			cur_rx->my_ptr->my_status = MY_OWNByNIC;
			continue;
		}
		/* No errors; receive the packet. */
		total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift;
		total_len -= ETHER_CRC_LEN;

		if (total_len < MINCLSIZE) {
			m = m_devget(mtod(cur_rx->my_mbuf, char *),
			    total_len, 0, ifp, NULL);
			cur_rx->my_ptr->my_status = MY_OWNByNIC;
			if (m == NULL) {
				ifp->if_ierrors++;
				continue;
			}
		} else {
			m = cur_rx->my_mbuf;
			/*
			 * Try to conjure up a new mbuf cluster. If that
			 * fails, it means we have an out of memory condition
			 * and should leave the buffer in place and continue.
			 * This will result in a lost packet, but there's
			 * little else we can do in this situation.
			 */
			if (my_newbuf(sc, cur_rx) == ENOBUFS) {
				ifp->if_ierrors++;
				cur_rx->my_ptr->my_status = MY_OWNByNIC;
				continue;
			}
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}
		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's a
		 * broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
		if (ifp->if_bpf) {
			BPF_MTAP(ifp, m);
			if (ifp->if_flags & IFF_PROMISC &&
			    (bcmp(eh->ether_dhost, IF_LLADDR(sc->my_ifp),
			    ETHER_ADDR_LEN) &&
			    (eh->ether_dhost[0] & 1) == 0)) {
				m_freem(m);
				continue;
			}
		}
#endif
		MY_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		MY_LOCK(sc);
	}
	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up the list
 * buffers.
 */
static void
my_txeof(struct my_softc * sc)
{
	struct my_chain *cur_tx;
	struct ifnet *ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	/* Clear the timeout timer. */
	ifp->if_timer = 0;
	if (sc->my_cdata.my_tx_head == NULL) {
		return;
	}
	/*
	 * Go through our tx list and free mbufs for those frames that have
	 * been transmitted.
	 */
	while (sc->my_cdata.my_tx_head->my_mbuf != NULL) {
		u_int32_t txstat;

		cur_tx = sc->my_cdata.my_tx_head;
		txstat = MY_TXSTATUS(cur_tx);
		if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT)
			break;
		if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) {
			if (txstat & MY_TXERR) {
				ifp->if_oerrors++;
				if (txstat & MY_EC)	/* excessive collision */
					ifp->if_collisions++;
				if (txstat & MY_LC)	/* late collision */
					ifp->if_collisions++;
			}
			ifp->if_collisions += (txstat & MY_NCRMASK) >>
			    MY_NCRShift;
		}
		ifp->if_opackets++;
		m_freem(cur_tx->my_mbuf);
		cur_tx->my_mbuf = NULL;
		if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) {
			sc->my_cdata.my_tx_head = NULL;
			sc->my_cdata.my_tx_tail = NULL;
			break;
		}
		sc->my_cdata.my_tx_head = cur_tx->my_nextdesc;
	}
	if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) {
		ifp->if_collisions += (CSR_READ_4(sc, MY_TSR) & MY_NCRMask);
	}
	return;
}

/*
 * TX 'end of channel' interrupt handler.
 */
static void
my_txeoc(struct my_softc * sc)
{
	struct ifnet *ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	ifp->if_timer = 0;
	if (sc->my_cdata.my_tx_head == NULL) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->my_cdata.my_tx_tail = NULL;
		if (sc->my_want_auto)
			my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
	} else {
		if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) {
			MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC;
			ifp->if_timer = 5;
			CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);
		}
	}
	return;
}

static void
my_intr(void *arg)
{
	struct my_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;

	sc = arg;
	MY_LOCK(sc);
	ifp = sc->my_ifp;
	if (!(ifp->if_flags & IFF_UP)) {
		MY_UNLOCK(sc);
		return;
	}
	/* Disable interrupts. */
	CSR_WRITE_4(sc, MY_IMR, 0x00000000);

	for (;;) {
		status = CSR_READ_4(sc, MY_ISR);
		status &= MY_INTRS;
		if (status)
			CSR_WRITE_4(sc, MY_ISR, status);
		else
			break;

		if (status & MY_RI)	/* receive interrupt */
			my_rxeof(sc);

		if ((status & MY_RBU) || (status & MY_RxErr)) {
			/* rx buffer unavailable or rx error */
			ifp->if_ierrors++;
#ifdef foo
			my_stop(sc);
			my_reset(sc);
			my_init_locked(sc);
#endif
		}
		if (status & MY_TI)	/* tx interrupt */
			my_txeof(sc);
		if (status & MY_ETI)	/* tx early interrupt */
			my_txeof(sc);
		if (status & MY_TBU)	/* tx buffer unavailable */
			my_txeoc(sc);

#if 0				/* 90/1/18 delete */
		if (status & MY_FBE) {
			my_reset(sc);
			my_init_locked(sc);
		}
#endif
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
	if (ifp->if_snd.ifq_head != NULL)
		my_start_locked(ifp);
	MY_UNLOCK(sc);
	return;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head)
{
	struct my_desc *f = NULL;
	int total_len;
	struct mbuf *m, *m_new = NULL;

	MY_LOCK_ASSERT(sc);
	/* calculate the total tx pkt length */
	total_len = 0;
	for (m = m_head; m != NULL; m = m->m_next)
		total_len += m->m_len;
	/*
	 * Copy the packet into a single fresh mbuf (and cluster, if it does
	 * not fit in an mbuf header), since the chip is handed exactly one
	 * contiguous buffer per frame.
	 */
	m = m_head;
	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL) {
		if_printf(sc->my_ifp, "no memory for tx list");
		return (1);
	}
	if (m_head->m_pkthdr.len > MHLEN) {
		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			if_printf(sc->my_ifp, "no memory for tx list");
			return (1);
		}
	}
	m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
	m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
	m_freem(m_head);
	m_head = m_new;
	f = &c->my_ptr->my_frag[0];
	f->my_status = 0;
	f->my_data = vtophys(mtod(m_new, caddr_t));
	total_len = m_new->m_len;
	f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable;
	f->my_ctl |= total_len << MY_PKTShift;	/* pkt size */
	f->my_ctl |= total_len;	/* buffer size */
	/* 89/12/29 add, for mtd891 *//* [ 89? ] */
	if (sc->my_info->my_did == MTD891ID)
		f->my_ctl |= MY_ETIControl | MY_RetryTxLC;
	c->my_mbuf = m_head;
	c->my_lastdesc = 0;
	MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]);
	return (0);
}

/*
 * Main transmit routine.  Each outgoing frame is coalesced into a single
 * contiguous mbuf (or cluster) by my_encap(), and the physical address of
 * that buffer is placed in the transmit descriptor.  We also keep a pointer
 * to the mbuf itself, since the descriptor only holds physical addresses.
 */
static void
my_start(struct ifnet * ifp)
{
	struct my_softc *sc;

	sc = ifp->if_softc;
	MY_LOCK(sc);
	my_start_locked(ifp);
	MY_UNLOCK(sc);
}

static void
my_start_locked(struct ifnet * ifp)
{
	struct my_softc *sc;
	struct mbuf *m_head = NULL;
	struct my_chain *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;
	MY_LOCK_ASSERT(sc);
	if (sc->my_autoneg) {
		sc->my_tx_pend = 1;
		return;
	}
	/*
	 * Check for an available queue slot. If there are none, punt.
	 */
	if (sc->my_cdata.my_tx_free->my_mbuf != NULL) {
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		return;
	}
	start_tx = sc->my_cdata.my_tx_free;
	while (sc->my_cdata.my_tx_free->my_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		cur_tx = sc->my_cdata.my_tx_free;
		sc->my_cdata.my_tx_free = cur_tx->my_nextdesc;

		/* Pack the data into the descriptor. */
		my_encap(sc, cur_tx, m_head);

		if (cur_tx != start_tx)
			MY_TXOWN(cur_tx) = MY_OWNByNIC;
#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame to
		 * him.
		 */
		BPF_MTAP(ifp, cur_tx->my_mbuf);
#endif
	}
	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL) {
		return;
	}
	/*
	 * Place the request for the upload interrupt in the last descriptor
	 * in the chain. This way, if we're chaining several packets at once,
	 * we'll only get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	MY_TXCTL(cur_tx) |= MY_TXIC;
	cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC;
	sc->my_cdata.my_tx_tail = cur_tx;
	if (sc->my_cdata.my_tx_head == NULL)
		sc->my_cdata.my_tx_head = start_tx;
	MY_TXOWN(start_tx) = MY_OWNByNIC;
	CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);	/* tx polling demand */

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
	return;
}

static void
my_init(void *xsc)
{
	struct my_softc *sc = xsc;

	MY_LOCK(sc);
	my_init_locked(sc);
	MY_UNLOCK(sc);
}

static void
my_init_locked(struct my_softc *sc)
{
	struct ifnet *ifp = sc->my_ifp;
	u_int16_t phy_bmcr = 0;

	MY_LOCK_ASSERT(sc);
	if (sc->my_autoneg) {
		return;
	}
	if (sc->my_pinfo != NULL)
		phy_bmcr = my_phy_readreg(sc, PHY_BMCR);
	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	my_stop(sc);
	my_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
#if 0				/* 89/9/1 modify, */
	CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512);
	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF);
#endif
	CSR_WRITE_4(sc, MY_BCR, MY_PBL8);
	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512);
	/*
	 * 89/12/29 add, for mtd891,
	 */
	if (sc->my_info->my_did == MTD891ID) {
		MY_SETBIT(sc, MY_BCR, MY_PROG);
		MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced);
	}
	my_setcfg(sc, phy_bmcr);
	/* Init circular RX list. */
	if (my_list_rx_init(sc) == ENOBUFS) {
		if_printf(ifp, "init failed: no memory for rx buffers\n");
		my_stop(sc);
		return;
	}
	/* Init TX descriptors. */
	my_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		MY_SETBIT(sc, MY_TCRRCR, MY_PROM);
	else
		MY_CLRBIT(sc, MY_TCRRCR, MY_PROM);

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		MY_SETBIT(sc, MY_TCRRCR, MY_AB);
	else
		MY_CLRBIT(sc, MY_TCRRCR, MY_AB);

	/*
	 * Program the multicast filter, if necessary.
	 */
	my_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	MY_CLRBIT(sc, MY_TCRRCR, MY_RE);
	CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
	CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF);

	/* Enable receiver and transmitter. */
	MY_SETBIT(sc, MY_TCRRCR, MY_RE);
	MY_CLRBIT(sc, MY_TCRRCR, MY_TE);
	CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0]));
	MY_SETBIT(sc, MY_TCRRCR, MY_TE);

	/* Restore state of BMCR */
	if (sc->my_pinfo != NULL)
		my_phy_writereg(sc, PHY_BMCR, phy_bmcr);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	return;
}

/*
 * Set media options.
 */

static int
my_ifmedia_upd(struct ifnet * ifp)
{
	struct my_softc *sc;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	MY_LOCK(sc);
	ifm = &sc->ifmedia;
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
		MY_UNLOCK(sc);
		return (EINVAL);
	}
	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
		my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
	else
		my_setmode_mii(sc, ifm->ifm_media);
	MY_UNLOCK(sc);
	return (0);
}

/*
 * Report current media status.
 */

static void
my_ifmedia_sts(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct my_softc *sc;
	u_int16_t advert = 0, ability = 0;

	sc = ifp->if_softc;
	MY_LOCK(sc);
	ifmr->ifm_active = IFM_ETHER;
	if (!(my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
#if 0				/* this version did not support 1000M, */
		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_1000)
			ifmr->ifm_active = IFM_ETHER | IFM_1000TX;
#endif
		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
			ifmr->ifm_active = IFM_ETHER | IFM_100_TX;
		else
			ifmr->ifm_active = IFM_ETHER | IFM_10_T;
		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		MY_UNLOCK(sc);
		return;
	}
	ability = my_phy_readreg(sc, PHY_LPAR);
	advert = my_phy_readreg(sc, PHY_ANAR);

#if 0				/* this version did not support 1000M, */
	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
		ability2 = my_phy_readreg(sc, PHY_1000SR);
		if (ability2 & PHY_1000SR_1000BTXFULL) {
			advert = 0;
			ability = 0;
			ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
		} else if (ability & PHY_1000SR_1000BTXHALF) {
			advert = 0;
			ability = 0;
			ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_HDX;
		}
	}
#endif
	if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4)
		ifmr->ifm_active = IFM_ETHER | IFM_100_T4;
	else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL)
		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF)
		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX;
	else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL)
		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX;
	else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF)
		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX;
	MY_UNLOCK(sc);
	return;
}

static int
my_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct my_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int error;

	switch (command) {
	case SIOCSIFFLAGS:
		MY_LOCK(sc);
		if (ifp->if_flags & IFF_UP)
			my_init_locked(sc);
		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			my_stop(sc);
		MY_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		MY_LOCK(sc);
		my_setmulti(sc);
		MY_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
my_watchdog(struct ifnet * ifp)
{
	struct my_softc *sc;

	sc = ifp->if_softc;
	MY_LOCK(sc);
	if (sc->my_autoneg) {
		my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1);
		MY_UNLOCK(sc);
		return;
	}
	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");
	if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
		if_printf(ifp, "no carrier - transceiver cable problem?\n");
	my_stop(sc);
	my_reset(sc);
	my_init_locked(sc);
	if (ifp->if_snd.ifq_head != NULL)
		my_start_locked(ifp);
	MY_UNLOCK(sc);
	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
static void
my_stop(struct my_softc * sc)
{
	register int i;
	struct ifnet *ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	ifp->if_timer = 0;

	MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE));
	CSR_WRITE_4(sc, MY_IMR, 0x00000000);
	CSR_WRITE_4(sc, MY_TXLBA, 0x00000000);
	CSR_WRITE_4(sc, MY_RXLBA, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < MY_RX_LIST_CNT; i++) {
		if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) {
			m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf);
			sc->my_cdata.my_rx_chain[i].my_mbuf = NULL;
		}
	}
	bzero((char *)&sc->my_ldata->my_rx_list,
	    sizeof(sc->my_ldata->my_rx_list));
	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < MY_TX_LIST_CNT; i++) {
		if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) {
			m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf);
			sc->my_cdata.my_tx_chain[i].my_mbuf = NULL;
		}
	}
	bzero((char *)&sc->my_ldata->my_tx_list,
	    sizeof(sc->my_ldata->my_tx_list));
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't get confused
 * by errant DMAs when rebooting.
 */
static void
my_shutdown(device_t dev)
{
	struct my_softc *sc;

	sc = device_get_softc(dev);
	MY_LOCK(sc);
	my_stop(sc);
	MY_UNLOCK(sc);
	return;
}