1 /*- 2 * Written by: yen_cw@myson.com.tw 3 * Copyright (c) 2002 Myson Technology Inc. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions, and the following disclaimer, 11 * without modification, immediately at the beginning of the file. 12 * 2. The name of the author may not be used to endorse or promote products 13 * derived from this software without specific prior written permission. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * Myson fast ethernet PCI NIC driver, available at: http://www.myson.com.tw/ 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/sockio.h> 36 #include <sys/mbuf.h> 37 #include <sys/malloc.h> 38 #include <sys/kernel.h> 39 #include <sys/socket.h> 40 #include <sys/queue.h> 41 #include <sys/types.h> 42 #include <sys/bus.h> 43 #include <sys/module.h> 44 #include <sys/lock.h> 45 #include <sys/mutex.h> 46 47 #define NBPFILTER 1 48 49 #include <net/if.h> 50 #include <net/if_arp.h> 51 #include <net/ethernet.h> 52 #include <net/if_media.h> 53 #include <net/if_types.h> 54 #include <net/if_dl.h> 55 #include <net/bpf.h> 56 57 #include <vm/vm.h> /* for vtophys */ 58 #include <vm/pmap.h> /* for vtophys */ 59 #include <machine/bus.h> 60 #include <machine/resource.h> 61 #include <sys/bus.h> 62 #include <sys/rman.h> 63 64 #include <dev/pci/pcireg.h> 65 #include <dev/pci/pcivar.h> 66 67 #include <dev/mii/mii.h> 68 #include <dev/mii/miivar.h> 69 70 #include "miibus_if.h" 71 72 /* 73 * #define MY_USEIOSPACE 74 */ 75 76 static int MY_USEIOSPACE = 1; 77 78 #ifdef MY_USEIOSPACE 79 #define MY_RES SYS_RES_IOPORT 80 #define MY_RID MY_PCI_LOIO 81 #else 82 #define MY_RES SYS_RES_MEMORY 83 #define MY_RID MY_PCI_LOMEM 84 #endif 85 86 87 #include <dev/my/if_myreg.h> 88 89 #ifndef lint 90 static const char rcsid[] = 91 "$Id: if_my.c,v 1.16 2003/04/15 06:37:25 mdodd Exp $"; 92 #endif 93 94 /* 95 * Various supported device vendors/types and their names. 96 */ 97 struct my_type *my_info_tmp; 98 static struct my_type my_devs[] = { 99 {MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"}, 100 {MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"}, 101 {MYSONVENDORID, MTD891ID, "Myson MTD89X Based Giga Ethernet Card"}, 102 {0, 0, NULL} 103 }; 104 105 /* 106 * Various supported PHY vendors/types and their names. Note that this driver 107 * will work with pretty much any MII-compliant PHY, so failure to positively 108 * identify the chip is not a fatal error. 
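 * Unrecognized PHYs fall back to the generic "<MII-compliant physical
 * interface>" entry at the end of the table below.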
109 */ 110 static struct my_type my_phys[] = { 111 {MysonPHYID0, MysonPHYID0, "<MYSON MTD981>"}, 112 {SeeqPHYID0, SeeqPHYID0, "<SEEQ 80225>"}, 113 {AhdocPHYID0, AhdocPHYID0, "<AHDOC 101>"}, 114 {MarvellPHYID0, MarvellPHYID0, "<MARVELL 88E1000>"}, 115 {LevelOnePHYID0, LevelOnePHYID0, "<LevelOne LXT1000>"}, 116 {0, 0, "<MII-compliant physical interface>"} 117 }; 118 119 static int my_probe(device_t); 120 static int my_attach(device_t); 121 static int my_detach(device_t); 122 static int my_newbuf(struct my_softc *, struct my_chain_onefrag *); 123 static int my_encap(struct my_softc *, struct my_chain *, struct mbuf *); 124 static void my_rxeof(struct my_softc *); 125 static void my_txeof(struct my_softc *); 126 static void my_txeoc(struct my_softc *); 127 static void my_intr(void *); 128 static void my_start(struct ifnet *); 129 static void my_start_locked(struct ifnet *); 130 static int my_ioctl(struct ifnet *, u_long, caddr_t); 131 static void my_init(void *); 132 static void my_init_locked(struct my_softc *); 133 static void my_stop(struct my_softc *); 134 static void my_watchdog(struct ifnet *); 135 static void my_shutdown(device_t); 136 static int my_ifmedia_upd(struct ifnet *); 137 static void my_ifmedia_sts(struct ifnet *, struct ifmediareq *); 138 static u_int16_t my_phy_readreg(struct my_softc *, int); 139 static void my_phy_writereg(struct my_softc *, int, int); 140 static void my_autoneg_xmit(struct my_softc *); 141 static void my_autoneg_mii(struct my_softc *, int, int); 142 static void my_setmode_mii(struct my_softc *, int); 143 static void my_getmode_mii(struct my_softc *); 144 static void my_setcfg(struct my_softc *, int); 145 static void my_setmulti(struct my_softc *); 146 static void my_reset(struct my_softc *); 147 static int my_list_rx_init(struct my_softc *); 148 static int my_list_tx_init(struct my_softc *); 149 static long my_send_cmd_to_phy(struct my_softc *, int, int); 150 151 #define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) 152 #define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) 153 154 static device_method_t my_methods[] = { 155 /* Device interface */ 156 DEVMETHOD(device_probe, my_probe), 157 DEVMETHOD(device_attach, my_attach), 158 DEVMETHOD(device_detach, my_detach), 159 DEVMETHOD(device_shutdown, my_shutdown), 160 161 {0, 0} 162 }; 163 164 static driver_t my_driver = { 165 "my", 166 my_methods, 167 sizeof(struct my_softc) 168 }; 169 170 static devclass_t my_devclass; 171 172 DRIVER_MODULE(my, pci, my_driver, my_devclass, 0, 0); 173 MODULE_DEPEND(my, pci, 1, 1, 1); 174 MODULE_DEPEND(my, ether, 1, 1, 1); 175 176 static long 177 my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad) 178 { 179 long miir; 180 int i; 181 int mask, data; 182 183 MY_LOCK_ASSERT(sc); 184 185 /* enable MII output */ 186 miir = CSR_READ_4(sc, MY_MANAGEMENT); 187 miir &= 0xfffffff0; 188 189 miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO; 190 191 /* send 32 1's preamble */ 192 for (i = 0; i < 32; i++) { 193 /* low MDC; MDO is already high (miir) */ 194 miir &= ~MY_MASK_MIIR_MII_MDC; 195 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 196 197 /* high MDC */ 198 miir |= MY_MASK_MIIR_MII_MDC; 199 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 200 } 201 202 /* calculate ST+OP+PHYAD+REGAD+TA */ 203 data = opcode | (sc->my_phy_addr << 7) | (regad << 2); 204 205 /* sent out */ 206 mask = 0x8000; 207 while (mask) { 208 /* low MDC, prepare MDO */ 209 miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO); 210 if (mask & data) 211 miir |= 
MY_MASK_MIIR_MII_MDO; 212 213 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 214 /* high MDC */ 215 miir |= MY_MASK_MIIR_MII_MDC; 216 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 217 DELAY(30); 218 219 /* next */ 220 mask >>= 1; 221 if (mask == 0x2 && opcode == MY_OP_READ) 222 miir &= ~MY_MASK_MIIR_MII_WRITE; 223 } 224 225 return miir; 226 } 227 228 229 static u_int16_t 230 my_phy_readreg(struct my_softc * sc, int reg) 231 { 232 long miir; 233 int mask, data; 234 235 MY_LOCK_ASSERT(sc); 236 237 if (sc->my_info->my_did == MTD803ID) 238 data = CSR_READ_2(sc, MY_PHYBASE + reg * 2); 239 else { 240 miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg); 241 242 /* read data */ 243 mask = 0x8000; 244 data = 0; 245 while (mask) { 246 /* low MDC */ 247 miir &= ~MY_MASK_MIIR_MII_MDC; 248 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 249 250 /* read MDI */ 251 miir = CSR_READ_4(sc, MY_MANAGEMENT); 252 if (miir & MY_MASK_MIIR_MII_MDI) 253 data |= mask; 254 255 /* high MDC, and wait */ 256 miir |= MY_MASK_MIIR_MII_MDC; 257 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 258 DELAY(30); 259 260 /* next */ 261 mask >>= 1; 262 } 263 264 /* low MDC */ 265 miir &= ~MY_MASK_MIIR_MII_MDC; 266 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 267 } 268 269 return (u_int16_t) data; 270 } 271 272 273 static void 274 my_phy_writereg(struct my_softc * sc, int reg, int data) 275 { 276 long miir; 277 int mask; 278 279 MY_LOCK_ASSERT(sc); 280 281 if (sc->my_info->my_did == MTD803ID) 282 CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data); 283 else { 284 miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg); 285 286 /* write data */ 287 mask = 0x8000; 288 while (mask) { 289 /* low MDC, prepare MDO */ 290 miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO); 291 if (mask & data) 292 miir |= MY_MASK_MIIR_MII_MDO; 293 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 294 DELAY(1); 295 296 /* high MDC */ 297 miir |= MY_MASK_MIIR_MII_MDC; 298 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 299 DELAY(1); 300 301 /* next */ 302 mask >>= 1; 303 } 304 305 /* low MDC */ 306 miir &= ~MY_MASK_MIIR_MII_MDC; 307 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 308 } 309 return; 310 } 311 312 313 /* 314 * Program the 64-bit multicast hash filter. 315 */ 316 static void 317 my_setmulti(struct my_softc * sc) 318 { 319 struct ifnet *ifp; 320 int h = 0; 321 u_int32_t hashes[2] = {0, 0}; 322 struct ifmultiaddr *ifma; 323 u_int32_t rxfilt; 324 int mcnt = 0; 325 326 MY_LOCK_ASSERT(sc); 327 328 ifp = sc->my_ifp; 329 330 rxfilt = CSR_READ_4(sc, MY_TCRRCR); 331 332 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 333 rxfilt |= MY_AM; 334 CSR_WRITE_4(sc, MY_TCRRCR, rxfilt); 335 CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF); 336 CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF); 337 338 return; 339 } 340 /* first, zot all the existing hash bits */ 341 CSR_WRITE_4(sc, MY_MAR0, 0); 342 CSR_WRITE_4(sc, MY_MAR1, 0); 343 344 /* now program new ones */ 345 IF_ADDR_LOCK(ifp); 346 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 347 if (ifma->ifma_addr->sa_family != AF_LINK) 348 continue; 349 h = ~ether_crc32_be(LLADDR((struct sockaddr_dl *) 350 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 351 if (h < 32) 352 hashes[0] |= (1 << h); 353 else 354 hashes[1] |= (1 << (h - 32)); 355 mcnt++; 356 } 357 IF_ADDR_UNLOCK(ifp); 358 359 if (mcnt) 360 rxfilt |= MY_AM; 361 else 362 rxfilt &= ~MY_AM; 363 CSR_WRITE_4(sc, MY_MAR0, hashes[0]); 364 CSR_WRITE_4(sc, MY_MAR1, hashes[1]); 365 CSR_WRITE_4(sc, MY_TCRRCR, rxfilt); 366 return; 367 } 368 369 /* 370 * Initiate an autonegotiation session. 
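 * The PHY is reset through the BMCR; we spin until the reset bit self-clears
 * and then set the autoneg-enable and restart-autoneg bits.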
371 */ 372 static void 373 my_autoneg_xmit(struct my_softc * sc) 374 { 375 u_int16_t phy_sts = 0; 376 377 MY_LOCK_ASSERT(sc); 378 379 my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET); 380 DELAY(500); 381 while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET); 382 383 phy_sts = my_phy_readreg(sc, PHY_BMCR); 384 phy_sts |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR; 385 my_phy_writereg(sc, PHY_BMCR, phy_sts); 386 387 return; 388 } 389 390 391 /* 392 * Invoke autonegotiation on a PHY. 393 */ 394 static void 395 my_autoneg_mii(struct my_softc * sc, int flag, int verbose) 396 { 397 u_int16_t phy_sts = 0, media, advert, ability; 398 u_int16_t ability2 = 0; 399 struct ifnet *ifp; 400 struct ifmedia *ifm; 401 402 MY_LOCK_ASSERT(sc); 403 404 ifm = &sc->ifmedia; 405 ifp = sc->my_ifp; 406 407 ifm->ifm_media = IFM_ETHER | IFM_AUTO; 408 409 #ifndef FORCE_AUTONEG_TFOUR 410 /* 411 * First, see if autoneg is supported. If not, there's no point in 412 * continuing. 413 */ 414 phy_sts = my_phy_readreg(sc, PHY_BMSR); 415 if (!(phy_sts & PHY_BMSR_CANAUTONEG)) { 416 if (verbose) 417 if_printf(ifp, "autonegotiation not supported\n"); 418 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX; 419 return; 420 } 421 #endif 422 switch (flag) { 423 case MY_FLAG_FORCEDELAY: 424 /* 425 * XXX Never use this option anywhere but in the probe 426 * routine: making the kernel stop dead in its tracks for 427 * three whole seconds after we've gone multi-user is really 428 * bad manners. 429 */ 430 my_autoneg_xmit(sc); 431 DELAY(5000000); 432 break; 433 case MY_FLAG_SCHEDDELAY: 434 /* 435 * Wait for the transmitter to go idle before starting an 436 * autoneg session, otherwise my_start() may clobber our 437 * timeout, and we don't want to allow transmission during an 438 * autoneg session since that can screw it up. 439 */ 440 if (sc->my_cdata.my_tx_head != NULL) { 441 sc->my_want_auto = 1; 442 MY_UNLOCK(sc); 443 return; 444 } 445 my_autoneg_xmit(sc); 446 ifp->if_timer = 5; 447 sc->my_autoneg = 1; 448 sc->my_want_auto = 0; 449 return; 450 case MY_FLAG_DELAYTIMEO: 451 ifp->if_timer = 0; 452 sc->my_autoneg = 0; 453 break; 454 default: 455 if_printf(ifp, "invalid autoneg flag: %d\n", flag); 456 return; 457 } 458 459 if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) { 460 if (verbose) 461 if_printf(ifp, "autoneg complete, "); 462 phy_sts = my_phy_readreg(sc, PHY_BMSR); 463 } else { 464 if (verbose) 465 if_printf(ifp, "autoneg not complete, "); 466 } 467 468 media = my_phy_readreg(sc, PHY_BMCR); 469 470 /* Link is good. Report modes and set duplex mode. */ 471 if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) { 472 if (verbose) 473 if_printf(ifp, "link status good. 
"); 474 advert = my_phy_readreg(sc, PHY_ANAR); 475 ability = my_phy_readreg(sc, PHY_LPAR); 476 if ((sc->my_pinfo->my_vid == MarvellPHYID0) || 477 (sc->my_pinfo->my_vid == LevelOnePHYID0)) { 478 ability2 = my_phy_readreg(sc, PHY_1000SR); 479 if (ability2 & PHY_1000SR_1000BTXFULL) { 480 advert = 0; 481 ability = 0; 482 /* 483 * this version did not support 1000M, 484 * ifm->ifm_media = 485 * IFM_ETHER|IFM_1000_T|IFM_FDX; 486 */ 487 ifm->ifm_media = 488 IFM_ETHER | IFM_100_TX | IFM_FDX; 489 media &= ~PHY_BMCR_SPEEDSEL; 490 media |= PHY_BMCR_1000; 491 media |= PHY_BMCR_DUPLEX; 492 printf("(full-duplex, 1000Mbps)\n"); 493 } else if (ability2 & PHY_1000SR_1000BTXHALF) { 494 advert = 0; 495 ability = 0; 496 /* 497 * this version did not support 1000M, 498 * ifm->ifm_media = IFM_ETHER|IFM_1000_T; 499 */ 500 ifm->ifm_media = IFM_ETHER | IFM_100_TX; 501 media &= ~PHY_BMCR_SPEEDSEL; 502 media &= ~PHY_BMCR_DUPLEX; 503 media |= PHY_BMCR_1000; 504 printf("(half-duplex, 1000Mbps)\n"); 505 } 506 } 507 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) { 508 ifm->ifm_media = IFM_ETHER | IFM_100_T4; 509 media |= PHY_BMCR_SPEEDSEL; 510 media &= ~PHY_BMCR_DUPLEX; 511 printf("(100baseT4)\n"); 512 } else if (advert & PHY_ANAR_100BTXFULL && 513 ability & PHY_ANAR_100BTXFULL) { 514 ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX; 515 media |= PHY_BMCR_SPEEDSEL; 516 media |= PHY_BMCR_DUPLEX; 517 printf("(full-duplex, 100Mbps)\n"); 518 } else if (advert & PHY_ANAR_100BTXHALF && 519 ability & PHY_ANAR_100BTXHALF) { 520 ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX; 521 media |= PHY_BMCR_SPEEDSEL; 522 media &= ~PHY_BMCR_DUPLEX; 523 printf("(half-duplex, 100Mbps)\n"); 524 } else if (advert & PHY_ANAR_10BTFULL && 525 ability & PHY_ANAR_10BTFULL) { 526 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX; 527 media &= ~PHY_BMCR_SPEEDSEL; 528 media |= PHY_BMCR_DUPLEX; 529 printf("(full-duplex, 10Mbps)\n"); 530 } else if (advert) { 531 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX; 532 media &= ~PHY_BMCR_SPEEDSEL; 533 media &= ~PHY_BMCR_DUPLEX; 534 printf("(half-duplex, 10Mbps)\n"); 535 } 536 media &= ~PHY_BMCR_AUTONEGENBL; 537 538 /* Set ASIC's duplex mode to match the PHY. */ 539 my_phy_writereg(sc, PHY_BMCR, media); 540 my_setcfg(sc, media); 541 } else { 542 if (verbose) 543 if_printf(ifp, "no carrier\n"); 544 } 545 546 my_init_locked(sc); 547 if (sc->my_tx_pend) { 548 sc->my_autoneg = 0; 549 sc->my_tx_pend = 0; 550 my_start_locked(ifp); 551 } 552 return; 553 } 554 555 /* 556 * To get PHY ability. 
 */
static void
my_getmode_mii(struct my_softc * sc)
{
	u_int16_t bmsr;
	struct ifnet *ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	bmsr = my_phy_readreg(sc, PHY_BMSR);
	if (bootverbose)
		if_printf(ifp, "PHY status word: %x\n", bmsr);

	/* fallback */
	sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;

	if (bmsr & PHY_BMSR_10BTHALF) {
		if (bootverbose)
			if_printf(ifp, "10Mbps half-duplex mode supported\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX,
		    0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
	}
	if (bmsr & PHY_BMSR_10BTFULL) {
		if (bootverbose)
			if_printf(ifp, "10Mbps full-duplex mode supported\n");

		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
	}
	if (bmsr & PHY_BMSR_100BTXHALF) {
		if (bootverbose)
			if_printf(ifp, "100Mbps half-duplex mode supported\n");
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX,
		    0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
	}
	if (bmsr & PHY_BMSR_100BTXFULL) {
		if (bootverbose)
			if_printf(ifp, "100Mbps full-duplex mode supported\n");
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
	}
	/* Some also support 100BaseT4. */
	if (bmsr & PHY_BMSR_100BT4) {
		if (bootverbose)
			if_printf(ifp, "100baseT4 mode supported\n");
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4;
#ifdef FORCE_AUTONEG_TFOUR
		if (bootverbose)
			if_printf(ifp, "forcing on autoneg support for BT4\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
#endif
	}
#if 0	/* this version did not support 1000M, */
	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
		if (bootverbose)
			if_printf(ifp, "1000Mbps half-duplex mode supported\n");

		ifp->if_baudrate = 1000000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX,
		    0, NULL);
		if (bootverbose)
			if_printf(ifp, "1000Mbps full-duplex mode supported\n");
		ifp->if_baudrate = 1000000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX;
	}
#endif
	if (bmsr & PHY_BMSR_CANAUTONEG) {
		if (bootverbose)
			if_printf(ifp, "autoneg supported\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
	}
	return;
}

/*
 * Set speed and duplex mode.
 */
static void
my_setmode_mii(struct my_softc * sc, int media)
{
	u_int16_t bmcr;
	struct ifnet *ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	/*
	 * If an autoneg session is in progress, stop it.
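	 * The autoneg-enable bit is cleared before the requested speed and
	 * duplex are forced below.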
658 */ 659 if (sc->my_autoneg) { 660 if_printf(ifp, "canceling autoneg session\n"); 661 ifp->if_timer = sc->my_autoneg = sc->my_want_auto = 0; 662 bmcr = my_phy_readreg(sc, PHY_BMCR); 663 bmcr &= ~PHY_BMCR_AUTONEGENBL; 664 my_phy_writereg(sc, PHY_BMCR, bmcr); 665 } 666 if_printf(ifp, "selecting MII, "); 667 bmcr = my_phy_readreg(sc, PHY_BMCR); 668 bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 | 669 PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK); 670 671 #if 0 /* this version did not support 1000M, */ 672 if (IFM_SUBTYPE(media) == IFM_1000_T) { 673 printf("1000Mbps/T4, half-duplex\n"); 674 bmcr &= ~PHY_BMCR_SPEEDSEL; 675 bmcr &= ~PHY_BMCR_DUPLEX; 676 bmcr |= PHY_BMCR_1000; 677 } 678 #endif 679 if (IFM_SUBTYPE(media) == IFM_100_T4) { 680 printf("100Mbps/T4, half-duplex\n"); 681 bmcr |= PHY_BMCR_SPEEDSEL; 682 bmcr &= ~PHY_BMCR_DUPLEX; 683 } 684 if (IFM_SUBTYPE(media) == IFM_100_TX) { 685 printf("100Mbps, "); 686 bmcr |= PHY_BMCR_SPEEDSEL; 687 } 688 if (IFM_SUBTYPE(media) == IFM_10_T) { 689 printf("10Mbps, "); 690 bmcr &= ~PHY_BMCR_SPEEDSEL; 691 } 692 if ((media & IFM_GMASK) == IFM_FDX) { 693 printf("full duplex\n"); 694 bmcr |= PHY_BMCR_DUPLEX; 695 } else { 696 printf("half duplex\n"); 697 bmcr &= ~PHY_BMCR_DUPLEX; 698 } 699 my_phy_writereg(sc, PHY_BMCR, bmcr); 700 my_setcfg(sc, bmcr); 701 return; 702 } 703 704 /* 705 * The Myson manual states that in order to fiddle with the 'full-duplex' and 706 * '100Mbps' bits in the netconfig register, we first have to put the 707 * transmit and/or receive logic in the idle state. 708 */ 709 static void 710 my_setcfg(struct my_softc * sc, int bmcr) 711 { 712 int i, restart = 0; 713 714 MY_LOCK_ASSERT(sc); 715 if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) { 716 restart = 1; 717 MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE)); 718 for (i = 0; i < MY_TIMEOUT; i++) { 719 DELAY(10); 720 if (!(CSR_READ_4(sc, MY_TCRRCR) & 721 (MY_TXRUN | MY_RXRUN))) 722 break; 723 } 724 if (i == MY_TIMEOUT) 725 if_printf(sc->my_ifp, 726 "failed to force tx and rx to idle \n"); 727 } 728 MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000); 729 MY_CLRBIT(sc, MY_TCRRCR, MY_PS10); 730 if (bmcr & PHY_BMCR_1000) 731 MY_SETBIT(sc, MY_TCRRCR, MY_PS1000); 732 else if (!(bmcr & PHY_BMCR_SPEEDSEL)) 733 MY_SETBIT(sc, MY_TCRRCR, MY_PS10); 734 if (bmcr & PHY_BMCR_DUPLEX) 735 MY_SETBIT(sc, MY_TCRRCR, MY_FD); 736 else 737 MY_CLRBIT(sc, MY_TCRRCR, MY_FD); 738 if (restart) 739 MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE); 740 return; 741 } 742 743 static void 744 my_reset(struct my_softc * sc) 745 { 746 register int i; 747 748 MY_LOCK_ASSERT(sc); 749 MY_SETBIT(sc, MY_BCR, MY_SWR); 750 for (i = 0; i < MY_TIMEOUT; i++) { 751 DELAY(10); 752 if (!(CSR_READ_4(sc, MY_BCR) & MY_SWR)) 753 break; 754 } 755 if (i == MY_TIMEOUT) 756 if_printf(sc->my_ifp, "reset never completed!\n"); 757 758 /* Wait a little while for the chip to get its brains in order. */ 759 DELAY(1000); 760 return; 761 } 762 763 /* 764 * Probe for a Myson chip. Check the PCI vendor and device IDs against our 765 * list and return a device name if we find a match. 766 */ 767 static int 768 my_probe(device_t dev) 769 { 770 struct my_type *t; 771 772 t = my_devs; 773 while (t->my_name != NULL) { 774 if ((pci_get_vendor(dev) == t->my_vid) && 775 (pci_get_device(dev) == t->my_did)) { 776 device_set_desc(dev, t->my_name); 777 my_info_tmp = t; 778 return (BUS_PROBE_DEFAULT); 779 } 780 t++; 781 } 782 return (ENXIO); 783 } 784 785 /* 786 * Attach the interface. Allocate softc structures, do ifmedia setup and 787 * ethernet/BPF attach. 
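 * The descriptor list is carved out of a single malloc'd block and aligned by
 * hand, the PHY is found by scanning the MII address range (the MTD803 skips
 * the scan and uses the first entry of the PHY table), and one forced
 * autonegotiation pass selects the initial media.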
788 */ 789 static int 790 my_attach(device_t dev) 791 { 792 int i; 793 u_char eaddr[ETHER_ADDR_LEN]; 794 u_int32_t iobase; 795 struct my_softc *sc; 796 struct ifnet *ifp; 797 int media = IFM_ETHER | IFM_100_TX | IFM_FDX; 798 unsigned int round; 799 caddr_t roundptr; 800 struct my_type *p; 801 u_int16_t phy_vid, phy_did, phy_sts = 0; 802 int rid, error = 0; 803 804 sc = device_get_softc(dev); 805 mtx_init(&sc->my_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 806 MTX_DEF); 807 808 /* 809 * Map control/status registers. 810 */ 811 pci_enable_busmaster(dev); 812 813 if (my_info_tmp->my_did == MTD800ID) { 814 iobase = pci_read_config(dev, MY_PCI_LOIO, 4); 815 if (iobase & 0x300) 816 MY_USEIOSPACE = 0; 817 } 818 819 rid = MY_RID; 820 sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE); 821 822 if (sc->my_res == NULL) { 823 device_printf(dev, "couldn't map ports/memory\n"); 824 error = ENXIO; 825 goto destroy_mutex; 826 } 827 sc->my_btag = rman_get_bustag(sc->my_res); 828 sc->my_bhandle = rman_get_bushandle(sc->my_res); 829 830 rid = 0; 831 sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 832 RF_SHAREABLE | RF_ACTIVE); 833 834 if (sc->my_irq == NULL) { 835 device_printf(dev, "couldn't map interrupt\n"); 836 error = ENXIO; 837 goto release_io; 838 } 839 840 sc->my_info = my_info_tmp; 841 842 /* Reset the adapter. */ 843 MY_LOCK(sc); 844 my_reset(sc); 845 MY_UNLOCK(sc); 846 847 /* 848 * Get station address 849 */ 850 for (i = 0; i < ETHER_ADDR_LEN; ++i) 851 eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i); 852 853 sc->my_ldata_ptr = malloc(sizeof(struct my_list_data) + 8, 854 M_DEVBUF, M_NOWAIT); 855 if (sc->my_ldata_ptr == NULL) { 856 device_printf(dev, "no memory for list buffers!\n"); 857 error = ENXIO; 858 goto release_irq; 859 } 860 sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr; 861 round = (uintptr_t)sc->my_ldata_ptr & 0xF; 862 roundptr = sc->my_ldata_ptr; 863 for (i = 0; i < 8; i++) { 864 if (round % 8) { 865 round++; 866 roundptr++; 867 } else 868 break; 869 } 870 sc->my_ldata = (struct my_list_data *) roundptr; 871 bzero(sc->my_ldata, sizeof(struct my_list_data)); 872 873 ifp = sc->my_ifp = if_alloc(IFT_ETHER); 874 if (ifp == NULL) { 875 device_printf(dev, "can not if_alloc()\n"); 876 error = ENOSPC; 877 goto free_ldata; 878 } 879 ifp->if_softc = sc; 880 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 881 ifp->if_mtu = ETHERMTU; 882 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 883 ifp->if_ioctl = my_ioctl; 884 ifp->if_start = my_start; 885 ifp->if_watchdog = my_watchdog; 886 ifp->if_init = my_init; 887 ifp->if_baudrate = 10000000; 888 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); 889 ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; 890 IFQ_SET_READY(&ifp->if_snd); 891 892 if (sc->my_info->my_did == MTD803ID) 893 sc->my_pinfo = my_phys; 894 else { 895 if (bootverbose) 896 device_printf(dev, "probing for a PHY\n"); 897 MY_LOCK(sc); 898 for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) { 899 if (bootverbose) 900 device_printf(dev, "checking address: %d\n", i); 901 sc->my_phy_addr = i; 902 phy_sts = my_phy_readreg(sc, PHY_BMSR); 903 if ((phy_sts != 0) && (phy_sts != 0xffff)) 904 break; 905 else 906 phy_sts = 0; 907 } 908 if (phy_sts) { 909 phy_vid = my_phy_readreg(sc, PHY_VENID); 910 phy_did = my_phy_readreg(sc, PHY_DEVID); 911 if (bootverbose) { 912 device_printf(dev, "found PHY at address %d, ", 913 sc->my_phy_addr); 914 printf("vendor id: %x device id: %x\n", 915 phy_vid, phy_did); 916 } 917 p = my_phys; 918 while (p->my_vid) { 919 if 
(phy_vid == p->my_vid) { 920 sc->my_pinfo = p; 921 break; 922 } 923 p++; 924 } 925 if (sc->my_pinfo == NULL) 926 sc->my_pinfo = &my_phys[PHY_UNKNOWN]; 927 if (bootverbose) 928 device_printf(dev, "PHY type: %s\n", 929 sc->my_pinfo->my_name); 930 } else { 931 MY_UNLOCK(sc); 932 device_printf(dev, "MII without any phy!\n"); 933 error = ENXIO; 934 goto free_if; 935 } 936 MY_UNLOCK(sc); 937 } 938 939 /* Do ifmedia setup. */ 940 ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts); 941 MY_LOCK(sc); 942 my_getmode_mii(sc); 943 my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1); 944 media = sc->ifmedia.ifm_media; 945 my_stop(sc); 946 MY_UNLOCK(sc); 947 ifmedia_set(&sc->ifmedia, media); 948 949 ether_ifattach(ifp, eaddr); 950 951 error = bus_setup_intr(dev, sc->my_irq, INTR_TYPE_NET | INTR_MPSAFE, 952 my_intr, sc, &sc->my_intrhand); 953 954 if (error) { 955 device_printf(dev, "couldn't set up irq\n"); 956 goto detach_if; 957 } 958 959 return (0); 960 961 detach_if: 962 ether_ifdetach(ifp); 963 free_if: 964 if_free(ifp); 965 free_ldata: 966 free(sc->my_ldata_ptr, M_DEVBUF); 967 release_irq: 968 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq); 969 release_io: 970 bus_release_resource(dev, MY_RES, MY_RID, sc->my_res); 971 destroy_mutex: 972 mtx_destroy(&sc->my_mtx); 973 return (error); 974 } 975 976 static int 977 my_detach(device_t dev) 978 { 979 struct my_softc *sc; 980 struct ifnet *ifp; 981 982 sc = device_get_softc(dev); 983 MY_LOCK(sc); 984 my_stop(sc); 985 MY_UNLOCK(sc); 986 bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand); 987 988 ifp = sc->my_ifp; 989 ether_ifdetach(ifp); 990 if_free(ifp); 991 free(sc->my_ldata_ptr, M_DEVBUF); 992 993 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq); 994 bus_release_resource(dev, MY_RES, MY_RID, sc->my_res); 995 mtx_destroy(&sc->my_mtx); 996 return (0); 997 } 998 999 1000 /* 1001 * Initialize the transmit descriptors. 1002 */ 1003 static int 1004 my_list_tx_init(struct my_softc * sc) 1005 { 1006 struct my_chain_data *cd; 1007 struct my_list_data *ld; 1008 int i; 1009 1010 MY_LOCK_ASSERT(sc); 1011 cd = &sc->my_cdata; 1012 ld = sc->my_ldata; 1013 for (i = 0; i < MY_TX_LIST_CNT; i++) { 1014 cd->my_tx_chain[i].my_ptr = &ld->my_tx_list[i]; 1015 if (i == (MY_TX_LIST_CNT - 1)) 1016 cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[0]; 1017 else 1018 cd->my_tx_chain[i].my_nextdesc = 1019 &cd->my_tx_chain[i + 1]; 1020 } 1021 cd->my_tx_free = &cd->my_tx_chain[0]; 1022 cd->my_tx_tail = cd->my_tx_head = NULL; 1023 return (0); 1024 } 1025 1026 /* 1027 * Initialize the RX descriptors and allocate mbufs for them. Note that we 1028 * arrange the descriptors in a closed ring, so that the last descriptor 1029 * points back to the first. 
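 * The my_next links hold physical addresses obtained with vtophys(), since
 * the chip walks the ring by DMA.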
1030 */ 1031 static int 1032 my_list_rx_init(struct my_softc * sc) 1033 { 1034 struct my_chain_data *cd; 1035 struct my_list_data *ld; 1036 int i; 1037 1038 MY_LOCK_ASSERT(sc); 1039 cd = &sc->my_cdata; 1040 ld = sc->my_ldata; 1041 for (i = 0; i < MY_RX_LIST_CNT; i++) { 1042 cd->my_rx_chain[i].my_ptr = 1043 (struct my_desc *) & ld->my_rx_list[i]; 1044 if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS) { 1045 MY_UNLOCK(sc); 1046 return (ENOBUFS); 1047 } 1048 if (i == (MY_RX_LIST_CNT - 1)) { 1049 cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0]; 1050 ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]); 1051 } else { 1052 cd->my_rx_chain[i].my_nextdesc = 1053 &cd->my_rx_chain[i + 1]; 1054 ld->my_rx_list[i].my_next = 1055 vtophys(&ld->my_rx_list[i + 1]); 1056 } 1057 } 1058 cd->my_rx_head = &cd->my_rx_chain[0]; 1059 return (0); 1060 } 1061 1062 /* 1063 * Initialize an RX descriptor and attach an MBUF cluster. 1064 */ 1065 static int 1066 my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c) 1067 { 1068 struct mbuf *m_new = NULL; 1069 1070 MY_LOCK_ASSERT(sc); 1071 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1072 if (m_new == NULL) { 1073 if_printf(sc->my_ifp, 1074 "no memory for rx list -- packet dropped!\n"); 1075 return (ENOBUFS); 1076 } 1077 MCLGET(m_new, M_DONTWAIT); 1078 if (!(m_new->m_flags & M_EXT)) { 1079 if_printf(sc->my_ifp, 1080 "no memory for rx list -- packet dropped!\n"); 1081 m_freem(m_new); 1082 return (ENOBUFS); 1083 } 1084 c->my_mbuf = m_new; 1085 c->my_ptr->my_data = vtophys(mtod(m_new, caddr_t)); 1086 c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift; 1087 c->my_ptr->my_status = MY_OWNByNIC; 1088 return (0); 1089 } 1090 1091 /* 1092 * A frame has been uploaded: pass the resulting mbuf chain up to the higher 1093 * level protocols. 1094 */ 1095 static void 1096 my_rxeof(struct my_softc * sc) 1097 { 1098 struct ether_header *eh; 1099 struct mbuf *m; 1100 struct ifnet *ifp; 1101 struct my_chain_onefrag *cur_rx; 1102 int total_len = 0; 1103 u_int32_t rxstat; 1104 1105 MY_LOCK_ASSERT(sc); 1106 ifp = sc->my_ifp; 1107 while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status) 1108 & MY_OWNByNIC)) { 1109 cur_rx = sc->my_cdata.my_rx_head; 1110 sc->my_cdata.my_rx_head = cur_rx->my_nextdesc; 1111 1112 if (rxstat & MY_ES) { /* error summary: give up this rx pkt */ 1113 ifp->if_ierrors++; 1114 cur_rx->my_ptr->my_status = MY_OWNByNIC; 1115 continue; 1116 } 1117 /* No errors; receive the packet. */ 1118 total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift; 1119 total_len -= ETHER_CRC_LEN; 1120 1121 if (total_len < MINCLSIZE) { 1122 m = m_devget(mtod(cur_rx->my_mbuf, char *), 1123 total_len, 0, ifp, NULL); 1124 cur_rx->my_ptr->my_status = MY_OWNByNIC; 1125 if (m == NULL) { 1126 ifp->if_ierrors++; 1127 continue; 1128 } 1129 } else { 1130 m = cur_rx->my_mbuf; 1131 /* 1132 * Try to conjure up a new mbuf cluster. If that 1133 * fails, it means we have an out of memory condition 1134 * and should leave the buffer in place and continue. 1135 * This will result in a lost packet, but there's 1136 * little else we can do in this situation. 1137 */ 1138 if (my_newbuf(sc, cur_rx) == ENOBUFS) { 1139 ifp->if_ierrors++; 1140 cur_rx->my_ptr->my_status = MY_OWNByNIC; 1141 continue; 1142 } 1143 m->m_pkthdr.rcvif = ifp; 1144 m->m_pkthdr.len = m->m_len = total_len; 1145 } 1146 ifp->if_ipackets++; 1147 eh = mtod(m, struct ether_header *); 1148 #if NBPFILTER > 0 1149 /* 1150 * Handle BPF listeners. 
Let the BPF user see the packet, but 1151 * don't pass it up to the ether_input() layer unless it's a 1152 * broadcast packet, multicast packet, matches our ethernet 1153 * address or the interface is in promiscuous mode. 1154 */ 1155 if (ifp->if_bpf) { 1156 BPF_MTAP(ifp, m); 1157 if (ifp->if_flags & IFF_PROMISC && 1158 (bcmp(eh->ether_dhost, IF_LLADDR(sc->my_ifp), 1159 ETHER_ADDR_LEN) && 1160 (eh->ether_dhost[0] & 1) == 0)) { 1161 m_freem(m); 1162 continue; 1163 } 1164 } 1165 #endif 1166 MY_UNLOCK(sc); 1167 (*ifp->if_input)(ifp, m); 1168 MY_LOCK(sc); 1169 } 1170 return; 1171 } 1172 1173 1174 /* 1175 * A frame was downloaded to the chip. It's safe for us to clean up the list 1176 * buffers. 1177 */ 1178 static void 1179 my_txeof(struct my_softc * sc) 1180 { 1181 struct my_chain *cur_tx; 1182 struct ifnet *ifp; 1183 1184 MY_LOCK_ASSERT(sc); 1185 ifp = sc->my_ifp; 1186 /* Clear the timeout timer. */ 1187 ifp->if_timer = 0; 1188 if (sc->my_cdata.my_tx_head == NULL) { 1189 return; 1190 } 1191 /* 1192 * Go through our tx list and free mbufs for those frames that have 1193 * been transmitted. 1194 */ 1195 while (sc->my_cdata.my_tx_head->my_mbuf != NULL) { 1196 u_int32_t txstat; 1197 1198 cur_tx = sc->my_cdata.my_tx_head; 1199 txstat = MY_TXSTATUS(cur_tx); 1200 if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT) 1201 break; 1202 if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) { 1203 if (txstat & MY_TXERR) { 1204 ifp->if_oerrors++; 1205 if (txstat & MY_EC) /* excessive collision */ 1206 ifp->if_collisions++; 1207 if (txstat & MY_LC) /* late collision */ 1208 ifp->if_collisions++; 1209 } 1210 ifp->if_collisions += (txstat & MY_NCRMASK) >> 1211 MY_NCRShift; 1212 } 1213 ifp->if_opackets++; 1214 m_freem(cur_tx->my_mbuf); 1215 cur_tx->my_mbuf = NULL; 1216 if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) { 1217 sc->my_cdata.my_tx_head = NULL; 1218 sc->my_cdata.my_tx_tail = NULL; 1219 break; 1220 } 1221 sc->my_cdata.my_tx_head = cur_tx->my_nextdesc; 1222 } 1223 if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) { 1224 ifp->if_collisions += (CSR_READ_4(sc, MY_TSR) & MY_NCRMask); 1225 } 1226 return; 1227 } 1228 1229 /* 1230 * TX 'end of channel' interrupt handler. 1231 */ 1232 static void 1233 my_txeoc(struct my_softc * sc) 1234 { 1235 struct ifnet *ifp; 1236 1237 MY_LOCK_ASSERT(sc); 1238 ifp = sc->my_ifp; 1239 ifp->if_timer = 0; 1240 if (sc->my_cdata.my_tx_head == NULL) { 1241 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1242 sc->my_cdata.my_tx_tail = NULL; 1243 if (sc->my_want_auto) 1244 my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1); 1245 } else { 1246 if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) { 1247 MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC; 1248 ifp->if_timer = 5; 1249 CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF); 1250 } 1251 } 1252 return; 1253 } 1254 1255 static void 1256 my_intr(void *arg) 1257 { 1258 struct my_softc *sc; 1259 struct ifnet *ifp; 1260 u_int32_t status; 1261 1262 sc = arg; 1263 MY_LOCK(sc); 1264 ifp = sc->my_ifp; 1265 if (!(ifp->if_flags & IFF_UP)) { 1266 MY_UNLOCK(sc); 1267 return; 1268 } 1269 /* Disable interrupts. 
*/ 1270 CSR_WRITE_4(sc, MY_IMR, 0x00000000); 1271 1272 for (;;) { 1273 status = CSR_READ_4(sc, MY_ISR); 1274 status &= MY_INTRS; 1275 if (status) 1276 CSR_WRITE_4(sc, MY_ISR, status); 1277 else 1278 break; 1279 1280 if (status & MY_RI) /* receive interrupt */ 1281 my_rxeof(sc); 1282 1283 if ((status & MY_RBU) || (status & MY_RxErr)) { 1284 /* rx buffer unavailable or rx error */ 1285 ifp->if_ierrors++; 1286 #ifdef foo 1287 my_stop(sc); 1288 my_reset(sc); 1289 my_init_locked(sc); 1290 #endif 1291 } 1292 if (status & MY_TI) /* tx interrupt */ 1293 my_txeof(sc); 1294 if (status & MY_ETI) /* tx early interrupt */ 1295 my_txeof(sc); 1296 if (status & MY_TBU) /* tx buffer unavailable */ 1297 my_txeoc(sc); 1298 1299 #if 0 /* 90/1/18 delete */ 1300 if (status & MY_FBE) { 1301 my_reset(sc); 1302 my_init_locked(sc); 1303 } 1304 #endif 1305 1306 } 1307 1308 /* Re-enable interrupts. */ 1309 CSR_WRITE_4(sc, MY_IMR, MY_INTRS); 1310 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1311 my_start_locked(ifp); 1312 MY_UNLOCK(sc); 1313 return; 1314 } 1315 1316 /* 1317 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1318 * pointers to the fragment pointers. 1319 */ 1320 static int 1321 my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head) 1322 { 1323 struct my_desc *f = NULL; 1324 int total_len; 1325 struct mbuf *m, *m_new = NULL; 1326 1327 MY_LOCK_ASSERT(sc); 1328 /* calculate the total tx pkt length */ 1329 total_len = 0; 1330 for (m = m_head; m != NULL; m = m->m_next) 1331 total_len += m->m_len; 1332 /* 1333 * Start packing the mbufs in this chain into the fragment pointers. 1334 * Stop when we run out of fragments or hit the end of the mbuf 1335 * chain. 1336 */ 1337 m = m_head; 1338 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1339 if (m_new == NULL) { 1340 if_printf(sc->my_ifp, "no memory for tx list"); 1341 return (1); 1342 } 1343 if (m_head->m_pkthdr.len > MHLEN) { 1344 MCLGET(m_new, M_DONTWAIT); 1345 if (!(m_new->m_flags & M_EXT)) { 1346 m_freem(m_new); 1347 if_printf(sc->my_ifp, "no memory for tx list"); 1348 return (1); 1349 } 1350 } 1351 m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); 1352 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; 1353 m_freem(m_head); 1354 m_head = m_new; 1355 f = &c->my_ptr->my_frag[0]; 1356 f->my_status = 0; 1357 f->my_data = vtophys(mtod(m_new, caddr_t)); 1358 total_len = m_new->m_len; 1359 f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable; 1360 f->my_ctl |= total_len << MY_PKTShift; /* pkt size */ 1361 f->my_ctl |= total_len; /* buffer size */ 1362 /* 89/12/29 add, for mtd891 *//* [ 89? ] */ 1363 if (sc->my_info->my_did == MTD891ID) 1364 f->my_ctl |= MY_ETIControl | MY_RetryTxLC; 1365 c->my_mbuf = m_head; 1366 c->my_lastdesc = 0; 1367 MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]); 1368 return (0); 1369 } 1370 1371 /* 1372 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1373 * to the mbuf data regions directly in the transmit lists. We also save a 1374 * copy of the pointers since the transmit list fragment pointers are 1375 * physical addresses. 
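 * (In its present form my_encap() actually coalesces each outgoing chain into
 * a single freshly allocated mbuf or cluster, so every packet is handed to the
 * chip as one contiguous fragment.)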
 */
static void
my_start(struct ifnet * ifp)
{
	struct my_softc *sc;

	sc = ifp->if_softc;
	MY_LOCK(sc);
	my_start_locked(ifp);
	MY_UNLOCK(sc);
}

static void
my_start_locked(struct ifnet * ifp)
{
	struct my_softc *sc;
	struct mbuf *m_head = NULL;
	struct my_chain *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;
	MY_LOCK_ASSERT(sc);
	if (sc->my_autoneg) {
		sc->my_tx_pend = 1;
		return;
	}
	/*
	 * Check for an available queue slot. If there are none, punt.
	 */
	if (sc->my_cdata.my_tx_free->my_mbuf != NULL) {
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		return;
	}
	start_tx = sc->my_cdata.my_tx_free;
	while (sc->my_cdata.my_tx_free->my_mbuf == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		cur_tx = sc->my_cdata.my_tx_free;
		sc->my_cdata.my_tx_free = cur_tx->my_nextdesc;

		/* Pack the data into the descriptor. */
		my_encap(sc, cur_tx, m_head);

		if (cur_tx != start_tx)
			MY_TXOWN(cur_tx) = MY_OWNByNIC;
#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame to
		 * him.
		 */
		BPF_MTAP(ifp, cur_tx->my_mbuf);
#endif
	}
	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL) {
		return;
	}
	/*
	 * Place the request for the upload interrupt in the last descriptor
	 * in the chain. This way, if we're chaining several packets at once,
	 * we'll only get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	MY_TXCTL(cur_tx) |= MY_TXIC;
	cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC;
	sc->my_cdata.my_tx_tail = cur_tx;
	if (sc->my_cdata.my_tx_head == NULL)
		sc->my_cdata.my_tx_head = start_tx;
	MY_TXOWN(start_tx) = MY_OWNByNIC;
	CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);	/* tx polling demand */

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
	return;
}

static void
my_init(void *xsc)
{
	struct my_softc *sc = xsc;

	MY_LOCK(sc);
	my_init_locked(sc);
	MY_UNLOCK(sc);
}

static void
my_init_locked(struct my_softc *sc)
{
	struct ifnet *ifp = sc->my_ifp;
	u_int16_t phy_bmcr = 0;

	MY_LOCK_ASSERT(sc);
	if (sc->my_autoneg) {
		return;
	}
	if (sc->my_pinfo != NULL)
		phy_bmcr = my_phy_readreg(sc, PHY_BMCR);
	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	my_stop(sc);
	my_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
#if 0	/* 89/9/1 modify, */
	CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512);
	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF);
#endif
	CSR_WRITE_4(sc, MY_BCR, MY_PBL8);
	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512);
	/*
	 * 89/12/29 add, for mtd891,
	 */
	if (sc->my_info->my_did == MTD891ID) {
		MY_SETBIT(sc, MY_BCR, MY_PROG);
		MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced);
	}
	my_setcfg(sc, phy_bmcr);
	/* Init circular RX list. */
	if (my_list_rx_init(sc) == ENOBUFS) {
		if_printf(ifp, "init failed: no memory for rx buffers\n");
		my_stop(sc);
		return;
	}
	/* Init TX descriptors. */
	my_list_tx_init(sc);

	/*
	 * If we want promiscuous mode, set the allframes bit.
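	 * MY_PROM makes the receiver accept every incoming frame; the
	 * multicast hash is still programmed separately by my_setmulti()
	 * below.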
	 */
	if (ifp->if_flags & IFF_PROMISC)
		MY_SETBIT(sc, MY_TCRRCR, MY_PROM);
	else
		MY_CLRBIT(sc, MY_TCRRCR, MY_PROM);

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		MY_SETBIT(sc, MY_TCRRCR, MY_AB);
	else
		MY_CLRBIT(sc, MY_TCRRCR, MY_AB);

	/*
	 * Program the multicast filter, if necessary.
	 */
	my_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	MY_CLRBIT(sc, MY_TCRRCR, MY_RE);
	CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
	CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF);

	/* Enable receiver and transmitter. */
	MY_SETBIT(sc, MY_TCRRCR, MY_RE);
	MY_CLRBIT(sc, MY_TCRRCR, MY_TE);
	CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0]));
	MY_SETBIT(sc, MY_TCRRCR, MY_TE);

	/* Restore state of BMCR */
	if (sc->my_pinfo != NULL)
		my_phy_writereg(sc, PHY_BMCR, phy_bmcr);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	return;
}

/*
 * Set media options.
 */

static int
my_ifmedia_upd(struct ifnet * ifp)
{
	struct my_softc *sc;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	MY_LOCK(sc);
	ifm = &sc->ifmedia;
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
		MY_UNLOCK(sc);
		return (EINVAL);
	}
	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
		my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
	else
		my_setmode_mii(sc, ifm->ifm_media);
	MY_UNLOCK(sc);
	return (0);
}

/*
 * Report current media status.
 */

static void
my_ifmedia_sts(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct my_softc *sc;
	u_int16_t advert = 0, ability = 0;

	sc = ifp->if_softc;
	MY_LOCK(sc);
	ifmr->ifm_active = IFM_ETHER;
	if (!(my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
#if 0	/* this version did not support 1000M, */
		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_1000)
			ifmr->ifm_active = IFM_ETHER | IFM_1000_T;
#endif
		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
			ifmr->ifm_active = IFM_ETHER | IFM_100_TX;
		else
			ifmr->ifm_active = IFM_ETHER | IFM_10_T;
		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		MY_UNLOCK(sc);
		return;
	}
	ability = my_phy_readreg(sc, PHY_LPAR);
	advert = my_phy_readreg(sc, PHY_ANAR);

#if 0	/* this version did not support 1000M, */
	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
		ability2 = my_phy_readreg(sc, PHY_1000SR);
		if (ability2 & PHY_1000SR_1000BTXFULL) {
			advert = 0;
			ability = 0;
			ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
		} else if (ability2 & PHY_1000SR_1000BTXHALF) {
			advert = 0;
			ability = 0;
			ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_HDX;
		}
	}
#endif
	if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4)
		ifmr->ifm_active = IFM_ETHER | IFM_100_T4;
	else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL)
		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF)
		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX;
	else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL)
		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX;
	else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF)
		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX;
	MY_UNLOCK(sc);
	return;
}

static int
my_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct my_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int error;

	switch (command) {
	case SIOCSIFFLAGS:
		MY_LOCK(sc);
		if (ifp->if_flags & IFF_UP)
			my_init_locked(sc);
		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			my_stop(sc);
		MY_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		MY_LOCK(sc);
		my_setmulti(sc);
		MY_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
my_watchdog(struct ifnet * ifp)
{
	struct my_softc *sc;

	sc = ifp->if_softc;
	MY_LOCK(sc);
	if (sc->my_autoneg) {
		my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1);
		MY_UNLOCK(sc);
		return;
	}
	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");
	if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
		if_printf(ifp, "no carrier - transceiver cable problem?\n");
	my_stop(sc);
	my_reset(sc);
	my_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		my_start_locked(ifp);
	MY_UNLOCK(sc);
	return;
}


/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
static void
my_stop(struct my_softc * sc)
{
	register int i;
	struct ifnet *ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	ifp->if_timer = 0;

	MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE));
	CSR_WRITE_4(sc, MY_IMR, 0x00000000);
	CSR_WRITE_4(sc, MY_TXLBA, 0x00000000);
	CSR_WRITE_4(sc, MY_RXLBA, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < MY_RX_LIST_CNT; i++) {
		if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) {
			m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf);
			sc->my_cdata.my_rx_chain[i].my_mbuf = NULL;
		}
	}
	bzero((char *)&sc->my_ldata->my_rx_list,
	    sizeof(sc->my_ldata->my_rx_list));
	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < MY_TX_LIST_CNT; i++) {
		if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) {
			m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf);
			sc->my_cdata.my_tx_chain[i].my_mbuf = NULL;
		}
	}
	bzero((char *)&sc->my_ldata->my_tx_list,
	    sizeof(sc->my_ldata->my_tx_list));
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't get confused
 * by errant DMAs when rebooting.
 */
static void
my_shutdown(device_t dev)
{
	struct my_softc *sc;

	sc = device_get_softc(dev);
	MY_LOCK(sc);
	my_stop(sc);
	MY_UNLOCK(sc);
	return;
}