1 /*- 2 * Written by: yen_cw@myson.com.tw 3 * Copyright (c) 2002 Myson Technology Inc. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions, and the following disclaimer, 11 * without modification, immediately at the beginning of the file. 12 * 2. The name of the author may not be used to endorse or promote products 13 * derived from this software without specific prior written permission. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * Myson fast ethernet PCI NIC driver, available at: http://www.myson.com.tw/ 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/sockio.h> 36 #include <sys/mbuf.h> 37 #include <sys/malloc.h> 38 #include <sys/kernel.h> 39 #include <sys/socket.h> 40 #include <sys/queue.h> 41 #include <sys/types.h> 42 #include <sys/bus.h> 43 #include <sys/module.h> 44 #include <sys/lock.h> 45 #include <sys/mutex.h> 46 47 #define NBPFILTER 1 48 49 #include <net/if.h> 50 #include <net/if_arp.h> 51 #include <net/ethernet.h> 52 #include <net/if_media.h> 53 #include <net/if_types.h> 54 #include <net/if_dl.h> 55 #include <net/bpf.h> 56 57 #include <vm/vm.h> /* for vtophys */ 58 #include <vm/pmap.h> /* for vtophys */ 59 #include <machine/bus.h> 60 #include <machine/resource.h> 61 #include <sys/bus.h> 62 #include <sys/rman.h> 63 64 #include <dev/pci/pcireg.h> 65 #include <dev/pci/pcivar.h> 66 67 /* 68 * #define MY_USEIOSPACE 69 */ 70 71 static int MY_USEIOSPACE = 1; 72 73 #ifdef MY_USEIOSPACE 74 #define MY_RES SYS_RES_IOPORT 75 #define MY_RID MY_PCI_LOIO 76 #else 77 #define MY_RES SYS_RES_MEMORY 78 #define MY_RID MY_PCI_LOMEM 79 #endif 80 81 82 #include <dev/my/if_myreg.h> 83 84 #ifndef lint 85 static const char rcsid[] = 86 "$Id: if_my.c,v 1.16 2003/04/15 06:37:25 mdodd Exp $"; 87 #endif 88 89 /* 90 * Various supported device vendors/types and their names. 91 */ 92 struct my_type *my_info_tmp; 93 static struct my_type my_devs[] = { 94 {MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"}, 95 {MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"}, 96 {MYSONVENDORID, MTD891ID, "Myson MTD89X Based Giga Ethernet Card"}, 97 {0, 0, NULL} 98 }; 99 100 /* 101 * Various supported PHY vendors/types and their names. Note that this driver 102 * will work with pretty much any MII-compliant PHY, so failure to positively 103 * identify the chip is not a fatal error. 
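 *
 * At attach time the PHY is identified by reading PHY_VENID through
 * my_phy_readreg() and walking this table for a matching my_vid; if no
 * entry matches, the catch-all my_phys[PHY_UNKNOWN] entry
 * ("<MII-compliant physical interface>") is used, as my_attach() does below.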
104 */ 105 static struct my_type my_phys[] = { 106 {MysonPHYID0, MysonPHYID0, "<MYSON MTD981>"}, 107 {SeeqPHYID0, SeeqPHYID0, "<SEEQ 80225>"}, 108 {AhdocPHYID0, AhdocPHYID0, "<AHDOC 101>"}, 109 {MarvellPHYID0, MarvellPHYID0, "<MARVELL 88E1000>"}, 110 {LevelOnePHYID0, LevelOnePHYID0, "<LevelOne LXT1000>"}, 111 {0, 0, "<MII-compliant physical interface>"} 112 }; 113 114 static int my_probe(device_t); 115 static int my_attach(device_t); 116 static int my_detach(device_t); 117 static int my_newbuf(struct my_softc *, struct my_chain_onefrag *); 118 static int my_encap(struct my_softc *, struct my_chain *, struct mbuf *); 119 static void my_rxeof(struct my_softc *); 120 static void my_txeof(struct my_softc *); 121 static void my_txeoc(struct my_softc *); 122 static void my_intr(void *); 123 static void my_start(struct ifnet *); 124 static void my_start_locked(struct ifnet *); 125 static int my_ioctl(struct ifnet *, u_long, caddr_t); 126 static void my_init(void *); 127 static void my_init_locked(struct my_softc *); 128 static void my_stop(struct my_softc *); 129 static void my_watchdog(struct ifnet *); 130 static void my_shutdown(device_t); 131 static int my_ifmedia_upd(struct ifnet *); 132 static void my_ifmedia_sts(struct ifnet *, struct ifmediareq *); 133 static u_int16_t my_phy_readreg(struct my_softc *, int); 134 static void my_phy_writereg(struct my_softc *, int, int); 135 static void my_autoneg_xmit(struct my_softc *); 136 static void my_autoneg_mii(struct my_softc *, int, int); 137 static void my_setmode_mii(struct my_softc *, int); 138 static void my_getmode_mii(struct my_softc *); 139 static void my_setcfg(struct my_softc *, int); 140 static void my_setmulti(struct my_softc *); 141 static void my_reset(struct my_softc *); 142 static int my_list_rx_init(struct my_softc *); 143 static int my_list_tx_init(struct my_softc *); 144 static long my_send_cmd_to_phy(struct my_softc *, int, int); 145 146 #define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) 147 #define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) 148 149 static device_method_t my_methods[] = { 150 /* Device interface */ 151 DEVMETHOD(device_probe, my_probe), 152 DEVMETHOD(device_attach, my_attach), 153 DEVMETHOD(device_detach, my_detach), 154 DEVMETHOD(device_shutdown, my_shutdown), 155 156 {0, 0} 157 }; 158 159 static driver_t my_driver = { 160 "my", 161 my_methods, 162 sizeof(struct my_softc) 163 }; 164 165 static devclass_t my_devclass; 166 167 DRIVER_MODULE(my, pci, my_driver, my_devclass, 0, 0); 168 MODULE_DEPEND(my, pci, 1, 1, 1); 169 MODULE_DEPEND(my, ether, 1, 1, 1); 170 171 static long 172 my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad) 173 { 174 long miir; 175 int i; 176 int mask, data; 177 178 MY_LOCK_ASSERT(sc); 179 180 /* enable MII output */ 181 miir = CSR_READ_4(sc, MY_MANAGEMENT); 182 miir &= 0xfffffff0; 183 184 miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO; 185 186 /* send 32 1's preamble */ 187 for (i = 0; i < 32; i++) { 188 /* low MDC; MDO is already high (miir) */ 189 miir &= ~MY_MASK_MIIR_MII_MDC; 190 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 191 192 /* high MDC */ 193 miir |= MY_MASK_MIIR_MII_MDC; 194 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 195 } 196 197 /* calculate ST+OP+PHYAD+REGAD+TA */ 198 data = opcode | (sc->my_phy_addr << 7) | (regad << 2); 199 200 /* sent out */ 201 mask = 0x8000; 202 while (mask) { 203 /* low MDC, prepare MDO */ 204 miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO); 205 if (mask & data) 206 miir |= 
MY_MASK_MIIR_MII_MDO; 207 208 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 209 /* high MDC */ 210 miir |= MY_MASK_MIIR_MII_MDC; 211 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 212 DELAY(30); 213 214 /* next */ 215 mask >>= 1; 216 if (mask == 0x2 && opcode == MY_OP_READ) 217 miir &= ~MY_MASK_MIIR_MII_WRITE; 218 } 219 220 return miir; 221 } 222 223 224 static u_int16_t 225 my_phy_readreg(struct my_softc * sc, int reg) 226 { 227 long miir; 228 int mask, data; 229 230 MY_LOCK_ASSERT(sc); 231 232 if (sc->my_info->my_did == MTD803ID) 233 data = CSR_READ_2(sc, MY_PHYBASE + reg * 2); 234 else { 235 miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg); 236 237 /* read data */ 238 mask = 0x8000; 239 data = 0; 240 while (mask) { 241 /* low MDC */ 242 miir &= ~MY_MASK_MIIR_MII_MDC; 243 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 244 245 /* read MDI */ 246 miir = CSR_READ_4(sc, MY_MANAGEMENT); 247 if (miir & MY_MASK_MIIR_MII_MDI) 248 data |= mask; 249 250 /* high MDC, and wait */ 251 miir |= MY_MASK_MIIR_MII_MDC; 252 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 253 DELAY(30); 254 255 /* next */ 256 mask >>= 1; 257 } 258 259 /* low MDC */ 260 miir &= ~MY_MASK_MIIR_MII_MDC; 261 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 262 } 263 264 return (u_int16_t) data; 265 } 266 267 268 static void 269 my_phy_writereg(struct my_softc * sc, int reg, int data) 270 { 271 long miir; 272 int mask; 273 274 MY_LOCK_ASSERT(sc); 275 276 if (sc->my_info->my_did == MTD803ID) 277 CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data); 278 else { 279 miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg); 280 281 /* write data */ 282 mask = 0x8000; 283 while (mask) { 284 /* low MDC, prepare MDO */ 285 miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO); 286 if (mask & data) 287 miir |= MY_MASK_MIIR_MII_MDO; 288 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 289 DELAY(1); 290 291 /* high MDC */ 292 miir |= MY_MASK_MIIR_MII_MDC; 293 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 294 DELAY(1); 295 296 /* next */ 297 mask >>= 1; 298 } 299 300 /* low MDC */ 301 miir &= ~MY_MASK_MIIR_MII_MDC; 302 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 303 } 304 return; 305 } 306 307 308 /* 309 * Program the 64-bit multicast hash filter. 310 */ 311 static void 312 my_setmulti(struct my_softc * sc) 313 { 314 struct ifnet *ifp; 315 int h = 0; 316 u_int32_t hashes[2] = {0, 0}; 317 struct ifmultiaddr *ifma; 318 u_int32_t rxfilt; 319 int mcnt = 0; 320 321 MY_LOCK_ASSERT(sc); 322 323 ifp = sc->my_ifp; 324 325 rxfilt = CSR_READ_4(sc, MY_TCRRCR); 326 327 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 328 rxfilt |= MY_AM; 329 CSR_WRITE_4(sc, MY_TCRRCR, rxfilt); 330 CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF); 331 CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF); 332 333 return; 334 } 335 /* first, zot all the existing hash bits */ 336 CSR_WRITE_4(sc, MY_MAR0, 0); 337 CSR_WRITE_4(sc, MY_MAR1, 0); 338 339 /* now program new ones */ 340 IF_ADDR_LOCK(ifp); 341 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 342 if (ifma->ifma_addr->sa_family != AF_LINK) 343 continue; 344 h = ~ether_crc32_be(LLADDR((struct sockaddr_dl *) 345 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 346 if (h < 32) 347 hashes[0] |= (1 << h); 348 else 349 hashes[1] |= (1 << (h - 32)); 350 mcnt++; 351 } 352 IF_ADDR_UNLOCK(ifp); 353 354 if (mcnt) 355 rxfilt |= MY_AM; 356 else 357 rxfilt &= ~MY_AM; 358 CSR_WRITE_4(sc, MY_MAR0, hashes[0]); 359 CSR_WRITE_4(sc, MY_MAR1, hashes[1]); 360 CSR_WRITE_4(sc, MY_TCRRCR, rxfilt); 361 return; 362 } 363 364 /* 365 * Initiate an autonegotiation session. 
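 *
 * The sequence used by my_autoneg_xmit() below: write PHY_BMCR_RESET to the
 * BMCR, spin until the reset bit self-clears, then set
 * PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR to restart negotiation.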
366 */ 367 static void 368 my_autoneg_xmit(struct my_softc * sc) 369 { 370 u_int16_t phy_sts = 0; 371 372 MY_LOCK_ASSERT(sc); 373 374 my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET); 375 DELAY(500); 376 while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET); 377 378 phy_sts = my_phy_readreg(sc, PHY_BMCR); 379 phy_sts |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR; 380 my_phy_writereg(sc, PHY_BMCR, phy_sts); 381 382 return; 383 } 384 385 386 /* 387 * Invoke autonegotiation on a PHY. 388 */ 389 static void 390 my_autoneg_mii(struct my_softc * sc, int flag, int verbose) 391 { 392 u_int16_t phy_sts = 0, media, advert, ability; 393 u_int16_t ability2 = 0; 394 struct ifnet *ifp; 395 struct ifmedia *ifm; 396 397 MY_LOCK_ASSERT(sc); 398 399 ifm = &sc->ifmedia; 400 ifp = sc->my_ifp; 401 402 ifm->ifm_media = IFM_ETHER | IFM_AUTO; 403 404 #ifndef FORCE_AUTONEG_TFOUR 405 /* 406 * First, see if autoneg is supported. If not, there's no point in 407 * continuing. 408 */ 409 phy_sts = my_phy_readreg(sc, PHY_BMSR); 410 if (!(phy_sts & PHY_BMSR_CANAUTONEG)) { 411 if (verbose) 412 device_printf(sc->my_dev, 413 "autonegotiation not supported\n"); 414 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX; 415 return; 416 } 417 #endif 418 switch (flag) { 419 case MY_FLAG_FORCEDELAY: 420 /* 421 * XXX Never use this option anywhere but in the probe 422 * routine: making the kernel stop dead in its tracks for 423 * three whole seconds after we've gone multi-user is really 424 * bad manners. 425 */ 426 my_autoneg_xmit(sc); 427 DELAY(5000000); 428 break; 429 case MY_FLAG_SCHEDDELAY: 430 /* 431 * Wait for the transmitter to go idle before starting an 432 * autoneg session, otherwise my_start() may clobber our 433 * timeout, and we don't want to allow transmission during an 434 * autoneg session since that can screw it up. 435 */ 436 if (sc->my_cdata.my_tx_head != NULL) { 437 sc->my_want_auto = 1; 438 MY_UNLOCK(sc); 439 return; 440 } 441 my_autoneg_xmit(sc); 442 ifp->if_timer = 5; 443 sc->my_autoneg = 1; 444 sc->my_want_auto = 0; 445 return; 446 case MY_FLAG_DELAYTIMEO: 447 ifp->if_timer = 0; 448 sc->my_autoneg = 0; 449 break; 450 default: 451 device_printf(sc->my_dev, "invalid autoneg flag: %d\n", flag); 452 return; 453 } 454 455 if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) { 456 if (verbose) 457 device_printf(sc->my_dev, "autoneg complete, "); 458 phy_sts = my_phy_readreg(sc, PHY_BMSR); 459 } else { 460 if (verbose) 461 device_printf(sc->my_dev, "autoneg not complete, "); 462 } 463 464 media = my_phy_readreg(sc, PHY_BMCR); 465 466 /* Link is good. Report modes and set duplex mode. */ 467 if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) { 468 if (verbose) 469 device_printf(sc->my_dev, "link status good. 
"); 470 advert = my_phy_readreg(sc, PHY_ANAR); 471 ability = my_phy_readreg(sc, PHY_LPAR); 472 if ((sc->my_pinfo->my_vid == MarvellPHYID0) || 473 (sc->my_pinfo->my_vid == LevelOnePHYID0)) { 474 ability2 = my_phy_readreg(sc, PHY_1000SR); 475 if (ability2 & PHY_1000SR_1000BTXFULL) { 476 advert = 0; 477 ability = 0; 478 /* 479 * this version did not support 1000M, 480 * ifm->ifm_media = 481 * IFM_ETHER|IFM_1000_T|IFM_FDX; 482 */ 483 ifm->ifm_media = 484 IFM_ETHER | IFM_100_TX | IFM_FDX; 485 media &= ~PHY_BMCR_SPEEDSEL; 486 media |= PHY_BMCR_1000; 487 media |= PHY_BMCR_DUPLEX; 488 printf("(full-duplex, 1000Mbps)\n"); 489 } else if (ability2 & PHY_1000SR_1000BTXHALF) { 490 advert = 0; 491 ability = 0; 492 /* 493 * this version did not support 1000M, 494 * ifm->ifm_media = IFM_ETHER|IFM_1000_T; 495 */ 496 ifm->ifm_media = IFM_ETHER | IFM_100_TX; 497 media &= ~PHY_BMCR_SPEEDSEL; 498 media &= ~PHY_BMCR_DUPLEX; 499 media |= PHY_BMCR_1000; 500 printf("(half-duplex, 1000Mbps)\n"); 501 } 502 } 503 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) { 504 ifm->ifm_media = IFM_ETHER | IFM_100_T4; 505 media |= PHY_BMCR_SPEEDSEL; 506 media &= ~PHY_BMCR_DUPLEX; 507 printf("(100baseT4)\n"); 508 } else if (advert & PHY_ANAR_100BTXFULL && 509 ability & PHY_ANAR_100BTXFULL) { 510 ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX; 511 media |= PHY_BMCR_SPEEDSEL; 512 media |= PHY_BMCR_DUPLEX; 513 printf("(full-duplex, 100Mbps)\n"); 514 } else if (advert & PHY_ANAR_100BTXHALF && 515 ability & PHY_ANAR_100BTXHALF) { 516 ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX; 517 media |= PHY_BMCR_SPEEDSEL; 518 media &= ~PHY_BMCR_DUPLEX; 519 printf("(half-duplex, 100Mbps)\n"); 520 } else if (advert & PHY_ANAR_10BTFULL && 521 ability & PHY_ANAR_10BTFULL) { 522 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX; 523 media &= ~PHY_BMCR_SPEEDSEL; 524 media |= PHY_BMCR_DUPLEX; 525 printf("(full-duplex, 10Mbps)\n"); 526 } else if (advert) { 527 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX; 528 media &= ~PHY_BMCR_SPEEDSEL; 529 media &= ~PHY_BMCR_DUPLEX; 530 printf("(half-duplex, 10Mbps)\n"); 531 } 532 media &= ~PHY_BMCR_AUTONEGENBL; 533 534 /* Set ASIC's duplex mode to match the PHY. */ 535 my_phy_writereg(sc, PHY_BMCR, media); 536 my_setcfg(sc, media); 537 } else { 538 if (verbose) 539 device_printf(sc->my_dev, "no carrier\n"); 540 } 541 542 my_init_locked(sc); 543 if (sc->my_tx_pend) { 544 sc->my_autoneg = 0; 545 sc->my_tx_pend = 0; 546 my_start_locked(ifp); 547 } 548 return; 549 } 550 551 /* 552 * To get PHY ability. 
 */
static void
my_getmode_mii(struct my_softc * sc)
{
	u_int16_t bmsr;
	struct ifnet *ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	bmsr = my_phy_readreg(sc, PHY_BMSR);
	if (bootverbose)
		device_printf(sc->my_dev, "PHY status word: %x\n", bmsr);

	/* fallback */
	sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;

	if (bmsr & PHY_BMSR_10BTHALF) {
		if (bootverbose)
			device_printf(sc->my_dev,
			    "10Mbps half-duplex mode supported\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX,
		    0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
	}
	if (bmsr & PHY_BMSR_10BTFULL) {
		if (bootverbose)
			device_printf(sc->my_dev,
			    "10Mbps full-duplex mode supported\n");

		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
	}
	if (bmsr & PHY_BMSR_100BTXHALF) {
		if (bootverbose)
			device_printf(sc->my_dev,
			    "100Mbps half-duplex mode supported\n");
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX,
		    0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
	}
	if (bmsr & PHY_BMSR_100BTXFULL) {
		if (bootverbose)
			device_printf(sc->my_dev,
			    "100Mbps full-duplex mode supported\n");
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
	}
	/* Some also support 100BaseT4. */
	if (bmsr & PHY_BMSR_100BT4) {
		if (bootverbose)
			device_printf(sc->my_dev, "100baseT4 mode supported\n");
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4;
#ifdef FORCE_AUTONEG_TFOUR
		if (bootverbose)
			device_printf(sc->my_dev,
			    "forcing on autoneg support for BT4\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
#endif
	}
#if 0				/* this version did not support 1000M, */
	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
		if (bootverbose)
			device_printf(sc->my_dev,
			    "1000Mbps half-duplex mode supported\n");

		ifp->if_baudrate = 1000000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX,
		    0, NULL);
		if (bootverbose)
			device_printf(sc->my_dev,
			    "1000Mbps full-duplex mode supported\n");
		ifp->if_baudrate = 1000000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX;
	}
#endif
	if (bmsr & PHY_BMSR_CANAUTONEG) {
		if (bootverbose)
			device_printf(sc->my_dev, "autoneg supported\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
	}
	return;
}

/*
 * Set speed and duplex mode.
 */
static void
my_setmode_mii(struct my_softc * sc, int media)
{
	u_int16_t bmcr;
	struct ifnet *ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	/*
	 * If an autoneg session is in progress, stop it.
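	 * Cancellation just clears the pending autoneg state and the
	 * PHY_BMCR_AUTONEGENBL bit before the forced speed/duplex bits are
	 * programmed below.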
661 */ 662 if (sc->my_autoneg) { 663 device_printf(sc->my_dev, "canceling autoneg session\n"); 664 ifp->if_timer = sc->my_autoneg = sc->my_want_auto = 0; 665 bmcr = my_phy_readreg(sc, PHY_BMCR); 666 bmcr &= ~PHY_BMCR_AUTONEGENBL; 667 my_phy_writereg(sc, PHY_BMCR, bmcr); 668 } 669 device_printf(sc->my_dev, "selecting MII, "); 670 bmcr = my_phy_readreg(sc, PHY_BMCR); 671 bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 | 672 PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK); 673 674 #if 0 /* this version did not support 1000M, */ 675 if (IFM_SUBTYPE(media) == IFM_1000_T) { 676 printf("1000Mbps/T4, half-duplex\n"); 677 bmcr &= ~PHY_BMCR_SPEEDSEL; 678 bmcr &= ~PHY_BMCR_DUPLEX; 679 bmcr |= PHY_BMCR_1000; 680 } 681 #endif 682 if (IFM_SUBTYPE(media) == IFM_100_T4) { 683 printf("100Mbps/T4, half-duplex\n"); 684 bmcr |= PHY_BMCR_SPEEDSEL; 685 bmcr &= ~PHY_BMCR_DUPLEX; 686 } 687 if (IFM_SUBTYPE(media) == IFM_100_TX) { 688 printf("100Mbps, "); 689 bmcr |= PHY_BMCR_SPEEDSEL; 690 } 691 if (IFM_SUBTYPE(media) == IFM_10_T) { 692 printf("10Mbps, "); 693 bmcr &= ~PHY_BMCR_SPEEDSEL; 694 } 695 if ((media & IFM_GMASK) == IFM_FDX) { 696 printf("full duplex\n"); 697 bmcr |= PHY_BMCR_DUPLEX; 698 } else { 699 printf("half duplex\n"); 700 bmcr &= ~PHY_BMCR_DUPLEX; 701 } 702 my_phy_writereg(sc, PHY_BMCR, bmcr); 703 my_setcfg(sc, bmcr); 704 return; 705 } 706 707 /* 708 * The Myson manual states that in order to fiddle with the 'full-duplex' and 709 * '100Mbps' bits in the netconfig register, we first have to put the 710 * transmit and/or receive logic in the idle state. 711 */ 712 static void 713 my_setcfg(struct my_softc * sc, int bmcr) 714 { 715 int i, restart = 0; 716 717 MY_LOCK_ASSERT(sc); 718 if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) { 719 restart = 1; 720 MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE)); 721 for (i = 0; i < MY_TIMEOUT; i++) { 722 DELAY(10); 723 if (!(CSR_READ_4(sc, MY_TCRRCR) & 724 (MY_TXRUN | MY_RXRUN))) 725 break; 726 } 727 if (i == MY_TIMEOUT) 728 device_printf(sc->my_dev, 729 "failed to force tx and rx to idle \n"); 730 } 731 MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000); 732 MY_CLRBIT(sc, MY_TCRRCR, MY_PS10); 733 if (bmcr & PHY_BMCR_1000) 734 MY_SETBIT(sc, MY_TCRRCR, MY_PS1000); 735 else if (!(bmcr & PHY_BMCR_SPEEDSEL)) 736 MY_SETBIT(sc, MY_TCRRCR, MY_PS10); 737 if (bmcr & PHY_BMCR_DUPLEX) 738 MY_SETBIT(sc, MY_TCRRCR, MY_FD); 739 else 740 MY_CLRBIT(sc, MY_TCRRCR, MY_FD); 741 if (restart) 742 MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE); 743 return; 744 } 745 746 static void 747 my_reset(struct my_softc * sc) 748 { 749 register int i; 750 751 MY_LOCK_ASSERT(sc); 752 MY_SETBIT(sc, MY_BCR, MY_SWR); 753 for (i = 0; i < MY_TIMEOUT; i++) { 754 DELAY(10); 755 if (!(CSR_READ_4(sc, MY_BCR) & MY_SWR)) 756 break; 757 } 758 if (i == MY_TIMEOUT) 759 device_printf(sc->my_dev, "reset never completed!\n"); 760 761 /* Wait a little while for the chip to get its brains in order. */ 762 DELAY(1000); 763 return; 764 } 765 766 /* 767 * Probe for a Myson chip. Check the PCI vendor and device IDs against our 768 * list and return a device name if we find a match. 769 */ 770 static int 771 my_probe(device_t dev) 772 { 773 struct my_type *t; 774 775 t = my_devs; 776 while (t->my_name != NULL) { 777 if ((pci_get_vendor(dev) == t->my_vid) && 778 (pci_get_device(dev) == t->my_did)) { 779 device_set_desc(dev, t->my_name); 780 my_info_tmp = t; 781 return (BUS_PROBE_DEFAULT); 782 } 783 t++; 784 } 785 return (ENXIO); 786 } 787 788 /* 789 * Attach the interface. 
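 * The overall order below: enable bus mastering, map the I/O or memory
 * resource, allocate the IRQ, reset the chip, read the station address from
 * MY_PAR0, allocate and align the descriptor list, create the ifnet,
 * identify the PHY, set up ifmedia, call ether_ifattach() and finally hook
 * the interrupt with bus_setup_intr().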
Allocate softc structures, do ifmedia setup and 790 * ethernet/BPF attach. 791 */ 792 static int 793 my_attach(device_t dev) 794 { 795 int i; 796 u_char eaddr[ETHER_ADDR_LEN]; 797 u_int32_t iobase; 798 struct my_softc *sc; 799 struct ifnet *ifp; 800 int media = IFM_ETHER | IFM_100_TX | IFM_FDX; 801 unsigned int round; 802 caddr_t roundptr; 803 struct my_type *p; 804 u_int16_t phy_vid, phy_did, phy_sts = 0; 805 int rid, error = 0; 806 807 sc = device_get_softc(dev); 808 sc->my_dev = dev; 809 mtx_init(&sc->my_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 810 MTX_DEF); 811 812 /* 813 * Map control/status registers. 814 */ 815 pci_enable_busmaster(dev); 816 817 if (my_info_tmp->my_did == MTD800ID) { 818 iobase = pci_read_config(dev, MY_PCI_LOIO, 4); 819 if (iobase & 0x300) 820 MY_USEIOSPACE = 0; 821 } 822 823 rid = MY_RID; 824 sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE); 825 826 if (sc->my_res == NULL) { 827 device_printf(dev, "couldn't map ports/memory\n"); 828 error = ENXIO; 829 goto destroy_mutex; 830 } 831 sc->my_btag = rman_get_bustag(sc->my_res); 832 sc->my_bhandle = rman_get_bushandle(sc->my_res); 833 834 rid = 0; 835 sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 836 RF_SHAREABLE | RF_ACTIVE); 837 838 if (sc->my_irq == NULL) { 839 device_printf(dev, "couldn't map interrupt\n"); 840 error = ENXIO; 841 goto release_io; 842 } 843 844 sc->my_info = my_info_tmp; 845 846 /* Reset the adapter. */ 847 MY_LOCK(sc); 848 my_reset(sc); 849 MY_UNLOCK(sc); 850 851 /* 852 * Get station address 853 */ 854 for (i = 0; i < ETHER_ADDR_LEN; ++i) 855 eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i); 856 857 sc->my_ldata_ptr = malloc(sizeof(struct my_list_data) + 8, 858 M_DEVBUF, M_NOWAIT); 859 if (sc->my_ldata_ptr == NULL) { 860 device_printf(dev, "no memory for list buffers!\n"); 861 error = ENXIO; 862 goto release_irq; 863 } 864 sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr; 865 round = (uintptr_t)sc->my_ldata_ptr & 0xF; 866 roundptr = sc->my_ldata_ptr; 867 for (i = 0; i < 8; i++) { 868 if (round % 8) { 869 round++; 870 roundptr++; 871 } else 872 break; 873 } 874 sc->my_ldata = (struct my_list_data *) roundptr; 875 bzero(sc->my_ldata, sizeof(struct my_list_data)); 876 877 ifp = sc->my_ifp = if_alloc(IFT_ETHER); 878 if (ifp == NULL) { 879 device_printf(dev, "can not if_alloc()\n"); 880 error = ENOSPC; 881 goto free_ldata; 882 } 883 ifp->if_softc = sc; 884 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 885 ifp->if_mtu = ETHERMTU; 886 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 887 ifp->if_ioctl = my_ioctl; 888 ifp->if_start = my_start; 889 ifp->if_watchdog = my_watchdog; 890 ifp->if_init = my_init; 891 ifp->if_baudrate = 10000000; 892 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); 893 ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; 894 IFQ_SET_READY(&ifp->if_snd); 895 896 if (sc->my_info->my_did == MTD803ID) 897 sc->my_pinfo = my_phys; 898 else { 899 if (bootverbose) 900 device_printf(dev, "probing for a PHY\n"); 901 MY_LOCK(sc); 902 for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) { 903 if (bootverbose) 904 device_printf(dev, "checking address: %d\n", i); 905 sc->my_phy_addr = i; 906 phy_sts = my_phy_readreg(sc, PHY_BMSR); 907 if ((phy_sts != 0) && (phy_sts != 0xffff)) 908 break; 909 else 910 phy_sts = 0; 911 } 912 if (phy_sts) { 913 phy_vid = my_phy_readreg(sc, PHY_VENID); 914 phy_did = my_phy_readreg(sc, PHY_DEVID); 915 if (bootverbose) { 916 device_printf(dev, "found PHY at address %d, ", 917 sc->my_phy_addr); 918 printf("vendor id: %x 
device id: %x\n", 919 phy_vid, phy_did); 920 } 921 p = my_phys; 922 while (p->my_vid) { 923 if (phy_vid == p->my_vid) { 924 sc->my_pinfo = p; 925 break; 926 } 927 p++; 928 } 929 if (sc->my_pinfo == NULL) 930 sc->my_pinfo = &my_phys[PHY_UNKNOWN]; 931 if (bootverbose) 932 device_printf(dev, "PHY type: %s\n", 933 sc->my_pinfo->my_name); 934 } else { 935 MY_UNLOCK(sc); 936 device_printf(dev, "MII without any phy!\n"); 937 error = ENXIO; 938 goto free_if; 939 } 940 MY_UNLOCK(sc); 941 } 942 943 /* Do ifmedia setup. */ 944 ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts); 945 MY_LOCK(sc); 946 my_getmode_mii(sc); 947 my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1); 948 media = sc->ifmedia.ifm_media; 949 my_stop(sc); 950 MY_UNLOCK(sc); 951 ifmedia_set(&sc->ifmedia, media); 952 953 ether_ifattach(ifp, eaddr); 954 955 error = bus_setup_intr(dev, sc->my_irq, INTR_TYPE_NET | INTR_MPSAFE, 956 NULL, my_intr, sc, &sc->my_intrhand); 957 958 if (error) { 959 device_printf(dev, "couldn't set up irq\n"); 960 goto detach_if; 961 } 962 963 return (0); 964 965 detach_if: 966 ether_ifdetach(ifp); 967 free_if: 968 if_free(ifp); 969 free_ldata: 970 free(sc->my_ldata_ptr, M_DEVBUF); 971 release_irq: 972 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq); 973 release_io: 974 bus_release_resource(dev, MY_RES, MY_RID, sc->my_res); 975 destroy_mutex: 976 mtx_destroy(&sc->my_mtx); 977 return (error); 978 } 979 980 static int 981 my_detach(device_t dev) 982 { 983 struct my_softc *sc; 984 struct ifnet *ifp; 985 986 sc = device_get_softc(dev); 987 MY_LOCK(sc); 988 my_stop(sc); 989 MY_UNLOCK(sc); 990 bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand); 991 992 ifp = sc->my_ifp; 993 ether_ifdetach(ifp); 994 if_free(ifp); 995 free(sc->my_ldata_ptr, M_DEVBUF); 996 997 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq); 998 bus_release_resource(dev, MY_RES, MY_RID, sc->my_res); 999 mtx_destroy(&sc->my_mtx); 1000 return (0); 1001 } 1002 1003 1004 /* 1005 * Initialize the transmit descriptors. 1006 */ 1007 static int 1008 my_list_tx_init(struct my_softc * sc) 1009 { 1010 struct my_chain_data *cd; 1011 struct my_list_data *ld; 1012 int i; 1013 1014 MY_LOCK_ASSERT(sc); 1015 cd = &sc->my_cdata; 1016 ld = sc->my_ldata; 1017 for (i = 0; i < MY_TX_LIST_CNT; i++) { 1018 cd->my_tx_chain[i].my_ptr = &ld->my_tx_list[i]; 1019 if (i == (MY_TX_LIST_CNT - 1)) 1020 cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[0]; 1021 else 1022 cd->my_tx_chain[i].my_nextdesc = 1023 &cd->my_tx_chain[i + 1]; 1024 } 1025 cd->my_tx_free = &cd->my_tx_chain[0]; 1026 cd->my_tx_tail = cd->my_tx_head = NULL; 1027 return (0); 1028 } 1029 1030 /* 1031 * Initialize the RX descriptors and allocate mbufs for them. Note that we 1032 * arrange the descriptors in a closed ring, so that the last descriptor 1033 * points back to the first. 
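 *
 * Each my_rx_list[i].my_next holds the vtophys() address of descriptor
 * i + 1 (the last one points back at descriptor 0), and my_newbuf() hands
 * every descriptor to the chip by setting MY_OWNByNIC in its status word.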
 */
static int
my_list_rx_init(struct my_softc * sc)
{
	struct my_chain_data *cd;
	struct my_list_data *ld;
	int i;

	MY_LOCK_ASSERT(sc);
	cd = &sc->my_cdata;
	ld = sc->my_ldata;
	for (i = 0; i < MY_RX_LIST_CNT; i++) {
		cd->my_rx_chain[i].my_ptr =
		    (struct my_desc *) & ld->my_rx_list[i];
		if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS)
			return (ENOBUFS);
		if (i == (MY_RX_LIST_CNT - 1)) {
			cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0];
			ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]);
		} else {
			cd->my_rx_chain[i].my_nextdesc =
			    &cd->my_rx_chain[i + 1];
			ld->my_rx_list[i].my_next =
			    vtophys(&ld->my_rx_list[i + 1]);
		}
	}
	cd->my_rx_head = &cd->my_rx_chain[0];
	return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c)
{
	struct mbuf *m_new = NULL;

	MY_LOCK_ASSERT(sc);
	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL) {
		device_printf(sc->my_dev,
		    "no memory for rx list -- packet dropped!\n");
		return (ENOBUFS);
	}
	MCLGET(m_new, M_DONTWAIT);
	if (!(m_new->m_flags & M_EXT)) {
		device_printf(sc->my_dev,
		    "no memory for rx list -- packet dropped!\n");
		m_freem(m_new);
		return (ENOBUFS);
	}
	c->my_mbuf = m_new;
	c->my_ptr->my_data = vtophys(mtod(m_new, caddr_t));
	c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift;
	c->my_ptr->my_status = MY_OWNByNIC;
	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to the higher
 * level protocols.
 */
static void
my_rxeof(struct my_softc * sc)
{
	struct ether_header *eh;
	struct mbuf *m;
	struct ifnet *ifp;
	struct my_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status)
	    & MY_OWNByNIC)) {
		cur_rx = sc->my_cdata.my_rx_head;
		sc->my_cdata.my_rx_head = cur_rx->my_nextdesc;

		if (rxstat & MY_ES) {	/* error summary: give up this rx pkt */
			ifp->if_ierrors++;
			cur_rx->my_ptr->my_status = MY_OWNByNIC;
			continue;
		}
		/* No errors; receive the packet. */
		total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift;
		total_len -= ETHER_CRC_LEN;

		if (total_len < MINCLSIZE) {
			m = m_devget(mtod(cur_rx->my_mbuf, char *),
			    total_len, 0, ifp, NULL);
			cur_rx->my_ptr->my_status = MY_OWNByNIC;
			if (m == NULL) {
				ifp->if_ierrors++;
				continue;
			}
		} else {
			m = cur_rx->my_mbuf;
			/*
			 * Try to conjure up a new mbuf cluster. If that
			 * fails, it means we have an out of memory condition
			 * and should leave the buffer in place and continue.
			 * This will result in a lost packet, but there's
			 * little else we can do in this situation.
			 */
			if (my_newbuf(sc, cur_rx) == ENOBUFS) {
				ifp->if_ierrors++;
				cur_rx->my_ptr->my_status = MY_OWNByNIC;
				continue;
			}
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}
		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.
Let the BPF user see the packet, but 1155 * don't pass it up to the ether_input() layer unless it's a 1156 * broadcast packet, multicast packet, matches our ethernet 1157 * address or the interface is in promiscuous mode. 1158 */ 1159 if (bpf_peers_present(ifp->if_bpf)) { 1160 bpf_mtap(ifp->if_bpf, m); 1161 if (ifp->if_flags & IFF_PROMISC && 1162 (bcmp(eh->ether_dhost, IF_LLADDR(sc->my_ifp), 1163 ETHER_ADDR_LEN) && 1164 (eh->ether_dhost[0] & 1) == 0)) { 1165 m_freem(m); 1166 continue; 1167 } 1168 } 1169 #endif 1170 MY_UNLOCK(sc); 1171 (*ifp->if_input)(ifp, m); 1172 MY_LOCK(sc); 1173 } 1174 return; 1175 } 1176 1177 1178 /* 1179 * A frame was downloaded to the chip. It's safe for us to clean up the list 1180 * buffers. 1181 */ 1182 static void 1183 my_txeof(struct my_softc * sc) 1184 { 1185 struct my_chain *cur_tx; 1186 struct ifnet *ifp; 1187 1188 MY_LOCK_ASSERT(sc); 1189 ifp = sc->my_ifp; 1190 /* Clear the timeout timer. */ 1191 ifp->if_timer = 0; 1192 if (sc->my_cdata.my_tx_head == NULL) { 1193 return; 1194 } 1195 /* 1196 * Go through our tx list and free mbufs for those frames that have 1197 * been transmitted. 1198 */ 1199 while (sc->my_cdata.my_tx_head->my_mbuf != NULL) { 1200 u_int32_t txstat; 1201 1202 cur_tx = sc->my_cdata.my_tx_head; 1203 txstat = MY_TXSTATUS(cur_tx); 1204 if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT) 1205 break; 1206 if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) { 1207 if (txstat & MY_TXERR) { 1208 ifp->if_oerrors++; 1209 if (txstat & MY_EC) /* excessive collision */ 1210 ifp->if_collisions++; 1211 if (txstat & MY_LC) /* late collision */ 1212 ifp->if_collisions++; 1213 } 1214 ifp->if_collisions += (txstat & MY_NCRMASK) >> 1215 MY_NCRShift; 1216 } 1217 ifp->if_opackets++; 1218 m_freem(cur_tx->my_mbuf); 1219 cur_tx->my_mbuf = NULL; 1220 if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) { 1221 sc->my_cdata.my_tx_head = NULL; 1222 sc->my_cdata.my_tx_tail = NULL; 1223 break; 1224 } 1225 sc->my_cdata.my_tx_head = cur_tx->my_nextdesc; 1226 } 1227 if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) { 1228 ifp->if_collisions += (CSR_READ_4(sc, MY_TSR) & MY_NCRMask); 1229 } 1230 return; 1231 } 1232 1233 /* 1234 * TX 'end of channel' interrupt handler. 1235 */ 1236 static void 1237 my_txeoc(struct my_softc * sc) 1238 { 1239 struct ifnet *ifp; 1240 1241 MY_LOCK_ASSERT(sc); 1242 ifp = sc->my_ifp; 1243 ifp->if_timer = 0; 1244 if (sc->my_cdata.my_tx_head == NULL) { 1245 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1246 sc->my_cdata.my_tx_tail = NULL; 1247 if (sc->my_want_auto) 1248 my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1); 1249 } else { 1250 if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) { 1251 MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC; 1252 ifp->if_timer = 5; 1253 CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF); 1254 } 1255 } 1256 return; 1257 } 1258 1259 static void 1260 my_intr(void *arg) 1261 { 1262 struct my_softc *sc; 1263 struct ifnet *ifp; 1264 u_int32_t status; 1265 1266 sc = arg; 1267 MY_LOCK(sc); 1268 ifp = sc->my_ifp; 1269 if (!(ifp->if_flags & IFF_UP)) { 1270 MY_UNLOCK(sc); 1271 return; 1272 } 1273 /* Disable interrupts. 
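	 * The handler masks all sources via MY_IMR, then loops reading and
	 * acknowledging MY_ISR until no MY_INTRS bits remain, and re-enables
	 * MY_INTRS on the way out.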
*/ 1274 CSR_WRITE_4(sc, MY_IMR, 0x00000000); 1275 1276 for (;;) { 1277 status = CSR_READ_4(sc, MY_ISR); 1278 status &= MY_INTRS; 1279 if (status) 1280 CSR_WRITE_4(sc, MY_ISR, status); 1281 else 1282 break; 1283 1284 if (status & MY_RI) /* receive interrupt */ 1285 my_rxeof(sc); 1286 1287 if ((status & MY_RBU) || (status & MY_RxErr)) { 1288 /* rx buffer unavailable or rx error */ 1289 ifp->if_ierrors++; 1290 #ifdef foo 1291 my_stop(sc); 1292 my_reset(sc); 1293 my_init_locked(sc); 1294 #endif 1295 } 1296 if (status & MY_TI) /* tx interrupt */ 1297 my_txeof(sc); 1298 if (status & MY_ETI) /* tx early interrupt */ 1299 my_txeof(sc); 1300 if (status & MY_TBU) /* tx buffer unavailable */ 1301 my_txeoc(sc); 1302 1303 #if 0 /* 90/1/18 delete */ 1304 if (status & MY_FBE) { 1305 my_reset(sc); 1306 my_init_locked(sc); 1307 } 1308 #endif 1309 1310 } 1311 1312 /* Re-enable interrupts. */ 1313 CSR_WRITE_4(sc, MY_IMR, MY_INTRS); 1314 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1315 my_start_locked(ifp); 1316 MY_UNLOCK(sc); 1317 return; 1318 } 1319 1320 /* 1321 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1322 * pointers to the fragment pointers. 1323 */ 1324 static int 1325 my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head) 1326 { 1327 struct my_desc *f = NULL; 1328 int total_len; 1329 struct mbuf *m, *m_new = NULL; 1330 1331 MY_LOCK_ASSERT(sc); 1332 /* calculate the total tx pkt length */ 1333 total_len = 0; 1334 for (m = m_head; m != NULL; m = m->m_next) 1335 total_len += m->m_len; 1336 /* 1337 * Start packing the mbufs in this chain into the fragment pointers. 1338 * Stop when we run out of fragments or hit the end of the mbuf 1339 * chain. 1340 */ 1341 m = m_head; 1342 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1343 if (m_new == NULL) { 1344 device_printf(sc->my_dev, "no memory for tx list"); 1345 return (1); 1346 } 1347 if (m_head->m_pkthdr.len > MHLEN) { 1348 MCLGET(m_new, M_DONTWAIT); 1349 if (!(m_new->m_flags & M_EXT)) { 1350 m_freem(m_new); 1351 device_printf(sc->my_dev, "no memory for tx list"); 1352 return (1); 1353 } 1354 } 1355 m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); 1356 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; 1357 m_freem(m_head); 1358 m_head = m_new; 1359 f = &c->my_ptr->my_frag[0]; 1360 f->my_status = 0; 1361 f->my_data = vtophys(mtod(m_new, caddr_t)); 1362 total_len = m_new->m_len; 1363 f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable; 1364 f->my_ctl |= total_len << MY_PKTShift; /* pkt size */ 1365 f->my_ctl |= total_len; /* buffer size */ 1366 /* 89/12/29 add, for mtd891 *//* [ 89? ] */ 1367 if (sc->my_info->my_did == MTD891ID) 1368 f->my_ctl |= MY_ETIControl | MY_RetryTxLC; 1369 c->my_mbuf = m_head; 1370 c->my_lastdesc = 0; 1371 MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]); 1372 return (0); 1373 } 1374 1375 /* 1376 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1377 * to the mbuf data regions directly in the transmit lists. We also save a 1378 * copy of the pointers since the transmit list fragment pointers are 1379 * physical addresses. 
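 *
 * In practice my_encap() coalesces each outgoing packet into a single
 * freshly allocated mbuf (or cluster) with m_copydata() and programs
 * vtophys() of that one buffer into my_frag[0], so only one fragment per
 * descriptor is actually used.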
1380 */ 1381 static void 1382 my_start(struct ifnet * ifp) 1383 { 1384 struct my_softc *sc; 1385 1386 sc = ifp->if_softc; 1387 MY_LOCK(sc); 1388 my_start_locked(ifp); 1389 MY_UNLOCK(sc); 1390 } 1391 1392 static void 1393 my_start_locked(struct ifnet * ifp) 1394 { 1395 struct my_softc *sc; 1396 struct mbuf *m_head = NULL; 1397 struct my_chain *cur_tx = NULL, *start_tx; 1398 1399 sc = ifp->if_softc; 1400 MY_LOCK_ASSERT(sc); 1401 if (sc->my_autoneg) { 1402 sc->my_tx_pend = 1; 1403 return; 1404 } 1405 /* 1406 * Check for an available queue slot. If there are none, punt. 1407 */ 1408 if (sc->my_cdata.my_tx_free->my_mbuf != NULL) { 1409 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1410 return; 1411 } 1412 start_tx = sc->my_cdata.my_tx_free; 1413 while (sc->my_cdata.my_tx_free->my_mbuf == NULL) { 1414 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1415 if (m_head == NULL) 1416 break; 1417 1418 /* Pick a descriptor off the free list. */ 1419 cur_tx = sc->my_cdata.my_tx_free; 1420 sc->my_cdata.my_tx_free = cur_tx->my_nextdesc; 1421 1422 /* Pack the data into the descriptor. */ 1423 my_encap(sc, cur_tx, m_head); 1424 1425 if (cur_tx != start_tx) 1426 MY_TXOWN(cur_tx) = MY_OWNByNIC; 1427 #if NBPFILTER > 0 1428 /* 1429 * If there's a BPF listener, bounce a copy of this frame to 1430 * him. 1431 */ 1432 BPF_MTAP(ifp, cur_tx->my_mbuf); 1433 #endif 1434 } 1435 /* 1436 * If there are no packets queued, bail. 1437 */ 1438 if (cur_tx == NULL) { 1439 return; 1440 } 1441 /* 1442 * Place the request for the upload interrupt in the last descriptor 1443 * in the chain. This way, if we're chaining several packets at once, 1444 * we'll only get an interupt once for the whole chain rather than 1445 * once for each packet. 1446 */ 1447 MY_TXCTL(cur_tx) |= MY_TXIC; 1448 cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC; 1449 sc->my_cdata.my_tx_tail = cur_tx; 1450 if (sc->my_cdata.my_tx_head == NULL) 1451 sc->my_cdata.my_tx_head = start_tx; 1452 MY_TXOWN(start_tx) = MY_OWNByNIC; 1453 CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF); /* tx polling demand */ 1454 1455 /* 1456 * Set a timeout in case the chip goes out to lunch. 1457 */ 1458 ifp->if_timer = 5; 1459 return; 1460 } 1461 1462 static void 1463 my_init(void *xsc) 1464 { 1465 struct my_softc *sc = xsc; 1466 1467 MY_LOCK(sc); 1468 my_init_locked(sc); 1469 MY_UNLOCK(sc); 1470 } 1471 1472 static void 1473 my_init_locked(struct my_softc *sc) 1474 { 1475 struct ifnet *ifp = sc->my_ifp; 1476 u_int16_t phy_bmcr = 0; 1477 1478 MY_LOCK_ASSERT(sc); 1479 if (sc->my_autoneg) { 1480 return; 1481 } 1482 if (sc->my_pinfo != NULL) 1483 phy_bmcr = my_phy_readreg(sc, PHY_BMCR); 1484 /* 1485 * Cancel pending I/O and free all RX/TX buffers. 1486 */ 1487 my_stop(sc); 1488 my_reset(sc); 1489 1490 /* 1491 * Set cache alignment and burst length. 1492 */ 1493 #if 0 /* 89/9/1 modify, */ 1494 CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512); 1495 CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF); 1496 #endif 1497 CSR_WRITE_4(sc, MY_BCR, MY_PBL8); 1498 CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512); 1499 /* 1500 * 89/12/29 add, for mtd891, 1501 */ 1502 if (sc->my_info->my_did == MTD891ID) { 1503 MY_SETBIT(sc, MY_BCR, MY_PROG); 1504 MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced); 1505 } 1506 my_setcfg(sc, phy_bmcr); 1507 /* Init circular RX list. */ 1508 if (my_list_rx_init(sc) == ENOBUFS) { 1509 device_printf(sc->my_dev, "init failed: no memory for rx buffers\n"); 1510 my_stop(sc); 1511 return; 1512 } 1513 /* Init TX descriptors. */ 1514 my_list_tx_init(sc); 1515 1516 /* If we want promiscuous mode, set the allframes bit. 
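	 * Receive filter bits in MY_TCRRCR: MY_PROM accepts every frame,
	 * MY_AB accepts broadcasts, and MY_AM (managed by my_setmulti())
	 * enables the MAR0/MAR1 multicast hash filter.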
*/ 1517 if (ifp->if_flags & IFF_PROMISC) 1518 MY_SETBIT(sc, MY_TCRRCR, MY_PROM); 1519 else 1520 MY_CLRBIT(sc, MY_TCRRCR, MY_PROM); 1521 1522 /* 1523 * Set capture broadcast bit to capture broadcast frames. 1524 */ 1525 if (ifp->if_flags & IFF_BROADCAST) 1526 MY_SETBIT(sc, MY_TCRRCR, MY_AB); 1527 else 1528 MY_CLRBIT(sc, MY_TCRRCR, MY_AB); 1529 1530 /* 1531 * Program the multicast filter, if necessary. 1532 */ 1533 my_setmulti(sc); 1534 1535 /* 1536 * Load the address of the RX list. 1537 */ 1538 MY_CLRBIT(sc, MY_TCRRCR, MY_RE); 1539 CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0])); 1540 1541 /* 1542 * Enable interrupts. 1543 */ 1544 CSR_WRITE_4(sc, MY_IMR, MY_INTRS); 1545 CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF); 1546 1547 /* Enable receiver and transmitter. */ 1548 MY_SETBIT(sc, MY_TCRRCR, MY_RE); 1549 MY_CLRBIT(sc, MY_TCRRCR, MY_TE); 1550 CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0])); 1551 MY_SETBIT(sc, MY_TCRRCR, MY_TE); 1552 1553 /* Restore state of BMCR */ 1554 if (sc->my_pinfo != NULL) 1555 my_phy_writereg(sc, PHY_BMCR, phy_bmcr); 1556 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1557 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1558 return; 1559 } 1560 1561 /* 1562 * Set media options. 1563 */ 1564 1565 static int 1566 my_ifmedia_upd(struct ifnet * ifp) 1567 { 1568 struct my_softc *sc; 1569 struct ifmedia *ifm; 1570 1571 sc = ifp->if_softc; 1572 MY_LOCK(sc); 1573 ifm = &sc->ifmedia; 1574 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { 1575 MY_UNLOCK(sc); 1576 return (EINVAL); 1577 } 1578 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) 1579 my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1); 1580 else 1581 my_setmode_mii(sc, ifm->ifm_media); 1582 MY_UNLOCK(sc); 1583 return (0); 1584 } 1585 1586 /* 1587 * Report current media status. 1588 */ 1589 1590 static void 1591 my_ifmedia_sts(struct ifnet * ifp, struct ifmediareq * ifmr) 1592 { 1593 struct my_softc *sc; 1594 u_int16_t advert = 0, ability = 0; 1595 1596 sc = ifp->if_softc; 1597 MY_LOCK(sc); 1598 ifmr->ifm_active = IFM_ETHER; 1599 if (!(my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) { 1600 #if 0 /* this version did not support 1000M, */ 1601 if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_1000) 1602 ifmr->ifm_active = IFM_ETHER | IFM_1000TX; 1603 #endif 1604 if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL) 1605 ifmr->ifm_active = IFM_ETHER | IFM_100_TX; 1606 else 1607 ifmr->ifm_active = IFM_ETHER | IFM_10_T; 1608 if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX) 1609 ifmr->ifm_active |= IFM_FDX; 1610 else 1611 ifmr->ifm_active |= IFM_HDX; 1612 1613 MY_UNLOCK(sc); 1614 return; 1615 } 1616 ability = my_phy_readreg(sc, PHY_LPAR); 1617 advert = my_phy_readreg(sc, PHY_ANAR); 1618 1619 #if 0 /* this version did not support 1000M, */ 1620 if (sc->my_pinfo->my_vid = MarvellPHYID0) { 1621 ability2 = my_phy_readreg(sc, PHY_1000SR); 1622 if (ability2 & PHY_1000SR_1000BTXFULL) { 1623 advert = 0; 1624 ability = 0; 1625 ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_FDX; 1626 } else if (ability & PHY_1000SR_1000BTXHALF) { 1627 advert = 0; 1628 ability = 0; 1629 ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_HDX; 1630 } 1631 } 1632 #endif 1633 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) 1634 ifmr->ifm_active = IFM_ETHER | IFM_100_T4; 1635 else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL) 1636 ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 1637 else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF) 1638 ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX; 1639 else if (advert & 
	    PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL)
		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX;
	else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF)
		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX;
	MY_UNLOCK(sc);
	return;
}

static int
my_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct my_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int error;

	switch (command) {
	case SIOCSIFFLAGS:
		MY_LOCK(sc);
		if (ifp->if_flags & IFF_UP)
			my_init_locked(sc);
		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			my_stop(sc);
		MY_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		MY_LOCK(sc);
		my_setmulti(sc);
		MY_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
my_watchdog(struct ifnet * ifp)
{
	struct my_softc *sc;

	sc = ifp->if_softc;
	MY_LOCK(sc);
	if (sc->my_autoneg) {
		my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1);
		MY_UNLOCK(sc);
		return;
	}
	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");
	if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
		if_printf(ifp, "no carrier - transceiver cable problem?\n");
	my_stop(sc);
	my_reset(sc);
	my_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		my_start_locked(ifp);
	MY_UNLOCK(sc);
	return;
}


/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
static void
my_stop(struct my_softc * sc)
{
	register int i;
	struct ifnet *ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	ifp->if_timer = 0;

	MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE));
	CSR_WRITE_4(sc, MY_IMR, 0x00000000);
	CSR_WRITE_4(sc, MY_TXLBA, 0x00000000);
	CSR_WRITE_4(sc, MY_RXLBA, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < MY_RX_LIST_CNT; i++) {
		if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) {
			m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf);
			sc->my_cdata.my_rx_chain[i].my_mbuf = NULL;
		}
	}
	bzero((char *)&sc->my_ldata->my_rx_list,
	    sizeof(sc->my_ldata->my_rx_list));
	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < MY_TX_LIST_CNT; i++) {
		if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) {
			m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf);
			sc->my_cdata.my_tx_chain[i].my_mbuf = NULL;
		}
	}
	bzero((char *)&sc->my_ldata->my_tx_list,
	    sizeof(sc->my_ldata->my_tx_list));
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't get confused
 * by errant DMAs when rebooting.
 */
static void
my_shutdown(device_t dev)
{
	struct my_softc *sc;

	sc = device_get_softc(dev);
	MY_LOCK(sc);
	my_stop(sc);
	MY_UNLOCK(sc);
	return;
}
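
/*
 * Illustrative sketch (not compiled into the driver): how my_setmulti()
 * above derives a bit in the 64-bit MAR0/MAR1 hash filter from a multicast
 * address.  The top six bits of the complemented big-endian CRC32 select
 * one of the 64 filter positions.  The helper name below is made up for
 * illustration only.
 */
#if 0
static void
my_hash_example(const u_int8_t *maddr, u_int32_t hashes[2])
{
	int h;

	/* Complement the CRC, keep its top six bits: a value in 0..63. */
	h = ~ether_crc32_be(maddr, ETHER_ADDR_LEN) >> 26;
	if (h < 32)
		hashes[0] |= (1 << h);		/* lands in MY_MAR0 */
	else
		hashes[1] |= (1 << (h - 32));	/* lands in MY_MAR1 */
}
#endif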