/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Written by: yen_cw@myson.com.tw
 * Copyright (c) 2002 Myson Technology Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Myson fast ethernet PCI NIC driver, available at: http://www.myson.com.tw/
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#define NBPFILTER	1

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/bpf.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

/*
 * #define MY_USEIOSPACE
 */

static int MY_USEIOSPACE = 1;

#ifdef MY_USEIOSPACE
#define MY_RES		SYS_RES_IOPORT
#define MY_RID		MY_PCI_LOIO
#else
#define MY_RES		SYS_RES_MEMORY
#define MY_RID		MY_PCI_LOMEM
#endif

#include <dev/my/if_myreg.h>

/*
 * Various supported device vendors/types and their names.
 */
struct my_type *my_info_tmp;
static struct my_type my_devs[] = {
	{MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"},
	{MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"},
	{MYSONVENDORID, MTD891ID, "Myson MTD89X Based Giga Ethernet Card"},
	{0, 0, NULL}
};

/*
 * Various supported PHY vendors/types and their names.  Note that this driver
 * will work with pretty much any MII-compliant PHY, so failure to positively
 * identify the chip is not a fatal error.
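 * An unrecognized PHY simply falls back to the generic "MII-compliant
 * physical interface" entry at the end of the my_phys[] table below.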
 */
static struct my_type my_phys[] = {
	{MysonPHYID0, MysonPHYID0, "<MYSON MTD981>"},
	{SeeqPHYID0, SeeqPHYID0, "<SEEQ 80225>"},
	{AhdocPHYID0, AhdocPHYID0, "<AHDOC 101>"},
	{MarvellPHYID0, MarvellPHYID0, "<MARVELL 88E1000>"},
	{LevelOnePHYID0, LevelOnePHYID0, "<LevelOne LXT1000>"},
	{0, 0, "<MII-compliant physical interface>"}
};

static int my_probe(device_t);
static int my_attach(device_t);
static int my_detach(device_t);
static int my_newbuf(struct my_softc *, struct my_chain_onefrag *);
static int my_encap(struct my_softc *, struct my_chain *, struct mbuf *);
static void my_rxeof(struct my_softc *);
static void my_txeof(struct my_softc *);
static void my_txeoc(struct my_softc *);
static void my_intr(void *);
static void my_start(struct ifnet *);
static void my_start_locked(struct ifnet *);
static int my_ioctl(struct ifnet *, u_long, caddr_t);
static void my_init(void *);
static void my_init_locked(struct my_softc *);
static void my_stop(struct my_softc *);
static void my_autoneg_timeout(void *);
static void my_watchdog(void *);
static int my_shutdown(device_t);
static int my_ifmedia_upd(struct ifnet *);
static void my_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static u_int16_t my_phy_readreg(struct my_softc *, int);
static void my_phy_writereg(struct my_softc *, int, int);
static void my_autoneg_xmit(struct my_softc *);
static void my_autoneg_mii(struct my_softc *, int, int);
static void my_setmode_mii(struct my_softc *, int);
static void my_getmode_mii(struct my_softc *);
static void my_setcfg(struct my_softc *, int);
static void my_setmulti(struct my_softc *);
static void my_reset(struct my_softc *);
static int my_list_rx_init(struct my_softc *);
static int my_list_tx_init(struct my_softc *);
static long my_send_cmd_to_phy(struct my_softc *, int, int);

#define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
#define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

static device_method_t my_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, my_probe),
	DEVMETHOD(device_attach, my_attach),
	DEVMETHOD(device_detach, my_detach),
	DEVMETHOD(device_shutdown, my_shutdown),

	DEVMETHOD_END
};

static driver_t my_driver = {
	"my",
	my_methods,
	sizeof(struct my_softc)
};

static devclass_t my_devclass;

DRIVER_MODULE(my, pci, my_driver, my_devclass, 0, 0);
MODULE_DEPEND(my, pci, 1, 1, 1);
MODULE_DEPEND(my, ether, 1, 1, 1);

static long
my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad)
{
	long miir;
	int i;
	int mask, data;

	MY_LOCK_ASSERT(sc);

	/* enable MII output */
	miir = CSR_READ_4(sc, MY_MANAGEMENT);
	miir &= 0xfffffff0;

	miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO;

	/* send 32 1's preamble */
	for (i = 0; i < 32; i++) {
		/* low MDC; MDO is already high (miir) */
		miir &= ~MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);

		/* high MDC */
		miir |= MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
	}

	/* calculate ST+OP+PHYAD+REGAD+TA */
	data = opcode | (sc->my_phy_addr << 7) | (regad << 2);

	/* send out */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
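		/*
		 * Drive MDO with the current command bit while MDC is low;
		 * the writes below then clock it out on the rising edge of
		 * MDC.
		 */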
		if (mask & data)
			miir |= MY_MASK_MIIR_MII_MDO;

		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
		/* high MDC */
		miir |= MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
		DELAY(30);

		/* next */
		mask >>= 1;
		if (mask == 0x2 && opcode == MY_OP_READ)
			miir &= ~MY_MASK_MIIR_MII_WRITE;
	}

	return miir;
}

static u_int16_t
my_phy_readreg(struct my_softc * sc, int reg)
{
	long miir;
	int mask, data;

	MY_LOCK_ASSERT(sc);

	if (sc->my_info->my_did == MTD803ID)
		data = CSR_READ_2(sc, MY_PHYBASE + reg * 2);
	else {
		miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg);

		/* read data */
		mask = 0x8000;
		data = 0;
		while (mask) {
			/* low MDC */
			miir &= ~MY_MASK_MIIR_MII_MDC;
			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);

			/* read MDI */
			miir = CSR_READ_4(sc, MY_MANAGEMENT);
			if (miir & MY_MASK_MIIR_MII_MDI)
				data |= mask;

			/* high MDC, and wait */
			miir |= MY_MASK_MIIR_MII_MDC;
			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
			DELAY(30);

			/* next */
			mask >>= 1;
		}

		/* low MDC */
		miir &= ~MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
	}

	return (u_int16_t) data;
}

static void
my_phy_writereg(struct my_softc * sc, int reg, int data)
{
	long miir;
	int mask;

	MY_LOCK_ASSERT(sc);

	if (sc->my_info->my_did == MTD803ID)
		CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data);
	else {
		miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg);

		/* write data */
		mask = 0x8000;
		while (mask) {
			/* low MDC, prepare MDO */
			miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
			if (mask & data)
				miir |= MY_MASK_MIIR_MII_MDO;
			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
			DELAY(1);

			/* high MDC */
			miir |= MY_MASK_MIIR_MII_MDC;
			CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
			DELAY(1);

			/* next */
			mask >>= 1;
		}

		/* low MDC */
		miir &= ~MY_MASK_MIIR_MII_MDC;
		CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
	}
	return;
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
my_setmulti(struct my_softc * sc)
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = {0, 0};
	struct ifmultiaddr *ifma;
	u_int32_t rxfilt;
	int mcnt = 0;

	MY_LOCK_ASSERT(sc);

	ifp = sc->my_ifp;

	rxfilt = CSR_READ_4(sc, MY_TCRRCR);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= MY_AM;
		CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
		CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF);

		return;
	}
	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, MY_MAR0, 0);
	CSR_WRITE_4(sc, MY_MAR1, 0);

	/* now program new ones */
	if_maddr_rlock(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ~ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}
	if_maddr_runlock(ifp);

	if (mcnt)
		rxfilt |= MY_AM;
	else
		rxfilt &= ~MY_AM;
	CSR_WRITE_4(sc, MY_MAR0, hashes[0]);
	CSR_WRITE_4(sc, MY_MAR1, hashes[1]);
	CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
	return;
}

/*
 * Initiate an autonegotiation session.
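 * The PHY is reset first; once the reset bit self-clears, the autoneg
 * enable and restart bits are set in BMCR to kick off negotiation.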
 */
static void
my_autoneg_xmit(struct my_softc * sc)
{
	u_int16_t phy_sts = 0;

	MY_LOCK_ASSERT(sc);

	my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
	DELAY(500);
	while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET);

	phy_sts = my_phy_readreg(sc, PHY_BMCR);
	phy_sts |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR;
	my_phy_writereg(sc, PHY_BMCR, phy_sts);

	return;
}

static void
my_autoneg_timeout(void *arg)
{
	struct my_softc *sc;

	sc = arg;
	MY_LOCK_ASSERT(sc);
	my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1);
}

/*
 * Invoke autonegotiation on a PHY.
 */
static void
my_autoneg_mii(struct my_softc * sc, int flag, int verbose)
{
	u_int16_t phy_sts = 0, media, advert, ability;
	u_int16_t ability2 = 0;
	struct ifnet *ifp;
	struct ifmedia *ifm;

	MY_LOCK_ASSERT(sc);

	ifm = &sc->ifmedia;
	ifp = sc->my_ifp;

	ifm->ifm_media = IFM_ETHER | IFM_AUTO;

#ifndef FORCE_AUTONEG_TFOUR
	/*
	 * First, see if autoneg is supported. If not, there's no point in
	 * continuing.
	 */
	phy_sts = my_phy_readreg(sc, PHY_BMSR);
	if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
		if (verbose)
			device_printf(sc->my_dev,
			    "autonegotiation not supported\n");
		ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
		return;
	}
#endif
	switch (flag) {
	case MY_FLAG_FORCEDELAY:
		/*
		 * XXX Never use this option anywhere but in the probe
		 * routine: making the kernel stop dead in its tracks for
		 * five whole seconds after we've gone multi-user is really
		 * bad manners.
		 */
		my_autoneg_xmit(sc);
		DELAY(5000000);
		break;
	case MY_FLAG_SCHEDDELAY:
		/*
		 * Wait for the transmitter to go idle before starting an
		 * autoneg session, otherwise my_start() may clobber our
		 * timeout, and we don't want to allow transmission during an
		 * autoneg session since that can screw it up.
		 */
		if (sc->my_cdata.my_tx_head != NULL) {
			sc->my_want_auto = 1;
			MY_UNLOCK(sc);
			return;
		}
		my_autoneg_xmit(sc);
		callout_reset(&sc->my_autoneg_timer, hz * 5, my_autoneg_timeout,
		    sc);
		sc->my_autoneg = 1;
		sc->my_want_auto = 0;
		return;
	case MY_FLAG_DELAYTIMEO:
		callout_stop(&sc->my_autoneg_timer);
		sc->my_autoneg = 0;
		break;
	default:
		device_printf(sc->my_dev, "invalid autoneg flag: %d\n", flag);
		return;
	}

	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
		if (verbose)
			device_printf(sc->my_dev, "autoneg complete, ");
		phy_sts = my_phy_readreg(sc, PHY_BMSR);
	} else {
		if (verbose)
			device_printf(sc->my_dev, "autoneg not complete, ");
	}

	media = my_phy_readreg(sc, PHY_BMCR);

	/* Link is good. Report modes and set duplex mode. */
	if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
		if (verbose)
			device_printf(sc->my_dev, "link status good. ");
		advert = my_phy_readreg(sc, PHY_ANAR);
		ability = my_phy_readreg(sc, PHY_LPAR);
		if ((sc->my_pinfo->my_vid == MarvellPHYID0) ||
		    (sc->my_pinfo->my_vid == LevelOnePHYID0)) {
			ability2 = my_phy_readreg(sc, PHY_1000SR);
			if (ability2 & PHY_1000SR_1000BTXFULL) {
				advert = 0;
				ability = 0;
				/*
				 * this version did not support 1000M,
				 * ifm->ifm_media =
				 * IFM_ETHER|IFM_1000_T|IFM_FDX;
				 */
				ifm->ifm_media =
				    IFM_ETHER | IFM_100_TX | IFM_FDX;
				media &= ~PHY_BMCR_SPEEDSEL;
				media |= PHY_BMCR_1000;
				media |= PHY_BMCR_DUPLEX;
				printf("(full-duplex, 1000Mbps)\n");
			} else if (ability2 & PHY_1000SR_1000BTXHALF) {
				advert = 0;
				ability = 0;
				/*
				 * this version did not support 1000M,
				 * ifm->ifm_media = IFM_ETHER|IFM_1000_T;
				 */
				ifm->ifm_media = IFM_ETHER | IFM_100_TX;
				media &= ~PHY_BMCR_SPEEDSEL;
				media &= ~PHY_BMCR_DUPLEX;
				media |= PHY_BMCR_1000;
				printf("(half-duplex, 1000Mbps)\n");
			}
		}
		if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
			ifm->ifm_media = IFM_ETHER | IFM_100_T4;
			media |= PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(100baseT4)\n");
		} else if (advert & PHY_ANAR_100BTXFULL &&
		    ability & PHY_ANAR_100BTXFULL) {
			ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
			media |= PHY_BMCR_SPEEDSEL;
			media |= PHY_BMCR_DUPLEX;
			printf("(full-duplex, 100Mbps)\n");
		} else if (advert & PHY_ANAR_100BTXHALF &&
		    ability & PHY_ANAR_100BTXHALF) {
			ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
			media |= PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(half-duplex, 100Mbps)\n");
		} else if (advert & PHY_ANAR_10BTFULL &&
		    ability & PHY_ANAR_10BTFULL) {
			ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
			media &= ~PHY_BMCR_SPEEDSEL;
			media |= PHY_BMCR_DUPLEX;
			printf("(full-duplex, 10Mbps)\n");
		} else if (advert) {
			ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
			media &= ~PHY_BMCR_SPEEDSEL;
			media &= ~PHY_BMCR_DUPLEX;
			printf("(half-duplex, 10Mbps)\n");
		}
		media &= ~PHY_BMCR_AUTONEGENBL;

		/* Set ASIC's duplex mode to match the PHY. */
		my_phy_writereg(sc, PHY_BMCR, media);
		my_setcfg(sc, media);
	} else {
		if (verbose)
			device_printf(sc->my_dev, "no carrier\n");
	}

	my_init_locked(sc);
	if (sc->my_tx_pend) {
		sc->my_autoneg = 0;
		sc->my_tx_pend = 0;
		my_start_locked(ifp);
	}
	return;
}

/*
 * Get the PHY's abilities.
 */
static void
my_getmode_mii(struct my_softc * sc)
{
	u_int16_t bmsr;
	struct ifnet *ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	bmsr = my_phy_readreg(sc, PHY_BMSR);
	if (bootverbose)
		device_printf(sc->my_dev, "PHY status word: %x\n", bmsr);

	/* fallback */
	sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;

	if (bmsr & PHY_BMSR_10BTHALF) {
		if (bootverbose)
			device_printf(sc->my_dev,
			    "10Mbps half-duplex mode supported\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX,
		    0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
	}
	if (bmsr & PHY_BMSR_10BTFULL) {
		if (bootverbose)
			device_printf(sc->my_dev,
			    "10Mbps full-duplex mode supported\n");

		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
	}
	if (bmsr & PHY_BMSR_100BTXHALF) {
		if (bootverbose)
			device_printf(sc->my_dev,
			    "100Mbps half-duplex mode supported\n");
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX,
		    0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
	}
	if (bmsr & PHY_BMSR_100BTXFULL) {
		if (bootverbose)
			device_printf(sc->my_dev,
			    "100Mbps full-duplex mode supported\n");
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
	}
	/* Some also support 100BaseT4. */
	if (bmsr & PHY_BMSR_100BT4) {
		if (bootverbose)
			device_printf(sc->my_dev, "100baseT4 mode supported\n");
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4;
#ifdef FORCE_AUTONEG_TFOUR
		if (bootverbose)
			device_printf(sc->my_dev,
			    "forcing on autoneg support for BT4\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
#endif
	}
#if 0				/* this version did not support 1000M, */
	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
		if (bootverbose)
			device_printf(sc->my_dev,
			    "1000Mbps half-duplex mode supported\n");

		ifp->if_baudrate = 1000000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX,
		    0, NULL);
		if (bootverbose)
			device_printf(sc->my_dev,
			    "1000Mbps full-duplex mode supported\n");
		ifp->if_baudrate = 1000000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX;
	}
#endif
	if (bmsr & PHY_BMSR_CANAUTONEG) {
		if (bootverbose)
			device_printf(sc->my_dev, "autoneg supported\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
	}
	return;
}

/*
 * Set speed and duplex mode.
 */
static void
my_setmode_mii(struct my_softc * sc, int media)
{
	u_int16_t bmcr;

	MY_LOCK_ASSERT(sc);
	/*
	 * If an autoneg session is in progress, stop it.
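	 * (The autoneg enable bit is cleared in BMCR so that the forced
	 * speed and duplex written below actually take effect.)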
	 */
	if (sc->my_autoneg) {
		device_printf(sc->my_dev, "canceling autoneg session\n");
		callout_stop(&sc->my_autoneg_timer);
		sc->my_autoneg = sc->my_want_auto = 0;
		bmcr = my_phy_readreg(sc, PHY_BMCR);
		bmcr &= ~PHY_BMCR_AUTONEGENBL;
		my_phy_writereg(sc, PHY_BMCR, bmcr);
	}
	device_printf(sc->my_dev, "selecting MII, ");
	bmcr = my_phy_readreg(sc, PHY_BMCR);
	bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 |
	    PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK);

#if 0				/* this version did not support 1000M, */
	if (IFM_SUBTYPE(media) == IFM_1000_T) {
		printf("1000Mbps/T4, half-duplex\n");
		bmcr &= ~PHY_BMCR_SPEEDSEL;
		bmcr &= ~PHY_BMCR_DUPLEX;
		bmcr |= PHY_BMCR_1000;
	}
#endif
	if (IFM_SUBTYPE(media) == IFM_100_T4) {
		printf("100Mbps/T4, half-duplex\n");
		bmcr |= PHY_BMCR_SPEEDSEL;
		bmcr &= ~PHY_BMCR_DUPLEX;
	}
	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		printf("100Mbps, ");
		bmcr |= PHY_BMCR_SPEEDSEL;
	}
	if (IFM_SUBTYPE(media) == IFM_10_T) {
		printf("10Mbps, ");
		bmcr &= ~PHY_BMCR_SPEEDSEL;
	}
	if ((media & IFM_GMASK) == IFM_FDX) {
		printf("full duplex\n");
		bmcr |= PHY_BMCR_DUPLEX;
	} else {
		printf("half duplex\n");
		bmcr &= ~PHY_BMCR_DUPLEX;
	}
	my_phy_writereg(sc, PHY_BMCR, bmcr);
	my_setcfg(sc, bmcr);
	return;
}

/*
 * The Myson manual states that in order to fiddle with the 'full-duplex' and
 * '100Mbps' bits in the netconfig register, we first have to put the
 * transmit and/or receive logic in the idle state.
 */
static void
my_setcfg(struct my_softc * sc, int bmcr)
{
	int i, restart = 0;

	MY_LOCK_ASSERT(sc);
	if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) {
		restart = 1;
		MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE));
		for (i = 0; i < MY_TIMEOUT; i++) {
			DELAY(10);
			if (!(CSR_READ_4(sc, MY_TCRRCR) &
			    (MY_TXRUN | MY_RXRUN)))
				break;
		}
		if (i == MY_TIMEOUT)
			device_printf(sc->my_dev,
			    "failed to force tx and rx to idle\n");
	}
	MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000);
	MY_CLRBIT(sc, MY_TCRRCR, MY_PS10);
	if (bmcr & PHY_BMCR_1000)
		MY_SETBIT(sc, MY_TCRRCR, MY_PS1000);
	else if (!(bmcr & PHY_BMCR_SPEEDSEL))
		MY_SETBIT(sc, MY_TCRRCR, MY_PS10);
	if (bmcr & PHY_BMCR_DUPLEX)
		MY_SETBIT(sc, MY_TCRRCR, MY_FD);
	else
		MY_CLRBIT(sc, MY_TCRRCR, MY_FD);
	if (restart)
		MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE);
	return;
}

static void
my_reset(struct my_softc * sc)
{
	int i;

	MY_LOCK_ASSERT(sc);
	MY_SETBIT(sc, MY_BCR, MY_SWR);
	for (i = 0; i < MY_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, MY_BCR) & MY_SWR))
			break;
	}
	if (i == MY_TIMEOUT)
		device_printf(sc->my_dev, "reset never completed!\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
	return;
}

/*
 * Probe for a Myson chip. Check the PCI vendor and device IDs against our
 * list and return a device name if we find a match.
 */
static int
my_probe(device_t dev)
{
	struct my_type *t;

	t = my_devs;
	while (t->my_name != NULL) {
		if ((pci_get_vendor(dev) == t->my_vid) &&
		    (pci_get_device(dev) == t->my_did)) {
			device_set_desc(dev, t->my_name);
			my_info_tmp = t;
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}
	return (ENXIO);
}

/*
 * Attach the interface.
 * Allocate softc structures, do ifmedia setup and ethernet/BPF attach.
 */
static int
my_attach(device_t dev)
{
	int i;
	u_char eaddr[ETHER_ADDR_LEN];
	u_int32_t iobase;
	struct my_softc *sc;
	struct ifnet *ifp;
	int media = IFM_ETHER | IFM_100_TX | IFM_FDX;
	unsigned int round;
	caddr_t roundptr;
	struct my_type *p;
	u_int16_t phy_vid, phy_did, phy_sts = 0;
	int rid, error = 0;

	sc = device_get_softc(dev);
	sc->my_dev = dev;
	mtx_init(&sc->my_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->my_autoneg_timer, &sc->my_mtx, 0);
	callout_init_mtx(&sc->my_watchdog, &sc->my_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	if (my_info_tmp->my_did == MTD800ID) {
		iobase = pci_read_config(dev, MY_PCI_LOIO, 4);
		if (iobase & 0x300)
			MY_USEIOSPACE = 0;
	}

	rid = MY_RID;
	sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE);

	if (sc->my_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto destroy_mutex;
	}
	sc->my_btag = rman_get_bustag(sc->my_res);
	sc->my_bhandle = rman_get_bushandle(sc->my_res);

	rid = 0;
	sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->my_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto release_io;
	}

	sc->my_info = my_info_tmp;

	/* Reset the adapter. */
	MY_LOCK(sc);
	my_reset(sc);
	MY_UNLOCK(sc);

	/*
	 * Get station address
	 */
	for (i = 0; i < ETHER_ADDR_LEN; ++i)
		eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i);

	sc->my_ldata_ptr = malloc(sizeof(struct my_list_data) + 8,
	    M_DEVBUF, M_NOWAIT);
	if (sc->my_ldata_ptr == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto release_irq;
	}
	sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr;
	round = (uintptr_t)sc->my_ldata_ptr & 0xF;
	roundptr = sc->my_ldata_ptr;
	for (i = 0; i < 8; i++) {
		if (round % 8) {
			round++;
			roundptr++;
		} else
			break;
	}
	sc->my_ldata = (struct my_list_data *) roundptr;
	bzero(sc->my_ldata, sizeof(struct my_list_data));

	ifp = sc->my_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto free_ldata;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = my_ioctl;
	ifp->if_start = my_start;
	ifp->if_init = my_init;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->my_info->my_did == MTD803ID)
		sc->my_pinfo = my_phys;
	else {
		if (bootverbose)
			device_printf(dev, "probing for a PHY\n");
		MY_LOCK(sc);
		for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) {
			if (bootverbose)
				device_printf(dev, "checking address: %d\n", i);
			sc->my_phy_addr = i;
			phy_sts = my_phy_readreg(sc, PHY_BMSR);
			if ((phy_sts != 0) && (phy_sts != 0xffff))
				break;
			else
				phy_sts = 0;
		}
		if (phy_sts) {
			phy_vid = my_phy_readreg(sc, PHY_VENID);
			phy_did = my_phy_readreg(sc, PHY_DEVID);
			if (bootverbose) {
				device_printf(dev, "found PHY at address %d, ",
				    sc->my_phy_addr);
				printf("vendor id: %x device id: %x\n",
				    phy_vid, phy_did);
			}
			p = my_phys;
			while (p->my_vid) {
				if (phy_vid == p->my_vid) {
					sc->my_pinfo = p;
					break;
				}
				p++;
			}
			if (sc->my_pinfo == NULL)
				sc->my_pinfo = &my_phys[PHY_UNKNOWN];
			if (bootverbose)
				device_printf(dev, "PHY type: %s\n",
				    sc->my_pinfo->my_name);
		} else {
			MY_UNLOCK(sc);
			device_printf(dev, "MII without any phy!\n");
			error = ENXIO;
			goto free_if;
		}
		MY_UNLOCK(sc);
	}

	/* Do ifmedia setup. */
	ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts);
	MY_LOCK(sc);
	my_getmode_mii(sc);
	my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1);
	media = sc->ifmedia.ifm_media;
	my_stop(sc);
	MY_UNLOCK(sc);
	ifmedia_set(&sc->ifmedia, media);

	ether_ifattach(ifp, eaddr);

	error = bus_setup_intr(dev, sc->my_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, my_intr, sc, &sc->my_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto detach_if;
	}

	return (0);

detach_if:
	ether_ifdetach(ifp);
free_if:
	if_free(ifp);
free_ldata:
	free(sc->my_ldata_ptr, M_DEVBUF);
release_irq:
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
release_io:
	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
destroy_mutex:
	mtx_destroy(&sc->my_mtx);
	return (error);
}

static int
my_detach(device_t dev)
{
	struct my_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->my_ifp;
	ether_ifdetach(ifp);
	MY_LOCK(sc);
	my_stop(sc);
	MY_UNLOCK(sc);
	bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand);
	callout_drain(&sc->my_watchdog);
	callout_drain(&sc->my_autoneg_timer);

	if_free(ifp);
	free(sc->my_ldata_ptr, M_DEVBUF);

	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
	bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
	mtx_destroy(&sc->my_mtx);
	return (0);
}

/*
 * Initialize the transmit descriptors.
 */
static int
my_list_tx_init(struct my_softc * sc)
{
	struct my_chain_data *cd;
	struct my_list_data *ld;
	int i;

	MY_LOCK_ASSERT(sc);
	cd = &sc->my_cdata;
	ld = sc->my_ldata;
	for (i = 0; i < MY_TX_LIST_CNT; i++) {
		cd->my_tx_chain[i].my_ptr = &ld->my_tx_list[i];
		if (i == (MY_TX_LIST_CNT - 1))
			cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[0];
		else
			cd->my_tx_chain[i].my_nextdesc =
			    &cd->my_tx_chain[i + 1];
	}
	cd->my_tx_free = &cd->my_tx_chain[0];
	cd->my_tx_tail = cd->my_tx_head = NULL;
	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that we
 * arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
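 * Each descriptor's my_next field holds the physical (vtophys) address of
 * its successor, which is what the chip's DMA engine follows.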
 */
static int
my_list_rx_init(struct my_softc * sc)
{
	struct my_chain_data *cd;
	struct my_list_data *ld;
	int i;

	MY_LOCK_ASSERT(sc);
	cd = &sc->my_cdata;
	ld = sc->my_ldata;
	for (i = 0; i < MY_RX_LIST_CNT; i++) {
		cd->my_rx_chain[i].my_ptr =
		    (struct my_desc *) & ld->my_rx_list[i];
		if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS) {
			MY_UNLOCK(sc);
			return (ENOBUFS);
		}
		if (i == (MY_RX_LIST_CNT - 1)) {
			cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0];
			ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]);
		} else {
			cd->my_rx_chain[i].my_nextdesc =
			    &cd->my_rx_chain[i + 1];
			ld->my_rx_list[i].my_next =
			    vtophys(&ld->my_rx_list[i + 1]);
		}
	}
	cd->my_rx_head = &cd->my_rx_chain[0];
	return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c)
{
	struct mbuf *m_new = NULL;

	MY_LOCK_ASSERT(sc);
	MGETHDR(m_new, M_NOWAIT, MT_DATA);
	if (m_new == NULL) {
		device_printf(sc->my_dev,
		    "no memory for rx list -- packet dropped!\n");
		return (ENOBUFS);
	}
	if (!(MCLGET(m_new, M_NOWAIT))) {
		device_printf(sc->my_dev,
		    "no memory for rx list -- packet dropped!\n");
		m_freem(m_new);
		return (ENOBUFS);
	}
	c->my_mbuf = m_new;
	c->my_ptr->my_data = vtophys(mtod(m_new, caddr_t));
	c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift;
	c->my_ptr->my_status = MY_OWNByNIC;
	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to the higher
 * level protocols.
 */
static void
my_rxeof(struct my_softc * sc)
{
	struct ether_header *eh;
	struct mbuf *m;
	struct ifnet *ifp;
	struct my_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status)
	    & MY_OWNByNIC)) {
		cur_rx = sc->my_cdata.my_rx_head;
		sc->my_cdata.my_rx_head = cur_rx->my_nextdesc;

		if (rxstat & MY_ES) {	/* error summary: give up this rx pkt */
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			cur_rx->my_ptr->my_status = MY_OWNByNIC;
			continue;
		}
		/* No errors; receive the packet. */
		total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift;
		total_len -= ETHER_CRC_LEN;

		if (total_len < MINCLSIZE) {
			m = m_devget(mtod(cur_rx->my_mbuf, char *),
			    total_len, 0, ifp, NULL);
			cur_rx->my_ptr->my_status = MY_OWNByNIC;
			if (m == NULL) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				continue;
			}
		} else {
			m = cur_rx->my_mbuf;
			/*
			 * Try to conjure up a new mbuf cluster. If that
			 * fails, it means we have an out of memory condition
			 * and should leave the buffer in place and continue.
			 * This will result in a lost packet, but there's
			 * little else we can do in this situation.
			 */
			if (my_newbuf(sc, cur_rx) == ENOBUFS) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				cur_rx->my_ptr->my_status = MY_OWNByNIC;
				continue;
			}
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
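		/*
		 * bpf_peers_present() is checked first so the bpf_mtap()
		 * copy is only taken when a listener is actually attached.
		 */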
		/*
		 * Handle BPF listeners.  Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's a
		 * broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
		if (bpf_peers_present(ifp->if_bpf)) {
			bpf_mtap(ifp->if_bpf, m);
			if (ifp->if_flags & IFF_PROMISC &&
			    (bcmp(eh->ether_dhost, IF_LLADDR(sc->my_ifp),
			    ETHER_ADDR_LEN) &&
			    (eh->ether_dhost[0] & 1) == 0)) {
				m_freem(m);
				continue;
			}
		}
#endif
		MY_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		MY_LOCK(sc);
	}
	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up the list
 * buffers.
 */
static void
my_txeof(struct my_softc * sc)
{
	struct my_chain *cur_tx;
	struct ifnet *ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	/* Clear the timeout timer. */
	sc->my_timer = 0;
	if (sc->my_cdata.my_tx_head == NULL) {
		return;
	}
	/*
	 * Go through our tx list and free mbufs for those frames that have
	 * been transmitted.
	 */
	while (sc->my_cdata.my_tx_head->my_mbuf != NULL) {
		u_int32_t txstat;

		cur_tx = sc->my_cdata.my_tx_head;
		txstat = MY_TXSTATUS(cur_tx);
		if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT)
			break;
		if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) {
			if (txstat & MY_TXERR) {
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				if (txstat & MY_EC)	/* excessive collision */
					if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
				if (txstat & MY_LC)	/* late collision */
					if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
			}
			if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
			    (txstat & MY_NCRMASK) >> MY_NCRShift);
		}
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		m_freem(cur_tx->my_mbuf);
		cur_tx->my_mbuf = NULL;
		if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) {
			sc->my_cdata.my_tx_head = NULL;
			sc->my_cdata.my_tx_tail = NULL;
			break;
		}
		sc->my_cdata.my_tx_head = cur_tx->my_nextdesc;
	}
	if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) {
		if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
		    (CSR_READ_4(sc, MY_TSR) & MY_NCRMask));
	}
	return;
}

/*
 * TX 'end of channel' interrupt handler.
 */
static void
my_txeoc(struct my_softc * sc)
{
	struct ifnet *ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;
	sc->my_timer = 0;
	if (sc->my_cdata.my_tx_head == NULL) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->my_cdata.my_tx_tail = NULL;
		if (sc->my_want_auto)
			my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
	} else {
		if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) {
			MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC;
			sc->my_timer = 5;
			CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);
		}
	}
	return;
}

static void
my_intr(void *arg)
{
	struct my_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;

	sc = arg;
	MY_LOCK(sc);
	ifp = sc->my_ifp;
	if (!(ifp->if_flags & IFF_UP)) {
		MY_UNLOCK(sc);
		return;
	}
	/*
	 * Disable interrupts.
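	 * They are re-enabled at the bottom of the handler, once the pending
	 * ISR bits have been read back and acknowledged.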
	 */
	CSR_WRITE_4(sc, MY_IMR, 0x00000000);

	for (;;) {
		status = CSR_READ_4(sc, MY_ISR);
		status &= MY_INTRS;
		if (status)
			CSR_WRITE_4(sc, MY_ISR, status);
		else
			break;

		if (status & MY_RI)	/* receive interrupt */
			my_rxeof(sc);

		if ((status & MY_RBU) || (status & MY_RxErr)) {
			/* rx buffer unavailable or rx error */
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
#ifdef foo
			my_stop(sc);
			my_reset(sc);
			my_init_locked(sc);
#endif
		}
		if (status & MY_TI)	/* tx interrupt */
			my_txeof(sc);
		if (status & MY_ETI)	/* tx early interrupt */
			my_txeof(sc);
		if (status & MY_TBU)	/* tx buffer unavailable */
			my_txeoc(sc);

#if 0				/* 90/1/18 delete */
		if (status & MY_FBE) {
			my_reset(sc);
			my_init_locked(sc);
		}
#endif

	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		my_start_locked(ifp);
	MY_UNLOCK(sc);
	return;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head)
{
	struct my_desc *f = NULL;
	int total_len;
	struct mbuf *m, *m_new = NULL;

	MY_LOCK_ASSERT(sc);
	/* calculate the total tx pkt length */
	total_len = 0;
	for (m = m_head; m != NULL; m = m->m_next)
		total_len += m->m_len;
	/*
	 * Start packing the mbufs in this chain into the fragment pointers.
	 * Stop when we run out of fragments or hit the end of the mbuf
	 * chain.
	 */
	m = m_head;
	MGETHDR(m_new, M_NOWAIT, MT_DATA);
	if (m_new == NULL) {
		device_printf(sc->my_dev, "no memory for tx list");
		return (1);
	}
	if (m_head->m_pkthdr.len > MHLEN) {
		if (!(MCLGET(m_new, M_NOWAIT))) {
			m_freem(m_new);
			device_printf(sc->my_dev, "no memory for tx list");
			return (1);
		}
	}
	m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
	m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
	m_freem(m_head);
	m_head = m_new;
	f = &c->my_ptr->my_frag[0];
	f->my_status = 0;
	f->my_data = vtophys(mtod(m_new, caddr_t));
	total_len = m_new->m_len;
	f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable;
	f->my_ctl |= total_len << MY_PKTShift;	/* pkt size */
	f->my_ctl |= total_len;	/* buffer size */
	/* 89/12/29 add, for mtd891 */	/* [ 89? ] */
	if (sc->my_info->my_did == MTD891ID)
		f->my_ctl |= MY_ETIControl | MY_RetryTxLC;
	c->my_mbuf = m_head;
	c->my_lastdesc = 0;
	MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]);
	return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
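 * In practice my_encap() copies each outgoing chain into a single fresh
 * mbuf (a cluster when needed), so every packet occupies exactly one
 * fragment descriptor.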
 */
static void
my_start(struct ifnet * ifp)
{
	struct my_softc *sc;

	sc = ifp->if_softc;
	MY_LOCK(sc);
	my_start_locked(ifp);
	MY_UNLOCK(sc);
}

static void
my_start_locked(struct ifnet * ifp)
{
	struct my_softc *sc;
	struct mbuf *m_head = NULL;
	struct my_chain *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;
	MY_LOCK_ASSERT(sc);
	if (sc->my_autoneg) {
		sc->my_tx_pend = 1;
		return;
	}
	/*
	 * Check for an available queue slot. If there are none, punt.
	 */
	if (sc->my_cdata.my_tx_free->my_mbuf != NULL) {
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		return;
	}
	start_tx = sc->my_cdata.my_tx_free;
	while (sc->my_cdata.my_tx_free->my_mbuf == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		cur_tx = sc->my_cdata.my_tx_free;
		sc->my_cdata.my_tx_free = cur_tx->my_nextdesc;

		/* Pack the data into the descriptor. */
		my_encap(sc, cur_tx, m_head);

		if (cur_tx != start_tx)
			MY_TXOWN(cur_tx) = MY_OWNByNIC;
#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame to
		 * him.
		 */
		BPF_MTAP(ifp, cur_tx->my_mbuf);
#endif
	}
	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL) {
		return;
	}
	/*
	 * Place the request for the upload interrupt in the last descriptor
	 * in the chain. This way, if we're chaining several packets at once,
	 * we'll only get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	MY_TXCTL(cur_tx) |= MY_TXIC;
	cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC;
	sc->my_cdata.my_tx_tail = cur_tx;
	if (sc->my_cdata.my_tx_head == NULL)
		sc->my_cdata.my_tx_head = start_tx;
	MY_TXOWN(start_tx) = MY_OWNByNIC;
	CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);	/* tx polling demand */

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	sc->my_timer = 5;
	return;
}

static void
my_init(void *xsc)
{
	struct my_softc *sc = xsc;

	MY_LOCK(sc);
	my_init_locked(sc);
	MY_UNLOCK(sc);
}

static void
my_init_locked(struct my_softc *sc)
{
	struct ifnet *ifp = sc->my_ifp;
	u_int16_t phy_bmcr = 0;

	MY_LOCK_ASSERT(sc);
	if (sc->my_autoneg) {
		return;
	}
	if (sc->my_pinfo != NULL)
		phy_bmcr = my_phy_readreg(sc, PHY_BMCR);
	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	my_stop(sc);
	my_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
#if 0				/* 89/9/1 modify, */
	CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512);
	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF);
#endif
	CSR_WRITE_4(sc, MY_BCR, MY_PBL8);
	CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512);
	/*
	 * 89/12/29 add, for mtd891,
	 */
	if (sc->my_info->my_did == MTD891ID) {
		MY_SETBIT(sc, MY_BCR, MY_PROG);
		MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced);
	}
	my_setcfg(sc, phy_bmcr);
	/* Init circular RX list. */
	if (my_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->my_dev,
		    "init failed: no memory for rx buffers\n");
		my_stop(sc);
		return;
	}
	/* Init TX descriptors. */
	my_list_tx_init(sc);

	/*
	 * If we want promiscuous mode, set the allframes bit.
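	 * (MY_PROM makes the chip accept every frame it sees, regardless of
	 * the destination address.)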
	 */
	if (ifp->if_flags & IFF_PROMISC)
		MY_SETBIT(sc, MY_TCRRCR, MY_PROM);
	else
		MY_CLRBIT(sc, MY_TCRRCR, MY_PROM);

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		MY_SETBIT(sc, MY_TCRRCR, MY_AB);
	else
		MY_CLRBIT(sc, MY_TCRRCR, MY_AB);

	/*
	 * Program the multicast filter, if necessary.
	 */
	my_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	MY_CLRBIT(sc, MY_TCRRCR, MY_RE);
	CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
	CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF);

	/* Enable receiver and transmitter. */
	MY_SETBIT(sc, MY_TCRRCR, MY_RE);
	MY_CLRBIT(sc, MY_TCRRCR, MY_TE);
	CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0]));
	MY_SETBIT(sc, MY_TCRRCR, MY_TE);

	/* Restore state of BMCR */
	if (sc->my_pinfo != NULL)
		my_phy_writereg(sc, PHY_BMCR, phy_bmcr);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
	return;
}

/*
 * Set media options.
 */
static int
my_ifmedia_upd(struct ifnet * ifp)
{
	struct my_softc *sc;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	MY_LOCK(sc);
	ifm = &sc->ifmedia;
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
		MY_UNLOCK(sc);
		return (EINVAL);
	}
	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
		my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
	else
		my_setmode_mii(sc, ifm->ifm_media);
	MY_UNLOCK(sc);
	return (0);
}

/*
 * Report current media status.
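 * When autonegotiation is disabled the forced BMCR settings are reported;
 * otherwise the active media is derived from the overlap between our ANAR
 * advertisement and the link partner's LPAR abilities.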
 */
static void
my_ifmedia_sts(struct ifnet * ifp, struct ifmediareq * ifmr)
{
	struct my_softc *sc;
	u_int16_t advert = 0, ability = 0;

	sc = ifp->if_softc;
	MY_LOCK(sc);
	ifmr->ifm_active = IFM_ETHER;
	if (!(my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
#if 0				/* this version did not support 1000M, */
		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_1000)
			ifmr->ifm_active = IFM_ETHER | IFM_1000TX;
#endif
		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
			ifmr->ifm_active = IFM_ETHER | IFM_100_TX;
		else
			ifmr->ifm_active = IFM_ETHER | IFM_10_T;
		if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;

		MY_UNLOCK(sc);
		return;
	}
	ability = my_phy_readreg(sc, PHY_LPAR);
	advert = my_phy_readreg(sc, PHY_ANAR);

#if 0				/* this version did not support 1000M, */
	if (sc->my_pinfo->my_vid == MarvellPHYID0) {
		ability2 = my_phy_readreg(sc, PHY_1000SR);
		if (ability2 & PHY_1000SR_1000BTXFULL) {
			advert = 0;
			ability = 0;
			ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX;
		} else if (ability & PHY_1000SR_1000BTXHALF) {
			advert = 0;
			ability = 0;
			ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_HDX;
		}
	}
#endif
	if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4)
		ifmr->ifm_active = IFM_ETHER | IFM_100_T4;
	else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL)
		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF)
		ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX;
	else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL)
		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX;
	else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF)
		ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX;
	MY_UNLOCK(sc);
	return;
}

static int
my_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
{
	struct my_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int error;

	switch (command) {
	case SIOCSIFFLAGS:
		MY_LOCK(sc);
		if (ifp->if_flags & IFF_UP)
			my_init_locked(sc);
		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			my_stop(sc);
		MY_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		MY_LOCK(sc);
		my_setmulti(sc);
		MY_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
my_watchdog(void *arg)
{
	struct my_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	MY_LOCK_ASSERT(sc);
	callout_reset(&sc->my_watchdog, hz, my_watchdog, sc);
	if (sc->my_timer == 0 || --sc->my_timer > 0)
		return;

	ifp = sc->my_ifp;
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");
	if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
		if_printf(ifp, "no carrier - transceiver cable problem?\n");
	my_stop(sc);
	my_reset(sc);
	my_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		my_start_locked(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
 */
static void
my_stop(struct my_softc * sc)
{
	int i;
	struct ifnet *ifp;

	MY_LOCK_ASSERT(sc);
	ifp = sc->my_ifp;

	callout_stop(&sc->my_autoneg_timer);
	callout_stop(&sc->my_watchdog);

	MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE));
	CSR_WRITE_4(sc, MY_IMR, 0x00000000);
	CSR_WRITE_4(sc, MY_TXLBA, 0x00000000);
	CSR_WRITE_4(sc, MY_RXLBA, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < MY_RX_LIST_CNT; i++) {
		if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) {
			m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf);
			sc->my_cdata.my_rx_chain[i].my_mbuf = NULL;
		}
	}
	bzero((char *)&sc->my_ldata->my_rx_list,
	    sizeof(sc->my_ldata->my_rx_list));
	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < MY_TX_LIST_CNT; i++) {
		if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) {
			m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf);
			sc->my_cdata.my_tx_chain[i].my_mbuf = NULL;
		}
	}
	bzero((char *)&sc->my_ldata->my_tx_list,
	    sizeof(sc->my_ldata->my_tx_list));
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't get confused
 * by errant DMAs when rebooting.
 */
static int
my_shutdown(device_t dev)
{
	struct my_softc *sc;

	sc = device_get_softc(dev);
	MY_LOCK(sc);
	my_stop(sc);
	MY_UNLOCK(sc);
	return 0;
}