1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Written by: yen_cw@myson.com.tw 5 * Copyright (c) 2002 Myson Technology Inc. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification, immediately at the beginning of the file. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * Myson fast ethernet PCI NIC driver, available at: http://www.myson.com.tw/ 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/sockio.h> 38 #include <sys/mbuf.h> 39 #include <sys/malloc.h> 40 #include <sys/kernel.h> 41 #include <sys/socket.h> 42 #include <sys/queue.h> 43 #include <sys/types.h> 44 #include <sys/module.h> 45 #include <sys/lock.h> 46 #include <sys/mutex.h> 47 48 #define NBPFILTER 1 49 50 #include <net/if.h> 51 #include <net/if_var.h> 52 #include <net/if_arp.h> 53 #include <net/ethernet.h> 54 #include <net/if_media.h> 55 #include <net/if_types.h> 56 #include <net/if_dl.h> 57 #include <net/bpf.h> 58 59 #include <vm/vm.h> /* for vtophys */ 60 #include <vm/pmap.h> /* for vtophys */ 61 #include <machine/bus.h> 62 #include <machine/resource.h> 63 #include <sys/bus.h> 64 #include <sys/rman.h> 65 66 #include <dev/pci/pcireg.h> 67 #include <dev/pci/pcivar.h> 68 69 /* 70 * #define MY_USEIOSPACE 71 */ 72 73 static int MY_USEIOSPACE = 1; 74 75 #ifdef MY_USEIOSPACE 76 #define MY_RES SYS_RES_IOPORT 77 #define MY_RID MY_PCI_LOIO 78 #else 79 #define MY_RES SYS_RES_MEMORY 80 #define MY_RID MY_PCI_LOMEM 81 #endif 82 83 84 #include <dev/my/if_myreg.h> 85 86 /* 87 * Various supported device vendors/types and their names. 88 */ 89 struct my_type *my_info_tmp; 90 static struct my_type my_devs[] = { 91 {MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"}, 92 {MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"}, 93 {MYSONVENDORID, MTD891ID, "Myson MTD89X Based Giga Ethernet Card"}, 94 {0, 0, NULL} 95 }; 96 97 /* 98 * Various supported PHY vendors/types and their names. Note that this driver 99 * will work with pretty much any MII-compliant PHY, so failure to positively 100 * identify the chip is not a fatal error. 101 */ 102 static struct my_type my_phys[] = { 103 {MysonPHYID0, MysonPHYID0, "<MYSON MTD981>"}, 104 {SeeqPHYID0, SeeqPHYID0, "<SEEQ 80225>"}, 105 {AhdocPHYID0, AhdocPHYID0, "<AHDOC 101>"}, 106 {MarvellPHYID0, MarvellPHYID0, "<MARVELL 88E1000>"}, 107 {LevelOnePHYID0, LevelOnePHYID0, "<LevelOne LXT1000>"}, 108 {0, 0, "<MII-compliant physical interface>"} 109 }; 110 111 static int my_probe(device_t); 112 static int my_attach(device_t); 113 static int my_detach(device_t); 114 static int my_newbuf(struct my_softc *, struct my_chain_onefrag *); 115 static int my_encap(struct my_softc *, struct my_chain *, struct mbuf *); 116 static void my_rxeof(struct my_softc *); 117 static void my_txeof(struct my_softc *); 118 static void my_txeoc(struct my_softc *); 119 static void my_intr(void *); 120 static void my_start(struct ifnet *); 121 static void my_start_locked(struct ifnet *); 122 static int my_ioctl(struct ifnet *, u_long, caddr_t); 123 static void my_init(void *); 124 static void my_init_locked(struct my_softc *); 125 static void my_stop(struct my_softc *); 126 static void my_autoneg_timeout(void *); 127 static void my_watchdog(void *); 128 static int my_shutdown(device_t); 129 static int my_ifmedia_upd(struct ifnet *); 130 static void my_ifmedia_sts(struct ifnet *, struct ifmediareq *); 131 static u_int16_t my_phy_readreg(struct my_softc *, int); 132 static void my_phy_writereg(struct my_softc *, int, int); 133 static void my_autoneg_xmit(struct my_softc *); 134 static void my_autoneg_mii(struct my_softc *, int, int); 135 static void my_setmode_mii(struct my_softc *, int); 136 static void my_getmode_mii(struct my_softc *); 137 static void my_setcfg(struct my_softc *, int); 138 static void my_setmulti(struct my_softc *); 139 static void my_reset(struct my_softc *); 140 static int my_list_rx_init(struct my_softc *); 141 static int my_list_tx_init(struct my_softc *); 142 static long my_send_cmd_to_phy(struct my_softc *, int, int); 143 144 #define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) 145 #define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) 146 147 static device_method_t my_methods[] = { 148 /* Device interface */ 149 DEVMETHOD(device_probe, my_probe), 150 DEVMETHOD(device_attach, my_attach), 151 DEVMETHOD(device_detach, my_detach), 152 DEVMETHOD(device_shutdown, my_shutdown), 153 154 DEVMETHOD_END 155 }; 156 157 static driver_t my_driver = { 158 "my", 159 my_methods, 160 sizeof(struct my_softc) 161 }; 162 163 static devclass_t my_devclass; 164 165 DRIVER_MODULE(my, pci, my_driver, my_devclass, 0, 0); 166 MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, my, my_devs, 167 nitems(my_devs) - 1); 168 MODULE_DEPEND(my, pci, 1, 1, 1); 169 MODULE_DEPEND(my, ether, 1, 1, 1); 170 171 static long 172 my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad) 173 { 174 long miir; 175 int i; 176 int mask, data; 177 178 MY_LOCK_ASSERT(sc); 179 180 /* enable MII output */ 181 miir = CSR_READ_4(sc, MY_MANAGEMENT); 182 miir &= 0xfffffff0; 183 184 miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO; 185 186 /* send 32 1's preamble */ 187 for (i = 0; i < 32; i++) { 188 /* low MDC; MDO is already high (miir) */ 189 miir &= ~MY_MASK_MIIR_MII_MDC; 190 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 191 192 /* high MDC */ 193 miir |= MY_MASK_MIIR_MII_MDC; 194 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 195 } 196 197 /* calculate ST+OP+PHYAD+REGAD+TA */ 198 data = opcode | (sc->my_phy_addr << 7) | (regad << 2); 199 200 /* sent out */ 201 mask = 0x8000; 202 while (mask) { 203 /* low MDC, prepare MDO */ 204 miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO); 205 if (mask & data) 206 miir |= MY_MASK_MIIR_MII_MDO; 207 208 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 209 /* high MDC */ 210 miir |= MY_MASK_MIIR_MII_MDC; 211 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 212 DELAY(30); 213 214 /* next */ 215 mask >>= 1; 216 if (mask == 0x2 && opcode == MY_OP_READ) 217 miir &= ~MY_MASK_MIIR_MII_WRITE; 218 } 219 220 return miir; 221 } 222 223 224 static u_int16_t 225 my_phy_readreg(struct my_softc * sc, int reg) 226 { 227 long miir; 228 int mask, data; 229 230 MY_LOCK_ASSERT(sc); 231 232 if (sc->my_info->my_did == MTD803ID) 233 data = CSR_READ_2(sc, MY_PHYBASE + reg * 2); 234 else { 235 miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg); 236 237 /* read data */ 238 mask = 0x8000; 239 data = 0; 240 while (mask) { 241 /* low MDC */ 242 miir &= ~MY_MASK_MIIR_MII_MDC; 243 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 244 245 /* read MDI */ 246 miir = CSR_READ_4(sc, MY_MANAGEMENT); 247 if (miir & MY_MASK_MIIR_MII_MDI) 248 data |= mask; 249 250 /* high MDC, and wait */ 251 miir |= MY_MASK_MIIR_MII_MDC; 252 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 253 DELAY(30); 254 255 /* next */ 256 mask >>= 1; 257 } 258 259 /* low MDC */ 260 miir &= ~MY_MASK_MIIR_MII_MDC; 261 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 262 } 263 264 return (u_int16_t) data; 265 } 266 267 268 static void 269 my_phy_writereg(struct my_softc * sc, int reg, int data) 270 { 271 long miir; 272 int mask; 273 274 MY_LOCK_ASSERT(sc); 275 276 if (sc->my_info->my_did == MTD803ID) 277 CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data); 278 else { 279 miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg); 280 281 /* write data */ 282 mask = 0x8000; 283 while (mask) { 284 /* low MDC, prepare MDO */ 285 miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO); 286 if (mask & data) 287 miir |= MY_MASK_MIIR_MII_MDO; 288 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 289 DELAY(1); 290 291 /* high MDC */ 292 miir |= MY_MASK_MIIR_MII_MDC; 293 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 294 DELAY(1); 295 296 /* next */ 297 mask >>= 1; 298 } 299 300 /* low MDC */ 301 miir &= ~MY_MASK_MIIR_MII_MDC; 302 CSR_WRITE_4(sc, MY_MANAGEMENT, miir); 303 } 304 return; 305 } 306 307 static u_int 308 my_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 309 { 310 uint32_t *hashes = arg; 311 int h; 312 313 h = ~ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26; 314 if (h < 32) 315 hashes[0] |= (1 << h); 316 else 317 hashes[1] |= (1 << (h - 32)); 318 319 return (1); 320 } 321 /* 322 * Program the 64-bit multicast hash filter. 323 */ 324 static void 325 my_setmulti(struct my_softc * sc) 326 { 327 struct ifnet *ifp; 328 u_int32_t hashes[2] = {0, 0}; 329 u_int32_t rxfilt; 330 331 MY_LOCK_ASSERT(sc); 332 333 ifp = sc->my_ifp; 334 335 rxfilt = CSR_READ_4(sc, MY_TCRRCR); 336 337 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 338 rxfilt |= MY_AM; 339 CSR_WRITE_4(sc, MY_TCRRCR, rxfilt); 340 CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF); 341 CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF); 342 343 return; 344 } 345 /* first, zot all the existing hash bits */ 346 CSR_WRITE_4(sc, MY_MAR0, 0); 347 CSR_WRITE_4(sc, MY_MAR1, 0); 348 349 /* now program new ones */ 350 if (if_foreach_llmaddr(ifp, my_hash_maddr, hashes) > 0) 351 rxfilt |= MY_AM; 352 else 353 rxfilt &= ~MY_AM; 354 CSR_WRITE_4(sc, MY_MAR0, hashes[0]); 355 CSR_WRITE_4(sc, MY_MAR1, hashes[1]); 356 CSR_WRITE_4(sc, MY_TCRRCR, rxfilt); 357 } 358 359 /* 360 * Initiate an autonegotiation session. 361 */ 362 static void 363 my_autoneg_xmit(struct my_softc * sc) 364 { 365 u_int16_t phy_sts = 0; 366 367 MY_LOCK_ASSERT(sc); 368 369 my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET); 370 DELAY(500); 371 while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET); 372 373 phy_sts = my_phy_readreg(sc, PHY_BMCR); 374 phy_sts |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR; 375 my_phy_writereg(sc, PHY_BMCR, phy_sts); 376 377 return; 378 } 379 380 static void 381 my_autoneg_timeout(void *arg) 382 { 383 struct my_softc *sc; 384 385 sc = arg; 386 MY_LOCK_ASSERT(sc); 387 my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1); 388 } 389 390 /* 391 * Invoke autonegotiation on a PHY. 392 */ 393 static void 394 my_autoneg_mii(struct my_softc * sc, int flag, int verbose) 395 { 396 u_int16_t phy_sts = 0, media, advert, ability; 397 u_int16_t ability2 = 0; 398 struct ifnet *ifp; 399 struct ifmedia *ifm; 400 401 MY_LOCK_ASSERT(sc); 402 403 ifm = &sc->ifmedia; 404 ifp = sc->my_ifp; 405 406 ifm->ifm_media = IFM_ETHER | IFM_AUTO; 407 408 #ifndef FORCE_AUTONEG_TFOUR 409 /* 410 * First, see if autoneg is supported. If not, there's no point in 411 * continuing. 412 */ 413 phy_sts = my_phy_readreg(sc, PHY_BMSR); 414 if (!(phy_sts & PHY_BMSR_CANAUTONEG)) { 415 if (verbose) 416 device_printf(sc->my_dev, 417 "autonegotiation not supported\n"); 418 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX; 419 return; 420 } 421 #endif 422 switch (flag) { 423 case MY_FLAG_FORCEDELAY: 424 /* 425 * XXX Never use this option anywhere but in the probe 426 * routine: making the kernel stop dead in its tracks for 427 * three whole seconds after we've gone multi-user is really 428 * bad manners. 429 */ 430 my_autoneg_xmit(sc); 431 DELAY(5000000); 432 break; 433 case MY_FLAG_SCHEDDELAY: 434 /* 435 * Wait for the transmitter to go idle before starting an 436 * autoneg session, otherwise my_start() may clobber our 437 * timeout, and we don't want to allow transmission during an 438 * autoneg session since that can screw it up. 439 */ 440 if (sc->my_cdata.my_tx_head != NULL) { 441 sc->my_want_auto = 1; 442 MY_UNLOCK(sc); 443 return; 444 } 445 my_autoneg_xmit(sc); 446 callout_reset(&sc->my_autoneg_timer, hz * 5, my_autoneg_timeout, 447 sc); 448 sc->my_autoneg = 1; 449 sc->my_want_auto = 0; 450 return; 451 case MY_FLAG_DELAYTIMEO: 452 callout_stop(&sc->my_autoneg_timer); 453 sc->my_autoneg = 0; 454 break; 455 default: 456 device_printf(sc->my_dev, "invalid autoneg flag: %d\n", flag); 457 return; 458 } 459 460 if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) { 461 if (verbose) 462 device_printf(sc->my_dev, "autoneg complete, "); 463 phy_sts = my_phy_readreg(sc, PHY_BMSR); 464 } else { 465 if (verbose) 466 device_printf(sc->my_dev, "autoneg not complete, "); 467 } 468 469 media = my_phy_readreg(sc, PHY_BMCR); 470 471 /* Link is good. Report modes and set duplex mode. */ 472 if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) { 473 if (verbose) 474 device_printf(sc->my_dev, "link status good. "); 475 advert = my_phy_readreg(sc, PHY_ANAR); 476 ability = my_phy_readreg(sc, PHY_LPAR); 477 if ((sc->my_pinfo->my_vid == MarvellPHYID0) || 478 (sc->my_pinfo->my_vid == LevelOnePHYID0)) { 479 ability2 = my_phy_readreg(sc, PHY_1000SR); 480 if (ability2 & PHY_1000SR_1000BTXFULL) { 481 advert = 0; 482 ability = 0; 483 /* 484 * this version did not support 1000M, 485 * ifm->ifm_media = 486 * IFM_ETHER|IFM_1000_T|IFM_FDX; 487 */ 488 ifm->ifm_media = 489 IFM_ETHER | IFM_100_TX | IFM_FDX; 490 media &= ~PHY_BMCR_SPEEDSEL; 491 media |= PHY_BMCR_1000; 492 media |= PHY_BMCR_DUPLEX; 493 printf("(full-duplex, 1000Mbps)\n"); 494 } else if (ability2 & PHY_1000SR_1000BTXHALF) { 495 advert = 0; 496 ability = 0; 497 /* 498 * this version did not support 1000M, 499 * ifm->ifm_media = IFM_ETHER|IFM_1000_T; 500 */ 501 ifm->ifm_media = IFM_ETHER | IFM_100_TX; 502 media &= ~PHY_BMCR_SPEEDSEL; 503 media &= ~PHY_BMCR_DUPLEX; 504 media |= PHY_BMCR_1000; 505 printf("(half-duplex, 1000Mbps)\n"); 506 } 507 } 508 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) { 509 ifm->ifm_media = IFM_ETHER | IFM_100_T4; 510 media |= PHY_BMCR_SPEEDSEL; 511 media &= ~PHY_BMCR_DUPLEX; 512 printf("(100baseT4)\n"); 513 } else if (advert & PHY_ANAR_100BTXFULL && 514 ability & PHY_ANAR_100BTXFULL) { 515 ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX; 516 media |= PHY_BMCR_SPEEDSEL; 517 media |= PHY_BMCR_DUPLEX; 518 printf("(full-duplex, 100Mbps)\n"); 519 } else if (advert & PHY_ANAR_100BTXHALF && 520 ability & PHY_ANAR_100BTXHALF) { 521 ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX; 522 media |= PHY_BMCR_SPEEDSEL; 523 media &= ~PHY_BMCR_DUPLEX; 524 printf("(half-duplex, 100Mbps)\n"); 525 } else if (advert & PHY_ANAR_10BTFULL && 526 ability & PHY_ANAR_10BTFULL) { 527 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX; 528 media &= ~PHY_BMCR_SPEEDSEL; 529 media |= PHY_BMCR_DUPLEX; 530 printf("(full-duplex, 10Mbps)\n"); 531 } else if (advert) { 532 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX; 533 media &= ~PHY_BMCR_SPEEDSEL; 534 media &= ~PHY_BMCR_DUPLEX; 535 printf("(half-duplex, 10Mbps)\n"); 536 } 537 media &= ~PHY_BMCR_AUTONEGENBL; 538 539 /* Set ASIC's duplex mode to match the PHY. */ 540 my_phy_writereg(sc, PHY_BMCR, media); 541 my_setcfg(sc, media); 542 } else { 543 if (verbose) 544 device_printf(sc->my_dev, "no carrier\n"); 545 } 546 547 my_init_locked(sc); 548 if (sc->my_tx_pend) { 549 sc->my_autoneg = 0; 550 sc->my_tx_pend = 0; 551 my_start_locked(ifp); 552 } 553 return; 554 } 555 556 /* 557 * To get PHY ability. 558 */ 559 static void 560 my_getmode_mii(struct my_softc * sc) 561 { 562 u_int16_t bmsr; 563 struct ifnet *ifp; 564 565 MY_LOCK_ASSERT(sc); 566 ifp = sc->my_ifp; 567 bmsr = my_phy_readreg(sc, PHY_BMSR); 568 if (bootverbose) 569 device_printf(sc->my_dev, "PHY status word: %x\n", bmsr); 570 571 /* fallback */ 572 sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX; 573 574 if (bmsr & PHY_BMSR_10BTHALF) { 575 if (bootverbose) 576 device_printf(sc->my_dev, 577 "10Mbps half-duplex mode supported\n"); 578 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX, 579 0, NULL); 580 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL); 581 } 582 if (bmsr & PHY_BMSR_10BTFULL) { 583 if (bootverbose) 584 device_printf(sc->my_dev, 585 "10Mbps full-duplex mode supported\n"); 586 587 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 588 0, NULL); 589 sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX; 590 } 591 if (bmsr & PHY_BMSR_100BTXHALF) { 592 if (bootverbose) 593 device_printf(sc->my_dev, 594 "100Mbps half-duplex mode supported\n"); 595 ifp->if_baudrate = 100000000; 596 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL); 597 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX, 598 0, NULL); 599 sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX; 600 } 601 if (bmsr & PHY_BMSR_100BTXFULL) { 602 if (bootverbose) 603 device_printf(sc->my_dev, 604 "100Mbps full-duplex mode supported\n"); 605 ifp->if_baudrate = 100000000; 606 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 607 0, NULL); 608 sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX; 609 } 610 /* Some also support 100BaseT4. */ 611 if (bmsr & PHY_BMSR_100BT4) { 612 if (bootverbose) 613 device_printf(sc->my_dev, "100baseT4 mode supported\n"); 614 ifp->if_baudrate = 100000000; 615 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL); 616 sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4; 617 #ifdef FORCE_AUTONEG_TFOUR 618 if (bootverbose) 619 device_printf(sc->my_dev, 620 "forcing on autoneg support for BT4\n"); 621 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0 NULL): 622 sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO; 623 #endif 624 } 625 #if 0 /* this version did not support 1000M, */ 626 if (sc->my_pinfo->my_vid == MarvellPHYID0) { 627 if (bootverbose) 628 device_printf(sc->my_dev, 629 "1000Mbps half-duplex mode supported\n"); 630 631 ifp->if_baudrate = 1000000000; 632 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL); 633 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX, 634 0, NULL); 635 if (bootverbose) 636 device_printf(sc->my_dev, 637 "1000Mbps full-duplex mode supported\n"); 638 ifp->if_baudrate = 1000000000; 639 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX, 640 0, NULL); 641 sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX; 642 } 643 #endif 644 if (bmsr & PHY_BMSR_CANAUTONEG) { 645 if (bootverbose) 646 device_printf(sc->my_dev, "autoneg supported\n"); 647 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); 648 sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO; 649 } 650 return; 651 } 652 653 /* 654 * Set speed and duplex mode. 655 */ 656 static void 657 my_setmode_mii(struct my_softc * sc, int media) 658 { 659 u_int16_t bmcr; 660 661 MY_LOCK_ASSERT(sc); 662 /* 663 * If an autoneg session is in progress, stop it. 664 */ 665 if (sc->my_autoneg) { 666 device_printf(sc->my_dev, "canceling autoneg session\n"); 667 callout_stop(&sc->my_autoneg_timer); 668 sc->my_autoneg = sc->my_want_auto = 0; 669 bmcr = my_phy_readreg(sc, PHY_BMCR); 670 bmcr &= ~PHY_BMCR_AUTONEGENBL; 671 my_phy_writereg(sc, PHY_BMCR, bmcr); 672 } 673 device_printf(sc->my_dev, "selecting MII, "); 674 bmcr = my_phy_readreg(sc, PHY_BMCR); 675 bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 | 676 PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK); 677 678 #if 0 /* this version did not support 1000M, */ 679 if (IFM_SUBTYPE(media) == IFM_1000_T) { 680 printf("1000Mbps/T4, half-duplex\n"); 681 bmcr &= ~PHY_BMCR_SPEEDSEL; 682 bmcr &= ~PHY_BMCR_DUPLEX; 683 bmcr |= PHY_BMCR_1000; 684 } 685 #endif 686 if (IFM_SUBTYPE(media) == IFM_100_T4) { 687 printf("100Mbps/T4, half-duplex\n"); 688 bmcr |= PHY_BMCR_SPEEDSEL; 689 bmcr &= ~PHY_BMCR_DUPLEX; 690 } 691 if (IFM_SUBTYPE(media) == IFM_100_TX) { 692 printf("100Mbps, "); 693 bmcr |= PHY_BMCR_SPEEDSEL; 694 } 695 if (IFM_SUBTYPE(media) == IFM_10_T) { 696 printf("10Mbps, "); 697 bmcr &= ~PHY_BMCR_SPEEDSEL; 698 } 699 if ((media & IFM_GMASK) == IFM_FDX) { 700 printf("full duplex\n"); 701 bmcr |= PHY_BMCR_DUPLEX; 702 } else { 703 printf("half duplex\n"); 704 bmcr &= ~PHY_BMCR_DUPLEX; 705 } 706 my_phy_writereg(sc, PHY_BMCR, bmcr); 707 my_setcfg(sc, bmcr); 708 return; 709 } 710 711 /* 712 * The Myson manual states that in order to fiddle with the 'full-duplex' and 713 * '100Mbps' bits in the netconfig register, we first have to put the 714 * transmit and/or receive logic in the idle state. 715 */ 716 static void 717 my_setcfg(struct my_softc * sc, int bmcr) 718 { 719 int i, restart = 0; 720 721 MY_LOCK_ASSERT(sc); 722 if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) { 723 restart = 1; 724 MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE)); 725 for (i = 0; i < MY_TIMEOUT; i++) { 726 DELAY(10); 727 if (!(CSR_READ_4(sc, MY_TCRRCR) & 728 (MY_TXRUN | MY_RXRUN))) 729 break; 730 } 731 if (i == MY_TIMEOUT) 732 device_printf(sc->my_dev, 733 "failed to force tx and rx to idle \n"); 734 } 735 MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000); 736 MY_CLRBIT(sc, MY_TCRRCR, MY_PS10); 737 if (bmcr & PHY_BMCR_1000) 738 MY_SETBIT(sc, MY_TCRRCR, MY_PS1000); 739 else if (!(bmcr & PHY_BMCR_SPEEDSEL)) 740 MY_SETBIT(sc, MY_TCRRCR, MY_PS10); 741 if (bmcr & PHY_BMCR_DUPLEX) 742 MY_SETBIT(sc, MY_TCRRCR, MY_FD); 743 else 744 MY_CLRBIT(sc, MY_TCRRCR, MY_FD); 745 if (restart) 746 MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE); 747 return; 748 } 749 750 static void 751 my_reset(struct my_softc * sc) 752 { 753 int i; 754 755 MY_LOCK_ASSERT(sc); 756 MY_SETBIT(sc, MY_BCR, MY_SWR); 757 for (i = 0; i < MY_TIMEOUT; i++) { 758 DELAY(10); 759 if (!(CSR_READ_4(sc, MY_BCR) & MY_SWR)) 760 break; 761 } 762 if (i == MY_TIMEOUT) 763 device_printf(sc->my_dev, "reset never completed!\n"); 764 765 /* Wait a little while for the chip to get its brains in order. */ 766 DELAY(1000); 767 return; 768 } 769 770 /* 771 * Probe for a Myson chip. Check the PCI vendor and device IDs against our 772 * list and return a device name if we find a match. 773 */ 774 static int 775 my_probe(device_t dev) 776 { 777 struct my_type *t; 778 779 t = my_devs; 780 while (t->my_name != NULL) { 781 if ((pci_get_vendor(dev) == t->my_vid) && 782 (pci_get_device(dev) == t->my_did)) { 783 device_set_desc(dev, t->my_name); 784 my_info_tmp = t; 785 return (BUS_PROBE_DEFAULT); 786 } 787 t++; 788 } 789 return (ENXIO); 790 } 791 792 /* 793 * Attach the interface. Allocate softc structures, do ifmedia setup and 794 * ethernet/BPF attach. 795 */ 796 static int 797 my_attach(device_t dev) 798 { 799 int i; 800 u_char eaddr[ETHER_ADDR_LEN]; 801 u_int32_t iobase; 802 struct my_softc *sc; 803 struct ifnet *ifp; 804 int media = IFM_ETHER | IFM_100_TX | IFM_FDX; 805 unsigned int round; 806 caddr_t roundptr; 807 struct my_type *p; 808 u_int16_t phy_vid, phy_did, phy_sts = 0; 809 int rid, error = 0; 810 811 sc = device_get_softc(dev); 812 sc->my_dev = dev; 813 mtx_init(&sc->my_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 814 MTX_DEF); 815 callout_init_mtx(&sc->my_autoneg_timer, &sc->my_mtx, 0); 816 callout_init_mtx(&sc->my_watchdog, &sc->my_mtx, 0); 817 818 /* 819 * Map control/status registers. 820 */ 821 pci_enable_busmaster(dev); 822 823 if (my_info_tmp->my_did == MTD800ID) { 824 iobase = pci_read_config(dev, MY_PCI_LOIO, 4); 825 if (iobase & 0x300) 826 MY_USEIOSPACE = 0; 827 } 828 829 rid = MY_RID; 830 sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE); 831 832 if (sc->my_res == NULL) { 833 device_printf(dev, "couldn't map ports/memory\n"); 834 error = ENXIO; 835 goto destroy_mutex; 836 } 837 sc->my_btag = rman_get_bustag(sc->my_res); 838 sc->my_bhandle = rman_get_bushandle(sc->my_res); 839 840 rid = 0; 841 sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 842 RF_SHAREABLE | RF_ACTIVE); 843 844 if (sc->my_irq == NULL) { 845 device_printf(dev, "couldn't map interrupt\n"); 846 error = ENXIO; 847 goto release_io; 848 } 849 850 sc->my_info = my_info_tmp; 851 852 /* Reset the adapter. */ 853 MY_LOCK(sc); 854 my_reset(sc); 855 MY_UNLOCK(sc); 856 857 /* 858 * Get station address 859 */ 860 for (i = 0; i < ETHER_ADDR_LEN; ++i) 861 eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i); 862 863 sc->my_ldata_ptr = malloc(sizeof(struct my_list_data) + 8, 864 M_DEVBUF, M_NOWAIT); 865 if (sc->my_ldata_ptr == NULL) { 866 device_printf(dev, "no memory for list buffers!\n"); 867 error = ENXIO; 868 goto release_irq; 869 } 870 sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr; 871 round = (uintptr_t)sc->my_ldata_ptr & 0xF; 872 roundptr = sc->my_ldata_ptr; 873 for (i = 0; i < 8; i++) { 874 if (round % 8) { 875 round++; 876 roundptr++; 877 } else 878 break; 879 } 880 sc->my_ldata = (struct my_list_data *) roundptr; 881 bzero(sc->my_ldata, sizeof(struct my_list_data)); 882 883 ifp = sc->my_ifp = if_alloc(IFT_ETHER); 884 if (ifp == NULL) { 885 device_printf(dev, "can not if_alloc()\n"); 886 error = ENOSPC; 887 goto free_ldata; 888 } 889 ifp->if_softc = sc; 890 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 891 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 892 ifp->if_ioctl = my_ioctl; 893 ifp->if_start = my_start; 894 ifp->if_init = my_init; 895 ifp->if_baudrate = 10000000; 896 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 897 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 898 IFQ_SET_READY(&ifp->if_snd); 899 900 if (sc->my_info->my_did == MTD803ID) 901 sc->my_pinfo = my_phys; 902 else { 903 if (bootverbose) 904 device_printf(dev, "probing for a PHY\n"); 905 MY_LOCK(sc); 906 for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) { 907 if (bootverbose) 908 device_printf(dev, "checking address: %d\n", i); 909 sc->my_phy_addr = i; 910 phy_sts = my_phy_readreg(sc, PHY_BMSR); 911 if ((phy_sts != 0) && (phy_sts != 0xffff)) 912 break; 913 else 914 phy_sts = 0; 915 } 916 if (phy_sts) { 917 phy_vid = my_phy_readreg(sc, PHY_VENID); 918 phy_did = my_phy_readreg(sc, PHY_DEVID); 919 if (bootverbose) { 920 device_printf(dev, "found PHY at address %d, ", 921 sc->my_phy_addr); 922 printf("vendor id: %x device id: %x\n", 923 phy_vid, phy_did); 924 } 925 p = my_phys; 926 while (p->my_vid) { 927 if (phy_vid == p->my_vid) { 928 sc->my_pinfo = p; 929 break; 930 } 931 p++; 932 } 933 if (sc->my_pinfo == NULL) 934 sc->my_pinfo = &my_phys[PHY_UNKNOWN]; 935 if (bootverbose) 936 device_printf(dev, "PHY type: %s\n", 937 sc->my_pinfo->my_name); 938 } else { 939 MY_UNLOCK(sc); 940 device_printf(dev, "MII without any phy!\n"); 941 error = ENXIO; 942 goto free_if; 943 } 944 MY_UNLOCK(sc); 945 } 946 947 /* Do ifmedia setup. */ 948 ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts); 949 MY_LOCK(sc); 950 my_getmode_mii(sc); 951 my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1); 952 media = sc->ifmedia.ifm_media; 953 my_stop(sc); 954 MY_UNLOCK(sc); 955 ifmedia_set(&sc->ifmedia, media); 956 957 ether_ifattach(ifp, eaddr); 958 959 error = bus_setup_intr(dev, sc->my_irq, INTR_TYPE_NET | INTR_MPSAFE, 960 NULL, my_intr, sc, &sc->my_intrhand); 961 962 if (error) { 963 device_printf(dev, "couldn't set up irq\n"); 964 goto detach_if; 965 } 966 967 return (0); 968 969 detach_if: 970 ether_ifdetach(ifp); 971 free_if: 972 if_free(ifp); 973 free_ldata: 974 free(sc->my_ldata_ptr, M_DEVBUF); 975 release_irq: 976 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq); 977 release_io: 978 bus_release_resource(dev, MY_RES, MY_RID, sc->my_res); 979 destroy_mutex: 980 mtx_destroy(&sc->my_mtx); 981 return (error); 982 } 983 984 static int 985 my_detach(device_t dev) 986 { 987 struct my_softc *sc; 988 struct ifnet *ifp; 989 990 sc = device_get_softc(dev); 991 ifp = sc->my_ifp; 992 ether_ifdetach(ifp); 993 MY_LOCK(sc); 994 my_stop(sc); 995 MY_UNLOCK(sc); 996 bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand); 997 callout_drain(&sc->my_watchdog); 998 callout_drain(&sc->my_autoneg_timer); 999 1000 if_free(ifp); 1001 free(sc->my_ldata_ptr, M_DEVBUF); 1002 1003 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq); 1004 bus_release_resource(dev, MY_RES, MY_RID, sc->my_res); 1005 mtx_destroy(&sc->my_mtx); 1006 return (0); 1007 } 1008 1009 1010 /* 1011 * Initialize the transmit descriptors. 1012 */ 1013 static int 1014 my_list_tx_init(struct my_softc * sc) 1015 { 1016 struct my_chain_data *cd; 1017 struct my_list_data *ld; 1018 int i; 1019 1020 MY_LOCK_ASSERT(sc); 1021 cd = &sc->my_cdata; 1022 ld = sc->my_ldata; 1023 for (i = 0; i < MY_TX_LIST_CNT; i++) { 1024 cd->my_tx_chain[i].my_ptr = &ld->my_tx_list[i]; 1025 if (i == (MY_TX_LIST_CNT - 1)) 1026 cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[0]; 1027 else 1028 cd->my_tx_chain[i].my_nextdesc = 1029 &cd->my_tx_chain[i + 1]; 1030 } 1031 cd->my_tx_free = &cd->my_tx_chain[0]; 1032 cd->my_tx_tail = cd->my_tx_head = NULL; 1033 return (0); 1034 } 1035 1036 /* 1037 * Initialize the RX descriptors and allocate mbufs for them. Note that we 1038 * arrange the descriptors in a closed ring, so that the last descriptor 1039 * points back to the first. 1040 */ 1041 static int 1042 my_list_rx_init(struct my_softc * sc) 1043 { 1044 struct my_chain_data *cd; 1045 struct my_list_data *ld; 1046 int i; 1047 1048 MY_LOCK_ASSERT(sc); 1049 cd = &sc->my_cdata; 1050 ld = sc->my_ldata; 1051 for (i = 0; i < MY_RX_LIST_CNT; i++) { 1052 cd->my_rx_chain[i].my_ptr = 1053 (struct my_desc *) & ld->my_rx_list[i]; 1054 if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS) { 1055 MY_UNLOCK(sc); 1056 return (ENOBUFS); 1057 } 1058 if (i == (MY_RX_LIST_CNT - 1)) { 1059 cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0]; 1060 ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]); 1061 } else { 1062 cd->my_rx_chain[i].my_nextdesc = 1063 &cd->my_rx_chain[i + 1]; 1064 ld->my_rx_list[i].my_next = 1065 vtophys(&ld->my_rx_list[i + 1]); 1066 } 1067 } 1068 cd->my_rx_head = &cd->my_rx_chain[0]; 1069 return (0); 1070 } 1071 1072 /* 1073 * Initialize an RX descriptor and attach an MBUF cluster. 1074 */ 1075 static int 1076 my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c) 1077 { 1078 struct mbuf *m_new = NULL; 1079 1080 MY_LOCK_ASSERT(sc); 1081 MGETHDR(m_new, M_NOWAIT, MT_DATA); 1082 if (m_new == NULL) { 1083 device_printf(sc->my_dev, 1084 "no memory for rx list -- packet dropped!\n"); 1085 return (ENOBUFS); 1086 } 1087 if (!(MCLGET(m_new, M_NOWAIT))) { 1088 device_printf(sc->my_dev, 1089 "no memory for rx list -- packet dropped!\n"); 1090 m_freem(m_new); 1091 return (ENOBUFS); 1092 } 1093 c->my_mbuf = m_new; 1094 c->my_ptr->my_data = vtophys(mtod(m_new, caddr_t)); 1095 c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift; 1096 c->my_ptr->my_status = MY_OWNByNIC; 1097 return (0); 1098 } 1099 1100 /* 1101 * A frame has been uploaded: pass the resulting mbuf chain up to the higher 1102 * level protocols. 1103 */ 1104 static void 1105 my_rxeof(struct my_softc * sc) 1106 { 1107 struct ether_header *eh; 1108 struct mbuf *m; 1109 struct ifnet *ifp; 1110 struct my_chain_onefrag *cur_rx; 1111 int total_len = 0; 1112 u_int32_t rxstat; 1113 1114 MY_LOCK_ASSERT(sc); 1115 ifp = sc->my_ifp; 1116 while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status) 1117 & MY_OWNByNIC)) { 1118 cur_rx = sc->my_cdata.my_rx_head; 1119 sc->my_cdata.my_rx_head = cur_rx->my_nextdesc; 1120 1121 if (rxstat & MY_ES) { /* error summary: give up this rx pkt */ 1122 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1123 cur_rx->my_ptr->my_status = MY_OWNByNIC; 1124 continue; 1125 } 1126 /* No errors; receive the packet. */ 1127 total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift; 1128 total_len -= ETHER_CRC_LEN; 1129 1130 if (total_len < MINCLSIZE) { 1131 m = m_devget(mtod(cur_rx->my_mbuf, char *), 1132 total_len, 0, ifp, NULL); 1133 cur_rx->my_ptr->my_status = MY_OWNByNIC; 1134 if (m == NULL) { 1135 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1136 continue; 1137 } 1138 } else { 1139 m = cur_rx->my_mbuf; 1140 /* 1141 * Try to conjure up a new mbuf cluster. If that 1142 * fails, it means we have an out of memory condition 1143 * and should leave the buffer in place and continue. 1144 * This will result in a lost packet, but there's 1145 * little else we can do in this situation. 1146 */ 1147 if (my_newbuf(sc, cur_rx) == ENOBUFS) { 1148 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1149 cur_rx->my_ptr->my_status = MY_OWNByNIC; 1150 continue; 1151 } 1152 m->m_pkthdr.rcvif = ifp; 1153 m->m_pkthdr.len = m->m_len = total_len; 1154 } 1155 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1156 eh = mtod(m, struct ether_header *); 1157 #if NBPFILTER > 0 1158 /* 1159 * Handle BPF listeners. Let the BPF user see the packet, but 1160 * don't pass it up to the ether_input() layer unless it's a 1161 * broadcast packet, multicast packet, matches our ethernet 1162 * address or the interface is in promiscuous mode. 1163 */ 1164 if (bpf_peers_present(ifp->if_bpf)) { 1165 bpf_mtap(ifp->if_bpf, m); 1166 if (ifp->if_flags & IFF_PROMISC && 1167 (bcmp(eh->ether_dhost, IF_LLADDR(sc->my_ifp), 1168 ETHER_ADDR_LEN) && 1169 (eh->ether_dhost[0] & 1) == 0)) { 1170 m_freem(m); 1171 continue; 1172 } 1173 } 1174 #endif 1175 MY_UNLOCK(sc); 1176 (*ifp->if_input)(ifp, m); 1177 MY_LOCK(sc); 1178 } 1179 return; 1180 } 1181 1182 1183 /* 1184 * A frame was downloaded to the chip. It's safe for us to clean up the list 1185 * buffers. 1186 */ 1187 static void 1188 my_txeof(struct my_softc * sc) 1189 { 1190 struct my_chain *cur_tx; 1191 struct ifnet *ifp; 1192 1193 MY_LOCK_ASSERT(sc); 1194 ifp = sc->my_ifp; 1195 /* Clear the timeout timer. */ 1196 sc->my_timer = 0; 1197 if (sc->my_cdata.my_tx_head == NULL) { 1198 return; 1199 } 1200 /* 1201 * Go through our tx list and free mbufs for those frames that have 1202 * been transmitted. 1203 */ 1204 while (sc->my_cdata.my_tx_head->my_mbuf != NULL) { 1205 u_int32_t txstat; 1206 1207 cur_tx = sc->my_cdata.my_tx_head; 1208 txstat = MY_TXSTATUS(cur_tx); 1209 if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT) 1210 break; 1211 if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) { 1212 if (txstat & MY_TXERR) { 1213 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1214 if (txstat & MY_EC) /* excessive collision */ 1215 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); 1216 if (txstat & MY_LC) /* late collision */ 1217 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); 1218 } 1219 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1220 (txstat & MY_NCRMASK) >> MY_NCRShift); 1221 } 1222 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1223 m_freem(cur_tx->my_mbuf); 1224 cur_tx->my_mbuf = NULL; 1225 if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) { 1226 sc->my_cdata.my_tx_head = NULL; 1227 sc->my_cdata.my_tx_tail = NULL; 1228 break; 1229 } 1230 sc->my_cdata.my_tx_head = cur_tx->my_nextdesc; 1231 } 1232 if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) { 1233 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (CSR_READ_4(sc, MY_TSR) & MY_NCRMask)); 1234 } 1235 return; 1236 } 1237 1238 /* 1239 * TX 'end of channel' interrupt handler. 1240 */ 1241 static void 1242 my_txeoc(struct my_softc * sc) 1243 { 1244 struct ifnet *ifp; 1245 1246 MY_LOCK_ASSERT(sc); 1247 ifp = sc->my_ifp; 1248 sc->my_timer = 0; 1249 if (sc->my_cdata.my_tx_head == NULL) { 1250 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1251 sc->my_cdata.my_tx_tail = NULL; 1252 if (sc->my_want_auto) 1253 my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1); 1254 } else { 1255 if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) { 1256 MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC; 1257 sc->my_timer = 5; 1258 CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF); 1259 } 1260 } 1261 return; 1262 } 1263 1264 static void 1265 my_intr(void *arg) 1266 { 1267 struct my_softc *sc; 1268 struct ifnet *ifp; 1269 u_int32_t status; 1270 1271 sc = arg; 1272 MY_LOCK(sc); 1273 ifp = sc->my_ifp; 1274 if (!(ifp->if_flags & IFF_UP)) { 1275 MY_UNLOCK(sc); 1276 return; 1277 } 1278 /* Disable interrupts. */ 1279 CSR_WRITE_4(sc, MY_IMR, 0x00000000); 1280 1281 for (;;) { 1282 status = CSR_READ_4(sc, MY_ISR); 1283 status &= MY_INTRS; 1284 if (status) 1285 CSR_WRITE_4(sc, MY_ISR, status); 1286 else 1287 break; 1288 1289 if (status & MY_RI) /* receive interrupt */ 1290 my_rxeof(sc); 1291 1292 if ((status & MY_RBU) || (status & MY_RxErr)) { 1293 /* rx buffer unavailable or rx error */ 1294 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1295 #ifdef foo 1296 my_stop(sc); 1297 my_reset(sc); 1298 my_init_locked(sc); 1299 #endif 1300 } 1301 if (status & MY_TI) /* tx interrupt */ 1302 my_txeof(sc); 1303 if (status & MY_ETI) /* tx early interrupt */ 1304 my_txeof(sc); 1305 if (status & MY_TBU) /* tx buffer unavailable */ 1306 my_txeoc(sc); 1307 1308 #if 0 /* 90/1/18 delete */ 1309 if (status & MY_FBE) { 1310 my_reset(sc); 1311 my_init_locked(sc); 1312 } 1313 #endif 1314 1315 } 1316 1317 /* Re-enable interrupts. */ 1318 CSR_WRITE_4(sc, MY_IMR, MY_INTRS); 1319 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1320 my_start_locked(ifp); 1321 MY_UNLOCK(sc); 1322 return; 1323 } 1324 1325 /* 1326 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1327 * pointers to the fragment pointers. 1328 */ 1329 static int 1330 my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head) 1331 { 1332 struct my_desc *f = NULL; 1333 int total_len; 1334 struct mbuf *m, *m_new = NULL; 1335 1336 MY_LOCK_ASSERT(sc); 1337 /* calculate the total tx pkt length */ 1338 total_len = 0; 1339 for (m = m_head; m != NULL; m = m->m_next) 1340 total_len += m->m_len; 1341 /* 1342 * Start packing the mbufs in this chain into the fragment pointers. 1343 * Stop when we run out of fragments or hit the end of the mbuf 1344 * chain. 1345 */ 1346 m = m_head; 1347 MGETHDR(m_new, M_NOWAIT, MT_DATA); 1348 if (m_new == NULL) { 1349 device_printf(sc->my_dev, "no memory for tx list"); 1350 return (1); 1351 } 1352 if (m_head->m_pkthdr.len > MHLEN) { 1353 if (!(MCLGET(m_new, M_NOWAIT))) { 1354 m_freem(m_new); 1355 device_printf(sc->my_dev, "no memory for tx list"); 1356 return (1); 1357 } 1358 } 1359 m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); 1360 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; 1361 m_freem(m_head); 1362 m_head = m_new; 1363 f = &c->my_ptr->my_frag[0]; 1364 f->my_status = 0; 1365 f->my_data = vtophys(mtod(m_new, caddr_t)); 1366 total_len = m_new->m_len; 1367 f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable; 1368 f->my_ctl |= total_len << MY_PKTShift; /* pkt size */ 1369 f->my_ctl |= total_len; /* buffer size */ 1370 /* 89/12/29 add, for mtd891 *//* [ 89? ] */ 1371 if (sc->my_info->my_did == MTD891ID) 1372 f->my_ctl |= MY_ETIControl | MY_RetryTxLC; 1373 c->my_mbuf = m_head; 1374 c->my_lastdesc = 0; 1375 MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]); 1376 return (0); 1377 } 1378 1379 /* 1380 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1381 * to the mbuf data regions directly in the transmit lists. We also save a 1382 * copy of the pointers since the transmit list fragment pointers are 1383 * physical addresses. 1384 */ 1385 static void 1386 my_start(struct ifnet * ifp) 1387 { 1388 struct my_softc *sc; 1389 1390 sc = ifp->if_softc; 1391 MY_LOCK(sc); 1392 my_start_locked(ifp); 1393 MY_UNLOCK(sc); 1394 } 1395 1396 static void 1397 my_start_locked(struct ifnet * ifp) 1398 { 1399 struct my_softc *sc; 1400 struct mbuf *m_head = NULL; 1401 struct my_chain *cur_tx = NULL, *start_tx; 1402 1403 sc = ifp->if_softc; 1404 MY_LOCK_ASSERT(sc); 1405 if (sc->my_autoneg) { 1406 sc->my_tx_pend = 1; 1407 return; 1408 } 1409 /* 1410 * Check for an available queue slot. If there are none, punt. 1411 */ 1412 if (sc->my_cdata.my_tx_free->my_mbuf != NULL) { 1413 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1414 return; 1415 } 1416 start_tx = sc->my_cdata.my_tx_free; 1417 while (sc->my_cdata.my_tx_free->my_mbuf == NULL) { 1418 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1419 if (m_head == NULL) 1420 break; 1421 1422 /* Pick a descriptor off the free list. */ 1423 cur_tx = sc->my_cdata.my_tx_free; 1424 sc->my_cdata.my_tx_free = cur_tx->my_nextdesc; 1425 1426 /* Pack the data into the descriptor. */ 1427 my_encap(sc, cur_tx, m_head); 1428 1429 if (cur_tx != start_tx) 1430 MY_TXOWN(cur_tx) = MY_OWNByNIC; 1431 #if NBPFILTER > 0 1432 /* 1433 * If there's a BPF listener, bounce a copy of this frame to 1434 * him. 1435 */ 1436 BPF_MTAP(ifp, cur_tx->my_mbuf); 1437 #endif 1438 } 1439 /* 1440 * If there are no packets queued, bail. 1441 */ 1442 if (cur_tx == NULL) { 1443 return; 1444 } 1445 /* 1446 * Place the request for the upload interrupt in the last descriptor 1447 * in the chain. This way, if we're chaining several packets at once, 1448 * we'll only get an interrupt once for the whole chain rather than 1449 * once for each packet. 1450 */ 1451 MY_TXCTL(cur_tx) |= MY_TXIC; 1452 cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC; 1453 sc->my_cdata.my_tx_tail = cur_tx; 1454 if (sc->my_cdata.my_tx_head == NULL) 1455 sc->my_cdata.my_tx_head = start_tx; 1456 MY_TXOWN(start_tx) = MY_OWNByNIC; 1457 CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF); /* tx polling demand */ 1458 1459 /* 1460 * Set a timeout in case the chip goes out to lunch. 1461 */ 1462 sc->my_timer = 5; 1463 return; 1464 } 1465 1466 static void 1467 my_init(void *xsc) 1468 { 1469 struct my_softc *sc = xsc; 1470 1471 MY_LOCK(sc); 1472 my_init_locked(sc); 1473 MY_UNLOCK(sc); 1474 } 1475 1476 static void 1477 my_init_locked(struct my_softc *sc) 1478 { 1479 struct ifnet *ifp = sc->my_ifp; 1480 u_int16_t phy_bmcr = 0; 1481 1482 MY_LOCK_ASSERT(sc); 1483 if (sc->my_autoneg) { 1484 return; 1485 } 1486 if (sc->my_pinfo != NULL) 1487 phy_bmcr = my_phy_readreg(sc, PHY_BMCR); 1488 /* 1489 * Cancel pending I/O and free all RX/TX buffers. 1490 */ 1491 my_stop(sc); 1492 my_reset(sc); 1493 1494 /* 1495 * Set cache alignment and burst length. 1496 */ 1497 #if 0 /* 89/9/1 modify, */ 1498 CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512); 1499 CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF); 1500 #endif 1501 CSR_WRITE_4(sc, MY_BCR, MY_PBL8); 1502 CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512); 1503 /* 1504 * 89/12/29 add, for mtd891, 1505 */ 1506 if (sc->my_info->my_did == MTD891ID) { 1507 MY_SETBIT(sc, MY_BCR, MY_PROG); 1508 MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced); 1509 } 1510 my_setcfg(sc, phy_bmcr); 1511 /* Init circular RX list. */ 1512 if (my_list_rx_init(sc) == ENOBUFS) { 1513 device_printf(sc->my_dev, "init failed: no memory for rx buffers\n"); 1514 my_stop(sc); 1515 return; 1516 } 1517 /* Init TX descriptors. */ 1518 my_list_tx_init(sc); 1519 1520 /* If we want promiscuous mode, set the allframes bit. */ 1521 if (ifp->if_flags & IFF_PROMISC) 1522 MY_SETBIT(sc, MY_TCRRCR, MY_PROM); 1523 else 1524 MY_CLRBIT(sc, MY_TCRRCR, MY_PROM); 1525 1526 /* 1527 * Set capture broadcast bit to capture broadcast frames. 1528 */ 1529 if (ifp->if_flags & IFF_BROADCAST) 1530 MY_SETBIT(sc, MY_TCRRCR, MY_AB); 1531 else 1532 MY_CLRBIT(sc, MY_TCRRCR, MY_AB); 1533 1534 /* 1535 * Program the multicast filter, if necessary. 1536 */ 1537 my_setmulti(sc); 1538 1539 /* 1540 * Load the address of the RX list. 1541 */ 1542 MY_CLRBIT(sc, MY_TCRRCR, MY_RE); 1543 CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0])); 1544 1545 /* 1546 * Enable interrupts. 1547 */ 1548 CSR_WRITE_4(sc, MY_IMR, MY_INTRS); 1549 CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF); 1550 1551 /* Enable receiver and transmitter. */ 1552 MY_SETBIT(sc, MY_TCRRCR, MY_RE); 1553 MY_CLRBIT(sc, MY_TCRRCR, MY_TE); 1554 CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0])); 1555 MY_SETBIT(sc, MY_TCRRCR, MY_TE); 1556 1557 /* Restore state of BMCR */ 1558 if (sc->my_pinfo != NULL) 1559 my_phy_writereg(sc, PHY_BMCR, phy_bmcr); 1560 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1561 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1562 1563 callout_reset(&sc->my_watchdog, hz, my_watchdog, sc); 1564 return; 1565 } 1566 1567 /* 1568 * Set media options. 1569 */ 1570 1571 static int 1572 my_ifmedia_upd(struct ifnet * ifp) 1573 { 1574 struct my_softc *sc; 1575 struct ifmedia *ifm; 1576 1577 sc = ifp->if_softc; 1578 MY_LOCK(sc); 1579 ifm = &sc->ifmedia; 1580 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { 1581 MY_UNLOCK(sc); 1582 return (EINVAL); 1583 } 1584 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) 1585 my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1); 1586 else 1587 my_setmode_mii(sc, ifm->ifm_media); 1588 MY_UNLOCK(sc); 1589 return (0); 1590 } 1591 1592 /* 1593 * Report current media status. 1594 */ 1595 1596 static void 1597 my_ifmedia_sts(struct ifnet * ifp, struct ifmediareq * ifmr) 1598 { 1599 struct my_softc *sc; 1600 u_int16_t advert = 0, ability = 0; 1601 1602 sc = ifp->if_softc; 1603 MY_LOCK(sc); 1604 ifmr->ifm_active = IFM_ETHER; 1605 if (!(my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) { 1606 #if 0 /* this version did not support 1000M, */ 1607 if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_1000) 1608 ifmr->ifm_active = IFM_ETHER | IFM_1000TX; 1609 #endif 1610 if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL) 1611 ifmr->ifm_active = IFM_ETHER | IFM_100_TX; 1612 else 1613 ifmr->ifm_active = IFM_ETHER | IFM_10_T; 1614 if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX) 1615 ifmr->ifm_active |= IFM_FDX; 1616 else 1617 ifmr->ifm_active |= IFM_HDX; 1618 1619 MY_UNLOCK(sc); 1620 return; 1621 } 1622 ability = my_phy_readreg(sc, PHY_LPAR); 1623 advert = my_phy_readreg(sc, PHY_ANAR); 1624 1625 #if 0 /* this version did not support 1000M, */ 1626 if (sc->my_pinfo->my_vid = MarvellPHYID0) { 1627 ability2 = my_phy_readreg(sc, PHY_1000SR); 1628 if (ability2 & PHY_1000SR_1000BTXFULL) { 1629 advert = 0; 1630 ability = 0; 1631 ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_FDX; 1632 } else if (ability & PHY_1000SR_1000BTXHALF) { 1633 advert = 0; 1634 ability = 0; 1635 ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_HDX; 1636 } 1637 } 1638 #endif 1639 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) 1640 ifmr->ifm_active = IFM_ETHER | IFM_100_T4; 1641 else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL) 1642 ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 1643 else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF) 1644 ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX; 1645 else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL) 1646 ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX; 1647 else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF) 1648 ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX; 1649 MY_UNLOCK(sc); 1650 return; 1651 } 1652 1653 static int 1654 my_ioctl(struct ifnet * ifp, u_long command, caddr_t data) 1655 { 1656 struct my_softc *sc = ifp->if_softc; 1657 struct ifreq *ifr = (struct ifreq *) data; 1658 int error; 1659 1660 switch (command) { 1661 case SIOCSIFFLAGS: 1662 MY_LOCK(sc); 1663 if (ifp->if_flags & IFF_UP) 1664 my_init_locked(sc); 1665 else if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1666 my_stop(sc); 1667 MY_UNLOCK(sc); 1668 error = 0; 1669 break; 1670 case SIOCADDMULTI: 1671 case SIOCDELMULTI: 1672 MY_LOCK(sc); 1673 my_setmulti(sc); 1674 MY_UNLOCK(sc); 1675 error = 0; 1676 break; 1677 case SIOCGIFMEDIA: 1678 case SIOCSIFMEDIA: 1679 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); 1680 break; 1681 default: 1682 error = ether_ioctl(ifp, command, data); 1683 break; 1684 } 1685 return (error); 1686 } 1687 1688 static void 1689 my_watchdog(void *arg) 1690 { 1691 struct my_softc *sc; 1692 struct ifnet *ifp; 1693 1694 sc = arg; 1695 MY_LOCK_ASSERT(sc); 1696 callout_reset(&sc->my_watchdog, hz, my_watchdog, sc); 1697 if (sc->my_timer == 0 || --sc->my_timer > 0) 1698 return; 1699 1700 ifp = sc->my_ifp; 1701 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1702 if_printf(ifp, "watchdog timeout\n"); 1703 if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT)) 1704 if_printf(ifp, "no carrier - transceiver cable problem?\n"); 1705 my_stop(sc); 1706 my_reset(sc); 1707 my_init_locked(sc); 1708 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1709 my_start_locked(ifp); 1710 } 1711 1712 1713 /* 1714 * Stop the adapter and free any mbufs allocated to the RX and TX lists. 1715 */ 1716 static void 1717 my_stop(struct my_softc * sc) 1718 { 1719 int i; 1720 struct ifnet *ifp; 1721 1722 MY_LOCK_ASSERT(sc); 1723 ifp = sc->my_ifp; 1724 1725 callout_stop(&sc->my_autoneg_timer); 1726 callout_stop(&sc->my_watchdog); 1727 1728 MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE)); 1729 CSR_WRITE_4(sc, MY_IMR, 0x00000000); 1730 CSR_WRITE_4(sc, MY_TXLBA, 0x00000000); 1731 CSR_WRITE_4(sc, MY_RXLBA, 0x00000000); 1732 1733 /* 1734 * Free data in the RX lists. 1735 */ 1736 for (i = 0; i < MY_RX_LIST_CNT; i++) { 1737 if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) { 1738 m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf); 1739 sc->my_cdata.my_rx_chain[i].my_mbuf = NULL; 1740 } 1741 } 1742 bzero((char *)&sc->my_ldata->my_rx_list, 1743 sizeof(sc->my_ldata->my_rx_list)); 1744 /* 1745 * Free the TX list buffers. 1746 */ 1747 for (i = 0; i < MY_TX_LIST_CNT; i++) { 1748 if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) { 1749 m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf); 1750 sc->my_cdata.my_tx_chain[i].my_mbuf = NULL; 1751 } 1752 } 1753 bzero((char *)&sc->my_ldata->my_tx_list, 1754 sizeof(sc->my_ldata->my_tx_list)); 1755 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1756 return; 1757 } 1758 1759 /* 1760 * Stop all chip I/O so that the kernel's probe routines don't get confused 1761 * by errant DMAs when rebooting. 1762 */ 1763 static int 1764 my_shutdown(device_t dev) 1765 { 1766 struct my_softc *sc; 1767 1768 sc = device_get_softc(dev); 1769 MY_LOCK(sc); 1770 my_stop(sc); 1771 MY_UNLOCK(sc); 1772 return 0; 1773 } 1774