/*-
 * Copyright (c) 1997, 1998, 1999, 2000-2003
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * ASIX Electronics AX88172/AX88178/AX88772 USB 2.0 ethernet driver.
 * Used in the LinkSys USB200M and various other adapters.
 *
 * Manuals available from:
 * http://www.asix.com.tw/datasheet/mac/Ax88172.PDF
 * Note: you need the manual for the AX88170 chip (USB 1.x ethernet
 * controller) to find the definitions for the RX control register.
 * http://www.asix.com.tw/datasheet/mac/Ax88170.PDF
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer
 * Wind River Systems
 */

/*
 * The AX88172 provides USB ethernet support at 10 and 100Mbps.
 * It uses an external PHY (reference designs use a RealTek chip),
 * and has a 64-bit multicast hash filter.  There is some information
 * missing from the manual that one needs to know in order to make
 * the chip function:
 *
 * - You must set bit 7 in the RX control register, otherwise the
 *   chip won't receive any packets.
 * - You must initialize all 3 IPG registers, or you won't be able
 *   to send any packets.
 *
 * Note that this device appears to only support loading the station
 * address via autoload from the EEPROM (i.e. there's no way to manually
 * set it).
 *
 * (Adam Weinberger wanted me to name this driver if_gir.c.)
 */
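/*
 * In this driver the two quirks above come down to register writes issued
 * from axe_init() below.  A condensed sketch (command macros come from
 * if_axereg.h; AXE_RXCMD_ENABLE is assumed to be the "bit 7" mentioned
 * above, which the chip documentation would have to confirm):
 *
 *	axe_cmd(sc, AXE_172_CMD_WRITE_IPG0, 0, sc->sc_ipgs[0], NULL);
 *	axe_cmd(sc, AXE_172_CMD_WRITE_IPG1, 0, sc->sc_ipgs[1], NULL);
 *	axe_cmd(sc, AXE_172_CMD_WRITE_IPG2, 0, sc->sc_ipgs[2], NULL);
 *	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0,
 *	    AXE_RXCMD_ENABLE | AXE_RXCMD_BROADCAST | AXE_172_RXCMD_UNICAST,
 *	    NULL);
 *
 * axe_init() is the authoritative sequence; this is only an illustration
 * of the minimal bring-up the manual leaves undocumented.
 */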
/*
 * Ax88178 and Ax88772 support backported from the OpenBSD driver.
 * 2007/02/12, J.R. Oldroyd, fbsd@opal.com
 *
 * Manual here:
 * http://www.asix.com.tw/FrootAttach/datasheet/AX88178_datasheet_Rev10.pdf
 * http://www.asix.com.tw/FrootAttach/datasheet/AX88772_datasheet_Rev10.pdf
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/sx.h>

#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include "usbdevs.h"

#define	USB_DEBUG_VAR axe_debug
#include <dev/usb/usb_debug.h>
#include <dev/usb/usb_process.h>

#include <dev/usb/net/usb_ethernet.h>
#include <dev/usb/net/if_axereg.h>

/*
 * AXE_178_MAX_FRAME_BURST
 * max frame burst size for Ax88178 and Ax88772
 *	0	2048 bytes
 *	1	4096 bytes
 *	2	8192 bytes
 *	3	16384 bytes
 * use the largest your system can handle without USB stalling.
 *
 * NB: 88772 parts appear to generate lots of input errors with
 * a 2K rx buffer and 8K is only slightly faster than 4K on an
 * EHCI port on a T42 so change at your own risk.
 */
#define	AXE_178_MAX_FRAME_BURST	1

#define	AXE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef USB_DEBUG
static int axe_debug = 0;

static SYSCTL_NODE(_hw_usb, OID_AUTO, axe, CTLFLAG_RW, 0, "USB axe");
SYSCTL_INT(_hw_usb_axe, OID_AUTO, debug, CTLFLAG_RW, &axe_debug, 0,
    "Debug level");
#endif
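/*
 * On kernels built with USB_DEBUG, the knob above is exposed as a
 * read/write sysctl; based on the SYSCTL_NODE/SYSCTL_INT declarations the
 * name should be hw.usb.axe.debug, so e.g. "sysctl hw.usb.axe.debug=1"
 * raises the driver's debug verbosity at runtime.
 */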
/*
 * Various supported device vendors/products.
 */
static const STRUCT_USB_HOST_ID axe_devs[] = {
#define	AXE_DEV(v,p,i) { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, i) }
	AXE_DEV(ABOCOM, UF200, 0),
	AXE_DEV(ACERCM, EP1427X2, 0),
	AXE_DEV(APPLE, ETHERNET, AXE_FLAG_772),
	AXE_DEV(ASIX, AX88172, 0),
	AXE_DEV(ASIX, AX88178, AXE_FLAG_178),
	AXE_DEV(ASIX, AX88772, AXE_FLAG_772),
	AXE_DEV(ASIX, AX88772A, AXE_FLAG_772A),
	AXE_DEV(ASIX, AX88772B, AXE_FLAG_772B),
	AXE_DEV(ASIX, AX88772B_1, AXE_FLAG_772B),
	AXE_DEV(ATEN, UC210T, 0),
	AXE_DEV(BELKIN, F5D5055, AXE_FLAG_178),
	AXE_DEV(BILLIONTON, USB2AR, 0),
	AXE_DEV(CISCOLINKSYS, USB200MV2, AXE_FLAG_772A),
	AXE_DEV(COREGA, FETHER_USB2_TX, 0),
	AXE_DEV(DLINK, DUBE100, 0),
	AXE_DEV(DLINK, DUBE100B1, AXE_FLAG_772),
	AXE_DEV(GOODWAY, GWUSB2E, 0),
	AXE_DEV(IODATA, ETGUS2, AXE_FLAG_178),
	AXE_DEV(JVC, MP_PRX1, 0),
	AXE_DEV(LINKSYS2, USB200M, 0),
	AXE_DEV(LINKSYS4, USB1000, AXE_FLAG_178),
	AXE_DEV(LOGITEC, LAN_GTJU2A, AXE_FLAG_178),
	AXE_DEV(MELCO, LUAU2KTX, 0),
	AXE_DEV(MELCO, LUA3U2AGT, AXE_FLAG_178),
	AXE_DEV(NETGEAR, FA120, 0),
	AXE_DEV(OQO, ETHER01PLUS, AXE_FLAG_772),
	AXE_DEV(PLANEX3, GU1000T, AXE_FLAG_178),
	AXE_DEV(SITECOM, LN029, 0),
	AXE_DEV(SITECOMEU, LN028, AXE_FLAG_178),
	AXE_DEV(SYSTEMTALKS, SGCX2UL, 0),
#undef AXE_DEV
};

static device_probe_t axe_probe;
static device_attach_t axe_attach;
static device_detach_t axe_detach;

static usb_callback_t axe_bulk_read_callback;
static usb_callback_t axe_bulk_write_callback;

static miibus_readreg_t axe_miibus_readreg;
static miibus_writereg_t axe_miibus_writereg;
static miibus_statchg_t axe_miibus_statchg;

static uether_fn_t axe_attach_post;
static uether_fn_t axe_init;
static uether_fn_t axe_stop;
static uether_fn_t axe_start;
static uether_fn_t axe_tick;
static uether_fn_t axe_setmulti;
static uether_fn_t axe_setpromisc;

static int	axe_attach_post_sub(struct usb_ether *);
static int	axe_ifmedia_upd(struct ifnet *);
static void	axe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int	axe_cmd(struct axe_softc *, int, int, int, void *);
static void	axe_ax88178_init(struct axe_softc *);
static void	axe_ax88772_init(struct axe_softc *);
static void	axe_ax88772_phywake(struct axe_softc *);
static void	axe_ax88772a_init(struct axe_softc *);
static void	axe_ax88772b_init(struct axe_softc *);
static int	axe_get_phyno(struct axe_softc *, int);
static int	axe_ioctl(struct ifnet *, u_long, caddr_t);
static int	axe_rx_frame(struct usb_ether *, struct usb_page_cache *, int);
static int	axe_rxeof(struct usb_ether *, struct usb_page_cache *,
		    unsigned int offset, unsigned int, struct axe_csum_hdr *);
static void	axe_csum_cfg(struct usb_ether *);

static const struct usb_config axe_config[AXE_N_TRANSFER] = {

	[AXE_BULK_DT_WR] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.frames = 16,
		.bufsize = 16 * MCLBYTES,
		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
		.callback = axe_bulk_write_callback,
		.timeout = 10000,	/* 10 seconds */
	},

	[AXE_BULK_DT_RD] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = 16384,	/* bytes */
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
		.callback = axe_bulk_read_callback,
		.timeout = 0,	/* no timeout */
	},
};
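/*
 * AX88772B maximum frame burst settings.  Each entry in the table below
 * supplies the threshold/byte-count values written via
 * AXE_772B_CMD_RXCTL_WRITE_CFG for one of the supported RX buffer sizes
 * (2KB up to 32KB); the exact field layout is defined in if_axereg.h.
 * axe_init() currently selects the AX88772B_MFB_16K entry.
 */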
static const struct ax88772b_mfb ax88772b_mfb_table[] = {
	{ 0x8000, 0x8001, 2048 },
	{ 0x8100, 0x8147, 4096 },
	{ 0x8200, 0x81EB, 6144 },
	{ 0x8300, 0x83D7, 8192 },
	{ 0x8400, 0x851E, 16384 },
	{ 0x8500, 0x8666, 20480 },
	{ 0x8600, 0x87AE, 24576 },
	{ 0x8700, 0x8A3D, 32768 }
};

static device_method_t axe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, axe_probe),
	DEVMETHOD(device_attach, axe_attach),
	DEVMETHOD(device_detach, axe_detach),

	/* MII interface */
	DEVMETHOD(miibus_readreg, axe_miibus_readreg),
	DEVMETHOD(miibus_writereg, axe_miibus_writereg),
	DEVMETHOD(miibus_statchg, axe_miibus_statchg),

	DEVMETHOD_END
};

static driver_t axe_driver = {
	.name = "axe",
	.methods = axe_methods,
	.size = sizeof(struct axe_softc),
};

static devclass_t axe_devclass;

DRIVER_MODULE(axe, uhub, axe_driver, axe_devclass, NULL, 0);
DRIVER_MODULE(miibus, axe, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(axe, uether, 1, 1, 1);
MODULE_DEPEND(axe, usb, 1, 1, 1);
MODULE_DEPEND(axe, ether, 1, 1, 1);
MODULE_DEPEND(axe, miibus, 1, 1, 1);
MODULE_VERSION(axe, 1);

static const struct usb_ether_methods axe_ue_methods = {
	.ue_attach_post = axe_attach_post,
	.ue_attach_post_sub = axe_attach_post_sub,
	.ue_start = axe_start,
	.ue_init = axe_init,
	.ue_stop = axe_stop,
	.ue_tick = axe_tick,
	.ue_setmulti = axe_setmulti,
	.ue_setpromisc = axe_setpromisc,
	.ue_mii_upd = axe_ifmedia_upd,
	.ue_mii_sts = axe_ifmedia_sts,
};

static int
axe_cmd(struct axe_softc *sc, int cmd, int index, int val, void *buf)
{
	struct usb_device_request req;
	usb_error_t err;

	AXE_LOCK_ASSERT(sc, MA_OWNED);

	req.bmRequestType = (AXE_CMD_IS_WRITE(cmd) ?
	    UT_WRITE_VENDOR_DEVICE :
	    UT_READ_VENDOR_DEVICE);
	req.bRequest = AXE_CMD_CMD(cmd);
	USETW(req.wValue, val);
	USETW(req.wIndex, index);
	USETW(req.wLength, AXE_CMD_LEN(cmd));

	err = uether_do_request(&sc->sc_ue, &req, buf, 1000);

	return (err);
}

static int
axe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct axe_softc *sc = device_get_softc(dev);
	uint16_t val;
	int locked;

	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXE_LOCK(sc);

	axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL);
	axe_cmd(sc, AXE_CMD_MII_READ_REG, reg, phy, &val);
	axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL);

	val = le16toh(val);
	if (AXE_IS_772(sc) && reg == MII_BMSR) {
		/*
		 * BMSR of AX88772 indicates that it supports extended
		 * capability but the extended status register is
		 * reserved for the embedded ethernet PHY, so clear the
		 * extended capability bit of BMSR.
335 */ 336 val &= ~BMSR_EXTCAP; 337 } 338 339 if (!locked) 340 AXE_UNLOCK(sc); 341 return (val); 342 } 343 344 static int 345 axe_miibus_writereg(device_t dev, int phy, int reg, int val) 346 { 347 struct axe_softc *sc = device_get_softc(dev); 348 int locked; 349 350 val = htole32(val); 351 locked = mtx_owned(&sc->sc_mtx); 352 if (!locked) 353 AXE_LOCK(sc); 354 355 axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL); 356 axe_cmd(sc, AXE_CMD_MII_WRITE_REG, reg, phy, &val); 357 axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL); 358 359 if (!locked) 360 AXE_UNLOCK(sc); 361 return (0); 362 } 363 364 static void 365 axe_miibus_statchg(device_t dev) 366 { 367 struct axe_softc *sc = device_get_softc(dev); 368 struct mii_data *mii = GET_MII(sc); 369 struct ifnet *ifp; 370 uint16_t val; 371 int err, locked; 372 373 locked = mtx_owned(&sc->sc_mtx); 374 if (!locked) 375 AXE_LOCK(sc); 376 377 ifp = uether_getifp(&sc->sc_ue); 378 if (mii == NULL || ifp == NULL || 379 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 380 goto done; 381 382 sc->sc_flags &= ~AXE_FLAG_LINK; 383 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 384 (IFM_ACTIVE | IFM_AVALID)) { 385 switch (IFM_SUBTYPE(mii->mii_media_active)) { 386 case IFM_10_T: 387 case IFM_100_TX: 388 sc->sc_flags |= AXE_FLAG_LINK; 389 break; 390 case IFM_1000_T: 391 if ((sc->sc_flags & AXE_FLAG_178) == 0) 392 break; 393 sc->sc_flags |= AXE_FLAG_LINK; 394 break; 395 default: 396 break; 397 } 398 } 399 400 /* Lost link, do nothing. */ 401 if ((sc->sc_flags & AXE_FLAG_LINK) == 0) 402 goto done; 403 404 val = 0; 405 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 406 val |= AXE_MEDIA_FULL_DUPLEX; 407 if (AXE_IS_178_FAMILY(sc)) { 408 if ((IFM_OPTIONS(mii->mii_media_active) & 409 IFM_ETH_TXPAUSE) != 0) 410 val |= AXE_178_MEDIA_TXFLOW_CONTROL_EN; 411 if ((IFM_OPTIONS(mii->mii_media_active) & 412 IFM_ETH_RXPAUSE) != 0) 413 val |= AXE_178_MEDIA_RXFLOW_CONTROL_EN; 414 } 415 } 416 if (AXE_IS_178_FAMILY(sc)) { 417 val |= AXE_178_MEDIA_RX_EN | AXE_178_MEDIA_MAGIC; 418 if ((sc->sc_flags & AXE_FLAG_178) != 0) 419 val |= AXE_178_MEDIA_ENCK; 420 switch (IFM_SUBTYPE(mii->mii_media_active)) { 421 case IFM_1000_T: 422 val |= AXE_178_MEDIA_GMII | AXE_178_MEDIA_ENCK; 423 break; 424 case IFM_100_TX: 425 val |= AXE_178_MEDIA_100TX; 426 break; 427 case IFM_10_T: 428 /* doesn't need to be handled */ 429 break; 430 } 431 } 432 err = axe_cmd(sc, AXE_CMD_WRITE_MEDIA, 0, val, NULL); 433 if (err) 434 device_printf(dev, "media change failed, error %d\n", err); 435 done: 436 if (!locked) 437 AXE_UNLOCK(sc); 438 } 439 440 /* 441 * Set media options. 442 */ 443 static int 444 axe_ifmedia_upd(struct ifnet *ifp) 445 { 446 struct axe_softc *sc = ifp->if_softc; 447 struct mii_data *mii = GET_MII(sc); 448 struct mii_softc *miisc; 449 int error; 450 451 AXE_LOCK_ASSERT(sc, MA_OWNED); 452 453 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 454 PHY_RESET(miisc); 455 error = mii_mediachg(mii); 456 return (error); 457 } 458 459 /* 460 * Report current media status. 
 */
static void
axe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct axe_softc *sc = ifp->if_softc;
	struct mii_data *mii = GET_MII(sc);

	AXE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	AXE_UNLOCK(sc);
}

static void
axe_setmulti(struct usb_ether *ue)
{
	struct axe_softc *sc = uether_getsc(ue);
	struct ifnet *ifp = uether_getifp(ue);
	struct ifmultiaddr *ifma;
	uint32_t h = 0;
	uint16_t rxmode;
	uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

	AXE_LOCK_ASSERT(sc, MA_OWNED);

	axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, &rxmode);
	rxmode = le16toh(rxmode);

	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		rxmode |= AXE_RXCMD_ALLMULTI;
		axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
		return;
	}
	rxmode &= ~AXE_RXCMD_ALLMULTI;

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		hashtbl[h / 8] |= 1 << (h % 8);
	}
	if_maddr_runlock(ifp);

	axe_cmd(sc, AXE_CMD_WRITE_MCAST, 0, 0, (void *)&hashtbl);
	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
}

static int
axe_get_phyno(struct axe_softc *sc, int sel)
{
	int phyno;

	switch (AXE_PHY_TYPE(sc->sc_phyaddrs[sel])) {
	case PHY_TYPE_100_HOME:
	case PHY_TYPE_GIG:
		phyno = AXE_PHY_NO(sc->sc_phyaddrs[sel]);
		break;
	case PHY_TYPE_SPECIAL:
		/* FALLTHROUGH */
	case PHY_TYPE_RSVD:
		/* FALLTHROUGH */
	case PHY_TYPE_NON_SUP:
		/* FALLTHROUGH */
	default:
		phyno = -1;
		break;
	}

	return (phyno);
}

#define	AXE_GPIO_WRITE(x, y)	do {				\
	axe_cmd(sc, AXE_CMD_WRITE_GPIO, 0, (x), NULL);		\
	uether_pause(ue, (y));					\
} while (0)

static void
axe_ax88178_init(struct axe_softc *sc)
{
	struct usb_ether *ue;
	int gpio0, ledmode, phymode;
	uint16_t eeprom, val;

	ue = &sc->sc_ue;
	axe_cmd(sc, AXE_CMD_SROM_WR_ENABLE, 0, 0, NULL);
	/* XXX magic */
	axe_cmd(sc, AXE_CMD_SROM_READ, 0, 0x0017, &eeprom);
	eeprom = le16toh(eeprom);
	axe_cmd(sc, AXE_CMD_SROM_WR_DISABLE, 0, 0, NULL);

	/* if EEPROM is invalid we have to use GPIO0 */
	if (eeprom == 0xffff) {
		phymode = AXE_PHY_MODE_MARVELL;
		gpio0 = 1;
		ledmode = 0;
	} else {
		phymode = eeprom & 0x7f;
		gpio0 = (eeprom & 0x80) ? 0 : 1;
		ledmode = eeprom >> 8;
	}

	if (bootverbose)
		device_printf(sc->sc_ue.ue_dev,
		    "EEPROM data : 0x%04x, phymode : 0x%02x\n", eeprom,
		    phymode);
	/* Program GPIOs depending on PHY hardware.
*/ 571 switch (phymode) { 572 case AXE_PHY_MODE_MARVELL: 573 if (gpio0 == 1) { 574 AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO0_EN, 575 hz / 32); 576 AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2 | AXE_GPIO2_EN, 577 hz / 32); 578 AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2_EN, hz / 4); 579 AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2 | AXE_GPIO2_EN, 580 hz / 32); 581 } else { 582 AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 | 583 AXE_GPIO1_EN, hz / 3); 584 if (ledmode == 1) { 585 AXE_GPIO_WRITE(AXE_GPIO1_EN, hz / 3); 586 AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN, 587 hz / 3); 588 } else { 589 AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | 590 AXE_GPIO2 | AXE_GPIO2_EN, hz / 32); 591 AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | 592 AXE_GPIO2_EN, hz / 4); 593 AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | 594 AXE_GPIO2 | AXE_GPIO2_EN, hz / 32); 595 } 596 } 597 break; 598 case AXE_PHY_MODE_CICADA: 599 case AXE_PHY_MODE_CICADA_V2: 600 case AXE_PHY_MODE_CICADA_V2_ASIX: 601 if (gpio0 == 1) 602 AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO0 | 603 AXE_GPIO0_EN, hz / 32); 604 else 605 AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 | 606 AXE_GPIO1_EN, hz / 32); 607 break; 608 case AXE_PHY_MODE_AGERE: 609 AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 | 610 AXE_GPIO1_EN, hz / 32); 611 AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2 | 612 AXE_GPIO2_EN, hz / 32); 613 AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2_EN, hz / 4); 614 AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2 | 615 AXE_GPIO2_EN, hz / 32); 616 break; 617 case AXE_PHY_MODE_REALTEK_8211CL: 618 case AXE_PHY_MODE_REALTEK_8211BN: 619 case AXE_PHY_MODE_REALTEK_8251CL: 620 val = gpio0 == 1 ? AXE_GPIO0 | AXE_GPIO0_EN : 621 AXE_GPIO1 | AXE_GPIO1_EN; 622 AXE_GPIO_WRITE(val, hz / 32); 623 AXE_GPIO_WRITE(val | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32); 624 AXE_GPIO_WRITE(val | AXE_GPIO2_EN, hz / 4); 625 AXE_GPIO_WRITE(val | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32); 626 if (phymode == AXE_PHY_MODE_REALTEK_8211CL) { 627 axe_miibus_writereg(ue->ue_dev, sc->sc_phyno, 628 0x1F, 0x0005); 629 axe_miibus_writereg(ue->ue_dev, sc->sc_phyno, 630 0x0C, 0x0000); 631 val = axe_miibus_readreg(ue->ue_dev, sc->sc_phyno, 632 0x0001); 633 axe_miibus_writereg(ue->ue_dev, sc->sc_phyno, 634 0x01, val | 0x0080); 635 axe_miibus_writereg(ue->ue_dev, sc->sc_phyno, 636 0x1F, 0x0000); 637 } 638 break; 639 default: 640 /* Unknown PHY model or no need to program GPIOs. */ 641 break; 642 } 643 644 /* soft reset */ 645 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_CLEAR, NULL); 646 uether_pause(ue, hz / 4); 647 648 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, 649 AXE_SW_RESET_PRL | AXE_178_RESET_MAGIC, NULL); 650 uether_pause(ue, hz / 4); 651 /* Enable MII/GMII/RGMII interface to work with external PHY. 
*/ 652 axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, 0, NULL); 653 uether_pause(ue, hz / 4); 654 655 axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL); 656 } 657 658 static void 659 axe_ax88772_init(struct axe_softc *sc) 660 { 661 axe_cmd(sc, AXE_CMD_WRITE_GPIO, 0, 0x00b0, NULL); 662 uether_pause(&sc->sc_ue, hz / 16); 663 664 if (sc->sc_phyno == AXE_772_PHY_NO_EPHY) { 665 /* ask for the embedded PHY */ 666 axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, 0x01, NULL); 667 uether_pause(&sc->sc_ue, hz / 64); 668 669 /* power down and reset state, pin reset state */ 670 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, 671 AXE_SW_RESET_CLEAR, NULL); 672 uether_pause(&sc->sc_ue, hz / 16); 673 674 /* power down/reset state, pin operating state */ 675 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, 676 AXE_SW_RESET_IPPD | AXE_SW_RESET_PRL, NULL); 677 uether_pause(&sc->sc_ue, hz / 4); 678 679 /* power up, reset */ 680 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_PRL, NULL); 681 682 /* power up, operating */ 683 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, 684 AXE_SW_RESET_IPRL | AXE_SW_RESET_PRL, NULL); 685 } else { 686 /* ask for external PHY */ 687 axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, 0x00, NULL); 688 uether_pause(&sc->sc_ue, hz / 64); 689 690 /* power down internal PHY */ 691 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, 692 AXE_SW_RESET_IPPD | AXE_SW_RESET_PRL, NULL); 693 } 694 695 uether_pause(&sc->sc_ue, hz / 4); 696 axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL); 697 } 698 699 static void 700 axe_ax88772_phywake(struct axe_softc *sc) 701 { 702 struct usb_ether *ue; 703 704 ue = &sc->sc_ue; 705 if (sc->sc_phyno == AXE_772_PHY_NO_EPHY) { 706 /* Manually select internal(embedded) PHY - MAC mode. */ 707 axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, AXE_SW_PHY_SELECT_SS_ENB | 708 AXE_SW_PHY_SELECT_EMBEDDED | AXE_SW_PHY_SELECT_SS_MII, 709 NULL); 710 uether_pause(&sc->sc_ue, hz / 32); 711 } else { 712 /* 713 * Manually select external PHY - MAC mode. 714 * Reverse MII/RMII is for AX88772A PHY mode. 715 */ 716 axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, AXE_SW_PHY_SELECT_SS_ENB | 717 AXE_SW_PHY_SELECT_EXT | AXE_SW_PHY_SELECT_SS_MII, NULL); 718 uether_pause(&sc->sc_ue, hz / 32); 719 } 720 /* Take PHY out of power down. */ 721 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPPD | 722 AXE_SW_RESET_IPRL, NULL); 723 uether_pause(&sc->sc_ue, hz / 4); 724 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPRL, NULL); 725 uether_pause(&sc->sc_ue, hz); 726 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_CLEAR, NULL); 727 uether_pause(&sc->sc_ue, hz / 32); 728 axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPRL, NULL); 729 uether_pause(&sc->sc_ue, hz / 32); 730 } 731 732 static void 733 axe_ax88772a_init(struct axe_softc *sc) 734 { 735 struct usb_ether *ue; 736 737 ue = &sc->sc_ue; 738 /* Reload EEPROM. */ 739 AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM, hz / 32); 740 axe_ax88772_phywake(sc); 741 /* Stop MAC. */ 742 axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL); 743 } 744 745 static void 746 axe_ax88772b_init(struct axe_softc *sc) 747 { 748 struct usb_ether *ue; 749 uint16_t eeprom; 750 uint8_t *eaddr; 751 int i; 752 753 ue = &sc->sc_ue; 754 /* Reload EEPROM. */ 755 AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM, hz / 32); 756 /* 757 * Save PHY power saving configuration(high byte) and 758 * clear EEPROM checksum value(low byte). 
759 */ 760 axe_cmd(sc, AXE_CMD_SROM_READ, 0, AXE_EEPROM_772B_PHY_PWRCFG, &eeprom); 761 sc->sc_pwrcfg = le16toh(eeprom) & 0xFF00; 762 763 /* 764 * Auto-loaded default station address from internal ROM is 765 * 00:00:00:00:00:00 such that an explicit access to EEPROM 766 * is required to get real station address. 767 */ 768 eaddr = ue->ue_eaddr; 769 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) { 770 axe_cmd(sc, AXE_CMD_SROM_READ, 0, AXE_EEPROM_772B_NODE_ID + i, 771 &eeprom); 772 eeprom = le16toh(eeprom); 773 *eaddr++ = (uint8_t)(eeprom & 0xFF); 774 *eaddr++ = (uint8_t)((eeprom >> 8) & 0xFF); 775 } 776 /* Wakeup PHY. */ 777 axe_ax88772_phywake(sc); 778 /* Stop MAC. */ 779 axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL); 780 } 781 782 #undef AXE_GPIO_WRITE 783 784 static void 785 axe_reset(struct axe_softc *sc) 786 { 787 struct usb_config_descriptor *cd; 788 usb_error_t err; 789 790 cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev); 791 792 err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx, 793 cd->bConfigurationValue); 794 if (err) 795 DPRINTF("reset failed (ignored)\n"); 796 797 /* Wait a little while for the chip to get its brains in order. */ 798 uether_pause(&sc->sc_ue, hz / 100); 799 800 /* Reinitialize controller to achieve full reset. */ 801 if (sc->sc_flags & AXE_FLAG_178) 802 axe_ax88178_init(sc); 803 else if (sc->sc_flags & AXE_FLAG_772) 804 axe_ax88772_init(sc); 805 else if (sc->sc_flags & AXE_FLAG_772A) 806 axe_ax88772a_init(sc); 807 else if (sc->sc_flags & AXE_FLAG_772B) 808 axe_ax88772b_init(sc); 809 } 810 811 static void 812 axe_attach_post(struct usb_ether *ue) 813 { 814 struct axe_softc *sc = uether_getsc(ue); 815 816 /* 817 * Load PHY indexes first. Needed by axe_xxx_init(). 818 */ 819 axe_cmd(sc, AXE_CMD_READ_PHYID, 0, 0, sc->sc_phyaddrs); 820 if (bootverbose) 821 device_printf(sc->sc_ue.ue_dev, "PHYADDR 0x%02x:0x%02x\n", 822 sc->sc_phyaddrs[0], sc->sc_phyaddrs[1]); 823 sc->sc_phyno = axe_get_phyno(sc, AXE_PHY_SEL_PRI); 824 if (sc->sc_phyno == -1) 825 sc->sc_phyno = axe_get_phyno(sc, AXE_PHY_SEL_SEC); 826 if (sc->sc_phyno == -1) { 827 device_printf(sc->sc_ue.ue_dev, 828 "no valid PHY address found, assuming PHY address 0\n"); 829 sc->sc_phyno = 0; 830 } 831 832 /* Initialize controller and get station address. */ 833 if (sc->sc_flags & AXE_FLAG_178) { 834 axe_ax88178_init(sc); 835 sc->sc_tx_bufsz = 16 * 1024; 836 axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, ue->ue_eaddr); 837 } else if (sc->sc_flags & AXE_FLAG_772) { 838 axe_ax88772_init(sc); 839 sc->sc_tx_bufsz = 8 * 1024; 840 axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, ue->ue_eaddr); 841 } else if (sc->sc_flags & AXE_FLAG_772A) { 842 axe_ax88772a_init(sc); 843 sc->sc_tx_bufsz = 8 * 1024; 844 axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, ue->ue_eaddr); 845 } else if (sc->sc_flags & AXE_FLAG_772B) { 846 axe_ax88772b_init(sc); 847 sc->sc_tx_bufsz = 8 * 1024; 848 } else 849 axe_cmd(sc, AXE_172_CMD_READ_NODEID, 0, 0, ue->ue_eaddr); 850 851 /* 852 * Fetch IPG values. 853 */ 854 if (sc->sc_flags & (AXE_FLAG_772A | AXE_FLAG_772B)) { 855 /* Set IPG values. 
 */
		sc->sc_ipgs[0] = 0x15;
		sc->sc_ipgs[1] = 0x16;
		sc->sc_ipgs[2] = 0x1A;
	} else
		axe_cmd(sc, AXE_CMD_READ_IPG012, 0, 0, sc->sc_ipgs);
}

static int
axe_attach_post_sub(struct usb_ether *ue)
{
	struct axe_softc *sc;
	struct ifnet *ifp;
	u_int adv_pause;
	int error;

	sc = uether_getsc(ue);
	ifp = ue->ue_ifp;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = uether_start;
	ifp->if_ioctl = axe_ioctl;
	ifp->if_init = uether_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	if (AXE_IS_178_FAMILY(sc))
		ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if (sc->sc_flags & AXE_FLAG_772B) {
		ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_RXCSUM;
		ifp->if_hwassist = AXE_CSUM_FEATURES;
		/*
		 * Checksum offloading of AX88772B also works with VLAN
		 * tagged frames but there is no way to take advantage
		 * of the feature because vlan(4) assumes
		 * IFCAP_VLAN_HWTAGGING is a prerequisite condition to
		 * support checksum offloading with VLANs.  VLAN hardware
		 * tagging support of the AX88772B is very limited so it's
		 * not possible to announce IFCAP_VLAN_HWTAGGING.
		 */
	}
	ifp->if_capenable = ifp->if_capabilities;
	if (sc->sc_flags & (AXE_FLAG_772A | AXE_FLAG_772B | AXE_FLAG_178))
		adv_pause = MIIF_DOPAUSE;
	else
		adv_pause = 0;
	mtx_lock(&Giant);
	error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
	    uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
	    BMSR_DEFCAPMASK, sc->sc_phyno, MII_OFFSET_ANY, adv_pause);
	mtx_unlock(&Giant);

	return (error);
}

/*
 * Probe for an AX88172 chip.
 */
static int
axe_probe(device_t dev)
{
	struct usb_attach_arg *uaa = device_get_ivars(dev);

	if (uaa->usb_mode != USB_MODE_HOST)
		return (ENXIO);
	if (uaa->info.bConfigIndex != AXE_CONFIG_IDX)
		return (ENXIO);
	if (uaa->info.bIfaceIndex != AXE_IFACE_IDX)
		return (ENXIO);

	return (usbd_lookup_id_by_uaa(axe_devs, sizeof(axe_devs), uaa));
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
931 */ 932 static int 933 axe_attach(device_t dev) 934 { 935 struct usb_attach_arg *uaa = device_get_ivars(dev); 936 struct axe_softc *sc = device_get_softc(dev); 937 struct usb_ether *ue = &sc->sc_ue; 938 uint8_t iface_index; 939 int error; 940 941 sc->sc_flags = USB_GET_DRIVER_INFO(uaa); 942 943 device_set_usb_desc(dev); 944 945 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF); 946 947 iface_index = AXE_IFACE_IDX; 948 error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, 949 axe_config, AXE_N_TRANSFER, sc, &sc->sc_mtx); 950 if (error) { 951 device_printf(dev, "allocating USB transfers failed\n"); 952 goto detach; 953 } 954 955 ue->ue_sc = sc; 956 ue->ue_dev = dev; 957 ue->ue_udev = uaa->device; 958 ue->ue_mtx = &sc->sc_mtx; 959 ue->ue_methods = &axe_ue_methods; 960 961 error = uether_ifattach(ue); 962 if (error) { 963 device_printf(dev, "could not attach interface\n"); 964 goto detach; 965 } 966 return (0); /* success */ 967 968 detach: 969 axe_detach(dev); 970 return (ENXIO); /* failure */ 971 } 972 973 static int 974 axe_detach(device_t dev) 975 { 976 struct axe_softc *sc = device_get_softc(dev); 977 struct usb_ether *ue = &sc->sc_ue; 978 979 usbd_transfer_unsetup(sc->sc_xfer, AXE_N_TRANSFER); 980 uether_ifdetach(ue); 981 mtx_destroy(&sc->sc_mtx); 982 983 return (0); 984 } 985 986 #if (AXE_BULK_BUF_SIZE >= 0x10000) 987 #error "Please update axe_bulk_read_callback()!" 988 #endif 989 990 static void 991 axe_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) 992 { 993 struct axe_softc *sc = usbd_xfer_softc(xfer); 994 struct usb_ether *ue = &sc->sc_ue; 995 struct usb_page_cache *pc; 996 int actlen; 997 998 usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); 999 1000 switch (USB_GET_STATE(xfer)) { 1001 case USB_ST_TRANSFERRED: 1002 pc = usbd_xfer_get_frame(xfer, 0); 1003 axe_rx_frame(ue, pc, actlen); 1004 1005 /* FALLTHROUGH */ 1006 case USB_ST_SETUP: 1007 tr_setup: 1008 usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); 1009 usbd_transfer_submit(xfer); 1010 uether_rxflush(ue); 1011 return; 1012 1013 default: /* Error */ 1014 DPRINTF("bulk read error, %s\n", usbd_errstr(error)); 1015 1016 if (error != USB_ERR_CANCELLED) { 1017 /* try to clear stall first */ 1018 usbd_xfer_set_stall(xfer); 1019 goto tr_setup; 1020 } 1021 return; 1022 1023 } 1024 } 1025 1026 static int 1027 axe_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen) 1028 { 1029 struct axe_softc *sc; 1030 struct axe_sframe_hdr hdr; 1031 struct axe_csum_hdr csum_hdr; 1032 int error, len, pos; 1033 1034 sc = uether_getsc(ue); 1035 pos = 0; 1036 len = 0; 1037 error = 0; 1038 if ((sc->sc_flags & AXE_FLAG_STD_FRAME) != 0) { 1039 while (pos < actlen) { 1040 if ((int)(pos + sizeof(hdr)) > actlen) { 1041 /* too little data */ 1042 error = EINVAL; 1043 break; 1044 } 1045 usbd_copy_out(pc, pos, &hdr, sizeof(hdr)); 1046 1047 if ((hdr.len ^ hdr.ilen) != sc->sc_lenmask) { 1048 /* we lost sync */ 1049 error = EINVAL; 1050 break; 1051 } 1052 pos += sizeof(hdr); 1053 len = le16toh(hdr.len); 1054 if (pos + len > actlen) { 1055 /* invalid length */ 1056 error = EINVAL; 1057 break; 1058 } 1059 axe_rxeof(ue, pc, pos, len, NULL); 1060 pos += len + (len % 2); 1061 } 1062 } else if ((sc->sc_flags & AXE_FLAG_CSUM_FRAME) != 0) { 1063 while (pos < actlen) { 1064 if ((int)(pos + sizeof(csum_hdr)) > actlen) { 1065 /* too little data */ 1066 error = EINVAL; 1067 break; 1068 } 1069 usbd_copy_out(pc, pos, &csum_hdr, sizeof(csum_hdr)); 1070 1071 csum_hdr.len = le16toh(csum_hdr.len); 1072 
csum_hdr.ilen = le16toh(csum_hdr.ilen); 1073 csum_hdr.cstatus = le16toh(csum_hdr.cstatus); 1074 if ((AXE_CSUM_RXBYTES(csum_hdr.len) ^ 1075 AXE_CSUM_RXBYTES(csum_hdr.ilen)) != 1076 sc->sc_lenmask) { 1077 /* we lost sync */ 1078 error = EINVAL; 1079 break; 1080 } 1081 /* 1082 * Get total transferred frame length including 1083 * checksum header. The length should be multiple 1084 * of 4. 1085 */ 1086 len = sizeof(csum_hdr) + AXE_CSUM_RXBYTES(csum_hdr.len); 1087 len = (len + 3) & ~3; 1088 if (pos + len > actlen) { 1089 /* invalid length */ 1090 error = EINVAL; 1091 break; 1092 } 1093 axe_rxeof(ue, pc, pos + sizeof(csum_hdr), 1094 AXE_CSUM_RXBYTES(csum_hdr.len), &csum_hdr); 1095 pos += len; 1096 } 1097 } else 1098 axe_rxeof(ue, pc, 0, actlen, NULL); 1099 1100 if (error != 0) 1101 ue->ue_ifp->if_ierrors++; 1102 return (error); 1103 } 1104 1105 static int 1106 axe_rxeof(struct usb_ether *ue, struct usb_page_cache *pc, unsigned int offset, 1107 unsigned int len, struct axe_csum_hdr *csum_hdr) 1108 { 1109 struct ifnet *ifp = ue->ue_ifp; 1110 struct mbuf *m; 1111 1112 if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) { 1113 ifp->if_ierrors++; 1114 return (EINVAL); 1115 } 1116 1117 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1118 if (m == NULL) { 1119 ifp->if_iqdrops++; 1120 return (ENOMEM); 1121 } 1122 m->m_len = m->m_pkthdr.len = MCLBYTES; 1123 m_adj(m, ETHER_ALIGN); 1124 1125 usbd_copy_out(pc, offset, mtod(m, uint8_t *), len); 1126 1127 ifp->if_ipackets++; 1128 m->m_pkthdr.rcvif = ifp; 1129 m->m_pkthdr.len = m->m_len = len; 1130 1131 if (csum_hdr != NULL && csum_hdr->cstatus & AXE_CSUM_HDR_L3_TYPE_IPV4) { 1132 if ((csum_hdr->cstatus & (AXE_CSUM_HDR_L4_CSUM_ERR | 1133 AXE_CSUM_HDR_L3_CSUM_ERR)) == 0) { 1134 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | 1135 CSUM_IP_VALID; 1136 if ((csum_hdr->cstatus & AXE_CSUM_HDR_L4_TYPE_MASK) == 1137 AXE_CSUM_HDR_L4_TYPE_TCP || 1138 (csum_hdr->cstatus & AXE_CSUM_HDR_L4_TYPE_MASK) == 1139 AXE_CSUM_HDR_L4_TYPE_UDP) { 1140 m->m_pkthdr.csum_flags |= 1141 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1142 m->m_pkthdr.csum_data = 0xffff; 1143 } 1144 } 1145 } 1146 1147 _IF_ENQUEUE(&ue->ue_rxq, m); 1148 return (0); 1149 } 1150 1151 #if ((AXE_BULK_BUF_SIZE >= 0x10000) || (AXE_BULK_BUF_SIZE < (MCLBYTES+4))) 1152 #error "Please update axe_bulk_write_callback()!" 1153 #endif 1154 1155 static void 1156 axe_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) 1157 { 1158 struct axe_softc *sc = usbd_xfer_softc(xfer); 1159 struct axe_sframe_hdr hdr; 1160 struct ifnet *ifp = uether_getifp(&sc->sc_ue); 1161 struct usb_page_cache *pc; 1162 struct mbuf *m; 1163 int nframes, pos; 1164 1165 switch (USB_GET_STATE(xfer)) { 1166 case USB_ST_TRANSFERRED: 1167 DPRINTFN(11, "transfer complete\n"); 1168 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1169 /* FALLTHROUGH */ 1170 case USB_ST_SETUP: 1171 tr_setup: 1172 if ((sc->sc_flags & AXE_FLAG_LINK) == 0 || 1173 (ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) { 1174 /* 1175 * Don't send anything if there is no link or 1176 * controller is busy. 
1177 */ 1178 return; 1179 } 1180 1181 for (nframes = 0; nframes < 16 && 1182 !IFQ_DRV_IS_EMPTY(&ifp->if_snd); nframes++) { 1183 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 1184 if (m == NULL) 1185 break; 1186 usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES, 1187 nframes); 1188 pos = 0; 1189 pc = usbd_xfer_get_frame(xfer, nframes); 1190 if (AXE_IS_178_FAMILY(sc)) { 1191 hdr.len = htole16(m->m_pkthdr.len); 1192 hdr.ilen = ~hdr.len; 1193 /* 1194 * If upper stack computed checksum, driver 1195 * should tell controller not to insert 1196 * computed checksum for checksum offloading 1197 * enabled controller. 1198 */ 1199 if (ifp->if_capabilities & IFCAP_TXCSUM) { 1200 if ((m->m_pkthdr.csum_flags & 1201 AXE_CSUM_FEATURES) != 0) 1202 hdr.len |= htole16( 1203 AXE_TX_CSUM_PSEUDO_HDR); 1204 else 1205 hdr.len |= htole16( 1206 AXE_TX_CSUM_DIS); 1207 } 1208 usbd_copy_in(pc, pos, &hdr, sizeof(hdr)); 1209 pos += sizeof(hdr); 1210 usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len); 1211 pos += m->m_pkthdr.len; 1212 if ((pos % 512) == 0) { 1213 hdr.len = 0; 1214 hdr.ilen = 0xffff; 1215 usbd_copy_in(pc, pos, &hdr, 1216 sizeof(hdr)); 1217 pos += sizeof(hdr); 1218 } 1219 } else { 1220 usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len); 1221 pos += m->m_pkthdr.len; 1222 } 1223 1224 /* 1225 * XXX 1226 * Update TX packet counter here. This is not 1227 * correct way but it seems that there is no way 1228 * to know how many packets are sent at the end 1229 * of transfer because controller combines 1230 * multiple writes into single one if there is 1231 * room in TX buffer of controller. 1232 */ 1233 ifp->if_opackets++; 1234 1235 /* 1236 * if there's a BPF listener, bounce a copy 1237 * of this frame to him: 1238 */ 1239 BPF_MTAP(ifp, m); 1240 1241 m_freem(m); 1242 1243 /* Set frame length. 
*/ 1244 usbd_xfer_set_frame_len(xfer, nframes, pos); 1245 } 1246 if (nframes != 0) { 1247 usbd_xfer_set_frames(xfer, nframes); 1248 usbd_transfer_submit(xfer); 1249 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1250 } 1251 return; 1252 /* NOTREACHED */ 1253 default: /* Error */ 1254 DPRINTFN(11, "transfer error, %s\n", 1255 usbd_errstr(error)); 1256 1257 ifp->if_oerrors++; 1258 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1259 1260 if (error != USB_ERR_CANCELLED) { 1261 /* try to clear stall first */ 1262 usbd_xfer_set_stall(xfer); 1263 goto tr_setup; 1264 } 1265 return; 1266 1267 } 1268 } 1269 1270 static void 1271 axe_tick(struct usb_ether *ue) 1272 { 1273 struct axe_softc *sc = uether_getsc(ue); 1274 struct mii_data *mii = GET_MII(sc); 1275 1276 AXE_LOCK_ASSERT(sc, MA_OWNED); 1277 1278 mii_tick(mii); 1279 if ((sc->sc_flags & AXE_FLAG_LINK) == 0) { 1280 axe_miibus_statchg(ue->ue_dev); 1281 if ((sc->sc_flags & AXE_FLAG_LINK) != 0) 1282 axe_start(ue); 1283 } 1284 } 1285 1286 static void 1287 axe_start(struct usb_ether *ue) 1288 { 1289 struct axe_softc *sc = uether_getsc(ue); 1290 1291 /* 1292 * start the USB transfers, if not already started: 1293 */ 1294 usbd_transfer_start(sc->sc_xfer[AXE_BULK_DT_RD]); 1295 usbd_transfer_start(sc->sc_xfer[AXE_BULK_DT_WR]); 1296 } 1297 1298 static void 1299 axe_csum_cfg(struct usb_ether *ue) 1300 { 1301 struct axe_softc *sc; 1302 struct ifnet *ifp; 1303 uint16_t csum1, csum2; 1304 1305 sc = uether_getsc(ue); 1306 AXE_LOCK_ASSERT(sc, MA_OWNED); 1307 1308 if ((sc->sc_flags & AXE_FLAG_772B) != 0) { 1309 ifp = uether_getifp(ue); 1310 csum1 = 0; 1311 csum2 = 0; 1312 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 1313 csum1 |= AXE_TXCSUM_IP | AXE_TXCSUM_TCP | 1314 AXE_TXCSUM_UDP; 1315 axe_cmd(sc, AXE_772B_CMD_WRITE_TXCSUM, csum2, csum1, NULL); 1316 csum1 = 0; 1317 csum2 = 0; 1318 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 1319 csum1 |= AXE_RXCSUM_IP | AXE_RXCSUM_IPVE | 1320 AXE_RXCSUM_TCP | AXE_RXCSUM_UDP | AXE_RXCSUM_ICMP | 1321 AXE_RXCSUM_IGMP; 1322 axe_cmd(sc, AXE_772B_CMD_WRITE_RXCSUM, csum2, csum1, NULL); 1323 } 1324 } 1325 1326 static void 1327 axe_init(struct usb_ether *ue) 1328 { 1329 struct axe_softc *sc = uether_getsc(ue); 1330 struct ifnet *ifp = uether_getifp(ue); 1331 uint16_t rxmode; 1332 1333 AXE_LOCK_ASSERT(sc, MA_OWNED); 1334 1335 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1336 return; 1337 1338 /* Cancel pending I/O */ 1339 axe_stop(ue); 1340 1341 axe_reset(sc); 1342 1343 /* Set MAC address and transmitter IPG values. */ 1344 if (AXE_IS_178_FAMILY(sc)) { 1345 axe_cmd(sc, AXE_178_CMD_WRITE_NODEID, 0, 0, IF_LLADDR(ifp)); 1346 axe_cmd(sc, AXE_178_CMD_WRITE_IPG012, sc->sc_ipgs[2], 1347 (sc->sc_ipgs[1] << 8) | (sc->sc_ipgs[0]), NULL); 1348 } else { 1349 axe_cmd(sc, AXE_172_CMD_WRITE_NODEID, 0, 0, IF_LLADDR(ifp)); 1350 axe_cmd(sc, AXE_172_CMD_WRITE_IPG0, 0, sc->sc_ipgs[0], NULL); 1351 axe_cmd(sc, AXE_172_CMD_WRITE_IPG1, 0, sc->sc_ipgs[1], NULL); 1352 axe_cmd(sc, AXE_172_CMD_WRITE_IPG2, 0, sc->sc_ipgs[2], NULL); 1353 } 1354 1355 if (AXE_IS_178_FAMILY(sc)) { 1356 sc->sc_flags &= ~(AXE_FLAG_STD_FRAME | AXE_FLAG_CSUM_FRAME); 1357 if ((sc->sc_flags & AXE_FLAG_772B) != 0) 1358 sc->sc_lenmask = AXE_CSUM_HDR_LEN_MASK; 1359 else 1360 sc->sc_lenmask = AXE_HDR_LEN_MASK; 1361 if ((sc->sc_flags & AXE_FLAG_772B) != 0 && 1362 (ifp->if_capenable & IFCAP_RXCSUM) != 0) 1363 sc->sc_flags |= AXE_FLAG_CSUM_FRAME; 1364 else 1365 sc->sc_flags |= AXE_FLAG_STD_FRAME; 1366 } 1367 1368 /* Configure TX/RX checksum offloading. 
*/ 1369 axe_csum_cfg(ue); 1370 1371 if (sc->sc_flags & AXE_FLAG_772B) { 1372 /* AX88772B uses different maximum frame burst configuration. */ 1373 axe_cmd(sc, AXE_772B_CMD_RXCTL_WRITE_CFG, 1374 ax88772b_mfb_table[AX88772B_MFB_16K].threshold, 1375 ax88772b_mfb_table[AX88772B_MFB_16K].byte_cnt, NULL); 1376 } 1377 1378 /* Enable receiver, set RX mode. */ 1379 rxmode = (AXE_RXCMD_MULTICAST | AXE_RXCMD_ENABLE); 1380 if (AXE_IS_178_FAMILY(sc)) { 1381 if (sc->sc_flags & AXE_FLAG_772B) { 1382 /* 1383 * Select RX header format type 1. Aligning IP 1384 * header on 4 byte boundary is not needed when 1385 * checksum offloading feature is not used 1386 * because we always copy the received frame in 1387 * RX handler. When RX checksum offloading is 1388 * active, aligning IP header is required to 1389 * reflect actual frame length including RX 1390 * header size. 1391 */ 1392 rxmode |= AXE_772B_RXCMD_HDR_TYPE_1; 1393 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 1394 rxmode |= AXE_772B_RXCMD_IPHDR_ALIGN; 1395 } else { 1396 /* 1397 * Default Rx buffer size is too small to get 1398 * maximum performance. 1399 */ 1400 rxmode |= AXE_178_RXCMD_MFB_16384; 1401 } 1402 } else { 1403 rxmode |= AXE_172_RXCMD_UNICAST; 1404 } 1405 1406 /* If we want promiscuous mode, set the allframes bit. */ 1407 if (ifp->if_flags & IFF_PROMISC) 1408 rxmode |= AXE_RXCMD_PROMISC; 1409 1410 if (ifp->if_flags & IFF_BROADCAST) 1411 rxmode |= AXE_RXCMD_BROADCAST; 1412 1413 axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL); 1414 1415 /* Load the multicast filter. */ 1416 axe_setmulti(ue); 1417 1418 usbd_xfer_set_stall(sc->sc_xfer[AXE_BULK_DT_WR]); 1419 1420 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1421 /* Switch to selected media. */ 1422 axe_ifmedia_upd(ifp); 1423 } 1424 1425 static void 1426 axe_setpromisc(struct usb_ether *ue) 1427 { 1428 struct axe_softc *sc = uether_getsc(ue); 1429 struct ifnet *ifp = uether_getifp(ue); 1430 uint16_t rxmode; 1431 1432 axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, &rxmode); 1433 1434 rxmode = le16toh(rxmode); 1435 1436 if (ifp->if_flags & IFF_PROMISC) { 1437 rxmode |= AXE_RXCMD_PROMISC; 1438 } else { 1439 rxmode &= ~AXE_RXCMD_PROMISC; 1440 } 1441 1442 axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL); 1443 1444 axe_setmulti(ue); 1445 } 1446 1447 static void 1448 axe_stop(struct usb_ether *ue) 1449 { 1450 struct axe_softc *sc = uether_getsc(ue); 1451 struct ifnet *ifp = uether_getifp(ue); 1452 1453 AXE_LOCK_ASSERT(sc, MA_OWNED); 1454 1455 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1456 sc->sc_flags &= ~AXE_FLAG_LINK; 1457 1458 /* 1459 * stop all the transfers, if not already stopped: 1460 */ 1461 usbd_transfer_stop(sc->sc_xfer[AXE_BULK_DT_WR]); 1462 usbd_transfer_stop(sc->sc_xfer[AXE_BULK_DT_RD]); 1463 } 1464 1465 static int 1466 axe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1467 { 1468 struct usb_ether *ue = ifp->if_softc; 1469 struct axe_softc *sc; 1470 struct ifreq *ifr; 1471 int error, mask, reinit; 1472 1473 sc = uether_getsc(ue); 1474 ifr = (struct ifreq *)data; 1475 error = 0; 1476 reinit = 0; 1477 if (cmd == SIOCSIFCAP) { 1478 AXE_LOCK(sc); 1479 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1480 if ((mask & IFCAP_TXCSUM) != 0 && 1481 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 1482 ifp->if_capenable ^= IFCAP_TXCSUM; 1483 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 1484 ifp->if_hwassist |= AXE_CSUM_FEATURES; 1485 else 1486 ifp->if_hwassist &= ~AXE_CSUM_FEATURES; 1487 reinit++; 1488 } 1489 if ((mask & IFCAP_RXCSUM) != 0 && 1490 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { 1491 
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit++;
		}
		if (reinit > 0 && ifp->if_drv_flags & IFF_DRV_RUNNING)
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		else
			reinit = 0;
		AXE_UNLOCK(sc);
		if (reinit > 0)
			uether_init(ue);
	} else
		error = uether_ioctl(ifp, cmd, data);

	return (error);
}
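/*
 * Runtime notes: the driver is typically built as the if_axe module
 * (e.g. "kldload if_axe"), and the SIOCSIFCAP path in axe_ioctl() above
 * is what runs when checksum offloading is toggled from userland, e.g.
 * "ifconfig axe0 -txcsum" or "ifconfig axe0 rxcsum" on an AX88772B part.
 */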