1 /*- 2 * Copyright (c) 2013-2014 Kevin Lo 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 /* 31 * ASIX Electronics AX88178A/AX88179 USB 2.0/3.0 gigabit ethernet driver. 
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <net/if.h>
#include <net/if_var.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include "usbdevs.h"

#define	USB_DEBUG_VAR axge_debug
#include <dev/usb/usb_debug.h>
#include <dev/usb/usb_process.h>

#include <dev/usb/net/usb_ethernet.h>
#include <dev/usb/net/if_axgereg.h>

/*
 * Various supported device vendors/products.
 */
static const STRUCT_USB_HOST_ID axge_devs[] = {
#define	AXGE_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) }
	AXGE_DEV(ASIX, AX88178A),
	AXGE_DEV(ASIX, AX88179),
	AXGE_DEV(DLINK, DUB1312),
	AXGE_DEV(LENOVO, GIGALAN),
	AXGE_DEV(SITECOMEU, LN032),
#undef AXGE_DEV
};

/*
 * Bulk-in pipe tuning blobs written verbatim (5 bytes) to
 * AXGE_RX_BULKIN_QCTRL: control byte, 16-bit timer (low/high),
 * buffer size and inter-frame gap.  The entry used depends on the
 * negotiated ethernet speed and the USB bus speed; see the selection
 * logic in axge_miibus_statchg().
 */
static const struct {
	uint8_t	ctrl;
	uint8_t	timer_l;
	uint8_t	timer_h;
	uint8_t	size;
	uint8_t	ifg;
} __packed axge_bulk_size[] = {
	{ 7, 0x4f, 0x00, 0x12, 0xff },	/* 1000baseT, SuperSpeed USB */
	{ 7, 0x20, 0x03, 0x16, 0xff },	/* 1000baseT, high-speed USB */
	{ 7, 0xae, 0x07, 0x18, 0xff },	/* 100baseTX, SS/HS USB */
	{ 7, 0xcc, 0x4c, 0x18, 0x08 }	/* 10baseT / fallback */
};

/* prototypes */

static device_probe_t axge_probe;
static device_attach_t axge_attach;
static device_detach_t axge_detach;

static usb_callback_t axge_bulk_read_callback;
static usb_callback_t axge_bulk_write_callback;

static miibus_readreg_t axge_miibus_readreg;
static miibus_writereg_t axge_miibus_writereg;
static miibus_statchg_t axge_miibus_statchg;

static uether_fn_t axge_attach_post;
static uether_fn_t axge_init;
static uether_fn_t axge_stop;
static uether_fn_t axge_start;
static uether_fn_t axge_tick;
static uether_fn_t axge_rxfilter;

static int	axge_read_mem(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, void *, int);
static void	axge_write_mem(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, void *, int);
static uint8_t	axge_read_cmd_1(struct axge_softc *, uint8_t, uint16_t);
static uint16_t	axge_read_cmd_2(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t);
static void	axge_write_cmd_1(struct axge_softc *, uint8_t, uint16_t,
		    uint8_t);
static void	axge_write_cmd_2(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, uint16_t);
static void	axge_chip_init(struct axge_softc *);
static void	axge_reset(struct axge_softc *);

static int	axge_attach_post_sub(struct usb_ether *);
static int	axge_ifmedia_upd(struct ifnet *);
static void	axge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int	axge_ioctl(struct ifnet *, u_long, caddr_t);
static void	axge_rx_frame(struct usb_ether *, struct usb_page_cache *, int);
static void	axge_rxeof(struct usb_ether *, struct usb_page_cache *,
		    unsigned int, unsigned int, uint32_t);
static void	axge_csum_cfg(struct usb_ether *);

/* Checksum offload features advertised to the network stack. */
#define	AXGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef USB_DEBUG
/* Debug level, settable via the hw.usb.axge.debug sysctl/tunable. */
static int axge_debug = 0;

static SYSCTL_NODE(_hw_usb, OID_AUTO, axge, CTLFLAG_RW, 0, "USB axge");
SYSCTL_INT(_hw_usb_axge, OID_AUTO, debug, CTLFLAG_RWTUN, &axge_debug, 0,
    "Debug level");
#endif

/* USB transfer layout: one bulk-out and one bulk-in endpoint. */
static const struct usb_config axge_config[AXGE_N_TRANSFER] = {
	[AXGE_BULK_DT_WR] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.frames = AXGE_N_FRAMES,
		.bufsize = AXGE_N_FRAMES * MCLBYTES,
		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
		.callback = axge_bulk_write_callback,
		.timeout = 10000,	/* 10 seconds */
	},
	[AXGE_BULK_DT_RD] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = 65536,
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
		.callback = axge_bulk_read_callback,
		.timeout = 0,		/* no timeout */
	},
};

static device_method_t axge_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe, axge_probe),
	DEVMETHOD(device_attach, axge_attach),
	DEVMETHOD(device_detach, axge_detach),

	/* MII interface. */
	DEVMETHOD(miibus_readreg, axge_miibus_readreg),
	DEVMETHOD(miibus_writereg, axge_miibus_writereg),
	DEVMETHOD(miibus_statchg, axge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t axge_driver = {
	.name = "axge",
	.methods = axge_methods,
	.size = sizeof(struct axge_softc),
};

static devclass_t axge_devclass;

DRIVER_MODULE(axge, uhub, axge_driver, axge_devclass, NULL, NULL);
DRIVER_MODULE(miibus, axge, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(axge, uether, 1, 1, 1);
MODULE_DEPEND(axge, usb, 1, 1, 1);
MODULE_DEPEND(axge, ether, 1, 1, 1);
MODULE_DEPEND(axge, miibus, 1, 1, 1);
MODULE_VERSION(axge, 1);
USB_PNP_HOST_INFO(axge_devs);

/* Callbacks handed to the generic USB ethernet (uether) framework. */
static const struct usb_ether_methods axge_ue_methods = {
	.ue_attach_post = axge_attach_post,
	.ue_attach_post_sub = axge_attach_post_sub,
	.ue_start = axge_start,
	.ue_init = axge_init,
	.ue_stop = axge_stop,
	.ue_tick = axge_tick,
	.ue_setmulti = axge_rxfilter,
	.ue_setpromisc = axge_rxfilter,
	.ue_mii_upd = axge_ifmedia_upd,
	.ue_mii_sts = axge_ifmedia_sts,
};

/*
 * Read 'len' bytes from the device into 'buf' with a vendor control
 * request; 'val' and 'index' are placed into wValue/wIndex.
 * Returns the usb_error_t from uether_do_request() (0 on success).
 */
static int
axge_read_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t val, void *buf, int len)
{
	struct usb_device_request req;

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = cmd;
	USETW(req.wValue, val);
	USETW(req.wIndex, index);
	USETW(req.wLength, len);

	/* 1000 ms control transfer timeout. */
	return (uether_do_request(&sc->sc_ue, &req, buf, 1000));
}

/*
 * Write 'len' bytes from 'buf' to the device with a vendor control
 * request; 'val' and 'index' go into wValue/wIndex.  Errors are
 * deliberately ignored — callers have no recovery path.
 */
static void
axge_write_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t val, void *buf, int len)
{
	struct usb_device_request req;

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = cmd;
	USETW(req.wValue, val);
	USETW(req.wIndex, index);
	USETW(req.wLength, len);

	if (uether_do_request(&sc->sc_ue, &req, buf, 1000)) {
		/* Error ignored. */
	}
}

/*
 * Read a single 8-bit register.  wIndex is 1 here, apparently
 * encoding the access width for MAC register commands — other call
 * sites pass wIndex == transfer length as well; confirm against the
 * AX88179 datasheet.
 */
static uint8_t
axge_read_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg)
{
	uint8_t val;

	axge_read_mem(sc, cmd, 1, reg, &val, 1);
	return (val);
}

/* Read a 16-bit register, converting from little-endian wire order. */
static uint16_t
axge_read_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t reg)
{
	uint8_t val[2];

	axge_read_mem(sc, cmd, index, reg, &val, 2);
	return (UGETW(val));
}

/* Write a single 8-bit register. */
static void
axge_write_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg, uint8_t val)
{
	axge_write_mem(sc, cmd, 1, reg, &val, 1);
}

/* Write a 16-bit register, converting to little-endian wire order. */
static void
axge_write_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t reg, uint16_t val)
{
	uint8_t temp[2];

	USETW(temp, val);
	axge_write_mem(sc, cmd, index, reg, &temp, 2);
}

/*
 * MII register read.  The softc lock is taken only if the caller
 * does not already hold it (mtx_owned() check), since miibus can
 * call in either context.
 */
static int
axge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct axge_softc *sc;
	uint16_t val;
	int locked;

	sc = device_get_softc(dev);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	/* For AXGE_ACCESS_PHY, wValue carries the PHY id, wIndex the reg. */
	val = axge_read_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy);

	if (!locked)
		AXGE_UNLOCK(sc);

	return (val);
}

/* MII register write; locking mirrors axge_miibus_readreg(). */
static int
axge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct axge_softc *sc;
	int locked;

	sc = device_get_softc(dev);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	axge_write_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy, val);

	if (!locked)
		AXGE_UNLOCK(sc);

	return (0);
}

/*
 * MII status change callback: track link state and reprogram the
 * medium status register (MSR) and bulk-in tuning for the new speed.
 */
static void
axge_miibus_statchg(device_t dev)
{
	struct axge_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint8_t link_status, tmp[5];
	uint16_t val;
	int locked;

	sc = device_get_softc(dev);
	mii = GET_MII(sc);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	ifp = uether_getifp(&sc->sc_ue);
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done;

	/* Re-derive the link flag from the current media status. */
	sc->sc_flags &= ~AXGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->sc_flags |= AXGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Lost link, do nothing. */
	if ((sc->sc_flags & AXGE_FLAG_LINK) == 0)
		goto done;

	/* USB bus speed (PLSR) selects the bulk-in tuning entry below. */
	link_status = axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PLSR);

	/* Build the new medium status register value. */
	val = 0;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		val |= MSR_FD;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			val |= MSR_TFC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			val |= MSR_RFC;
	}
	val |= MSR_RE;
	/*
	 * tmp receives the 5-byte axge_bulk_size[] entry matching
	 * ethernet and USB speed; the LINK check above guarantees the
	 * subtype is one of the three cases, so tmp is always set.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		val |= MSR_GM | MSR_EN_125MHZ;
		if (link_status & PLSR_USB_SS)
			memcpy(tmp, &axge_bulk_size[0], 5);
		else if (link_status & PLSR_USB_HS)
			memcpy(tmp, &axge_bulk_size[1], 5);
		else
			memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	case IFM_100_TX:
		val |= MSR_PS;
		if (link_status & (PLSR_USB_SS | PLSR_USB_HS))
			memcpy(tmp, &axge_bulk_size[2], 5);
		else
			memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	case IFM_10_T:
		memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	}
	/* Rx bulk configuration. */
	axge_write_mem(sc, AXGE_ACCESS_MAC, 5, AXGE_RX_BULKIN_QCTRL, tmp, 5);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);
done:
	if (!locked)
		AXGE_UNLOCK(sc);
}

/*
 * Bring the PHY out of power-down/reset and select the normal
 * clocks.  The pauses give the hardware time to settle between
 * register writes.
 */
static void
axge_chip_init(struct axge_softc *sc)
{
	/* Power up ethernet PHY. */
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, 0);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, EPPRCR_IPRL);
	uether_pause(&sc->sc_ue, hz / 4);
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT,
	    AXGE_CLK_SELECT_ACS | AXGE_CLK_SELECT_BCS);
	uether_pause(&sc->sc_ue, hz / 10);
}

/*
 * Reset the device by re-issuing SET_CONFIGURATION, then run the
 * normal chip initialization sequence again.
 */
static void
axge_reset(struct axge_softc *sc)
{
	struct usb_config_descriptor *cd;
	usb_error_t err;

	cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev);

	err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx,
	    cd->bConfigurationValue);
	if (err)
		DPRINTF("reset failed (ignored)\n");

	/* Wait a little while for the chip to get its brains in order. */
	uether_pause(&sc->sc_ue, hz / 100);

	/* Reinitialize controller to achieve full reset. */
	axge_chip_init(sc);
}

/*
 * uether post-attach hook: initialize the chip and read the factory
 * station address out of the AXGE_NIDR registers.
 */
static void
axge_attach_post(struct usb_ether *ue)
{
	struct axge_softc *sc;

	sc = uether_getsc(ue);

	/* Initialize controller and get station address.
 */
	axge_chip_init(sc);
	axge_read_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
	    ue->ue_eaddr, ETHER_ADDR_LEN);
}

/*
 * uether sub-attach hook: finish ifnet setup (capabilities, queue,
 * handlers) and attach the MII bus.
 */
static int
axge_attach_post_sub(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	int error;

	sc = uether_getsc(ue);
	ifp = ue->ue_ifp;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = uether_start;
	ifp->if_ioctl = axge_ioctl;
	ifp->if_init = uether_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/* Advertise and enable checksum offload by default. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_TXCSUM | IFCAP_RXCSUM;
	ifp->if_hwassist = AXGE_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	/* The PHY attach is performed while holding Giant. */
	mtx_lock(&Giant);
	error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
	    uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
	    BMSR_DEFCAPMASK, AXGE_PHY_ADDR, MII_OFFSET_ANY, MIIF_DOPAUSE);
	mtx_unlock(&Giant);

	return (error);
}

/*
 * Set media options.
 */
static int
axge_ifmedia_upd(struct ifnet *ifp)
{
	struct axge_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	mii = GET_MII(sc);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	/* Reset every PHY before changing media. */
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);

	return (error);
}

/*
 * Report current media status.
 */
static void
axge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct axge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = GET_MII(sc);
	AXGE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	AXGE_UNLOCK(sc);
}

/*
 * Probe for an AX88178A/AX88179 chip.
 */
static int
axge_probe(device_t dev)
{
	struct usb_attach_arg *uaa;

	uaa = device_get_ivars(dev);
	if (uaa->usb_mode != USB_MODE_HOST)
		return (ENXIO);
	if (uaa->info.bConfigIndex != AXGE_CONFIG_IDX)
		return (ENXIO);
	if (uaa->info.bIfaceIndex != AXGE_IFACE_IDX)
		return (ENXIO);

	return (usbd_lookup_id_by_uaa(axge_devs, sizeof(axge_devs), uaa));
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
axge_attach(device_t dev)
{
	struct usb_attach_arg *uaa;
	struct axge_softc *sc;
	struct usb_ether *ue;
	uint8_t iface_index;
	int error;

	uaa = device_get_ivars(dev);
	sc = device_get_softc(dev);
	ue = &sc->sc_ue;

	device_set_usb_desc(dev);
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	iface_index = AXGE_IFACE_IDX;
	error = usbd_transfer_setup(uaa->device, &iface_index,
	    sc->sc_xfer, axge_config, AXGE_N_TRANSFER, sc, &sc->sc_mtx);
	if (error) {
		device_printf(dev, "allocating USB transfers failed\n");
		mtx_destroy(&sc->sc_mtx);
		return (ENXIO);
	}

	ue->ue_sc = sc;
	ue->ue_dev = dev;
	ue->ue_udev = uaa->device;
	ue->ue_mtx = &sc->sc_mtx;
	ue->ue_methods = &axge_ue_methods;

	error = uether_ifattach(ue);
	if (error) {
		device_printf(dev, "could not attach interface\n");
		goto detach;
	}
	return (0);			/* success */

detach:
	axge_detach(dev);
	return (ENXIO);			/* failure */
}

/*
 * Detach: stop the interface, put the chip into a quiescent state
 * and release all driver resources.
 */
static int
axge_detach(device_t dev)
{
	struct axge_softc *sc;
	struct usb_ether *ue;
	uint16_t val;

	sc = device_get_softc(dev);
	ue = &sc->sc_ue;
	if (device_is_attached(dev)) {
		AXGE_LOCK(sc);
		/*
		 * XXX
		 * ether_ifdetach(9) should be called first.
		 */
		axge_stop(ue);
		/* Force bulk-in to return a zero-length USB packet.
 */
		val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR);
		val |= EPPRCR_BZ | EPPRCR_IPRL;
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, val);
		/* Change clock. */
		axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT, 0);
		/* Disable MAC. */
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, 0);
		AXGE_UNLOCK(sc);
	}
	usbd_transfer_unsetup(sc->sc_xfer, AXGE_N_TRANSFER);
	uether_ifdetach(ue);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

/*
 * Bulk-in transfer callback: on completion, hand the buffer to
 * axge_rx_frame() and resubmit the transfer.  On any error other
 * than cancellation, clear the endpoint stall and retry.
 */
static void
axge_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct axge_softc *sc;
	struct usb_ether *ue;
	struct usb_page_cache *pc;
	int actlen;

	sc = usbd_xfer_softc(xfer);
	ue = &sc->sc_ue;
	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		pc = usbd_xfer_get_frame(xfer, 0);
		axge_rx_frame(ue, pc, actlen);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		/* Push any frames queued by axge_rxeof() up the stack. */
		uether_rxflush(ue);
		break;

	default:
		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}

/*
 * Bulk-out transfer callback: dequeue up to AXGE_N_FRAMES packets,
 * prepend the per-frame TX header to each and submit them all as
 * one multi-frame transfer.
 */
static void
axge_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	struct usb_page_cache *pc;
	struct mbuf *m;
	struct axge_frame_txhdr txhdr;
	int nframes, pos;

	sc = usbd_xfer_softc(xfer);
	ifp = uether_getifp(&sc->sc_ue);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		if ((sc->sc_flags & AXGE_FLAG_LINK) == 0 ||
		    (ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) {
			/*
			 * Don't send anything if there is no link or
			 * controller is busy.
			 */
			return;
		}

		for (nframes = 0; nframes < AXGE_N_FRAMES &&
		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd); nframes++) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;
			/* One MCLBYTES-sized slot per packet. */
			usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES,
			    nframes);
			pc = usbd_xfer_get_frame(xfer, nframes);
			txhdr.mss = 0;
			txhdr.len = htole32(AXGE_TXBYTES(m->m_pkthdr.len));
			/*
			 * Tell the controller not to insert checksums
			 * when the stack did not request offload for
			 * this particular mbuf.
			 */
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0 &&
			    (m->m_pkthdr.csum_flags & AXGE_CSUM_FEATURES) == 0)
				txhdr.len |= htole32(AXGE_CSUM_DISABLE);

			pos = 0;
			usbd_copy_in(pc, pos, &txhdr, sizeof(txhdr));
			pos += sizeof(txhdr);
			usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len);
			pos += m->m_pkthdr.len;

			/*
			 * if there's a BPF listener, bounce a copy
			 * of this frame to him:
			 */
			BPF_MTAP(ifp, m);

			m_freem(m);

			/* Set frame length. */
			usbd_xfer_set_frame_len(xfer, nframes, pos);
		}
		if (nframes != 0) {
			/*
			 * XXX
			 * Update TX packet counter here. This is not
			 * correct way but it seems that there is no way
			 * to know how many packets are sent at the end
			 * of transfer because controller combines
			 * multiple writes into single one if there is
			 * room in TX buffer of controller.
			 */
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, nframes);
			usbd_xfer_set_frames(xfer, nframes);
			usbd_transfer_submit(xfer);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		}
		return;
		/* NOTREACHED */
	default:
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		return;

	}
}

/*
 * Periodic uether tick: drive the MII state machine via mii_tick().
 */
static void
axge_tick(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct mii_data *mii;

	sc = uether_getsc(ue);
	mii = GET_MII(sc);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	mii_tick(mii);
}

/*
 * Program the hardware RX filter (RCR register plus the 64-bit
 * multicast hash table) from the interface flags and multicast list.
 * Also used as both the setmulti and setpromisc uether callback.
 */
static void
axge_rxfilter(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t h;
	uint16_t rxmode;
	uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);
	h = 0;
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Configure RX settings.
	 * Don't set RCR_IPE(IP header alignment on 32bit boundary) to disable
	 * inserting extra padding bytes. This wastes ethernet to USB host
	 * bandwidth as well as complicating RX handling logic. Current USB
	 * framework requires copying RX frames to mbufs so there is no need
	 * to worry about alignment.
	 */
	rxmode = RCR_DROP_CRCERR | RCR_START;
	if (ifp->if_flags & IFF_BROADCAST)
		rxmode |= RCR_ACPT_BCAST;
	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		if (ifp->if_flags & IFF_PROMISC)
			rxmode |= RCR_PROMISC;
		/* Accept all multicast; no need to build the hash table. */
		rxmode |= RCR_ACPT_ALL_MCAST;
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
		return;
	}

	/* Build the multicast hash filter from the current group list. */
	rxmode |= RCR_ACPT_MCAST;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* Top 6 bits of the big-endian CRC index the 64-bit table. */
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		hashtbl[h / 8] |= 1 << (h % 8);
	}
	if_maddr_runlock(ifp);

	axge_write_mem(sc, AXGE_ACCESS_MAC, 8, AXGE_MFA, (void *)&hashtbl, 8);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
}

/*
 * uether start callback: kick both bulk pipes.
 */
static void
axge_start(struct usb_ether *ue)
{
	struct axge_softc *sc;

	sc = uether_getsc(ue);
	/*
	 * Start the USB transfers, if not already started.
	 */
	usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_RD]);
	usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_WR]);
}

/*
 * Initialize the adapter: reset the chip, program the station
 * address, offload and RX filter settings, then mark the interface
 * running and switch to the selected media.
 */
static void
axge_init(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	axge_stop(ue);

	axge_reset(sc);

	/* Set MAC address. */
	axge_write_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
	    IF_LLADDR(ifp), ETHER_ADDR_LEN);

	/*
	 * NOTE(review): 0x34/0x52 appear to be RX FIFO pause water level
	 * marks (PWLLR/PWLHR = low/high) inherited from the vendor
	 * driver — confirm against the AX88179 datasheet.
	 */
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLLR, 0x34);
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLHR, 0x52);

	/* Configure TX/RX checksum offloading. */
	axge_csum_cfg(ue);

	/* Configure RX filters. */
	axge_rxfilter(ue);

	/*
	 * XXX
	 * Controller supports wakeup on link change detection,
	 * magic packet and wakeup frame reception. But it seems
	 * there is no framework for USB ethernet suspend/wakeup.
	 * Disable all wakeup functions.
	 */
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR, 0);
	(void)axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR);

	/* Configure default medium type. */
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, MSR_GM | MSR_FD |
	    MSR_RFC | MSR_TFC | MSR_RE);

	usbd_xfer_set_stall(sc->sc_xfer[AXGE_BULK_DT_WR]);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	/* Switch to selected media. */
	axge_ifmedia_upd(ifp);
}

/*
 * Stop the adapter: disable the hardware receiver, clear the running
 * and link flags and stop both bulk transfers.
 */
static void
axge_stop(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	uint16_t val;

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	/* Clear the receive-enable bit in the medium status register. */
	val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR);
	val &= ~MSR_RE;
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~AXGE_FLAG_LINK;

	/*
	 * Stop all the transfers, if not already stopped:
	 */
	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_WR]);
	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_RD]);
}

/*
 * ioctl handler: SIOCSIFCAP (checksum offload toggles) is handled
 * here with a reinit when the interface is running; everything else
 * is passed through to uether_ioctl().
 */
static int
axge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct usb_ether *ue;
	struct axge_softc *sc;
	struct ifreq *ifr;
	int error, mask, reinit;

	ue = ifp->if_softc;
	sc = uether_getsc(ue);
	ifr = (struct ifreq *)data;
	error = 0;
	reinit = 0;
	if (cmd == SIOCSIFCAP) {
		AXGE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |=
				    AXGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~AXGE_CSUM_FEATURES;
			reinit++;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit++;
		}
		/* Reinitialize only if the interface was running. */
		if (reinit > 0 && ifp->if_drv_flags & IFF_DRV_RUNNING)
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		else
			reinit = 0;
		AXGE_UNLOCK(sc);
		if (reinit > 0)
			uether_init(ue);
	} else
		error = uether_ioctl(ifp, cmd, data);

	return (error);
}

/*
 * Split a completed bulk-in buffer into individual ethernet frames.
 * The buffer ends with a 32-bit rxhdr holding the packet count and
 * the offset of the per-packet header array (layout diagram below).
 */
static void
axge_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen)
{
	struct axge_frame_rxhdr pkt_hdr;
	uint32_t rxhdr;
	uint32_t pos;
	uint32_t pkt_cnt, pkt_end;
	uint32_t hdr_off;
	uint32_t pktlen;

	/* verify we have enough data */
	if (actlen < (int)sizeof(rxhdr))
		return;

	pos = 0;

	/* The trailing rxhdr is stored little-endian. */
	usbd_copy_out(pc, actlen - sizeof(rxhdr), &rxhdr, sizeof(rxhdr));
	rxhdr = le32toh(rxhdr);

	pkt_cnt = rxhdr & 0xFFFF;
	hdr_off = pkt_end = (rxhdr >> 16) & 0xFFFF;

	/*
	 * <----------------------- actlen ------------------------>
	 * [frame #0]...[frame #N][pkt_hdr #0]...[pkt_hdr #N][rxhdr]
	 * Each RX frame would be aligned on 8 bytes boundary. If
	 * RCR_IPE bit is set in AXGE_RCR register, there would be 2
	 * padding bytes and 6 dummy bytes(as the padding also should
	 * be aligned on 8 bytes boundary) for each RX frame to align
	 * IP header on 32bits boundary. The driver doesn't set the
	 * RCR_IPE bit of the AXGE_RCR register, so there should be no
	 * padding bytes which simplifies RX logic a lot.
	 */
	while (pkt_cnt--) {
		/* verify the header offset */
		if ((int)(hdr_off + sizeof(pkt_hdr)) > actlen) {
			DPRINTF("End of packet headers\n");
			break;
		}
		usbd_copy_out(pc, hdr_off, &pkt_hdr, sizeof(pkt_hdr));
		pkt_hdr.status = le32toh(pkt_hdr.status);
		pktlen = AXGE_RXBYTES(pkt_hdr.status);
		/* Frame data must not run into the header array. */
		if (pos + pktlen > pkt_end) {
			DPRINTF("Data position reached end\n");
			break;
		}

		if (AXGE_RX_ERR(pkt_hdr.status) != 0) {
			DPRINTF("Dropped a packet\n");
			if_inc_counter(ue->ue_ifp, IFCOUNTER_IERRORS, 1);
		} else
			axge_rxeof(ue, pc, pos, pktlen, pkt_hdr.status);
		/* Advance to the next 8-byte-aligned frame. */
		pos += (pktlen + 7) & ~7;
		hdr_off += sizeof(pkt_hdr);
	}
}

/*
 * Copy one received frame out of the transfer buffer into a fresh
 * mbuf, translate the hardware checksum status bits into mbuf
 * csum_flags and queue it for uether_rxflush().
 */
static void
axge_rxeof(struct usb_ether *ue, struct usb_page_cache *pc, unsigned int offset,
    unsigned int len, uint32_t status)
{
	struct ifnet *ifp;
	struct mbuf *m;

	ifp = ue->ue_ifp;
	/* Drop runts and frames too large for a cluster (with alignment). */
	if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) {
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		return;
	}

	/* Use a cluster only when the frame won't fit in a header mbuf. */
	if (len > MHLEN - ETHER_ALIGN)
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	else
		m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
		return;
	}
	m->m_pkthdr.rcvif = ifp;
	m->m_len = m->m_pkthdr.len = len;
	/* Shift the payload so the IP header lands 32-bit aligned. */
	m->m_data += ETHER_ALIGN;

	usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);

	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
		/* IPv4 header checksum verified by hardware. */
		if ((status & AXGE_RX_L3_CSUM_ERR) == 0 &&
		    (status & AXGE_RX_L3_TYPE_MASK) == AXGE_RX_L3_TYPE_IPV4)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
			    CSUM_IP_VALID;
		/* TCP/UDP checksum verified by hardware. */
		if ((status & AXGE_RX_L4_CSUM_ERR) == 0 &&
		    ((status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_UDP ||
		    (status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_TCP)) {
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	_IF_ENQUEUE(&ue->ue_rxq, m);
}

/*
 * Program the TX (CTCR) and RX (CRCR) checksum offload registers
 * from the interface's currently enabled capabilities.
 */
static void
axge_csum_cfg(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	uint8_t csum;

	sc = uether_getsc(ue);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);
	ifp = uether_getifp(ue);

	csum = 0;
	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
		csum |= CTCR_IP | CTCR_TCP | CTCR_UDP;
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CTCR, csum);

	csum = 0;
	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		csum |= CRCR_IP | CRCR_TCP | CRCR_UDP;
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CRCR, csum);
}