/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013-2014 Kevin Lo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * ASIX Electronics AX88178A/AX88179 USB 2.0/3.0 gigabit ethernet driver.
 *
 * The device is driven through the usb_ether(4) framework: uether owns the
 * ifnet and taskqueue, while this file supplies the chip-specific register
 * access, media handling and bulk transfer callbacks.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include "usbdevs.h"

#define USB_DEBUG_VAR axge_debug
#include <dev/usb/usb_debug.h>
#include <dev/usb/usb_process.h>

#include <dev/usb/net/usb_ethernet.h>
#include <dev/usb/net/if_axgereg.h>

#include "miibus_if.h"

/*
 * Various supported device vendors/products.
 */

static const STRUCT_USB_HOST_ID axge_devs[] = {
#define AXGE_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) }
	AXGE_DEV(ASIX, AX88178A),
	AXGE_DEV(ASIX, AX88179),
	AXGE_DEV(BELKIN, B2B128),
	AXGE_DEV(DLINK, DUB1312),
	AXGE_DEV(LENOVO, GIGALAN),
	AXGE_DEV(SITECOMEU, LN032),
#undef AXGE_DEV
};

/*
 * Per-link-speed RX bulk-in aggregation parameters, written to
 * AXGE_RX_BULKIN_QCTRL (5 bytes) by axge_miibus_statchg().  Entry selection:
 * [0] GigE on SuperSpeed, [1] GigE on high-speed, [2] 100Mbps on SS/HS,
 * [3] everything else (10Mbps or full-speed USB).
 */
static const struct {
	uint8_t	ctrl;
	uint8_t	timer_l;
	uint8_t	timer_h;
	uint8_t	size;
	uint8_t	ifg;
} __packed axge_bulk_size[] = {
	{ 7, 0x4f, 0x00, 0x12, 0xff },
	{ 7, 0x20, 0x03, 0x16, 0xff },
	{ 7, 0xae, 0x07, 0x18, 0xff },
	{ 7, 0xcc, 0x4c, 0x18, 0x08 }
};

/* prototypes */

static device_probe_t axge_probe;
static device_attach_t axge_attach;
static device_detach_t axge_detach;

static usb_callback_t axge_bulk_read_callback;
static usb_callback_t axge_bulk_write_callback;

static miibus_readreg_t axge_miibus_readreg;
static miibus_writereg_t axge_miibus_writereg;
static miibus_statchg_t axge_miibus_statchg;

static uether_fn_t axge_attach_post;
static uether_fn_t axge_init;
static uether_fn_t axge_stop;
static uether_fn_t axge_start;
static uether_fn_t axge_tick;
static uether_fn_t axge_rxfilter;

static int	axge_read_mem(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, void *, int);
static void	axge_write_mem(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, void *, int);
static uint8_t	axge_read_cmd_1(struct axge_softc *, uint8_t, uint16_t);
static uint16_t	axge_read_cmd_2(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t);
static void	axge_write_cmd_1(struct axge_softc *, uint8_t, uint16_t,
		    uint8_t);
static void	axge_write_cmd_2(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, uint16_t);
static void	axge_chip_init(struct axge_softc *);
static void	axge_reset(struct axge_softc *);

static int	axge_attach_post_sub(struct usb_ether *);
static int	axge_ifmedia_upd(if_t);
static void	axge_ifmedia_sts(if_t, struct ifmediareq *);
static int	axge_ioctl(if_t, u_long, caddr_t);
static void	axge_rx_frame(struct usb_ether *, struct usb_page_cache *, int);
static void	axge_rxeof(struct usb_ether *, struct usb_page_cache *,
		    unsigned, unsigned, uint32_t);
static void	axge_csum_cfg(struct usb_ether *);

#define	AXGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef USB_DEBUG
static int axge_debug = 0;

static SYSCTL_NODE(_hw_usb, OID_AUTO, axge, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "USB axge");
SYSCTL_INT(_hw_usb_axge, OID_AUTO, debug, CTLFLAG_RWTUN, &axge_debug, 0,
    "Debug level");
#endif

static const struct usb_config axge_config[AXGE_N_TRANSFER] = {
	[AXGE_BULK_DT_WR] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.frames = AXGE_N_FRAMES,
		.bufsize = AXGE_N_FRAMES * MCLBYTES,
		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
		.callback = axge_bulk_write_callback,
		.timeout = 10000,	/* 10 seconds */
	},
	[AXGE_BULK_DT_RD] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = 65536,
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
		.callback = axge_bulk_read_callback,
		.timeout = 0,		/* no timeout */
	},
};

static device_method_t axge_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		axge_probe),
	DEVMETHOD(device_attach,	axge_attach),
	DEVMETHOD(device_detach,	axge_detach),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	axge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	axge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	axge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t axge_driver = {
	.name = "axge",
	.methods = axge_methods,
	.size = sizeof(struct axge_softc),
};

DRIVER_MODULE(axge, uhub, axge_driver, NULL, NULL);
DRIVER_MODULE(miibus, axge, miibus_driver, NULL, NULL);
MODULE_DEPEND(axge, uether, 1, 1, 1);
MODULE_DEPEND(axge, usb, 1, 1, 1);
MODULE_DEPEND(axge, ether, 1, 1, 1);
MODULE_DEPEND(axge, miibus, 1, 1, 1);
MODULE_VERSION(axge, 1);
USB_PNP_HOST_INFO(axge_devs);

static const struct usb_ether_methods axge_ue_methods = {
	.ue_attach_post = axge_attach_post,
	.ue_attach_post_sub = axge_attach_post_sub,
	.ue_start = axge_start,
	.ue_init = axge_init,
	.ue_stop = axge_stop,
	.ue_tick = axge_tick,
	.ue_setmulti = axge_rxfilter,
	.ue_setpromisc = axge_rxfilter,
	.ue_mii_upd = axge_ifmedia_upd,
	.ue_mii_sts = axge_ifmedia_sts,
};

/*
 * Read 'len' bytes from the chip via a vendor-specific control request.
 * 'cmd' selects the register space (e.g. AXGE_ACCESS_MAC/AXGE_ACCESS_PHY);
 * 'index'/'val' become wIndex/wValue of the request.  Must be called with
 * the softc mutex held.  Returns the usb_error_t from uether_do_request().
 */
static int
axge_read_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t val, void *buf, int len)
{
	struct usb_device_request req;

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = cmd;
	USETW(req.wValue, val);
	USETW(req.wIndex, index);
	USETW(req.wLength, len);

	return (uether_do_request(&sc->sc_ue, &req, buf, 1000));
}

/*
 * Write 'len' bytes to the chip via a vendor-specific control request.
 * Same addressing scheme as axge_read_mem().  Must be called with the
 * softc mutex held; request errors are deliberately ignored.
 */
static void
axge_write_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t val, void *buf, int len)
{
	struct usb_device_request req;

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = cmd;
	USETW(req.wValue, val);
	USETW(req.wIndex, index);
	USETW(req.wLength, len);

	if (uether_do_request(&sc->sc_ue, &req, buf, 1000)) {
		/* Error ignored. */
	}
}

/*
 * Read a single 8-bit register.
 * NOTE(review): the axge_read_mem() return value is ignored, so on a
 * failed transfer 'val' is returned uninitialized — existing driver style.
 */
static uint8_t
axge_read_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg)
{
	uint8_t val;

	axge_read_mem(sc, cmd, 1, reg, &val, 1);
	return (val);
}

/*
 * Read a 16-bit little-endian register and return it in host order.
 * NOTE(review): like axge_read_cmd_1(), transfer errors are not checked.
 */
static uint16_t
axge_read_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t reg)
{
	uint8_t val[2];

	axge_read_mem(sc, cmd, index, reg, &val, 2);
	return (UGETW(val));
}

/* Write a single 8-bit register. */
static void
axge_write_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg, uint8_t val)
{
	axge_write_mem(sc, cmd, 1, reg, &val, 1);
}

/* Write a 16-bit register, converting to the chip's little-endian order. */
static void
axge_write_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t reg, uint16_t val)
{
	uint8_t temp[2];

	USETW(temp, val);
	axge_write_mem(sc, cmd, index, reg, &temp, 2);
}

/*
 * MII bus register read.  Takes the softc lock if the caller does not
 * already hold it (miibus may call in either context).
 */
static int
axge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct axge_softc *sc;
	uint16_t val;
	int locked;

	sc = device_get_softc(dev);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	val = axge_read_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy);

	if (!locked)
		AXGE_UNLOCK(sc);

	return (val);
}

/* MII bus register write; same conditional locking as the read side. */
static int
axge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct axge_softc *sc;
	int locked;

	sc = device_get_softc(dev);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	axge_write_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy, val);

	if (!locked)
		AXGE_UNLOCK(sc);

	return (0);
}

/*
 * MII link state change callback.  Records link presence in sc_flags and,
 * when the link is up, programs the medium status register (duplex, flow
 * control, speed) and the speed/USB-bus dependent RX bulk-in parameters
 * from axge_bulk_size[].
 */
static void
axge_miibus_statchg(device_t dev)
{
	struct axge_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	uint8_t link_status, tmp[5];
	uint16_t val;
	int locked;

	sc = device_get_softc(dev);
	mii = GET_MII(sc);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	ifp = uether_getifp(&sc->sc_ue);
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		goto done;

	sc->sc_flags &= ~AXGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->sc_flags |= AXGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Lost link, do nothing. */
	if ((sc->sc_flags & AXGE_FLAG_LINK) == 0)
		goto done;

	link_status = axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PLSR);

	val = 0;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		val |= MSR_FD;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			val |= MSR_TFC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			val |= MSR_RFC;
	}
	val |= MSR_RE;
	/*
	 * 'tmp' is always initialized here: only the three subtypes below
	 * set AXGE_FLAG_LINK above, and a missing link jumps to 'done'.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		val |= MSR_GM | MSR_EN_125MHZ;
		if (link_status & PLSR_USB_SS)
			memcpy(tmp, &axge_bulk_size[0], 5);
		else if (link_status & PLSR_USB_HS)
			memcpy(tmp, &axge_bulk_size[1], 5);
		else
			memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	case IFM_100_TX:
		val |= MSR_PS;
		if (link_status & (PLSR_USB_SS | PLSR_USB_HS))
			memcpy(tmp, &axge_bulk_size[2], 5);
		else
			memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	case IFM_10_T:
		memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	}
	/* Rx bulk configuration. */
	axge_write_mem(sc, AXGE_ACCESS_MAC, 5, AXGE_RX_BULKIN_QCTRL, tmp, 5);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);
done:
	if (!locked)
		AXGE_UNLOCK(sc);
}

/*
 * Bring the chip out of power-down: cycle EPPRCR to power up the PHY,
 * then select the internal clocks.  The pauses give the hardware time to
 * settle between steps.
 */
static void
axge_chip_init(struct axge_softc *sc)
{
	/* Power up ethernet PHY. */
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, 0);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, EPPRCR_IPRL);
	uether_pause(&sc->sc_ue, hz / 4);
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT,
	    AXGE_CLK_SELECT_ACS | AXGE_CLK_SELECT_BCS);
	uether_pause(&sc->sc_ue, hz / 10);
}

/*
 * Soft-reset the device by re-issuing SET_CONFIGURATION with the current
 * configuration value, then re-run chip initialization.
 */
static void
axge_reset(struct axge_softc *sc)
{
	struct usb_config_descriptor *cd;
	usb_error_t err;

	cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev);

	err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx,
	    cd->bConfigurationValue);
	if (err)
		DPRINTF("reset failed (ignored)\n");

	/* Wait a little while for the chip to get its brains in order. */
	uether_pause(&sc->sc_ue, hz / 100);

	/* Reinitialize controller to achieve full reset. */
	axge_chip_init(sc);
}

/* uether post-attach hook: init the chip and read the MAC address. */
static void
axge_attach_post(struct usb_ether *ue)
{
	struct axge_softc *sc;

	sc = uether_getsc(ue);

	/* Initialize controller and get station address. */
	axge_chip_init(sc);
	axge_read_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
	    ue->ue_eaddr, ETHER_ADDR_LEN);
}

/*
 * uether post-attach-sub hook: set up ifnet methods/capabilities and
 * attach the MII bus.  Returns the mii_attach() error code.
 */
static int
axge_attach_post_sub(struct usb_ether *ue)
{
	if_t ifp;
	int error;

	ifp = ue->ue_ifp;
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(ifp, uether_start);
	if_setioctlfn(ifp, axge_ioctl);
	if_setinitfn(ifp, uether_init);
	if_setsendqlen(ifp, ifqmaxlen);
	if_setsendqready(ifp);

	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_TXCSUM | IFCAP_RXCSUM, 0);
	if_sethwassist(ifp, AXGE_CSUM_FEATURES);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	bus_topo_lock();
	error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
	    uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
	    BMSR_DEFCAPMASK, AXGE_PHY_ADDR, MII_OFFSET_ANY, MIIF_DOPAUSE);
	bus_topo_unlock();

	return (error);
}

/*
 * Set media options.
 */
static int
axge_ifmedia_upd(if_t ifp)
{
	struct axge_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = if_getsoftc(ifp);
	mii = GET_MII(sc);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);

	return (error);
}

/*
 * Report current media status.
 */
static void
axge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct axge_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = GET_MII(sc);
	AXGE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	AXGE_UNLOCK(sc);
}

/*
 * Probe for a AX88179 chip.
 */
static int
axge_probe(device_t dev)
{
	struct usb_attach_arg *uaa;

	uaa = device_get_ivars(dev);
	if (uaa->usb_mode != USB_MODE_HOST)
		return (ENXIO);
	if (uaa->info.bConfigIndex != AXGE_CONFIG_IDX)
		return (ENXIO);
	if (uaa->info.bIfaceIndex != AXGE_IFACE_IDX)
		return (ENXIO);

	return (usbd_lookup_id_by_uaa(axge_devs, sizeof(axge_devs), uaa));
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
axge_attach(device_t dev)
{
	struct usb_attach_arg *uaa;
	struct axge_softc *sc;
	struct usb_ether *ue;
	uint8_t iface_index;
	int error;

	uaa = device_get_ivars(dev);
	sc = device_get_softc(dev);
	ue = &sc->sc_ue;

	device_set_usb_desc(dev);
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	iface_index = AXGE_IFACE_IDX;
	error = usbd_transfer_setup(uaa->device, &iface_index,
	    sc->sc_xfer, axge_config, AXGE_N_TRANSFER, sc, &sc->sc_mtx);
	if (error) {
		device_printf(dev, "allocating USB transfers failed\n");
		mtx_destroy(&sc->sc_mtx);
		return (ENXIO);
	}

	ue->ue_sc = sc;
	ue->ue_dev = dev;
	ue->ue_udev = uaa->device;
	ue->ue_mtx = &sc->sc_mtx;
	ue->ue_methods = &axge_ue_methods;

	error = uether_ifattach(ue);
	if (error) {
		device_printf(dev, "could not attach interface\n");
		goto detach;
	}
	return (0);			/* success */

detach:
	axge_detach(dev);
	return (ENXIO);			/* failure */
}

/*
 * Detach: quiesce uether work, stop the chip and put it in a low-power,
 * MAC-disabled state, then tear down transfers, the ifnet and the mutex.
 */
static int
axge_detach(device_t dev)
{
	struct axge_softc *sc;
	struct usb_ether *ue;
	uint16_t val;

	sc = device_get_softc(dev);
	ue = &sc->sc_ue;
	if (device_is_attached(dev)) {
		/* wait for any post attach or other command to complete */
		usb_proc_drain(&ue->ue_tq);

		AXGE_LOCK(sc);
		/*
		 * XXX
		 * ether_ifdetach(9) should be called first.
		 */
		axge_stop(ue);
		/* Force bulk-in to return a zero-length USB packet. */
		val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR);
		val |= EPPRCR_BZ | EPPRCR_IPRL;
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, val);
		/* Change clock. */
		axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT, 0);
		/* Disable MAC. */
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, 0);
		AXGE_UNLOCK(sc);
	}
	usbd_transfer_unsetup(sc->sc_xfer, AXGE_N_TRANSFER);
	uether_ifdetach(ue);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

/*
 * Bulk-in completion callback.  On a completed transfer, parse the
 * aggregated RX buffer into individual frames, then resubmit the transfer
 * and flush queued mbufs up the stack.  Non-cancel errors clear the stall
 * and retry.
 */
static void
axge_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct axge_softc *sc;
	struct usb_ether *ue;
	struct usb_page_cache *pc;
	int actlen;

	sc = usbd_xfer_softc(xfer);
	ue = &sc->sc_ue;
	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		pc = usbd_xfer_get_frame(xfer, 0);
		axge_rx_frame(ue, pc, actlen);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		uether_rxflush(ue);
		break;

	default:
		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}

/*
 * Bulk-out completion callback.  Dequeues up to AXGE_N_FRAMES packets,
 * prepends the chip's TX header to each and submits them as a multi-frame
 * transfer.  OACTIVE gates submissions while a transfer is in flight.
 */
static void
axge_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct axge_softc *sc;
	if_t ifp;
	struct usb_page_cache *pc;
	struct mbuf *m;
	struct axge_frame_txhdr txhdr;
	int nframes, pos;

	sc = usbd_xfer_softc(xfer);
	ifp = uether_getifp(&sc->sc_ue);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		if ((sc->sc_flags & AXGE_FLAG_LINK) == 0 ||
		    (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) != 0) {
			/*
			 * Don't send anything if there is no link or
			 * controller is busy.
			 */
			return;
		}

		for (nframes = 0; nframes < AXGE_N_FRAMES &&
		    !if_sendq_empty(ifp); nframes++) {
			m = if_dequeue(ifp);
			if (m == NULL)
				break;
			usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES,
			    nframes);
			pc = usbd_xfer_get_frame(xfer, nframes);
			txhdr.mss = 0;
			txhdr.len = htole32(AXGE_TXBYTES(m->m_pkthdr.len));
			/*
			 * Tell the chip not to checksum packets that the
			 * stack did not ask offload for.
			 */
			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0 &&
			    (m->m_pkthdr.csum_flags & AXGE_CSUM_FEATURES) == 0)
				txhdr.len |= htole32(AXGE_CSUM_DISABLE);

			pos = 0;
			usbd_copy_in(pc, pos, &txhdr, sizeof(txhdr));
			pos += sizeof(txhdr);
			usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len);
			pos += m->m_pkthdr.len;

			/*
			 * if there's a BPF listener, bounce a copy
			 * of this frame to him:
			 */
			BPF_MTAP(ifp, m);

			m_freem(m);

			/* Set frame length. */
			usbd_xfer_set_frame_len(xfer, nframes, pos);
		}
		if (nframes != 0) {
			/*
			 * XXX
			 * Update TX packet counter here. This is not
			 * correct way but it seems that there is no way
			 * to know how many packets are sent at the end
			 * of transfer because controller combines
			 * multiple writes into single one if there is
			 * room in TX buffer of controller.
			 */
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, nframes);
			usbd_xfer_set_frames(xfer, nframes);
			usbd_transfer_submit(xfer);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
		}
		return;
		/* NOTREACHED */
	default:
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		return;
	}
}

/* Periodic uether tick: drive the MII state machine. */
static void
axge_tick(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct mii_data *mii;

	sc = uether_getsc(ue);
	mii = GET_MII(sc);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	mii_tick(mii);
}

/*
 * if_foreach_llmaddr() callback: fold one multicast address into the
 * 64-bit hash filter table (top 6 bits of the big-endian CRC32 select
 * the bit).
 */
static u_int
axge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint8_t *hashtbl = arg;
	uint32_t h;

	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
	hashtbl[h / 8] |= 1 << (h % 8);

	return (1);
}

/*
 * Program the RX filter (RCR) and the multicast hash table from the
 * current interface flags and multicast list.
 */
static void
axge_rxfilter(struct usb_ether *ue)
{
	struct axge_softc *sc;
	if_t ifp;
	uint16_t rxmode;
	uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Configure RX settings.
	 * Don't set RCR_IPE(IP header alignment on 32bit boundary) to disable
	 * inserting extra padding bytes.  This wastes ethernet to USB host
	 * bandwidth as well as complicating RX handling logic.  Current USB
	 * framework requires copying RX frames to mbufs so there is no need
	 * to worry about alignment.
	 */
	rxmode = RCR_DROP_CRCERR | RCR_START;
	if (if_getflags(ifp) & IFF_BROADCAST)
		rxmode |= RCR_ACPT_BCAST;
	if (if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) {
		if (if_getflags(ifp) & IFF_PROMISC)
			rxmode |= RCR_PROMISC;
		rxmode |= RCR_ACPT_ALL_MCAST;
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
		return;
	}

	rxmode |= RCR_ACPT_MCAST;
	if_foreach_llmaddr(ifp, axge_hash_maddr, &hashtbl);

	axge_write_mem(sc, AXGE_ACCESS_MAC, 8, AXGE_MFA, (void *)&hashtbl, 8);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
}

/* uether start hook: kick both bulk transfers. */
static void
axge_start(struct usb_ether *ue)
{
	struct axge_softc *sc;

	sc = uether_getsc(ue);
	/*
	 * Start the USB transfers, if not already started.
	 */
	usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_RD]);
	usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_WR]);
}

/*
 * Bring the interface up: stop/reset the chip, program the station
 * address, checksum offload and RX filters, disable wakeup, set a default
 * medium and finally switch to the selected media.  Idempotent while
 * IFF_DRV_RUNNING is set.
 */
static void
axge_init(struct usb_ether *ue)
{
	struct axge_softc *sc;
	if_t ifp;

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	axge_stop(ue);

	axge_reset(sc);

	/* Set MAC address. */
	axge_write_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
	    if_getlladdr(ifp), ETHER_ADDR_LEN);

	/*
	 * Pause water level registers; 0x34/0x52 match the vendor's
	 * recommended values — TODO confirm against the datasheet.
	 */
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLLR, 0x34);
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLHR, 0x52);

	/* Configure TX/RX checksum offloading. */
	axge_csum_cfg(ue);

	/* Configure RX filters. */
	axge_rxfilter(ue);

	/*
	 * XXX
	 * Controller supports wakeup on link change detection,
	 * magic packet and wakeup frame recpetion. But it seems
	 * there is no framework for USB ethernet suspend/wakeup.
	 * Disable all wakeup functions.
	 */
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR, 0);
	(void)axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR);

	/* Configure default medium type. */
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, MSR_GM | MSR_FD |
	    MSR_RFC | MSR_TFC | MSR_RE);

	usbd_xfer_set_stall(sc->sc_xfer[AXGE_BULK_DT_WR]);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	/* Switch to selected media. */
	axge_ifmedia_upd(ifp);
}

/*
 * Bring the interface down: disable the receiver, clear RUNNING/OACTIVE
 * and the link flag, and stop both bulk transfers.
 */
static void
axge_stop(struct usb_ether *ue)
{
	struct axge_softc *sc;
	if_t ifp;
	uint16_t val;

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR);
	val &= ~MSR_RE;
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);

	if (ifp != NULL)
		if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->sc_flags &= ~AXGE_FLAG_LINK;

	/*
	 * Stop all the transfers, if not already stopped:
	 */
	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_WR]);
	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_RD]);
}

/*
 * ioctl handler.  SIOCSIFCAP (checksum offload toggles) is handled here —
 * changing offload requires a reinit, so RUNNING is cleared and
 * uether_init() re-runs axge_init().  Everything else goes to
 * uether_ioctl().
 */
static int
axge_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct usb_ether *ue;
	struct axge_softc *sc;
	struct ifreq *ifr;
	int error, mask, reinit;

	ue = if_getsoftc(ifp);
	sc = uether_getsc(ue);
	ifr = (struct ifreq *)data;
	error = 0;
	reinit = 0;
	if (cmd == SIOCSIFCAP) {
		AXGE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
				if_sethwassistbits(ifp, AXGE_CSUM_FEATURES, 0);
			else
				if_sethwassistbits(ifp, 0, AXGE_CSUM_FEATURES);
			reinit++;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_RXCSUM);
			reinit++;
		}
		if (reinit > 0 && if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		else
			reinit = 0;
		AXGE_UNLOCK(sc);
		if (reinit > 0)
			uether_init(ue);
	} else
		error = uether_ioctl(ifp, cmd, data);

	return (error);
}

/*
 * Parse one completed bulk-in buffer.  The trailing 32-bit rxhdr gives
 * the packet count and the offset of the per-packet header array; each
 * frame is then handed to axge_rxeof() unless its status flags an error.
 */
static void
axge_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen)
{
	struct axge_frame_rxhdr pkt_hdr;
	uint32_t rxhdr;
	uint32_t pos;
	uint32_t pkt_cnt, pkt_end;
	uint32_t hdr_off;
	uint32_t pktlen;

	/* verify we have enough data */
	if (actlen < (int)sizeof(rxhdr))
		return;

	pos = 0;

	usbd_copy_out(pc, actlen - sizeof(rxhdr), &rxhdr, sizeof(rxhdr));
	rxhdr = le32toh(rxhdr);

	pkt_cnt = rxhdr & 0xFFFF;
	hdr_off = pkt_end = (rxhdr >> 16) & 0xFFFF;

	/*
	 * <----------------------- actlen ------------------------>
	 * [frame #0]...[frame #N][pkt_hdr #0]...[pkt_hdr #N][rxhdr]
	 * Each RX frame would be aligned on 8 bytes boundary. If
	 * RCR_IPE bit is set in AXGE_RCR register, there would be 2
	 * padding bytes and 6 dummy bytes(as the padding also should
	 * be aligned on 8 bytes boundary) for each RX frame to align
	 * IP header on 32bits boundary.  Driver don't set RCR_IPE bit
	 * of AXGE_RCR register, so there should be no padding bytes
	 * which simplifies RX logic a lot.
	 */
	while (pkt_cnt--) {
		/* verify the header offset */
		if ((int)(hdr_off + sizeof(pkt_hdr)) > actlen) {
			DPRINTF("End of packet headers\n");
			break;
		}
		usbd_copy_out(pc, hdr_off, &pkt_hdr, sizeof(pkt_hdr));
		pkt_hdr.status = le32toh(pkt_hdr.status);
		pktlen = AXGE_RXBYTES(pkt_hdr.status);
		if (pos + pktlen > pkt_end) {
			DPRINTF("Data position reached end\n");
			break;
		}

		if (AXGE_RX_ERR(pkt_hdr.status) != 0) {
			DPRINTF("Dropped a packet\n");
			if_inc_counter(ue->ue_ifp, IFCOUNTER_IERRORS, 1);
		} else
			axge_rxeof(ue, pc, pos, pktlen, pkt_hdr.status);
		/* Frames are 8-byte aligned; headers are packed. */
		pos += (pktlen + 7) & ~7;
		hdr_off += sizeof(pkt_hdr);
	}
}

/*
 * Copy one received frame out of the USB buffer into an mbuf, translate
 * the chip's checksum status bits into mbuf csum flags and queue it for
 * uether_rxflush().
 */
static void
axge_rxeof(struct usb_ether *ue, struct usb_page_cache *pc, unsigned offset,
    unsigned len, uint32_t status)
{
	if_t ifp;
	struct mbuf *m;

	ifp = ue->ue_ifp;
	if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) {
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		return;
	}

	/* Use a cluster only when the frame won't fit in a plain header mbuf. */
	if (len > MHLEN - ETHER_ALIGN)
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	else
		m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
		return;
	}
	m->m_pkthdr.rcvif = ifp;
	m->m_len = m->m_pkthdr.len = len;
	m->m_data += ETHER_ALIGN;

	usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);

	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
		if ((status & AXGE_RX_L3_CSUM_ERR) == 0 &&
		    (status & AXGE_RX_L3_TYPE_MASK) == AXGE_RX_L3_TYPE_IPV4)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
			    CSUM_IP_VALID;
		if ((status & AXGE_RX_L4_CSUM_ERR) == 0 &&
		    ((status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_UDP ||
		    (status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_TCP)) {
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	(void)mbufq_enqueue(&ue->ue_rxq, m);
}

/*
 * Program the TX (CTCR) and RX (CRCR) checksum offload control registers
 * from the interface's enabled capabilities.
 */
static void
axge_csum_cfg(struct usb_ether *ue)
{
	struct axge_softc *sc;
	if_t ifp;
	uint8_t csum;

	sc = uether_getsc(ue);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);
	ifp = uether_getifp(ue);

	csum = 0;
	if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
		csum |= CTCR_IP | CTCR_TCP | CTCR_UDP;
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CTCR, csum);

	csum = 0;
	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
		csum |= CRCR_IP | CRCR_TCP | CRCR_UDP;
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CRCR, csum);
}