/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2014 Kevin Lo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * ASIX Electronics AX88178A/AX88179 USB 2.0/3.0 gigabit ethernet driver.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include "usbdevs.h"

#define	USB_DEBUG_VAR	axge_debug
#include <dev/usb/usb_debug.h>
#include <dev/usb/usb_process.h>

#include <dev/usb/net/usb_ethernet.h>
#include <dev/usb/net/if_axgereg.h>

#include "miibus_if.h"

/*
 * Various supported device vendors/products.
 */

static const STRUCT_USB_HOST_ID axge_devs[] = {
#define	AXGE_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) }
	AXGE_DEV(ASIX, AX88178A),
	AXGE_DEV(ASIX, AX88179),
	AXGE_DEV(BELKIN, B2B128),
	AXGE_DEV(DLINK, DUB1312),
	AXGE_DEV(LENOVO, GIGALAN),
	AXGE_DEV(SITECOMEU, LN032),
#undef AXGE_DEV
};

static const struct {
	uint8_t	ctrl;
	uint8_t	timer_l;
	uint8_t	timer_h;
	uint8_t	size;
	uint8_t	ifg;
} __packed axge_bulk_size[] = {
	{ 7, 0x4f, 0x00, 0x12, 0xff },
	{ 7, 0x20, 0x03, 0x16, 0xff },
	{ 7, 0xae, 0x07, 0x18, 0xff },
	{ 7, 0xcc, 0x4c, 0x18, 0x08 }
};

/* prototypes */

static device_probe_t axge_probe;
static device_attach_t axge_attach;
static device_detach_t axge_detach;

static usb_callback_t axge_bulk_read_callback;
static usb_callback_t axge_bulk_write_callback;

static miibus_readreg_t axge_miibus_readreg;
static miibus_writereg_t axge_miibus_writereg;
static miibus_statchg_t axge_miibus_statchg;

static uether_fn_t axge_attach_post;
static uether_fn_t axge_init;
static uether_fn_t axge_stop;
static uether_fn_t axge_start;
static uether_fn_t axge_tick;
static uether_fn_t axge_rxfilter;

static int	axge_read_mem(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, void *, int);
static void	axge_write_mem(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, void *, int);
static uint8_t	axge_read_cmd_1(struct axge_softc *, uint8_t, uint16_t);
static uint16_t	axge_read_cmd_2(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t);
static void	axge_write_cmd_1(struct axge_softc *, uint8_t, uint16_t,
		    uint8_t);
static void	axge_write_cmd_2(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, uint16_t);
static void	axge_chip_init(struct axge_softc *);
static void	axge_reset(struct axge_softc *);

static int	axge_attach_post_sub(struct usb_ether *);
static int	axge_ifmedia_upd(struct ifnet *);
static void	axge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int	axge_ioctl(struct ifnet *, u_long, caddr_t);
static void	axge_rx_frame(struct usb_ether *, struct usb_page_cache *, int);
static void	axge_rxeof(struct usb_ether *, struct usb_page_cache *,
		    unsigned int, unsigned int, uint32_t);
static void	axge_csum_cfg(struct usb_ether *);

#define	AXGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef USB_DEBUG
static int axge_debug = 0;

static SYSCTL_NODE(_hw_usb, OID_AUTO, axge, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "USB axge");
SYSCTL_INT(_hw_usb_axge, OID_AUTO, debug, CTLFLAG_RWTUN, &axge_debug, 0,
    "Debug level");
#endif

static const struct usb_config axge_config[AXGE_N_TRANSFER] = {
	[AXGE_BULK_DT_WR] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.frames = AXGE_N_FRAMES,
		.bufsize = AXGE_N_FRAMES * MCLBYTES,
		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
		.callback = axge_bulk_write_callback,
		.timeout = 10000,	/* 10 seconds */
	},
	[AXGE_BULK_DT_RD] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = 65536,
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
		.callback = axge_bulk_read_callback,
		.timeout = 0,		/* no timeout */
	},
};

static device_method_t axge_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		axge_probe),
	DEVMETHOD(device_attach,	axge_attach),
	DEVMETHOD(device_detach,	axge_detach),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	axge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	axge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	axge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t axge_driver = {
	.name = "axge",
	.methods = axge_methods,
	.size = sizeof(struct axge_softc),
};

static devclass_t axge_devclass;

DRIVER_MODULE(axge, uhub, axge_driver, axge_devclass, NULL, NULL);
DRIVER_MODULE(miibus, axge, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(axge, uether, 1, 1, 1);
MODULE_DEPEND(axge, usb, 1, 1, 1);
MODULE_DEPEND(axge, ether, 1, 1, 1);
MODULE_DEPEND(axge, miibus, 1, 1, 1);
MODULE_VERSION(axge, 1);
USB_PNP_HOST_INFO(axge_devs);

static const struct usb_ether_methods axge_ue_methods = {
	.ue_attach_post = axge_attach_post,
	.ue_attach_post_sub = axge_attach_post_sub,
	.ue_start = axge_start,
	.ue_init = axge_init,
	.ue_stop = axge_stop,
	.ue_tick = axge_tick,
	.ue_setmulti = axge_rxfilter,
	.ue_setpromisc = axge_rxfilter,
	.ue_mii_upd = axge_ifmedia_upd,
	.ue_mii_sts = axge_ifmedia_sts,
};

static int
axge_read_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t val, void *buf, int len)
{
	struct usb_device_request req;

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = cmd;
	USETW(req.wValue, val);
	USETW(req.wIndex, index);
	USETW(req.wLength, len);

	return (uether_do_request(&sc->sc_ue, &req, buf, 1000));
}

static void
axge_write_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t val, void *buf, int len)
{
	struct usb_device_request req;

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = cmd;
	USETW(req.wValue, val);
	USETW(req.wIndex, index);
	USETW(req.wLength, len);

	if (uether_do_request(&sc->sc_ue, &req, buf, 1000)) {
		/* Error ignored. */
	}
}

static uint8_t
axge_read_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg)
{
	uint8_t val;

	axge_read_mem(sc, cmd, 1, reg, &val, 1);
	return (val);
}

static uint16_t
axge_read_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t reg)
{
	uint8_t val[2];

	axge_read_mem(sc, cmd, index, reg, &val, 2);
	return (UGETW(val));
}

static void
axge_write_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg, uint8_t val)
{
	axge_write_mem(sc, cmd, 1, reg, &val, 1);
}

static void
axge_write_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t reg, uint16_t val)
{
	uint8_t temp[2];

	USETW(temp, val);
	axge_write_mem(sc, cmd, index, reg, &temp, 2);
}

static int
axge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct axge_softc *sc;
	uint16_t val;
	int locked;

	sc = device_get_softc(dev);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	val = axge_read_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy);

	if (!locked)
		AXGE_UNLOCK(sc);

	return (val);
}

static int
axge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct axge_softc *sc;
	int locked;

	sc = device_get_softc(dev);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	axge_write_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy, val);

	if (!locked)
		AXGE_UNLOCK(sc);

	return (0);
}

static void
axge_miibus_statchg(device_t dev)
{
	struct axge_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint8_t link_status, tmp[5];
	uint16_t val;
	int locked;

	sc = device_get_softc(dev);
	mii = GET_MII(sc);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	ifp = uether_getifp(&sc->sc_ue);
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done;

	sc->sc_flags &= ~AXGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->sc_flags |= AXGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Lost link, do nothing. */
	if ((sc->sc_flags & AXGE_FLAG_LINK) == 0)
		goto done;

	link_status = axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PLSR);

	val = 0;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		val |= MSR_FD;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			val |= MSR_TFC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			val |= MSR_RFC;
	}
	val |= MSR_RE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		val |= MSR_GM | MSR_EN_125MHZ;
		if (link_status & PLSR_USB_SS)
			memcpy(tmp, &axge_bulk_size[0], 5);
		else if (link_status & PLSR_USB_HS)
			memcpy(tmp, &axge_bulk_size[1], 5);
		else
			memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	case IFM_100_TX:
		val |= MSR_PS;
		if (link_status & (PLSR_USB_SS | PLSR_USB_HS))
			memcpy(tmp, &axge_bulk_size[2], 5);
		else
			memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	case IFM_10_T:
		memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	}
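	/*
	 * tmp now holds one 5-byte tuple from axge_bulk_size[] --
	 * { ctrl, timer_l, timer_h, size, ifg } -- selected above based on
	 * the negotiated link speed and the USB connection speed, and is
	 * written to AXGE_RX_BULKIN_QCTRL below (presumably to tune how the
	 * controller batches received frames into bulk-in transfers).
	 */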
	/* Rx bulk configuration. */
	axge_write_mem(sc, AXGE_ACCESS_MAC, 5, AXGE_RX_BULKIN_QCTRL, tmp, 5);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);
done:
	if (!locked)
		AXGE_UNLOCK(sc);
}

static void
axge_chip_init(struct axge_softc *sc)
{
	/* Power up ethernet PHY. */
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, 0);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, EPPRCR_IPRL);
	uether_pause(&sc->sc_ue, hz / 4);
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT,
	    AXGE_CLK_SELECT_ACS | AXGE_CLK_SELECT_BCS);
	uether_pause(&sc->sc_ue, hz / 10);
}

static void
axge_reset(struct axge_softc *sc)
{
	struct usb_config_descriptor *cd;
	usb_error_t err;

	cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev);

	err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx,
	    cd->bConfigurationValue);
	if (err)
		DPRINTF("reset failed (ignored)\n");

	/* Wait a little while for the chip to get its brains in order. */
	uether_pause(&sc->sc_ue, hz / 100);

	/* Reinitialize controller to achieve full reset. */
	axge_chip_init(sc);
}

static void
axge_attach_post(struct usb_ether *ue)
{
	struct axge_softc *sc;

	sc = uether_getsc(ue);

	/* Initialize controller and get station address. */
	axge_chip_init(sc);
	axge_read_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
	    ue->ue_eaddr, ETHER_ADDR_LEN);
}

static int
axge_attach_post_sub(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	int error;

	sc = uether_getsc(ue);
	ifp = ue->ue_ifp;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = uether_start;
	ifp->if_ioctl = axge_ioctl;
	ifp->if_init = uether_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_TXCSUM | IFCAP_RXCSUM;
	ifp->if_hwassist = AXGE_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	mtx_lock(&Giant);
	error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
	    uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
	    BMSR_DEFCAPMASK, AXGE_PHY_ADDR, MII_OFFSET_ANY, MIIF_DOPAUSE);
	mtx_unlock(&Giant);

	return (error);
}

/*
 * Set media options.
 */
static int
axge_ifmedia_upd(struct ifnet *ifp)
{
	struct axge_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	mii = GET_MII(sc);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);

	return (error);
}

/*
 * Report current media status.
 */
static void
axge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct axge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = GET_MII(sc);
	AXGE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	AXGE_UNLOCK(sc);
}

/*
 * Probe for an AX88179 chip.
 */
static int
axge_probe(device_t dev)
{
	struct usb_attach_arg *uaa;

	uaa = device_get_ivars(dev);
	if (uaa->usb_mode != USB_MODE_HOST)
		return (ENXIO);
	if (uaa->info.bConfigIndex != AXGE_CONFIG_IDX)
		return (ENXIO);
	if (uaa->info.bIfaceIndex != AXGE_IFACE_IDX)
		return (ENXIO);

	return (usbd_lookup_id_by_uaa(axge_devs, sizeof(axge_devs), uaa));
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
axge_attach(device_t dev)
{
	struct usb_attach_arg *uaa;
	struct axge_softc *sc;
	struct usb_ether *ue;
	uint8_t iface_index;
	int error;

	uaa = device_get_ivars(dev);
	sc = device_get_softc(dev);
	ue = &sc->sc_ue;

	device_set_usb_desc(dev);
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	iface_index = AXGE_IFACE_IDX;
	error = usbd_transfer_setup(uaa->device, &iface_index,
	    sc->sc_xfer, axge_config, AXGE_N_TRANSFER, sc, &sc->sc_mtx);
	if (error) {
		device_printf(dev, "allocating USB transfers failed\n");
		mtx_destroy(&sc->sc_mtx);
		return (ENXIO);
	}

	ue->ue_sc = sc;
	ue->ue_dev = dev;
	ue->ue_udev = uaa->device;
	ue->ue_mtx = &sc->sc_mtx;
	ue->ue_methods = &axge_ue_methods;

	error = uether_ifattach(ue);
	if (error) {
		device_printf(dev, "could not attach interface\n");
		goto detach;
	}
	return (0);			/* success */

detach:
	axge_detach(dev);
	return (ENXIO);			/* failure */
}

static int
axge_detach(device_t dev)
{
	struct axge_softc *sc;
	struct usb_ether *ue;
	uint16_t val;

	sc = device_get_softc(dev);
	ue = &sc->sc_ue;
	if (device_is_attached(dev)) {
		/* wait for any post attach or other command to complete */
		usb_proc_drain(&ue->ue_tq);

		AXGE_LOCK(sc);
		/*
		 * XXX
		 * ether_ifdetach(9) should be called first.
		 */
		axge_stop(ue);
		/* Force bulk-in to return a zero-length USB packet. */
		val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR);
		val |= EPPRCR_BZ | EPPRCR_IPRL;
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, val);
		/* Change clock. */
		axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT, 0);
		/* Disable MAC. */
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, 0);
		AXGE_UNLOCK(sc);
	}
	usbd_transfer_unsetup(sc->sc_xfer, AXGE_N_TRANSFER);
	uether_ifdetach(ue);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

static void
axge_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct axge_softc *sc;
	struct usb_ether *ue;
	struct usb_page_cache *pc;
	int actlen;

	sc = usbd_xfer_softc(xfer);
	ue = &sc->sc_ue;
	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		pc = usbd_xfer_get_frame(xfer, 0);
		axge_rx_frame(ue, pc, actlen);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		uether_rxflush(ue);
		break;

	default:
		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}

static void
axge_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	struct usb_page_cache *pc;
	struct mbuf *m;
	struct axge_frame_txhdr txhdr;
	int nframes, pos;

	sc = usbd_xfer_softc(xfer);
	ifp = uether_getifp(&sc->sc_ue);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		if ((sc->sc_flags & AXGE_FLAG_LINK) == 0 ||
		    (ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) {
			/*
			 * Don't send anything if there is no link or
			 * the controller is busy.
			 */
			return;
		}

		for (nframes = 0; nframes < AXGE_N_FRAMES &&
		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd); nframes++) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;
			usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES,
			    nframes);
			pc = usbd_xfer_get_frame(xfer, nframes);
			txhdr.mss = 0;
			txhdr.len = htole32(AXGE_TXBYTES(m->m_pkthdr.len));
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0 &&
			    (m->m_pkthdr.csum_flags & AXGE_CSUM_FEATURES) == 0)
				txhdr.len |= htole32(AXGE_CSUM_DISABLE);

			pos = 0;
			usbd_copy_in(pc, pos, &txhdr, sizeof(txhdr));
			pos += sizeof(txhdr);
			usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len);
			pos += m->m_pkthdr.len;

			/*
			 * If there's a BPF listener, bounce a copy
			 * of this frame to it.
			 */
			BPF_MTAP(ifp, m);

			m_freem(m);

			/* Set frame length. */
			usbd_xfer_set_frame_len(xfer, nframes, pos);
		}
		if (nframes != 0) {
			/*
			 * XXX
			 * Update the TX packet counter here.  This is not
			 * the correct way, but there seems to be no way to
			 * know how many packets were actually sent at the
			 * end of the transfer, because the controller
			 * combines multiple writes into a single one when
			 * there is room in its TX buffer.
			 */
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, nframes);
			usbd_xfer_set_frames(xfer, nframes);
			usbd_transfer_submit(xfer);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		}
		return;
		/* NOTREACHED */
	default:
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		return;
	}
}

static void
axge_tick(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct mii_data *mii;

	sc = uether_getsc(ue);
	mii = GET_MII(sc);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	mii_tick(mii);
}

static u_int
axge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint8_t *hashtbl = arg;
	uint32_t h;

	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
	hashtbl[h / 8] |= 1 << (h % 8);

	return (1);
}

static void
axge_rxfilter(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	uint16_t rxmode;
	uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Configure RX settings.
	 * Don't set RCR_IPE (IP header alignment on a 32-bit boundary), to
	 * disable inserting extra padding bytes.  The padding wastes
	 * Ethernet-to-USB-host bandwidth and complicates RX handling logic.
	 * The current USB framework requires copying RX frames to mbufs, so
	 * there is no need to worry about alignment.
	 */
	rxmode = RCR_DROP_CRCERR | RCR_START;
	if (ifp->if_flags & IFF_BROADCAST)
		rxmode |= RCR_ACPT_BCAST;
	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		if (ifp->if_flags & IFF_PROMISC)
			rxmode |= RCR_PROMISC;
		rxmode |= RCR_ACPT_ALL_MCAST;
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
		return;
	}

	rxmode |= RCR_ACPT_MCAST;
	if_foreach_llmaddr(ifp, axge_hash_maddr, &hashtbl);

	axge_write_mem(sc, AXGE_ACCESS_MAC, 8, AXGE_MFA, (void *)&hashtbl, 8);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
}

static void
axge_start(struct usb_ether *ue)
{
	struct axge_softc *sc;

	sc = uether_getsc(ue);
	/*
	 * Start the USB transfers, if not already started.
	 */
	usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_RD]);
	usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_WR]);
}

static void
axge_init(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	axge_stop(ue);

	axge_reset(sc);

	/* Set MAC address. */
	axge_write_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
	    IF_LLADDR(ifp), ETHER_ADDR_LEN);

	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLLR, 0x34);
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLHR, 0x52);

	/* Configure TX/RX checksum offloading. */
	axge_csum_cfg(ue);

	/* Configure RX filters. */
	axge_rxfilter(ue);

	/*
	 * XXX
	 * The controller supports wakeup on link change detection,
	 * magic packet, and wakeup frame reception, but there seems
	 * to be no framework for USB Ethernet suspend/wakeup.
	 * Disable all wakeup functions.
	 */
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR, 0);
	(void)axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR);

	/* Configure default medium type. */
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, MSR_GM | MSR_FD |
	    MSR_RFC | MSR_TFC | MSR_RE);

	usbd_xfer_set_stall(sc->sc_xfer[AXGE_BULK_DT_WR]);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	/* Switch to selected media. */
	axge_ifmedia_upd(ifp);
}

static void
axge_stop(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	uint16_t val;

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR);
	val &= ~MSR_RE;
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);

	if (ifp != NULL)
		ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~AXGE_FLAG_LINK;

	/*
	 * Stop all the transfers, if not already stopped.
	 */
	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_WR]);
	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_RD]);
}

static int
axge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct usb_ether *ue;
	struct axge_softc *sc;
	struct ifreq *ifr;
	int error, mask, reinit;

	ue = ifp->if_softc;
	sc = uether_getsc(ue);
	ifr = (struct ifreq *)data;
	error = 0;
	reinit = 0;
	if (cmd == SIOCSIFCAP) {
		AXGE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= AXGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~AXGE_CSUM_FEATURES;
			reinit++;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit++;
		}
		if (reinit > 0 && ifp->if_drv_flags & IFF_DRV_RUNNING)
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		else
			reinit = 0;
		AXGE_UNLOCK(sc);
		if (reinit > 0)
			uether_init(ue);
	} else
		error = uether_ioctl(ifp, cmd, data);

	return (error);
}

static void
axge_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen)
{
	struct axge_frame_rxhdr pkt_hdr;
	uint32_t rxhdr;
	uint32_t pos;
	uint32_t pkt_cnt, pkt_end;
	uint32_t hdr_off;
	uint32_t pktlen;

	/* verify we have enough data */
	if (actlen < (int)sizeof(rxhdr))
		return;

	pos = 0;

	usbd_copy_out(pc, actlen - sizeof(rxhdr), &rxhdr, sizeof(rxhdr));
	rxhdr = le32toh(rxhdr);

	pkt_cnt = rxhdr & 0xFFFF;
	hdr_off = pkt_end = (rxhdr >> 16) & 0xFFFF;

	/*
	 * <----------------------- actlen ------------------------>
	 * [frame #0]...[frame #N][pkt_hdr #0]...[pkt_hdr #N][rxhdr]
	 * Each RX frame is aligned on an 8-byte boundary.  If the
	 * RCR_IPE bit is set in the AXGE_RCR register, each RX frame
	 * carries 2 padding bytes plus 6 dummy bytes (since the padded
	 * frame must also stay 8-byte aligned) to align the IP header
	 * on a 32-bit boundary.  The driver does not set the RCR_IPE
	 * bit of the AXGE_RCR register, so there are no padding bytes,
	 * which simplifies the RX logic a lot.
	 */
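	/*
	 * Hypothetical example (for illustration only): a transfer carrying
	 * two frames of 60 and 1514 bytes would place them at offsets 0 and
	 * 64 (each frame padded to a multiple of 8 bytes), followed by two
	 * pkt_hdr entries and the trailing 32-bit rxhdr.  That rxhdr would
	 * decode as pkt_cnt = 2 and hdr_off = pkt_end = 1584, i.e. the
	 * offset of pkt_hdr #0, which is also the end of the frame data.
	 */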
	while (pkt_cnt--) {
		/* verify the header offset */
		if ((int)(hdr_off + sizeof(pkt_hdr)) > actlen) {
			DPRINTF("End of packet headers\n");
			break;
		}
		usbd_copy_out(pc, hdr_off, &pkt_hdr, sizeof(pkt_hdr));
		pkt_hdr.status = le32toh(pkt_hdr.status);
		pktlen = AXGE_RXBYTES(pkt_hdr.status);
		if (pos + pktlen > pkt_end) {
			DPRINTF("Data position reached end\n");
			break;
		}

		if (AXGE_RX_ERR(pkt_hdr.status) != 0) {
			DPRINTF("Dropped a packet\n");
			if_inc_counter(ue->ue_ifp, IFCOUNTER_IERRORS, 1);
		} else
			axge_rxeof(ue, pc, pos, pktlen, pkt_hdr.status);
		pos += (pktlen + 7) & ~7;
		hdr_off += sizeof(pkt_hdr);
	}
}

static void
axge_rxeof(struct usb_ether *ue, struct usb_page_cache *pc, unsigned int offset,
    unsigned int len, uint32_t status)
{
	struct ifnet *ifp;
	struct mbuf *m;

	ifp = ue->ue_ifp;
	if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) {
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		return;
	}

	if (len > MHLEN - ETHER_ALIGN)
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	else
		m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
		return;
	}
	m->m_pkthdr.rcvif = ifp;
	m->m_len = m->m_pkthdr.len = len;
	m->m_data += ETHER_ALIGN;

	usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);

	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
		if ((status & AXGE_RX_L3_CSUM_ERR) == 0 &&
		    (status & AXGE_RX_L3_TYPE_MASK) == AXGE_RX_L3_TYPE_IPV4)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
			    CSUM_IP_VALID;
		if ((status & AXGE_RX_L4_CSUM_ERR) == 0 &&
		    ((status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_UDP ||
		    (status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_TCP)) {
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	(void)mbufq_enqueue(&ue->ue_rxq, m);
}

static void
axge_csum_cfg(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	uint8_t csum;

	sc = uether_getsc(ue);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);
	ifp = uether_getifp(ue);

	csum = 0;
	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
		csum |= CTCR_IP | CTCR_TCP | CTCR_UDP;
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CTCR, csum);

	csum = 0;
	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		csum |= CRCR_IP | CRCR_TCP | CRCR_UDP;
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CRCR, csum);
}