/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013-2014 Kevin Lo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * ASIX Electronics AX88178A/AX88179 USB 2.0/3.0 gigabit ethernet driver.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include "usbdevs.h"

#define	USB_DEBUG_VAR	axge_debug
#include <dev/usb/usb_debug.h>
#include <dev/usb/usb_process.h>

#include <dev/usb/net/usb_ethernet.h>
#include <dev/usb/net/if_axgereg.h>

#include "miibus_if.h"

/*
 * Various supported device vendors/products.
 */
static const STRUCT_USB_HOST_ID axge_devs[] = {
#define	AXGE_DEV(v,p)	{ USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) }
	AXGE_DEV(ASIX, AX88178A),
	AXGE_DEV(ASIX, AX88179),
	AXGE_DEV(DLINK, DUB1312),
	AXGE_DEV(LENOVO, GIGALAN),
	AXGE_DEV(SITECOMEU, LN032),
#undef AXGE_DEV
};
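
/*
 * RX bulk-in queue control settings, written to the AXGE_RX_BULKIN_QCTRL
 * registers by axge_miibus_statchg().  The entry used depends on the
 * negotiated link speed and on the USB connection speed.
 */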
static const struct {
	uint8_t	ctrl;
	uint8_t	timer_l;
	uint8_t	timer_h;
	uint8_t	size;
	uint8_t	ifg;
} __packed axge_bulk_size[] = {
	{ 7, 0x4f, 0x00, 0x12, 0xff },
	{ 7, 0x20, 0x03, 0x16, 0xff },
	{ 7, 0xae, 0x07, 0x18, 0xff },
	{ 7, 0xcc, 0x4c, 0x18, 0x08 }
};

/* prototypes */

static device_probe_t axge_probe;
static device_attach_t axge_attach;
static device_detach_t axge_detach;

static usb_callback_t axge_bulk_read_callback;
static usb_callback_t axge_bulk_write_callback;

static miibus_readreg_t axge_miibus_readreg;
static miibus_writereg_t axge_miibus_writereg;
static miibus_statchg_t axge_miibus_statchg;

static uether_fn_t axge_attach_post;
static uether_fn_t axge_init;
static uether_fn_t axge_stop;
static uether_fn_t axge_start;
static uether_fn_t axge_tick;
static uether_fn_t axge_rxfilter;

static int	axge_read_mem(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, void *, int);
static void	axge_write_mem(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, void *, int);
static uint8_t	axge_read_cmd_1(struct axge_softc *, uint8_t, uint16_t);
static uint16_t	axge_read_cmd_2(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t);
static void	axge_write_cmd_1(struct axge_softc *, uint8_t, uint16_t,
		    uint8_t);
static void	axge_write_cmd_2(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, uint16_t);
static void	axge_chip_init(struct axge_softc *);
static void	axge_reset(struct axge_softc *);

static int	axge_attach_post_sub(struct usb_ether *);
static int	axge_ifmedia_upd(struct ifnet *);
static void	axge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int	axge_ioctl(struct ifnet *, u_long, caddr_t);
static void	axge_rx_frame(struct usb_ether *, struct usb_page_cache *, int);
static void	axge_rxeof(struct usb_ether *, struct usb_page_cache *,
		    unsigned int, unsigned int, uint32_t);
static void	axge_csum_cfg(struct usb_ether *);

#define	AXGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef USB_DEBUG
static int axge_debug = 0;

static SYSCTL_NODE(_hw_usb, OID_AUTO, axge, CTLFLAG_RW, 0, "USB axge");
SYSCTL_INT(_hw_usb_axge, OID_AUTO, debug, CTLFLAG_RWTUN, &axge_debug, 0,
    "Debug level");
#endif

static const struct usb_config axge_config[AXGE_N_TRANSFER] = {
	[AXGE_BULK_DT_WR] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.frames = AXGE_N_FRAMES,
		.bufsize = AXGE_N_FRAMES * MCLBYTES,
		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
		.callback = axge_bulk_write_callback,
		.timeout = 10000,	/* 10 seconds */
	},
	[AXGE_BULK_DT_RD] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = 65536,
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
		.callback = axge_bulk_read_callback,
		.timeout = 0,		/* no timeout */
	},
};

static device_method_t axge_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		axge_probe),
	DEVMETHOD(device_attach,	axge_attach),
	DEVMETHOD(device_detach,	axge_detach),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	axge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	axge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	axge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t axge_driver = {
	.name = "axge",
	.methods = axge_methods,
	.size = sizeof(struct axge_softc),
};

static devclass_t axge_devclass;

DRIVER_MODULE(axge, uhub, axge_driver, axge_devclass, NULL, NULL);
DRIVER_MODULE(miibus, axge, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(axge, uether, 1, 1, 1);
MODULE_DEPEND(axge, usb, 1, 1, 1);
MODULE_DEPEND(axge, ether, 1, 1, 1);
MODULE_DEPEND(axge, miibus, 1, 1, 1);
MODULE_VERSION(axge, 1);
USB_PNP_HOST_INFO(axge_devs);

static const struct usb_ether_methods axge_ue_methods = {
	.ue_attach_post = axge_attach_post,
	.ue_attach_post_sub = axge_attach_post_sub,
	.ue_start = axge_start,
	.ue_init = axge_init,
	.ue_stop = axge_stop,
	.ue_tick = axge_tick,
	.ue_setmulti = axge_rxfilter,
	.ue_setpromisc = axge_rxfilter,
	.ue_mii_upd = axge_ifmedia_upd,
	.ue_mii_sts = axge_ifmedia_sts,
};

static int
axge_read_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t val, void *buf, int len)
{
	struct usb_device_request req;

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = cmd;
	USETW(req.wValue, val);
	USETW(req.wIndex, index);
	USETW(req.wLength, len);

	return (uether_do_request(&sc->sc_ue, &req, buf, 1000));
}

static void
axge_write_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t val, void *buf, int len)
{
	struct usb_device_request req;

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = cmd;
	USETW(req.wValue, val);
	USETW(req.wIndex, index);
	USETW(req.wLength, len);

	if (uether_do_request(&sc->sc_ue, &req, buf, 1000)) {
		/* Error ignored. */
	}
}
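
/*
 * One and two byte register access helpers layered on
 * axge_read_mem()/axge_write_mem().  Two byte values are converted
 * to and from the device's little-endian format with UGETW()/USETW().
 */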
static uint8_t
axge_read_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg)
{
	uint8_t val;

	axge_read_mem(sc, cmd, 1, reg, &val, 1);
	return (val);
}

static uint16_t
axge_read_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t reg)
{
	uint8_t val[2];

	axge_read_mem(sc, cmd, index, reg, &val, 2);
	return (UGETW(val));
}

static void
axge_write_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg, uint8_t val)
{
	axge_write_mem(sc, cmd, 1, reg, &val, 1);
}

static void
axge_write_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t reg, uint16_t val)
{
	uint8_t temp[2];

	USETW(temp, val);
	axge_write_mem(sc, cmd, index, reg, &temp, 2);
}

static int
axge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct axge_softc *sc;
	uint16_t val;
	int locked;

	sc = device_get_softc(dev);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	val = axge_read_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy);

	if (!locked)
		AXGE_UNLOCK(sc);

	return (val);
}

static int
axge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct axge_softc *sc;
	int locked;

	sc = device_get_softc(dev);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	axge_write_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy, val);

	if (!locked)
		AXGE_UNLOCK(sc);

	return (0);
}

static void
axge_miibus_statchg(device_t dev)
{
	struct axge_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint8_t link_status, tmp[5];
	uint16_t val;
	int locked;

	sc = device_get_softc(dev);
	mii = GET_MII(sc);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	ifp = uether_getifp(&sc->sc_ue);
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done;

	sc->sc_flags &= ~AXGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->sc_flags |= AXGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Lost link, do nothing. */
	if ((sc->sc_flags & AXGE_FLAG_LINK) == 0)
		goto done;

	link_status = axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PLSR);

	val = 0;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		val |= MSR_FD;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			val |= MSR_TFC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			val |= MSR_RFC;
	}
	val |= MSR_RE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		val |= MSR_GM | MSR_EN_125MHZ;
		if (link_status & PLSR_USB_SS)
			memcpy(tmp, &axge_bulk_size[0], 5);
		else if (link_status & PLSR_USB_HS)
			memcpy(tmp, &axge_bulk_size[1], 5);
		else
			memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	case IFM_100_TX:
		val |= MSR_PS;
		if (link_status & (PLSR_USB_SS | PLSR_USB_HS))
			memcpy(tmp, &axge_bulk_size[2], 5);
		else
			memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	case IFM_10_T:
		memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	}
	/* Rx bulk configuration. */
	axge_write_mem(sc, AXGE_ACCESS_MAC, 5, AXGE_RX_BULKIN_QCTRL, tmp, 5);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);
done:
	if (!locked)
		AXGE_UNLOCK(sc);
}
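
/*
 * Basic controller initialization: power up the ethernet PHY through
 * AXGE_EPPRCR and program the clock selection register.  Called from
 * both the attach and the reset paths.
 */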
static void
axge_chip_init(struct axge_softc *sc)
{
	/* Power up ethernet PHY. */
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, 0);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, EPPRCR_IPRL);
	uether_pause(&sc->sc_ue, hz / 4);
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT,
	    AXGE_CLK_SELECT_ACS | AXGE_CLK_SELECT_BCS);
	uether_pause(&sc->sc_ue, hz / 10);
}

static void
axge_reset(struct axge_softc *sc)
{
	struct usb_config_descriptor *cd;
	usb_error_t err;

	cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev);

	err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx,
	    cd->bConfigurationValue);
	if (err)
		DPRINTF("reset failed (ignored)\n");

	/* Wait a little while for the chip to get its brains in order. */
	uether_pause(&sc->sc_ue, hz / 100);

	/* Reinitialize controller to achieve full reset. */
	axge_chip_init(sc);
}

static void
axge_attach_post(struct usb_ether *ue)
{
	struct axge_softc *sc;

	sc = uether_getsc(ue);

	/* Initialize controller and get station address. */
	axge_chip_init(sc);
	axge_read_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
	    ue->ue_eaddr, ETHER_ADDR_LEN);
}

static int
axge_attach_post_sub(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	int error;

	sc = uether_getsc(ue);
	ifp = ue->ue_ifp;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = uether_start;
	ifp->if_ioctl = axge_ioctl;
	ifp->if_init = uether_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_TXCSUM | IFCAP_RXCSUM;
	ifp->if_hwassist = AXGE_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	mtx_lock(&Giant);
	error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
	    uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
	    BMSR_DEFCAPMASK, AXGE_PHY_ADDR, MII_OFFSET_ANY, MIIF_DOPAUSE);
	mtx_unlock(&Giant);

	return (error);
}

/*
 * Set media options.
 */
static int
axge_ifmedia_upd(struct ifnet *ifp)
{
	struct axge_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	mii = GET_MII(sc);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);

	return (error);
}

/*
 * Report current media status.
 */
static void
axge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct axge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = GET_MII(sc);
	AXGE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	AXGE_UNLOCK(sc);
}

/*
 * Probe for an AX88179 chip.
 */
static int
axge_probe(device_t dev)
{
	struct usb_attach_arg *uaa;

	uaa = device_get_ivars(dev);
	if (uaa->usb_mode != USB_MODE_HOST)
		return (ENXIO);
	if (uaa->info.bConfigIndex != AXGE_CONFIG_IDX)
		return (ENXIO);
	if (uaa->info.bIfaceIndex != AXGE_IFACE_IDX)
		return (ENXIO);

	return (usbd_lookup_id_by_uaa(axge_devs, sizeof(axge_devs), uaa));
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
axge_attach(device_t dev)
{
	struct usb_attach_arg *uaa;
	struct axge_softc *sc;
	struct usb_ether *ue;
	uint8_t iface_index;
	int error;

	uaa = device_get_ivars(dev);
	sc = device_get_softc(dev);
	ue = &sc->sc_ue;

	device_set_usb_desc(dev);
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	iface_index = AXGE_IFACE_IDX;
	error = usbd_transfer_setup(uaa->device, &iface_index,
	    sc->sc_xfer, axge_config, AXGE_N_TRANSFER, sc, &sc->sc_mtx);
	if (error) {
		device_printf(dev, "allocating USB transfers failed\n");
		mtx_destroy(&sc->sc_mtx);
		return (ENXIO);
	}

	ue->ue_sc = sc;
	ue->ue_dev = dev;
	ue->ue_udev = uaa->device;
	ue->ue_mtx = &sc->sc_mtx;
	ue->ue_methods = &axge_ue_methods;

	error = uether_ifattach(ue);
	if (error) {
		device_printf(dev, "could not attach interface\n");
		goto detach;
	}
	return (0);			/* success */

detach:
	axge_detach(dev);
	return (ENXIO);			/* failure */
}

static int
axge_detach(device_t dev)
{
	struct axge_softc *sc;
	struct usb_ether *ue;
	uint16_t val;

	sc = device_get_softc(dev);
	ue = &sc->sc_ue;
	if (device_is_attached(dev)) {
		/* Wait for any post attach or other command to complete. */
		usb_proc_drain(&ue->ue_tq);

		AXGE_LOCK(sc);
		/*
		 * XXX
		 * ether_ifdetach(9) should be called first.
		 */
		axge_stop(ue);
		/* Force bulk-in to return a zero-length USB packet. */
		val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR);
		val |= EPPRCR_BZ | EPPRCR_IPRL;
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, val);
		/* Change clock. */
		axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT, 0);
		/* Disable MAC. */
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, 0);
		AXGE_UNLOCK(sc);
	}
	usbd_transfer_unsetup(sc->sc_xfer, AXGE_N_TRANSFER);
	uether_ifdetach(ue);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}
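
/*
 * RX bulk transfer completion handler.  A single bulk-in transfer may
 * carry several aggregated frames; axge_rx_frame() parses the trailing
 * header and passes each frame to axge_rxeof().
 */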
static void
axge_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct axge_softc *sc;
	struct usb_ether *ue;
	struct usb_page_cache *pc;
	int actlen;

	sc = usbd_xfer_softc(xfer);
	ue = &sc->sc_ue;
	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		pc = usbd_xfer_get_frame(xfer, 0);
		axge_rx_frame(ue, pc, actlen);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		uether_rxflush(ue);
		break;

	default:
		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}

static void
axge_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	struct usb_page_cache *pc;
	struct mbuf *m;
	struct axge_frame_txhdr txhdr;
	int nframes, pos;

	sc = usbd_xfer_softc(xfer);
	ifp = uether_getifp(&sc->sc_ue);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		if ((sc->sc_flags & AXGE_FLAG_LINK) == 0 ||
		    (ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) {
			/*
			 * Don't send anything if there is no link or
			 * the controller is busy.
			 */
			return;
		}

		for (nframes = 0; nframes < AXGE_N_FRAMES &&
		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd); nframes++) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;
			usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES,
			    nframes);
			pc = usbd_xfer_get_frame(xfer, nframes);
			txhdr.mss = 0;
			txhdr.len = htole32(AXGE_TXBYTES(m->m_pkthdr.len));
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0 &&
			    (m->m_pkthdr.csum_flags & AXGE_CSUM_FEATURES) == 0)
				txhdr.len |= htole32(AXGE_CSUM_DISABLE);

			pos = 0;
			usbd_copy_in(pc, pos, &txhdr, sizeof(txhdr));
			pos += sizeof(txhdr);
			usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len);
			pos += m->m_pkthdr.len;

			/*
			 * If there's a BPF listener, bounce a copy
			 * of this frame to him.
			 */
			BPF_MTAP(ifp, m);

			m_freem(m);

			/* Set frame length. */
			usbd_xfer_set_frame_len(xfer, nframes, pos);
		}
		if (nframes != 0) {
			/*
			 * XXX
			 * Update the TX packet counter here.  This is not
			 * the correct way, but there seems to be no way to
			 * know how many packets were sent at the end of the
			 * transfer, because the controller combines multiple
			 * writes into a single one when there is room in its
			 * TX buffer.
			 */
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, nframes);
			usbd_xfer_set_frames(xfer, nframes);
			usbd_transfer_submit(xfer);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		}
		return;
		/* NOTREACHED */
	default:
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		return;
	}
}

static void
axge_tick(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct mii_data *mii;

	sc = uether_getsc(ue);
	mii = GET_MII(sc);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	mii_tick(mii);
}
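
/*
 * Program the RX filter.  In promiscuous or all-multicast mode all
 * multicast frames are accepted; otherwise a 64-bit hash table is
 * built from the upper bits of the CRC32 of each multicast group
 * address and written to the AXGE_MFA registers.
 */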
static void
axge_rxfilter(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t h;
	uint16_t rxmode;
	uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);
	h = 0;
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Configure RX settings.
	 * Don't set RCR_IPE (IP header alignment on a 32-bit boundary), so
	 * that no extra padding bytes are inserted.  The padding wastes
	 * ethernet-to-USB-host bandwidth and complicates the RX handling
	 * logic.  The current USB framework requires copying RX frames to
	 * mbufs, so there is no need to worry about alignment.
	 */
	rxmode = RCR_DROP_CRCERR | RCR_START;
	if (ifp->if_flags & IFF_BROADCAST)
		rxmode |= RCR_ACPT_BCAST;
	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		if (ifp->if_flags & IFF_PROMISC)
			rxmode |= RCR_PROMISC;
		rxmode |= RCR_ACPT_ALL_MCAST;
		axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
		return;
	}

	rxmode |= RCR_ACPT_MCAST;
	if_maddr_rlock(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		hashtbl[h / 8] |= 1 << (h % 8);
	}
	if_maddr_runlock(ifp);

	axge_write_mem(sc, AXGE_ACCESS_MAC, 8, AXGE_MFA, (void *)&hashtbl, 8);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
}

static void
axge_start(struct usb_ether *ue)
{
	struct axge_softc *sc;

	sc = uether_getsc(ue);
	/*
	 * Start the USB transfers, if not already started.
	 */
	usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_RD]);
	usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_WR]);
}

static void
axge_init(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	axge_stop(ue);

	axge_reset(sc);

	/* Set MAC address. */
	axge_write_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
	    IF_LLADDR(ifp), ETHER_ADDR_LEN);

	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLLR, 0x34);
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLHR, 0x52);

	/* Configure TX/RX checksum offloading. */
	axge_csum_cfg(ue);

	/* Configure RX filters. */
	axge_rxfilter(ue);

	/*
	 * XXX
	 * The controller supports wakeup on link change detection,
	 * magic packet and wakeup frame reception, but there seems
	 * to be no framework for USB ethernet suspend/wakeup.
	 * Disable all wakeup functions.
	 */
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR, 0);
	(void)axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR);

	/* Configure default medium type. */
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, MSR_GM | MSR_FD |
	    MSR_RFC | MSR_TFC | MSR_RE);

	usbd_xfer_set_stall(sc->sc_xfer[AXGE_BULK_DT_WR]);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	/* Switch to selected media. */
	axge_ifmedia_upd(ifp);
}

static void
axge_stop(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	uint16_t val;

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR);
	val &= ~MSR_RE;
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);

	if (ifp != NULL)
		ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~AXGE_FLAG_LINK;

	/*
	 * Stop all the transfers, if not already stopped.
	 */
	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_WR]);
	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_RD]);
}

static int
axge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct usb_ether *ue;
	struct axge_softc *sc;
	struct ifreq *ifr;
	int error, mask, reinit;

	ue = ifp->if_softc;
	sc = uether_getsc(ue);
	ifr = (struct ifreq *)data;
	error = 0;
	reinit = 0;
	if (cmd == SIOCSIFCAP) {
		AXGE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= AXGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~AXGE_CSUM_FEATURES;
			reinit++;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit++;
		}
		if (reinit > 0 && ifp->if_drv_flags & IFF_DRV_RUNNING)
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		else
			reinit = 0;
		AXGE_UNLOCK(sc);
		if (reinit > 0)
			uether_init(ue);
	} else
		error = uether_ioctl(ifp, cmd, data);

	return (error);
}

static void
axge_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen)
{
	struct axge_frame_rxhdr pkt_hdr;
	uint32_t rxhdr;
	uint32_t pos;
	uint32_t pkt_cnt, pkt_end;
	uint32_t hdr_off;
	uint32_t pktlen;

	/* Verify we have enough data. */
	if (actlen < (int)sizeof(rxhdr))
		return;

	pos = 0;

	usbd_copy_out(pc, actlen - sizeof(rxhdr), &rxhdr, sizeof(rxhdr));
	rxhdr = le32toh(rxhdr);

	pkt_cnt = rxhdr & 0xFFFF;
	hdr_off = pkt_end = (rxhdr >> 16) & 0xFFFF;

	/*
	 * <----------------------- actlen ------------------------>
	 * [frame #0]...[frame #N][pkt_hdr #0]...[pkt_hdr #N][rxhdr]
	 * Each RX frame is aligned on an 8-byte boundary.  If the
	 * RCR_IPE bit is set in the AXGE_RCR register, there would
	 * be 2 padding bytes and 6 dummy bytes (as the padding also
	 * has to be aligned on an 8-byte boundary) for each RX frame
	 * to align the IP header on a 32-bit boundary.  The driver
	 * doesn't set the RCR_IPE bit of the AXGE_RCR register, so
	 * there should be no padding bytes, which simplifies the RX
	 * logic a lot.
	 */
	while (pkt_cnt--) {
		/* Verify the header offset. */
		if ((int)(hdr_off + sizeof(pkt_hdr)) > actlen) {
			DPRINTF("End of packet headers\n");
			break;
		}
		usbd_copy_out(pc, hdr_off, &pkt_hdr, sizeof(pkt_hdr));
		pkt_hdr.status = le32toh(pkt_hdr.status);
		pktlen = AXGE_RXBYTES(pkt_hdr.status);
		if (pos + pktlen > pkt_end) {
			DPRINTF("Data position reached end\n");
			break;
		}

		if (AXGE_RX_ERR(pkt_hdr.status) != 0) {
			DPRINTF("Dropped a packet\n");
			if_inc_counter(ue->ue_ifp, IFCOUNTER_IERRORS, 1);
		} else
			axge_rxeof(ue, pc, pos, pktlen, pkt_hdr.status);
		pos += (pktlen + 7) & ~7;
		hdr_off += sizeof(pkt_hdr);
	}
}
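
/*
 * Hand a single received frame to the network stack: copy it from the
 * USB page cache into an mbuf, translate the hardware checksum status
 * bits into mbuf checksum flags and enqueue it on the usb_ether RX
 * queue for uether_rxflush().
 */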
static void
axge_rxeof(struct usb_ether *ue, struct usb_page_cache *pc, unsigned int offset,
    unsigned int len, uint32_t status)
{
	struct ifnet *ifp;
	struct mbuf *m;

	ifp = ue->ue_ifp;
	if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) {
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		return;
	}

	if (len > MHLEN - ETHER_ALIGN)
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	else
		m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
		return;
	}
	m->m_pkthdr.rcvif = ifp;
	m->m_len = m->m_pkthdr.len = len;
	m->m_data += ETHER_ALIGN;

	usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);

	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
		if ((status & AXGE_RX_L3_CSUM_ERR) == 0 &&
		    (status & AXGE_RX_L3_TYPE_MASK) == AXGE_RX_L3_TYPE_IPV4)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
			    CSUM_IP_VALID;
		if ((status & AXGE_RX_L4_CSUM_ERR) == 0 &&
		    ((status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_UDP ||
		    (status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_TCP)) {
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	(void)mbufq_enqueue(&ue->ue_rxq, m);
}

static void
axge_csum_cfg(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	uint8_t csum;

	sc = uether_getsc(ue);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);
	ifp = uether_getifp(ue);

	csum = 0;
	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
		csum |= CTCR_IP | CTCR_TCP | CTCR_UDP;
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CTCR, csum);

	csum = 0;
	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		csum |= CRCR_IP | CRCR_TCP | CRCR_UDP;
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CRCR, csum);
}