/*-
 * Copyright (c) 2013-2014 Kevin Lo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * ASIX Electronics AX88178A/AX88179 USB 2.0/3.0 gigabit ethernet driver.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <net/if.h>
#include <net/if_var.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include "usbdevs.h"

#define	USB_DEBUG_VAR	axge_debug
#include <dev/usb/usb_debug.h>
#include <dev/usb/usb_process.h>

#include <dev/usb/net/usb_ethernet.h>
#include <dev/usb/net/if_axgereg.h>

/*
 * Various supported device vendors/products.
 */

static const STRUCT_USB_HOST_ID axge_devs[] = {
#define	AXGE_DEV(v,p)	{ USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) }
	AXGE_DEV(ASIX, AX88178A),
	AXGE_DEV(ASIX, AX88179),
	AXGE_DEV(DLINK, DUB1312),
	AXGE_DEV(LENOVO, GIGALAN),
	AXGE_DEV(SITECOMEU, LN032),
#undef AXGE_DEV
};

static const struct {
	uint8_t	ctrl;
	uint8_t	timer_l;
	uint8_t	timer_h;
	uint8_t	size;
	uint8_t	ifg;
} __packed axge_bulk_size[] = {
	{ 7, 0x4f, 0x00, 0x12, 0xff },
	{ 7, 0x20, 0x03, 0x16, 0xff },
	{ 7, 0xae, 0x07, 0x18, 0xff },
	{ 7, 0xcc, 0x4c, 0x18, 0x08 }
};

/* prototypes */

static device_probe_t axge_probe;
static device_attach_t axge_attach;
static device_detach_t axge_detach;

static usb_callback_t axge_bulk_read_callback;
static usb_callback_t axge_bulk_write_callback;

static miibus_readreg_t axge_miibus_readreg;
static miibus_writereg_t axge_miibus_writereg;
static miibus_statchg_t axge_miibus_statchg;

static uether_fn_t axge_attach_post;
static uether_fn_t axge_init;
static uether_fn_t axge_stop;
static uether_fn_t axge_start;
static uether_fn_t axge_tick;
static uether_fn_t axge_setmulti;
static uether_fn_t axge_setpromisc;

static int	axge_read_mem(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, void *, int);
static void	axge_write_mem(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, void *, int);
static uint8_t	axge_read_cmd_1(struct axge_softc *, uint8_t, uint16_t);
static uint16_t	axge_read_cmd_2(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t);
static void	axge_write_cmd_1(struct axge_softc *, uint8_t, uint16_t,
		    uint8_t);
static void	axge_write_cmd_2(struct axge_softc *, uint8_t, uint16_t,
		    uint16_t, uint16_t);
static void	axge_chip_init(struct axge_softc *);
static void	axge_reset(struct axge_softc *);

static int	axge_attach_post_sub(struct usb_ether *);
static int	axge_ifmedia_upd(struct ifnet *);
static void	axge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int	axge_ioctl(struct ifnet *, u_long, caddr_t);
static void	axge_rx_frame(struct usb_ether *, struct usb_page_cache *, int);
static void	axge_rxeof(struct usb_ether *, struct usb_page_cache *,
		    unsigned int, unsigned int, uint32_t);
static void	axge_csum_cfg(struct usb_ether *);

#define	AXGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef USB_DEBUG
static int axge_debug = 0;

static SYSCTL_NODE(_hw_usb, OID_AUTO, axge, CTLFLAG_RW, 0, "USB axge");
SYSCTL_INT(_hw_usb_axge, OID_AUTO, debug, CTLFLAG_RWTUN, &axge_debug, 0,
    "Debug level");
#endif

static const struct usb_config axge_config[AXGE_N_TRANSFER] = {
	[AXGE_BULK_DT_WR] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.frames = 16,
		.bufsize = 16 * MCLBYTES,
		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
		.callback = axge_bulk_write_callback,
		.timeout = 10000,	/* 10 seconds */
	},
	[AXGE_BULK_DT_RD] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = 65536,
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
		.callback = axge_bulk_read_callback,
		.timeout = 0,		/* no timeout */
	},
};

static device_method_t axge_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe, axge_probe),
	DEVMETHOD(device_attach, axge_attach),
	DEVMETHOD(device_detach, axge_detach),

	/* MII interface. */
	DEVMETHOD(miibus_readreg, axge_miibus_readreg),
	DEVMETHOD(miibus_writereg, axge_miibus_writereg),
	DEVMETHOD(miibus_statchg, axge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t axge_driver = {
	.name = "axge",
	.methods = axge_methods,
	.size = sizeof(struct axge_softc),
};

static devclass_t axge_devclass;

DRIVER_MODULE(axge, uhub, axge_driver, axge_devclass, NULL, NULL);
DRIVER_MODULE(miibus, axge, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(axge, uether, 1, 1, 1);
MODULE_DEPEND(axge, usb, 1, 1, 1);
MODULE_DEPEND(axge, ether, 1, 1, 1);
MODULE_DEPEND(axge, miibus, 1, 1, 1);
MODULE_VERSION(axge, 1);
USB_PNP_HOST_INFO(axge_devs);

static const struct usb_ether_methods axge_ue_methods = {
	.ue_attach_post = axge_attach_post,
	.ue_attach_post_sub = axge_attach_post_sub,
	.ue_start = axge_start,
	.ue_init = axge_init,
	.ue_stop = axge_stop,
	.ue_tick = axge_tick,
	.ue_setmulti = axge_setmulti,
	.ue_setpromisc = axge_setpromisc,
	.ue_mii_upd = axge_ifmedia_upd,
	.ue_mii_sts = axge_ifmedia_sts,
};

static int
axge_read_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t val, void *buf, int len)
{
	struct usb_device_request req;

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = cmd;
	USETW(req.wValue, val);
	USETW(req.wIndex, index);
	USETW(req.wLength, len);

	return (uether_do_request(&sc->sc_ue, &req, buf, 1000));
}

static void
axge_write_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t val, void *buf, int len)
{
	struct usb_device_request req;

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = cmd;
	USETW(req.wValue, val);
	USETW(req.wIndex, index);
	USETW(req.wLength, len);

	if (uether_do_request(&sc->sc_ue, &req, buf, 1000)) {
		/* Error ignored. */
	}
}

static uint8_t
axge_read_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg)
{
	uint8_t val;

	axge_read_mem(sc, cmd, 1, reg, &val, 1);
	return (val);
}

static uint16_t
axge_read_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t reg)
{
	uint8_t val[2];

	axge_read_mem(sc, cmd, index, reg, &val, 2);
	return (UGETW(val));
}

static void
axge_write_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg, uint8_t val)
{
	axge_write_mem(sc, cmd, 1, reg, &val, 1);
}

static void
axge_write_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
    uint16_t reg, uint16_t val)
{
	uint8_t temp[2];

	USETW(temp, val);
	axge_write_mem(sc, cmd, index, reg, &temp, 2);
}

static int
axge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct axge_softc *sc;
	uint16_t val;
	int locked;

	sc = device_get_softc(dev);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	val = axge_read_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy);

	if (!locked)
		AXGE_UNLOCK(sc);

	return (val);
}

static int
axge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct axge_softc *sc;
	int locked;

	sc = device_get_softc(dev);
	if (sc->sc_phyno != phy)
		return (0);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	axge_write_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy, val);

	if (!locked)
		AXGE_UNLOCK(sc);

	return (0);
}

static void
axge_miibus_statchg(device_t dev)
{
	struct axge_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint8_t link_status, tmp[5];
	uint16_t val;
	int locked;

	sc = device_get_softc(dev);
	mii = GET_MII(sc);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		AXGE_LOCK(sc);

	ifp = uether_getifp(&sc->sc_ue);
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done;

	sc->sc_flags &= ~AXGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->sc_flags |= AXGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Lost link, do nothing. */
	if ((sc->sc_flags & AXGE_FLAG_LINK) == 0)
		goto done;

	link_status = axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PLSR);

	val = 0;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		val |= MSR_FD;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			val |= MSR_TFC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			val |= MSR_RFC;
	}
	val |= MSR_RE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		val |= MSR_GM | MSR_EN_125MHZ;
		if (link_status & PLSR_USB_SS)
			memcpy(tmp, &axge_bulk_size[0], 5);
		else if (link_status & PLSR_USB_HS)
			memcpy(tmp, &axge_bulk_size[1], 5);
		else
			memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	case IFM_100_TX:
		val |= MSR_PS;
		if (link_status & (PLSR_USB_SS | PLSR_USB_HS))
			memcpy(tmp, &axge_bulk_size[2], 5);
		else
			memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	case IFM_10_T:
		memcpy(tmp, &axge_bulk_size[3], 5);
		break;
	}
	/* Rx bulk configuration. */
	axge_write_mem(sc, AXGE_ACCESS_MAC, 5, AXGE_RX_BULKIN_QCTRL, tmp, 5);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);
done:
	if (!locked)
		AXGE_UNLOCK(sc);
}

static void
axge_chip_init(struct axge_softc *sc)
{
	/* Power up ethernet PHY. */
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, 0);
	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, EPPRCR_IPRL);
	uether_pause(&sc->sc_ue, hz / 4);
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT,
	    AXGE_CLK_SELECT_ACS | AXGE_CLK_SELECT_BCS);
	uether_pause(&sc->sc_ue, hz / 10);
}

static void
axge_reset(struct axge_softc *sc)
{
	struct usb_config_descriptor *cd;
	usb_error_t err;

	cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev);

	err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx,
	    cd->bConfigurationValue);
	if (err)
		DPRINTF("reset failed (ignored)\n");

	/* Wait a little while for the chip to get its brains in order. */
	uether_pause(&sc->sc_ue, hz / 100);

	/* Reinitialize controller to achieve full reset. */
	axge_chip_init(sc);
}

static void
axge_attach_post(struct usb_ether *ue)
{
	struct axge_softc *sc;

	sc = uether_getsc(ue);
	sc->sc_phyno = 3;

	/* Initialize controller and get station address. */
	axge_chip_init(sc);
	axge_read_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
	    ue->ue_eaddr, ETHER_ADDR_LEN);
}

static int
axge_attach_post_sub(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	int error;

	sc = uether_getsc(ue);
	ifp = ue->ue_ifp;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = uether_start;
	ifp->if_ioctl = axge_ioctl;
	ifp->if_init = uether_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_TXCSUM | IFCAP_RXCSUM;
	ifp->if_hwassist = AXGE_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	mtx_lock(&Giant);
	error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
	    uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
	    BMSR_DEFCAPMASK, sc->sc_phyno, MII_OFFSET_ANY, MIIF_DOPAUSE);
	mtx_unlock(&Giant);

	return (error);
}

/*
 * Set media options.
 */
static int
axge_ifmedia_upd(struct ifnet *ifp)
{
	struct axge_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	mii = GET_MII(sc);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);

	return (error);
}

/*
 * Report current media status.
 */
static void
axge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct axge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = GET_MII(sc);
	AXGE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	AXGE_UNLOCK(sc);
}

/*
 * Probe for an AX88179 chip.
 */
static int
axge_probe(device_t dev)
{
	struct usb_attach_arg *uaa;

	uaa = device_get_ivars(dev);
	if (uaa->usb_mode != USB_MODE_HOST)
		return (ENXIO);
	if (uaa->info.bConfigIndex != AXGE_CONFIG_IDX)
		return (ENXIO);
	if (uaa->info.bIfaceIndex != AXGE_IFACE_IDX)
		return (ENXIO);

	return (usbd_lookup_id_by_uaa(axge_devs, sizeof(axge_devs), uaa));
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
axge_attach(device_t dev)
{
	struct usb_attach_arg *uaa;
	struct axge_softc *sc;
	struct usb_ether *ue;
	uint8_t iface_index;
	int error;

	uaa = device_get_ivars(dev);
	sc = device_get_softc(dev);
	ue = &sc->sc_ue;

	device_set_usb_desc(dev);
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	iface_index = AXGE_IFACE_IDX;
	error = usbd_transfer_setup(uaa->device, &iface_index,
	    sc->sc_xfer, axge_config, AXGE_N_TRANSFER, sc, &sc->sc_mtx);
	if (error) {
		device_printf(dev, "allocating USB transfers failed\n");
		goto detach;
	}

	ue->ue_sc = sc;
	ue->ue_dev = dev;
	ue->ue_udev = uaa->device;
	ue->ue_mtx = &sc->sc_mtx;
	ue->ue_methods = &axge_ue_methods;

	error = uether_ifattach(ue);
	if (error) {
		device_printf(dev, "could not attach interface\n");
		goto detach;
	}
	return (0);			/* success */

detach:
	axge_detach(dev);
	return (ENXIO);			/* failure */
}

static int
axge_detach(device_t dev)
{
	struct axge_softc *sc;
	struct usb_ether *ue;

	sc = device_get_softc(dev);
	ue = &sc->sc_ue;
	usbd_transfer_unsetup(sc->sc_xfer, AXGE_N_TRANSFER);
	uether_ifdetach(ue);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

static void
axge_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct axge_softc *sc;
	struct usb_ether *ue;
	struct usb_page_cache *pc;
	int actlen;

	sc = usbd_xfer_softc(xfer);
	ue = &sc->sc_ue;
	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		pc = usbd_xfer_get_frame(xfer, 0);
		axge_rx_frame(ue, pc, actlen);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		uether_rxflush(ue);
		break;

	default:
		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}

static void
axge_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	struct usb_page_cache *pc;
	struct mbuf *m;
	uint32_t txhdr;
	int nframes, pos;

	sc = usbd_xfer_softc(xfer);
	ifp = uether_getifp(&sc->sc_ue);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		if ((sc->sc_flags & AXGE_FLAG_LINK) == 0 ||
		    (ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) {
			/*
			 * Don't send anything if there is no link or
			 * the controller is busy.
			 */
653 */ 654 return; 655 } 656 657 for (nframes = 0; nframes < 16 && 658 !IFQ_DRV_IS_EMPTY(&ifp->if_snd); nframes++) { 659 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 660 if (m == NULL) 661 break; 662 usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES, 663 nframes); 664 pos = 0; 665 pc = usbd_xfer_get_frame(xfer, nframes); 666 txhdr = htole32(m->m_pkthdr.len); 667 usbd_copy_in(pc, 0, &txhdr, sizeof(txhdr)); 668 txhdr = 0; 669 txhdr = htole32(txhdr); 670 usbd_copy_in(pc, 4, &txhdr, sizeof(txhdr)); 671 pos += 8; 672 usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len); 673 pos += m->m_pkthdr.len; 674 if ((pos % usbd_xfer_max_framelen(xfer)) == 0) 675 txhdr |= 0x80008000; 676 677 /* 678 * XXX 679 * Update TX packet counter here. This is not 680 * correct way but it seems that there is no way 681 * to know how many packets are sent at the end 682 * of transfer because controller combines 683 * multiple writes into single one if there is 684 * room in TX buffer of controller. 685 */ 686 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 687 688 /* 689 * if there's a BPF listener, bounce a copy 690 * of this frame to him: 691 */ 692 BPF_MTAP(ifp, m); 693 694 m_freem(m); 695 696 /* Set frame length. */ 697 usbd_xfer_set_frame_len(xfer, nframes, pos); 698 } 699 if (nframes != 0) { 700 usbd_xfer_set_frames(xfer, nframes); 701 usbd_transfer_submit(xfer); 702 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 703 } 704 return; 705 /* NOTREACHED */ 706 default: 707 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 708 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 709 710 if (error != USB_ERR_CANCELLED) { 711 usbd_xfer_set_stall(xfer); 712 goto tr_setup; 713 } 714 return; 715 716 } 717 } 718 719 static void 720 axge_tick(struct usb_ether *ue) 721 { 722 struct axge_softc *sc; 723 struct mii_data *mii; 724 725 sc = uether_getsc(ue); 726 mii = GET_MII(sc); 727 AXGE_LOCK_ASSERT(sc, MA_OWNED); 728 729 mii_tick(mii); 730 if ((sc->sc_flags & AXGE_FLAG_LINK) == 0) { 731 axge_miibus_statchg(ue->ue_dev); 732 if ((sc->sc_flags & AXGE_FLAG_LINK) != 0) 733 axge_start(ue); 734 } 735 } 736 737 static void 738 axge_setmulti(struct usb_ether *ue) 739 { 740 struct axge_softc *sc; 741 struct ifnet *ifp; 742 struct ifmultiaddr *ifma; 743 uint32_t h; 744 uint16_t rxmode; 745 uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 746 747 sc = uether_getsc(ue); 748 ifp = uether_getifp(ue); 749 h = 0; 750 AXGE_LOCK_ASSERT(sc, MA_OWNED); 751 752 rxmode = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR); 753 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { 754 rxmode |= RCR_AMALL; 755 axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode); 756 return; 757 } 758 rxmode &= ~RCR_AMALL; 759 760 if_maddr_rlock(ifp); 761 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 762 if (ifma->ifma_addr->sa_family != AF_LINK) 763 continue; 764 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 765 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 766 hashtbl[h / 8] |= 1 << (h % 8); 767 } 768 if_maddr_runlock(ifp); 769 770 axge_write_mem(sc, AXGE_ACCESS_MAC, 8, AXGE_MFA, (void *)&hashtbl, 8); 771 axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode); 772 } 773 774 static void 775 axge_setpromisc(struct usb_ether *ue) 776 { 777 struct axge_softc *sc; 778 struct ifnet *ifp; 779 uint16_t rxmode; 780 781 sc = uether_getsc(ue); 782 ifp = uether_getifp(ue); 783 rxmode = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR); 784 785 if (ifp->if_flags & IFF_PROMISC) 786 rxmode |= RCR_PRO; 787 else 788 rxmode &= ~RCR_PRO; 789 790 axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode); 791 
	axge_setmulti(ue);
}

static void
axge_start(struct usb_ether *ue)
{
	struct axge_softc *sc;

	sc = uether_getsc(ue);
	/*
	 * Start the USB transfers, if not already started.
	 */
	usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_RD]);
	usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_WR]);
}

static void
axge_init(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	uint16_t rxmode;

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	axge_stop(ue);

	axge_reset(sc);

	/* Set MAC address. */
	axge_write_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
	    IF_LLADDR(ifp), ETHER_ADDR_LEN);

	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLLR, 0x34);
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLHR, 0x52);

	/* Configure TX/RX checksum offloading. */
	axge_csum_cfg(ue);

	/* Configure RX settings. */
	rxmode = (RCR_AM | RCR_SO | RCR_DROP_CRCE);
	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		rxmode |= RCR_IPE;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		rxmode |= RCR_PRO;

	if (ifp->if_flags & IFF_BROADCAST)
		rxmode |= RCR_AB;

	axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);

	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR,
	    MMSR_PME_TYPE | MMSR_PME_POL | MMSR_RWMP);

	/* Load the multicast filter. */
	axge_setmulti(ue);

	usbd_xfer_set_stall(sc->sc_xfer[AXGE_BULK_DT_WR]);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	/* Switch to selected media. */
	axge_ifmedia_upd(ifp);
}

static void
axge_stop(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;

	sc = uether_getsc(ue);
	ifp = uether_getifp(ue);

	AXGE_LOCK_ASSERT(sc, MA_OWNED);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~AXGE_FLAG_LINK;

	/*
	 * Stop all the transfers, if not already stopped:
	 */
	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_WR]);
	usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_RD]);
}

static int
axge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct usb_ether *ue;
	struct axge_softc *sc;
	struct ifreq *ifr;
	int error, mask, reinit;

	ue = ifp->if_softc;
	sc = uether_getsc(ue);
	ifr = (struct ifreq *)data;
	error = 0;
	reinit = 0;
	if (cmd == SIOCSIFCAP) {
		AXGE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= AXGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~AXGE_CSUM_FEATURES;
			reinit++;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit++;
		}
		if (reinit > 0 && ifp->if_drv_flags & IFF_DRV_RUNNING)
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		else
			reinit = 0;
		AXGE_UNLOCK(sc);
		if (reinit > 0)
			uether_init(ue);
	} else
		error = uether_ioctl(ifp, cmd, data);

	return (error);
}

static void
axge_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen)
{
	uint32_t pos;
	uint32_t pkt_cnt;
	uint32_t rxhdr;
	uint32_t pkt_hdr;
	uint32_t hdr_off;
	uint32_t pktlen;

	/* verify we have enough data */
	if (actlen < (int)sizeof(rxhdr))
		return;

	pos = 0;

	usbd_copy_out(pc, actlen - sizeof(rxhdr), &rxhdr, sizeof(rxhdr));
	rxhdr = le32toh(rxhdr);

	pkt_cnt = (uint16_t)rxhdr;
	hdr_off = (uint16_t)(rxhdr >> 16);

	while (pkt_cnt--) {
		/* verify the header offset */
		if ((int)(hdr_off + sizeof(pkt_hdr)) > actlen) {
			DPRINTF("End of packet headers\n");
			break;
		}
		if ((int)pos >= actlen) {
			DPRINTF("Data position reached end\n");
			break;
		}
		usbd_copy_out(pc, hdr_off, &pkt_hdr, sizeof(pkt_hdr));

		pkt_hdr = le32toh(pkt_hdr);
		pktlen = (pkt_hdr >> 16) & 0x1fff;
		if (pkt_hdr & (AXGE_RXHDR_CRC_ERR | AXGE_RXHDR_DROP_ERR)) {
			DPRINTF("Dropped a packet\n");
			if_inc_counter(ue->ue_ifp, IFCOUNTER_IERRORS, 1);
		}
		if (pktlen >= 6 && (int)(pos + pktlen) <= actlen) {
			axge_rxeof(ue, pc, pos + 2, pktlen - 6, pkt_hdr);
		} else {
			DPRINTF("Invalid packet pos=%d len=%d\n",
			    (int)pos, (int)pktlen);
		}
		pos += (pktlen + 7) & ~7;
		hdr_off += sizeof(pkt_hdr);
	}
}

static void
axge_rxeof(struct usb_ether *ue, struct usb_page_cache *pc,
    unsigned int offset, unsigned int len, uint32_t pkt_hdr)
{
	struct ifnet *ifp;
	struct mbuf *m;

	ifp = ue->ue_ifp;
	if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) {
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		return;
	}

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
		return;
	}
	m->m_pkthdr.rcvif = ifp;
	m->m_len = m->m_pkthdr.len = len + ETHER_ALIGN;
	m_adj(m, ETHER_ALIGN);

	usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	if ((pkt_hdr & (AXGE_RXHDR_L4CSUM_ERR | AXGE_RXHDR_L3CSUM_ERR)) == 0) {
		if ((pkt_hdr & AXGE_RXHDR_L4_TYPE_MASK) ==
		    AXGE_RXHDR_L4_TYPE_TCP ||
		    (pkt_hdr & AXGE_RXHDR_L4_TYPE_MASK) ==
		    AXGE_RXHDR_L4_TYPE_UDP) {
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}

	_IF_ENQUEUE(&ue->ue_rxq, m);
}

static void
axge_csum_cfg(struct usb_ether *ue)
{
	struct axge_softc *sc;
	struct ifnet *ifp;
	uint8_t csum;

	sc = uether_getsc(ue);
	AXGE_LOCK_ASSERT(sc, MA_OWNED);
	ifp = uether_getifp(ue);

	csum = 0;
	if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
		csum |= CTCR_IP | CTCR_TCP | CTCR_UDP;
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CTCR, csum);

	csum = 0;
	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		csum |= CRCR_IP | CRCR_TCP | CRCR_UDP;
	axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CRCR, csum);
}