/*-
 * Copyright (c) 2015-2016 Kevin Lo <kevlo@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sbuf.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>

/* needed for checksum offload */
#include <netinet/in.h>
#include <netinet/ip.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include "usbdevs.h"

#define	USB_DEBUG_VAR	ure_debug
#include <dev/usb/usb_debug.h>
#include <dev/usb/usb_process.h>

#include <dev/usb/net/usb_ethernet.h>
#include <dev/usb/net/if_urereg.h>

#include "miibus_if.h"

#include "opt_inet6.h"

#ifdef USB_DEBUG
static int ure_debug = 0;

static SYSCTL_NODE(_hw_usb, OID_AUTO, ure, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "USB ure");
SYSCTL_INT(_hw_usb_ure, OID_AUTO, debug, CTLFLAG_RWTUN, &ure_debug, 0,
    "Debug level");
#endif

#ifdef USB_DEBUG_VAR
#ifdef USB_DEBUG
#define	DEVPRINTFN(n,dev,fmt,...) do {				\
	if ((USB_DEBUG_VAR) >= (n)) {				\
		device_printf((dev), "%s: " fmt,		\
		    __FUNCTION__ ,##__VA_ARGS__);		\
	}							\
} while (0)
#define	DEVPRINTF(...)	DEVPRINTFN(1, __VA_ARGS__)
#else
#define	DEVPRINTF(...) do { } while (0)
#define	DEVPRINTFN(...) do { } while (0)
#endif
#endif

/*
 * Various supported device vendors/products.
 */
static const STRUCT_USB_HOST_ID ure_devs[] = {
#define	URE_DEV(v,p,i)	{ USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, i) }
	URE_DEV(LENOVO, RTL8153, 0),
	URE_DEV(LENOVO, TBT3LAN, 0),
	URE_DEV(LENOVO, ONELINK, 0),
	URE_DEV(LENOVO, USBCLAN, 0),
	URE_DEV(NVIDIA, RTL8153, 0),
	URE_DEV(REALTEK, RTL8152, URE_FLAG_8152),
	URE_DEV(REALTEK, RTL8153, 0),
	URE_DEV(TPLINK, RTL8153, 0),
#undef URE_DEV
};

static device_probe_t ure_probe;
static device_attach_t ure_attach;
static device_detach_t ure_detach;

static usb_callback_t ure_bulk_read_callback;
static usb_callback_t ure_bulk_write_callback;

static miibus_readreg_t ure_miibus_readreg;
static miibus_writereg_t ure_miibus_writereg;
static miibus_statchg_t ure_miibus_statchg;

static uether_fn_t ure_attach_post;
static uether_fn_t ure_init;
static uether_fn_t ure_stop;
static uether_fn_t ure_start;
static uether_fn_t ure_tick;
static uether_fn_t ure_rxfilter;

static int	ure_ctl(struct ure_softc *, uint8_t, uint16_t, uint16_t,
		    void *, int);
static int	ure_read_mem(struct ure_softc *, uint16_t, uint16_t, void *,
		    int);
static int	ure_write_mem(struct ure_softc *, uint16_t, uint16_t, void *,
		    int);
static uint8_t	ure_read_1(struct ure_softc *, uint16_t, uint16_t);
static uint16_t	ure_read_2(struct ure_softc *, uint16_t, uint16_t);
static uint32_t	ure_read_4(struct ure_softc *, uint16_t, uint16_t);
static int	ure_write_1(struct ure_softc *, uint16_t, uint16_t, uint32_t);
static int	ure_write_2(struct ure_softc *, uint16_t, uint16_t, uint32_t);
static int	ure_write_4(struct ure_softc *, uint16_t, uint16_t, uint32_t);
static uint16_t	ure_ocp_reg_read(struct ure_softc *, uint16_t);
static void	ure_ocp_reg_write(struct ure_softc *, uint16_t, uint16_t);

static int	ure_sysctl_chipver(SYSCTL_HANDLER_ARGS);

static void	ure_read_chipver(struct ure_softc *);
static int	ure_attach_post_sub(struct usb_ether *);
static void	ure_reset(struct ure_softc *);
static int	ure_ifmedia_upd(struct ifnet *);
static void	ure_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int	ure_ioctl(struct ifnet *, u_long, caddr_t);
static void	ure_rtl8152_init(struct ure_softc *);
static void	ure_rtl8153_init(struct ure_softc *);
static void	ure_disable_teredo(struct ure_softc *);
static void	ure_init_fifo(struct ure_softc *);
static void	ure_rxcsum(int capenb, struct ure_rxpkt *rp, struct mbuf *m);
static int	ure_txcsum(struct mbuf *m, int caps, uint32_t *regout);

static const struct usb_config ure_config_rx[URE_N_TRANSFER] = {
	{
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = URE_TRANSFER_SIZE,
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
		.callback = ure_bulk_read_callback,
		.timeout = 0,	/* no timeout */
	},
	{
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = URE_TRANSFER_SIZE,
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
		.callback = ure_bulk_read_callback,
		.timeout = 0,	/* no timeout */
	},
#if URE_N_TRANSFER == 4
	{
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = URE_TRANSFER_SIZE,
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
		.callback = ure_bulk_read_callback,
		.timeout = 0,	/* no timeout */
	},
	{
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = URE_TRANSFER_SIZE,
		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
		.callback = ure_bulk_read_callback,
		.timeout = 0,	/* no timeout */
	},
#endif
};

static const struct usb_config ure_config_tx[URE_N_TRANSFER] = {
	{
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = URE_TRANSFER_SIZE,
		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
		.callback = ure_bulk_write_callback,
		.timeout = 10000,	/* 10 seconds */
	},
	{
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = URE_TRANSFER_SIZE,
		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
		.callback = ure_bulk_write_callback,
		.timeout = 10000,	/* 10 seconds */
	},
#if URE_N_TRANSFER == 4
	{
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = URE_TRANSFER_SIZE,
		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
		.callback = ure_bulk_write_callback,
		.timeout = 10000,	/* 10 seconds */
	},
	{
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = URE_TRANSFER_SIZE,
		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
		.callback = ure_bulk_write_callback,
		.timeout = 10000,	/* 10 seconds */
	},
#endif
};

static device_method_t ure_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe, ure_probe),
	DEVMETHOD(device_attach, ure_attach),
	DEVMETHOD(device_detach, ure_detach),

	/* MII interface. */
	DEVMETHOD(miibus_readreg, ure_miibus_readreg),
	DEVMETHOD(miibus_writereg, ure_miibus_writereg),
	DEVMETHOD(miibus_statchg, ure_miibus_statchg),

	DEVMETHOD_END
};

static driver_t ure_driver = {
	.name = "ure",
	.methods = ure_methods,
	.size = sizeof(struct ure_softc),
};

static devclass_t ure_devclass;

DRIVER_MODULE(ure, uhub, ure_driver, ure_devclass, NULL, NULL);
DRIVER_MODULE(miibus, ure, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(ure, uether, 1, 1, 1);
MODULE_DEPEND(ure, usb, 1, 1, 1);
MODULE_DEPEND(ure, ether, 1, 1, 1);
MODULE_DEPEND(ure, miibus, 1, 1, 1);
MODULE_VERSION(ure, 1);
USB_PNP_HOST_INFO(ure_devs);

static const struct usb_ether_methods ure_ue_methods = {
	.ue_attach_post = ure_attach_post,
	.ue_attach_post_sub = ure_attach_post_sub,
	.ue_start = ure_start,
	.ue_init = ure_init,
	.ue_stop = ure_stop,
	.ue_tick = ure_tick,
	.ue_setmulti = ure_rxfilter,
	.ue_setpromisc = ure_rxfilter,
	.ue_mii_upd = ure_ifmedia_upd,
	.ue_mii_sts = ure_ifmedia_sts,
};

static int
ure_ctl(struct ure_softc *sc, uint8_t rw, uint16_t val, uint16_t index,
    void *buf, int len)
{
	struct usb_device_request req;

	URE_LOCK_ASSERT(sc, MA_OWNED);

	if (rw == URE_CTL_WRITE)
		req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	else
		req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = UR_SET_ADDRESS;
	USETW(req.wValue, val);
	USETW(req.wIndex, index);
	USETW(req.wLength, len);

	return (uether_do_request(&sc->sc_ue, &req, buf, 1000));
}

static int
ure_read_mem(struct ure_softc *sc, uint16_t addr, uint16_t index,
    void *buf, int len)
{

	return (ure_ctl(sc, URE_CTL_READ, addr, index, buf, len));
}

static int
ure_write_mem(struct ure_softc *sc, uint16_t addr, uint16_t index,
    void *buf, int len)
{

	return (ure_ctl(sc, URE_CTL_WRITE, addr, index, buf, len));
}

static uint8_t
ure_read_1(struct ure_softc *sc, uint16_t reg, uint16_t index)
{
	uint32_t val;
	uint8_t temp[4];
	uint8_t shift;

	shift = (reg & 3) << 3;
	reg &= ~3;

	ure_read_mem(sc, reg, index, &temp, 4);
	val = UGETDW(temp);
	val >>= shift;

	return (val & 0xff);
}

static uint16_t
ure_read_2(struct ure_softc *sc, uint16_t reg, uint16_t index)
{
	uint32_t val;
	uint8_t temp[4];
	uint8_t shift;

	shift = (reg & 2) << 3;
	reg &= ~3;

	ure_read_mem(sc, reg, index, &temp, 4);
	val = UGETDW(temp);
	val >>= shift;

	return (val & 0xffff);
}

static uint32_t
ure_read_4(struct ure_softc *sc, uint16_t reg, uint16_t index)
{
	uint8_t temp[4];

	ure_read_mem(sc, reg, index, &temp, 4);
	return (UGETDW(temp));
}

static int
ure_write_1(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val)
{
	uint16_t byen;
	uint8_t temp[4];
	uint8_t shift;

	byen = URE_BYTE_EN_BYTE;
	shift = reg & 3;
	val &= 0xff;

	if (reg & 3) {
		byen <<= shift;
		val <<= (shift << 3);
		reg &= ~3;
	}

	USETDW(temp, val);
	return (ure_write_mem(sc, reg, index | byen, &temp, 4));
}

static int
ure_write_2(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val)
{
	uint16_t byen;
	uint8_t temp[4];
	uint8_t shift;

	byen = URE_BYTE_EN_WORD;
	shift = reg & 2;
	val &= 0xffff;

	if (reg & 2) {
		byen <<= shift;
		val <<= (shift << 3);
		reg &= ~3;
	}

	USETDW(temp, val);
	return (ure_write_mem(sc, reg, index | byen, &temp, 4));
}

static int
ure_write_4(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val)
{
	uint8_t temp[4];

	USETDW(temp, val);
	return (ure_write_mem(sc, reg, index | URE_BYTE_EN_DWORD, &temp, 4));
}

static uint16_t
ure_ocp_reg_read(struct ure_softc *sc, uint16_t addr)
{
	uint16_t reg;

	ure_write_2(sc, URE_PLA_OCP_GPHY_BASE, URE_MCU_TYPE_PLA, addr & 0xf000);
	reg = (addr & 0x0fff) | 0xb000;

	return (ure_read_2(sc, reg, URE_MCU_TYPE_PLA));
}

static void
ure_ocp_reg_write(struct ure_softc *sc, uint16_t addr, uint16_t data)
{
	uint16_t reg;

	ure_write_2(sc, URE_PLA_OCP_GPHY_BASE, URE_MCU_TYPE_PLA, addr & 0xf000);
	reg = (addr & 0x0fff) | 0xb000;

	ure_write_2(sc, reg, URE_MCU_TYPE_PLA, data);
}
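
/*
 * Editor's note on PHY (OCP) register access: the chip exposes the GPHY
 * registers through an indirect window at PLA addresses 0xb000-0xbfff.
 * ure_ocp_reg_read()/ure_ocp_reg_write() above first latch the upper bits
 * of the OCP address into URE_PLA_OCP_GPHY_BASE and then touch the low
 * 12 bits through that window.  The miibus accessors below simply map MII
 * register numbers onto OCP addresses, e.g. (illustrative only):
 *
 *	bmcr = ure_ocp_reg_read(sc, URE_OCP_BASE_MII + MII_BMCR * 2);
 */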

static int
ure_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ure_softc *sc;
	uint16_t val;
	int locked;

	sc = device_get_softc(dev);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		URE_LOCK(sc);

	/* Let the rgephy driver read the URE_GMEDIASTAT register. */
	if (reg == URE_GMEDIASTAT) {
		/* Read while the lock is still held; ure_ctl asserts it. */
		val = ure_read_1(sc, URE_GMEDIASTAT, URE_MCU_TYPE_PLA);
		if (!locked)
			URE_UNLOCK(sc);
		return (val);
	}

	val = ure_ocp_reg_read(sc, URE_OCP_BASE_MII + reg * 2);

	if (!locked)
		URE_UNLOCK(sc);
	return (val);
}

static int
ure_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct ure_softc *sc;
	int locked;

	sc = device_get_softc(dev);
	if (sc->sc_phyno != phy)
		return (0);

	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		URE_LOCK(sc);

	ure_ocp_reg_write(sc, URE_OCP_BASE_MII + reg * 2, val);

	if (!locked)
		URE_UNLOCK(sc);
	return (0);
}

static void
ure_miibus_statchg(device_t dev)
{
	struct ure_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	int locked;

	sc = device_get_softc(dev);
	mii = GET_MII(sc);
	locked = mtx_owned(&sc->sc_mtx);
	if (!locked)
		URE_LOCK(sc);

	ifp = uether_getifp(&sc->sc_ue);
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done;

	sc->sc_flags &= ~URE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->sc_flags |= URE_FLAG_LINK;
			sc->sc_rxstarted = 0;
			break;
		case IFM_1000_T:
			if ((sc->sc_flags & URE_FLAG_8152) != 0)
				break;
			sc->sc_flags |= URE_FLAG_LINK;
			sc->sc_rxstarted = 0;
			break;
		default:
			break;
		}
	}

	/* Lost link, do nothing. */
	if ((sc->sc_flags & URE_FLAG_LINK) == 0)
		goto done;
done:
	if (!locked)
		URE_UNLOCK(sc);
}

/*
 * Probe for a RTL8152/RTL8153 chip.
 */
static int
ure_probe(device_t dev)
{
	struct usb_attach_arg *uaa;

	uaa = device_get_ivars(dev);
	if (uaa->usb_mode != USB_MODE_HOST)
		return (ENXIO);
	if (uaa->info.bConfigIndex != URE_CONFIG_IDX)
		return (ENXIO);
	if (uaa->info.bIfaceIndex != URE_IFACE_IDX)
		return (ENXIO);

	return (usbd_lookup_id_by_uaa(ure_devs, sizeof(ure_devs), uaa));
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
ure_attach(device_t dev)
{
	struct usb_attach_arg *uaa = device_get_ivars(dev);
	struct ure_softc *sc = device_get_softc(dev);
	struct usb_ether *ue = &sc->sc_ue;
	uint8_t iface_index;
	int error;

	sc->sc_flags = USB_GET_DRIVER_INFO(uaa);
	device_set_usb_desc(dev);
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

	iface_index = URE_IFACE_IDX;
	error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_rx_xfer,
	    ure_config_rx, URE_N_TRANSFER, sc, &sc->sc_mtx);
	if (error != 0) {
		device_printf(dev, "allocating USB RX transfers failed\n");
		goto detach;
	}

	error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_tx_xfer,
	    ure_config_tx, URE_N_TRANSFER, sc, &sc->sc_mtx);
	if (error != 0) {
		usbd_transfer_unsetup(sc->sc_rx_xfer, URE_N_TRANSFER);
		device_printf(dev, "allocating USB TX transfers failed\n");
		goto detach;
	}

	/* Mark all TX transfers as available */
	for (int i = 0; i < URE_N_TRANSFER; i++) {
		sc->sc_txavail[i] = sc->sc_tx_xfer[i];
		DEVPRINTF(dev, "sc_txavail[%d] = %p\n", i, sc->sc_txavail[i]);
	}
	sc->sc_txpos = 0;

	ue->ue_sc = sc;
	ue->ue_dev = dev;
	ue->ue_udev = uaa->device;
	ue->ue_mtx = &sc->sc_mtx;
	ue->ue_methods = &ure_ue_methods;

	error = uether_ifattach(ue);
	if (error != 0) {
		device_printf(dev, "could not attach interface\n");
		goto detach;
	}
	return (0);			/* success */

detach:
	ure_detach(dev);
	return (ENXIO);			/* failure */
}

static int
ure_detach(device_t dev)
{
	struct ure_softc *sc = device_get_softc(dev);
	struct usb_ether *ue = &sc->sc_ue;

	usbd_transfer_unsetup(sc->sc_tx_xfer, URE_N_TRANSFER);
	usbd_transfer_unsetup(sc->sc_rx_xfer, URE_N_TRANSFER);
	uether_ifdetach(ue);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}
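
/*
 * Editor's note on TX transfer bookkeeping: sc_txavail[] together with
 * sc_txpos forms a small LIFO of idle bulk-out transfers.  ure_attach()
 * loads every TX xfer into the array and zeroes sc_txpos; ure_start()
 * hands out sc_txavail[sc_txpos++] and raises IFF_DRV_OACTIVE once all
 * URE_N_TRANSFER slots are in flight; ure_bulk_write_callback() returns
 * a completed (or unused) xfer with sc_txavail[--sc_txpos] and clears
 * IFF_DRV_OACTIVE again.
 */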

/*
 * Copy from USB buffers to a new mbuf chain with pkt header.
 *
 * This will use m_getm2 to get a mbuf chain w/ properly sized mbuf
 * clusters as necessary.
 */
static struct mbuf *
ure_makembuf(struct usb_page_cache *pc, usb_frlength_t offset,
    usb_frlength_t len)
{
	struct usb_page_search_res;
	struct mbuf *m, *mb;
	usb_frlength_t tlen;

	m = m_getm2(NULL, len + ETHER_ALIGN, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (m);

	/* uether_newbuf does this. */
	m_adj(m, ETHER_ALIGN);

	m->m_pkthdr.len = len;

	for (mb = m; len > 0; mb = mb->m_next) {
		tlen = MIN(len, M_TRAILINGSPACE(mb));

		usbd_copy_out(pc, offset, mtod(mb, uint8_t *), tlen);
		mb->m_len = tlen;

		offset += tlen;
		len -= tlen;
	}

	return (m);
}
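
/*
 * Editor's sketch of the RX aggregation format, as parsed by
 * ure_bulk_read_callback() below: the device packs several Ethernet
 * frames into one bulk-in transfer.  Each frame is preceded by a
 * struct ure_rxpkt descriptor carrying the length (which includes the
 * trailing CRC), the VLAN tag and the checksum status, and every
 * descriptor+frame record is padded to URE_RXPKT_ALIGN bytes.  Roughly:
 *
 *	[ure_rxpkt][frame + CRC][pad][ure_rxpkt][frame + CRC][pad]...
 */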

static void
ure_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct ure_softc *sc = usbd_xfer_softc(xfer);
	struct usb_ether *ue = &sc->sc_ue;
	struct ifnet *ifp = uether_getifp(ue);
	struct usb_page_cache *pc;
	struct mbuf *m;
	struct ure_rxpkt pkt;
	int actlen, off, len;
	int caps;
	uint32_t pktcsum;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		off = 0;
		pc = usbd_xfer_get_frame(xfer, 0);
		caps = if_getcapenable(ifp);
		DEVPRINTFN(13, sc->sc_ue.ue_dev, "rcb start\n");
		while (actlen > 0) {
			if (actlen < (int)(sizeof(pkt))) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				goto tr_setup;
			}
			usbd_copy_out(pc, off, &pkt, sizeof(pkt));

			off += sizeof(pkt);
			actlen -= sizeof(pkt);

			len = le32toh(pkt.ure_pktlen) & URE_RXPKT_LEN_MASK;

			DEVPRINTFN(13, sc->sc_ue.ue_dev,
			    "rxpkt: %#x, %#x, %#x, %#x, %#x, %#x\n",
			    pkt.ure_pktlen, pkt.ure_csum, pkt.ure_misc,
			    pkt.ure_rsvd2, pkt.ure_rsvd3, pkt.ure_rsvd4);
			DEVPRINTFN(13, sc->sc_ue.ue_dev, "len: %d\n", len);

			if (len >= URE_RXPKT_LEN_MASK) {
				/*
				 * Drop the rest of this transfer.  Without
				 * more information, we cannot know where the
				 * next packet starts.  Blindly continuing
				 * would allow a packet-in-packet attack,
				 * letting one VLAN inject packets w/o a VLAN
				 * tag, or inject packets into other VLANs.
				 */
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				goto tr_setup;
			}

			if (actlen < len) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				goto tr_setup;
			}

			if (len != 0)
				m = ure_makembuf(pc, off, len - ETHER_CRC_LEN);
			else
				m = NULL;
			if (m == NULL) {
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			} else {
				/* make mbuf and queue */
				pktcsum = le32toh(pkt.ure_csum);
				if (caps & IFCAP_VLAN_HWTAGGING &&
				    pktcsum & URE_RXPKT_RX_VLAN_TAG) {
					m->m_pkthdr.ether_vtag =
					    bswap16(pktcsum &
					    URE_RXPKT_VLAN_MASK);
					m->m_flags |= M_VLANTAG;
				}

				/* set the necessary flags for rx checksum */
				ure_rxcsum(caps, &pkt, m);

				uether_rxmbuf(ue, m, len - ETHER_CRC_LEN);
			}

			off += roundup(len, URE_RXPKT_ALIGN);
			actlen -= roundup(len, URE_RXPKT_ALIGN);
		}
		DEVPRINTFN(13, sc->sc_ue.ue_dev, "rcb end\n");

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		uether_rxflush(ue);
		return;

	default:			/* Error */
		DPRINTF("bulk read error, %s\n",
		    usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		return;
	}
}
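
/*
 * Editor's note: TX aggregation is the mirror image of the RX path.
 * ure_bulk_write_callback() below packs as many queued mbufs as fit into
 * one bulk-out transfer, prefixing each frame with a struct ure_txpkt
 * that carries the length plus the URE_TKPKT_TX_FS/URE_TKPKT_TX_LS
 * markers, the VLAN tag and the checksum-offload request bits, and pads
 * every record to URE_TXPKT_ALIGN bytes.
 */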

static void
ure_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct ure_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	struct usb_page_cache *pc;
	struct mbuf *m;
	struct ure_txpkt txpkt;
	uint32_t regtmp;
	int len, pos;
	int rem;
	int caps;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(11, "transfer complete\n");
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		if ((sc->sc_flags & URE_FLAG_LINK) == 0) {
			/* don't send anything if there is no link! */
			break;
		}

		pc = usbd_xfer_get_frame(xfer, 0);
		caps = if_getcapenable(ifp);

		pos = 0;
		rem = URE_TRANSFER_SIZE;
		while (rem > sizeof(txpkt)) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;

			/*
			 * make sure we don't ever send too large of a
			 * packet
			 */
			len = m->m_pkthdr.len;
			if ((len & URE_TXPKT_LEN_MASK) != len) {
				device_printf(sc->sc_ue.ue_dev,
				    "pkt len too large: %#x", len);
pkterror:
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
				m_freem(m);
				continue;
			}

			if (sizeof(txpkt) +
			    roundup(len, URE_TXPKT_ALIGN) > rem) {
				/* out of space */
				IFQ_DRV_PREPEND(&ifp->if_snd, m);
				m = NULL;
				break;
			}

			txpkt = (struct ure_txpkt){};
			txpkt.ure_pktlen = htole32((len & URE_TXPKT_LEN_MASK) |
			    URE_TKPKT_TX_FS | URE_TKPKT_TX_LS);
			if (m->m_flags & M_VLANTAG) {
				txpkt.ure_csum = htole32(
				    bswap16(m->m_pkthdr.ether_vtag &
				    URE_TXPKT_VLAN_MASK) | URE_TXPKT_VLAN);
			}
			if (ure_txcsum(m, caps, &regtmp)) {
				device_printf(sc->sc_ue.ue_dev,
				    "pkt l4 off too large");
				goto pkterror;
			}
			txpkt.ure_csum |= htole32(regtmp);

			DEVPRINTFN(13, sc->sc_ue.ue_dev,
			    "txpkt: mbflg: %#x, %#x, %#x\n",
			    m->m_pkthdr.csum_flags, le32toh(txpkt.ure_pktlen),
			    le32toh(txpkt.ure_csum));

			usbd_copy_in(pc, pos, &txpkt, sizeof(txpkt));

			pos += sizeof(txpkt);
			rem -= sizeof(txpkt);

			usbd_m_copy_in(pc, pos, m, 0, len);

			pos += roundup(len, URE_TXPKT_ALIGN);
			rem -= roundup(len, URE_TXPKT_ALIGN);

			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

			/*
			 * If there's a BPF listener, bounce a copy
			 * of this frame to him.
			 */
			BPF_MTAP(ifp, m);

			m_freem(m);
		}

		/* no packets to send */
		if (pos == 0)
			break;

		/* Set frame length. */
		usbd_xfer_set_frame_len(xfer, 0, pos);

		usbd_transfer_submit(xfer);

		KASSERT(sc->sc_txpos >= 0 && sc->sc_txpos <= URE_N_TRANSFER,
		    ("sc_txpos invalid: %d", sc->sc_txpos));
		if (sc->sc_txpos < URE_N_TRANSFER &&
		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
			xfer = sc->sc_txavail[sc->sc_txpos++];
			usbd_transfer_start(xfer);
		}

		if (sc->sc_txpos == URE_N_TRANSFER)
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		return;

	default:			/* Error */
		DPRINTFN(11, "transfer error, %s\n",
		    usbd_errstr(error));

		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		if (error == USB_ERR_TIMEOUT) {
			DEVPRINTFN(12, sc->sc_ue.ue_dev,
			    "pkt tx timeout\n");
		}

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
	}

	KASSERT(sc->sc_txpos > 0 && sc->sc_txpos <= URE_N_TRANSFER,
	    ("sc_txpos invalid: %d", sc->sc_txpos));
	sc->sc_txavail[--sc->sc_txpos] = xfer;
	if (sc->sc_txpos < URE_N_TRANSFER)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static void
ure_read_chipver(struct ure_softc *sc)
{
	uint16_t ver;

	ver = ure_read_2(sc, URE_PLA_TCR1, URE_MCU_TYPE_PLA) & URE_VERSION_MASK;
	sc->sc_ver = ver;
	switch (ver) {
	case 0x4c00:
		sc->sc_chip |= URE_CHIP_VER_4C00;
		break;
	case 0x4c10:
		sc->sc_chip |= URE_CHIP_VER_4C10;
		break;
	case 0x5c00:
		sc->sc_chip |= URE_CHIP_VER_5C00;
		break;
	case 0x5c10:
		sc->sc_chip |= URE_CHIP_VER_5C10;
		break;
	case 0x5c20:
		sc->sc_chip |= URE_CHIP_VER_5C20;
		break;
	case 0x5c30:
		sc->sc_chip |= URE_CHIP_VER_5C30;
		break;
	default:
		device_printf(sc->sc_ue.ue_dev,
		    "unknown version 0x%04x\n", ver);
		break;
	}
}

static int
ure_sysctl_chipver(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sb;
	struct ure_softc *sc = arg1;
	int error;

	sbuf_new_for_sysctl(&sb, NULL, 0, req);

	sbuf_printf(&sb, "%04x", sc->sc_ver);

	error = sbuf_finish(&sb);
	sbuf_delete(&sb);

	return (error);
}
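
/*
 * Editor's note: the handler above backs a per-device, read-only sysctl
 * registered in ure_attach_post().  Assuming the usual device sysctl tree
 * layout, it appears as, e.g.:
 *
 *	# sysctl dev.ure.0.chipver
 *	dev.ure.0.chipver: 5c30
 */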

static void
ure_attach_post(struct usb_ether *ue)
{
	struct ure_softc *sc = uether_getsc(ue);
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;

	sc->sc_rxstarted = 0;
	sc->sc_phyno = 0;

	/* Determine the chip version. */
	ure_read_chipver(sc);

	/* Initialize controller and get station address. */
	if (sc->sc_flags & URE_FLAG_8152)
		ure_rtl8152_init(sc);
	else
		ure_rtl8153_init(sc);

	if ((sc->sc_chip & URE_CHIP_VER_4C00) ||
	    (sc->sc_chip & URE_CHIP_VER_4C10))
		ure_read_mem(sc, URE_PLA_IDR, URE_MCU_TYPE_PLA,
		    ue->ue_eaddr, 8);
	else
		ure_read_mem(sc, URE_PLA_BACKUP, URE_MCU_TYPE_PLA,
		    ue->ue_eaddr, 8);

	if (ETHER_IS_ZERO(sc->sc_ue.ue_eaddr)) {
		device_printf(sc->sc_ue.ue_dev, "MAC assigned randomly\n");
		arc4rand(sc->sc_ue.ue_eaddr, ETHER_ADDR_LEN, 0);
		sc->sc_ue.ue_eaddr[0] &= ~0x01;	/* unicast */
		sc->sc_ue.ue_eaddr[0] |= 0x02;	/* locally administered */
	}

	sctx = device_get_sysctl_ctx(sc->sc_ue.ue_dev);
	soid = device_get_sysctl_tree(sc->sc_ue.ue_dev);
	SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "chipver",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    ure_sysctl_chipver, "A",
	    "Return string with chip version.");
}

static int
ure_attach_post_sub(struct usb_ether *ue)
{
	struct ure_softc *sc;
	struct ifnet *ifp;
	int error;

	sc = uether_getsc(ue);
	ifp = ue->ue_ifp;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = uether_start;
	ifp->if_ioctl = ure_ioctl;
	ifp->if_init = uether_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	/*
	 * Try to keep two transfers full at a time.
	 * ~(TRANSFER_SIZE / 80 bytes/pkt * 2 buffers in flight)
	 */
	ifp->if_snd.ifq_drv_maxlen = 512;
	IFQ_SET_READY(&ifp->if_snd);

	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM|IFCAP_HWCSUM, 0);
	if_sethwassist(ifp, CSUM_IP|CSUM_IP_UDP|CSUM_IP_TCP);
#ifdef INET6
	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM_IPV6, 0);
#endif
	if_setcapenable(ifp, if_getcapabilities(ifp));

	mtx_lock(&Giant);
	error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
	    uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
	    BMSR_DEFCAPMASK, sc->sc_phyno, MII_OFFSET_ANY, 0);
	mtx_unlock(&Giant);

	return (error);
}

static void
ure_init(struct usb_ether *ue)
{
	struct ure_softc *sc = uether_getsc(ue);
	struct ifnet *ifp = uether_getifp(ue);
	uint16_t cpcr;

	URE_LOCK_ASSERT(sc, MA_OWNED);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/* Cancel pending I/O. */
	ure_stop(ue);

	ure_reset(sc);

	/* Set MAC address. */
	ure_write_1(sc, URE_PLA_CRWECR, URE_MCU_TYPE_PLA, URE_CRWECR_CONFIG);
	ure_write_mem(sc, URE_PLA_IDR, URE_MCU_TYPE_PLA | URE_BYTE_EN_SIX_BYTES,
	    IF_LLADDR(ifp), 8);
	ure_write_1(sc, URE_PLA_CRWECR, URE_MCU_TYPE_PLA, URE_CRWECR_NORAML);

	/* Reset the packet filter. */
	ure_write_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA,
	    ure_read_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA) &
	    ~URE_FMC_FCR_MCU_EN);
	ure_write_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA,
	    ure_read_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA) |
	    URE_FMC_FCR_MCU_EN);

	/* Enable RX VLAN tag stripping if requested. */
	cpcr = ure_read_2(sc, URE_PLA_CPCR, URE_MCU_TYPE_PLA);
	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
		DEVPRINTFN(12, sc->sc_ue.ue_dev, "enabled hw vlan tag\n");
		cpcr |= URE_CPCR_RX_VLAN;
	} else {
		DEVPRINTFN(12, sc->sc_ue.ue_dev, "disabled hw vlan tag\n");
		cpcr &= ~URE_CPCR_RX_VLAN;
	}
	ure_write_2(sc, URE_PLA_CPCR, URE_MCU_TYPE_PLA, cpcr);

	/* Enable transmit and receive. */
	ure_write_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA,
	    ure_read_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA) | URE_CR_RE |
	    URE_CR_TE);

	ure_write_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA,
	    ure_read_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA) &
	    ~URE_RXDY_GATED_EN);

	/* Configure RX filters. */
	ure_rxfilter(ue);

	usbd_xfer_set_stall(sc->sc_tx_xfer[0]);

	/* Indicate we are up and running. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	/* Switch to selected media. */
	ure_ifmedia_upd(ifp);
}

static void
ure_tick(struct usb_ether *ue)
{
	struct ure_softc *sc = uether_getsc(ue);
	struct ifnet *ifp = uether_getifp(ue);
	struct mii_data *mii = GET_MII(sc);

	URE_LOCK_ASSERT(sc, MA_OWNED);

	KASSERT(sc->sc_txpos >= 0 && sc->sc_txpos <= URE_N_TRANSFER,
	    ("sc_txpos invalid: %d", sc->sc_txpos));
	(void)ifp;
	DEVPRINTFN(13, sc->sc_ue.ue_dev,
	    "sc_txpos: %d, oactive: %d\n", sc->sc_txpos,
	    !!(ifp->if_drv_flags & IFF_DRV_OACTIVE));
	for (int i = 0; i < URE_N_TRANSFER; i++)
		DEVPRINTFN(13, sc->sc_ue.ue_dev,
		    "rx[%d] = %d\n", i, USB_GET_STATE(sc->sc_rx_xfer[i]));

	for (int i = 0; i < URE_N_TRANSFER; i++)
		DEVPRINTFN(13, sc->sc_ue.ue_dev,
		    "tx[%d] = %d\n", i, USB_GET_STATE(sc->sc_tx_xfer[i]));

	mii_tick(mii);
	if ((sc->sc_flags & URE_FLAG_LINK) == 0 &&
	    mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->sc_flags |= URE_FLAG_LINK;
		sc->sc_rxstarted = 0;
		ure_start(ue);
	}
}

static u_int
ure_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t h, *hashes = arg;

	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
	if (h < 32)
		hashes[0] |= (1 << h);
	else
		hashes[1] |= (1 << (h - 32));
	return (1);
}
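
/*
 * Editor's note: ure_hash_maddr() above implements the usual Realtek
 * multicast hash; the top 6 bits of the big-endian CRC32 of each
 * multicast address select one of 64 bits spread across the two 32-bit
 * MAR registers.  ure_rxfilter() below byte-swaps the two words before
 * programming URE_PLA_MAR0/URE_PLA_MAR4 and falls back to an all-ones
 * filter in promiscuous/allmulti mode.
 */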

/*
 * Program the 64-bit multicast hash filter.
 */
static void
ure_rxfilter(struct usb_ether *ue)
{
	struct ure_softc *sc = uether_getsc(ue);
	struct ifnet *ifp = uether_getifp(ue);
	uint32_t rxmode;
	uint32_t h, hashes[2] = { 0, 0 };

	URE_LOCK_ASSERT(sc, MA_OWNED);

	rxmode = ure_read_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA);
	rxmode &= ~(URE_RCR_AAP | URE_RCR_AM);
	rxmode |= URE_RCR_APM;	/* accept physical match packets */
	rxmode |= URE_RCR_AB;	/* always accept broadcasts */
	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		if (ifp->if_flags & IFF_PROMISC)
			rxmode |= URE_RCR_AAP;
		rxmode |= URE_RCR_AM;
		hashes[0] = hashes[1] = 0xffffffff;
		goto done;
	}

	/* calculate multicast masks */
	if_foreach_llmaddr(ifp, ure_hash_maddr, &hashes);

	h = bswap32(hashes[0]);
	hashes[0] = bswap32(hashes[1]);
	hashes[1] = h;
	rxmode |= URE_RCR_AM;	/* accept multicast packets */

done:
	DEVPRINTFN(14, ue->ue_dev, "rxfilt: RCR: %#x\n",
	    ure_read_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA));
	ure_write_4(sc, URE_PLA_MAR0, URE_MCU_TYPE_PLA, hashes[0]);
	ure_write_4(sc, URE_PLA_MAR4, URE_MCU_TYPE_PLA, hashes[1]);
	ure_write_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA, rxmode);
}

static void
ure_start(struct usb_ether *ue)
{
	struct ure_softc *sc = uether_getsc(ue);
	struct usb_xfer *xfer;
	struct ifnet *ifp;

	URE_LOCK_ASSERT(sc, MA_OWNED);

	if (!sc->sc_rxstarted) {
		sc->sc_rxstarted = 1;
		for (int i = 0; i < URE_N_TRANSFER; i++)
			usbd_transfer_start(sc->sc_rx_xfer[i]);
	}

	/*
	 * start the USB transfers, if not already started:
	 */
	if (sc->sc_txpos == URE_N_TRANSFER) {
		ifp = uether_getifp(&sc->sc_ue);

		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		return;
	}

	KASSERT(sc->sc_txpos >= 0 && sc->sc_txpos < URE_N_TRANSFER,
	    ("sc_txpos invalid: %d", sc->sc_txpos));
	xfer = sc->sc_txavail[sc->sc_txpos++];
	if (sc->sc_txpos == URE_N_TRANSFER) {
		ifp = uether_getifp(&sc->sc_ue);
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	}
	usbd_transfer_start(xfer);
}

static void
ure_reset(struct ure_softc *sc)
{
	int i;

	ure_write_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA, URE_CR_RST);

	for (i = 0; i < URE_TIMEOUT; i++) {
		if (!(ure_read_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA) &
		    URE_CR_RST))
			break;
		uether_pause(&sc->sc_ue, hz / 100);
	}
	if (i == URE_TIMEOUT)
		device_printf(sc->sc_ue.ue_dev, "reset never completed\n");
}

/*
 * Set media options.
 */
static int
ure_ifmedia_upd(struct ifnet *ifp)
{
	struct ure_softc *sc = ifp->if_softc;
	struct mii_data *mii = GET_MII(sc);
	struct mii_softc *miisc;
	int error;

	URE_LOCK_ASSERT(sc, MA_OWNED);

	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	return (error);
}

/*
 * Report current media status.
 */
static void
ure_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ure_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = GET_MII(sc);

	URE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	URE_UNLOCK(sc);
}

static int
ure_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct usb_ether *ue = ifp->if_softc;
	struct ure_softc *sc;
	struct ifreq *ifr;
	int error, mask, reinit;

	sc = uether_getsc(ue);
	ifr = (struct ifreq *)data;
	error = 0;
	reinit = 0;
	switch (cmd) {
	case SIOCSIFCAP:
		URE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit++;
		}
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
		}
		if ((mask & IFCAP_TXCSUM_IPV6) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM_IPV6) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
		}
		if ((mask & IFCAP_RXCSUM_IPV6) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM_IPV6) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		}
		if (reinit > 0 && ifp->if_drv_flags & IFF_DRV_RUNNING)
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		else
			reinit = 0;
		URE_UNLOCK(sc);
		if (reinit > 0)
			uether_init(ue);
		break;

	case SIOCSIFMTU:
		/*
		 * In testing, large MTUs "crash" the device: it is left
		 * in a broken state where the link never recovers, so
		 * restrict the MTU accordingly.
		 */
		if (ifr->ifr_mtu < ETHERMIN ||
		    ifr->ifr_mtu > (4096 - ETHER_HDR_LEN -
		    ETHER_VLAN_ENCAP_LEN - ETHER_CRC_LEN)) {
			error = EINVAL;
			break;
		}
		URE_LOCK(sc);
		if (if_getmtu(ifp) != ifr->ifr_mtu)
			if_setmtu(ifp, ifr->ifr_mtu);
		URE_UNLOCK(sc);
		break;

	default:
		error = uether_ioctl(ifp, cmd, data);
	}

	return (error);
}

static void
ure_rtl8152_init(struct ure_softc *sc)
{
	uint32_t pwrctrl;

	/* Disable ALDPS. */
	ure_ocp_reg_write(sc, URE_OCP_ALDPS_CONFIG, URE_ENPDNPS | URE_LINKENA |
	    URE_DIS_SDSAVE);
	uether_pause(&sc->sc_ue, hz / 50);

	if (sc->sc_chip & URE_CHIP_VER_4C00) {
		ure_write_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA,
		    ure_read_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA) &
		    ~URE_LED_MODE_MASK);
	}

	ure_write_2(sc, URE_USB_UPS_CTRL, URE_MCU_TYPE_USB,
	    ure_read_2(sc, URE_USB_UPS_CTRL, URE_MCU_TYPE_USB) &
	    ~URE_POWER_CUT);
	ure_write_2(sc, URE_USB_PM_CTRL_STATUS, URE_MCU_TYPE_USB,
	    ure_read_2(sc, URE_USB_PM_CTRL_STATUS, URE_MCU_TYPE_USB) &
	    ~URE_RESUME_INDICATE);

	ure_write_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA,
	    ure_read_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA) |
	    URE_TX_10M_IDLE_EN | URE_PFM_PWM_SWITCH);
	pwrctrl = ure_read_4(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA);
	pwrctrl &= ~URE_MCU_CLK_RATIO_MASK;
	pwrctrl |= URE_MCU_CLK_RATIO | URE_D3_CLK_GATED_EN;
	ure_write_4(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA, pwrctrl);
	ure_write_2(sc, URE_PLA_GPHY_INTR_IMR, URE_MCU_TYPE_PLA,
	    URE_GPHY_STS_MSK | URE_SPEED_DOWN_MSK | URE_SPDWN_RXDV_MSK |
	    URE_SPDWN_LINKCHG_MSK);

	/* Enable Rx aggregation. */
	ure_write_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB,
	    ure_read_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB) &
	    ~URE_RX_AGG_DISABLE);

	/* Disable ALDPS. */
	ure_ocp_reg_write(sc, URE_OCP_ALDPS_CONFIG, URE_ENPDNPS | URE_LINKENA |
	    URE_DIS_SDSAVE);
	uether_pause(&sc->sc_ue, hz / 50);

	ure_init_fifo(sc);

	ure_write_1(sc, URE_USB_TX_AGG, URE_MCU_TYPE_USB,
	    URE_TX_AGG_MAX_THRESHOLD);
	ure_write_4(sc, URE_USB_RX_BUF_TH, URE_MCU_TYPE_USB, URE_RX_THR_HIGH);
	ure_write_4(sc, URE_USB_TX_DMA, URE_MCU_TYPE_USB,
	    URE_TEST_MODE_DISABLE | URE_TX_SIZE_ADJUST1);
}

static void
ure_rtl8153_init(struct ure_softc *sc)
{
	uint16_t val;
	uint8_t u1u2[8];
	int i;

	/* Disable ALDPS. */
	ure_ocp_reg_write(sc, URE_OCP_POWER_CFG,
	    ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) & ~URE_EN_ALDPS);
	uether_pause(&sc->sc_ue, hz / 50);

	memset(u1u2, 0x00, sizeof(u1u2));
	ure_write_mem(sc, URE_USB_TOLERANCE,
	    URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2));

	for (i = 0; i < URE_TIMEOUT; i++) {
		if (ure_read_2(sc, URE_PLA_BOOT_CTRL, URE_MCU_TYPE_PLA) &
		    URE_AUTOLOAD_DONE)
			break;
		uether_pause(&sc->sc_ue, hz / 100);
	}
	if (i == URE_TIMEOUT)
		device_printf(sc->sc_ue.ue_dev,
		    "timeout waiting for chip autoload\n");

	for (i = 0; i < URE_TIMEOUT; i++) {
		val = ure_ocp_reg_read(sc, URE_OCP_PHY_STATUS) &
		    URE_PHY_STAT_MASK;
		if (val == URE_PHY_STAT_LAN_ON || val == URE_PHY_STAT_PWRDN)
			break;
		uether_pause(&sc->sc_ue, hz / 100);
	}
	if (i == URE_TIMEOUT)
		device_printf(sc->sc_ue.ue_dev,
		    "timeout waiting for phy to stabilize\n");

	ure_write_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB,
	    ure_read_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB) &
	    ~URE_U2P3_ENABLE);

	if (sc->sc_chip & URE_CHIP_VER_5C10) {
		val = ure_read_2(sc, URE_USB_SSPHYLINK2, URE_MCU_TYPE_USB);
		val &= ~URE_PWD_DN_SCALE_MASK;
		val |= URE_PWD_DN_SCALE(96);
		ure_write_2(sc, URE_USB_SSPHYLINK2, URE_MCU_TYPE_USB, val);

		ure_write_1(sc, URE_USB_USB2PHY, URE_MCU_TYPE_USB,
		    ure_read_1(sc, URE_USB_USB2PHY, URE_MCU_TYPE_USB) |
		    URE_USB2PHY_L1 | URE_USB2PHY_SUSPEND);
	} else if (sc->sc_chip & URE_CHIP_VER_5C20) {
		ure_write_1(sc, URE_PLA_DMY_REG0, URE_MCU_TYPE_PLA,
		    ure_read_1(sc, URE_PLA_DMY_REG0, URE_MCU_TYPE_PLA) &
		    ~URE_ECM_ALDPS);
	}
	if (sc->sc_chip & (URE_CHIP_VER_5C20 | URE_CHIP_VER_5C30)) {
		val = ure_read_1(sc, URE_USB_CSR_DUMMY1, URE_MCU_TYPE_USB);
		if (ure_read_2(sc, URE_USB_BURST_SIZE, URE_MCU_TYPE_USB) ==
		    0)
			val &= ~URE_DYNAMIC_BURST;
		else
			val |= URE_DYNAMIC_BURST;
		ure_write_1(sc, URE_USB_CSR_DUMMY1, URE_MCU_TYPE_USB, val);
	}

	ure_write_1(sc, URE_USB_CSR_DUMMY2, URE_MCU_TYPE_USB,
	    ure_read_1(sc, URE_USB_CSR_DUMMY2, URE_MCU_TYPE_USB) |
	    URE_EP4_FULL_FC);

	ure_write_2(sc, URE_USB_WDT11_CTRL, URE_MCU_TYPE_USB,
	    ure_read_2(sc, URE_USB_WDT11_CTRL, URE_MCU_TYPE_USB) &
	    ~URE_TIMER11_EN);

	ure_write_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA,
	    ure_read_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA) &
	    ~URE_LED_MODE_MASK);

	if ((sc->sc_chip & URE_CHIP_VER_5C10) &&
	    usbd_get_speed(sc->sc_ue.ue_udev) != USB_SPEED_SUPER)
		val = URE_LPM_TIMER_500MS;
	else
		val = URE_LPM_TIMER_500US;
	ure_write_1(sc, URE_USB_LPM_CTRL, URE_MCU_TYPE_USB,
	    val | URE_FIFO_EMPTY_1FB | URE_ROK_EXIT_LPM);

	val = ure_read_2(sc, URE_USB_AFE_CTRL2, URE_MCU_TYPE_USB);
	val &= ~URE_SEN_VAL_MASK;
	val |= URE_SEN_VAL_NORMAL | URE_SEL_RXIDLE;
	ure_write_2(sc, URE_USB_AFE_CTRL2, URE_MCU_TYPE_USB, val);

	ure_write_2(sc, URE_USB_CONNECT_TIMER, URE_MCU_TYPE_USB, 0x0001);

	ure_write_2(sc, URE_USB_POWER_CUT, URE_MCU_TYPE_USB,
	    ure_read_2(sc, URE_USB_POWER_CUT, URE_MCU_TYPE_USB) &
	    ~(URE_PWR_EN | URE_PHASE2_EN));
	ure_write_2(sc, URE_USB_MISC_0, URE_MCU_TYPE_USB,
	    ure_read_2(sc, URE_USB_MISC_0, URE_MCU_TYPE_USB) &
	    ~URE_PCUT_STATUS);

	memset(u1u2, 0xff, sizeof(u1u2));
	ure_write_mem(sc, URE_USB_TOLERANCE,
	    URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2));

	ure_write_2(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA,
	    URE_ALDPS_SPDWN_RATIO);
	ure_write_2(sc, URE_PLA_MAC_PWR_CTRL2, URE_MCU_TYPE_PLA,
	    URE_EEE_SPDWN_RATIO);
	ure_write_2(sc, URE_PLA_MAC_PWR_CTRL3, URE_MCU_TYPE_PLA,
	    URE_PKT_AVAIL_SPDWN_EN | URE_SUSPEND_SPDWN_EN |
	    URE_U1U2_SPDWN_EN | URE_L1_SPDWN_EN);
	ure_write_2(sc, URE_PLA_MAC_PWR_CTRL4, URE_MCU_TYPE_PLA,
	    URE_PWRSAVE_SPDWN_EN | URE_RXDV_SPDWN_EN | URE_TX10MIDLE_EN |
	    URE_TP100_SPDWN_EN | URE_TP500_SPDWN_EN | URE_TP1000_SPDWN_EN |
	    URE_EEE_SPDWN_EN);

	val = ure_read_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB);
	if (!(sc->sc_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10)))
		val |= URE_U2P3_ENABLE;
	else
		val &= ~URE_U2P3_ENABLE;
	ure_write_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB, val);

	memset(u1u2, 0x00, sizeof(u1u2));
	ure_write_mem(sc, URE_USB_TOLERANCE,
	    URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2));

	/* Disable ALDPS. */
	ure_ocp_reg_write(sc, URE_OCP_POWER_CFG,
	    ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) & ~URE_EN_ALDPS);
	uether_pause(&sc->sc_ue, hz / 50);

	ure_init_fifo(sc);

	/* Enable Rx aggregation. */
	ure_write_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB,
	    ure_read_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB) &
	    ~URE_RX_AGG_DISABLE);

	val = ure_read_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB);
	if (!(sc->sc_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10)))
		val |= URE_U2P3_ENABLE;
	else
		val &= ~URE_U2P3_ENABLE;
	ure_write_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB, val);

	memset(u1u2, 0xff, sizeof(u1u2));
	ure_write_mem(sc, URE_USB_TOLERANCE,
	    URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2));
}

static void
ure_stop(struct usb_ether *ue)
{
	struct ure_softc *sc = uether_getsc(ue);
	struct ifnet *ifp = uether_getifp(ue);

	URE_LOCK_ASSERT(sc, MA_OWNED);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~URE_FLAG_LINK;
	sc->sc_rxstarted = 0;

	/*
	 * stop all the transfers, if not already stopped:
	 */
	for (int i = 0; i < URE_N_TRANSFER; i++) {
		usbd_transfer_stop(sc->sc_rx_xfer[i]);
		usbd_transfer_stop(sc->sc_tx_xfer[i]);
	}
}

static void
ure_disable_teredo(struct ure_softc *sc)
{

	ure_write_4(sc, URE_PLA_TEREDO_CFG, URE_MCU_TYPE_PLA,
	    ure_read_4(sc, URE_PLA_TEREDO_CFG, URE_MCU_TYPE_PLA) &
	    ~(URE_TEREDO_SEL | URE_TEREDO_RS_EVENT_MASK | URE_OOB_TEREDO_EN));
	ure_write_2(sc, URE_PLA_WDT6_CTRL, URE_MCU_TYPE_PLA,
	    URE_WDT6_SET_MODE);
	ure_write_2(sc, URE_PLA_REALWOW_TIMER, URE_MCU_TYPE_PLA, 0);
	ure_write_4(sc, URE_PLA_TEREDO_TIMER, URE_MCU_TYPE_PLA, 0);
}

static void
ure_init_fifo(struct ure_softc *sc)
{
	uint32_t rx_fifo1, rx_fifo2;
	int i;

	ure_write_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA,
	    ure_read_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA) |
	    URE_RXDY_GATED_EN);

	ure_disable_teredo(sc);

	DEVPRINTFN(14, sc->sc_ue.ue_dev, "init_fifo: RCR: %#x\n",
	    ure_read_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA));
	ure_write_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA,
	    ure_read_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA) &
	    ~URE_RCR_ACPT_ALL);

	if (!(sc->sc_flags & URE_FLAG_8152)) {
		if (sc->sc_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10 |
		    URE_CHIP_VER_5C20)) {
			ure_ocp_reg_write(sc, URE_OCP_ADC_CFG,
			    URE_CKADSEL_L | URE_ADC_EN | URE_EN_EMI_L);
		}
		if (sc->sc_chip & URE_CHIP_VER_5C00) {
			ure_ocp_reg_write(sc, URE_OCP_EEE_CFG,
			    ure_ocp_reg_read(sc, URE_OCP_EEE_CFG) &
			    ~URE_CTAP_SHORT_EN);
		}
		ure_ocp_reg_write(sc, URE_OCP_POWER_CFG,
		    ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) |
		    URE_EEE_CLKDIV_EN);
		ure_ocp_reg_write(sc, URE_OCP_DOWN_SPEED,
		    ure_ocp_reg_read(sc, URE_OCP_DOWN_SPEED) |
		    URE_EN_10M_BGOFF);
		ure_ocp_reg_write(sc, URE_OCP_POWER_CFG,
		    ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) |
		    URE_EN_10M_PLLOFF);
		ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_IMPEDANCE);
		ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0x0b13);
		ure_write_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA,
		    ure_read_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA) |
		    URE_PFM_PWM_SWITCH);

		/* Enable LPF corner auto tune. */
		ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_LPF_CFG);
		ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0xf70f);

		/* Adjust 10M amplitude. */
		ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_10M_AMP1);
		ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0x00af);
		ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_10M_AMP2);
		ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0x0208);
	}

	ure_reset(sc);

	ure_write_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA, 0);

	ure_write_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA,
	    ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) &
	    ~URE_NOW_IS_OOB);

	ure_write_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA,
	    ure_read_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA) &
	    ~URE_MCU_BORW_EN);
	for (i = 0; i < URE_TIMEOUT; i++) {
		if (ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) &
		    URE_LINK_LIST_READY)
			break;
		uether_pause(&sc->sc_ue, hz / 100);
	}
	if (i == URE_TIMEOUT)
		device_printf(sc->sc_ue.ue_dev,
		    "timeout waiting for OOB control\n");
	ure_write_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA,
	    ure_read_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA) |
	    URE_RE_INIT_LL);
	for (i = 0; i < URE_TIMEOUT; i++) {
		if (ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) &
		    URE_LINK_LIST_READY)
			break;
		uether_pause(&sc->sc_ue, hz / 100);
	}
	if (i == URE_TIMEOUT)
		device_printf(sc->sc_ue.ue_dev,
		    "timeout waiting for OOB control\n");

	ure_write_2(sc, URE_PLA_CPCR, URE_MCU_TYPE_PLA,
	    ure_read_2(sc, URE_PLA_CPCR, URE_MCU_TYPE_PLA) &
	    ~URE_CPCR_RX_VLAN);
	ure_write_2(sc, URE_PLA_TCR0, URE_MCU_TYPE_PLA,
	    ure_read_2(sc, URE_PLA_TCR0, URE_MCU_TYPE_PLA) |
	    URE_TCR0_AUTO_FIFO);

	/* Configure Rx FIFO threshold. */
	ure_write_4(sc, URE_PLA_RXFIFO_CTRL0, URE_MCU_TYPE_PLA,
	    URE_RXFIFO_THR1_NORMAL);
	if (usbd_get_speed(sc->sc_ue.ue_udev) == USB_SPEED_FULL) {
		rx_fifo1 = URE_RXFIFO_THR2_FULL;
		rx_fifo2 = URE_RXFIFO_THR3_FULL;
	} else {
		rx_fifo1 = URE_RXFIFO_THR2_HIGH;
		rx_fifo2 = URE_RXFIFO_THR3_HIGH;
	}
	ure_write_4(sc, URE_PLA_RXFIFO_CTRL1, URE_MCU_TYPE_PLA, rx_fifo1);
	ure_write_4(sc, URE_PLA_RXFIFO_CTRL2, URE_MCU_TYPE_PLA, rx_fifo2);

	/* Configure Tx FIFO threshold. */
	ure_write_4(sc, URE_PLA_TXFIFO_CTRL, URE_MCU_TYPE_PLA,
	    URE_TXFIFO_THR_NORMAL);
}

/*
 * Update mbuf for rx checksum from hardware
 */
static void
ure_rxcsum(int capenb, struct ure_rxpkt *rp, struct mbuf *m)
{
	int flags;
	uint32_t csum, misc;
	int tcp, udp;

	m->m_pkthdr.csum_flags = 0;

	if (!(capenb & IFCAP_RXCSUM))
		return;

	csum = le32toh(rp->ure_csum);
	misc = le32toh(rp->ure_misc);

	tcp = udp = 0;

	flags = 0;
	if (csum & URE_RXPKT_IPV4_CS)
		flags |= CSUM_IP_CHECKED;
	else if (csum & URE_RXPKT_IPV6_CS)
		flags = 0;

	/* Test the byte-swapped value, not the raw little-endian field. */
	tcp = csum & URE_RXPKT_TCP_CS;
	udp = csum & URE_RXPKT_UDP_CS;

	if (__predict_true((flags & CSUM_IP_CHECKED) &&
	    !(misc & URE_RXPKT_IP_F))) {
		flags |= CSUM_IP_VALID;
	}
	if (__predict_true(
	    (tcp && !(misc & URE_RXPKT_TCP_F)) ||
	    (udp && !(misc & URE_RXPKT_UDP_F)))) {
		flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
	}

	m->m_pkthdr.csum_flags = flags;
}

/*
 * If the L4 checksum offset is larger than 0x7ff (2047), return failure.
 * We currently restrict the MTU such that this can't happen, and even if
 * we did have a large enough MTU, only a very specially crafted IPv6
 * packet with MANY headers could possibly come close.
 *
 * Returns 0 for success, and 1 if the packet cannot be checksummed and
 * should be dropped.
 */
static int
ure_txcsum(struct mbuf *m, int caps, uint32_t *regout)
{
	struct ip ip;
	struct ether_header *eh;
	int flags;
	uint32_t data;
	uint32_t reg;
	int l3off, l4off;
	uint16_t type;

	*regout = 0;
	flags = m->m_pkthdr.csum_flags;
	if (flags == 0)
		return (0);

	if (__predict_true(m->m_len >= (int)sizeof(*eh))) {
		eh = mtod(m, struct ether_header *);
		type = eh->ether_type;
	} else
		m_copydata(m, offsetof(struct ether_header, ether_type),
		    sizeof(type), (caddr_t)&type);

	switch (type = htons(type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		l3off = ETHER_HDR_LEN;
		break;
	case ETHERTYPE_VLAN:
		/* XXX - what about QinQ? */
		l3off = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;
	default:
		return (0);
	}

	reg = 0;

	if (flags & CSUM_IP)
		reg |= URE_TXPKT_IPV4_CS;

	data = m->m_pkthdr.csum_data;
	if (flags & (CSUM_IP_TCP | CSUM_IP_UDP)) {
		m_copydata(m, l3off, sizeof(ip), (caddr_t)&ip);
		l4off = l3off + (ip.ip_hl << 2) + data;
		if (__predict_false(l4off > URE_L4_OFFSET_MAX))
			return (1);

		reg |= URE_TXPKT_IPV4_CS;
		if (flags & CSUM_IP_TCP)
			reg |= URE_TXPKT_TCP_CS;
		else if (flags & CSUM_IP_UDP)
			reg |= URE_TXPKT_UDP_CS;
		reg |= l4off << URE_L4_OFFSET_SHIFT;
	}
#ifdef INET6
	else if (flags & (CSUM_IP6_TCP | CSUM_IP6_UDP)) {
		l4off = l3off + data;
		if (__predict_false(l4off > URE_L4_OFFSET_MAX))
			return (1);

		reg |= URE_TXPKT_IPV6_CS;
		if (flags & CSUM_IP6_TCP)
			reg |= URE_TXPKT_TCP_CS;
		else if (flags & CSUM_IP6_UDP)
			reg |= URE_TXPKT_UDP_CS;
		reg |= l4off << URE_L4_OFFSET_SHIFT;
	}
#endif
	*regout = reg;
	return (0);
}
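
/*
 * Editor's worked example for ure_txcsum() above (illustration only): for
 * a plain IPv4/TCP frame, l3off = ETHER_HDR_LEN (14), ip_hl = 5 so the IP
 * header is 20 bytes, and csum_data is the offset of the TCP checksum
 * field within the TCP header (16), giving l4off = 14 + 20 + 16 = 50.
 * That is far below URE_L4_OFFSET_MAX (0x7ff), which is why only a
 * pathological chain of IPv6 extension headers could ever approach the
 * limit the comment above describes.
 */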