1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2019-2025 Ruslan Bukin <br@bsdpad.com> 5 * 6 * This software was developed by SRI International and the University of 7 * Cambridge Computer Laboratory (Department of Computer Science and 8 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the 9 * DARPA SSITH research programme. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/tiphy.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/xilinx/if_xaereg.h>
#include <dev/xilinx/if_xaevar.h>

#include <dev/xilinx/axidma.h>

#include "miibus_if.h"

/* Accessors for the MAC register window (memory resource res[0]). */
#define	READ4(_sc, _reg) \
	bus_read_4((_sc)->res[0], _reg)
#define	WRITE4(_sc, _reg, _val) \
	bus_write_4((_sc)->res[0], _reg, _val)

#define	READ8(_sc, _reg) \
	bus_read_8((_sc)->res[0], _reg)
#define	WRITE8(_sc, _reg, _val) \
	bus_write_8((_sc)->res[0], _reg, _val)

/* Per-softc mutex guarding driver state and register sequences. */
#define	XAE_LOCK(sc)			mtx_lock(&(sc)->mtx)
#define	XAE_UNLOCK(sc)			mtx_unlock(&(sc)->mtx)
#define	XAE_ASSERT_LOCKED(sc)		mtx_assert(&(sc)->mtx, MA_OWNED)
#define	XAE_ASSERT_UNLOCKED(sc)		mtx_assert(&(sc)->mtx, MA_NOTOWNED)

/* Debug printing is compiled out by default (the #undef wins). */
#define	XAE_DEBUG
#undef	XAE_DEBUG

#ifdef XAE_DEBUG
#define	dprintf(fmt, ...)	printf(fmt, ##__VA_ARGS__)
#else
#define	dprintf(fmt, ...)
#endif

/* Queue/buffer sizing tunables. */
#define	RX_QUEUE_SIZE		64
#define	TX_QUEUE_SIZE		64
#define	NUM_RX_MBUF		16
#define	BUFRING_SIZE		8192
#define	MDIO_CLK_DIV_DEFAULT	29
#define	BUF_NPAGES		512

/* Access the PHY at fixed address 1 (used by the VCU118 fixup). */
#define	PHY1_RD(sc, _r)		\
	xae_miibus_read_reg(sc->dev, 1, _r)
#define	PHY1_WR(sc, _r, _v)	\
	xae_miibus_write_reg(sc->dev, 1, _r, _v)

/* Access the PHY discovered via the "phy-handle" FDT property. */
#define	PHY_RD(sc, _r)		\
	xae_miibus_read_reg(sc->dev, sc->phy_addr, _r)
#define	PHY_WR(sc, _r, _v)	\
	xae_miibus_write_reg(sc->dev, sc->phy_addr, _r, _v)

/* Use this macro to access regs > 0x1f */
#define	WRITE_TI_EREG(sc, reg, data) {					\
	PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK);			\
	PHY_WR(sc, MII_MMDAADR, reg);					\
	PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK | MMDACR_FN_DATANPI);	\
	PHY_WR(sc, MII_MMDAADR, data);					\
}

/* Not documented, Xilinx VCU118 workaround */
#define	CFG4_SGMII_TMR		0x160	/* bits 8:7 MUST be '10' */
#define	DP83867_SGMIICTL1	0xD3	/* not documented register */
#define	SGMIICTL1_SGMII_6W	(1 << 14) /* no idea what it is */

/* One memory window and one interrupt line. */
static struct resource_spec xae_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

static void xae_stop_locked(struct xae_softc *sc);
static void xae_setup_rxfilter(struct xae_softc *sc);

/*
 * Allocate n mbuf clusters and hand them to the RX DMA channel.
 * Returns 0 on success, -1 if an mbuf could not be allocated.
 * Note: the caller is responsible for xdma_queue_submit().
 */
static int
xae_rx_enqueue(struct xae_softc *sc, uint32_t n)
{
	struct mbuf *m;
	int i;

	for (i = 0; i < n; i++) {
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			device_printf(sc->dev,
			    "%s: Can't alloc rx mbuf\n", __func__);
			return (-1);
		}

		/* Expose the full cluster so DMA may fill it completely. */
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
		xdma_enqueue_mbuf(sc->xchan_rx, &m, 0, 4, 4, XDMA_DEV_TO_MEM);
	}

	return (0);
}

/*
 * Resolve the PHY address from the FDT "phy-handle" property.
 * Returns 0 and stores the address in *phy_addr, or ENXIO.
 */
static int
xae_get_phyaddr(phandle_t node, int *phy_addr)
{
	phandle_t phy_node;
	pcell_t phy_handle, phy_reg;

	if (OF_getencprop(node, "phy-handle", (void *)&phy_handle,
	    sizeof(phy_handle)) <= 0)
		return (ENXIO);

	phy_node = OF_node_from_xref(phy_handle);

	if (OF_getencprop(phy_node, "reg", (void *)&phy_reg,
	    sizeof(phy_reg)) <= 0)
		return (ENXIO);

	*phy_addr = phy_reg;

	return (0);
}

/*
 * TX completion callback: reap completed transmit mbufs, count errors
 * and clear OACTIVE so the stack may queue more packets.
 */
static int
xae_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
{
	xdma_transfer_status_t st;
	struct xae_softc *sc;
	if_t ifp;
	struct mbuf *m;
	int err;

	sc = arg;

	XAE_LOCK(sc);

	ifp = sc->ifp;

	for (;;) {
		err = xdma_dequeue_mbuf(sc->xchan_tx, &m, &st);
		if (err != 0) {
			break;
		}

		if (st.error != 0) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		}

		m_freem(m);
	}

	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	XAE_UNLOCK(sc);

	return (0);
}

/*
 * RX completion callback: hand received mbufs to the stack and refill
 * the RX DMA ring with the same number of fresh clusters.
 */
static int
xae_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
{
	xdma_transfer_status_t st;
	struct xae_softc *sc;
	if_t ifp;
	struct mbuf *m;
	int err;
	uint32_t cnt_processed;

	sc = arg;

	dprintf("%s\n", __func__);

	XAE_LOCK(sc);

	ifp = sc->ifp;

	cnt_processed = 0;
	for (;;) {
		err = xdma_dequeue_mbuf(sc->xchan_rx, &m, &st);
		if (err != 0) {
			break;
		}
		cnt_processed++;

		if (st.error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			m_freem(m);
			continue;
		}

		m->m_pkthdr.len = m->m_len = st.transferred;
		m->m_pkthdr.rcvif = ifp;
		/* Drop the lock across if_input() to avoid recursion. */
		XAE_UNLOCK(sc);
		if_input(ifp, m);
		XAE_LOCK(sc);
	}

	xae_rx_enqueue(sc, cnt_processed);

	XAE_UNLOCK(sc);

	return (0);
}

/* No per-driver queues beyond the buf_ring; nothing to flush. */
static void
xae_qflush(if_t ifp)
{
}

/*
 * Drain the buf_ring into the TX DMA channel.  Called with the softc
 * lock held.  Packets that do not fit are put back for a later pass.
 */
static int
xae_transmit_locked(if_t ifp)
{
	struct xae_softc *sc;
	struct mbuf *m;
	struct buf_ring *br;
	int error;
	int enq;

	dprintf("%s\n", __func__);

	sc = if_getsoftc(ifp);
	br = sc->br;

	enq = 0;

	while ((m = drbr_peek(ifp, br)) != NULL) {
		error = xdma_enqueue_mbuf(sc->xchan_tx,
		    &m, 0, 4, 4, XDMA_MEM_TO_DEV);
		if (error != 0) {
			/* No space in request queue available yet. */
			drbr_putback(ifp, br, m);
			break;
		}

		drbr_advance(ifp, br);

		enq++;

		/* If anyone is interested give them a copy. */
		ETHER_BPF_MTAP(ifp, m);
	}

	if (enq > 0)
		xdma_queue_submit(sc->xchan_tx);

	return (0);
}

/*
 * if_transmit method: enqueue onto the buf_ring and kick the TX path
 * when the interface is running and link is up.
 */
static int
xae_transmit(if_t ifp, struct mbuf *m)
{
	struct xae_softc *sc;
	int error;

	dprintf("%s\n", __func__);

	sc = if_getsoftc(ifp);

	XAE_LOCK(sc);

	error = drbr_enqueue(ifp, sc->br, m);
	if (error) {
		XAE_UNLOCK(sc);
		return (error);
	}

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING) {
		XAE_UNLOCK(sc);
		return (0);
	}

	if (!sc->link_is_up) {
		XAE_UNLOCK(sc);
		return (0);
	}

	error = xae_transmit_locked(ifp);

	XAE_UNLOCK(sc);

	return (error);
}

/*
 * Stop the MAC: mark the interface down, cancel the tick callout and
 * disable both the transmitter and the receiver.
 */
static void
xae_stop_locked(struct xae_softc *sc)
{
	if_t ifp;
	uint32_t reg;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));

	callout_stop(&sc->xae_callout);

	/* Stop the transmitter */
	reg = READ4(sc, XAE_TC);
	reg &= ~TC_TX;
	WRITE4(sc, XAE_TC, reg);

	/* Stop the receiver.
*/ 356 reg = READ4(sc, XAE_RCW1); 357 reg &= ~RCW1_RX; 358 WRITE4(sc, XAE_RCW1, reg); 359 } 360 361 static uint64_t 362 xae_stat(struct xae_softc *sc, int counter_id) 363 { 364 uint64_t new, old; 365 uint64_t delta; 366 367 KASSERT(counter_id < XAE_MAX_COUNTERS, 368 ("counter %d is out of range", counter_id)); 369 370 new = READ8(sc, XAE_STATCNT(counter_id)); 371 old = sc->counters[counter_id]; 372 373 if (new >= old) 374 delta = new - old; 375 else 376 delta = UINT64_MAX - old + new; 377 sc->counters[counter_id] = new; 378 379 return (delta); 380 } 381 382 static void 383 xae_harvest_stats(struct xae_softc *sc) 384 { 385 if_t ifp; 386 387 ifp = sc->ifp; 388 389 if_inc_counter(ifp, IFCOUNTER_IPACKETS, xae_stat(sc, RX_GOOD_FRAMES)); 390 if_inc_counter(ifp, IFCOUNTER_IMCASTS, xae_stat(sc, RX_GOOD_MCASTS)); 391 if_inc_counter(ifp, IFCOUNTER_IERRORS, 392 xae_stat(sc, RX_FRAME_CHECK_SEQ_ERROR) + 393 xae_stat(sc, RX_LEN_OUT_OF_RANGE) + 394 xae_stat(sc, RX_ALIGNMENT_ERRORS)); 395 396 if_inc_counter(ifp, IFCOUNTER_OBYTES, xae_stat(sc, TX_BYTES)); 397 if_inc_counter(ifp, IFCOUNTER_OPACKETS, xae_stat(sc, TX_GOOD_FRAMES)); 398 if_inc_counter(ifp, IFCOUNTER_OMCASTS, xae_stat(sc, TX_GOOD_MCASTS)); 399 if_inc_counter(ifp, IFCOUNTER_OERRORS, 400 xae_stat(sc, TX_GOOD_UNDERRUN_ERRORS)); 401 402 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 403 xae_stat(sc, TX_SINGLE_COLLISION_FRAMES) + 404 xae_stat(sc, TX_MULTI_COLLISION_FRAMES) + 405 xae_stat(sc, TX_LATE_COLLISIONS) + 406 xae_stat(sc, TX_EXCESS_COLLISIONS)); 407 } 408 409 static void 410 xae_tick(void *arg) 411 { 412 struct xae_softc *sc; 413 if_t ifp; 414 int link_was_up; 415 416 sc = arg; 417 418 XAE_ASSERT_LOCKED(sc); 419 420 ifp = sc->ifp; 421 422 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) 423 return; 424 425 /* Gather stats from hardware counters. */ 426 xae_harvest_stats(sc); 427 428 /* Check the media status. 
 */
	link_was_up = sc->link_is_up;
	mii_tick(sc->mii_softc);
	/* Kick the TX queue when the link transitions to up. */
	if (sc->link_is_up && !link_was_up)
		xae_transmit_locked(sc->ifp);

	/* Schedule another check one second from now. */
	callout_reset(&sc->xae_callout, hz, xae_tick, sc);
}

/*
 * Bring the interface up: program the RX filter, enable TX/RX in the
 * MAC and start autonegotiation via mii.  No-op if already running.
 */
static void
xae_init_locked(struct xae_softc *sc)
{
	if_t ifp;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);

	xae_setup_rxfilter(sc);

	/* Enable the transmitter */
	WRITE4(sc, XAE_TC, TC_TX);

	/* Enable the receiver. */
	WRITE4(sc, XAE_RCW1, RCW1_RX);

	/*
	 * Call mii_mediachg() which will call back into xae_miibus_statchg()
	 * to set up the remaining config registers based on current media.
	 */
	mii_mediachg(sc->mii_softc);
	callout_reset(&sc->xae_callout, hz, xae_tick, sc);
}

/* if_init method: locked wrapper around xae_init_locked(). */
static void
xae_init(void *arg)
{
	struct xae_softc *sc;

	sc = arg;

	XAE_LOCK(sc);
	xae_init_locked(sc);
	XAE_UNLOCK(sc);
}

/* ifmedia status method: report current media state from mii. */
static void
xae_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct xae_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = sc->mii_softc;

	XAE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	XAE_UNLOCK(sc);
}

/* Apply a media change through mii; softc lock held by caller. */
static int
xae_media_change_locked(struct xae_softc *sc)
{

	return (mii_mediachg(sc->mii_softc));
}

/* ifmedia change method: locked wrapper. */
static int
xae_media_change(if_t ifp)
{
	struct xae_softc *sc;
	int error;

	sc = if_getsoftc(ifp);

	XAE_LOCK(sc);
	error = xae_media_change_locked(sc);
	XAE_UNLOCK(sc);

	return (error);
}

/*
 * if_foreach_llmaddr() callback: program one multicast address into
 * filter slot 'cnt'.  Always returns 1 so the caller's count advances.
 */
static u_int
xae_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct xae_softc *sc = arg;
	uint32_t reg;
	uint8_t *ma;

	if (cnt >=
 XAE_MULTICAST_TABLE_SIZE)
		return (1);

	ma = LLADDR(sdl);

	/* Select filter slot 'cnt' in the frame filter control register. */
	reg = READ4(sc, XAE_FFC) & 0xffffff00;
	reg |= cnt;
	WRITE4(sc, XAE_FFC, reg);

	reg = (ma[0]);
	reg |= (ma[1] << 8);
	reg |= (ma[2] << 16);
	reg |= (ma[3] << 24);
	WRITE4(sc, XAE_FFV(0), reg);

	reg = ma[4];
	reg |= ma[5] << 8;
	WRITE4(sc, XAE_FFV(1), reg);

	return (1);
}

/*
 * Program the RX frame filter: promiscuous-multicast mode for
 * ALLMULTI/PROMISC, otherwise the per-address multicast table, and
 * always the primary unicast address.
 */
static void
xae_setup_rxfilter(struct xae_softc *sc)
{
	if_t ifp;
	uint32_t reg;

	XAE_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/*
	 * Set the multicast (group) filter hash.
	 */
	if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		reg = READ4(sc, XAE_FFC);
		reg |= FFC_PM;
		WRITE4(sc, XAE_FFC, reg);
	} else {
		reg = READ4(sc, XAE_FFC);
		reg &= ~FFC_PM;
		WRITE4(sc, XAE_FFC, reg);

		if_foreach_llmaddr(ifp, xae_write_maddr, sc);
	}

	/*
	 * Set the primary address.
	 */
	reg = sc->macaddr[0];
	reg |= (sc->macaddr[1] << 8);
	reg |= (sc->macaddr[2] << 16);
	reg |= (sc->macaddr[3] << 24);
	WRITE4(sc, XAE_UAW0, reg);

	reg = sc->macaddr[4];
	reg |= (sc->macaddr[5] << 8);
	WRITE4(sc, XAE_UAW1, reg);
}

/*
 * if_ioctl method: handle flag changes, multicast list updates, media
 * ioctls and capability toggles; everything else goes to ether_ioctl().
 */
static int
xae_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct xae_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int mask, error;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		XAE_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* Re-filter only if a relevant flag flipped. */
				if ((if_getflags(ifp) ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					xae_setup_rxfilter(sc);
			} else {
				if (!sc->is_detaching)
					xae_init_locked(sc);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				xae_stop_locked(sc);
		}
		sc->if_flags = if_getflags(ifp);
		XAE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case
SIOCDELMULTI: 618 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 619 XAE_LOCK(sc); 620 xae_setup_rxfilter(sc); 621 XAE_UNLOCK(sc); 622 } 623 break; 624 case SIOCSIFMEDIA: 625 case SIOCGIFMEDIA: 626 mii = sc->mii_softc; 627 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 628 break; 629 case SIOCSIFCAP: 630 mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap; 631 if (mask & IFCAP_VLAN_MTU) { 632 /* No work to do except acknowledge the change took */ 633 if_togglecapenable(ifp, IFCAP_VLAN_MTU); 634 } 635 break; 636 637 default: 638 error = ether_ioctl(ifp, cmd, data); 639 break; 640 } 641 642 return (error); 643 } 644 645 static void 646 xae_intr(void *arg) 647 { 648 649 } 650 651 static int 652 xae_get_hwaddr(struct xae_softc *sc, uint8_t *hwaddr) 653 { 654 phandle_t node; 655 int len; 656 657 node = ofw_bus_get_node(sc->dev); 658 659 /* Check if there is property */ 660 if ((len = OF_getproplen(node, "local-mac-address")) <= 0) 661 return (EINVAL); 662 663 if (len != ETHER_ADDR_LEN) 664 return (EINVAL); 665 666 OF_getprop(node, "local-mac-address", hwaddr, 667 ETHER_ADDR_LEN); 668 669 return (0); 670 } 671 672 static int 673 mdio_wait(struct xae_softc *sc) 674 { 675 uint32_t reg; 676 int timeout; 677 678 timeout = 200; 679 680 do { 681 reg = READ4(sc, XAE_MDIO_CTRL); 682 if (reg & MDIO_CTRL_READY) 683 break; 684 DELAY(1); 685 } while (timeout--); 686 687 if (timeout <= 0) { 688 printf("Failed to get MDIO ready\n"); 689 return (1); 690 } 691 692 return (0); 693 } 694 695 static int 696 xae_miibus_read_reg(device_t dev, int phy, int reg) 697 { 698 struct xae_softc *sc; 699 uint32_t mii; 700 int rv; 701 702 sc = device_get_softc(dev); 703 704 if (mdio_wait(sc)) 705 return (0); 706 707 mii = MDIO_CTRL_TX_OP_READ | MDIO_CTRL_INITIATE; 708 mii |= (reg << MDIO_TX_REGAD_S); 709 mii |= (phy << MDIO_TX_PHYAD_S); 710 711 WRITE4(sc, XAE_MDIO_CTRL, mii); 712 713 if (mdio_wait(sc)) 714 return (0); 715 716 rv = READ4(sc, XAE_MDIO_READ); 717 718 return (rv); 719 } 720 721 static int 
xae_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct xae_softc *sc;
	uint32_t mii;

	sc = device_get_softc(dev);

	if (mdio_wait(sc))
		return (1);

	mii = MDIO_CTRL_TX_OP_WRITE | MDIO_CTRL_INITIATE;
	mii |= (reg << MDIO_TX_REGAD_S);
	mii |= (phy << MDIO_TX_PHYAD_S);

	/* Latch the data first, then kick off the MDIO transaction. */
	WRITE4(sc, XAE_MDIO_WRITE, val);
	WRITE4(sc, XAE_MDIO_CTRL, mii);

	if (mdio_wait(sc))
		return (1);

	return (0);
}

/*
 * DP83867 PHY workaround for the Xilinx VCU118 board: force SGMII
 * 6-wire mode, tune speed-optimization settings and retry until the
 * PHY at address 1 responds and autonegotiation completes.
 * NOTE(review): relies on undocumented registers (see defines above);
 * do not reorder these writes.
 */
static void
xae_phy_fixup(struct xae_softc *sc)
{
	uint32_t reg;

	do {
		WRITE_TI_EREG(sc, DP83867_SGMIICTL1, SGMIICTL1_SGMII_6W);
		PHY_WR(sc, DP83867_PHYCR, PHYCR_SGMII_EN);

		reg = PHY_RD(sc, DP83867_CFG2);
		reg &= ~CFG2_SPEED_OPT_ATTEMPT_CNT_M;
		reg |= (CFG2_SPEED_OPT_ATTEMPT_CNT_4);
		reg |= CFG2_INTERRUPT_POLARITY;
		reg |= CFG2_SPEED_OPT_ENHANCED_EN;
		reg |= CFG2_SPEED_OPT_10M_EN;
		PHY_WR(sc, DP83867_CFG2, reg);

		WRITE_TI_EREG(sc, DP83867_CFG4, CFG4_SGMII_TMR);
		PHY_WR(sc, MII_BMCR,
		    BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_RESET);
	} while (PHY1_RD(sc, MII_BMCR) == 0x0ffff);

	do {
		PHY1_WR(sc, MII_BMCR,
		    BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_STARTNEG);
		DELAY(40000);
	} while ((PHY1_RD(sc, MII_BMSR) & BMSR_ACOMP) == 0);
}

/*
 * Acquire TX/RX xDMA controllers via the standard "dmas"/"dma-names"
 * OFW binding.  Returns 0 on success, ENXIO otherwise (releasing the
 * TX controller if only RX failed).
 */
static int
get_xdma_std(struct xae_softc *sc)
{

	sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
	if (sc->xdma_tx == NULL)
		return (ENXIO);

	sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
	if (sc->xdma_rx == NULL) {
		xdma_put(sc->xdma_tx);
		return (ENXIO);
	}

	return (0);
}

/*
 * Fallback: acquire the DMA engine through the Xilinx
 * "axistream-connected" property and tag each controller handle with
 * its channel id.
 */
static int
get_xdma_axistream(struct xae_softc *sc)
{
	struct axidma_fdt_data *data;
	device_t dma_dev;
	phandle_t node;
	pcell_t prop;
	size_t len;

	node = ofw_bus_get_node(sc->dev);
	len = OF_getencprop(node, "axistream-connected", &prop, sizeof(prop));
	if (len != sizeof(prop)) {
		device_printf(sc->dev,
		    "%s: Couldn't get axistream-connected prop.\n", __func__);
		return (ENXIO);
	}
	dma_dev = OF_device_from_xref(prop);
	if (dma_dev == NULL) {
		device_printf(sc->dev, "Could not get DMA device by xref.\n");
		return (ENXIO);
	}

	sc->xdma_tx = xdma_get(sc->dev, dma_dev);
	if (sc->xdma_tx == NULL) {
		device_printf(sc->dev, "Could not find DMA controller.\n");
		return (ENXIO);
	}
	data = malloc(sizeof(struct axidma_fdt_data),
	    M_DEVBUF, (M_WAITOK | M_ZERO));
	data->id = AXIDMA_TX_CHAN;
	sc->xdma_tx->data = data;

	sc->xdma_rx = xdma_get(sc->dev, dma_dev);
	if (sc->xdma_rx == NULL) {
		device_printf(sc->dev, "Could not find DMA controller.\n");
		return (ENXIO);
	}
	data = malloc(sizeof(struct axidma_fdt_data),
	    M_DEVBUF, (M_WAITOK | M_ZERO));
	data->id = AXIDMA_RX_CHAN;
	sc->xdma_rx->data = data;

	return (0);
}

/*
 * Locate the xDMA controllers, allocate TX/RX virtual channels with
 * their completion handlers, set up a bounce-buffer arena if the DMA
 * engine does not provide one, and prepare both scatter-gather queues.
 */
static int
setup_xdma(struct xae_softc *sc)
{
	device_t dev;
	vmem_t *vmem;
	vm_paddr_t phys;
	vm_page_t m;
	int error;

	dev = sc->dev;

	/* Get xDMA controller */
	error = get_xdma_std(sc);

	if (error) {
		device_printf(sc->dev,
		    "Fallback to axistream-connected property\n");
		error = get_xdma_axistream(sc);
	}

	if (error) {
		device_printf(dev, "Could not find xDMA controllers.\n");
		return (ENXIO);
	}

	/* Alloc xDMA TX virtual channel. */
	sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, 0);
	if (sc->xchan_tx == NULL) {
		device_printf(dev, "Can't alloc virtual DMA TX channel.\n");
		return (ENXIO);
	}

	/* Setup interrupt handler. */
	error = xdma_setup_intr(sc->xchan_tx, 0,
	    xae_xdma_tx_intr, sc, &sc->ih_tx);
	if (error) {
		device_printf(sc->dev,
		    "Can't setup xDMA TX interrupt handler.\n");
		return (ENXIO);
	}

	/* Alloc xDMA RX virtual channel.
*/ 878 sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, 0); 879 if (sc->xchan_rx == NULL) { 880 device_printf(dev, "Can't alloc virtual DMA RX channel.\n"); 881 return (ENXIO); 882 } 883 884 /* Setup interrupt handler. */ 885 error = xdma_setup_intr(sc->xchan_rx, XDMA_INTR_NET, 886 xae_xdma_rx_intr, sc, &sc->ih_rx); 887 if (error) { 888 device_printf(sc->dev, 889 "Can't setup xDMA RX interrupt handler.\n"); 890 return (ENXIO); 891 } 892 893 /* Setup bounce buffer */ 894 vmem = xdma_get_memory(dev); 895 if (!vmem) { 896 m = vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO, 897 BUF_NPAGES, 0, BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, 898 VM_MEMATTR_DEFAULT); 899 phys = VM_PAGE_TO_PHYS(m); 900 vmem = vmem_create("xdma vmem", 0, 0, PAGE_SIZE, PAGE_SIZE, 901 M_BESTFIT | M_WAITOK); 902 vmem_add(vmem, phys, BUF_NPAGES * PAGE_SIZE, 0); 903 } 904 905 xchan_set_memory(sc->xchan_tx, vmem); 906 xchan_set_memory(sc->xchan_rx, vmem); 907 908 xdma_prep_sg(sc->xchan_tx, 909 TX_QUEUE_SIZE, /* xchan requests queue size */ 910 MCLBYTES, /* maxsegsize */ 911 8, /* maxnsegs */ 912 16, /* alignment */ 913 0, /* boundary */ 914 BUS_SPACE_MAXADDR_32BIT, 915 BUS_SPACE_MAXADDR); 916 917 xdma_prep_sg(sc->xchan_rx, 918 RX_QUEUE_SIZE, /* xchan requests queue size */ 919 MCLBYTES, /* maxsegsize */ 920 1, /* maxnsegs */ 921 16, /* alignment */ 922 0, /* boundary */ 923 BUS_SPACE_MAXADDR_32BIT, 924 BUS_SPACE_MAXADDR); 925 926 return (0); 927 } 928 929 static int 930 xae_probe(device_t dev) 931 { 932 933 if (!ofw_bus_status_okay(dev)) 934 return (ENXIO); 935 936 if (!ofw_bus_is_compatible(dev, "xlnx,axi-ethernet-1.00.a")) 937 return (ENXIO); 938 939 device_set_desc(dev, "Xilinx AXI Ethernet"); 940 941 return (BUS_PROBE_DEFAULT); 942 } 943 944 static int 945 xae_attach(device_t dev) 946 { 947 struct xae_softc *sc; 948 if_t ifp; 949 phandle_t node; 950 uint32_t reg; 951 int error; 952 953 sc = device_get_softc(dev); 954 sc->dev = dev; 955 node = ofw_bus_get_node(dev); 956 957 if (setup_xdma(sc) 
 != 0) {
		device_printf(dev, "Could not setup xDMA.\n");
		return (ENXIO);
	}

	mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
	    MTX_NETWORK_LOCK, MTX_DEF);

	sc->br = buf_ring_alloc(BUFRING_SIZE, M_DEVBUF,
	    M_NOWAIT, &sc->mtx);
	if (sc->br == NULL)
		return (ENOMEM);

	if (bus_alloc_resources(dev, xae_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	device_printf(sc->dev, "Identification: %x\n",
	    READ4(sc, XAE_IDENT));

	/* Get MAC addr */
	if (xae_get_hwaddr(sc, sc->macaddr)) {
		device_printf(sc->dev, "can't get mac\n");
		return (ENXIO);
	}

	/* Enable MII clock */
	reg = (MDIO_CLK_DIV_DEFAULT << MDIO_SETUP_CLK_DIV_S);
	reg |= MDIO_SETUP_ENABLE;
	WRITE4(sc, XAE_MDIO_SETUP, reg);
	if (mdio_wait(sc))
		return (ENXIO);

	callout_init_mtx(&sc->xae_callout, &sc->mtx, 0);

	/* Setup interrupt handler. */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, xae_intr, sc, &sc->intr_cookie);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	/* Set up the ethernet interface.
 */
	sc->ifp = ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
	if_setcapenable(ifp, if_getcapabilities(ifp));
	if_settransmitfn(ifp, xae_transmit);
	if_setqflushfn(ifp, xae_qflush);
	if_setioctlfn(ifp, xae_ioctl);
	if_setinitfn(ifp, xae_init);
	if_setsendqlen(ifp, TX_DESC_COUNT - 1);
	if_setsendqready(ifp);

	if (xae_get_phyaddr(node, &sc->phy_addr) != 0)
		return (ENXIO);

	/* Attach the mii driver. */
	error = mii_attach(dev, &sc->miibus, ifp, xae_media_change,
	    xae_media_status, BMSR_DEFCAPMASK, sc->phy_addr,
	    MII_OFFSET_ANY, 0);

	if (error != 0) {
		device_printf(dev, "PHY attach failed\n");
		return (ENXIO);
	}
	sc->mii_softc = device_get_softc(sc->miibus);

	/* Apply vcu118 workaround. */
	if (OF_getproplen(node, "xlnx,vcu118") >= 0)
		xae_phy_fixup(sc);

	/* All ready to run, attach the ethernet interface. */
	ether_ifattach(ifp, sc->macaddr);
	sc->is_attached = true;

	/* Pre-fill the RX ring and hand it to the DMA engine. */
	xae_rx_enqueue(sc, NUM_RX_MBUF);
	xdma_queue_submit(sc->xchan_rx);

	return (0);
}

/*
 * Detach: stop the MAC, detach the ifnet and release interrupt,
 * bus and DMA resources.
 */
static int
xae_detach(device_t dev)
{
	struct xae_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	KASSERT(mtx_initialized(&sc->mtx), ("%s: mutex not initialized",
	    device_get_nameunit(dev)));

	ifp = sc->ifp;

	/* Only cleanup if attach succeeded.
 */
	if (device_is_attached(dev)) {
		XAE_LOCK(sc);
		xae_stop_locked(sc);
		XAE_UNLOCK(sc);
		callout_drain(&sc->xae_callout);
		ether_ifdetach(ifp);
	}

	bus_generic_detach(dev);

	if (ifp != NULL)
		if_free(ifp);

	mtx_destroy(&sc->mtx);

	bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);

	bus_release_resources(dev, xae_spec, sc->res);

	xdma_channel_free(sc->xchan_tx);
	xdma_channel_free(sc->xchan_rx);
	xdma_put(sc->xdma_tx);
	xdma_put(sc->xdma_rx);

	return (0);
}

static void
xae_miibus_statchg(device_t dev)
{
	struct xae_softc *sc;
	struct mii_data *mii;
	uint32_t reg;

	/*
	 * Called by the MII bus driver when the PHY establishes
	 * link to set the MAC interface registers.
	 */

	sc = device_get_softc(dev);

	XAE_ASSERT_LOCKED(sc);

	mii = sc->mii_softc;

	if (mii->mii_media_status & IFM_ACTIVE)
		sc->link_is_up = true;
	else
		sc->link_is_up = false;

	/* Translate the negotiated media into a MAC speed setting. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		reg = SPEED_1000;
		break;
	case IFM_100_TX:
		reg = SPEED_100;
		break;
	case IFM_10_T:
		reg = SPEED_10;
		break;
	case IFM_NONE:
		sc->link_is_up = false;
		return;
	default:
		sc->link_is_up = false;
		device_printf(dev, "Unsupported media %u\n",
		    IFM_SUBTYPE(mii->mii_media_active));
		return;
	}

	WRITE4(sc, XAE_SPEED, reg);
}

static device_method_t xae_methods[] = {
	DEVMETHOD(device_probe,		xae_probe),
	DEVMETHOD(device_attach,	xae_attach),
	DEVMETHOD(device_detach,	xae_detach),

	/* MII Interface */
	DEVMETHOD(miibus_readreg,	xae_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	xae_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	xae_miibus_statchg),
	{ 0, 0 }
};

driver_t xae_driver = {
	"xae",
	xae_methods,
	sizeof(struct xae_softc),
};

/* Attach under simplebus; miibus hangs off this driver for the PHY. */
DRIVER_MODULE(xae, simplebus, xae_driver, 0, 0);
DRIVER_MODULE(miibus, xae, miibus_driver, 0, 0);

MODULE_DEPEND(xae, ether, 1, 1, 1);
MODULE_DEPEND(xae, miibus, 1, 1, 1);