/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Ethernet media access controller (EMAC)
 * Chapter 17, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
 *
 * EMAC is an instance of the Synopsys DesignWare 3504-0
 * Universal 10/100/1000 Ethernet MAC (DWC_gmac).
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/gpio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/dwc/if_dwc.h>
#include <dev/dwc/if_dwcvar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/mii/mii_fdt.h>

#include <dev/extres/clk/clk.h>
#include <dev/extres/hwreset/hwreset.h>

#include "if_dwc_if.h"
#include "gpio_if.h"
#include "miibus_if.h"

#define READ4(_sc, _reg) \
    bus_read_4((_sc)->res[0], _reg)
#define WRITE4(_sc, _reg, _val) \
    bus_write_4((_sc)->res[0], _reg, _val)

#define MAC_RESET_TIMEOUT       100
#define WATCHDOG_TIMEOUT_SECS   5
#define STATS_HARVEST_INTERVAL  2

#define DWC_LOCK(sc)            mtx_lock(&(sc)->mtx)
#define DWC_UNLOCK(sc)          mtx_unlock(&(sc)->mtx)
#define DWC_ASSERT_LOCKED(sc)   mtx_assert(&(sc)->mtx, MA_OWNED)
#define DWC_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED)

/* TX descriptors - TDESC0 is almost unified */
#define TDESC0_OWN          (1U << 31)
#define TDESC0_IHE          (1U << 16)  /* IP Header Error */
#define TDESC0_ES           (1U << 15)  /* Error Summary */
#define TDESC0_JT           (1U << 14)  /* Jabber Timeout */
#define TDESC0_FF           (1U << 13)  /* Frame Flushed */
#define TDESC0_PCE          (1U << 12)  /* Payload Checksum Error */
#define TDESC0_LOC          (1U << 11)  /* Loss of Carrier */
#define TDESC0_NC           (1U << 10)  /* No Carrier */
#define TDESC0_LC           (1U << 9)   /* Late Collision */
#define TDESC0_EC           (1U << 8)   /* Excessive Collision */
#define TDESC0_VF           (1U << 7)   /* VLAN Frame */
#define TDESC0_CC_MASK      0xf
#define TDESC0_CC_SHIFT     3           /* Collision Count */
#define TDESC0_ED           (1U << 2)   /* Excessive Deferral */
#define TDESC0_UF           (1U << 1)   /* Underflow Error */
#define TDESC0_DB           (1U << 0)   /* Deferred Bit */
/* TX descriptors - TDESC0 extended format only */
#define ETDESC0_IC          (1U << 30)  /* Interrupt on Completion */
#define ETDESC0_LS          (1U << 29)  /* Last Segment */
#define ETDESC0_FS          (1U << 28)  /* First Segment */
#define ETDESC0_DC          (1U << 27)  /* Disable CRC */
#define ETDESC0_DP          (1U << 26)  /* Disable Padding */
#define ETDESC0_CIC_NONE    (0U << 22)  /* Checksum Insertion Control */
#define ETDESC0_CIC_HDR     (1U << 22)
#define ETDESC0_CIC_SEG     (2U << 22)
#define ETDESC0_CIC_FULL    (3U << 22)
#define ETDESC0_TER         (1U << 21)  /* Transmit End of Ring */
#define ETDESC0_TCH         (1U << 20)  /* Second Address Chained */

/* TX descriptors - TDESC1 normal format */
#define NTDESC1_IC          (1U << 31)  /* Interrupt on Completion */
#define NTDESC1_LS          (1U << 30)  /* Last Segment */
#define NTDESC1_FS          (1U << 29)  /* First Segment */
#define NTDESC1_CIC_NONE    (0U << 27)  /* Checksum Insertion Control */
#define NTDESC1_CIC_HDR     (1U << 27)
#define NTDESC1_CIC_SEG     (2U << 27)
#define NTDESC1_CIC_FULL    (3U << 27)
#define NTDESC1_DC          (1U << 26)  /* Disable CRC */
#define NTDESC1_TER         (1U << 25)  /* Transmit End of Ring */
#define NTDESC1_TCH         (1U << 24)  /* Second Address Chained */
/* TX descriptors - TDESC1 extended format */
#define ETDESC1_DP          (1U << 23)  /* Disable Padding */
#define ETDESC1_TBS2_MASK   0x7ff
#define ETDESC1_TBS2_SHIFT  11          /* Transmit Buffer 2 Size */
#define ETDESC1_TBS1_MASK   0x7ff
#define ETDESC1_TBS1_SHIFT  0           /* Transmit Buffer 1 Size */

/* RX descriptor - RDESC0 is unified */
#define RDESC0_OWN          (1U << 31)
#define RDESC0_AFM          (1U << 30)  /* Dest. Address Filter Fail */
#define RDESC0_FL_MASK      0x3fff
#define RDESC0_FL_SHIFT     16          /* Frame Length */
#define RDESC0_ES           (1U << 15)  /* Error Summary */
#define RDESC0_DE           (1U << 14)  /* Descriptor Error */
#define RDESC0_SAF          (1U << 13)  /* Source Address Filter Fail */
#define RDESC0_LE           (1U << 12)  /* Length Error */
#define RDESC0_OE           (1U << 11)  /* Overflow Error */
#define RDESC0_VLAN         (1U << 10)  /* VLAN Tag */
#define RDESC0_FS           (1U << 9)   /* First Descriptor */
#define RDESC0_LS           (1U << 8)   /* Last Descriptor */
#define RDESC0_ICE          (1U << 7)   /* IPC Checksum Error */
#define RDESC0_LC           (1U << 6)   /* Late Collision */
#define RDESC0_FT           (1U << 5)   /* Frame Type */
#define RDESC0_RWT          (1U << 4)   /* Receive Watchdog Timeout */
#define RDESC0_RE           (1U << 3)   /* Receive Error */
#define RDESC0_DBE          (1U << 2)   /* Dribble Bit Error */
#define RDESC0_CE           (1U << 1)   /* CRC Error */
#define RDESC0_PCE          (1U << 0)   /* Payload Checksum Error */
#define RDESC0_RXMA         (1U << 0)   /* Rx MAC Address */

/* RX descriptors - RDESC1 normal format */
#define NRDESC1_DIC         (1U << 31)  /* Disable Intr on Completion */
#define NRDESC1_RER         (1U << 25)  /* Receive End of Ring */
#define NRDESC1_RCH         (1U << 24)  /* Second Address Chained */
#define NRDESC1_RBS2_MASK   0x7ff
#define NRDESC1_RBS2_SHIFT  11          /* Receive Buffer 2 Size */
#define NRDESC1_RBS1_MASK   0x7ff
#define NRDESC1_RBS1_SHIFT  0           /* Receive Buffer 1 Size */

/* RX descriptors - RDESC1 enhanced format */
#define ERDESC1_DIC         (1U << 31)  /* Disable Intr on Completion */
#define ERDESC1_RBS2_MASK   0x7ffff
#define ERDESC1_RBS2_SHIFT  16          /* Receive Buffer 2 Size */
#define ERDESC1_RER         (1U << 15)  /* Receive End of Ring */
#define ERDESC1_RCH         (1U << 14)  /* Second Address Chained */
#define ERDESC1_RBS1_MASK   0x7ffff
#define ERDESC1_RBS1_SHIFT  0           /* Receive Buffer 1 Size */

/*
 * A hardware buffer descriptor.  Rx and Tx buffers have the same descriptor
 * layout, but the bits in the fields have different meanings.
 */
struct dwc_hwdesc
{
    uint32_t desc0;
    uint32_t desc1;
    uint32_t addr1;    /* ptr to first buffer data */
    uint32_t addr2;    /* ptr to next descriptor / second buffer data */
};

struct dwc_hash_maddr_ctx {
    struct dwc_softc *sc;
    uint32_t hash[8];
};

/*
 * The hardware imposes alignment restrictions on various objects involved in
 * DMA transfers.  These values are expressed in bytes (not bits).
 */
#define DWC_DESC_RING_ALIGN 2048

static struct resource_spec dwc_spec[] = {
    { SYS_RES_MEMORY,   0,  RF_ACTIVE },
    { SYS_RES_IRQ,      0,  RF_ACTIVE },
    { -1, 0 }
};

static void dwc_txfinish_locked(struct dwc_softc *sc);
static void dwc_rxfinish_locked(struct dwc_softc *sc);
static void dwc_stop_locked(struct dwc_softc *sc);
static void dwc_setup_rxfilter(struct dwc_softc *sc);
static void dwc_setup_core(struct dwc_softc *sc);
static void dwc_enable_mac(struct dwc_softc *sc, bool enable);
static void dwc_init_dma(struct dwc_softc *sc);
static void dwc_stop_dma(struct dwc_softc *sc);

static void dwc_tick(void *arg);

/* Pause time field in the transmitted control frame */
static int dwc_pause_time = 0xffff;
TUNABLE_INT("hw.dwc.pause_time", &dwc_pause_time);

/*
 * MIIBUS functions
 */
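
/*
 * Read a PHY register over the MDIO bus.  The GMII_ADDRESS register carries
 * the PHY address, register number, and MDC clock divider; setting the busy
 * bit (GB) starts the transaction.  Poll (up to ~10ms) for the busy bit to
 * clear, then fetch the result from GMII_DATA.
 */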
static int
dwc_miibus_read_reg(device_t dev, int phy, int reg)
{
    struct dwc_softc *sc;
    uint16_t mii;
    size_t cnt;
    int rv = 0;

    sc = device_get_softc(dev);

    mii = ((phy & GMII_ADDRESS_PA_MASK) << GMII_ADDRESS_PA_SHIFT)
        | ((reg & GMII_ADDRESS_GR_MASK) << GMII_ADDRESS_GR_SHIFT)
        | (sc->mii_clk << GMII_ADDRESS_CR_SHIFT)
        | GMII_ADDRESS_GB; /* Busy flag */

    WRITE4(sc, GMII_ADDRESS, mii);

    for (cnt = 0; cnt < 1000; cnt++) {
        if (!(READ4(sc, GMII_ADDRESS) & GMII_ADDRESS_GB)) {
            rv = READ4(sc, GMII_DATA);
            break;
        }
        DELAY(10);
    }

    return rv;
}

static int
dwc_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
    struct dwc_softc *sc;
    uint16_t mii;
    size_t cnt;

    sc = device_get_softc(dev);

    mii = ((phy & GMII_ADDRESS_PA_MASK) << GMII_ADDRESS_PA_SHIFT)
        | ((reg & GMII_ADDRESS_GR_MASK) << GMII_ADDRESS_GR_SHIFT)
        | (sc->mii_clk << GMII_ADDRESS_CR_SHIFT)
        | GMII_ADDRESS_GB | GMII_ADDRESS_GW;

    WRITE4(sc, GMII_DATA, val);
    WRITE4(sc, GMII_ADDRESS, mii);

    for (cnt = 0; cnt < 1000; cnt++) {
        if (!(READ4(sc, GMII_ADDRESS) & GMII_ADDRESS_GB)) {
            break;
        }
        DELAY(10);
    }

    return (0);
}

static void
dwc_miibus_statchg(device_t dev)
{
    struct dwc_softc *sc;
    struct mii_data *mii;
    uint32_t reg;

    /*
     * Called by the MII bus driver when the PHY establishes
     * link to set the MAC interface registers.
     */

    sc = device_get_softc(dev);

    DWC_ASSERT_LOCKED(sc);

    mii = sc->mii_softc;

    if (mii->mii_media_status & IFM_ACTIVE)
        sc->link_is_up = true;
    else
        sc->link_is_up = false;

    reg = READ4(sc, MAC_CONFIGURATION);
    switch (IFM_SUBTYPE(mii->mii_media_active)) {
    case IFM_1000_T:
    case IFM_1000_SX:
        reg &= ~(CONF_FES | CONF_PS);
        break;
    case IFM_100_TX:
        reg |= (CONF_FES | CONF_PS);
        break;
    case IFM_10_T:
        reg &= ~(CONF_FES);
        reg |= (CONF_PS);
        break;
    case IFM_NONE:
        sc->link_is_up = false;
        return;
    default:
        sc->link_is_up = false;
        device_printf(dev, "Unsupported media %u\n",
            IFM_SUBTYPE(mii->mii_media_active));
        return;
    }
    if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
        reg |= (CONF_DM);
    else
        reg &= ~(CONF_DM);
    WRITE4(sc, MAC_CONFIGURATION, reg);

    reg = FLOW_CONTROL_UP;
    if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
        reg |= FLOW_CONTROL_TX;
    if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
        reg |= FLOW_CONTROL_RX;
    if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
        reg |= dwc_pause_time << FLOW_CONTROL_PT_SHIFT;
    WRITE4(sc, FLOW_CONTROL, reg);

    IF_DWC_SET_SPEED(dev, IFM_SUBTYPE(mii->mii_media_active));
}

/*
 * Media functions
 */

static void
dwc_media_status(if_t ifp, struct ifmediareq *ifmr)
{
    struct dwc_softc *sc;
    struct mii_data *mii;

    sc = if_getsoftc(ifp);
    mii = sc->mii_softc;
    DWC_LOCK(sc);
    mii_pollstat(mii);
    ifmr->ifm_active = mii->mii_media_active;
    ifmr->ifm_status = mii->mii_media_status;
    DWC_UNLOCK(sc);
}

static int
dwc_media_change_locked(struct dwc_softc *sc)
{

    return (mii_mediachg(sc->mii_softc));
}

static int
dwc_media_change(if_t ifp)
{
    struct dwc_softc *sc;
    int error;

    sc = if_getsoftc(ifp);

    DWC_LOCK(sc);
    error = dwc_media_change_locked(sc);
    DWC_UNLOCK(sc);
    return (error);
}

/*
 * Core functions
 */

static const uint8_t nibbletab[] = {
    /* 0x0 0000 -> 0000 */ 0x0,
    /* 0x1 0001 -> 1000 */ 0x8,
    /* 0x2 0010 -> 0100 */ 0x4,
    /* 0x3 0011 -> 1100 */ 0xc,
    /* 0x4 0100 -> 0010 */ 0x2,
    /* 0x5 0101 -> 1010 */ 0xa,
    /* 0x6 0110 -> 0110 */ 0x6,
    /* 0x7 0111 -> 1110 */ 0xe,
    /* 0x8 1000 -> 0001 */ 0x1,
    /* 0x9 1001 -> 1001 */ 0x9,
    /* 0xa 1010 -> 0101 */ 0x5,
    /* 0xb 1011 -> 1101 */ 0xd,
    /* 0xc 1100 -> 0011 */ 0x3,
    /* 0xd 1101 -> 1011 */ 0xb,
    /* 0xe 1110 -> 0111 */ 0x7,
    /* 0xf 1111 -> 1111 */ 0xf,
};

static uint8_t
bitreverse(uint8_t x)
{

    return (nibbletab[x & 0xf] << 4) | nibbletab[x >> 4];
}

static u_int
dwc_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
    struct dwc_hash_maddr_ctx *ctx = arg;
    uint32_t crc, hashbit, hashreg;
    uint8_t val;

    crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN);
    /* Take the lower 8 bits and reverse them */
    val = bitreverse(~crc & 0xff);
    if (ctx->sc->mactype != DWC_GMAC_EXT_DESC)
        val >>= 2;    /* Only need the lower 6 bits */
    hashreg = (val >> 5);
    hashbit = (val & 31);
    ctx->hash[hashreg] |= (1 << hashbit);

    return (1);
}
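
/*
 * Program the receive filter: the unicast address in slot 0, promiscuous
 * and all-multicast modes from the interface flags, and the multicast hash
 * table (two registers on normal-descriptor parts, eight on extended ones).
 */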
static void
dwc_setup_rxfilter(struct dwc_softc *sc)
{
    struct dwc_hash_maddr_ctx ctx;
    if_t ifp;
    uint8_t *eaddr;
    uint32_t ffval, hi, lo;
    int nhash, i;

    DWC_ASSERT_LOCKED(sc);

    ifp = sc->ifp;
    nhash = sc->mactype != DWC_GMAC_EXT_DESC ? 2 : 8;

    /*
     * Set the multicast (group) filter hash.
     */
    if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
        ffval = (FRAME_FILTER_PM);
        for (i = 0; i < nhash; i++)
            ctx.hash[i] = ~0;
    } else {
        ffval = (FRAME_FILTER_HMC);
        for (i = 0; i < nhash; i++)
            ctx.hash[i] = 0;
        ctx.sc = sc;
        if_foreach_llmaddr(ifp, dwc_hash_maddr, &ctx);
    }

    /*
     * Set the individual address filter hash.
     */
    if ((if_getflags(ifp) & IFF_PROMISC) != 0)
        ffval |= (FRAME_FILTER_PR);

    /*
     * Set the primary address.
     */
    eaddr = if_getlladdr(ifp);
    lo = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) |
        (eaddr[3] << 24);
    hi = eaddr[4] | (eaddr[5] << 8);
    WRITE4(sc, MAC_ADDRESS_LOW(0), lo);
    WRITE4(sc, MAC_ADDRESS_HIGH(0), hi);
    WRITE4(sc, MAC_FRAME_FILTER, ffval);
    if (sc->mactype != DWC_GMAC_EXT_DESC) {
        WRITE4(sc, GMAC_MAC_HTLOW, ctx.hash[0]);
        WRITE4(sc, GMAC_MAC_HTHIGH, ctx.hash[1]);
    } else {
        for (i = 0; i < nhash; i++)
            WRITE4(sc, HASH_TABLE_REG(i), ctx.hash[i]);
    }
}

static void
dwc_setup_core(struct dwc_softc *sc)
{
    uint32_t reg;

    DWC_ASSERT_LOCKED(sc);

    /* Enable core */
    reg = READ4(sc, MAC_CONFIGURATION);
    reg |= (CONF_JD | CONF_ACS | CONF_BE);
    WRITE4(sc, MAC_CONFIGURATION, reg);
}

static void
dwc_enable_mac(struct dwc_softc *sc, bool enable)
{
    uint32_t reg;

    DWC_ASSERT_LOCKED(sc);
    reg = READ4(sc, MAC_CONFIGURATION);
    if (enable)
        reg |= CONF_TE | CONF_RE;
    else
        reg &= ~(CONF_TE | CONF_RE);
    WRITE4(sc, MAC_CONFIGURATION, reg);
}

static void
dwc_enable_csum_offload(struct dwc_softc *sc)
{
    uint32_t reg;

    DWC_ASSERT_LOCKED(sc);
    reg = READ4(sc, MAC_CONFIGURATION);
    if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0)
        reg |= CONF_IPC;
    else
        reg &= ~CONF_IPC;
    WRITE4(sc, MAC_CONFIGURATION, reg);
}

static void
dwc_get_hwaddr(struct dwc_softc *sc, uint8_t *hwaddr)
{
    uint32_t hi, lo, rnd;

    /*
     * Try to recover a MAC address from the running hardware.  If there's
     * something non-zero there, assume the bootloader did the right thing
     * and just use it.
     *
     * Otherwise, set the address to a convenient locally assigned address,
     * 'bsd' + random 24 low-order bits.  'b' is 0x62, which has the locally
     * assigned bit set, and the broadcast/multicast bit clear.
     */
    lo = READ4(sc, MAC_ADDRESS_LOW(0));
    hi = READ4(sc, MAC_ADDRESS_HIGH(0)) & 0xffff;
    if ((lo != 0xffffffff) || (hi != 0xffff)) {
        hwaddr[0] = (lo >> 0) & 0xff;
        hwaddr[1] = (lo >> 8) & 0xff;
        hwaddr[2] = (lo >> 16) & 0xff;
        hwaddr[3] = (lo >> 24) & 0xff;
        hwaddr[4] = (hi >> 0) & 0xff;
        hwaddr[5] = (hi >> 8) & 0xff;
    } else {
        rnd = arc4random() & 0x00ffffff;
        hwaddr[0] = 'b';
        hwaddr[1] = 's';
        hwaddr[2] = 'd';
        hwaddr[3] = rnd >> 16;
        hwaddr[4] = rnd >> 8;
        hwaddr[5] = rnd >> 0;
    }
}

/*
 * DMA functions
 */

static void
dwc_init_dma(struct dwc_softc *sc)
{
    uint32_t reg;

    DWC_ASSERT_LOCKED(sc);

    /* Initialize DMA and enable transmitters */
    reg = READ4(sc, OPERATION_MODE);
    reg |= (MODE_TSF | MODE_OSF | MODE_FUF);
    reg &= ~(MODE_RSF);
    reg |= (MODE_RTC_LEV32 << MODE_RTC_SHIFT);
    WRITE4(sc, OPERATION_MODE, reg);

    WRITE4(sc, INTERRUPT_ENABLE, INT_EN_DEFAULT);

    /* Start DMA */
    reg = READ4(sc, OPERATION_MODE);
    reg |= (MODE_ST | MODE_SR);
    WRITE4(sc, OPERATION_MODE, reg);
}

static void
dwc_stop_dma(struct dwc_softc *sc)
{
    uint32_t reg;

    DWC_ASSERT_LOCKED(sc);

    /* Stop DMA TX */
    reg = READ4(sc, OPERATION_MODE);
    reg &= ~(MODE_ST);
    WRITE4(sc, OPERATION_MODE, reg);

    /* Flush TX */
    reg = READ4(sc, OPERATION_MODE);
    reg |= (MODE_FTF);
    WRITE4(sc, OPERATION_MODE, reg);

    /* Stop DMA RX */
    reg = READ4(sc, OPERATION_MODE);
    reg &= ~(MODE_SR);
    WRITE4(sc, OPERATION_MODE, reg);
}

static inline uint32_t
next_rxidx(struct dwc_softc *sc, uint32_t curidx)
{

    return ((curidx + 1) % RX_DESC_COUNT);
}

static inline uint32_t
next_txidx(struct dwc_softc *sc, uint32_t curidx)
{

    return ((curidx + 1) % TX_DESC_COUNT);
}

static void
dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

    if (error != 0)
        return;
    *(bus_addr_t *)arg = segs[0].ds_addr;
}
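
/*
 * Fill in one TX descriptor.  A zero paddr/len clears a descriptor that has
 * completed transmission.  The layout differs between the normal and
 * extended descriptor formats, so the first/last-segment and checksum flags
 * land in desc1 or desc0 accordingly.  The OWN bit is set separately, in
 * dwc_set_owner(), after the descriptor contents are in place.
 */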
inline static void
dwc_setup_txdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr,
    uint32_t len, uint32_t flags, bool first, bool last)
{
    uint32_t desc0, desc1;

    /* Addr/len 0 means we're clearing the descriptor after xmit done. */
    if (paddr == 0 || len == 0) {
        desc0 = 0;
        desc1 = 0;
        --sc->tx_desccount;
    } else {
        if (sc->mactype != DWC_GMAC_EXT_DESC) {
            desc0 = 0;
            desc1 = NTDESC1_TCH | len | flags;
            if (first)
                desc1 |= NTDESC1_FS;
            if (last)
                desc1 |= NTDESC1_LS | NTDESC1_IC;
        } else {
            desc0 = ETDESC0_TCH | flags;
            if (first)
                desc0 |= ETDESC0_FS;
            if (last)
                desc0 |= ETDESC0_LS | ETDESC0_IC;
            desc1 = len;
        }
        ++sc->tx_desccount;
    }

    sc->txdesc_ring[idx].addr1 = (uint32_t)(paddr);
    sc->txdesc_ring[idx].desc0 = desc0;
    sc->txdesc_ring[idx].desc1 = desc1;
}
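
/*
 * Hand a descriptor to the hardware.  The memory barriers ensure that the
 * descriptor words written above are visible to the device before the OWN
 * bit is, and that the OWN handoff is ordered before any subsequent
 * descriptor writes.
 */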
inline static void
dwc_set_owner(struct dwc_softc *sc, int idx)
{
    wmb();
    sc->txdesc_ring[idx].desc0 |= TDESC0_OWN;
    wmb();
}

static int
dwc_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp)
{
    struct bus_dma_segment segs[TX_MAP_MAX_SEGS];
    int error, nsegs;
    struct mbuf *m;
    uint32_t flags = 0;
    int i;
    int first, last;

    error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
        *mp, segs, &nsegs, 0);
    if (error == EFBIG) {
        /*
         * The map may be partially mapped from the first call.
         * Make sure to reset it.
         */
        bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
        if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
            return (ENOMEM);
        *mp = m;
        error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag,
            sc->txbuf_map[idx].map, *mp, segs, &nsegs, 0);
    }
    if (error != 0)
        return (ENOMEM);

    if (sc->tx_desccount + nsegs > TX_DESC_COUNT) {
        bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
        return (ENOMEM);
    }

    m = *mp;

    if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
        if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) {
            if (sc->mactype != DWC_GMAC_EXT_DESC)
                flags = NTDESC1_CIC_FULL;
            else
                flags = ETDESC0_CIC_FULL;
        } else {
            if (sc->mactype != DWC_GMAC_EXT_DESC)
                flags = NTDESC1_CIC_HDR;
            else
                flags = ETDESC0_CIC_HDR;
        }
    }

    bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
        BUS_DMASYNC_PREWRITE);

    sc->txbuf_map[idx].mbuf = m;

    first = sc->tx_desc_head;
    for (i = 0; i < nsegs; i++) {
        dwc_setup_txdesc(sc, sc->tx_desc_head,
            segs[i].ds_addr, segs[i].ds_len,
            (i == 0) ? flags : 0, /* only first desc needs flags */
            (i == 0),
            (i == nsegs - 1));
        if (i > 0)
            dwc_set_owner(sc, sc->tx_desc_head);
        last = sc->tx_desc_head;
        sc->tx_desc_head = next_txidx(sc, sc->tx_desc_head);
    }

    sc->txbuf_map[idx].last_desc_idx = last;

    dwc_set_owner(sc, first);

    return (0);
}

inline static uint32_t
dwc_setup_rxdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr)
{
    uint32_t nidx;

    sc->rxdesc_ring[idx].addr1 = (uint32_t)paddr;
    nidx = next_rxidx(sc, idx);
    sc->rxdesc_ring[idx].addr2 = sc->rxdesc_ring_paddr +
        (nidx * sizeof(struct dwc_hwdesc));
    if (sc->mactype != DWC_GMAC_EXT_DESC)
        sc->rxdesc_ring[idx].desc1 = NRDESC1_RCH |
            MIN(MCLBYTES, NRDESC1_RBS1_MASK);
    else
        sc->rxdesc_ring[idx].desc1 = ERDESC1_RCH |
            MIN(MCLBYTES, ERDESC1_RBS1_MASK);

    wmb();
    sc->rxdesc_ring[idx].desc0 = RDESC0_OWN;
    wmb();
    return (nidx);
}

static int
dwc_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m)
{
    struct bus_dma_segment seg;
    int error, nsegs;

    m_adj(m, ETHER_ALIGN);

    error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
        m, &seg, &nsegs, 0);
    if (error != 0)
        return (error);

    KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

    bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
        BUS_DMASYNC_PREREAD);

    sc->rxbuf_map[idx].mbuf = m;
    dwc_setup_rxdesc(sc, idx, seg.ds_addr);

    return (0);
}

static struct mbuf *
dwc_alloc_mbufcl(struct dwc_softc *sc)
{
    struct mbuf *m;

    m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
    if (m != NULL)
        m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

    return (m);
}
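
/*
 * Process one received frame.  On success the old mbuf is handed up the
 * stack and a freshly allocated cluster is returned for reuse in the ring;
 * on any error NULL is returned so the caller recycles the existing buffer.
 */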
static struct mbuf *
dwc_rxfinish_one(struct dwc_softc *sc, struct dwc_hwdesc *desc,
    struct dwc_bufmap *map)
{
    if_t ifp;
    struct mbuf *m, *m0;
    int len;
    uint32_t rdesc0;

    m = map->mbuf;
    ifp = sc->ifp;
    rdesc0 = desc->desc0;

    if ((rdesc0 & (RDESC0_FS | RDESC0_LS)) !=
        (RDESC0_FS | RDESC0_LS)) {
        /*
         * Something went very wrong.  The whole packet should be
         * received in one descriptor.  Report the problem.
         */
        device_printf(sc->dev,
            "%s: RX descriptor without FIRST and LAST bit set: 0x%08X\n",
            __func__, rdesc0);
        return (NULL);
    }

    len = (rdesc0 >> RDESC0_FL_SHIFT) & RDESC0_FL_MASK;
    if (len < 64) {
        /*
         * Length is invalid, recycle the old mbuf.
         * Probably an impossible case.
         */
        return (NULL);
    }

    /* Allocate a new buffer */
    m0 = dwc_alloc_mbufcl(sc);
    if (m0 == NULL) {
        /* no new mbuf available, recycle old */
        if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
        return (NULL);
    }
    /* Do dmasync for newly received packet */
    bus_dmamap_sync(sc->rxbuf_tag, map->map, BUS_DMASYNC_POSTREAD);
    bus_dmamap_unload(sc->rxbuf_tag, map->map);

    /* Received packet is valid, process it */
    m->m_pkthdr.rcvif = ifp;
    m->m_pkthdr.len = len;
    m->m_len = len;
    if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

    if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
        (rdesc0 & RDESC0_FT) != 0) {
        m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
        if ((rdesc0 & RDESC0_ICE) == 0)
            m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
        if ((rdesc0 & RDESC0_PCE) == 0) {
            m->m_pkthdr.csum_flags |=
                CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
            m->m_pkthdr.csum_data = 0xffff;
        }
    }

    /* Remove trailing FCS */
    m_adj(m, -ETHER_CRC_LEN);

    DWC_UNLOCK(sc);
    if_input(ifp, m);
    DWC_LOCK(sc);
    return (m0);
}
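
/*
 * Allocate the TX and RX descriptor rings and per-buffer DMA maps, link the
 * descriptors into rings through their addr2 (chain) pointers, and prime
 * every RX slot with an mbuf cluster.
 */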
static int
setup_dma(struct dwc_softc *sc)
{
    struct mbuf *m;
    int error;
    int nidx;
    int idx;

    /*
     * Set up TX descriptor ring, descriptors, and dma maps.
     */
    error = bus_dma_tag_create(
        bus_get_dma_tag(sc->dev),   /* Parent tag. */
        DWC_DESC_RING_ALIGN, 0,     /* alignment, boundary */
        BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
        BUS_SPACE_MAXADDR,          /* highaddr */
        NULL, NULL,                 /* filter, filterarg */
        TX_DESC_SIZE, 1,            /* maxsize, nsegments */
        TX_DESC_SIZE,               /* maxsegsize */
        0,                          /* flags */
        NULL, NULL,                 /* lockfunc, lockarg */
        &sc->txdesc_tag);
    if (error != 0) {
        device_printf(sc->dev,
            "could not create TX ring DMA tag.\n");
        goto out;
    }

    error = bus_dmamem_alloc(sc->txdesc_tag, (void **)&sc->txdesc_ring,
        BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
        &sc->txdesc_map);
    if (error != 0) {
        device_printf(sc->dev,
            "could not allocate TX descriptor ring.\n");
        goto out;
    }

    error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map,
        sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr,
        &sc->txdesc_ring_paddr, 0);
    if (error != 0) {
        device_printf(sc->dev,
            "could not load TX descriptor ring map.\n");
        goto out;
    }

    for (idx = 0; idx < TX_DESC_COUNT; idx++) {
        nidx = next_txidx(sc, idx);
        sc->txdesc_ring[idx].addr2 = sc->txdesc_ring_paddr +
            (nidx * sizeof(struct dwc_hwdesc));
    }

    error = bus_dma_tag_create(
        bus_get_dma_tag(sc->dev),   /* Parent tag. */
        1, 0,                       /* alignment, boundary */
        BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
        BUS_SPACE_MAXADDR,          /* highaddr */
        NULL, NULL,                 /* filter, filterarg */
        MCLBYTES*TX_MAP_MAX_SEGS,   /* maxsize */
        TX_MAP_MAX_SEGS,            /* nsegments */
        MCLBYTES,                   /* maxsegsize */
        0,                          /* flags */
        NULL, NULL,                 /* lockfunc, lockarg */
        &sc->txbuf_tag);
    if (error != 0) {
        device_printf(sc->dev,
            "could not create TX buffer DMA tag.\n");
        goto out;
    }

    for (idx = 0; idx < TX_MAP_COUNT; idx++) {
        error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT,
            &sc->txbuf_map[idx].map);
        if (error != 0) {
            device_printf(sc->dev,
                "could not create TX buffer DMA map.\n");
            goto out;
        }
    }

    for (idx = 0; idx < TX_DESC_COUNT; idx++)
        dwc_setup_txdesc(sc, idx, 0, 0, 0, false, false);

    /*
     * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
     */
    error = bus_dma_tag_create(
        bus_get_dma_tag(sc->dev),   /* Parent tag. */
        DWC_DESC_RING_ALIGN, 0,     /* alignment, boundary */
        BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
        BUS_SPACE_MAXADDR,          /* highaddr */
        NULL, NULL,                 /* filter, filterarg */
        RX_DESC_SIZE, 1,            /* maxsize, nsegments */
        RX_DESC_SIZE,               /* maxsegsize */
        0,                          /* flags */
        NULL, NULL,                 /* lockfunc, lockarg */
        &sc->rxdesc_tag);
    if (error != 0) {
        device_printf(sc->dev,
            "could not create RX ring DMA tag.\n");
        goto out;
    }

    error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
        BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
        &sc->rxdesc_map);
    if (error != 0) {
        device_printf(sc->dev,
            "could not allocate RX descriptor ring.\n");
        goto out;
    }

    error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map,
        sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr,
        &sc->rxdesc_ring_paddr, 0);
    if (error != 0) {
        device_printf(sc->dev,
            "could not load RX descriptor ring map.\n");
        goto out;
    }

    error = bus_dma_tag_create(
        bus_get_dma_tag(sc->dev),   /* Parent tag. */
        1, 0,                       /* alignment, boundary */
        BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
        BUS_SPACE_MAXADDR,          /* highaddr */
        NULL, NULL,                 /* filter, filterarg */
        MCLBYTES, 1,                /* maxsize, nsegments */
        MCLBYTES,                   /* maxsegsize */
        0,                          /* flags */
        NULL, NULL,                 /* lockfunc, lockarg */
        &sc->rxbuf_tag);
    if (error != 0) {
        device_printf(sc->dev,
            "could not create RX buf DMA tag.\n");
        goto out;
    }

    for (idx = 0; idx < RX_DESC_COUNT; idx++) {
        error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT,
            &sc->rxbuf_map[idx].map);
        if (error != 0) {
            device_printf(sc->dev,
                "could not create RX buffer DMA map.\n");
            goto out;
        }
        if ((m = dwc_alloc_mbufcl(sc)) == NULL) {
            device_printf(sc->dev, "Could not alloc mbuf\n");
            error = ENOMEM;
            goto out;
        }
        if ((error = dwc_setup_rxbuf(sc, idx, m)) != 0) {
            device_printf(sc->dev,
                "could not create new RX buffer.\n");
            goto out;
        }
    }

out:
    if (error != 0)
        return (ENXIO);

    return (0);
}
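
/*
 * Release everything allocated by setup_dma().  The NULL checks make this
 * safe to call with partially constructed state after a setup_dma() failure.
 */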
static void
free_dma(struct dwc_softc *sc)
{
    bus_dmamap_t map;
    int idx;

    /* Clean up RX DMA resources and free mbufs. */
    for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
        if ((map = sc->rxbuf_map[idx].map) != NULL) {
            bus_dmamap_unload(sc->rxbuf_tag, map);
            bus_dmamap_destroy(sc->rxbuf_tag, map);
            m_freem(sc->rxbuf_map[idx].mbuf);
        }
    }
    if (sc->rxbuf_tag != NULL)
        bus_dma_tag_destroy(sc->rxbuf_tag);
    if (sc->rxdesc_map != NULL) {
        bus_dmamap_unload(sc->rxdesc_tag, sc->rxdesc_map);
        bus_dmamem_free(sc->rxdesc_tag, sc->rxdesc_ring,
            sc->rxdesc_map);
    }
    if (sc->rxdesc_tag != NULL)
        bus_dma_tag_destroy(sc->rxdesc_tag);

    /* Clean up TX DMA resources. */
    for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
        if ((map = sc->txbuf_map[idx].map) != NULL) {
            /* TX maps are already unloaded. */
            bus_dmamap_destroy(sc->txbuf_tag, map);
        }
    }
    if (sc->txbuf_tag != NULL)
        bus_dma_tag_destroy(sc->txbuf_tag);
    if (sc->txdesc_map != NULL) {
        bus_dmamap_unload(sc->txdesc_tag, sc->txdesc_map);
        bus_dmamem_free(sc->txdesc_tag, sc->txdesc_ring,
            sc->txdesc_map);
    }
    if (sc->txdesc_tag != NULL)
        bus_dma_tag_destroy(sc->txdesc_tag);
}

/*
 * if_ functions
 */
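
/*
 * Drain the interface send queue into the TX ring.  Stop and set OACTIVE
 * when descriptors or buffer maps run short; kick the DMA engine with a
 * poll-demand write if anything was enqueued.
 */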
static void
dwc_txstart_locked(struct dwc_softc *sc)
{
    if_t ifp;
    struct mbuf *m;
    int enqueued;

    DWC_ASSERT_LOCKED(sc);

    if (!sc->link_is_up)
        return;

    ifp = sc->ifp;

    if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING)
        return;

    enqueued = 0;

    for (;;) {
        if (sc->tx_desccount > (TX_DESC_COUNT - TX_MAP_MAX_SEGS + 1)) {
            if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
            break;
        }

        if (sc->tx_mapcount == (TX_MAP_COUNT - 1)) {
            if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
            break;
        }

        m = if_dequeue(ifp);
        if (m == NULL)
            break;
        if (dwc_setup_txbuf(sc, sc->tx_map_head, &m) != 0) {
            if_sendq_prepend(ifp, m);
            if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
            break;
        }
        if_bpfmtap(ifp, m);
        sc->tx_map_head = next_txidx(sc, sc->tx_map_head);
        sc->tx_mapcount++;
        ++enqueued;
    }

    if (enqueued != 0) {
        WRITE4(sc, TRANSMIT_POLL_DEMAND, 0x1);
        sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
    }
}

static void
dwc_txstart(if_t ifp)
{
    struct dwc_softc *sc = if_getsoftc(ifp);

    DWC_LOCK(sc);
    dwc_txstart_locked(sc);
    DWC_UNLOCK(sc);
}

static void
dwc_init_locked(struct dwc_softc *sc)
{
    if_t ifp = sc->ifp;

    DWC_ASSERT_LOCKED(sc);

    if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
        return;

    /*
     * Call mii_mediachg() which will call back into dwc_miibus_statchg()
     * to set up the remaining config registers based on current media.
     */
    mii_mediachg(sc->mii_softc);

    dwc_setup_rxfilter(sc);
    dwc_setup_core(sc);
    dwc_enable_mac(sc, true);
    dwc_enable_csum_offload(sc);
    dwc_init_dma(sc);

    if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

    callout_reset(&sc->dwc_callout, hz, dwc_tick, sc);
}

static void
dwc_init(void *if_softc)
{
    struct dwc_softc *sc = if_softc;

    DWC_LOCK(sc);
    dwc_init_locked(sc);
    DWC_UNLOCK(sc);
}

static void
dwc_stop_locked(struct dwc_softc *sc)
{
    if_t ifp;

    DWC_ASSERT_LOCKED(sc);

    ifp = sc->ifp;
    if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
    sc->tx_watchdog_count = 0;
    sc->stats_harvest_count = 0;

    callout_stop(&sc->dwc_callout);

    dwc_stop_dma(sc);
    dwc_enable_mac(sc, false);
}

static int
dwc_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
    struct dwc_softc *sc;
    struct mii_data *mii;
    struct ifreq *ifr;
    int flags, mask, error;

    sc = if_getsoftc(ifp);
    ifr = (struct ifreq *)data;

    error = 0;
    switch (cmd) {
    case SIOCSIFFLAGS:
        DWC_LOCK(sc);
        if (if_getflags(ifp) & IFF_UP) {
            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
                flags = if_getflags(ifp) ^ sc->if_flags;
                if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
                    dwc_setup_rxfilter(sc);
            } else {
                if (!sc->is_detaching)
                    dwc_init_locked(sc);
            }
        } else {
            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
                dwc_stop_locked(sc);
        }
        sc->if_flags = if_getflags(ifp);
        DWC_UNLOCK(sc);
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
            DWC_LOCK(sc);
            dwc_setup_rxfilter(sc);
            DWC_UNLOCK(sc);
        }
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        mii = sc->mii_softc;
        error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
        break;
    case SIOCSIFCAP:
        mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
        if (mask & IFCAP_VLAN_MTU) {
            /* No work to do except acknowledge the change took */
            if_togglecapenable(ifp, IFCAP_VLAN_MTU);
        }
        if (mask & IFCAP_RXCSUM)
            if_togglecapenable(ifp, IFCAP_RXCSUM);
        if (mask & IFCAP_TXCSUM)
            if_togglecapenable(ifp, IFCAP_TXCSUM);
        if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
            if_sethwassistbits(ifp, CSUM_IP | CSUM_UDP | CSUM_TCP, 0);
        else
            if_sethwassistbits(ifp, 0, CSUM_IP | CSUM_UDP | CSUM_TCP);

        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
            DWC_LOCK(sc);
            dwc_enable_csum_offload(sc);
            DWC_UNLOCK(sc);
        }
        break;

    default:
        error = ether_ioctl(ifp, cmd, data);
        break;
    }

    return (error);
}

/*
 * Interrupt functions
 */
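
/*
 * Reclaim completed TX descriptors.  A packet's buffer map is finished only
 * when the hardware has released (cleared OWN in) every descriptor of that
 * map; then the mbuf is freed, the descriptors are cleared for reuse, and
 * OACTIVE is lifted so transmission can resume.
 */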
static void
dwc_txfinish_locked(struct dwc_softc *sc)
{
    struct dwc_bufmap *bmap;
    struct dwc_hwdesc *desc;
    if_t ifp;
    int idx, last_idx;
    bool map_finished;

    DWC_ASSERT_LOCKED(sc);

    ifp = sc->ifp;
    /* check if all descriptors of the map are done */
    while (sc->tx_map_tail != sc->tx_map_head) {
        map_finished = true;
        bmap = &sc->txbuf_map[sc->tx_map_tail];
        idx = sc->tx_desc_tail;
        last_idx = next_txidx(sc, bmap->last_desc_idx);
        while (idx != last_idx) {
            desc = &sc->txdesc_ring[idx];
            if ((desc->desc0 & TDESC0_OWN) != 0) {
                map_finished = false;
                break;
            }
            idx = next_txidx(sc, idx);
        }

        if (!map_finished)
            break;
        bus_dmamap_sync(sc->txbuf_tag, bmap->map,
            BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->txbuf_tag, bmap->map);
        m_freem(bmap->mbuf);
        bmap->mbuf = NULL;
        sc->tx_mapcount--;
        while (sc->tx_desc_tail != last_idx) {
            dwc_setup_txdesc(sc, sc->tx_desc_tail, 0, 0, 0,
                false, false);
            sc->tx_desc_tail = next_txidx(sc, sc->tx_desc_tail);
        }
        sc->tx_map_tail = next_txidx(sc, sc->tx_map_tail);
        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
        if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
    }

    /* If there are no buffers outstanding, muzzle the watchdog. */
    if (sc->tx_desc_tail == sc->tx_desc_head) {
        sc->tx_watchdog_count = 0;
    }
}

static void
dwc_rxfinish_locked(struct dwc_softc *sc)
{
    struct mbuf *m;
    int error, idx;
    struct dwc_hwdesc *desc;

    DWC_ASSERT_LOCKED(sc);
    for (;;) {
        idx = sc->rx_idx;
        desc = sc->rxdesc_ring + idx;
        if ((desc->desc0 & RDESC0_OWN) != 0)
            break;

        m = dwc_rxfinish_one(sc, desc, sc->rxbuf_map + idx);
        if (m == NULL) {
            wmb();
            desc->desc0 = RDESC0_OWN;
            wmb();
        } else {
            /* We cannot create a hole in the RX ring */
            error = dwc_setup_rxbuf(sc, idx, m);
            if (error != 0)
                panic("dwc_setup_rxbuf failed: error %d\n",
                    error);
        }
        sc->rx_idx = next_rxidx(sc, sc->rx_idx);
    }
}

static void
dwc_intr(void *arg)
{
    struct dwc_softc *sc;
    uint32_t reg;

    sc = arg;

    DWC_LOCK(sc);

    reg = READ4(sc, INTERRUPT_STATUS);
    if (reg)
        READ4(sc, SGMII_RGMII_SMII_CTRL_STATUS);

    reg = READ4(sc, DMA_STATUS);
    if (reg & DMA_STATUS_NIS) {
        if (reg & DMA_STATUS_RI)
            dwc_rxfinish_locked(sc);

        if (reg & DMA_STATUS_TI) {
            dwc_txfinish_locked(sc);
            dwc_txstart_locked(sc);
        }
    }

    if (reg & DMA_STATUS_AIS) {
        if (reg & DMA_STATUS_FBI) {
            /* Fatal bus error */
            device_printf(sc->dev,
                "Ethernet DMA error, restarting controller.\n");
            dwc_stop_locked(sc);
            dwc_init_locked(sc);
        }
    }

    WRITE4(sc, DMA_STATUS, reg & DMA_STATUS_INTR_MASK);
    DWC_UNLOCK(sc);
}

/*
 * Stats
 */
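
/*
 * The MMC block keeps the hardware statistics counters; setting the CNTRST
 * bit resets them all, which dwc_harvest_stats() relies on after folding
 * the counts into the interface counters.
 */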
static void
dwc_clear_stats(struct dwc_softc *sc)
{
    uint32_t reg;

    reg = READ4(sc, MMC_CONTROL);
    reg |= (MMC_CONTROL_CNTRST);
    WRITE4(sc, MMC_CONTROL, reg);
}

static void
dwc_harvest_stats(struct dwc_softc *sc)
{
    if_t ifp;

    /* We don't need to harvest too often. */
    if (++sc->stats_harvest_count < STATS_HARVEST_INTERVAL)
        return;

    sc->stats_harvest_count = 0;
    ifp = sc->ifp;

    if_inc_counter(ifp, IFCOUNTER_IERRORS,
        READ4(sc, RXOVERSIZE_G) + READ4(sc, RXUNDERSIZE_G) +
        READ4(sc, RXCRCERROR) + READ4(sc, RXALIGNMENTERROR) +
        READ4(sc, RXRUNTERROR) + READ4(sc, RXJABBERERROR) +
        READ4(sc, RXLENGTHERROR));

    if_inc_counter(ifp, IFCOUNTER_OERRORS,
        READ4(sc, TXOVERSIZE_G) + READ4(sc, TXEXCESSDEF) +
        READ4(sc, TXCARRIERERR) + READ4(sc, TXUNDERFLOWERROR));

    if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
        READ4(sc, TXEXESSCOL) + READ4(sc, TXLATECOL));

    dwc_clear_stats(sc);
}

static void
dwc_tick(void *arg)
{
    struct dwc_softc *sc;
    if_t ifp;
    int link_was_up;

    sc = arg;

    DWC_ASSERT_LOCKED(sc);

    ifp = sc->ifp;

    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
        return;

    /*
     * Typical tx watchdog.  If this fires it indicates that we enqueued
     * packets for output and never got a txdone interrupt for them.
     * Maybe it's a missed interrupt somehow, just pretend we got one.
     */
    if (sc->tx_watchdog_count > 0) {
        if (--sc->tx_watchdog_count == 0) {
            dwc_txfinish_locked(sc);
        }
    }

    /* Gather stats from hardware counters. */
    dwc_harvest_stats(sc);

    /* Check the media status. */
    link_was_up = sc->link_is_up;
    mii_tick(sc->mii_softc);
    if (sc->link_is_up && !link_was_up)
        dwc_txstart_locked(sc);

    /* Schedule another check one second from now. */
    callout_reset(&sc->dwc_callout, hz, dwc_tick, sc);
}

/*
 * Probe/Attach functions
 */

#define GPIO_ACTIVE_LOW 1

static int
dwc_reset(device_t dev)
{
    pcell_t gpio_prop[4];
    pcell_t delay_prop[3];
    phandle_t node, gpio_node;
    device_t gpio;
    uint32_t pin, flags;
    uint32_t pin_value;

    node = ofw_bus_get_node(dev);
    if (OF_getencprop(node, "snps,reset-gpio",
        gpio_prop, sizeof(gpio_prop)) <= 0)
        return (0);

    if (OF_getencprop(node, "snps,reset-delays-us",
        delay_prop, sizeof(delay_prop)) <= 0) {
        device_printf(dev,
            "Wrong property for snps,reset-delays-us\n");
        return (ENXIO);
    }

    gpio_node = OF_node_from_xref(gpio_prop[0]);
    if ((gpio = OF_device_from_xref(gpio_prop[0])) == NULL) {
        device_printf(dev,
            "Can't find gpio controller for phy reset\n");
        return (ENXIO);
    }

    if (GPIO_MAP_GPIOS(gpio, node, gpio_node,
        nitems(gpio_prop) - 1,
        gpio_prop + 1, &pin, &flags) != 0) {
        device_printf(dev, "Can't map gpio for phy reset\n");
        return (ENXIO);
    }

    pin_value = GPIO_PIN_LOW;
    if (OF_hasprop(node, "snps,reset-active-low"))
        pin_value = GPIO_PIN_HIGH;

    GPIO_PIN_SETFLAGS(gpio, pin, GPIO_PIN_OUTPUT);
    GPIO_PIN_SET(gpio, pin, pin_value);
    DELAY(delay_prop[0] * 5);
    GPIO_PIN_SET(gpio, pin, !pin_value);
    DELAY(delay_prop[1] * 5);
    GPIO_PIN_SET(gpio, pin, pin_value);
    DELAY(delay_prop[2] * 5);

    return (0);
}
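
/*
 * Enable the controller clock and de-assert the hardware reset, both looked
 * up by the "stmmaceth" name used in the devicetree bindings.  Either one
 * may legitimately be absent on a given platform.
 */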
static int
dwc_clock_init(device_t dev)
{
    hwreset_t rst;
    clk_t clk;
    int error;
    int64_t freq;

    /* Enable clocks */
    if (clk_get_by_ofw_name(dev, 0, "stmmaceth", &clk) == 0) {
        error = clk_enable(clk);
        if (error != 0) {
            device_printf(dev, "could not enable main clock\n");
            return (error);
        }
        if (bootverbose) {
            clk_get_freq(clk, &freq);
            device_printf(dev, "MAC clock(%s) freq: %jd\n",
                clk_get_name(clk), (intmax_t)freq);
        }
    } else {
        device_printf(dev, "could not find clock stmmaceth\n");
    }

    /* De-assert reset */
    if (hwreset_get_by_ofw_name(dev, 0, "stmmaceth", &rst) == 0) {
        error = hwreset_deassert(rst);
        if (error != 0) {
            device_printf(dev, "could not de-assert reset\n");
            return (error);
        }
    }

    return (0);
}

static int
dwc_probe(device_t dev)
{

    if (!ofw_bus_status_okay(dev))
        return (ENXIO);

    if (!ofw_bus_is_compatible(dev, "snps,dwmac"))
        return (ENXIO);

    device_set_desc(dev, "Gigabit Ethernet Controller");
    return (BUS_PROBE_DEFAULT);
}
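
/*
 * Attach: read the devicetree configuration, reset the MAC, program the DMA
 * bus mode, allocate the descriptor rings, and hook up the interrupt, the
 * ifnet, and the MII bus.
 */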
static int
dwc_attach(device_t dev)
{
    uint8_t macaddr[ETHER_ADDR_LEN];
    struct dwc_softc *sc;
    if_t ifp;
    int error, i;
    uint32_t reg;
    phandle_t node;
    uint32_t txpbl, rxpbl, pbl;
    bool nopblx8 = false;
    bool fixed_burst = false;

    sc = device_get_softc(dev);
    sc->dev = dev;
    sc->rx_idx = 0;
    sc->tx_desccount = TX_DESC_COUNT;
    sc->tx_mapcount = 0;
    sc->mii_clk = IF_DWC_MII_CLK(dev);
    sc->mactype = IF_DWC_MAC_TYPE(dev);

    node = ofw_bus_get_node(dev);
    switch (mii_fdt_get_contype(node)) {
    case MII_CONTYPE_RGMII:
    case MII_CONTYPE_RGMII_ID:
    case MII_CONTYPE_RGMII_RXID:
    case MII_CONTYPE_RGMII_TXID:
        sc->phy_mode = PHY_MODE_RGMII;
        break;
    case MII_CONTYPE_RMII:
        sc->phy_mode = PHY_MODE_RMII;
        break;
    case MII_CONTYPE_MII:
        sc->phy_mode = PHY_MODE_MII;
        break;
    default:
        device_printf(dev, "Unsupported MII type\n");
        return (ENXIO);
    }

    if (OF_getencprop(node, "snps,pbl", &pbl, sizeof(uint32_t)) <= 0)
        pbl = BUS_MODE_DEFAULT_PBL;
    if (OF_getencprop(node, "snps,txpbl", &txpbl, sizeof(uint32_t)) <= 0)
        txpbl = pbl;
    if (OF_getencprop(node, "snps,rxpbl", &rxpbl, sizeof(uint32_t)) <= 0)
        rxpbl = pbl;
    if (OF_hasprop(node, "snps,no-pbl-x8") == 1)
        nopblx8 = true;
    if (OF_hasprop(node, "snps,fixed-burst") == 1)
        fixed_burst = true;

    if (IF_DWC_INIT(dev) != 0)
        return (ENXIO);

    if (dwc_clock_init(dev) != 0)
        return (ENXIO);

    if (bus_alloc_resources(dev, dwc_spec, sc->res)) {
        device_printf(dev, "could not allocate resources\n");
        return (ENXIO);
    }

    /* Read MAC before reset */
    dwc_get_hwaddr(sc, macaddr);

    /* Reset the PHY if needed */
    if (dwc_reset(dev) != 0) {
        device_printf(dev, "Can't reset the PHY\n");
        bus_release_resources(dev, dwc_spec, sc->res);
        return (ENXIO);
    }

    /* Reset */
    reg = READ4(sc, BUS_MODE);
    reg |= (BUS_MODE_SWR);
    WRITE4(sc, BUS_MODE, reg);

    for (i = 0; i < MAC_RESET_TIMEOUT; i++) {
        if ((READ4(sc, BUS_MODE) & BUS_MODE_SWR) == 0)
            break;
        DELAY(10);
    }
    if (i >= MAC_RESET_TIMEOUT) {
        device_printf(sc->dev, "Can't reset DWC.\n");
        bus_release_resources(dev, dwc_spec, sc->res);
        return (ENXIO);
    }

    reg = BUS_MODE_USP;
    if (!nopblx8)
        reg |= BUS_MODE_EIGHTXPBL;
    reg |= (txpbl << BUS_MODE_PBL_SHIFT);
    reg |= (rxpbl << BUS_MODE_RPBL_SHIFT);
    if (fixed_burst)
        reg |= BUS_MODE_FIXEDBURST;

    WRITE4(sc, BUS_MODE, reg);

    /*
     * DMA must be stopped while changing descriptor list addresses.
     */
    reg = READ4(sc, OPERATION_MODE);
    reg &= ~(MODE_ST | MODE_SR);
    WRITE4(sc, OPERATION_MODE, reg);

    if (setup_dma(sc)) {
        bus_release_resources(dev, dwc_spec, sc->res);
        return (ENXIO);
    }

    /* Setup addresses */
    WRITE4(sc, RX_DESCR_LIST_ADDR, sc->rxdesc_ring_paddr);
    WRITE4(sc, TX_DESCR_LIST_ADDR, sc->txdesc_ring_paddr);

    mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
        MTX_NETWORK_LOCK, MTX_DEF);

    callout_init_mtx(&sc->dwc_callout, &sc->mtx, 0);

    /* Setup interrupt handler. */
    error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
        NULL, dwc_intr, sc, &sc->intr_cookie);
    if (error != 0) {
        device_printf(dev, "could not setup interrupt handler.\n");
        bus_release_resources(dev, dwc_spec, sc->res);
        return (ENXIO);
    }

    /* Set up the ethernet interface. */
    sc->ifp = ifp = if_alloc(IFT_ETHER);

    if_setsoftc(ifp, sc);
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
    if_setstartfn(ifp, dwc_txstart);
    if_setioctlfn(ifp, dwc_ioctl);
    if_setinitfn(ifp, dwc_init);
    if_setsendqlen(ifp, TX_MAP_COUNT - 1);
    if_setsendqready(sc->ifp);
    if_sethwassist(sc->ifp, CSUM_IP | CSUM_UDP | CSUM_TCP);
    if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM);
    if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));

    /* Attach the mii driver. */
    error = mii_attach(dev, &sc->miibus, ifp, dwc_media_change,
        dwc_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY,
        MII_OFFSET_ANY, 0);
    if (error != 0) {
        device_printf(dev, "PHY attach failed\n");
        bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
        bus_release_resources(dev, dwc_spec, sc->res);
        return (ENXIO);
    }
    sc->mii_softc = device_get_softc(sc->miibus);

    /* All ready to run, attach the ethernet interface. */
    ether_ifattach(ifp, macaddr);
    sc->is_attached = true;

    return (0);
}

static int
dwc_detach(device_t dev)
{
    struct dwc_softc *sc;

    sc = device_get_softc(dev);

    /*
     * Disable and tear down interrupts before anything else, so we don't
     * race with the handler.
     */
    WRITE4(sc, INTERRUPT_ENABLE, 0);
    if (sc->intr_cookie != NULL) {
        bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
    }

    if (sc->is_attached) {
        DWC_LOCK(sc);
        sc->is_detaching = true;
        dwc_stop_locked(sc);
        DWC_UNLOCK(sc);
        callout_drain(&sc->dwc_callout);
        ether_ifdetach(sc->ifp);
    }

    if (sc->miibus != NULL) {
        device_delete_child(dev, sc->miibus);
        sc->miibus = NULL;
    }
    bus_generic_detach(dev);

    /* Free DMA descriptors */
    free_dma(sc);

    if (sc->ifp != NULL) {
        if_free(sc->ifp);
        sc->ifp = NULL;
    }

    bus_release_resources(dev, dwc_spec, sc->res);

    mtx_destroy(&sc->mtx);
    return (0);
}

static device_method_t dwc_methods[] = {
    DEVMETHOD(device_probe,     dwc_probe),
    DEVMETHOD(device_attach,    dwc_attach),
    DEVMETHOD(device_detach,    dwc_detach),

    /* MII Interface */
    DEVMETHOD(miibus_readreg,   dwc_miibus_read_reg),
    DEVMETHOD(miibus_writereg,  dwc_miibus_write_reg),
    DEVMETHOD(miibus_statchg,   dwc_miibus_statchg),

    { 0, 0 }
};

driver_t dwc_driver = {
    "dwc",
    dwc_methods,
    sizeof(struct dwc_softc),
};

DRIVER_MODULE(dwc, simplebus, dwc_driver, 0, 0);
DRIVER_MODULE(miibus, dwc, miibus_driver, 0, 0);

MODULE_DEPEND(dwc, ether, 1, 1, 1);
MODULE_DEPEND(dwc, miibus, 1, 1, 1);