1 /*- 2 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com> 3 * All rights reserved. 4 * 5 * This software was developed by SRI International and the University of 6 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) 7 * ("CTSRD"), as part of the DARPA CRASH research programme. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 /* 32 * Ethernet media access controller (EMAC) 33 * Chapter 17, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22) 34 * 35 * EMAC is an instance of the Synopsys DesignWare 3504-0 36 * Universal 10/100/1000 Ethernet MAC (DWC_gmac). 37 */ 38 39 #include <sys/cdefs.h> 40 __FBSDID("$FreeBSD$"); 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/bus.h> 45 #include <sys/gpio.h> 46 #include <sys/kernel.h> 47 #include <sys/lock.h> 48 #include <sys/malloc.h> 49 #include <sys/mbuf.h> 50 #include <sys/module.h> 51 #include <sys/mutex.h> 52 #include <sys/rman.h> 53 #include <sys/socket.h> 54 #include <sys/sockio.h> 55 56 #include <net/bpf.h> 57 #include <net/if.h> 58 #include <net/ethernet.h> 59 #include <net/if_dl.h> 60 #include <net/if_media.h> 61 #include <net/if_types.h> 62 #include <net/if_var.h> 63 64 #include <machine/bus.h> 65 66 #include <dev/dwc/if_dwc.h> 67 #include <dev/dwc/if_dwcvar.h> 68 #include <dev/mii/mii.h> 69 #include <dev/mii/miivar.h> 70 #include <dev/ofw/ofw_bus.h> 71 #include <dev/ofw/ofw_bus_subr.h> 72 73 #ifdef EXT_RESOURCES 74 #include <dev/extres/clk/clk.h> 75 #include <dev/extres/hwreset/hwreset.h> 76 #endif 77 78 #include "if_dwc_if.h" 79 #include "gpio_if.h" 80 #include "miibus_if.h" 81 82 #define READ4(_sc, _reg) \ 83 bus_read_4((_sc)->res[0], _reg) 84 #define WRITE4(_sc, _reg, _val) \ 85 bus_write_4((_sc)->res[0], _reg, _val) 86 87 #define MAC_RESET_TIMEOUT 100 88 #define WATCHDOG_TIMEOUT_SECS 5 89 #define STATS_HARVEST_INTERVAL 2 90 91 #define DWC_LOCK(sc) mtx_lock(&(sc)->mtx) 92 #define DWC_UNLOCK(sc) mtx_unlock(&(sc)->mtx) 93 #define DWC_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED) 94 #define DWC_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED) 95 96 /* TX descriptors - TDESC0 is almost unified */ 97 #define TDESC0_OWN (1U << 31) 98 #define TDESC0_IHE (1U << 16) /* IP Header Error */ 99 #define TDESC0_ES (1U << 15) /* Error Summary */ 100 #define TDESC0_JT (1U << 14) /* Jabber Timeout */ 101 #define TDESC0_FF (1U << 13) /* Frame Flushed */ 102 #define TDESC0_PCE (1U << 12) /* Payload Checksum Error */ 103 #define TDESC0_LOC (1U << 11) /* Loss of Carrier */ 104 #define TDESC0_NC (1U << 10) /* No Carrier */ 105 #define TDESC0_LC (1U << 9) /* Late Collision */ 106 #define TDESC0_EC (1U << 8) /* Excessive Collision */ 107 #define TDESC0_VF (1U << 7) /* VLAN Frame */ 108 #define TDESC0_CC_MASK 0xf 109 #define TDESC0_CC_SHIFT 3 /* Collision Count */ 110 #define TDESC0_ED (1U << 2) /* Excessive Deferral */ 111 #define TDESC0_UF (1U << 1) /* Underflow Error */ 112 #define TDESC0_DB (1U << 0) /* Deferred Bit */ 113 /* TX descriptors - TDESC0 extended format only */ 114 #define ETDESC0_IC (1U << 30) /* Interrupt on Completion */ 115 #define ETDESC0_LS (1U << 29) /* Last Segment */ 116 #define ETDESC0_FS (1U << 28) /* First Segment */ 117 #define ETDESC0_DC (1U << 27) /* Disable CRC */ 118 #define ETDESC0_DP (1U << 26) /* Disable Padding */ 119 #define ETDESC0_CIC_NONE (0U << 22) /* Checksum Insertion Control */ 120 #define ETDESC0_CIC_HDR (1U << 22) 121 #define ETDESC0_CIC_SEG (2U << 22) 122 #define ETDESC0_CIC_FULL (3U << 22) 123 #define ETDESC0_TER (1U << 21) /* Transmit End of Ring */ 124 #define ETDESC0_TCH (1U << 20) /* Second Address Chained */ 125 126 /* TX descriptors - TDESC1 normal format */ 127 #define NTDESC1_IC (1U << 31) /* Interrupt on Completion */ 128 #define NTDESC1_LS (1U << 30) /* Last Segment */ 129 #define NTDESC1_FS (1U << 29) /* First Segment */ 130 #define NTDESC1_CIC_NONE (0U << 27) /* Checksum Insertion Control */ 131 #define NTDESC1_CIC_HDR (1U << 27) 132 #define NTDESC1_CIC_SEG (2U << 27) 133 #define NTDESC1_CIC_FULL (3U << 27) 134 #define NTDESC1_DC (1U << 26) /* Disable CRC */ 135 #define NTDESC1_TER (1U << 25) /* Transmit End of Ring */ 136 #define NTDESC1_TCH (1U << 24) /* Second Address Chained */ 137 /* TX descriptors - TDESC1 extended format */ 138 #define ETDESC1_DP (1U << 23) /* Disable Padding */ 139 #define ETDESC1_TBS2_MASK 0x7ff 140 #define ETDESC1_TBS2_SHIFT 11 /* Receive Buffer 2 Size */ 141 #define ETDESC1_TBS1_MASK 0x7ff 142 #define ETDESC1_TBS1_SHIFT 0 /* Receive Buffer 1 Size */ 143 144 /* RX descriptor - RDESC0 is unified */ 145 #define RDESC0_OWN (1U << 31) 146 #define RDESC0_AFM (1U << 30) /* Dest. Address Filter Fail */ 147 #define RDESC0_FL_MASK 0x3fff 148 #define RDESC0_FL_SHIFT 16 /* Frame Length */ 149 #define RDESC0_ES (1U << 15) /* Error Summary */ 150 #define RDESC0_DE (1U << 14) /* Descriptor Error */ 151 #define RDESC0_SAF (1U << 13) /* Source Address Filter Fail */ 152 #define RDESC0_LE (1U << 12) /* Length Error */ 153 #define RDESC0_OE (1U << 11) /* Overflow Error */ 154 #define RDESC0_VLAN (1U << 10) /* VLAN Tag */ 155 #define RDESC0_FS (1U << 9) /* First Descriptor */ 156 #define RDESC0_LS (1U << 8) /* Last Descriptor */ 157 #define RDESC0_ICE (1U << 7) /* IPC Checksum Error */ 158 #define RDESC0_LC (1U << 6) /* Late Collision */ 159 #define RDESC0_FT (1U << 5) /* Frame Type */ 160 #define RDESC0_RWT (1U << 4) /* Receive Watchdog Timeout */ 161 #define RDESC0_RE (1U << 3) /* Receive Error */ 162 #define RDESC0_DBE (1U << 2) /* Dribble Bit Error */ 163 #define RDESC0_CE (1U << 1) /* CRC Error */ 164 #define RDESC0_PCE (1U << 0) /* Payload Checksum Error */ 165 #define RDESC0_RXMA (1U << 0) /* Rx MAC Address */ 166 167 /* RX descriptors - RDESC1 normal format */ 168 #define NRDESC1_DIC (1U << 31) /* Disable Intr on Completion */ 169 #define NRDESC1_RER (1U << 25) /* Receive End of Ring */ 170 #define NRDESC1_RCH (1U << 24) /* Second Address Chained */ 171 #define NRDESC1_RBS2_MASK 0x7ff 172 #define NRDESC1_RBS2_SHIFT 11 /* Receive Buffer 2 Size */ 173 #define NRDESC1_RBS1_MASK 0x7ff 174 #define NRDESC1_RBS1_SHIFT 0 /* Receive Buffer 1 Size */ 175 176 /* RX descriptors - RDESC1 enhanced format */ 177 #define ERDESC1_DIC (1U << 31) /* Disable Intr on Completion */ 178 #define ERDESC1_RBS2_MASK 0x7ffff 179 #define ERDESC1_RBS2_SHIFT 16 /* Receive Buffer 2 Size */ 180 #define ERDESC1_RER (1U << 15) /* Receive End of Ring */ 181 #define ERDESC1_RCH (1U << 14) /* Second Address Chained */ 182 #define ERDESC1_RBS1_MASK 0x7ffff 183 #define ERDESC1_RBS1_SHIFT 0 /* Receive Buffer 1 Size */ 184 185 /* 186 * A hardware buffer descriptor. Rx and Tx buffers have the same descriptor 187 * layout, but the bits in the fields have different meanings. 188 */ 189 struct dwc_hwdesc 190 { 191 uint32_t desc0; 192 uint32_t desc1; 193 uint32_t addr1; /* ptr to first buffer data */ 194 uint32_t addr2; /* ptr to next descriptor / second buffer data*/ 195 }; 196 197 198 struct dwc_hash_maddr_ctx { 199 struct dwc_softc *sc; 200 uint32_t hash[8]; 201 }; 202 203 /* 204 * The hardware imposes alignment restrictions on various objects involved in 205 * DMA transfers. These values are expressed in bytes (not bits). 206 */ 207 #define DWC_DESC_RING_ALIGN 2048 208 209 static struct resource_spec dwc_spec[] = { 210 { SYS_RES_MEMORY, 0, RF_ACTIVE }, 211 { SYS_RES_IRQ, 0, RF_ACTIVE }, 212 { -1, 0 } 213 }; 214 215 static void dwc_txfinish_locked(struct dwc_softc *sc); 216 static void dwc_rxfinish_locked(struct dwc_softc *sc); 217 static void dwc_stop_locked(struct dwc_softc *sc); 218 static void dwc_setup_rxfilter(struct dwc_softc *sc); 219 static void dwc_setup_core(struct dwc_softc *sc); 220 static void dwc_enable_mac(struct dwc_softc *sc, bool enable); 221 static void dwc_init_dma(struct dwc_softc *sc); 222 static void dwc_stop_dma(struct dwc_softc *sc); 223 224 static void dwc_tick(void *arg); 225 226 /* Pause time field in the transmitted control frame */ 227 static int dwc_pause_time = 0xffff; 228 TUNABLE_INT("hw.dwc.pause_time", &dwc_pause_time); 229 230 /* 231 * MIIBUS functions 232 */ 233 234 static int 235 dwc_miibus_read_reg(device_t dev, int phy, int reg) 236 { 237 struct dwc_softc *sc; 238 uint16_t mii; 239 size_t cnt; 240 int rv = 0; 241 242 sc = device_get_softc(dev); 243 244 mii = ((phy & GMII_ADDRESS_PA_MASK) << GMII_ADDRESS_PA_SHIFT) 245 | ((reg & GMII_ADDRESS_GR_MASK) << GMII_ADDRESS_GR_SHIFT) 246 | (sc->mii_clk << GMII_ADDRESS_CR_SHIFT) 247 | GMII_ADDRESS_GB; /* Busy flag */ 248 249 WRITE4(sc, GMII_ADDRESS, mii); 250 251 for (cnt = 0; cnt < 1000; cnt++) { 252 if (!(READ4(sc, GMII_ADDRESS) & GMII_ADDRESS_GB)) { 253 rv = READ4(sc, GMII_DATA); 254 break; 255 } 256 DELAY(10); 257 } 258 259 return rv; 260 } 261 262 static int 263 dwc_miibus_write_reg(device_t dev, int phy, int reg, int val) 264 { 265 struct dwc_softc *sc; 266 uint16_t mii; 267 size_t cnt; 268 269 sc = device_get_softc(dev); 270 271 mii = ((phy & GMII_ADDRESS_PA_MASK) << GMII_ADDRESS_PA_SHIFT) 272 | ((reg & GMII_ADDRESS_GR_MASK) << GMII_ADDRESS_GR_SHIFT) 273 | (sc->mii_clk << GMII_ADDRESS_CR_SHIFT) 274 | GMII_ADDRESS_GB | GMII_ADDRESS_GW; 275 276 WRITE4(sc, GMII_DATA, val); 277 WRITE4(sc, GMII_ADDRESS, mii); 278 279 for (cnt = 0; cnt < 1000; cnt++) { 280 if (!(READ4(sc, GMII_ADDRESS) & GMII_ADDRESS_GB)) { 281 break; 282 } 283 DELAY(10); 284 } 285 286 return (0); 287 } 288 289 static void 290 dwc_miibus_statchg(device_t dev) 291 { 292 struct dwc_softc *sc; 293 struct mii_data *mii; 294 uint32_t reg; 295 296 /* 297 * Called by the MII bus driver when the PHY establishes 298 * link to set the MAC interface registers. 299 */ 300 301 sc = device_get_softc(dev); 302 303 DWC_ASSERT_LOCKED(sc); 304 305 mii = sc->mii_softc; 306 307 if (mii->mii_media_status & IFM_ACTIVE) 308 sc->link_is_up = true; 309 else 310 sc->link_is_up = false; 311 312 reg = READ4(sc, MAC_CONFIGURATION); 313 switch (IFM_SUBTYPE(mii->mii_media_active)) { 314 case IFM_1000_T: 315 case IFM_1000_SX: 316 reg &= ~(CONF_FES | CONF_PS); 317 break; 318 case IFM_100_TX: 319 reg |= (CONF_FES | CONF_PS); 320 break; 321 case IFM_10_T: 322 reg &= ~(CONF_FES); 323 reg |= (CONF_PS); 324 break; 325 case IFM_NONE: 326 sc->link_is_up = false; 327 return; 328 default: 329 sc->link_is_up = false; 330 device_printf(dev, "Unsupported media %u\n", 331 IFM_SUBTYPE(mii->mii_media_active)); 332 return; 333 } 334 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) 335 reg |= (CONF_DM); 336 else 337 reg &= ~(CONF_DM); 338 WRITE4(sc, MAC_CONFIGURATION, reg); 339 340 reg = FLOW_CONTROL_UP; 341 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 342 reg |= FLOW_CONTROL_TX; 343 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 344 reg |= FLOW_CONTROL_RX; 345 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) 346 reg |= dwc_pause_time << FLOW_CONTROL_PT_SHIFT; 347 WRITE4(sc, FLOW_CONTROL, reg); 348 349 IF_DWC_SET_SPEED(dev, IFM_SUBTYPE(mii->mii_media_active)); 350 351 } 352 353 /* 354 * Media functions 355 */ 356 357 static void 358 dwc_media_status(struct ifnet * ifp, struct ifmediareq *ifmr) 359 { 360 struct dwc_softc *sc; 361 struct mii_data *mii; 362 363 sc = ifp->if_softc; 364 mii = sc->mii_softc; 365 DWC_LOCK(sc); 366 mii_pollstat(mii); 367 ifmr->ifm_active = mii->mii_media_active; 368 ifmr->ifm_status = mii->mii_media_status; 369 DWC_UNLOCK(sc); 370 } 371 372 static int 373 dwc_media_change_locked(struct dwc_softc *sc) 374 { 375 376 return (mii_mediachg(sc->mii_softc)); 377 } 378 379 static int 380 dwc_media_change(struct ifnet * ifp) 381 { 382 struct dwc_softc *sc; 383 int error; 384 385 sc = ifp->if_softc; 386 387 DWC_LOCK(sc); 388 error = dwc_media_change_locked(sc); 389 DWC_UNLOCK(sc); 390 return (error); 391 } 392 393 /* 394 * Core functions 395 */ 396 397 static const uint8_t nibbletab[] = { 398 /* 0x0 0000 -> 0000 */ 0x0, 399 /* 0x1 0001 -> 1000 */ 0x8, 400 /* 0x2 0010 -> 0100 */ 0x4, 401 /* 0x3 0011 -> 1100 */ 0xc, 402 /* 0x4 0100 -> 0010 */ 0x2, 403 /* 0x5 0101 -> 1010 */ 0xa, 404 /* 0x6 0110 -> 0110 */ 0x6, 405 /* 0x7 0111 -> 1110 */ 0xe, 406 /* 0x8 1000 -> 0001 */ 0x1, 407 /* 0x9 1001 -> 1001 */ 0x9, 408 /* 0xa 1010 -> 0101 */ 0x5, 409 /* 0xb 1011 -> 1101 */ 0xd, 410 /* 0xc 1100 -> 0011 */ 0x3, 411 /* 0xd 1101 -> 1011 */ 0xb, 412 /* 0xe 1110 -> 0111 */ 0x7, 413 /* 0xf 1111 -> 1111 */ 0xf, }; 414 415 static uint8_t 416 bitreverse(uint8_t x) 417 { 418 419 return (nibbletab[x & 0xf] << 4) | nibbletab[x >> 4]; 420 } 421 422 static u_int 423 dwc_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 424 { 425 struct dwc_hash_maddr_ctx *ctx = arg; 426 uint32_t crc, hashbit, hashreg; 427 uint8_t val; 428 429 crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN); 430 /* Take lower 8 bits and reverse it */ 431 val = bitreverse(~crc & 0xff); 432 if (ctx->sc->mactype != DWC_GMAC_EXT_DESC) 433 val >>= 2; /* Only need lower 6 bits */ 434 hashreg = (val >> 5); 435 hashbit = (val & 31); 436 ctx->hash[hashreg] |= (1 << hashbit); 437 438 return (1); 439 } 440 441 static void 442 dwc_setup_rxfilter(struct dwc_softc *sc) 443 { 444 struct dwc_hash_maddr_ctx ctx; 445 struct ifnet *ifp; 446 uint8_t *eaddr; 447 uint32_t ffval, hi, lo; 448 int nhash, i; 449 450 DWC_ASSERT_LOCKED(sc); 451 452 ifp = sc->ifp; 453 nhash = sc->mactype != DWC_GMAC_EXT_DESC ? 2 : 8; 454 455 /* 456 * Set the multicast (group) filter hash. 457 */ 458 if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 459 ffval = (FRAME_FILTER_PM); 460 for (i = 0; i < nhash; i++) 461 ctx.hash[i] = ~0; 462 } else { 463 ffval = (FRAME_FILTER_HMC); 464 for (i = 0; i < nhash; i++) 465 ctx.hash[i] = 0; 466 ctx.sc = sc; 467 if_foreach_llmaddr(ifp, dwc_hash_maddr, &ctx); 468 } 469 470 /* 471 * Set the individual address filter hash. 472 */ 473 if (ifp->if_flags & IFF_PROMISC) 474 ffval |= (FRAME_FILTER_PR); 475 476 /* 477 * Set the primary address. 478 */ 479 eaddr = IF_LLADDR(ifp); 480 lo = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) | 481 (eaddr[3] << 24); 482 hi = eaddr[4] | (eaddr[5] << 8); 483 WRITE4(sc, MAC_ADDRESS_LOW(0), lo); 484 WRITE4(sc, MAC_ADDRESS_HIGH(0), hi); 485 WRITE4(sc, MAC_FRAME_FILTER, ffval); 486 if (sc->mactype != DWC_GMAC_EXT_DESC) { 487 WRITE4(sc, GMAC_MAC_HTLOW, ctx.hash[0]); 488 WRITE4(sc, GMAC_MAC_HTHIGH, ctx.hash[1]); 489 } else { 490 for (i = 0; i < nhash; i++) 491 WRITE4(sc, HASH_TABLE_REG(i), ctx.hash[i]); 492 } 493 } 494 495 static void 496 dwc_setup_core(struct dwc_softc *sc) 497 { 498 uint32_t reg; 499 500 DWC_ASSERT_LOCKED(sc); 501 502 /* Enable core */ 503 reg = READ4(sc, MAC_CONFIGURATION); 504 reg |= (CONF_JD | CONF_ACS | CONF_BE); 505 WRITE4(sc, MAC_CONFIGURATION, reg); 506 } 507 508 static void 509 dwc_enable_mac(struct dwc_softc *sc, bool enable) 510 { 511 uint32_t reg; 512 513 DWC_ASSERT_LOCKED(sc); 514 reg = READ4(sc, MAC_CONFIGURATION); 515 if (enable) 516 reg |= CONF_TE | CONF_RE; 517 else 518 reg &= ~(CONF_TE | CONF_RE); 519 WRITE4(sc, MAC_CONFIGURATION, reg); 520 } 521 522 static void 523 dwc_get_hwaddr(struct dwc_softc *sc, uint8_t *hwaddr) 524 { 525 uint32_t hi, lo, rnd; 526 527 /* 528 * Try to recover a MAC address from the running hardware. If there's 529 * something non-zero there, assume the bootloader did the right thing 530 * and just use it. 531 * 532 * Otherwise, set the address to a convenient locally assigned address, 533 * 'bsd' + random 24 low-order bits. 'b' is 0x62, which has the locally 534 * assigned bit set, and the broadcast/multicast bit clear. 535 */ 536 lo = READ4(sc, MAC_ADDRESS_LOW(0)); 537 hi = READ4(sc, MAC_ADDRESS_HIGH(0)) & 0xffff; 538 if ((lo != 0xffffffff) || (hi != 0xffff)) { 539 hwaddr[0] = (lo >> 0) & 0xff; 540 hwaddr[1] = (lo >> 8) & 0xff; 541 hwaddr[2] = (lo >> 16) & 0xff; 542 hwaddr[3] = (lo >> 24) & 0xff; 543 hwaddr[4] = (hi >> 0) & 0xff; 544 hwaddr[5] = (hi >> 8) & 0xff; 545 } else { 546 rnd = arc4random() & 0x00ffffff; 547 hwaddr[0] = 'b'; 548 hwaddr[1] = 's'; 549 hwaddr[2] = 'd'; 550 hwaddr[3] = rnd >> 16; 551 hwaddr[4] = rnd >> 8; 552 hwaddr[5] = rnd >> 0; 553 } 554 } 555 556 /* 557 * DMA functions 558 */ 559 560 static void 561 dwc_init_dma(struct dwc_softc *sc) 562 { 563 uint32_t reg; 564 565 DWC_ASSERT_LOCKED(sc); 566 567 /* Initializa DMA and enable transmitters */ 568 reg = READ4(sc, OPERATION_MODE); 569 reg |= (MODE_TSF | MODE_OSF | MODE_FUF); 570 reg &= ~(MODE_RSF); 571 reg |= (MODE_RTC_LEV32 << MODE_RTC_SHIFT); 572 WRITE4(sc, OPERATION_MODE, reg); 573 574 WRITE4(sc, INTERRUPT_ENABLE, INT_EN_DEFAULT); 575 576 /* Start DMA */ 577 reg = READ4(sc, OPERATION_MODE); 578 reg |= (MODE_ST | MODE_SR); 579 WRITE4(sc, OPERATION_MODE, reg); 580 } 581 582 static void 583 dwc_stop_dma(struct dwc_softc *sc) 584 { 585 uint32_t reg; 586 587 DWC_ASSERT_LOCKED(sc); 588 589 /* Stop DMA TX */ 590 reg = READ4(sc, OPERATION_MODE); 591 reg &= ~(MODE_ST); 592 WRITE4(sc, OPERATION_MODE, reg); 593 594 /* Flush TX */ 595 reg = READ4(sc, OPERATION_MODE); 596 reg |= (MODE_FTF); 597 WRITE4(sc, OPERATION_MODE, reg); 598 599 /* Stop DMA RX */ 600 reg = READ4(sc, OPERATION_MODE); 601 reg &= ~(MODE_SR); 602 WRITE4(sc, OPERATION_MODE, reg); 603 } 604 605 static inline uint32_t 606 next_rxidx(struct dwc_softc *sc, uint32_t curidx) 607 { 608 609 return ((curidx + 1) % RX_DESC_COUNT); 610 } 611 612 static inline uint32_t 613 next_txidx(struct dwc_softc *sc, uint32_t curidx) 614 { 615 616 return ((curidx + 1) % TX_DESC_COUNT); 617 } 618 619 static void 620 dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 621 { 622 623 if (error != 0) 624 return; 625 *(bus_addr_t *)arg = segs[0].ds_addr; 626 } 627 628 inline static void 629 dwc_setup_txdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr, 630 uint32_t len, uint32_t flags, bool first, bool last) 631 { 632 uint32_t desc0, desc1; 633 634 /* Addr/len 0 means we're clearing the descriptor after xmit done. */ 635 if (paddr == 0 || len == 0) { 636 desc0 = 0; 637 desc1 = 0; 638 --sc->tx_desccount; 639 } else { 640 if (sc->mactype != DWC_GMAC_EXT_DESC) { 641 desc0 = 0; 642 desc1 = NTDESC1_TCH | len | flags; 643 if (first) 644 desc1 |= NTDESC1_FS; 645 if (last) 646 desc1 |= NTDESC1_LS | NTDESC1_IC; 647 } else { 648 desc0 = ETDESC0_TCH | flags; 649 if (first) 650 desc0 |= ETDESC0_FS; 651 if (last) 652 desc0 |= ETDESC0_LS | ETDESC0_IC; 653 desc1 = len; 654 } 655 ++sc->tx_desccount; 656 } 657 658 sc->txdesc_ring[idx].addr1 = (uint32_t)(paddr); 659 sc->txdesc_ring[idx].desc0 = desc0; 660 sc->txdesc_ring[idx].desc1 = desc1; 661 } 662 663 inline static void 664 dwc_set_owner(struct dwc_softc *sc, int idx) 665 { 666 wmb(); 667 sc->txdesc_ring[idx].desc0 |= TDESC0_OWN; 668 wmb(); 669 } 670 671 static int 672 dwc_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp) 673 { 674 struct bus_dma_segment segs[TX_MAP_MAX_SEGS]; 675 int error, nsegs; 676 struct mbuf * m; 677 uint32_t flags = 0; 678 int i; 679 int first, last; 680 681 error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map, 682 *mp, segs, &nsegs, 0); 683 if (error == EFBIG) { 684 /* 685 * The map may be partially mapped from the first call. 686 * Make sure to reset it. 687 */ 688 bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map); 689 if ((m = m_defrag(*mp, M_NOWAIT)) == NULL) 690 return (ENOMEM); 691 *mp = m; 692 error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map, 693 *mp, segs, &nsegs, 0); 694 } 695 if (error != 0) 696 return (ENOMEM); 697 698 if (sc->tx_desccount + nsegs > TX_DESC_COUNT) { 699 bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map); 700 return (ENOMEM); 701 } 702 703 m = *mp; 704 705 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) { 706 if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) { 707 if (sc->mactype != DWC_GMAC_EXT_DESC) 708 flags = NTDESC1_CIC_FULL; 709 else 710 flags = ETDESC0_CIC_FULL; 711 } else { 712 if (sc->mactype != DWC_GMAC_EXT_DESC) 713 flags = NTDESC1_CIC_HDR; 714 else 715 flags = ETDESC0_CIC_HDR; 716 } 717 } 718 719 bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map, 720 BUS_DMASYNC_PREWRITE); 721 722 sc->txbuf_map[idx].mbuf = m; 723 724 first = sc->tx_desc_head; 725 for (i = 0; i < nsegs; i++) { 726 dwc_setup_txdesc(sc, sc->tx_desc_head, 727 segs[i].ds_addr, segs[i].ds_len, 728 (i == 0) ? flags : 0, /* only first desc needs flags */ 729 (i == 0), 730 (i == nsegs - 1)); 731 if (i > 0) 732 dwc_set_owner(sc, sc->tx_desc_head); 733 last = sc->tx_desc_head; 734 sc->tx_desc_head = next_txidx(sc, sc->tx_desc_head); 735 } 736 737 sc->txbuf_map[idx].last_desc_idx = last; 738 739 dwc_set_owner(sc, first); 740 741 return (0); 742 } 743 744 inline static uint32_t 745 dwc_setup_rxdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr) 746 { 747 uint32_t nidx; 748 749 sc->rxdesc_ring[idx].addr1 = (uint32_t)paddr; 750 nidx = next_rxidx(sc, idx); 751 sc->rxdesc_ring[idx].addr2 = sc->rxdesc_ring_paddr + 752 (nidx * sizeof(struct dwc_hwdesc)); 753 if (sc->mactype != DWC_GMAC_EXT_DESC) 754 sc->rxdesc_ring[idx].desc1 = NRDESC1_RCH | 755 MIN(MCLBYTES, NRDESC1_RBS1_MASK); 756 else 757 sc->rxdesc_ring[idx].desc1 = ERDESC1_RCH | 758 MIN(MCLBYTES, ERDESC1_RBS1_MASK); 759 760 wmb(); 761 sc->rxdesc_ring[idx].desc0 = RDESC0_OWN; 762 wmb(); 763 return (nidx); 764 } 765 766 static int 767 dwc_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m) 768 { 769 struct bus_dma_segment seg; 770 int error, nsegs; 771 772 m_adj(m, ETHER_ALIGN); 773 774 error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map, 775 m, &seg, &nsegs, 0); 776 if (error != 0) 777 return (error); 778 779 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 780 781 bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map, 782 BUS_DMASYNC_PREREAD); 783 784 sc->rxbuf_map[idx].mbuf = m; 785 dwc_setup_rxdesc(sc, idx, seg.ds_addr); 786 787 return (0); 788 } 789 790 static struct mbuf * 791 dwc_alloc_mbufcl(struct dwc_softc *sc) 792 { 793 struct mbuf *m; 794 795 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 796 if (m != NULL) 797 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 798 799 return (m); 800 } 801 802 static struct mbuf * 803 dwc_rxfinish_one(struct dwc_softc *sc, struct dwc_hwdesc *desc, 804 struct dwc_bufmap *map) 805 { 806 struct ifnet *ifp; 807 struct mbuf *m, *m0; 808 int len; 809 uint32_t rdesc0; 810 811 m = map->mbuf; 812 ifp = sc->ifp; 813 rdesc0 = desc ->desc0; 814 /* Validate descriptor. */ 815 if (rdesc0 & RDESC0_ES) { 816 /* 817 * Errored packet. Statistic counters are updated 818 * globally, so do nothing 819 */ 820 return (NULL); 821 } 822 823 if ((rdesc0 & (RDESC0_FS | RDESC0_LS)) != 824 (RDESC0_FS | RDESC0_LS)) { 825 /* 826 * Something very wrong happens. The whole packet should be 827 * recevied in one descriptr. Report problem. 828 */ 829 device_printf(sc->dev, 830 "%s: RX descriptor without FIRST and LAST bit set: 0x%08X", 831 __func__, rdesc0); 832 return (NULL); 833 } 834 835 len = (rdesc0 >> RDESC0_FL_SHIFT) & RDESC0_FL_MASK; 836 if (len < 64) { 837 /* 838 * Lenght is invalid, recycle old mbuf 839 * Probably impossible case 840 */ 841 return (NULL); 842 } 843 844 /* Allocate new buffer */ 845 m0 = dwc_alloc_mbufcl(sc); 846 if (m0 == NULL) { 847 /* no new mbuf available, recycle old */ 848 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1); 849 return (NULL); 850 } 851 /* Do dmasync for newly received packet */ 852 bus_dmamap_sync(sc->rxbuf_tag, map->map, BUS_DMASYNC_POSTREAD); 853 bus_dmamap_unload(sc->rxbuf_tag, map->map); 854 855 /* Received packet is valid, process it */ 856 m->m_pkthdr.rcvif = ifp; 857 m->m_pkthdr.len = len; 858 m->m_len = len; 859 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 860 861 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 && 862 (rdesc0 & RDESC0_FT) != 0) { 863 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED; 864 if ((rdesc0 & RDESC0_ICE) == 0) 865 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 866 if ((rdesc0 & RDESC0_PCE) == 0) { 867 m->m_pkthdr.csum_flags |= 868 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 869 m->m_pkthdr.csum_data = 0xffff; 870 } 871 } 872 873 /* Remove trailing FCS */ 874 m_adj(m, -ETHER_CRC_LEN); 875 876 DWC_UNLOCK(sc); 877 (*ifp->if_input)(ifp, m); 878 DWC_LOCK(sc); 879 return (m0); 880 } 881 882 static int 883 setup_dma(struct dwc_softc *sc) 884 { 885 struct mbuf *m; 886 int error; 887 int nidx; 888 int idx; 889 890 /* 891 * Set up TX descriptor ring, descriptors, and dma maps. 892 */ 893 error = bus_dma_tag_create( 894 bus_get_dma_tag(sc->dev), /* Parent tag. */ 895 DWC_DESC_RING_ALIGN, 0, /* alignment, boundary */ 896 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 897 BUS_SPACE_MAXADDR, /* highaddr */ 898 NULL, NULL, /* filter, filterarg */ 899 TX_DESC_SIZE, 1, /* maxsize, nsegments */ 900 TX_DESC_SIZE, /* maxsegsize */ 901 0, /* flags */ 902 NULL, NULL, /* lockfunc, lockarg */ 903 &sc->txdesc_tag); 904 if (error != 0) { 905 device_printf(sc->dev, 906 "could not create TX ring DMA tag.\n"); 907 goto out; 908 } 909 910 error = bus_dmamem_alloc(sc->txdesc_tag, (void**)&sc->txdesc_ring, 911 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, 912 &sc->txdesc_map); 913 if (error != 0) { 914 device_printf(sc->dev, 915 "could not allocate TX descriptor ring.\n"); 916 goto out; 917 } 918 919 error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map, 920 sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr, 921 &sc->txdesc_ring_paddr, 0); 922 if (error != 0) { 923 device_printf(sc->dev, 924 "could not load TX descriptor ring map.\n"); 925 goto out; 926 } 927 928 for (idx = 0; idx < TX_DESC_COUNT; idx++) { 929 nidx = next_txidx(sc, idx); 930 sc->txdesc_ring[idx].addr2 = sc->txdesc_ring_paddr + 931 (nidx * sizeof(struct dwc_hwdesc)); 932 } 933 934 error = bus_dma_tag_create( 935 bus_get_dma_tag(sc->dev), /* Parent tag. */ 936 1, 0, /* alignment, boundary */ 937 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 938 BUS_SPACE_MAXADDR, /* highaddr */ 939 NULL, NULL, /* filter, filterarg */ 940 MCLBYTES*TX_MAP_MAX_SEGS, /* maxsize */ 941 TX_MAP_MAX_SEGS, /* nsegments */ 942 MCLBYTES, /* maxsegsize */ 943 0, /* flags */ 944 NULL, NULL, /* lockfunc, lockarg */ 945 &sc->txbuf_tag); 946 if (error != 0) { 947 device_printf(sc->dev, 948 "could not create TX ring DMA tag.\n"); 949 goto out; 950 } 951 952 for (idx = 0; idx < TX_MAP_COUNT; idx++) { 953 error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT, 954 &sc->txbuf_map[idx].map); 955 if (error != 0) { 956 device_printf(sc->dev, 957 "could not create TX buffer DMA map.\n"); 958 goto out; 959 } 960 } 961 962 for (idx = 0; idx < TX_DESC_COUNT; idx++) 963 dwc_setup_txdesc(sc, idx, 0, 0, 0, false, false); 964 965 /* 966 * Set up RX descriptor ring, descriptors, dma maps, and mbufs. 967 */ 968 error = bus_dma_tag_create( 969 bus_get_dma_tag(sc->dev), /* Parent tag. */ 970 DWC_DESC_RING_ALIGN, 0, /* alignment, boundary */ 971 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 972 BUS_SPACE_MAXADDR, /* highaddr */ 973 NULL, NULL, /* filter, filterarg */ 974 RX_DESC_SIZE, 1, /* maxsize, nsegments */ 975 RX_DESC_SIZE, /* maxsegsize */ 976 0, /* flags */ 977 NULL, NULL, /* lockfunc, lockarg */ 978 &sc->rxdesc_tag); 979 if (error != 0) { 980 device_printf(sc->dev, 981 "could not create RX ring DMA tag.\n"); 982 goto out; 983 } 984 985 error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring, 986 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, 987 &sc->rxdesc_map); 988 if (error != 0) { 989 device_printf(sc->dev, 990 "could not allocate RX descriptor ring.\n"); 991 goto out; 992 } 993 994 error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map, 995 sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr, 996 &sc->rxdesc_ring_paddr, 0); 997 if (error != 0) { 998 device_printf(sc->dev, 999 "could not load RX descriptor ring map.\n"); 1000 goto out; 1001 } 1002 1003 error = bus_dma_tag_create( 1004 bus_get_dma_tag(sc->dev), /* Parent tag. */ 1005 1, 0, /* alignment, boundary */ 1006 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1007 BUS_SPACE_MAXADDR, /* highaddr */ 1008 NULL, NULL, /* filter, filterarg */ 1009 MCLBYTES, 1, /* maxsize, nsegments */ 1010 MCLBYTES, /* maxsegsize */ 1011 0, /* flags */ 1012 NULL, NULL, /* lockfunc, lockarg */ 1013 &sc->rxbuf_tag); 1014 if (error != 0) { 1015 device_printf(sc->dev, 1016 "could not create RX buf DMA tag.\n"); 1017 goto out; 1018 } 1019 1020 for (idx = 0; idx < RX_DESC_COUNT; idx++) { 1021 error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT, 1022 &sc->rxbuf_map[idx].map); 1023 if (error != 0) { 1024 device_printf(sc->dev, 1025 "could not create RX buffer DMA map.\n"); 1026 goto out; 1027 } 1028 if ((m = dwc_alloc_mbufcl(sc)) == NULL) { 1029 device_printf(sc->dev, "Could not alloc mbuf\n"); 1030 error = ENOMEM; 1031 goto out; 1032 } 1033 if ((error = dwc_setup_rxbuf(sc, idx, m)) != 0) { 1034 device_printf(sc->dev, 1035 "could not create new RX buffer.\n"); 1036 goto out; 1037 } 1038 } 1039 1040 out: 1041 if (error != 0) 1042 return (ENXIO); 1043 1044 return (0); 1045 } 1046 1047 /* 1048 * if_ functions 1049 */ 1050 1051 static void 1052 dwc_txstart_locked(struct dwc_softc *sc) 1053 { 1054 struct ifnet *ifp; 1055 struct mbuf *m; 1056 int enqueued; 1057 1058 DWC_ASSERT_LOCKED(sc); 1059 1060 if (!sc->link_is_up) 1061 return; 1062 1063 ifp = sc->ifp; 1064 1065 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != 1066 IFF_DRV_RUNNING) 1067 return; 1068 1069 enqueued = 0; 1070 1071 for (;;) { 1072 if (sc->tx_desccount > (TX_DESC_COUNT - TX_MAP_MAX_SEGS + 1)) { 1073 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1074 break; 1075 } 1076 1077 if (sc->tx_mapcount == (TX_MAP_COUNT - 1)) { 1078 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1079 break; 1080 } 1081 1082 m = if_dequeue(ifp); 1083 if (m == NULL) 1084 break; 1085 if (dwc_setup_txbuf(sc, sc->tx_map_head, &m) != 0) { 1086 if_sendq_prepend(ifp, m); 1087 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1088 break; 1089 } 1090 if_bpfmtap(ifp, m); 1091 sc->tx_map_head = next_txidx(sc, sc->tx_map_head); 1092 sc->tx_mapcount++; 1093 ++enqueued; 1094 } 1095 1096 if (enqueued != 0) { 1097 WRITE4(sc, TRANSMIT_POLL_DEMAND, 0x1); 1098 sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS; 1099 } 1100 } 1101 1102 static void 1103 dwc_txstart(struct ifnet *ifp) 1104 { 1105 struct dwc_softc *sc = ifp->if_softc; 1106 1107 DWC_LOCK(sc); 1108 dwc_txstart_locked(sc); 1109 DWC_UNLOCK(sc); 1110 } 1111 1112 static void 1113 dwc_init_locked(struct dwc_softc *sc) 1114 { 1115 struct ifnet *ifp = sc->ifp; 1116 1117 DWC_ASSERT_LOCKED(sc); 1118 1119 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 1120 return; 1121 1122 dwc_setup_rxfilter(sc); 1123 dwc_setup_core(sc); 1124 dwc_enable_mac(sc, true); 1125 dwc_init_dma(sc); 1126 1127 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 1128 1129 /* 1130 * Call mii_mediachg() which will call back into dwc_miibus_statchg() 1131 * to set up the remaining config registers based on current media. 1132 */ 1133 mii_mediachg(sc->mii_softc); 1134 callout_reset(&sc->dwc_callout, hz, dwc_tick, sc); 1135 } 1136 1137 static void 1138 dwc_init(void *if_softc) 1139 { 1140 struct dwc_softc *sc = if_softc; 1141 1142 DWC_LOCK(sc); 1143 dwc_init_locked(sc); 1144 DWC_UNLOCK(sc); 1145 } 1146 1147 static void 1148 dwc_stop_locked(struct dwc_softc *sc) 1149 { 1150 struct ifnet *ifp; 1151 1152 DWC_ASSERT_LOCKED(sc); 1153 1154 ifp = sc->ifp; 1155 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1156 sc->tx_watchdog_count = 0; 1157 sc->stats_harvest_count = 0; 1158 1159 callout_stop(&sc->dwc_callout); 1160 1161 dwc_stop_dma(sc); 1162 dwc_enable_mac(sc, false); 1163 } 1164 1165 static int 1166 dwc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1167 { 1168 struct dwc_softc *sc; 1169 struct mii_data *mii; 1170 struct ifreq *ifr; 1171 int flags, mask, error; 1172 1173 sc = ifp->if_softc; 1174 ifr = (struct ifreq *)data; 1175 1176 error = 0; 1177 switch (cmd) { 1178 case SIOCSIFFLAGS: 1179 DWC_LOCK(sc); 1180 if (if_getflags(ifp) & IFF_UP) { 1181 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1182 flags = if_getflags(ifp) ^ sc->if_flags; 1183 if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) 1184 dwc_setup_rxfilter(sc); 1185 } else { 1186 if (!sc->is_detaching) 1187 dwc_init_locked(sc); 1188 } 1189 } else { 1190 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 1191 dwc_stop_locked(sc); 1192 } 1193 sc->if_flags = if_getflags(ifp); 1194 DWC_UNLOCK(sc); 1195 break; 1196 case SIOCADDMULTI: 1197 case SIOCDELMULTI: 1198 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1199 DWC_LOCK(sc); 1200 dwc_setup_rxfilter(sc); 1201 DWC_UNLOCK(sc); 1202 } 1203 break; 1204 case SIOCSIFMEDIA: 1205 case SIOCGIFMEDIA: 1206 mii = sc->mii_softc; 1207 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1208 break; 1209 case SIOCSIFCAP: 1210 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 1211 if (mask & IFCAP_VLAN_MTU) { 1212 /* No work to do except acknowledge the change took */ 1213 if_togglecapenable(ifp, IFCAP_VLAN_MTU); 1214 } 1215 if (mask & IFCAP_RXCSUM) 1216 if_togglecapenable(ifp, IFCAP_RXCSUM); 1217 if (mask & IFCAP_TXCSUM) 1218 if_togglecapenable(ifp, IFCAP_TXCSUM); 1219 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) 1220 if_sethwassistbits(ifp, CSUM_IP | CSUM_UDP | CSUM_TCP, 0); 1221 else 1222 if_sethwassistbits(ifp, 0, CSUM_IP | CSUM_UDP | CSUM_TCP); 1223 break; 1224 1225 default: 1226 error = ether_ioctl(ifp, cmd, data); 1227 break; 1228 } 1229 1230 return (error); 1231 } 1232 1233 /* 1234 * Interrupts functions 1235 */ 1236 1237 static void 1238 dwc_txfinish_locked(struct dwc_softc *sc) 1239 { 1240 struct dwc_bufmap *bmap; 1241 struct dwc_hwdesc *desc; 1242 struct ifnet *ifp; 1243 int idx, last_idx; 1244 bool map_finished; 1245 1246 DWC_ASSERT_LOCKED(sc); 1247 1248 ifp = sc->ifp; 1249 /* check if all descriptors of the map are done */ 1250 while (sc->tx_map_tail != sc->tx_map_head) { 1251 map_finished = true; 1252 bmap = &sc->txbuf_map[sc->tx_map_tail]; 1253 idx = sc->tx_desc_tail; 1254 last_idx = next_txidx(sc, bmap->last_desc_idx); 1255 while (idx != last_idx) { 1256 desc = &sc->txdesc_ring[idx]; 1257 if ((desc->desc0 & TDESC0_OWN) != 0) { 1258 map_finished = false; 1259 break; 1260 } 1261 idx = next_txidx(sc, idx); 1262 } 1263 1264 if (!map_finished) 1265 break; 1266 bus_dmamap_sync(sc->txbuf_tag, bmap->map, 1267 BUS_DMASYNC_POSTWRITE); 1268 bus_dmamap_unload(sc->txbuf_tag, bmap->map); 1269 m_freem(bmap->mbuf); 1270 bmap->mbuf = NULL; 1271 sc->tx_mapcount--; 1272 while (sc->tx_desc_tail != last_idx) { 1273 dwc_setup_txdesc(sc, sc->tx_desc_tail, 0, 0, 0, false, false); 1274 sc->tx_desc_tail = next_txidx(sc, sc->tx_desc_tail); 1275 } 1276 sc->tx_map_tail = next_txidx(sc, sc->tx_map_tail); 1277 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1278 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1279 } 1280 1281 /* If there are no buffers outstanding, muzzle the watchdog. */ 1282 if (sc->tx_desc_tail == sc->tx_desc_head) { 1283 sc->tx_watchdog_count = 0; 1284 } 1285 } 1286 1287 static void 1288 dwc_rxfinish_locked(struct dwc_softc *sc) 1289 { 1290 struct ifnet *ifp; 1291 struct mbuf *m; 1292 int error, idx; 1293 struct dwc_hwdesc *desc; 1294 1295 DWC_ASSERT_LOCKED(sc); 1296 ifp = sc->ifp; 1297 for (;;) { 1298 idx = sc->rx_idx; 1299 desc = sc->rxdesc_ring + idx; 1300 if ((desc->desc0 & RDESC0_OWN) != 0) 1301 break; 1302 1303 m = dwc_rxfinish_one(sc, desc, sc->rxbuf_map + idx); 1304 if (m == NULL) { 1305 wmb(); 1306 desc->desc0 = RDESC0_OWN; 1307 wmb(); 1308 } else { 1309 /* We cannot create hole in RX ring */ 1310 error = dwc_setup_rxbuf(sc, idx, m); 1311 if (error != 0) 1312 panic("dwc_setup_rxbuf failed: error %d\n", 1313 error); 1314 1315 } 1316 sc->rx_idx = next_rxidx(sc, sc->rx_idx); 1317 } 1318 } 1319 1320 static void 1321 dwc_intr(void *arg) 1322 { 1323 struct dwc_softc *sc; 1324 uint32_t reg; 1325 1326 sc = arg; 1327 1328 DWC_LOCK(sc); 1329 1330 reg = READ4(sc, INTERRUPT_STATUS); 1331 if (reg) 1332 READ4(sc, SGMII_RGMII_SMII_CTRL_STATUS); 1333 1334 reg = READ4(sc, DMA_STATUS); 1335 if (reg & DMA_STATUS_NIS) { 1336 if (reg & DMA_STATUS_RI) 1337 dwc_rxfinish_locked(sc); 1338 1339 if (reg & DMA_STATUS_TI) { 1340 dwc_txfinish_locked(sc); 1341 dwc_txstart_locked(sc); 1342 } 1343 } 1344 1345 if (reg & DMA_STATUS_AIS) { 1346 if (reg & DMA_STATUS_FBI) { 1347 /* Fatal bus error */ 1348 device_printf(sc->dev, 1349 "Ethernet DMA error, restarting controller.\n"); 1350 dwc_stop_locked(sc); 1351 dwc_init_locked(sc); 1352 } 1353 } 1354 1355 WRITE4(sc, DMA_STATUS, reg & DMA_STATUS_INTR_MASK); 1356 DWC_UNLOCK(sc); 1357 } 1358 1359 /* 1360 * Stats 1361 */ 1362 1363 static void dwc_clear_stats(struct dwc_softc *sc) 1364 { 1365 uint32_t reg; 1366 1367 reg = READ4(sc, MMC_CONTROL); 1368 reg |= (MMC_CONTROL_CNTRST); 1369 WRITE4(sc, MMC_CONTROL, reg); 1370 } 1371 1372 static void 1373 dwc_harvest_stats(struct dwc_softc *sc) 1374 { 1375 struct ifnet *ifp; 1376 1377 /* We don't need to harvest too often. */ 1378 if (++sc->stats_harvest_count < STATS_HARVEST_INTERVAL) 1379 return; 1380 1381 sc->stats_harvest_count = 0; 1382 ifp = sc->ifp; 1383 1384 if_inc_counter(ifp, IFCOUNTER_IPACKETS, READ4(sc, RXFRAMECOUNT_GB)); 1385 if_inc_counter(ifp, IFCOUNTER_IMCASTS, READ4(sc, RXMULTICASTFRAMES_G)); 1386 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1387 READ4(sc, RXOVERSIZE_G) + READ4(sc, RXUNDERSIZE_G) + 1388 READ4(sc, RXCRCERROR) + READ4(sc, RXALIGNMENTERROR) + 1389 READ4(sc, RXRUNTERROR) + READ4(sc, RXJABBERERROR) + 1390 READ4(sc, RXLENGTHERROR)); 1391 1392 if_inc_counter(ifp, IFCOUNTER_OPACKETS, READ4(sc, TXFRAMECOUNT_G)); 1393 if_inc_counter(ifp, IFCOUNTER_OMCASTS, READ4(sc, TXMULTICASTFRAMES_G)); 1394 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1395 READ4(sc, TXOVERSIZE_G) + READ4(sc, TXEXCESSDEF) + 1396 READ4(sc, TXCARRIERERR) + READ4(sc, TXUNDERFLOWERROR)); 1397 1398 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1399 READ4(sc, TXEXESSCOL) + READ4(sc, TXLATECOL)); 1400 1401 dwc_clear_stats(sc); 1402 } 1403 1404 static void 1405 dwc_tick(void *arg) 1406 { 1407 struct dwc_softc *sc; 1408 struct ifnet *ifp; 1409 int link_was_up; 1410 1411 sc = arg; 1412 1413 DWC_ASSERT_LOCKED(sc); 1414 1415 ifp = sc->ifp; 1416 1417 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) 1418 return; 1419 1420 /* 1421 * Typical tx watchdog. If this fires it indicates that we enqueued 1422 * packets for output and never got a txdone interrupt for them. Maybe 1423 * it's a missed interrupt somehow, just pretend we got one. 1424 */ 1425 if (sc->tx_watchdog_count > 0) { 1426 if (--sc->tx_watchdog_count == 0) { 1427 dwc_txfinish_locked(sc); 1428 } 1429 } 1430 1431 /* Gather stats from hardware counters. */ 1432 dwc_harvest_stats(sc); 1433 1434 /* Check the media status. */ 1435 link_was_up = sc->link_is_up; 1436 mii_tick(sc->mii_softc); 1437 if (sc->link_is_up && !link_was_up) 1438 dwc_txstart_locked(sc); 1439 1440 /* Schedule another check one second from now. */ 1441 callout_reset(&sc->dwc_callout, hz, dwc_tick, sc); 1442 } 1443 1444 /* 1445 * Probe/Attach functions 1446 */ 1447 1448 #define GPIO_ACTIVE_LOW 1 1449 1450 static int 1451 dwc_reset(device_t dev) 1452 { 1453 pcell_t gpio_prop[4]; 1454 pcell_t delay_prop[3]; 1455 phandle_t node, gpio_node; 1456 device_t gpio; 1457 uint32_t pin, flags; 1458 uint32_t pin_value; 1459 1460 node = ofw_bus_get_node(dev); 1461 if (OF_getencprop(node, "snps,reset-gpio", 1462 gpio_prop, sizeof(gpio_prop)) <= 0) 1463 return (0); 1464 1465 if (OF_getencprop(node, "snps,reset-delays-us", 1466 delay_prop, sizeof(delay_prop)) <= 0) { 1467 device_printf(dev, 1468 "Wrong property for snps,reset-delays-us"); 1469 return (ENXIO); 1470 } 1471 1472 gpio_node = OF_node_from_xref(gpio_prop[0]); 1473 if ((gpio = OF_device_from_xref(gpio_prop[0])) == NULL) { 1474 device_printf(dev, 1475 "Can't find gpio controller for phy reset\n"); 1476 return (ENXIO); 1477 } 1478 1479 if (GPIO_MAP_GPIOS(gpio, node, gpio_node, 1480 nitems(gpio_prop) - 1, 1481 gpio_prop + 1, &pin, &flags) != 0) { 1482 device_printf(dev, "Can't map gpio for phy reset\n"); 1483 return (ENXIO); 1484 } 1485 1486 pin_value = GPIO_PIN_LOW; 1487 if (OF_hasprop(node, "snps,reset-active-low")) 1488 pin_value = GPIO_PIN_HIGH; 1489 1490 GPIO_PIN_SETFLAGS(gpio, pin, GPIO_PIN_OUTPUT); 1491 GPIO_PIN_SET(gpio, pin, pin_value); 1492 DELAY(delay_prop[0] * 5); 1493 GPIO_PIN_SET(gpio, pin, !pin_value); 1494 DELAY(delay_prop[1] * 5); 1495 GPIO_PIN_SET(gpio, pin, pin_value); 1496 DELAY(delay_prop[2] * 5); 1497 1498 return (0); 1499 } 1500 1501 #ifdef EXT_RESOURCES 1502 static int 1503 dwc_clock_init(device_t dev) 1504 { 1505 hwreset_t rst; 1506 clk_t clk; 1507 int error; 1508 int64_t freq; 1509 1510 /* Enable clocks */ 1511 if (clk_get_by_ofw_name(dev, 0, "stmmaceth", &clk) == 0) { 1512 error = clk_enable(clk); 1513 if (error != 0) { 1514 device_printf(dev, "could not enable main clock\n"); 1515 return (error); 1516 } 1517 if (bootverbose) { 1518 clk_get_freq(clk, &freq); 1519 device_printf(dev, "MAC clock(%s) freq: %jd\n", 1520 clk_get_name(clk), (intmax_t)freq); 1521 } 1522 } 1523 else { 1524 device_printf(dev, "could not find clock stmmaceth\n"); 1525 } 1526 1527 /* De-assert reset */ 1528 if (hwreset_get_by_ofw_name(dev, 0, "stmmaceth", &rst) == 0) { 1529 error = hwreset_deassert(rst); 1530 if (error != 0) { 1531 device_printf(dev, "could not de-assert reset\n"); 1532 return (error); 1533 } 1534 } 1535 1536 return (0); 1537 } 1538 #endif 1539 1540 static int 1541 dwc_probe(device_t dev) 1542 { 1543 1544 if (!ofw_bus_status_okay(dev)) 1545 return (ENXIO); 1546 1547 if (!ofw_bus_is_compatible(dev, "snps,dwmac")) 1548 return (ENXIO); 1549 1550 device_set_desc(dev, "Gigabit Ethernet Controller"); 1551 return (BUS_PROBE_DEFAULT); 1552 } 1553 1554 static int 1555 dwc_attach(device_t dev) 1556 { 1557 uint8_t macaddr[ETHER_ADDR_LEN]; 1558 struct dwc_softc *sc; 1559 struct ifnet *ifp; 1560 int error, i; 1561 uint32_t reg; 1562 char *phy_mode; 1563 phandle_t node; 1564 uint32_t txpbl, rxpbl, pbl; 1565 bool nopblx8 = false; 1566 bool fixed_burst = false; 1567 1568 sc = device_get_softc(dev); 1569 sc->dev = dev; 1570 sc->rx_idx = 0; 1571 sc->tx_desccount = TX_DESC_COUNT; 1572 sc->tx_mapcount = 0; 1573 sc->mii_clk = IF_DWC_MII_CLK(dev); 1574 sc->mactype = IF_DWC_MAC_TYPE(dev); 1575 1576 node = ofw_bus_get_node(dev); 1577 if (OF_getprop_alloc(node, "phy-mode", (void **)&phy_mode)) { 1578 if (strcmp(phy_mode, "rgmii") == 0) 1579 sc->phy_mode = PHY_MODE_RGMII; 1580 if (strcmp(phy_mode, "rmii") == 0) 1581 sc->phy_mode = PHY_MODE_RMII; 1582 OF_prop_free(phy_mode); 1583 } 1584 1585 if (OF_getencprop(node, "snps,pbl", &pbl, sizeof(uint32_t)) <= 0) 1586 pbl = BUS_MODE_DEFAULT_PBL; 1587 if (OF_getencprop(node, "snps,txpbl", &txpbl, sizeof(uint32_t)) <= 0) 1588 txpbl = pbl; 1589 if (OF_getencprop(node, "snps,rxpbl", &rxpbl, sizeof(uint32_t)) <= 0) 1590 rxpbl = pbl; 1591 if (OF_hasprop(node, "snps,no-pbl-x8") == 1) 1592 nopblx8 = true; 1593 if (OF_hasprop(node, "snps,fixed-burst") == 1) 1594 fixed_burst = true; 1595 1596 if (IF_DWC_INIT(dev) != 0) 1597 return (ENXIO); 1598 1599 #ifdef EXT_RESOURCES 1600 if (dwc_clock_init(dev) != 0) 1601 return (ENXIO); 1602 #endif 1603 1604 if (bus_alloc_resources(dev, dwc_spec, sc->res)) { 1605 device_printf(dev, "could not allocate resources\n"); 1606 return (ENXIO); 1607 } 1608 1609 /* Read MAC before reset */ 1610 dwc_get_hwaddr(sc, macaddr); 1611 1612 /* Reset the PHY if needed */ 1613 if (dwc_reset(dev) != 0) { 1614 device_printf(dev, "Can't reset the PHY\n"); 1615 return (ENXIO); 1616 } 1617 1618 /* Reset */ 1619 reg = READ4(sc, BUS_MODE); 1620 reg |= (BUS_MODE_SWR); 1621 WRITE4(sc, BUS_MODE, reg); 1622 1623 for (i = 0; i < MAC_RESET_TIMEOUT; i++) { 1624 if ((READ4(sc, BUS_MODE) & BUS_MODE_SWR) == 0) 1625 break; 1626 DELAY(10); 1627 } 1628 if (i >= MAC_RESET_TIMEOUT) { 1629 device_printf(sc->dev, "Can't reset DWC.\n"); 1630 return (ENXIO); 1631 } 1632 1633 reg = BUS_MODE_USP; 1634 if (!nopblx8) 1635 reg |= BUS_MODE_EIGHTXPBL; 1636 reg |= (txpbl << BUS_MODE_PBL_SHIFT); 1637 reg |= (rxpbl << BUS_MODE_RPBL_SHIFT); 1638 if (fixed_burst) 1639 reg |= BUS_MODE_FIXEDBURST; 1640 1641 WRITE4(sc, BUS_MODE, reg); 1642 1643 /* 1644 * DMA must be stop while changing descriptor list addresses. 1645 */ 1646 reg = READ4(sc, OPERATION_MODE); 1647 reg &= ~(MODE_ST | MODE_SR); 1648 WRITE4(sc, OPERATION_MODE, reg); 1649 1650 if (setup_dma(sc)) 1651 return (ENXIO); 1652 1653 /* Setup addresses */ 1654 WRITE4(sc, RX_DESCR_LIST_ADDR, sc->rxdesc_ring_paddr); 1655 WRITE4(sc, TX_DESCR_LIST_ADDR, sc->txdesc_ring_paddr); 1656 1657 mtx_init(&sc->mtx, device_get_nameunit(sc->dev), 1658 MTX_NETWORK_LOCK, MTX_DEF); 1659 1660 callout_init_mtx(&sc->dwc_callout, &sc->mtx, 0); 1661 1662 /* Setup interrupt handler. */ 1663 error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE, 1664 NULL, dwc_intr, sc, &sc->intr_cookie); 1665 if (error != 0) { 1666 device_printf(dev, "could not setup interrupt handler.\n"); 1667 return (ENXIO); 1668 } 1669 1670 /* Set up the ethernet interface. */ 1671 sc->ifp = ifp = if_alloc(IFT_ETHER); 1672 1673 ifp->if_softc = sc; 1674 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1675 if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 1676 if_setstartfn(ifp, dwc_txstart); 1677 if_setioctlfn(ifp, dwc_ioctl); 1678 if_setinitfn(ifp, dwc_init); 1679 if_setsendqlen(ifp, TX_MAP_COUNT - 1); 1680 if_setsendqready(sc->ifp); 1681 if_sethwassist(sc->ifp, CSUM_IP | CSUM_UDP | CSUM_TCP); 1682 if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM); 1683 if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp)); 1684 1685 /* Attach the mii driver. */ 1686 error = mii_attach(dev, &sc->miibus, ifp, dwc_media_change, 1687 dwc_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, 1688 MII_OFFSET_ANY, 0); 1689 1690 if (error != 0) { 1691 device_printf(dev, "PHY attach failed\n"); 1692 return (ENXIO); 1693 } 1694 sc->mii_softc = device_get_softc(sc->miibus); 1695 1696 /* All ready to run, attach the ethernet interface. */ 1697 ether_ifattach(ifp, macaddr); 1698 sc->is_attached = true; 1699 1700 return (0); 1701 } 1702 1703 static device_method_t dwc_methods[] = { 1704 DEVMETHOD(device_probe, dwc_probe), 1705 DEVMETHOD(device_attach, dwc_attach), 1706 1707 /* MII Interface */ 1708 DEVMETHOD(miibus_readreg, dwc_miibus_read_reg), 1709 DEVMETHOD(miibus_writereg, dwc_miibus_write_reg), 1710 DEVMETHOD(miibus_statchg, dwc_miibus_statchg), 1711 1712 { 0, 0 } 1713 }; 1714 1715 driver_t dwc_driver = { 1716 "dwc", 1717 dwc_methods, 1718 sizeof(struct dwc_softc), 1719 }; 1720 1721 static devclass_t dwc_devclass; 1722 1723 DRIVER_MODULE(dwc, simplebus, dwc_driver, dwc_devclass, 0, 0); 1724 DRIVER_MODULE(miibus, dwc, miibus_driver, miibus_devclass, 0, 0); 1725 1726 MODULE_DEPEND(dwc, ether, 1, 1, 1); 1727 MODULE_DEPEND(dwc, miibus, 1, 1, 1); 1728