/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Ethernet media access controller (EMAC)
 * Chapter 17, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
 *
 * EMAC is an instance of the Synopsys DesignWare 3504-0
 * Universal 10/100/1000 Ethernet MAC (DWC_gmac).
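 *
 * The driver attaches to FDT nodes carrying the "snps,dwmac" compatible
 * string; see dwc_probe() below.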
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/gpio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/dwc/if_dwc.h>
#include <dev/dwc/if_dwcvar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/mii/mii_fdt.h>

#ifdef EXT_RESOURCES
#include <dev/extres/clk/clk.h>
#include <dev/extres/hwreset/hwreset.h>
#endif

#include "if_dwc_if.h"
#include "gpio_if.h"
#include "miibus_if.h"

#define	READ4(_sc, _reg)	\
	bus_read_4((_sc)->res[0], _reg)
#define	WRITE4(_sc, _reg, _val)	\
	bus_write_4((_sc)->res[0], _reg, _val)

#define	MAC_RESET_TIMEOUT	100
#define	WATCHDOG_TIMEOUT_SECS	5
#define	STATS_HARVEST_INTERVAL	2

#define	DWC_LOCK(sc)			mtx_lock(&(sc)->mtx)
#define	DWC_UNLOCK(sc)			mtx_unlock(&(sc)->mtx)
#define	DWC_ASSERT_LOCKED(sc)		mtx_assert(&(sc)->mtx, MA_OWNED)
#define	DWC_ASSERT_UNLOCKED(sc)		mtx_assert(&(sc)->mtx, MA_NOTOWNED)

/* TX descriptors - TDESC0 is almost unified */
#define	TDESC0_OWN		(1U << 31)
#define	TDESC0_IHE		(1U << 16)	/* IP Header Error */
#define	TDESC0_ES		(1U << 15)	/* Error Summary */
#define	TDESC0_JT		(1U << 14)	/* Jabber Timeout */
#define	TDESC0_FF		(1U << 13)	/* Frame Flushed */
#define	TDESC0_PCE		(1U << 12)	/* Payload Checksum Error */
#define	TDESC0_LOC		(1U << 11)	/* Loss of Carrier */
#define	TDESC0_NC		(1U << 10)	/* No Carrier */
#define	TDESC0_LC		(1U << 9)	/* Late Collision */
#define	TDESC0_EC		(1U << 8)	/* Excessive Collision */
#define	TDESC0_VF		(1U << 7)	/* VLAN Frame */
#define	TDESC0_CC_MASK		0xf
#define	TDESC0_CC_SHIFT		3		/* Collision Count */
#define	TDESC0_ED		(1U << 2)	/* Excessive Deferral */
#define	TDESC0_UF		(1U << 1)	/* Underflow Error */
#define	TDESC0_DB		(1U << 0)	/* Deferred Bit */
/* TX descriptors - TDESC0 extended format only */
#define	ETDESC0_IC		(1U << 30)	/* Interrupt on Completion */
#define	ETDESC0_LS		(1U << 29)	/* Last Segment */
#define	ETDESC0_FS		(1U << 28)	/* First Segment */
#define	ETDESC0_DC		(1U << 27)	/* Disable CRC */
#define	ETDESC0_DP		(1U << 26)	/* Disable Padding */
#define	ETDESC0_CIC_NONE	(0U << 22)	/* Checksum Insertion Control */
#define	ETDESC0_CIC_HDR		(1U << 22)
#define	ETDESC0_CIC_SEG		(2U << 22)
#define	ETDESC0_CIC_FULL	(3U << 22)
#define	ETDESC0_TER		(1U << 21)	/* Transmit End of Ring */
#define	ETDESC0_TCH		(1U << 20)	/* Second Address Chained */

/* TX descriptors - TDESC1 normal format */
#define	NTDESC1_IC		(1U << 31)	/* Interrupt on Completion */
#define	NTDESC1_LS		(1U << 30)	/* Last Segment */
#define	NTDESC1_FS		(1U << 29)	/* First Segment */
#define	NTDESC1_CIC_NONE	(0U << 27)	/* Checksum Insertion Control */
#define	NTDESC1_CIC_HDR		(1U << 27)
#define	NTDESC1_CIC_SEG		(2U << 27)
#define	NTDESC1_CIC_FULL	(3U << 27)
#define	NTDESC1_DC		(1U << 26)	/* Disable CRC */
#define	NTDESC1_TER		(1U << 25)	/* Transmit End of Ring */
#define	NTDESC1_TCH		(1U << 24)	/* Second Address Chained */
/* TX descriptors - TDESC1 extended format */
#define	ETDESC1_DP		(1U << 23)	/* Disable Padding */
#define	ETDESC1_TBS2_MASK	0x7ff
#define	ETDESC1_TBS2_SHIFT	11		/* Transmit Buffer 2 Size */
#define	ETDESC1_TBS1_MASK	0x7ff
#define	ETDESC1_TBS1_SHIFT	0		/* Transmit Buffer 1 Size */

/* RX descriptor - RDESC0 is unified */
#define	RDESC0_OWN		(1U << 31)
#define	RDESC0_AFM		(1U << 30)	/* Dest. Address Filter Fail */
#define	RDESC0_FL_MASK		0x3fff
#define	RDESC0_FL_SHIFT		16		/* Frame Length */
#define	RDESC0_ES		(1U << 15)	/* Error Summary */
#define	RDESC0_DE		(1U << 14)	/* Descriptor Error */
#define	RDESC0_SAF		(1U << 13)	/* Source Address Filter Fail */
#define	RDESC0_LE		(1U << 12)	/* Length Error */
#define	RDESC0_OE		(1U << 11)	/* Overflow Error */
#define	RDESC0_VLAN		(1U << 10)	/* VLAN Tag */
#define	RDESC0_FS		(1U << 9)	/* First Descriptor */
#define	RDESC0_LS		(1U << 8)	/* Last Descriptor */
#define	RDESC0_ICE		(1U << 7)	/* IPC Checksum Error */
#define	RDESC0_LC		(1U << 6)	/* Late Collision */
#define	RDESC0_FT		(1U << 5)	/* Frame Type */
#define	RDESC0_RWT		(1U << 4)	/* Receive Watchdog Timeout */
#define	RDESC0_RE		(1U << 3)	/* Receive Error */
#define	RDESC0_DBE		(1U << 2)	/* Dribble Bit Error */
#define	RDESC0_CE		(1U << 1)	/* CRC Error */
#define	RDESC0_PCE		(1U << 0)	/* Payload Checksum Error */
#define	RDESC0_RXMA		(1U << 0)	/* Rx MAC Address */

/* RX descriptors - RDESC1 normal format */
#define	NRDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
#define	NRDESC1_RER		(1U << 25)	/* Receive End of Ring */
#define	NRDESC1_RCH		(1U << 24)	/* Second Address Chained */
#define	NRDESC1_RBS2_MASK	0x7ff
#define	NRDESC1_RBS2_SHIFT	11		/* Receive Buffer 2 Size */
#define	NRDESC1_RBS1_MASK	0x7ff
#define	NRDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */

/* RX descriptors - RDESC1 enhanced format */
#define	ERDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
#define	ERDESC1_RBS2_MASK	0x7ffff
#define	ERDESC1_RBS2_SHIFT	16		/* Receive Buffer 2 Size */
#define	ERDESC1_RER		(1U << 15)	/* Receive End of Ring */
#define	ERDESC1_RCH		(1U << 14)	/* Second Address Chained */
#define	ERDESC1_RBS1_MASK	0x7ffff
#define	ERDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */

/*
 * A hardware buffer descriptor.  Rx and Tx buffers have the same descriptor
 * layout, but the bits in the fields have different meanings.
 */
struct dwc_hwdesc
{
	uint32_t desc0;
	uint32_t desc1;
	uint32_t addr1;		/* ptr to first buffer data */
	uint32_t addr2;		/* ptr to next descriptor / second buffer data*/
};

struct dwc_hash_maddr_ctx {
	struct dwc_softc *sc;
	uint32_t hash[8];
};

/*
 * The hardware imposes alignment restrictions on various objects involved in
 * DMA transfers.  These values are expressed in bytes (not bits).
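 *
 * The descriptor rings below are therefore allocated with
 * DWC_DESC_RING_ALIGN-byte alignment; see setup_dma().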
 */
#define	DWC_DESC_RING_ALIGN	2048

static struct resource_spec dwc_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

static void dwc_txfinish_locked(struct dwc_softc *sc);
static void dwc_rxfinish_locked(struct dwc_softc *sc);
static void dwc_stop_locked(struct dwc_softc *sc);
static void dwc_setup_rxfilter(struct dwc_softc *sc);
static void dwc_setup_core(struct dwc_softc *sc);
static void dwc_enable_mac(struct dwc_softc *sc, bool enable);
static void dwc_init_dma(struct dwc_softc *sc);
static void dwc_stop_dma(struct dwc_softc *sc);

static void dwc_tick(void *arg);

/* Pause time field in the transmitted control frame */
static int dwc_pause_time = 0xffff;
TUNABLE_INT("hw.dwc.pause_time", &dwc_pause_time);

/*
 * MIIBUS functions
 */

static int
dwc_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct dwc_softc *sc;
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	sc = device_get_softc(dev);

	mii = ((phy & GMII_ADDRESS_PA_MASK) << GMII_ADDRESS_PA_SHIFT)
	    | ((reg & GMII_ADDRESS_GR_MASK) << GMII_ADDRESS_GR_SHIFT)
	    | (sc->mii_clk << GMII_ADDRESS_CR_SHIFT)
	    | GMII_ADDRESS_GB; /* Busy flag */

	WRITE4(sc, GMII_ADDRESS, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(READ4(sc, GMII_ADDRESS) & GMII_ADDRESS_GB)) {
			rv = READ4(sc, GMII_DATA);
			break;
		}
		DELAY(10);
	}

	return (rv);
}

static int
dwc_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct dwc_softc *sc;
	uint16_t mii;
	size_t cnt;

	sc = device_get_softc(dev);

	mii = ((phy & GMII_ADDRESS_PA_MASK) << GMII_ADDRESS_PA_SHIFT)
	    | ((reg & GMII_ADDRESS_GR_MASK) << GMII_ADDRESS_GR_SHIFT)
	    | (sc->mii_clk << GMII_ADDRESS_CR_SHIFT)
	    | GMII_ADDRESS_GB | GMII_ADDRESS_GW;

	WRITE4(sc, GMII_DATA, val);
	WRITE4(sc, GMII_ADDRESS, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(READ4(sc, GMII_ADDRESS) & GMII_ADDRESS_GB)) {
			break;
		}
		DELAY(10);
	}

	return (0);
}

static void
dwc_miibus_statchg(device_t dev)
{
	struct dwc_softc *sc;
	struct mii_data *mii;
	uint32_t reg;

	/*
	 * Called by the MII bus driver when the PHY establishes
	 * link to set the MAC interface registers.
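	 *
	 * The line rate is encoded in the PS (port select) and FES (speed)
	 * bits of MAC_CONFIGURATION: PS clear selects the 1000 Mb/s (GMII)
	 * interface, PS together with FES selects 100 Mb/s, and PS without
	 * FES selects 10 Mb/s, which is how the switch below programs them.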
	 */

	sc = device_get_softc(dev);

	DWC_ASSERT_LOCKED(sc);

	mii = sc->mii_softc;

	if (mii->mii_media_status & IFM_ACTIVE)
		sc->link_is_up = true;
	else
		sc->link_is_up = false;

	reg = READ4(sc, MAC_CONFIGURATION);
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		reg &= ~(CONF_FES | CONF_PS);
		break;
	case IFM_100_TX:
		reg |= (CONF_FES | CONF_PS);
		break;
	case IFM_10_T:
		reg &= ~(CONF_FES);
		reg |= (CONF_PS);
		break;
	case IFM_NONE:
		sc->link_is_up = false;
		return;
	default:
		sc->link_is_up = false;
		device_printf(dev, "Unsupported media %u\n",
		    IFM_SUBTYPE(mii->mii_media_active));
		return;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		reg |= (CONF_DM);
	else
		reg &= ~(CONF_DM);
	WRITE4(sc, MAC_CONFIGURATION, reg);

	reg = FLOW_CONTROL_UP;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		reg |= FLOW_CONTROL_TX;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		reg |= FLOW_CONTROL_RX;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		reg |= dwc_pause_time << FLOW_CONTROL_PT_SHIFT;
	WRITE4(sc, FLOW_CONTROL, reg);

	IF_DWC_SET_SPEED(dev, IFM_SUBTYPE(mii->mii_media_active));
}

/*
 * Media functions
 */

static void
dwc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct dwc_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = sc->mii_softc;
	DWC_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	DWC_UNLOCK(sc);
}

static int
dwc_media_change_locked(struct dwc_softc *sc)
{

	return (mii_mediachg(sc->mii_softc));
}

static int
dwc_media_change(struct ifnet *ifp)
{
	struct dwc_softc *sc;
	int error;

	sc = ifp->if_softc;

	DWC_LOCK(sc);
	error = dwc_media_change_locked(sc);
	DWC_UNLOCK(sc);
	return (error);
}

/*
 * Core functions
 */

static const uint8_t nibbletab[] = {
	/* 0x0 0000 -> 0000 */ 0x0,
	/* 0x1 0001 -> 1000 */ 0x8,
	/* 0x2 0010 -> 0100 */ 0x4,
	/* 0x3 0011 -> 1100 */ 0xc,
	/* 0x4 0100 -> 0010 */ 0x2,
	/* 0x5 0101 -> 1010 */ 0xa,
	/* 0x6 0110 -> 0110 */ 0x6,
	/* 0x7 0111 -> 1110 */ 0xe,
	/* 0x8 1000 -> 0001 */ 0x1,
	/* 0x9 1001 -> 1001 */ 0x9,
	/* 0xa 1010 -> 0101 */ 0x5,
	/* 0xb 1011 -> 1101 */ 0xd,
	/* 0xc 1100 -> 0011 */ 0x3,
	/* 0xd 1101 -> 1011 */ 0xb,
	/* 0xe 1110 -> 0111 */ 0x7,
	/* 0xf 1111 -> 1111 */ 0xf,
};

static uint8_t
bitreverse(uint8_t x)
{

	return (nibbletab[x & 0xf] << 4) | nibbletab[x >> 4];
}

static u_int
dwc_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct dwc_hash_maddr_ctx *ctx = arg;
	uint32_t crc, hashbit, hashreg;
	uint8_t val;

	crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN);
	/* Take the lower 8 bits and reverse them */
	val = bitreverse(~crc & 0xff);
	if (ctx->sc->mactype != DWC_GMAC_EXT_DESC)
		val >>= 2; /* Only need the lower 6 bits */
	hashreg = (val >> 5);
	hashbit = (val & 31);
	ctx->hash[hashreg] |= (1 << hashbit);

	return (1);
}

static void
dwc_setup_rxfilter(struct dwc_softc *sc)
{
	struct dwc_hash_maddr_ctx ctx;
	struct ifnet *ifp;
	uint8_t *eaddr;
	uint32_t ffval, hi, lo;
	int nhash, i;
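
	/*
	 * The receive filter combines a perfect match on the station
	 * address with a hash of the multicast group addresses: cores using
	 * the normal descriptor format have a 64-bin hash split across two
	 * 32-bit registers, while DWC_GMAC_EXT_DESC cores have a 256-bin
	 * hash spread over eight registers (hence nhash).
	 */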

	DWC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	nhash = sc->mactype != DWC_GMAC_EXT_DESC ? 2 : 8;

	/*
	 * Set the multicast (group) filter hash.
	 */
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		ffval = (FRAME_FILTER_PM);
		for (i = 0; i < nhash; i++)
			ctx.hash[i] = ~0;
	} else {
		ffval = (FRAME_FILTER_HMC);
		for (i = 0; i < nhash; i++)
			ctx.hash[i] = 0;
		ctx.sc = sc;
		if_foreach_llmaddr(ifp, dwc_hash_maddr, &ctx);
	}

	/*
	 * Pass all frames when in promiscuous mode.
	 */
	if (ifp->if_flags & IFF_PROMISC)
		ffval |= (FRAME_FILTER_PR);

	/*
	 * Set the primary address.
	 */
	eaddr = IF_LLADDR(ifp);
	lo = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) |
	    (eaddr[3] << 24);
	hi = eaddr[4] | (eaddr[5] << 8);
	WRITE4(sc, MAC_ADDRESS_LOW(0), lo);
	WRITE4(sc, MAC_ADDRESS_HIGH(0), hi);
	WRITE4(sc, MAC_FRAME_FILTER, ffval);
	if (sc->mactype != DWC_GMAC_EXT_DESC) {
		WRITE4(sc, GMAC_MAC_HTLOW, ctx.hash[0]);
		WRITE4(sc, GMAC_MAC_HTHIGH, ctx.hash[1]);
	} else {
		for (i = 0; i < nhash; i++)
			WRITE4(sc, HASH_TABLE_REG(i), ctx.hash[i]);
	}
}

static void
dwc_setup_core(struct dwc_softc *sc)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	/* Enable core */
	reg = READ4(sc, MAC_CONFIGURATION);
	reg |= (CONF_JD | CONF_ACS | CONF_BE);
	WRITE4(sc, MAC_CONFIGURATION, reg);
}

static void
dwc_enable_mac(struct dwc_softc *sc, bool enable)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);
	reg = READ4(sc, MAC_CONFIGURATION);
	if (enable)
		reg |= CONF_TE | CONF_RE;
	else
		reg &= ~(CONF_TE | CONF_RE);
	WRITE4(sc, MAC_CONFIGURATION, reg);
}

static void
dwc_get_hwaddr(struct dwc_softc *sc, uint8_t *hwaddr)
{
	uint32_t hi, lo, rnd;

	/*
	 * Try to recover a MAC address from the running hardware. If there's
	 * something other than the all-ones reset value there, assume the
	 * bootloader did the right thing and just use it.
	 *
	 * Otherwise, set the address to a convenient locally assigned address,
	 * 'bsd' + random 24 low-order bits. 'b' is 0x62, which has the locally
	 * assigned bit set, and the broadcast/multicast bit clear.
	 */
	lo = READ4(sc, MAC_ADDRESS_LOW(0));
	hi = READ4(sc, MAC_ADDRESS_HIGH(0)) & 0xffff;
	if ((lo != 0xffffffff) || (hi != 0xffff)) {
		hwaddr[0] = (lo >> 0) & 0xff;
		hwaddr[1] = (lo >> 8) & 0xff;
		hwaddr[2] = (lo >> 16) & 0xff;
		hwaddr[3] = (lo >> 24) & 0xff;
		hwaddr[4] = (hi >> 0) & 0xff;
		hwaddr[5] = (hi >> 8) & 0xff;
	} else {
		rnd = arc4random() & 0x00ffffff;
		hwaddr[0] = 'b';
		hwaddr[1] = 's';
		hwaddr[2] = 'd';
		hwaddr[3] = rnd >> 16;
		hwaddr[4] = rnd >> 8;
		hwaddr[5] = rnd >> 0;
	}
}

/*
 * DMA functions
 */

static void
dwc_init_dma(struct dwc_softc *sc)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	/* Initialize DMA and enable transmitters */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_TSF | MODE_OSF | MODE_FUF);
	reg &= ~(MODE_RSF);
	reg |= (MODE_RTC_LEV32 << MODE_RTC_SHIFT);
	WRITE4(sc, OPERATION_MODE, reg);

	WRITE4(sc, INTERRUPT_ENABLE, INT_EN_DEFAULT);

	/* Start DMA */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_ST | MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);
}

static void
dwc_stop_dma(struct dwc_softc *sc)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	/* Stop DMA TX */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_ST);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Flush TX */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_FTF);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Stop DMA RX */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);
}

static inline uint32_t
next_rxidx(struct dwc_softc *sc, uint32_t curidx)
{

	return ((curidx + 1) % RX_DESC_COUNT);
}

static inline uint32_t
next_txidx(struct dwc_softc *sc, uint32_t curidx)
{

	return ((curidx + 1) % TX_DESC_COUNT);
}

static void
dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}
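
/*
 * Program one TX descriptor.  The two descriptor layouts place the control
 * flags differently: the normal format carries both the flags and the
 * buffer length in desc1, while the extended format (DWC_GMAC_EXT_DESC)
 * keeps the flags in desc0 and the length in desc1.
 */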
inline static void
dwc_setup_txdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr,
    uint32_t len, uint32_t flags, bool first, bool last)
{
	uint32_t desc0, desc1;

	/* Addr/len 0 means we're clearing the descriptor after xmit done. */
	if (paddr == 0 || len == 0) {
		desc0 = 0;
		desc1 = 0;
		--sc->tx_desccount;
	} else {
		if (sc->mactype != DWC_GMAC_EXT_DESC) {
			desc0 = 0;
			desc1 = NTDESC1_TCH | len | flags;
			if (first)
				desc1 |= NTDESC1_FS;
			if (last)
				desc1 |= NTDESC1_LS | NTDESC1_IC;
		} else {
			desc0 = ETDESC0_TCH | flags;
			if (first)
				desc0 |= ETDESC0_FS;
			if (last)
				desc0 |= ETDESC0_LS | ETDESC0_IC;
			desc1 = len;
		}
		++sc->tx_desccount;
	}

	sc->txdesc_ring[idx].addr1 = (uint32_t)(paddr);
	sc->txdesc_ring[idx].desc0 = desc0;
	sc->txdesc_ring[idx].desc1 = desc1;
}

inline static void
dwc_set_owner(struct dwc_softc *sc, int idx)
{
	wmb();
	sc->txdesc_ring[idx].desc0 |= TDESC0_OWN;
	wmb();
}

static int
dwc_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp)
{
	struct bus_dma_segment segs[TX_MAP_MAX_SEGS];
	int error, nsegs;
	struct mbuf *m;
	uint32_t flags = 0;
	int i;
	int first, last;

	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    *mp, segs, &nsegs, 0);
	if (error == EFBIG) {
		/*
		 * The map may be partially mapped from the first call.
		 * Make sure to reset it.
		 */
		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
		if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
			return (ENOMEM);
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag,
		    sc->txbuf_map[idx].map, *mp, segs, &nsegs, 0);
	}
	if (error != 0)
		return (ENOMEM);

	if (sc->tx_desccount + nsegs > TX_DESC_COUNT) {
		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
		return (ENOMEM);
	}

	m = *mp;

	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) {
			if (sc->mactype != DWC_GMAC_EXT_DESC)
				flags = NTDESC1_CIC_FULL;
			else
				flags = ETDESC0_CIC_FULL;
		} else {
			if (sc->mactype != DWC_GMAC_EXT_DESC)
				flags = NTDESC1_CIC_HDR;
			else
				flags = ETDESC0_CIC_HDR;
		}
	}

	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    BUS_DMASYNC_PREWRITE);

	sc->txbuf_map[idx].mbuf = m;
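
	/*
	 * Fill one descriptor per DMA segment.  Ownership is handed to the
	 * hardware back-to-front: every descriptor after the first gets its
	 * OWN bit as soon as it is written, but the first descriptor's OWN
	 * bit is set only after the whole chain has been built, so the DMA
	 * engine never sees a partially constructed chain.
	 */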
	first = sc->tx_desc_head;
	for (i = 0; i < nsegs; i++) {
		dwc_setup_txdesc(sc, sc->tx_desc_head,
		    segs[i].ds_addr, segs[i].ds_len,
		    (i == 0) ? flags : 0, /* only first desc needs flags */
		    (i == 0),
		    (i == nsegs - 1));
		if (i > 0)
			dwc_set_owner(sc, sc->tx_desc_head);
		last = sc->tx_desc_head;
		sc->tx_desc_head = next_txidx(sc, sc->tx_desc_head);
	}

	sc->txbuf_map[idx].last_desc_idx = last;

	dwc_set_owner(sc, first);

	return (0);
}

inline static uint32_t
dwc_setup_rxdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr)
{
	uint32_t nidx;

	sc->rxdesc_ring[idx].addr1 = (uint32_t)paddr;
	nidx = next_rxidx(sc, idx);
	sc->rxdesc_ring[idx].addr2 = sc->rxdesc_ring_paddr +
	    (nidx * sizeof(struct dwc_hwdesc));
	if (sc->mactype != DWC_GMAC_EXT_DESC)
		sc->rxdesc_ring[idx].desc1 = NRDESC1_RCH |
		    MIN(MCLBYTES, NRDESC1_RBS1_MASK);
	else
		sc->rxdesc_ring[idx].desc1 = ERDESC1_RCH |
		    MIN(MCLBYTES, ERDESC1_RBS1_MASK);

	wmb();
	sc->rxdesc_ring[idx].desc0 = RDESC0_OWN;
	wmb();
	return (nidx);
}

static int
dwc_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m)
{
	struct bus_dma_segment seg;
	int error, nsegs;

	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    m, &seg, &nsegs, 0);
	if (error != 0)
		return (error);

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    BUS_DMASYNC_PREREAD);

	sc->rxbuf_map[idx].mbuf = m;
	dwc_setup_rxdesc(sc, idx, seg.ds_addr);

	return (0);
}

static struct mbuf *
dwc_alloc_mbufcl(struct dwc_softc *sc)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return (m);
}

static struct mbuf *
dwc_rxfinish_one(struct dwc_softc *sc, struct dwc_hwdesc *desc,
    struct dwc_bufmap *map)
{
	struct ifnet *ifp;
	struct mbuf *m, *m0;
	int len;
	uint32_t rdesc0;

	m = map->mbuf;
	ifp = sc->ifp;
	rdesc0 = desc->desc0;
	/* Validate descriptor. */
	if (rdesc0 & RDESC0_ES) {
		/*
		 * Errored packet.  Statistic counters are updated
		 * globally, so do nothing here.
		 */
		return (NULL);
	}

	if ((rdesc0 & (RDESC0_FS | RDESC0_LS)) !=
	    (RDESC0_FS | RDESC0_LS)) {
		/*
		 * Something went very wrong.  The whole packet should be
		 * received in a single descriptor.  Report the problem.
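		 * A frame spanning several descriptors would carry FS and
		 * LS in different descriptors, but each receive buffer here
		 * is a full mbuf cluster, large enough for a standard
		 * ethernet frame.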
		 */
		device_printf(sc->dev,
		    "%s: RX descriptor without FIRST and LAST bit set: 0x%08X",
		    __func__, rdesc0);
		return (NULL);
	}

	len = (rdesc0 >> RDESC0_FL_SHIFT) & RDESC0_FL_MASK;
	if (len < 64) {
		/*
		 * Length is invalid; recycle the old mbuf.
		 * Probably an impossible case.
		 */
		return (NULL);
	}

	/* Allocate new buffer */
	m0 = dwc_alloc_mbufcl(sc);
	if (m0 == NULL) {
		/* no new mbuf available, recycle old */
		if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
		return (NULL);
	}
	/* Do dmasync for newly received packet */
	bus_dmamap_sync(sc->rxbuf_tag, map->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->rxbuf_tag, map->map);

	/* Received packet is valid, process it */
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = len;
	m->m_len = len;
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
	    (rdesc0 & RDESC0_FT) != 0) {
		m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
		if ((rdesc0 & RDESC0_ICE) == 0)
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if ((rdesc0 & RDESC0_PCE) == 0) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}

	/* Remove trailing FCS */
	m_adj(m, -ETHER_CRC_LEN);

	DWC_UNLOCK(sc);
	(*ifp->if_input)(ifp, m);
	DWC_LOCK(sc);
	return (m0);
}

static int
setup_dma(struct dwc_softc *sc)
{
	struct mbuf *m;
	int error;
	int nidx;
	int idx;

	/*
	 * Set up TX descriptor ring, descriptors, and dma maps.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    TX_DESC_SIZE, 1,		/* maxsize, nsegments */
	    TX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->txdesc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX ring DMA tag.\n");
		goto out;
	}

	error = bus_dmamem_alloc(sc->txdesc_tag, (void**)&sc->txdesc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->txdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate TX descriptor ring.\n");
		goto out;
	}

	error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map,
	    sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr,
	    &sc->txdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load TX descriptor ring map.\n");
		goto out;
	}

	for (idx = 0; idx < TX_DESC_COUNT; idx++) {
		nidx = next_txidx(sc, idx);
		sc->txdesc_ring[idx].addr2 = sc->txdesc_ring_paddr +
		    (nidx * sizeof(struct dwc_hwdesc));
	}
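
	/*
	 * The rings operate in chained rather than ring mode: addr2 of every
	 * descriptor points at the next descriptor (wrapping at the end),
	 * and the TCH/RCH "second address chained" bits set when descriptors
	 * are programmed tell the DMA engine to follow that pointer instead
	 * of treating addr2 as a second buffer.
	 */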

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES*TX_MAP_MAX_SEGS,	/* maxsize */
	    TX_MAP_MAX_SEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->txbuf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX buffer DMA tag.\n");
		goto out;
	}

	for (idx = 0; idx < TX_MAP_COUNT; idx++) {
		error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT,
		    &sc->txbuf_map[idx].map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create TX buffer DMA map.\n");
			goto out;
		}
	}

	for (idx = 0; idx < TX_DESC_COUNT; idx++)
		dwc_setup_txdesc(sc, idx, 0, 0, 0, false, false);

	/*
	 * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RX_DESC_SIZE, 1,		/* maxsize, nsegments */
	    RX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxdesc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create RX ring DMA tag.\n");
		goto out;
	}

	error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->rxdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate RX descriptor ring.\n");
		goto out;
	}

	error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map,
	    sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr,
	    &sc->rxdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load RX descriptor ring map.\n");
		goto out;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxbuf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create RX buf DMA tag.\n");
		goto out;
	}

	for (idx = 0; idx < RX_DESC_COUNT; idx++) {
		error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT,
		    &sc->rxbuf_map[idx].map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create RX buffer DMA map.\n");
			goto out;
		}
		if ((m = dwc_alloc_mbufcl(sc)) == NULL) {
			device_printf(sc->dev, "Could not alloc mbuf\n");
			error = ENOMEM;
			goto out;
		}
		if ((error = dwc_setup_rxbuf(sc, idx, m)) != 0) {
			device_printf(sc->dev,
			    "could not create new RX buffer.\n");
			goto out;
		}
	}

out:
	if (error != 0)
		return (ENXIO);

	return (0);
}

/*
 * if_ functions
 */

static void
dwc_txstart_locked(struct dwc_softc *sc)
{
	struct ifnet *ifp;
	struct mbuf *m;
	int enqueued;

	DWC_ASSERT_LOCKED(sc);

	if (!sc->link_is_up)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	enqueued = 0;

	for (;;) {
		if (sc->tx_desccount > (TX_DESC_COUNT - TX_MAP_MAX_SEGS + 1)) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		if (sc->tx_mapcount == (TX_MAP_COUNT - 1)) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		m = if_dequeue(ifp);
		if (m == NULL)
			break;
		if (dwc_setup_txbuf(sc, sc->tx_map_head, &m) != 0) {
			if_sendq_prepend(ifp, m);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		if_bpfmtap(ifp, m);
		sc->tx_map_head = next_txidx(sc, sc->tx_map_head);
		sc->tx_mapcount++;
		++enqueued;
	}

	if (enqueued != 0) {
		WRITE4(sc, TRANSMIT_POLL_DEMAND, 0x1);
		sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
	}
}

static void
dwc_txstart(struct ifnet *ifp)
{
	struct dwc_softc *sc = ifp->if_softc;

	DWC_LOCK(sc);
	dwc_txstart_locked(sc);
	DWC_UNLOCK(sc);
}

static void
dwc_init_locked(struct dwc_softc *sc)
{
	struct ifnet *ifp = sc->ifp;

	DWC_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	dwc_setup_rxfilter(sc);
	dwc_setup_core(sc);
	dwc_enable_mac(sc, true);
	dwc_init_dma(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	/*
	 * Call mii_mediachg() which will call back into dwc_miibus_statchg()
	 * to set up the remaining config registers based on current media.
	 */
	mii_mediachg(sc->mii_softc);
	callout_reset(&sc->dwc_callout, hz, dwc_tick, sc);
}

static void
dwc_init(void *if_softc)
{
	struct dwc_softc *sc = if_softc;

	DWC_LOCK(sc);
	dwc_init_locked(sc);
	DWC_UNLOCK(sc);
}

static void
dwc_stop_locked(struct dwc_softc *sc)
{
	struct ifnet *ifp;

	DWC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->tx_watchdog_count = 0;
	sc->stats_harvest_count = 0;

	callout_stop(&sc->dwc_callout);

	dwc_stop_dma(sc);
	dwc_enable_mac(sc, false);
}

static int
dwc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct dwc_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		DWC_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					dwc_setup_rxfilter(sc);
			} else {
				if (!sc->is_detaching)
					dwc_init_locked(sc);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				dwc_stop_locked(sc);
		}
		sc->if_flags = if_getflags(ifp);
		DWC_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			DWC_LOCK(sc);
			dwc_setup_rxfilter(sc);
			DWC_UNLOCK(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = sc->mii_softc;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		if (mask & IFCAP_VLAN_MTU) {
			/* No work to do except acknowledge the change took */
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		}
		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if (mask & IFCAP_TXCSUM)
			if_togglecapenable(ifp, IFCAP_TXCSUM);
		if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
			if_sethwassistbits(ifp, CSUM_IP | CSUM_UDP | CSUM_TCP, 0);
		else
			if_sethwassistbits(ifp, 0, CSUM_IP | CSUM_UDP | CSUM_TCP);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/*
 * Interrupt functions
 */

static void
dwc_txfinish_locked(struct dwc_softc *sc)
{
	struct dwc_bufmap *bmap;
	struct dwc_hwdesc *desc;
	struct ifnet *ifp;
	int idx, last_idx;
	bool map_finished;

	DWC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	/* check if all descriptors of the map are done */
	while (sc->tx_map_tail != sc->tx_map_head) {
		map_finished = true;
		bmap = &sc->txbuf_map[sc->tx_map_tail];
		idx = sc->tx_desc_tail;
		last_idx = next_txidx(sc, bmap->last_desc_idx);
		while (idx != last_idx) {
			desc = &sc->txdesc_ring[idx];
			if ((desc->desc0 & TDESC0_OWN) != 0) {
				map_finished = false;
				break;
			}
			idx = next_txidx(sc, idx);
		}

		if (!map_finished)
			break;
		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
		m_freem(bmap->mbuf);
		bmap->mbuf = NULL;
		sc->tx_mapcount--;
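		/* Hand the map's descriptors back to the free pool. */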
		while (sc->tx_desc_tail != last_idx) {
			dwc_setup_txdesc(sc, sc->tx_desc_tail, 0, 0, 0,
			    false, false);
			sc->tx_desc_tail = next_txidx(sc, sc->tx_desc_tail);
		}
		sc->tx_map_tail = next_txidx(sc, sc->tx_map_tail);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}

	/* If there are no buffers outstanding, muzzle the watchdog. */
	if (sc->tx_desc_tail == sc->tx_desc_head) {
		sc->tx_watchdog_count = 0;
	}
}

static void
dwc_rxfinish_locked(struct dwc_softc *sc)
{
	struct ifnet *ifp;
	struct mbuf *m;
	int error, idx;
	struct dwc_hwdesc *desc;

	DWC_ASSERT_LOCKED(sc);
	ifp = sc->ifp;
	for (;;) {
		idx = sc->rx_idx;
		desc = sc->rxdesc_ring + idx;
		if ((desc->desc0 & RDESC0_OWN) != 0)
			break;

		m = dwc_rxfinish_one(sc, desc, sc->rxbuf_map + idx);
		if (m == NULL) {
			wmb();
			desc->desc0 = RDESC0_OWN;
			wmb();
		} else {
			/* We cannot create a hole in the RX ring */
			error = dwc_setup_rxbuf(sc, idx, m);
			if (error != 0)
				panic("dwc_setup_rxbuf failed:  error %d\n",
				    error);
		}
		sc->rx_idx = next_rxidx(sc, sc->rx_idx);
	}
}

static void
dwc_intr(void *arg)
{
	struct dwc_softc *sc;
	uint32_t reg;

	sc = arg;

	DWC_LOCK(sc);

	reg = READ4(sc, INTERRUPT_STATUS);
	if (reg)
		READ4(sc, SGMII_RGMII_SMII_CTRL_STATUS);

	reg = READ4(sc, DMA_STATUS);
	if (reg & DMA_STATUS_NIS) {
		if (reg & DMA_STATUS_RI)
			dwc_rxfinish_locked(sc);

		if (reg & DMA_STATUS_TI) {
			dwc_txfinish_locked(sc);
			dwc_txstart_locked(sc);
		}
	}

	if (reg & DMA_STATUS_AIS) {
		if (reg & DMA_STATUS_FBI) {
			/* Fatal bus error */
			device_printf(sc->dev,
			    "Ethernet DMA error, restarting controller.\n");
			dwc_stop_locked(sc);
			dwc_init_locked(sc);
		}
	}

	WRITE4(sc, DMA_STATUS, reg & DMA_STATUS_INTR_MASK);
	DWC_UNLOCK(sc);
}

/*
 * Stats
 */

static void
dwc_clear_stats(struct dwc_softc *sc)
{
	uint32_t reg;

	reg = READ4(sc, MMC_CONTROL);
	reg |= (MMC_CONTROL_CNTRST);
	WRITE4(sc, MMC_CONTROL, reg);
}
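
/*
 * Accumulate the hardware MMC counters into the ifnet counters.
 * dwc_clear_stats() sets the counter-reset bit in MMC_CONTROL after each
 * harvest, so each read below picks up only the delta accumulated since
 * the previous harvest.
 */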
static void
dwc_harvest_stats(struct dwc_softc *sc)
{
	struct ifnet *ifp;

	/* We don't need to harvest too often. */
	if (++sc->stats_harvest_count < STATS_HARVEST_INTERVAL)
		return;

	sc->stats_harvest_count = 0;
	ifp = sc->ifp;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, READ4(sc, RXFRAMECOUNT_GB));
	if_inc_counter(ifp, IFCOUNTER_IMCASTS, READ4(sc, RXMULTICASTFRAMES_G));
	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    READ4(sc, RXOVERSIZE_G) + READ4(sc, RXUNDERSIZE_G) +
	    READ4(sc, RXCRCERROR) + READ4(sc, RXALIGNMENTERROR) +
	    READ4(sc, RXRUNTERROR) + READ4(sc, RXJABBERERROR) +
	    READ4(sc, RXLENGTHERROR));

	if_inc_counter(ifp, IFCOUNTER_OPACKETS, READ4(sc, TXFRAMECOUNT_G));
	if_inc_counter(ifp, IFCOUNTER_OMCASTS, READ4(sc, TXMULTICASTFRAMES_G));
	if_inc_counter(ifp, IFCOUNTER_OERRORS,
	    READ4(sc, TXOVERSIZE_G) + READ4(sc, TXEXCESSDEF) +
	    READ4(sc, TXCARRIERERR) + READ4(sc, TXUNDERFLOWERROR));

	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    READ4(sc, TXEXESSCOL) + READ4(sc, TXLATECOL));

	dwc_clear_stats(sc);
}

static void
dwc_tick(void *arg)
{
	struct dwc_softc *sc;
	struct ifnet *ifp;
	int link_was_up;

	sc = arg;

	DWC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	/*
	 * Typical tx watchdog.  If this fires it indicates that we enqueued
	 * packets for output and never got a txdone interrupt for them.
	 * Maybe it's a missed interrupt somehow, just pretend we got one.
	 */
	if (sc->tx_watchdog_count > 0) {
		if (--sc->tx_watchdog_count == 0) {
			dwc_txfinish_locked(sc);
		}
	}

	/* Gather stats from hardware counters. */
	dwc_harvest_stats(sc);

	/* Check the media status. */
	link_was_up = sc->link_is_up;
	mii_tick(sc->mii_softc);
	if (sc->link_is_up && !link_was_up)
		dwc_txstart_locked(sc);

	/* Schedule another check one second from now. */
	callout_reset(&sc->dwc_callout, hz, dwc_tick, sc);
}

/*
 * Probe/Attach functions
 */

#define	GPIO_ACTIVE_LOW 1

static int
dwc_reset(device_t dev)
{
	pcell_t gpio_prop[4];
	pcell_t delay_prop[3];
	phandle_t node, gpio_node;
	device_t gpio;
	uint32_t pin, flags;
	uint32_t pin_value;

	node = ofw_bus_get_node(dev);
	if (OF_getencprop(node, "snps,reset-gpio",
	    gpio_prop, sizeof(gpio_prop)) <= 0)
		return (0);

	if (OF_getencprop(node, "snps,reset-delays-us",
	    delay_prop, sizeof(delay_prop)) <= 0) {
		device_printf(dev,
		    "Wrong property for snps,reset-delays-us\n");
		return (ENXIO);
	}

	gpio_node = OF_node_from_xref(gpio_prop[0]);
	if ((gpio = OF_device_from_xref(gpio_prop[0])) == NULL) {
		device_printf(dev,
		    "Can't find gpio controller for phy reset\n");
		return (ENXIO);
	}

	if (GPIO_MAP_GPIOS(gpio, node, gpio_node,
	    nitems(gpio_prop) - 1,
	    gpio_prop + 1, &pin, &flags) != 0) {
		device_printf(dev, "Can't map gpio for phy reset\n");
		return (ENXIO);
	}

	pin_value = GPIO_PIN_LOW;
	if (OF_hasprop(node, "snps,reset-active-low"))
		pin_value = GPIO_PIN_HIGH;

	GPIO_PIN_SETFLAGS(gpio, pin, GPIO_PIN_OUTPUT);
	GPIO_PIN_SET(gpio, pin, pin_value);
	DELAY(delay_prop[0] * 5);
	GPIO_PIN_SET(gpio, pin, !pin_value);
	DELAY(delay_prop[1] * 5);
	GPIO_PIN_SET(gpio, pin, pin_value);
	DELAY(delay_prop[2] * 5);

	return (0);
}

#ifdef EXT_RESOURCES
static int
dwc_clock_init(device_t dev)
{
	hwreset_t rst;
	clk_t clk;
	int error;
	int64_t freq;

	/* Enable clocks */
	if (clk_get_by_ofw_name(dev, 0, "stmmaceth", &clk) == 0) {
		error = clk_enable(clk);
		if (error != 0) {
			device_printf(dev, "could not enable main clock\n");
			return (error);
		}
		if (bootverbose) {
			clk_get_freq(clk, &freq);
			device_printf(dev, "MAC clock(%s) freq: %jd\n",
			    clk_get_name(clk), (intmax_t)freq);
		}
	} else {
		device_printf(dev, "could not find clock stmmaceth\n");
	}

	/* De-assert reset */
	if (hwreset_get_by_ofw_name(dev, 0, "stmmaceth", &rst) == 0) {
		error = hwreset_deassert(rst);
		if (error != 0) {
			device_printf(dev, "could not de-assert reset\n");
			return (error);
		}
	}

	return (0);
}
#endif

static int
dwc_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "snps,dwmac"))
		return (ENXIO);

	device_set_desc(dev, "Gigabit Ethernet Controller");
	return (BUS_PROBE_DEFAULT);
}

static int
dwc_attach(device_t dev)
{
	uint8_t macaddr[ETHER_ADDR_LEN];
	struct dwc_softc *sc;
	struct ifnet *ifp;
	int error, i;
	uint32_t reg;
	phandle_t node;
	uint32_t txpbl, rxpbl, pbl;
	bool nopblx8 = false;
	bool fixed_burst = false;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->rx_idx = 0;
	sc->tx_desccount = TX_DESC_COUNT;
	sc->tx_mapcount = 0;
	sc->mii_clk = IF_DWC_MII_CLK(dev);
	sc->mactype = IF_DWC_MAC_TYPE(dev);

	node = ofw_bus_get_node(dev);
	switch (mii_fdt_get_contype(node)) {
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		sc->phy_mode = PHY_MODE_RGMII;
		break;
	case MII_CONTYPE_RMII:
		sc->phy_mode = PHY_MODE_RMII;
		break;
	default:
		device_printf(dev, "Unsupported MII type\n");
		return (ENXIO);
	}

	if (OF_getencprop(node, "snps,pbl", &pbl, sizeof(uint32_t)) <= 0)
		pbl = BUS_MODE_DEFAULT_PBL;
	if (OF_getencprop(node, "snps,txpbl", &txpbl, sizeof(uint32_t)) <= 0)
		txpbl = pbl;
	if (OF_getencprop(node, "snps,rxpbl", &rxpbl, sizeof(uint32_t)) <= 0)
		rxpbl = pbl;
	if (OF_hasprop(node, "snps,no-pbl-x8") == 1)
		nopblx8 = true;
	if (OF_hasprop(node, "snps,fixed-burst") == 1)
		fixed_burst = true;

	if (IF_DWC_INIT(dev) != 0)
		return (ENXIO);

#ifdef EXT_RESOURCES
	if (dwc_clock_init(dev) != 0)
		return (ENXIO);
#endif

	if (bus_alloc_resources(dev, dwc_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Read MAC before reset */
	dwc_get_hwaddr(sc, macaddr);

	/* Reset the PHY if needed */
	if (dwc_reset(dev) != 0) {
		device_printf(dev, "Can't reset the PHY\n");
		return (ENXIO);
	}

	/* Reset */
	reg = READ4(sc, BUS_MODE);
	reg |= (BUS_MODE_SWR);
	WRITE4(sc, BUS_MODE, reg);

	for (i = 0; i < MAC_RESET_TIMEOUT; i++) {
		if ((READ4(sc, BUS_MODE) & BUS_MODE_SWR) == 0)
			break;
		DELAY(10);
	}
	if (i >= MAC_RESET_TIMEOUT) {
		device_printf(sc->dev, "Can't reset DWC.\n");
		return (ENXIO);
	}

	reg = BUS_MODE_USP;
	if (!nopblx8)
		reg |= BUS_MODE_EIGHTXPBL;
	reg |= (txpbl << BUS_MODE_PBL_SHIFT);
	reg |= (rxpbl << BUS_MODE_RPBL_SHIFT);
	if (fixed_burst)
		reg |= BUS_MODE_FIXEDBURST;

	WRITE4(sc, BUS_MODE, reg);

	/*
	 * DMA must be stopped while changing descriptor list addresses.
	 */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_ST | MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);

	if (setup_dma(sc))
		return (ENXIO);

	/* Setup addresses */
	WRITE4(sc, RX_DESCR_LIST_ADDR, sc->rxdesc_ring_paddr);
	WRITE4(sc, TX_DESCR_LIST_ADDR, sc->txdesc_ring_paddr);

	mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
	    MTX_NETWORK_LOCK, MTX_DEF);

	callout_init_mtx(&sc->dwc_callout, &sc->mtx, 0);

	/* Setup interrupt handler. */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dwc_intr, sc, &sc->intr_cookie);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	/* Set up the ethernet interface. */
	sc->ifp = ifp = if_alloc(IFT_ETHER);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(ifp, dwc_txstart);
	if_setioctlfn(ifp, dwc_ioctl);
	if_setinitfn(ifp, dwc_init);
	if_setsendqlen(ifp, TX_MAP_COUNT - 1);
	if_setsendqready(sc->ifp);
	if_sethwassist(sc->ifp, CSUM_IP | CSUM_UDP | CSUM_TCP);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));

	/* Attach the mii driver. */
	error = mii_attach(dev, &sc->miibus, ifp, dwc_media_change,
	    dwc_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (error != 0) {
		device_printf(dev, "PHY attach failed\n");
		return (ENXIO);
	}
	sc->mii_softc = device_get_softc(sc->miibus);

	/* All ready to run, attach the ethernet interface. */
	ether_ifattach(ifp, macaddr);
	sc->is_attached = true;

	return (0);
}

static device_method_t dwc_methods[] = {
	DEVMETHOD(device_probe,		dwc_probe),
	DEVMETHOD(device_attach,	dwc_attach),

	/* MII Interface */
	DEVMETHOD(miibus_readreg,	dwc_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	dwc_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	dwc_miibus_statchg),

	{ 0, 0 }
};

driver_t dwc_driver = {
	"dwc",
	dwc_methods,
	sizeof(struct dwc_softc),
};

static devclass_t dwc_devclass;

DRIVER_MODULE(dwc, simplebus, dwc_driver, dwc_devclass, 0, 0);
DRIVER_MODULE(miibus, dwc, miibus_driver, miibus_devclass, 0, 0);

MODULE_DEPEND(dwc, ether, 1, 1, 1);
MODULE_DEPEND(dwc, miibus, 1, 1, 1);