/*-
 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * TI Common Platform Ethernet Switch (CPSW) Driver
 * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs.
 *
 * This controller is documented in the AM335x Technical Reference
 * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM
 * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM.
 *
 * It is basically a single Ethernet port (port 0) wired internally to
 * a 3-port store-and-forward switch connected to two independent
 * "sliver" controllers (port 1 and port 2). You can operate the
 * controller in a variety of different ways by suitably configuring
 * the slivers and the Address Lookup Engine (ALE) that routes packets
 * between the ports.
 *
 * This code was developed and tested on a BeagleBone with
 * an AM335x SoC.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include "if_cpswreg.h"
#include "if_cpswvar.h"

#include <arm/ti/ti_scm.h>

#include "miibus_if.h"

/* Device probe/attach/detach. */
static int cpsw_probe(device_t);
static void cpsw_init_slots(struct cpsw_softc *);
static int cpsw_attach(device_t);
static void cpsw_free_slot(struct cpsw_softc *, struct cpsw_slot *);
static int cpsw_detach(device_t);

/* Device Init/shutdown. */
static void cpsw_init(void *);
static void cpsw_init_locked(void *);
static int cpsw_shutdown(device_t);
static void cpsw_shutdown_locked(struct cpsw_softc *);

/* Device Suspend/Resume. */
static int cpsw_suspend(device_t);
static int cpsw_resume(device_t);

/* Ioctl. */
static int cpsw_ioctl(struct ifnet *, u_long command, caddr_t data);

static int cpsw_miibus_readreg(device_t, int phy, int reg);
static int cpsw_miibus_writereg(device_t, int phy, int reg, int value);

/* Send/Receive packets. */
static void cpsw_intr_rx(void *arg);
static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *);
static void cpsw_rx_enqueue(struct cpsw_softc *);
static void cpsw_start(struct ifnet *);
static void cpsw_tx_enqueue(struct cpsw_softc *);
static int cpsw_tx_dequeue(struct cpsw_softc *);

/* Misc interrupts and watchdog. */
static void cpsw_intr_rx_thresh(void *);
static void cpsw_intr_misc(void *);
static void cpsw_tick(void *);
static void cpsw_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int cpsw_ifmedia_upd(struct ifnet *);
static void cpsw_tx_watchdog(struct cpsw_softc *);

/* ALE support */
static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t idx, uint32_t *ale_entry);
static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t idx, uint32_t *ale_entry);
static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t portmap, uint8_t *mac);
static int cpsw_ale_update_addresses(struct cpsw_softc *, int purge);
static void cpsw_ale_dump_table(struct cpsw_softc *);

/* Statistics and sysctls. */
static void cpsw_add_sysctls(struct cpsw_softc *);
static void cpsw_stats_collect(struct cpsw_softc *);
static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS);

/*
 * Arbitrary limit on number of segments in an mbuf to be transmitted.
 * Packets with more segments than this will be defragmented before
 * they are queued.
 */
#define CPSW_TXFRAGS 8

/*
 * TODO: The CPSW subsystem (CPSW_SS) can drive two independent PHYs
 * as separate Ethernet ports.  To properly support this, we should
 * break this into two separate devices: a CPSW_SS device that owns
 * the interrupts and actually talks to the CPSW hardware, and a
 * separate CPSW Ethernet child device for each Ethernet port.  The RX
 * interrupt, for example, would be part of CPSW_SS; it would receive
 * a packet, note the input port, and then dispatch it to the child
 * device's interface queue.  Similarly for transmit.
 *
 * It's not clear to me whether the device tree should be restructured
 * with a cpsw_ss node and two child nodes.  That would allow specifying
 * MAC addresses for each port, for example, but might be overkill.
 *
 * Unfortunately, I don't have hardware right now that supports two
 * Ethernet ports via CPSW.
 */

static device_method_t cpsw_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, cpsw_probe),
	DEVMETHOD(device_attach, cpsw_attach),
	DEVMETHOD(device_detach, cpsw_detach),
	DEVMETHOD(device_shutdown, cpsw_shutdown),
	DEVMETHOD(device_suspend, cpsw_suspend),
	DEVMETHOD(device_resume, cpsw_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg, cpsw_miibus_readreg),
	DEVMETHOD(miibus_writereg, cpsw_miibus_writereg),
	{ 0, 0 }
};

static driver_t cpsw_driver = {
	"cpsw",
	cpsw_methods,
	sizeof(struct cpsw_softc),
};

static devclass_t cpsw_devclass;

DRIVER_MODULE(cpsw, simplebus, cpsw_driver, cpsw_devclass, 0, 0);
DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(cpsw, ether, 1, 1, 1);
MODULE_DEPEND(cpsw, miibus, 1, 1, 1);

static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

/* Number of entries here must match size of stats
 * array in struct cpsw_softc. */
static struct cpsw_stat {
	int reg;
	char *oid;
} cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = {
	{0x00, "GoodRxFrames"},
	{0x04, "BroadcastRxFrames"},
	{0x08, "MulticastRxFrames"},
	{0x0C, "PauseRxFrames"},
	{0x10, "RxCrcErrors"},
	{0x14, "RxAlignErrors"},
	{0x18, "OversizeRxFrames"},
	{0x1c, "RxJabbers"},
	{0x20, "ShortRxFrames"},
	{0x24, "RxFragments"},
	{0x30, "RxOctets"},
	{0x34, "GoodTxFrames"},
	{0x38, "BroadcastTxFrames"},
	{0x3c, "MulticastTxFrames"},
	{0x40, "PauseTxFrames"},
	{0x44, "DeferredTxFrames"},
	{0x48, "CollisionsTxFrames"},
	{0x4c, "SingleCollisionTxFrames"},
	{0x50, "MultipleCollisionTxFrames"},
	{0x54, "ExcessiveCollisions"},
	{0x58, "LateCollisions"},
	{0x5c, "TxUnderrun"},
	{0x60, "CarrierSenseErrors"},
	{0x64, "TxOctets"},
	{0x68, "RxTx64OctetFrames"},
	{0x6c, "RxTx65to127OctetFrames"},
	{0x70, "RxTx128to255OctetFrames"},
	{0x74, "RxTx256to511OctetFrames"},
	{0x78, "RxTx512to1024OctetFrames"},
	{0x7c, "RxTx1024upOctetFrames"},
	{0x80, "NetOctets"},
	{0x84, "RxStartOfFrameOverruns"},
	{0x88, "RxMiddleOfFrameOverruns"},
	{0x8c, "RxDmaOverruns"}
};

/*
 * Basic debug support.
 */

#define IF_DEBUG(sc) if (sc->cpsw_if_flags & IFF_DEBUG)

static void
cpsw_debugf_head(const char *funcname)
{
	int t = (int)(time_second % (24 * 60 * 60));

	printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname);
}

#include <machine/stdarg.h>
static void
cpsw_debugf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}
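
/*
 * Note: CPSW_DEBUGF below expects a variable named "sc" to be in scope at
 * every use site.  Output is gated at run time on IFF_DEBUG in
 * sc->cpsw_if_flags (snapshotted from ifp->if_flags in cpsw_ioctl()), so it
 * should be toggleable with "ifconfig cpswN debug" without a rebuild.
 */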

#define CPSW_DEBUGF(a) do {					\
	IF_DEBUG(sc) {						\
		cpsw_debugf_head(__func__);			\
		cpsw_debugf a;					\
	}							\
} while (0)

/*
 * Locking macros
 */
#define CPSW_TX_LOCK(sc) do {					\
		mtx_assert(&(sc)->rx.lock, MA_NOTOWNED);	\
		mtx_lock(&(sc)->tx.lock);			\
} while (0)

#define CPSW_TX_UNLOCK(sc)	mtx_unlock(&(sc)->tx.lock)
#define CPSW_TX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->tx.lock, MA_OWNED)

#define CPSW_RX_LOCK(sc) do {					\
		mtx_assert(&(sc)->tx.lock, MA_NOTOWNED);	\
		mtx_lock(&(sc)->rx.lock);			\
} while (0)

#define CPSW_RX_UNLOCK(sc)	mtx_unlock(&(sc)->rx.lock)
#define CPSW_RX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->rx.lock, MA_OWNED)

#define CPSW_GLOBAL_LOCK(sc) do {				\
		if ((mtx_owned(&(sc)->tx.lock) ? 1 : 0) !=	\
		    (mtx_owned(&(sc)->rx.lock) ? 1 : 0)) {	\
			panic("cpsw deadlock possibility detection!");	\
		}						\
		mtx_lock(&(sc)->tx.lock);			\
		mtx_lock(&(sc)->rx.lock);			\
} while (0)

#define CPSW_GLOBAL_UNLOCK(sc) do {				\
		CPSW_RX_UNLOCK(sc);				\
		CPSW_TX_UNLOCK(sc);				\
} while (0)

#define CPSW_GLOBAL_LOCK_ASSERT(sc) do {			\
		CPSW_TX_LOCK_ASSERT(sc);			\
		CPSW_RX_LOCK_ASSERT(sc);			\
} while (0)

/*
 * Read/Write macros
 */
#define	cpsw_read_4(sc, reg)		bus_read_4(sc->res[0], reg)
#define	cpsw_write_4(sc, reg, val)	bus_write_4(sc->res[0], reg, val)

#define	cpsw_cpdma_bd_offset(i)	(CPSW_CPPI_RAM_OFFSET + ((i)*16))

#define	cpsw_cpdma_bd_paddr(sc, slot)				\
	BUS_SPACE_PHYSADDR(sc->res[0], slot->bd_offset)
#define	cpsw_cpdma_read_bd(sc, slot, val)			\
	bus_read_region_4(sc->res[0], slot->bd_offset, (uint32_t *) val, 4)
#define	cpsw_cpdma_write_bd(sc, slot, val)			\
	bus_write_region_4(sc->res[0], slot->bd_offset, (uint32_t *) val, 4)
#define	cpsw_cpdma_write_bd_next(sc, slot, next_slot)		\
	cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot))
#define	cpsw_cpdma_read_bd_flags(sc, slot)			\
	bus_read_2(sc->res[0], slot->bd_offset + 14)
#define	cpsw_write_hdp_slot(sc, queue, slot)			\
	cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot))
#define	CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0))
#define	cpsw_read_cp(sc, queue)					\
	cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET)
#define	cpsw_write_cp(sc, queue, val)				\
	cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val))
#define	cpsw_write_cp_slot(sc, queue, slot)			\
	cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot))

#if 0
/* XXX temporary function versions for debugging. */
static void
cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
	uint32_t reg = queue->hdp_offset;
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg)));
	cpsw_write_4(sc, reg, v);
}

static void
cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue)));
	cpsw_write_cp(sc, queue, v);
}
#endif
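
/*
 * For reference when reading the macros above and the dump routines below:
 * each CPDMA buffer descriptor lives in the CPPI RAM window of res[0] and
 * occupies four 32-bit words (hence the 16-byte stride in
 * cpsw_cpdma_bd_offset()):
 *
 *   word 0: next descriptor pointer (physical address; 0 ends the queue)
 *   word 1: buffer pointer (physical address)
 *   word 2: buffer offset [31:16], buffer length [15:0]
 *   word 3: flags [31:16], packet length [15:0]
 *
 * struct cpsw_cpdma_bd in if_cpswvar.h mirrors this layout, which is why
 * cpsw_cpdma_read_bd_flags() can fetch just the 16-bit flags field at byte
 * offset 14.
 */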

/*
 * Expanded dump routines for verbose debugging.
 */
static void
cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ",
	    "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun",
	    "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1",
	    "Port0"};
	struct cpsw_cpdma_bd bd;
	const char *sep;
	int i;

	cpsw_cpdma_read_bd(sc, slot, &bd);
	printf("BD Addr: 0x%08x Next: 0x%08x\n", cpsw_cpdma_bd_paddr(sc, slot), bd.next);
	printf(" BufPtr: 0x%08x BufLen: 0x%08x\n", bd.bufptr, bd.buflen);
	printf(" BufOff: 0x%08x PktLen: 0x%08x\n", bd.bufoff, bd.pktlen);
	printf(" Flags: ");
	sep = "";
	for (i = 0; i < 16; ++i) {
		if (bd.flags & (1 << (15 - i))) {
			printf("%s%s", sep, flags[i]);
			sep = ",";
		}
	}
	printf("\n");
	if (slot->mbuf) {
		printf(" Ether: %14D\n",
		    (char *)(slot->mbuf->m_hdr.mh_data), " ");
		printf(" Packet: %16D\n",
		    (char *)(slot->mbuf->m_hdr.mh_data) + 14, " ");
	}
}

#define CPSW_DUMP_SLOT(cs, slot) do {				\
	IF_DEBUG(sc) {						\
		cpsw_dump_slot(sc, slot);			\
	}							\
} while (0)

static void
cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q)
{
	struct cpsw_slot *slot;
	int i = 0;
	int others = 0;

	STAILQ_FOREACH(slot, q, next) {
		if (i > 4)
			++others;
		else
			cpsw_dump_slot(sc, slot);
		++i;
	}
	if (others)
		printf(" ... and %d more.\n", others);
	printf("\n");
}

#define CPSW_DUMP_QUEUE(sc, q) do {				\
	IF_DEBUG(sc) {						\
		cpsw_dump_queue(sc, q);				\
	}							\
} while (0)

/*
 *
 * Device Probe, Attach, Detach.
 *
 */

static int
cpsw_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
		return (ENXIO);

	device_set_desc(dev, "3-port Switch Ethernet Subsystem");
	return (BUS_PROBE_DEFAULT);
}

static void
cpsw_init_slots(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot;
	int i;

	STAILQ_INIT(&sc->avail);

	/* Put the slot descriptors onto the global avail list. */
	for (i = 0; i < sizeof(sc->_slots) / sizeof(sc->_slots[0]); i++) {
		slot = &sc->_slots[i];
		slot->bd_offset = cpsw_cpdma_bd_offset(i);
		STAILQ_INSERT_TAIL(&sc->avail, slot, next);
	}
}

/*
 * bind an interrupt, add the relevant info to sc->interrupts
 */
static int
cpsw_attach_interrupt(struct cpsw_softc *sc, struct resource *res, driver_intr_t *handler, const char *description)
{
	void **pcookie;
	int error;

	sc->interrupts[sc->interrupt_count].res = res;
	sc->interrupts[sc->interrupt_count].description = description;
	pcookie = &sc->interrupts[sc->interrupt_count].ih_cookie;

	error = bus_setup_intr(sc->dev, res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, *handler, sc, pcookie);
	if (error)
		device_printf(sc->dev,
		    "could not setup %s\n", description);
	else
		++sc->interrupt_count;
	return (error);
}

/*
 * teardown everything in sc->interrupts.
 */
static void
cpsw_detach_interrupts(struct cpsw_softc *sc)
{
	int error;
	int i;

	for (i = 0; i < sizeof(sc->interrupts) / sizeof(sc->interrupts[0]); ++i) {
		if (!sc->interrupts[i].ih_cookie)
			continue;
		error = bus_teardown_intr(sc->dev,
		    sc->interrupts[i].res, sc->interrupts[i].ih_cookie);
		if (error)
			device_printf(sc->dev, "could not release %s\n",
			    sc->interrupts[i].description);
		sc->interrupts[i].ih_cookie = NULL;
	}
}

static int
cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested)
{
	const int max_slots = sizeof(sc->_slots) / sizeof(sc->_slots[0]);
	struct cpsw_slot *slot;
	int i;

	if (requested < 0)
		requested = max_slots;

	for (i = 0; i < requested; ++i) {
		slot = STAILQ_FIRST(&sc->avail);
		if (slot == NULL)
			return (0);
		if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
			if_printf(sc->ifp, "failed to create dmamap\n");
			return (ENOMEM);
		}
		STAILQ_REMOVE_HEAD(&sc->avail, next);
		STAILQ_INSERT_TAIL(&queue->avail, slot, next);
		++queue->avail_queue_len;
		++queue->queue_slots;
	}
	return (0);
}

static int
cpsw_attach(device_t dev)
{
	bus_dma_segment_t segs[1];
	struct cpsw_softc *sc = device_get_softc(dev);
	struct mii_softc *miisc;
	struct ifnet *ifp;
	void *phy_sc;
	int error, phy, nsegs;
	uint32_t reg;

	CPSW_DEBUGF((""));

	getbinuptime(&sc->attach_uptime);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);

	/* Get phy address from fdt */
	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, &phy_sc) != 0) {
		device_printf(dev, "failed to get PHY address from FDT\n");
		return (ENXIO);
	}
	/* Initialize mutexes */
	mtx_init(&sc->tx.lock, device_get_nameunit(dev),
	    "cpsw TX lock", MTX_DEF);
	mtx_init(&sc->rx.lock, device_get_nameunit(dev),
	    "cpsw RX lock", MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

	reg = cpsw_read_4(sc, CPSW_SS_IDVER);
	device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7),
	    reg & 0xFF, (reg >> 11) & 0x1F);

	cpsw_add_sysctls(sc);

	/* Allocate a busdma tag and DMA safe memory for mbufs. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES, CPSW_TXFRAGS,	/* maxsize, nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->mbuf_dtag);		/* dmatag */
	if (error) {
		device_printf(dev, "bus_dma_tag_create failed\n");
		cpsw_detach(dev);
		return (error);
	}

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}
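
	/*
	 * A note on the "null mbuf" allocated below: it is a cluster of
	 * zeros that stays DMA-mapped for the life of the driver.
	 * cpsw_tx_enqueue() chains its physical address
	 * (sc->null_mbuf_paddr) onto outgoing packets as a final pad
	 * buffer, so short frames can be padded out to ETHER_MIN_LEN
	 * without modifying the caller's mbuf chain.
	 */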
	/* Allocate the null mbuf and pre-sync it. */
	sc->null_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	memset(sc->null_mbuf->m_hdr.mh_data, 0, sc->null_mbuf->m_ext.ext_size);
	bus_dmamap_create(sc->mbuf_dtag, 0, &sc->null_mbuf_dmamap);
	bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->null_mbuf_dmamap,
	    sc->null_mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
	bus_dmamap_sync(sc->mbuf_dtag, sc->null_mbuf_dmamap,
	    BUS_DMASYNC_PREWRITE);
	sc->null_mbuf_paddr = segs[0].ds_addr;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN?
	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_init = cpsw_init;
	ifp->if_start = cpsw_start;
	ifp->if_ioctl = cpsw_ioctl;

	cpsw_init_slots(sc);

	/* Allocate slots to TX and RX queues. */
	STAILQ_INIT(&sc->rx.avail);
	STAILQ_INIT(&sc->rx.active);
	STAILQ_INIT(&sc->tx.avail);
	STAILQ_INIT(&sc->tx.active);
	// For now: 128 slots to TX, rest to RX.
	// XXX TODO: start with 32/64 and grow dynamically based on demand.
	if (cpsw_add_slots(sc, &sc->tx, 128) || cpsw_add_slots(sc, &sc->rx, -1)) {
		device_printf(dev, "failed to allocate dmamaps\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}
	device_printf(dev, "Initial queue size TX=%d RX=%d\n",
	    sc->tx.queue_slots, sc->rx.queue_slots);

	ifp->if_snd.ifq_drv_maxlen = sc->tx.queue_slots;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0);
	sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0);
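
	/*
	 * The factory-assigned MAC address is not kept in the CPSW register
	 * space; it is read below from the SoC control module's
	 * mac_id0_lo/mac_id0_hi registers (offsets 0x630/0x634 on the
	 * AM335x) via ti_scm_reg_read_4().
	 */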
	/* Get high part of MAC address from control module (mac_id0_hi) */
	/* TODO: Get MAC ID1 as well as MAC ID0. */
	ti_scm_reg_read_4(0x634, &reg);
	sc->mac_addr[0] = reg & 0xFF;
	sc->mac_addr[1] = (reg >> 8) & 0xFF;
	sc->mac_addr[2] = (reg >> 16) & 0xFF;
	sc->mac_addr[3] = (reg >> 24) & 0xFF;

	/* Get low part of MAC address from control module (mac_id0_lo) */
	ti_scm_reg_read_4(0x630, &reg);
	sc->mac_addr[4] = reg & 0xFF;
	sc->mac_addr[5] = (reg >> 8) & 0xFF;

	ether_ifattach(ifp, sc->mac_addr);
	callout_init(&sc->watchdog.callout, 0);

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF);

	/* Clear ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, 1 << 30);

	/* Attach PHY(s) */
	error = mii_attach(dev, &sc->miibus, ifp, cpsw_ifmedia_upd,
	    cpsw_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		cpsw_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Tell the MAC where to find the PHY so autoneg works */
	miisc = LIST_FIRST(&sc->mii->mii_phys);

	/* Select PHY and enable interrupts */
	cpsw_write_4(sc, MDIOUSERPHYSEL0, 1 << 6 | (miisc->mii_phy & 0x1F));

	/* Note: We don't use sc->res[3] (TX interrupt) */
	if (cpsw_attach_interrupt(sc, sc->res[1],
	    cpsw_intr_rx_thresh, "CPSW RX threshold interrupt") ||
	    cpsw_attach_interrupt(sc, sc->res[2],
	    cpsw_intr_rx, "CPSW RX interrupt") ||
	    cpsw_attach_interrupt(sc, sc->res[4],
	    cpsw_intr_misc, "CPSW misc interrupt")) {
		cpsw_detach(dev);
		return (ENXIO);
	}

	return (0);
}

static void
cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	int error;

	if (slot->dmamap) {
		error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap);
		KASSERT(error == 0, ("Mapping still active"));
		slot->dmamap = NULL;
	}
	if (slot->mbuf) {
		m_freem(slot->mbuf);
		slot->mbuf = NULL;
	}
}

static int
cpsw_detach(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);
	int error, i;

	CPSW_DEBUGF((""));

	/* Stop controller and free TX queue */
	if (device_is_attached(dev)) {
		ether_ifdetach(sc->ifp);
		CPSW_GLOBAL_LOCK(sc);
		cpsw_shutdown_locked(sc);
		CPSW_GLOBAL_UNLOCK(sc);
		callout_drain(&sc->watchdog.callout);
	}

	bus_generic_detach(dev);
	device_delete_child(dev, sc->miibus);

	/* Stop and release all interrupts */
	cpsw_detach_interrupts(sc);

	/* Free dmamaps and mbufs */
	for (i = 0; i < sizeof(sc->_slots) / sizeof(sc->_slots[0]); ++i) {
		cpsw_free_slot(sc, &sc->_slots[i]);
	}

	/* Free DMA tag */
	error = bus_dma_tag_destroy(sc->mbuf_dtag);
	KASSERT(error == 0, ("Unable to destroy DMA tag"));

	/* Free IO memory handler */
	bus_release_resources(dev, res_spec, sc->res);

	/* Destroy mutexes */
	mtx_destroy(&sc->rx.lock);
	mtx_destroy(&sc->tx.lock);

	return (0);
}

/*
 *
 * Init/Shutdown.
 *
 */
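
/*
 * cpsw_reset() below quiesces the whole complex in a deliberate order:
 * the RMII/RGMII wrapper first, then every per-core interrupt enable,
 * the switch subsystem, both slivers, and finally the CPDMA engine,
 * whose per-channel HDP/CP registers and interrupt masks are cleared so
 * that no stale descriptors survive a reinit.
 */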

static void
cpsw_reset(struct cpsw_softc *sc)
{
	int i;

	/* Reset RMII/RGMII wrapper. */
	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
		;

	/* Disable TX and RX interrupts for all cores. */
	for (i = 0; i < 3; ++i) {
		cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00);
	}

	/* Reset CPSW subsystem. */
	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
		;

	/* Reset Sliver port 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Reset */
		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
			;
	}

	/* Reset DMA controller. */
	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
		;

	/* Disable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0);

	/* Clear all queues. */
	for (i = 0; i < 8; i++) {
		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
	}

	/* Clear all interrupt Masks */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
}

static void
cpsw_init(void *arg)
{
	struct cpsw_softc *sc = arg;

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_init_locked(arg);
	CPSW_GLOBAL_UNLOCK(sc);
}

static void
cpsw_init_locked(void *arg)
{
	struct ifnet *ifp;
	struct cpsw_softc *sc = arg;
	struct cpsw_slot *slot;
	uint32_t i;

	CPSW_DEBUGF((""));
	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	getbinuptime(&sc->init_uptime);

	/* Reset the controller. */
	cpsw_reset(sc);

	/* Enable ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, 1 << 31 | 1 << 4);

	/* Init Sliver port 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Set Slave Mapping */
		cpsw_write_4(sc, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
		cpsw_write_4(sc, CPSW_PORT_P_TX_PRI_MAP(i + 1), 0x33221100);
		cpsw_write_4(sc, CPSW_SL_RX_MAXLEN(i), 0x5f2);
		/* Set MACCONTROL for ports 0,1: IFCTL_B(16), IFCTL_A(15),
		   GMII_EN(5), FULLDUPLEX(1) */
		/* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */
		/* Huh? Docs call bit 0 "Loopback" some places, "FullDuplex" others. */
		cpsw_write_4(sc, CPSW_SL_MACCONTROL(i), 1 << 15 | 1 << 5 | 1);
	}

	/* Set Host Port Mapping */
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);

	/* Initialize ALE: all ports set to forwarding(3), initialize addrs */
	for (i = 0; i < 3; i++)
		cpsw_write_4(sc, CPSW_ALE_PORTCTL(i), 3);
	cpsw_ale_update_addresses(sc, 1);

	cpsw_write_4(sc, CPSW_SS_PTYPE, 0);

	/* Enable statistics for ports 0, 1 and 2 */
	cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);

	/* Experiment: Turn off flow control */
	/* This seems to fix the watchdog resets that have plagued
	   earlier versions of this driver; I'm not yet sure whether
	   it has any negative side effects. */
	cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);

	/* Align RX buffers so the IP header lands on a 4-byte boundary. */
	cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2);

	/* Initialize RX Buffer Descriptors */
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);

	/* Enable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);

	/* Enable Interrupts for core 0 */
	cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x3F);

	/* Enable host Error Interrupt */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3);

	/* Enable interrupts for RX Channel 0 */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF);

	/* Select MII in GMII_SEL, Internal Delay mode */
	//ti_scm_reg_write_4(0x650, 0);

	/* Initialize active queues. */
	slot = STAILQ_FIRST(&sc->tx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->tx, slot);
	slot = STAILQ_FIRST(&sc->rx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->rx, slot);
	cpsw_rx_enqueue(sc);

	/* Activate network interface */
	sc->rx.running = 1;
	sc->tx.running = 1;
	sc->watchdog.timer = 0;
	callout_reset(&sc->watchdog.callout, hz, cpsw_tick, sc);
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static int
cpsw_shutdown(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_shutdown_locked(sc);
	CPSW_GLOBAL_UNLOCK(sc);
	return (0);
}

static void
cpsw_rx_teardown_locked(struct cpsw_softc *sc)
{
	struct mbuf *received, *next;
	int i = 0;

	CPSW_DEBUGF(("starting RX teardown"));
	cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
	for (;;) {
		received = cpsw_rx_dequeue(sc);
		CPSW_GLOBAL_UNLOCK(sc);
		while (received != NULL) {
			next = received->m_nextpkt;
			received->m_nextpkt = NULL;
			(*sc->ifp->if_input)(sc->ifp, received);
			received = next;
		}
		CPSW_GLOBAL_LOCK(sc);
		if (!sc->rx.running) {
			CPSW_DEBUGF(("finished RX teardown (%d retries)", i));
			return;
		}
		if (++i > 10) {
			if_printf(sc->ifp, "Unable to cleanly shut down receiver\n");
			return;
		}
		DELAY(10);
	}
}

static void
cpsw_tx_teardown_locked(struct cpsw_softc *sc)
{
	int i = 0;

	CPSW_DEBUGF(("starting TX teardown"));
	cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
	cpsw_tx_dequeue(sc);
	while (sc->tx.running && ++i < 10) {
		DELAY(10);
		cpsw_tx_dequeue(sc);
	}
	if (sc->tx.running)
		if_printf(sc->ifp, "Unable to cleanly shut down transmitter\n");
	CPSW_DEBUGF(("finished TX teardown (%d retries, %d idle buffers)",
	    i, sc->tx.active_queue_len));
}
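
/*
 * A note on the teardown handshake used by the two functions above:
 * writing 0 to CPSW_CPDMA_{RX,TX}_TEARDOWN asks the hardware to stop
 * channel 0.  The DMA engine eventually hands back a descriptor with
 * CPDMA_BD_TDOWNCMPLT set, and the dequeue routines acknowledge it by
 * writing the magic value 0xfffffffc to the channel's completion pointer
 * and clearing the queue's "running" flag.
 */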

static void
cpsw_shutdown_locked(struct cpsw_softc *sc)
{
	struct ifnet *ifp;

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Disable interface */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	/* Stop ticker */
	callout_stop(&sc->watchdog.callout);

	/* Tear down the RX/TX queues. */
	cpsw_rx_teardown_locked(sc);
	cpsw_tx_teardown_locked(sc);

	/* Capture stats before we reset controller. */
	cpsw_stats_collect(sc);

	cpsw_reset(sc);
}

/*
 * Suspend/Resume.
 */

static int
cpsw_suspend(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF((""));
	CPSW_GLOBAL_LOCK(sc);
	cpsw_shutdown_locked(sc);
	CPSW_GLOBAL_UNLOCK(sc);
	return (0);
}

static int
cpsw_resume(device_t dev)
{
	struct cpsw_softc *sc = device_get_softc(dev);

	CPSW_DEBUGF(("UNIMPLEMENTED"));
	return (0);
}

/*
 *
 * IOCTL
 *
 */

static void
cpsw_set_promisc(struct cpsw_softc *sc, int set)
{
	/*
	 * Enabling promiscuous mode requires two bits of work: First,
	 * ALE_BYPASS needs to be enabled.  That disables the ALE
	 * forwarding logic and causes every packet to be sent to the
	 * host port.  That makes us promiscuous wrt received packets.
	 *
	 * With ALE forwarding disabled, the transmitter needs to set
	 * an explicit output port on every packet to route it to the
	 * correct egress.  This should be doable for systems such as
	 * BeagleBone where only one egress port is actually wired to
	 * a PHY.  If you have both egress ports wired up, life gets a
	 * lot more interesting.
	 *
	 * Hmmm.... NetBSD driver uses ALE_BYPASS always and doesn't
	 * seem to set explicit egress ports.  Does that mean they
	 * are always promiscuous?
	 */
	if (set) {
		printf("Promiscuous mode unimplemented\n");
	}
}

static void
cpsw_set_allmulti(struct cpsw_softc *sc, int set)
{
	if (set) {
		printf("All-multicast mode unimplemented\n");
	}
}

static int
cpsw_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct cpsw_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;
	uint32_t changed;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		CPSW_GLOBAL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				changed = ifp->if_flags ^ sc->cpsw_if_flags;
				CPSW_DEBUGF(("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", changed));
				if (changed & IFF_PROMISC)
					cpsw_set_promisc(sc,
					    ifp->if_flags & IFF_PROMISC);
				if (changed & IFF_ALLMULTI)
					cpsw_set_allmulti(sc,
					    ifp->if_flags & IFF_ALLMULTI);
			} else {
				CPSW_DEBUGF(("SIOCSIFFLAGS: UP but not RUNNING; starting up"));
				cpsw_init_locked(sc);
			}
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			CPSW_DEBUGF(("SIOCSIFFLAGS: not UP but RUNNING; shutting down"));
			cpsw_shutdown_locked(sc);
		}

		sc->cpsw_if_flags = ifp->if_flags;
		CPSW_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
		cpsw_ale_update_addresses(sc, 0);
		break;
	case SIOCDELMULTI:
		/* Ugh.  DELMULTI doesn't provide the specific address
		   being removed, so the best we can do is remove
		   everything and rebuild it all. */
		cpsw_ale_update_addresses(sc, 1);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

/*
 *
 * MIIBUS
 *
 */
static int
cpsw_miibus_ready(struct cpsw_softc *sc)
{
	uint32_t r, retries = CPSW_MIIBUS_RETRIES;

	while (--retries) {
		r = cpsw_read_4(sc, MDIOUSERACCESS0);
		if ((r & 1 << 31) == 0)
			return 1;
		DELAY(CPSW_MIIBUS_DELAY);
	}
	return 0;
}

static int
cpsw_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cpsw_softc *sc = device_get_softc(dev);
	uint32_t cmd, r;

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO not ready to read\n");
		return 0;
	}

	/* Set GO, reg, phy */
	cmd = 1 << 31 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
	cpsw_write_4(sc, MDIOUSERACCESS0, cmd);

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO timed out during read\n");
		return 0;
	}

	r = cpsw_read_4(sc, MDIOUSERACCESS0);
	if ((r & 1 << 29) == 0) {
		device_printf(dev, "Failed to read from PHY.\n");
		r = 0;
	}
	return (r & 0xFFFF);
}

static int
cpsw_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct cpsw_softc *sc = device_get_softc(dev);
	uint32_t cmd;

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO not ready to write\n");
		return 0;
	}

	/* Set GO, WRITE, reg, phy, and value */
	cmd = 3 << 30 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16
	    | (value & 0xFFFF);
	cpsw_write_4(sc, MDIOUSERACCESS0, cmd);

	if (!cpsw_miibus_ready(sc)) {
		device_printf(dev, "MDIO timed out during write\n");
		return 0;
	}

	if ((cpsw_read_4(sc, MDIOUSERACCESS0) & (1 << 29)) == 0)
		device_printf(dev, "Failed to write to PHY.\n");

	return 0;
}

/*
 *
 * Transmit/Receive Packets.
 *
 */
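
/*
 * A quick sketch of how the routines below drive the hardware: each CPDMA
 * channel has a head descriptor pointer (HDP) register that launches a
 * chain of descriptors and a completion pointer (CP) register that the
 * driver writes to acknowledge finished work.  When the engine consumes a
 * descriptor whose "next" pointer is zero it sets CPDMA_BD_EOQ and stops,
 * so the enqueue routines, after linking new descriptors onto the old
 * tail, must re-check the tail's EOQ bit and restart the channel by
 * rewriting HDP if the hardware already ran dry.
 */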

static void
cpsw_intr_rx(void *arg)
{
	struct cpsw_softc *sc = arg;
	struct mbuf *received, *next;

	CPSW_RX_LOCK(sc);
	received = cpsw_rx_dequeue(sc);
	cpsw_rx_enqueue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
	CPSW_RX_UNLOCK(sc);

	while (received != NULL) {
		next = received->m_nextpkt;
		received->m_nextpkt = NULL;
		(*sc->ifp->if_input)(sc->ifp, received);
		received = next;
	}
}

static struct mbuf *
cpsw_rx_dequeue(struct cpsw_softc *sc)
{
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *slot;
	struct ifnet *ifp;
	struct mbuf *mb_head, *mb_tail;
	int removed = 0;

	ifp = sc->ifp;
	mb_head = mb_tail = NULL;

	/* Pull completed packets off hardware RX queue. */
	while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
		cpsw_cpdma_read_bd(sc, slot, &bd);
		if (bd.flags & CPDMA_BD_OWNER)
			break; /* Still in use by hardware */

		CPSW_DEBUGF(("Removing received packet from RX queue"));
		++removed;
		STAILQ_REMOVE_HEAD(&sc->rx.active, next);
		STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next);

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);

		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			CPSW_DEBUGF(("RX teardown in progress"));
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			cpsw_write_cp(sc, &sc->rx, 0xfffffffc);
			sc->rx.running = 0;
			break;
		}

		cpsw_write_cp_slot(sc, &sc->rx, slot);

		/* Set up mbuf */
		/* TODO: track SOP/EOP bits to assemble a full mbuf
		   out of received fragments. */
		slot->mbuf->m_hdr.mh_data += bd.bufoff;
		slot->mbuf->m_hdr.mh_len = bd.pktlen - 4;
		slot->mbuf->m_pkthdr.len = bd.pktlen - 4;
		slot->mbuf->m_flags |= M_PKTHDR;
		slot->mbuf->m_pkthdr.rcvif = ifp;
		slot->mbuf->m_nextpkt = NULL;

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* check for valid CRC by looking into pkt_err[5:4] */
			if ((bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0) {
				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				slot->mbuf->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Add mbuf to packet list to be returned. */
		if (mb_tail) {
			mb_tail->m_nextpkt = slot->mbuf;
		} else {
			mb_head = slot->mbuf;
		}
		mb_tail = slot->mbuf;
		slot->mbuf = NULL;
	}

	if (removed != 0) {
		sc->rx.queue_removes += removed;
		sc->rx.active_queue_len -= removed;
		sc->rx.avail_queue_len += removed;
		if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len)
			sc->rx.max_avail_queue_len = sc->rx.avail_queue_len;
	}
	return (mb_head);
}

static void
cpsw_rx_enqueue(struct cpsw_softc *sc)
{
	bus_dma_segment_t seg[1];
	struct cpsw_cpdma_bd bd;
	struct ifnet *ifp = sc->ifp;
	struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
	struct cpsw_slot *slot, *prev_slot = NULL;
	struct cpsw_slot *last_old_slot, *first_new_slot;
	int error, nsegs, added = 0;

	/* Register new mbufs with hardware. */
	while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) {
		if (slot->mbuf == NULL) {
			slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
			if (slot->mbuf == NULL) {
				if_printf(sc->ifp, "Unable to fill RX queue\n");
				break;
			}
			slot->mbuf->m_len =
			    slot->mbuf->m_pkthdr.len =
			    slot->mbuf->m_ext.ext_size;
		}

		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
		    slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);

		KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
		KASSERT(error == 0, ("DMA error (error=%d)", error));
		if (error != 0 || nsegs != 1) {
			if_printf(ifp,
			    "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
			    __func__, nsegs, error);
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD);

		/* Create and submit new rx descriptor. */
		bd.next = 0;
		bd.bufptr = seg->ds_addr;
		bd.bufoff = 0;
		bd.buflen = MCLBYTES - 1;
		bd.pktlen = bd.buflen;
		bd.flags = CPDMA_BD_OWNER;
		cpsw_cpdma_write_bd(sc, slot, &bd);
		++added;

		if (prev_slot != NULL)
			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
		prev_slot = slot;
		STAILQ_REMOVE_HEAD(&sc->rx.avail, next);
		sc->rx.avail_queue_len--;
		STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
	}

	if (added == 0)
		return;

	CPSW_DEBUGF(("Adding %d buffers to RX queue", added));

	/* Link new entries to hardware RX queue. */
	last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next);
	first_new_slot = STAILQ_FIRST(&tmpqueue);
	STAILQ_CONCAT(&sc->rx.active, &tmpqueue);
	if (first_new_slot == NULL) {
		return;
	} else if (last_old_slot == NULL) {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
	} else {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
		/* If underrun, restart queue. */
		if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) {
			cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
		}
	}
	sc->rx.queue_adds += added;
	sc->rx.active_queue_len += added;
	if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) {
		sc->rx.max_active_queue_len = sc->rx.active_queue_len;
	}
}

static void
cpsw_start(struct ifnet *ifp)
{
	struct cpsw_softc *sc = ifp->if_softc;

	CPSW_TX_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && sc->tx.running) {
		cpsw_tx_enqueue(sc);
		cpsw_tx_dequeue(sc);
	}
	CPSW_TX_UNLOCK(sc);
}

static void
cpsw_tx_enqueue(struct cpsw_softc *sc)
{
	bus_dma_segment_t segs[CPSW_TXFRAGS];
	struct cpsw_cpdma_bd bd;
	struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
	struct cpsw_slot *slot, *prev_slot = NULL;
	struct cpsw_slot *last_old_slot, *first_new_slot;
	struct mbuf *m0;
	int error, nsegs, seg, added = 0, padlen;

	/* Pull pending packets from IF queue and prep them for DMA. */
	while ((slot = STAILQ_FIRST(&sc->tx.avail)) != NULL) {
		IF_DEQUEUE(&sc->ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		slot->mbuf = m0;
		padlen = ETHER_MIN_LEN - slot->mbuf->m_pkthdr.len;
		if (padlen < 0)
			padlen = 0;

		/* Create mapping in DMA memory */
		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
		    slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
		/* If the packet is too fragmented, try to simplify. */
		if (error == EFBIG ||
		    (error == 0 &&
		    nsegs + (padlen > 0 ? 1 : 0) > sc->tx.avail_queue_len)) {
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			if (padlen > 0) /* May as well add padding. */
				m_append(slot->mbuf, padlen,
				    sc->null_mbuf->m_hdr.mh_data);
			m0 = m_defrag(slot->mbuf, M_NOWAIT);
			if (m0 == NULL) {
				if_printf(sc->ifp,
				    "Can't defragment packet; dropping\n");
				m_freem(slot->mbuf);
			} else {
				CPSW_DEBUGF(("Requeueing defragmented packet"));
				IF_PREPEND(&sc->ifp->if_snd, m0);
			}
			slot->mbuf = NULL;
			continue;
		}
		if (error != 0) {
			if_printf(sc->ifp,
			    "%s: Can't setup DMA (error=%d), dropping packet\n",
			    __func__, error);
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap,
		    BUS_DMASYNC_PREWRITE);

		CPSW_DEBUGF(("Queueing TX packet: %d segments + %d pad bytes",
		    nsegs, padlen));

		/* If there is only one segment, the for() loop
		 * gets skipped and the single buffer gets set up
		 * as both SOP and EOP. */
		/* Start by setting up the first buffer */
		bd.next = 0;
		bd.bufptr = segs[0].ds_addr;
		bd.bufoff = 0;
		bd.buflen = segs[0].ds_len;
		bd.pktlen = m_length(slot->mbuf, NULL) + padlen;
		bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER;
		for (seg = 1; seg < nsegs; ++seg) {
			/* Save the previous buffer (which isn't EOP) */
			cpsw_cpdma_write_bd(sc, slot, &bd);
			if (prev_slot != NULL)
				cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
			prev_slot = slot;
			STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
			sc->tx.avail_queue_len--;
			STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
			++added;
			slot = STAILQ_FIRST(&sc->tx.avail);

			/* Setup next buffer (which isn't SOP) */
			bd.next = 0;
			bd.bufptr = segs[seg].ds_addr;
			bd.bufoff = 0;
			bd.buflen = segs[seg].ds_len;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_OWNER;
		}

		/* Save the final buffer. */
		if (padlen <= 0)
			bd.flags |= CPDMA_BD_EOP;
		cpsw_cpdma_write_bd(sc, slot, &bd);
		if (prev_slot != NULL)
			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
		prev_slot = slot;
		STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
		sc->tx.avail_queue_len--;
		STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
		++added;

		if (padlen > 0) {
			slot = STAILQ_FIRST(&sc->tx.avail);
			STAILQ_REMOVE_HEAD(&sc->tx.avail, next);
			sc->tx.avail_queue_len--;
			STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
			++added;

			/* Setup buffer of null pad bytes (definitely EOP) */
			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
			prev_slot = slot;
			bd.next = 0;
			bd.bufptr = sc->null_mbuf_paddr;
			bd.bufoff = 0;
			bd.buflen = padlen;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_EOP | CPDMA_BD_OWNER;
			cpsw_cpdma_write_bd(sc, slot, &bd);
			++nsegs;
		}

		if (nsegs > sc->tx.longest_chain)
			sc->tx.longest_chain = nsegs;

		// TODO: Should we defer the BPF tap until
		// after all packets are queued?
		BPF_MTAP(sc->ifp, m0);
	}

	/* Attach the list of new buffers to the hardware TX queue. */
	last_old_slot = STAILQ_LAST(&sc->tx.active, cpsw_slot, next);
	first_new_slot = STAILQ_FIRST(&tmpqueue);
	STAILQ_CONCAT(&sc->tx.active, &tmpqueue);
	if (first_new_slot == NULL) {
		return;
	} else if (last_old_slot == NULL) {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc, &sc->tx, first_new_slot);
	} else {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
		/* If underrun, restart queue. */
		if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) {
			cpsw_write_hdp_slot(sc, &sc->tx, first_new_slot);
		}
	}
	sc->tx.queue_adds += added;
	sc->tx.active_queue_len += added;
	if (sc->tx.active_queue_len > sc->tx.max_active_queue_len) {
		sc->tx.max_active_queue_len = sc->tx.active_queue_len;
	}
}

static int
cpsw_tx_dequeue(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot, *last_removed_slot = NULL;
	uint32_t flags, removed = 0;

	slot = STAILQ_FIRST(&sc->tx.active);
	if (slot == NULL && cpsw_read_cp(sc, &sc->tx) == 0xfffffffc) {
		CPSW_DEBUGF(("TX teardown of an empty queue"));
		cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
		sc->tx.running = 0;
		return (0);
	}

	/* Pull completed buffers off the hardware TX queue. */
	while (slot != NULL) {
		flags = cpsw_cpdma_read_bd_flags(sc, slot);
		if (flags & CPDMA_BD_OWNER)
			break; /* Hardware is still using this packet. */

		CPSW_DEBUGF(("TX removing completed packet"));
		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
		m_freem(slot->mbuf);
		slot->mbuf = NULL;

		/* Dequeue any additional buffers used by this packet. */
		while (slot != NULL && slot->mbuf == NULL) {
			STAILQ_REMOVE_HEAD(&sc->tx.active, next);
			STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next);
			++removed;
			last_removed_slot = slot;
			slot = STAILQ_FIRST(&sc->tx.active);
		}

		/* TearDown complete is only marked on the SOP for the packet. */
		if (flags & CPDMA_BD_TDOWNCMPLT) {
			CPSW_DEBUGF(("TX teardown in progress"));
			cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
			// TODO: Increment a count of dropped TX packets
			sc->tx.running = 0;
			break;
		}
	}

	if (removed != 0) {
		cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot);
		sc->tx.queue_removes += removed;
		sc->tx.active_queue_len -= removed;
		sc->tx.avail_queue_len += removed;
		if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len)
			sc->tx.max_avail_queue_len = sc->tx.avail_queue_len;
	}
	return (removed);
}

/*
 *
 * Miscellaneous interrupts.
 *
 */

static void
cpsw_intr_rx_thresh(void *arg)
{
	struct cpsw_softc *sc = arg;
	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_RX_THRESH_STAT(0));

	CPSW_DEBUGF(("stat=%x", stat));
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
}

static void
cpsw_intr_misc_host_error(struct cpsw_softc *sc)
{
	uint32_t intstat;
	uint32_t dmastat;
	int txerr, rxerr, txchan, rxchan;

	printf("\n\n");
	device_printf(sc->dev,
	    "HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n");
	printf("\n\n");
	intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
	device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat);
	dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
	device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat);

	txerr = (dmastat >> 20) & 15;
	txchan = (dmastat >> 16) & 7;
	rxerr = (dmastat >> 12) & 15;
	rxchan = (dmastat >> 8) & 7;

	switch (txerr) {
	case 0: break;
	case 1:	printf("SOP error on TX channel %d\n", txchan);
		break;
	case 2:	printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan);
		break;
	case 3:	printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan);
		break;
	case 4:	printf("Zero Buffer Pointer on TX channel %d\n", txchan);
		break;
	case 5:	printf("Zero Buffer Length on TX channel %d\n", txchan);
		break;
	case 6:	printf("Packet length error on TX channel %d\n", txchan);
		break;
	default: printf("Unknown error on TX channel %d\n", txchan);
		break;
	}

	if (txerr != 0) {
		printf("CPSW_CPDMA_TX%d_HDP=0x%x\n",
		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan)));
		printf("CPSW_CPDMA_TX%d_CP=0x%x\n",
		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan)));
		cpsw_dump_queue(sc, &sc->tx.active);
	}

	switch (rxerr) {
	case 0: break;
	case 2:	printf("Ownership bit not set on RX channel %d\n", rxchan);
		break;
	case 4:	printf("Zero Buffer Pointer on RX channel %d\n", rxchan);
		break;
	case 5:	printf("Zero Buffer Length on RX channel %d\n", rxchan);
		break;
	case 6:	printf("Buffer offset too big on RX channel %d\n", rxchan);
		break;
	default: printf("Unknown RX error on RX channel %d\n", rxchan);
		break;
	}

	if (rxerr != 0) {
		printf("CPSW_CPDMA_RX%d_HDP=0x%x\n",
		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_HDP(rxchan)));
		printf("CPSW_CPDMA_RX%d_CP=0x%x\n",
		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan)));
		cpsw_dump_queue(sc, &sc->rx.active);
	}

	printf("\nALE Table\n");
	cpsw_ale_dump_table(sc);

	// XXX do something useful here??
	panic("CPSW HOST ERROR INTERRUPT");

	// Suppress this interrupt in the future.
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat);
	printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n");
	// The watchdog will probably reset the controller
	// in a little while.  It will probably fail again.
}

static void
cpsw_intr_misc(void *arg)
{
	struct cpsw_softc *sc = arg;
	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));

	if (stat & 16)
		CPSW_DEBUGF(("Time sync event interrupt unimplemented"));
	if (stat & 8)
		cpsw_stats_collect(sc);
	if (stat & 4)
		cpsw_intr_misc_host_error(sc);
	if (stat & 2)
		CPSW_DEBUGF(("MDIO link change interrupt unimplemented"));
	if (stat & 1)
		CPSW_DEBUGF(("MDIO operation completed interrupt unimplemented"));
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
}

/*
 *
 * Periodic Checks and Watchdog.
 *
 */

static void
cpsw_tick(void *msc)
{
	struct cpsw_softc *sc = msc;

	/* Check for TX timeout */
	cpsw_tx_watchdog(sc);

	/* Check for media type change */
	mii_tick(sc->mii);
	if (sc->cpsw_media_status != sc->mii->mii_media.ifm_media) {
		printf("%s: media type changed (ifm_media=%x)\n", __func__,
		    sc->mii->mii_media.ifm_media);
		cpsw_ifmedia_upd(sc->ifp);
	}

	/* Schedule another timeout one second from now */
	callout_reset(&sc->watchdog.callout, hz, cpsw_tick, sc);
}

static void
cpsw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cpsw_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	CPSW_DEBUGF((""));
	CPSW_TX_LOCK(sc);

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	CPSW_TX_UNLOCK(sc);
}

static int
cpsw_ifmedia_upd(struct ifnet *ifp)
{
	struct cpsw_softc *sc = ifp->if_softc;

	CPSW_DEBUGF((""));
	if (ifp->if_flags & IFF_UP) {
		CPSW_GLOBAL_LOCK(sc);
		sc->cpsw_media_status = sc->mii->mii_media.ifm_media;
		mii_mediachg(sc->mii);
		cpsw_init_locked(sc);
		CPSW_GLOBAL_UNLOCK(sc);
	}

	return (0);
}

static void
cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc)
{
	cpsw_debugf_head("CPSW watchdog");
	if_printf(sc->ifp, "watchdog timeout\n");
	cpsw_shutdown_locked(sc);
	cpsw_init_locked(sc);
}

static void
cpsw_tx_watchdog(struct cpsw_softc *sc)
{
	struct ifnet *ifp = sc->ifp;

	CPSW_GLOBAL_LOCK(sc);
	if (sc->tx.active_queue_len == 0 || (ifp->if_flags & IFF_UP) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !sc->tx.running) {
		sc->watchdog.timer = 0; /* Nothing to do. */
	} else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) {
		sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */
	} else if (cpsw_tx_dequeue(sc) > 0) {
		sc->watchdog.timer = 0; /* We just did something. */
	} else {
		/* There was something to do but it didn't get done. */
		++sc->watchdog.timer;
		if (sc->watchdog.timer > 2) {
			sc->watchdog.timer = 0;
			++ifp->if_oerrors;
			++sc->watchdog.resets;
			cpsw_tx_watchdog_full_reset(sc);
		}
	}
	sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes;
	CPSW_GLOBAL_UNLOCK(sc);
}

/*
 *
 * ALE support routines.
 *
 */
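
/*
 * Each ALE table entry is 68 bits wide, accessed through the three TBLW
 * word registers.  The layout, as used by the routines below, is roughly:
 *
 *   word 0 [31:0]:  last four octets of the MAC address
 *   word 1 [15:0]:  first two octets of the MAC address
 *   word 1 [29:28]: entry type (0 = free, 1 = address)
 *   word 1 [31:30]: multicast forward state (3 = forwarding)
 *   word 2 [4:2]:   port mask (bits 68:66 of the entry)
 *
 * Bit 8 of word 1 is simply the multicast bit of the stored MAC address
 * itself, which is why the dump routine uses it to label entries "mcast"
 * or "ucast".
 */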
1849 * 1850 */ 1851 1852 static void 1853 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) 1854 { 1855 cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023); 1856 ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0); 1857 ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1); 1858 ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2); 1859 } 1860 1861 static void 1862 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) 1863 { 1864 cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]); 1865 cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]); 1866 cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]); 1867 cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023)); 1868 } 1869 1870 static int 1871 cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc) 1872 { 1873 int i; 1874 uint32_t ale_entry[3]; 1875 1876 /* First two entries are link address and broadcast. */ 1877 for (i = 2; i < CPSW_MAX_ALE_ENTRIES; i++) { 1878 cpsw_ale_read_entry(sc, i, ale_entry); 1879 if (((ale_entry[1] >> 28) & 3) == 1 && /* Address entry */ 1880 ((ale_entry[1] >> 8) & 1) == 1) { /* MCast link addr */ 1881 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; 1882 cpsw_ale_write_entry(sc, i, ale_entry); 1883 } 1884 } 1885 return CPSW_MAX_ALE_ENTRIES; 1886 } 1887 1888 static int 1889 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, uint8_t *mac) 1890 { 1891 int free_index = -1, matching_index = -1, i; 1892 uint32_t ale_entry[3]; 1893 1894 /* Find a matching entry or a free entry. */ 1895 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { 1896 cpsw_ale_read_entry(sc, i, ale_entry); 1897 1898 /* Entry Type[61:60] is 0 for free entry */ 1899 if (free_index < 0 && ((ale_entry[1] >> 28) & 3) == 0) { 1900 free_index = i; 1901 } 1902 1903 if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) && 1904 (((ale_entry[1] >> 0) & 0xFF) == mac[1]) && 1905 (((ale_entry[0] >>24) & 0xFF) == mac[2]) && 1906 (((ale_entry[0] >>16) & 0xFF) == mac[3]) && 1907 (((ale_entry[0] >> 8) & 0xFF) == mac[4]) && 1908 (((ale_entry[0] >> 0) & 0xFF) == mac[5])) { 1909 matching_index = i; 1910 break; 1911 } 1912 } 1913 1914 if (matching_index < 0) { 1915 if (free_index < 0) 1916 return (ENOMEM); 1917 i = free_index; 1918 } 1919 1920 /* Set MAC address */ 1921 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 1922 ale_entry[1] = mac[0] << 8 | mac[1]; 1923 1924 /* Entry type[61:60] is addr entry(1), Mcast fwd state[63:62] is fw(3)*/ 1925 ale_entry[1] |= 0xd0 << 24; 1926 1927 /* Set portmask [68:66] */ 1928 ale_entry[2] = (portmap & 7) << 2; 1929 1930 cpsw_ale_write_entry(sc, i, ale_entry); 1931 1932 return 0; 1933 } 1934 1935 static void 1936 cpsw_ale_dump_table(struct cpsw_softc *sc) { 1937 int i; 1938 uint32_t ale_entry[3]; 1939 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { 1940 cpsw_ale_read_entry(sc, i, ale_entry); 1941 if (ale_entry[0] || ale_entry[1] || ale_entry[2]) { 1942 printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[0], 1943 ale_entry[1], ale_entry[2]); 1944 printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ", 1945 (ale_entry[1] >> 8) & 0xFF, 1946 (ale_entry[1] >> 0) & 0xFF, 1947 (ale_entry[0] >>24) & 0xFF, 1948 (ale_entry[0] >>16) & 0xFF, 1949 (ale_entry[0] >> 8) & 0xFF, 1950 (ale_entry[0] >> 0) & 0xFF); 1951 printf(((ale_entry[1] >> 8) & 1) ? 
"mcast " : "ucast "); 1952 printf("type: %u ", (ale_entry[1] >> 28) & 3); 1953 printf("port: %u ", (ale_entry[2] >> 2) & 7); 1954 printf("\n"); 1955 } 1956 } 1957 printf("\n"); 1958 } 1959 1960 static int 1961 cpsw_ale_update_addresses(struct cpsw_softc *sc, int purge) 1962 { 1963 uint8_t *mac; 1964 uint32_t ale_entry[3]; 1965 struct ifnet *ifp = sc->ifp; 1966 struct ifmultiaddr *ifma; 1967 int i; 1968 1969 /* Route incoming packets for our MAC address to Port 0 (host). */ 1970 /* For simplicity, keep this entry at table index 0 in the ALE. */ 1971 if_addr_rlock(ifp); 1972 mac = LLADDR((struct sockaddr_dl *)ifp->if_addr->ifa_addr); 1973 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 1974 ale_entry[1] = 0x10 << 24 | mac[0] << 8 | mac[1]; /* addr entry + mac */ 1975 ale_entry[2] = 0; /* port = 0 */ 1976 cpsw_ale_write_entry(sc, 0, ale_entry); 1977 1978 /* Set outgoing MAC Address for Ports 1 and 2. */ 1979 for (i = 1; i < 3; ++i) { 1980 cpsw_write_4(sc, CPSW_PORT_P_SA_HI(i), 1981 mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]); 1982 cpsw_write_4(sc, CPSW_PORT_P_SA_LO(i), 1983 mac[5] << 8 | mac[4]); 1984 } 1985 if_addr_runlock(ifp); 1986 1987 /* Keep the broadcast address at table entry 1. */ 1988 ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */ 1989 ale_entry[1] = 0xd000ffff; /* FW (3 << 30), Addr entry (1 << 24), upper 16 bits of Mac */ 1990 ale_entry[2] = 0x0000001c; /* Forward to all ports */ 1991 cpsw_ale_write_entry(sc, 1, ale_entry); 1992 1993 /* SIOCDELMULTI doesn't specify the particular address 1994 being removed, so we have to remove all and rebuild. */ 1995 if (purge) 1996 cpsw_ale_remove_all_mc_entries(sc); 1997 1998 /* Set other multicast addrs desired. */ 1999 if_maddr_rlock(ifp); 2000 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2001 if (ifma->ifma_addr->sa_family != AF_LINK) 2002 continue; 2003 cpsw_ale_mc_entry_set(sc, 7, 2004 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 2005 } 2006 if_maddr_runlock(ifp); 2007 2008 return (0); 2009 } 2010 2011 /* 2012 * 2013 * Statistics and Sysctls. 
/*
 * A note on the counter model (per the AM335x TRM): the hardware
 * statistics registers are 32 bits wide and decrement by whatever
 * value is written to them, so writing back exactly the value just
 * read clears what was read without losing events that arrive in
 * between.  cpsw_stats_collect() relies on this to fold the registers
 * into the 64-bit shadow_stats[] array; it is called from the misc
 * interrupt handler when the hardware signals that counters need
 * service.
 */

#if 0
static void
cpsw_stats_dump(struct cpsw_softc *sc)
{
	int i;
	uint32_t r;

	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
		    cpsw_stat_sysctls[i].reg);
		CPSW_DEBUGF(("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid,
		    (uintmax_t)sc->shadow_stats[i], r,
		    (uintmax_t)sc->shadow_stats[i] + r));
	}
}
#endif

static void
cpsw_stats_collect(struct cpsw_softc *sc)
{
	int i;
	uint32_t r;

	CPSW_DEBUGF(("Controller shadow statistics updated."));

	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
		    cpsw_stat_sysctls[i].reg);
		sc->shadow_stats[i] += r;
		cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg, r);
	}
}

static int
cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct cpsw_stat *stat;
	uint64_t result;

	sc = (struct cpsw_softc *)arg1;
	stat = &cpsw_stat_sysctls[oidp->oid_number];
	result = sc->shadow_stats[oidp->oid_number];
	result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg);
	return (sysctl_handle_64(oidp, &result, 0, req));
}

static int
cpsw_stat_attached(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct bintime t;
	unsigned result;

	sc = (struct cpsw_softc *)arg1;
	getbinuptime(&t);
	bintime_sub(&t, &sc->attach_uptime);
	result = t.sec;
	return (sysctl_handle_int(oidp, &result, 0, req));
}

static int
cpsw_stat_uptime(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct bintime t;
	unsigned result;

	sc = (struct cpsw_softc *)arg1;
	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		getbinuptime(&t);
		bintime_sub(&t, &sc->init_uptime);
		result = t.sec;
	} else
		result = 0;
	return (sysctl_handle_int(oidp, &result, 0, req));
}
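/*
 * The helpers below assemble the per-device sysctl tree.  On the
 * first controller instance the result looks roughly like this
 * (illustrative):
 *
 *	dev.cpsw.0.attachedSecs
 *	dev.cpsw.0.uptime
 *	dev.cpsw.0.stats.<one leaf per cpsw_stat_sysctls[] entry>
 *	dev.cpsw.0.queue.{tx,rx}.{totalBuffers,activeBuffers,...}
 *	dev.cpsw.0.watchdog.resets
 */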
static void
cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
    struct cpsw_queue *queue)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_CHILDREN(node);
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers",
	    CTLFLAG_RD, &queue->queue_slots, 0,
	    "Total buffers currently assigned to this queue");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers",
	    CTLFLAG_RD, &queue->active_queue_len, 0,
	    "Buffers currently registered with hardware controller");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers",
	    CTLFLAG_RD, &queue->max_active_queue_len, 0,
	    "Max value of activeBuffers since last driver reset");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers",
	    CTLFLAG_RD, &queue->avail_queue_len, 0,
	    "Buffers allocated to this queue but not currently "
	    "registered with hardware controller");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers",
	    CTLFLAG_RD, &queue->max_avail_queue_len, 0,
	    "Max value of availBuffers since last driver reset");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued",
	    CTLFLAG_RD, &queue->queue_adds, 0,
	    "Total buffers added to queue");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued",
	    CTLFLAG_RD, &queue->queue_removes, 0,
	    "Total buffers removed from queue");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain",
	    CTLFLAG_RD, &queue->longest_chain, 0,
	    "Max buffers used for a single packet");
}

static void
cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
    struct cpsw_softc *sc)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_CHILDREN(node);
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets",
	    CTLFLAG_RD, &sc->watchdog.resets, 0,
	    "Total number of watchdog resets");
}

static void
cpsw_add_sysctls(struct cpsw_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *stats_node, *queue_node, *node;
	struct sysctl_oid_list *parent, *stats_parent, *queue_parent;
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs",
	    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU",
	    "Time since driver attach");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "uptime",
	    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_uptime, "IU",
	    "Seconds since driver init");

	stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
	    CTLFLAG_RD, NULL, "CPSW Statistics");
	stats_parent = SYSCTL_CHILDREN(stats_node);
	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		/*
		 * Use the table index as the OID number so that
		 * cpsw_stats_sysctl() can recover it via oidp->oid_number.
		 */
		SYSCTL_ADD_PROC(ctx, stats_parent, i,
		    cpsw_stat_sysctls[i].oid,
		    CTLTYPE_U64 | CTLFLAG_RD, sc, 0,
		    cpsw_stats_sysctl, "QU",
		    cpsw_stat_sysctls[i].oid);
	}

	queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue",
	    CTLFLAG_RD, NULL, "CPSW Queue Statistics");
	queue_parent = SYSCTL_CHILDREN(queue_node);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx",
	    CTLFLAG_RD, NULL, "TX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->tx);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx",
	    CTLFLAG_RD, NULL, "RX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->rx);

	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog",
	    CTLFLAG_RD, NULL, "Watchdog Statistics");
	cpsw_add_watchdog_sysctls(ctx, node, sc);
}
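/*
 * Example (illustrative only, not compiled into the driver): a
 * userland program reading one of the 64-bit counters exported above
 * via sysctlbyname(3).  The leaf name "GoodRxFrames" is an assumption;
 * the real names come from cpsw_stat_sysctls[].oid.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		uint64_t frames;
 *		size_t len = sizeof(frames);
 *
 *		if (sysctlbyname("dev.cpsw.0.stats.GoodRxFrames",
 *		    &frames, &len, NULL, 0) == -1)
 *			return (1);
 *		printf("%ju good RX frames\n", (uintmax_t)frames);
 *		return (0);
 *	}
 */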