1 /*- 2 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 /* 28 * TI Common Platform Ethernet Switch (CPSW) Driver 29 * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs. 30 * 31 * This controller is documented in the AM335x Technical Reference 32 * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM 33 * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM. 34 * 35 * It is basically a single Ethernet port (port 0) wired internally to 36 * a 3-port store-and-forward switch connected to two independent 37 * "sliver" controllers (port 1 and port 2). You can operate the 38 * controller in a variety of different ways by suitably configuring 39 * the slivers and the Address Lookup Engine (ALE) that routes packets 40 * between the ports. 41 * 42 * This code was developed and tested on a BeagleBone with 43 * an AM335x SoC. 44 */ 45 46 #include <sys/cdefs.h> 47 __FBSDID("$FreeBSD$"); 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/endian.h> 52 #include <sys/mbuf.h> 53 #include <sys/lock.h> 54 #include <sys/mutex.h> 55 #include <sys/kernel.h> 56 #include <sys/module.h> 57 #include <sys/socket.h> 58 #include <sys/sysctl.h> 59 60 #include <net/ethernet.h> 61 #include <net/bpf.h> 62 #include <net/if.h> 63 #include <net/if_arp.h> 64 #include <net/if_dl.h> 65 #include <net/if_media.h> 66 #include <net/if_types.h> 67 #include <net/if_var.h> 68 #include <net/if_vlan_var.h> 69 70 #include <netinet/in_systm.h> 71 #include <netinet/in.h> 72 #include <netinet/ip.h> 73 74 #include <sys/sockio.h> 75 #include <sys/bus.h> 76 #include <machine/bus.h> 77 #include <sys/rman.h> 78 #include <machine/resource.h> 79 80 #include <dev/mii/mii.h> 81 #include <dev/mii/miivar.h> 82 83 #include <dev/fdt/fdt_common.h> 84 #include <dev/ofw/ofw_bus.h> 85 #include <dev/ofw/ofw_bus_subr.h> 86 87 #include "if_cpswreg.h" 88 #include "if_cpswvar.h" 89 90 #include <arm/ti/ti_scm.h> 91 92 #include "miibus_if.h" 93 94 /* Device probe/attach/detach. */ 95 static int cpsw_probe(device_t); 96 static void cpsw_init_slots(struct cpsw_softc *); 97 static int cpsw_attach(device_t); 98 static void cpsw_free_slot(struct cpsw_softc *, struct cpsw_slot *); 99 static int cpsw_detach(device_t); 100 101 /* Device Init/shutdown. */ 102 static void cpsw_init(void *); 103 static void cpsw_init_locked(void *); 104 static int cpsw_shutdown(device_t); 105 static void cpsw_shutdown_locked(struct cpsw_softc *); 106 107 /* Device Suspend/Resume. */ 108 static int cpsw_suspend(device_t); 109 static int cpsw_resume(device_t); 110 111 /* Ioctl. */ 112 static int cpsw_ioctl(struct ifnet *, u_long command, caddr_t data); 113 114 static int cpsw_miibus_readreg(device_t, int phy, int reg); 115 static int cpsw_miibus_writereg(device_t, int phy, int reg, int value); 116 static void cpsw_miibus_statchg(device_t); 117 118 /* Send/Receive packets. */ 119 static void cpsw_intr_rx(void *arg); 120 static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *); 121 static void cpsw_rx_enqueue(struct cpsw_softc *); 122 static void cpsw_start(struct ifnet *); 123 static void cpsw_tx_enqueue(struct cpsw_softc *); 124 static int cpsw_tx_dequeue(struct cpsw_softc *); 125 126 /* Misc interrupts and watchdog. */ 127 static void cpsw_intr_rx_thresh(void *); 128 static void cpsw_intr_misc(void *); 129 static void cpsw_tick(void *); 130 static void cpsw_ifmedia_sts(struct ifnet *, struct ifmediareq *); 131 static int cpsw_ifmedia_upd(struct ifnet *); 132 static void cpsw_tx_watchdog(struct cpsw_softc *); 133 134 /* ALE support */ 135 static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t idx, uint32_t *ale_entry); 136 static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t idx, uint32_t *ale_entry); 137 static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t portmap, uint8_t *mac); 138 static int cpsw_ale_update_addresses(struct cpsw_softc *, int purge); 139 static void cpsw_ale_dump_table(struct cpsw_softc *); 140 141 /* Statistics and sysctls. */ 142 static void cpsw_add_sysctls(struct cpsw_softc *); 143 static void cpsw_stats_collect(struct cpsw_softc *); 144 static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS); 145 146 /* 147 * Arbitrary limit on number of segments in an mbuf to be transmitted. 148 * Packets with more segments than this will be defragmented before 149 * they are queued. 150 */ 151 #define CPSW_TXFRAGS 8 152 153 154 /* 155 * TODO: The CPSW subsystem (CPSW_SS) can drive two independent PHYs 156 * as separate Ethernet ports. To properly support this, we should 157 * break this into two separate devices: a CPSW_SS device that owns 158 * the interrupts and actually talks to the CPSW hardware, and a 159 * separate CPSW Ethernet child device for each Ethernet port. The RX 160 * interrupt, for example, would be part of CPSW_SS; it would receive 161 * a packet, note the input port, and then dispatch it to the child 162 * device's interface queue. Similarly for transmit. 163 * 164 * It's not clear to me whether the device tree should be restructured 165 * with a cpsw_ss node and two child nodes. That would allow specifying 166 * MAC addresses for each port, for example, but might be overkill. 167 * 168 * Unfortunately, I don't have hardware right now that supports two 169 * Ethernet ports via CPSW. 170 */ 171 172 static device_method_t cpsw_methods[] = { 173 /* Device interface */ 174 DEVMETHOD(device_probe, cpsw_probe), 175 DEVMETHOD(device_attach, cpsw_attach), 176 DEVMETHOD(device_detach, cpsw_detach), 177 DEVMETHOD(device_shutdown, cpsw_shutdown), 178 DEVMETHOD(device_suspend, cpsw_suspend), 179 DEVMETHOD(device_resume, cpsw_resume), 180 /* MII interface */ 181 DEVMETHOD(miibus_readreg, cpsw_miibus_readreg), 182 DEVMETHOD(miibus_writereg, cpsw_miibus_writereg), 183 DEVMETHOD(miibus_statchg, cpsw_miibus_statchg), 184 { 0, 0 } 185 }; 186 187 static driver_t cpsw_driver = { 188 "cpsw", 189 cpsw_methods, 190 sizeof(struct cpsw_softc), 191 }; 192 193 static devclass_t cpsw_devclass; 194 195 DRIVER_MODULE(cpsw, simplebus, cpsw_driver, cpsw_devclass, 0, 0); 196 DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0); 197 MODULE_DEPEND(cpsw, ether, 1, 1, 1); 198 MODULE_DEPEND(cpsw, miibus, 1, 1, 1); 199 200 static struct resource_spec irq_res_spec[] = { 201 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 202 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE }, 203 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE }, 204 { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE }, 205 { -1, 0 } 206 }; 207 208 /* Number of entries here must match size of stats 209 * array in struct cpsw_softc. */ 210 static struct cpsw_stat { 211 int reg; 212 char *oid; 213 } cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = { 214 {0x00, "GoodRxFrames"}, 215 {0x04, "BroadcastRxFrames"}, 216 {0x08, "MulticastRxFrames"}, 217 {0x0C, "PauseRxFrames"}, 218 {0x10, "RxCrcErrors"}, 219 {0x14, "RxAlignErrors"}, 220 {0x18, "OversizeRxFrames"}, 221 {0x1c, "RxJabbers"}, 222 {0x20, "ShortRxFrames"}, 223 {0x24, "RxFragments"}, 224 {0x30, "RxOctets"}, 225 {0x34, "GoodTxFrames"}, 226 {0x38, "BroadcastTxFrames"}, 227 {0x3c, "MulticastTxFrames"}, 228 {0x40, "PauseTxFrames"}, 229 {0x44, "DeferredTxFrames"}, 230 {0x48, "CollisionsTxFrames"}, 231 {0x4c, "SingleCollisionTxFrames"}, 232 {0x50, "MultipleCollisionTxFrames"}, 233 {0x54, "ExcessiveCollisions"}, 234 {0x58, "LateCollisions"}, 235 {0x5c, "TxUnderrun"}, 236 {0x60, "CarrierSenseErrors"}, 237 {0x64, "TxOctets"}, 238 {0x68, "RxTx64OctetFrames"}, 239 {0x6c, "RxTx65to127OctetFrames"}, 240 {0x70, "RxTx128to255OctetFrames"}, 241 {0x74, "RxTx256to511OctetFrames"}, 242 {0x78, "RxTx512to1024OctetFrames"}, 243 {0x7c, "RxTx1024upOctetFrames"}, 244 {0x80, "NetOctets"}, 245 {0x84, "RxStartOfFrameOverruns"}, 246 {0x88, "RxMiddleOfFrameOverruns"}, 247 {0x8c, "RxDmaOverruns"} 248 }; 249 250 /* 251 * Basic debug support. 252 */ 253 254 #define IF_DEBUG(sc) if (sc->cpsw_if_flags & IFF_DEBUG) 255 256 static void 257 cpsw_debugf_head(const char *funcname) 258 { 259 int t = (int)(time_second % (24 * 60 * 60)); 260 261 printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname); 262 } 263 264 #include <machine/stdarg.h> 265 static void 266 cpsw_debugf(const char *fmt, ...) 267 { 268 va_list ap; 269 270 va_start(ap, fmt); 271 vprintf(fmt, ap); 272 va_end(ap); 273 printf("\n"); 274 275 } 276 277 #define CPSW_DEBUGF(a) do { \ 278 IF_DEBUG(sc) { \ 279 cpsw_debugf_head(__func__); \ 280 cpsw_debugf a; \ 281 } \ 282 } while (0) 283 284 285 /* 286 * Locking macros 287 */ 288 #define CPSW_TX_LOCK(sc) do { \ 289 mtx_assert(&(sc)->rx.lock, MA_NOTOWNED); \ 290 mtx_lock(&(sc)->tx.lock); \ 291 } while (0) 292 293 #define CPSW_TX_UNLOCK(sc) mtx_unlock(&(sc)->tx.lock) 294 #define CPSW_TX_LOCK_ASSERT(sc) mtx_assert(&(sc)->tx.lock, MA_OWNED) 295 296 #define CPSW_RX_LOCK(sc) do { \ 297 mtx_assert(&(sc)->tx.lock, MA_NOTOWNED); \ 298 mtx_lock(&(sc)->rx.lock); \ 299 } while (0) 300 301 #define CPSW_RX_UNLOCK(sc) mtx_unlock(&(sc)->rx.lock) 302 #define CPSW_RX_LOCK_ASSERT(sc) mtx_assert(&(sc)->rx.lock, MA_OWNED) 303 304 #define CPSW_GLOBAL_LOCK(sc) do { \ 305 if ((mtx_owned(&(sc)->tx.lock) ? 1 : 0) != \ 306 (mtx_owned(&(sc)->rx.lock) ? 1 : 0)) { \ 307 panic("cpsw deadlock possibility detection!"); \ 308 } \ 309 mtx_lock(&(sc)->tx.lock); \ 310 mtx_lock(&(sc)->rx.lock); \ 311 } while (0) 312 313 #define CPSW_GLOBAL_UNLOCK(sc) do { \ 314 CPSW_RX_UNLOCK(sc); \ 315 CPSW_TX_UNLOCK(sc); \ 316 } while (0) 317 318 #define CPSW_GLOBAL_LOCK_ASSERT(sc) do { \ 319 CPSW_TX_LOCK_ASSERT(sc); \ 320 CPSW_RX_LOCK_ASSERT(sc); \ 321 } while (0) 322 323 /* 324 * Read/Write macros 325 */ 326 #define cpsw_read_4(sc, reg) bus_read_4(sc->mem_res, reg) 327 #define cpsw_write_4(sc, reg, val) bus_write_4(sc->mem_res, reg, val) 328 329 #define cpsw_cpdma_bd_offset(i) (CPSW_CPPI_RAM_OFFSET + ((i)*16)) 330 331 #define cpsw_cpdma_bd_paddr(sc, slot) \ 332 BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset) 333 #define cpsw_cpdma_read_bd(sc, slot, val) \ 334 bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) 335 #define cpsw_cpdma_write_bd(sc, slot, val) \ 336 bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) 337 #define cpsw_cpdma_write_bd_next(sc, slot, next_slot) \ 338 cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot)) 339 #define cpsw_cpdma_read_bd_flags(sc, slot) \ 340 bus_read_2(sc->mem_res, slot->bd_offset + 14) 341 #define cpsw_write_hdp_slot(sc, queue, slot) \ 342 cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot)) 343 #define CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0)) 344 #define cpsw_read_cp(sc, queue) \ 345 cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET) 346 #define cpsw_write_cp(sc, queue, val) \ 347 cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val)) 348 #define cpsw_write_cp_slot(sc, queue, slot) \ 349 cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot)) 350 351 #if 0 352 /* XXX temporary function versions for debugging. */ 353 static void 354 cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) 355 { 356 uint32_t reg = queue->hdp_offset; 357 uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); 358 CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg))); 359 cpsw_write_4(sc, reg, v); 360 } 361 362 static void 363 cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) 364 { 365 uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); 366 CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue))); 367 cpsw_write_cp(sc, queue, v); 368 } 369 #endif 370 371 /* 372 * Expanded dump routines for verbose debugging. 373 */ 374 static void 375 cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) 376 { 377 static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ", 378 "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun", 379 "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1", 380 "Port0"}; 381 struct cpsw_cpdma_bd bd; 382 const char *sep; 383 int i; 384 385 cpsw_cpdma_read_bd(sc, slot, &bd); 386 printf("BD Addr: 0x%08x Next: 0x%08x\n", cpsw_cpdma_bd_paddr(sc, slot), bd.next); 387 printf(" BufPtr: 0x%08x BufLen: 0x%08x\n", bd.bufptr, bd.buflen); 388 printf(" BufOff: 0x%08x PktLen: 0x%08x\n", bd.bufoff, bd.pktlen); 389 printf(" Flags: "); 390 sep = ""; 391 for (i = 0; i < 16; ++i) { 392 if (bd.flags & (1 << (15 - i))) { 393 printf("%s%s", sep, flags[i]); 394 sep = ","; 395 } 396 } 397 printf("\n"); 398 if (slot->mbuf) { 399 printf(" Ether: %14D\n", 400 (char *)(slot->mbuf->m_data), " "); 401 printf(" Packet: %16D\n", 402 (char *)(slot->mbuf->m_data) + 14, " "); 403 } 404 } 405 406 #define CPSW_DUMP_SLOT(cs, slot) do { \ 407 IF_DEBUG(sc) { \ 408 cpsw_dump_slot(sc, slot); \ 409 } \ 410 } while (0) 411 412 413 static void 414 cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q) 415 { 416 struct cpsw_slot *slot; 417 int i = 0; 418 int others = 0; 419 420 STAILQ_FOREACH(slot, q, next) { 421 if (i > 4) 422 ++others; 423 else 424 cpsw_dump_slot(sc, slot); 425 ++i; 426 } 427 if (others) 428 printf(" ... and %d more.\n", others); 429 printf("\n"); 430 } 431 432 #define CPSW_DUMP_QUEUE(sc, q) do { \ 433 IF_DEBUG(sc) { \ 434 cpsw_dump_queue(sc, q); \ 435 } \ 436 } while (0) 437 438 439 /* 440 * 441 * Device Probe, Attach, Detach. 442 * 443 */ 444 445 static int 446 cpsw_probe(device_t dev) 447 { 448 449 if (!ofw_bus_status_okay(dev)) 450 return (ENXIO); 451 452 if (!ofw_bus_is_compatible(dev, "ti,cpsw")) 453 return (ENXIO); 454 455 device_set_desc(dev, "3-port Switch Ethernet Subsystem"); 456 return (BUS_PROBE_DEFAULT); 457 } 458 459 460 static void 461 cpsw_init_slots(struct cpsw_softc *sc) 462 { 463 struct cpsw_slot *slot; 464 int i; 465 466 STAILQ_INIT(&sc->avail); 467 468 /* Put the slot descriptors onto the global avail list. */ 469 for (i = 0; i < sizeof(sc->_slots) / sizeof(sc->_slots[0]); i++) { 470 slot = &sc->_slots[i]; 471 slot->bd_offset = cpsw_cpdma_bd_offset(i); 472 STAILQ_INSERT_TAIL(&sc->avail, slot, next); 473 } 474 } 475 476 /* 477 * bind an interrupt, add the relevant info to sc->interrupts 478 */ 479 static int 480 cpsw_attach_interrupt(struct cpsw_softc *sc, struct resource *res, driver_intr_t *handler, const char *description) 481 { 482 void **pcookie; 483 int error; 484 485 sc->interrupts[sc->interrupt_count].res = res; 486 sc->interrupts[sc->interrupt_count].description = description; 487 pcookie = &sc->interrupts[sc->interrupt_count].ih_cookie; 488 489 error = bus_setup_intr(sc->dev, res, INTR_TYPE_NET | INTR_MPSAFE, 490 NULL, *handler, sc, pcookie); 491 if (error) 492 device_printf(sc->dev, 493 "could not setup %s\n", description); 494 else 495 ++sc->interrupt_count; 496 return (error); 497 } 498 499 /* 500 * teardown everything in sc->interrupts. 501 */ 502 static void 503 cpsw_detach_interrupts(struct cpsw_softc *sc) 504 { 505 int error; 506 int i; 507 508 for (i = 0; i < sizeof(sc->interrupts) / sizeof(sc->interrupts[0]); ++i) { 509 if (!sc->interrupts[i].ih_cookie) 510 continue; 511 error = bus_teardown_intr(sc->dev, 512 sc->interrupts[i].res, sc->interrupts[i].ih_cookie); 513 if (error) 514 device_printf(sc->dev, "could not release %s\n", 515 sc->interrupts[i].description); 516 sc->interrupts[i].ih_cookie = NULL; 517 } 518 } 519 520 static int 521 cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested) 522 { 523 const int max_slots = sizeof(sc->_slots) / sizeof(sc->_slots[0]); 524 struct cpsw_slot *slot; 525 int i; 526 527 if (requested < 0) 528 requested = max_slots; 529 530 for (i = 0; i < requested; ++i) { 531 slot = STAILQ_FIRST(&sc->avail); 532 if (slot == NULL) 533 return (0); 534 if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) { 535 if_printf(sc->ifp, "failed to create dmamap\n"); 536 return (ENOMEM); 537 } 538 STAILQ_REMOVE_HEAD(&sc->avail, next); 539 STAILQ_INSERT_TAIL(&queue->avail, slot, next); 540 ++queue->avail_queue_len; 541 ++queue->queue_slots; 542 } 543 return (0); 544 } 545 546 static int 547 cpsw_attach(device_t dev) 548 { 549 bus_dma_segment_t segs[1]; 550 struct cpsw_softc *sc = device_get_softc(dev); 551 struct mii_softc *miisc; 552 struct ifnet *ifp; 553 int phy, nsegs, error; 554 uint32_t reg; 555 pcell_t phy_id[3]; 556 u_long mem_base, mem_size; 557 phandle_t child; 558 int len; 559 560 CPSW_DEBUGF(("")); 561 562 getbinuptime(&sc->attach_uptime); 563 sc->dev = dev; 564 sc->node = ofw_bus_get_node(dev); 565 566 /* TODO: handle multiple slaves */ 567 phy = -1; 568 569 /* Find any slave with phy_id */ 570 for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) { 571 len = OF_getproplen(child, "phy_id"); 572 if (len <= 0) 573 continue; 574 575 /* Get phy address from fdt */ 576 if (OF_getencprop(child, "phy_id", phy_id, len) <= 0) 577 continue; 578 579 phy = phy_id[1]; 580 /* TODO: get memory window for MDIO */ 581 582 break; 583 } 584 585 if (phy == -1) { 586 device_printf(dev, "failed to get PHY address from FDT\n"); 587 return (ENXIO); 588 } 589 590 mem_base = 0; 591 mem_size = 0; 592 593 if (fdt_regsize(sc->node, &mem_base, &mem_size) != 0) { 594 device_printf(sc->dev, "no regs property in cpsw node\n"); 595 return (ENXIO); 596 } 597 598 /* Initialize mutexes */ 599 mtx_init(&sc->tx.lock, device_get_nameunit(dev), 600 "cpsw TX lock", MTX_DEF); 601 mtx_init(&sc->rx.lock, device_get_nameunit(dev), 602 "cpsw RX lock", MTX_DEF); 603 604 /* Allocate IRQ resources */ 605 error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res); 606 if (error) { 607 device_printf(dev, "could not allocate IRQ resources\n"); 608 cpsw_detach(dev); 609 return (ENXIO); 610 } 611 612 sc->mem_rid = 0; 613 sc->mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, 614 &sc->mem_rid, mem_base, mem_base + CPSW_MEMWINDOW_SIZE -1, 615 CPSW_MEMWINDOW_SIZE, RF_ACTIVE); 616 if (sc->mem_res == NULL) { 617 device_printf(sc->dev, "failed to allocate memory resource\n"); 618 cpsw_detach(dev); 619 return (ENXIO); 620 } 621 622 reg = cpsw_read_4(sc, CPSW_SS_IDVER); 623 device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7), 624 reg & 0xFF, (reg >> 11) & 0x1F); 625 626 cpsw_add_sysctls(sc); 627 628 /* Allocate a busdma tag and DMA safe memory for mbufs. */ 629 error = bus_dma_tag_create( 630 bus_get_dma_tag(sc->dev), /* parent */ 631 1, 0, /* alignment, boundary */ 632 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 633 BUS_SPACE_MAXADDR, /* highaddr */ 634 NULL, NULL, /* filtfunc, filtfuncarg */ 635 MCLBYTES, CPSW_TXFRAGS, /* maxsize, nsegments */ 636 MCLBYTES, 0, /* maxsegsz, flags */ 637 NULL, NULL, /* lockfunc, lockfuncarg */ 638 &sc->mbuf_dtag); /* dmatag */ 639 if (error) { 640 device_printf(dev, "bus_dma_tag_create failed\n"); 641 cpsw_detach(dev); 642 return (error); 643 } 644 645 /* Allocate network interface */ 646 ifp = sc->ifp = if_alloc(IFT_ETHER); 647 if (ifp == NULL) { 648 device_printf(dev, "if_alloc() failed\n"); 649 cpsw_detach(dev); 650 return (ENOMEM); 651 } 652 653 /* Allocate the null mbuf and pre-sync it. */ 654 sc->null_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 655 memset(sc->null_mbuf->m_data, 0, sc->null_mbuf->m_ext.ext_size); 656 bus_dmamap_create(sc->mbuf_dtag, 0, &sc->null_mbuf_dmamap); 657 bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->null_mbuf_dmamap, 658 sc->null_mbuf, segs, &nsegs, BUS_DMA_NOWAIT); 659 bus_dmamap_sync(sc->mbuf_dtag, sc->null_mbuf_dmamap, 660 BUS_DMASYNC_PREWRITE); 661 sc->null_mbuf_paddr = segs[0].ds_addr; 662 663 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 664 ifp->if_softc = sc; 665 ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST; 666 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN? 667 ifp->if_capenable = ifp->if_capabilities; 668 669 ifp->if_init = cpsw_init; 670 ifp->if_start = cpsw_start; 671 ifp->if_ioctl = cpsw_ioctl; 672 673 cpsw_init_slots(sc); 674 675 /* Allocate slots to TX and RX queues. */ 676 STAILQ_INIT(&sc->rx.avail); 677 STAILQ_INIT(&sc->rx.active); 678 STAILQ_INIT(&sc->tx.avail); 679 STAILQ_INIT(&sc->tx.active); 680 // For now: 128 slots to TX, rest to RX. 681 // XXX TODO: start with 32/64 and grow dynamically based on demand. 682 if (cpsw_add_slots(sc, &sc->tx, 128) || cpsw_add_slots(sc, &sc->rx, -1)) { 683 device_printf(dev, "failed to allocate dmamaps\n"); 684 cpsw_detach(dev); 685 return (ENOMEM); 686 } 687 device_printf(dev, "Initial queue size TX=%d RX=%d\n", 688 sc->tx.queue_slots, sc->rx.queue_slots); 689 690 ifp->if_snd.ifq_drv_maxlen = sc->tx.queue_slots; 691 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 692 IFQ_SET_READY(&ifp->if_snd); 693 694 sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0); 695 sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0); 696 697 /* Get high part of MAC address from control module (mac_id0_hi) */ 698 /* TODO: Get MAC ID1 as well as MAC ID0. */ 699 ti_scm_reg_read_4(0x634, ®); 700 sc->mac_addr[0] = reg & 0xFF; 701 sc->mac_addr[1] = (reg >> 8) & 0xFF; 702 sc->mac_addr[2] = (reg >> 16) & 0xFF; 703 sc->mac_addr[3] = (reg >> 24) & 0xFF; 704 705 /* Get low part of MAC address from control module (mac_id0_lo) */ 706 ti_scm_reg_read_4(0x630, ®); 707 sc->mac_addr[4] = reg & 0xFF; 708 sc->mac_addr[5] = (reg >> 8) & 0xFF; 709 710 /* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */ 711 /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */ 712 cpsw_write_4(sc, MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF); 713 714 /* Clear ALE */ 715 cpsw_write_4(sc, CPSW_ALE_CONTROL, 1 << 30); 716 717 /* Attach PHY(s) */ 718 error = mii_attach(dev, &sc->miibus, ifp, cpsw_ifmedia_upd, 719 cpsw_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); 720 if (error) { 721 device_printf(dev, "attaching PHYs failed\n"); 722 cpsw_detach(dev); 723 return (error); 724 } 725 sc->mii = device_get_softc(sc->miibus); 726 727 /* Tell the MAC where to find the PHY so autoneg works */ 728 miisc = LIST_FIRST(&sc->mii->mii_phys); 729 730 /* Select PHY and enable interrupts */ 731 cpsw_write_4(sc, MDIOUSERPHYSEL0, 1 << 6 | (miisc->mii_phy & 0x1F)); 732 733 /* Note: We don't use sc->res[3] (TX interrupt) */ 734 if (cpsw_attach_interrupt(sc, sc->irq_res[0], 735 cpsw_intr_rx_thresh, "CPSW RX threshold interrupt") || 736 cpsw_attach_interrupt(sc, sc->irq_res[1], 737 cpsw_intr_rx, "CPSW RX interrupt") || 738 cpsw_attach_interrupt(sc, sc->irq_res[3], 739 cpsw_intr_misc, "CPSW misc interrupt")) { 740 cpsw_detach(dev); 741 return (ENXIO); 742 } 743 744 ether_ifattach(ifp, sc->mac_addr); 745 callout_init(&sc->watchdog.callout, 0); 746 747 return (0); 748 } 749 750 static void 751 cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) 752 { 753 int error; 754 755 if (slot->dmamap) { 756 error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap); 757 KASSERT(error == 0, ("Mapping still active")); 758 slot->dmamap = NULL; 759 } 760 if (slot->mbuf) { 761 m_freem(slot->mbuf); 762 slot->mbuf = NULL; 763 } 764 } 765 766 static int 767 cpsw_detach(device_t dev) 768 { 769 struct cpsw_softc *sc = device_get_softc(dev); 770 int error, i; 771 772 CPSW_DEBUGF(("")); 773 774 /* Stop controller and free TX queue */ 775 if (device_is_attached(dev)) { 776 ether_ifdetach(sc->ifp); 777 CPSW_GLOBAL_LOCK(sc); 778 cpsw_shutdown_locked(sc); 779 CPSW_GLOBAL_UNLOCK(sc); 780 callout_drain(&sc->watchdog.callout); 781 } 782 783 bus_generic_detach(dev); 784 if (sc->miibus) 785 device_delete_child(dev, sc->miibus); 786 787 /* Stop and release all interrupts */ 788 cpsw_detach_interrupts(sc); 789 790 /* Free dmamaps and mbufs */ 791 for (i = 0; i < sizeof(sc->_slots) / sizeof(sc->_slots[0]); ++i) 792 cpsw_free_slot(sc, &sc->_slots[i]); 793 if (sc->null_mbuf_dmamap) { 794 error = bus_dmamap_destroy(sc->mbuf_dtag, sc->null_mbuf_dmamap); 795 KASSERT(error == 0, ("Mapping still active")); 796 } 797 if (sc->null_mbuf) 798 m_freem(sc->null_mbuf); 799 800 /* Free DMA tag */ 801 error = bus_dma_tag_destroy(sc->mbuf_dtag); 802 KASSERT(error == 0, ("Unable to destroy DMA tag")); 803 804 /* Free IO memory handler */ 805 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); 806 bus_release_resources(dev, irq_res_spec, sc->irq_res); 807 808 if (sc->ifp != NULL) 809 if_free(sc->ifp); 810 811 /* Destroy mutexes */ 812 mtx_destroy(&sc->rx.lock); 813 mtx_destroy(&sc->tx.lock); 814 815 return (0); 816 } 817 818 /* 819 * 820 * Init/Shutdown. 821 * 822 */ 823 824 static void 825 cpsw_reset(struct cpsw_softc *sc) 826 { 827 int i; 828 829 /* Reset RMII/RGMII wrapper. */ 830 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1); 831 while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1) 832 ; 833 834 /* Disable TX and RX interrupts for all cores. */ 835 for (i = 0; i < 3; ++i) { 836 cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00); 837 cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00); 838 cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00); 839 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00); 840 } 841 842 /* Reset CPSW subsystem. */ 843 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1); 844 while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1) 845 ; 846 847 /* Reset Sliver port 1 and 2 */ 848 for (i = 0; i < 2; i++) { 849 /* Reset */ 850 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1); 851 while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1) 852 ; 853 } 854 855 /* Reset DMA controller. */ 856 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1); 857 while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1) 858 ; 859 860 /* Disable TX & RX DMA */ 861 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0); 862 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0); 863 864 /* Clear all queues. */ 865 for (i = 0; i < 8; i++) { 866 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0); 867 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0); 868 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0); 869 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0); 870 } 871 872 /* Clear all interrupt Masks */ 873 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF); 874 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF); 875 } 876 877 static void 878 cpsw_init(void *arg) 879 { 880 struct cpsw_softc *sc = arg; 881 882 CPSW_DEBUGF(("")); 883 CPSW_GLOBAL_LOCK(sc); 884 cpsw_init_locked(arg); 885 CPSW_GLOBAL_UNLOCK(sc); 886 } 887 888 static void 889 cpsw_init_locked(void *arg) 890 { 891 struct ifnet *ifp; 892 struct cpsw_softc *sc = arg; 893 struct cpsw_slot *slot; 894 uint32_t i; 895 896 CPSW_DEBUGF(("")); 897 ifp = sc->ifp; 898 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 899 return; 900 901 getbinuptime(&sc->init_uptime); 902 903 /* Reset the controller. */ 904 cpsw_reset(sc); 905 906 /* Enable ALE */ 907 cpsw_write_4(sc, CPSW_ALE_CONTROL, 1 << 31 | 1 << 4); 908 909 /* Init Sliver port 1 and 2 */ 910 for (i = 0; i < 2; i++) { 911 /* Set Slave Mapping */ 912 cpsw_write_4(sc, CPSW_SL_RX_PRI_MAP(i), 0x76543210); 913 cpsw_write_4(sc, CPSW_PORT_P_TX_PRI_MAP(i + 1), 0x33221100); 914 cpsw_write_4(sc, CPSW_SL_RX_MAXLEN(i), 0x5f2); 915 /* Set MACCONTROL for ports 0,1: IFCTL_B(16), IFCTL_A(15), 916 GMII_EN(5), FULLDUPLEX(1) */ 917 /* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */ 918 /* Huh? Docs call bit 0 "Loopback" some places, "FullDuplex" others. */ 919 cpsw_write_4(sc, CPSW_SL_MACCONTROL(i), 1 << 15 | 1 << 5 | 1); 920 } 921 922 /* Set Host Port Mapping */ 923 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210); 924 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0); 925 926 /* Initialize ALE: all ports set to forwarding(3), initialize addrs */ 927 for (i = 0; i < 3; i++) 928 cpsw_write_4(sc, CPSW_ALE_PORTCTL(i), 3); 929 cpsw_ale_update_addresses(sc, 1); 930 931 cpsw_write_4(sc, CPSW_SS_PTYPE, 0); 932 933 /* Enable statistics for ports 0, 1 and 2 */ 934 cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7); 935 936 /* Experiment: Turn off flow control */ 937 /* This seems to fix the watchdog resets that have plagued 938 earlier versions of this driver; I'm not yet sure if there 939 are negative effects yet. */ 940 cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0); 941 942 /* Make IP hdr aligned with 4 */ 943 cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2); 944 945 /* Initialize RX Buffer Descriptors */ 946 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0); 947 948 /* Enable TX & RX DMA */ 949 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1); 950 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1); 951 952 /* Enable Interrupts for core 0 */ 953 cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF); 954 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF); 955 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x3F); 956 957 /* Enable host Error Interrupt */ 958 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3); 959 960 /* Enable interrupts for RX Channel 0 */ 961 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1); 962 963 /* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */ 964 /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */ 965 cpsw_write_4(sc, MDIOCONTROL, 1 << 30 | 1 << 18 | 0xFF); 966 967 /* Select MII in GMII_SEL, Internal Delay mode */ 968 //ti_scm_reg_write_4(0x650, 0); 969 970 /* Initialize active queues. */ 971 slot = STAILQ_FIRST(&sc->tx.active); 972 if (slot != NULL) 973 cpsw_write_hdp_slot(sc, &sc->tx, slot); 974 slot = STAILQ_FIRST(&sc->rx.active); 975 if (slot != NULL) 976 cpsw_write_hdp_slot(sc, &sc->rx, slot); 977 cpsw_rx_enqueue(sc); 978 979 /* Activate network interface */ 980 sc->rx.running = 1; 981 sc->tx.running = 1; 982 sc->watchdog.timer = 0; 983 callout_reset(&sc->watchdog.callout, hz, cpsw_tick, sc); 984 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; 985 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 986 987 } 988 989 static int 990 cpsw_shutdown(device_t dev) 991 { 992 struct cpsw_softc *sc = device_get_softc(dev); 993 994 CPSW_DEBUGF(("")); 995 CPSW_GLOBAL_LOCK(sc); 996 cpsw_shutdown_locked(sc); 997 CPSW_GLOBAL_UNLOCK(sc); 998 return (0); 999 } 1000 1001 static void 1002 cpsw_rx_teardown_locked(struct cpsw_softc *sc) 1003 { 1004 struct mbuf *received, *next; 1005 int i = 0; 1006 1007 CPSW_DEBUGF(("starting RX teardown")); 1008 cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0); 1009 for (;;) { 1010 received = cpsw_rx_dequeue(sc); 1011 CPSW_GLOBAL_UNLOCK(sc); 1012 while (received != NULL) { 1013 next = received->m_nextpkt; 1014 received->m_nextpkt = NULL; 1015 (*sc->ifp->if_input)(sc->ifp, received); 1016 received = next; 1017 } 1018 CPSW_GLOBAL_LOCK(sc); 1019 if (!sc->rx.running) { 1020 CPSW_DEBUGF(("finished RX teardown (%d retries)", i)); 1021 return; 1022 } 1023 if (++i > 10) { 1024 if_printf(sc->ifp, "Unable to cleanly shutdown receiver\n"); 1025 return; 1026 } 1027 DELAY(10); 1028 } 1029 } 1030 1031 static void 1032 cpsw_tx_teardown_locked(struct cpsw_softc *sc) 1033 { 1034 int i = 0; 1035 1036 CPSW_DEBUGF(("starting TX teardown")); 1037 cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0); 1038 cpsw_tx_dequeue(sc); 1039 while (sc->tx.running && ++i < 10) { 1040 DELAY(10); 1041 cpsw_tx_dequeue(sc); 1042 } 1043 if (sc->tx.running) 1044 if_printf(sc->ifp, "Unable to cleanly shutdown transmitter\n"); 1045 CPSW_DEBUGF(("finished TX teardown (%d retries, %d idle buffers)", 1046 i, sc->tx.active_queue_len)); 1047 } 1048 1049 static void 1050 cpsw_shutdown_locked(struct cpsw_softc *sc) 1051 { 1052 struct ifnet *ifp; 1053 1054 CPSW_DEBUGF(("")); 1055 CPSW_GLOBAL_LOCK_ASSERT(sc); 1056 ifp = sc->ifp; 1057 1058 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1059 return; 1060 1061 /* Disable interface */ 1062 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1063 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1064 1065 /* Stop ticker */ 1066 callout_stop(&sc->watchdog.callout); 1067 1068 /* Tear down the RX/TX queues. */ 1069 cpsw_rx_teardown_locked(sc); 1070 cpsw_tx_teardown_locked(sc); 1071 1072 /* Capture stats before we reset controller. */ 1073 cpsw_stats_collect(sc); 1074 1075 cpsw_reset(sc); 1076 } 1077 1078 /* 1079 * Suspend/Resume. 1080 */ 1081 1082 static int 1083 cpsw_suspend(device_t dev) 1084 { 1085 struct cpsw_softc *sc = device_get_softc(dev); 1086 1087 CPSW_DEBUGF(("")); 1088 CPSW_GLOBAL_LOCK(sc); 1089 cpsw_shutdown_locked(sc); 1090 CPSW_GLOBAL_UNLOCK(sc); 1091 return (0); 1092 } 1093 1094 static int 1095 cpsw_resume(device_t dev) 1096 { 1097 struct cpsw_softc *sc = device_get_softc(dev); 1098 1099 CPSW_DEBUGF(("UNIMPLEMENTED")); 1100 return (0); 1101 } 1102 1103 /* 1104 * 1105 * IOCTL 1106 * 1107 */ 1108 1109 static void 1110 cpsw_set_promisc(struct cpsw_softc *sc, int set) 1111 { 1112 /* 1113 * Enabling promiscuous mode requires two bits of work: First, 1114 * ALE_BYPASS needs to be enabled. That disables the ALE 1115 * forwarding logic and causes every packet to be sent to the 1116 * host port. That makes us promiscuous wrt received packets. 1117 * 1118 * With ALE forwarding disabled, the transmitter needs to set 1119 * an explicit output port on every packet to route it to the 1120 * correct egress. This should be doable for systems such as 1121 * BeagleBone where only one egress port is actually wired to 1122 * a PHY. If you have both egress ports wired up, life gets a 1123 * lot more interesting. 1124 * 1125 * Hmmm.... NetBSD driver uses ALE_BYPASS always and doesn't 1126 * seem to set explicit egress ports. Does that mean they 1127 * are always promiscuous? 1128 */ 1129 if (set) { 1130 printf("Promiscuous mode unimplemented\n"); 1131 } 1132 } 1133 1134 static void 1135 cpsw_set_allmulti(struct cpsw_softc *sc, int set) 1136 { 1137 if (set) { 1138 printf("All-multicast mode unimplemented\n"); 1139 } 1140 } 1141 1142 static int 1143 cpsw_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1144 { 1145 struct cpsw_softc *sc = ifp->if_softc; 1146 struct ifreq *ifr = (struct ifreq *)data; 1147 int error; 1148 uint32_t changed; 1149 1150 error = 0; 1151 1152 switch (command) { 1153 case SIOCSIFFLAGS: 1154 CPSW_GLOBAL_LOCK(sc); 1155 if (ifp->if_flags & IFF_UP) { 1156 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1157 changed = ifp->if_flags ^ sc->cpsw_if_flags; 1158 CPSW_DEBUGF(("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", changed)); 1159 if (changed & IFF_PROMISC) 1160 cpsw_set_promisc(sc, 1161 ifp->if_flags & IFF_PROMISC); 1162 if (changed & IFF_ALLMULTI) 1163 cpsw_set_allmulti(sc, 1164 ifp->if_flags & IFF_ALLMULTI); 1165 } else { 1166 CPSW_DEBUGF(("SIOCSIFFLAGS: UP but not RUNNING; starting up")); 1167 cpsw_init_locked(sc); 1168 } 1169 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1170 CPSW_DEBUGF(("SIOCSIFFLAGS: not UP but RUNNING; shutting down")); 1171 cpsw_shutdown_locked(sc); 1172 } 1173 1174 sc->cpsw_if_flags = ifp->if_flags; 1175 CPSW_GLOBAL_UNLOCK(sc); 1176 break; 1177 case SIOCADDMULTI: 1178 cpsw_ale_update_addresses(sc, 0); 1179 break; 1180 case SIOCDELMULTI: 1181 /* Ugh. DELMULTI doesn't provide the specific address 1182 being removed, so the best we can do is remove 1183 everything and rebuild it all. */ 1184 cpsw_ale_update_addresses(sc, 1); 1185 break; 1186 case SIOCGIFMEDIA: 1187 case SIOCSIFMEDIA: 1188 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command); 1189 break; 1190 default: 1191 error = ether_ioctl(ifp, command, data); 1192 } 1193 return (error); 1194 } 1195 1196 /* 1197 * 1198 * MIIBUS 1199 * 1200 */ 1201 static int 1202 cpsw_miibus_ready(struct cpsw_softc *sc) 1203 { 1204 uint32_t r, retries = CPSW_MIIBUS_RETRIES; 1205 1206 while (--retries) { 1207 r = cpsw_read_4(sc, MDIOUSERACCESS0); 1208 if ((r & 1 << 31) == 0) 1209 return 1; 1210 DELAY(CPSW_MIIBUS_DELAY); 1211 } 1212 return 0; 1213 } 1214 1215 static int 1216 cpsw_miibus_readreg(device_t dev, int phy, int reg) 1217 { 1218 struct cpsw_softc *sc = device_get_softc(dev); 1219 uint32_t cmd, r; 1220 1221 if (!cpsw_miibus_ready(sc)) { 1222 device_printf(dev, "MDIO not ready to read\n"); 1223 return 0; 1224 } 1225 1226 /* Set GO, reg, phy */ 1227 cmd = 1 << 31 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16; 1228 cpsw_write_4(sc, MDIOUSERACCESS0, cmd); 1229 1230 if (!cpsw_miibus_ready(sc)) { 1231 device_printf(dev, "MDIO timed out during read\n"); 1232 return 0; 1233 } 1234 1235 r = cpsw_read_4(sc, MDIOUSERACCESS0); 1236 if((r & 1 << 29) == 0) { 1237 device_printf(dev, "Failed to read from PHY.\n"); 1238 r = 0; 1239 } 1240 return (r & 0xFFFF); 1241 } 1242 1243 static int 1244 cpsw_miibus_writereg(device_t dev, int phy, int reg, int value) 1245 { 1246 struct cpsw_softc *sc = device_get_softc(dev); 1247 uint32_t cmd; 1248 1249 if (!cpsw_miibus_ready(sc)) { 1250 device_printf(dev, "MDIO not ready to write\n"); 1251 return 0; 1252 } 1253 1254 /* Set GO, WRITE, reg, phy, and value */ 1255 cmd = 3 << 30 | (reg & 0x1F) << 21 | (phy & 0x1F) << 16 1256 | (value & 0xFFFF); 1257 cpsw_write_4(sc, MDIOUSERACCESS0, cmd); 1258 1259 if (!cpsw_miibus_ready(sc)) { 1260 device_printf(dev, "MDIO timed out during write\n"); 1261 return 0; 1262 } 1263 1264 if((cpsw_read_4(sc, MDIOUSERACCESS0) & (1 << 29)) == 0) 1265 device_printf(dev, "Failed to write to PHY.\n"); 1266 1267 return 0; 1268 } 1269 1270 static void 1271 cpsw_miibus_statchg(device_t dev) 1272 { 1273 struct cpsw_softc *sc = device_get_softc(dev); 1274 uint32_t mac_control; 1275 int i; 1276 1277 CPSW_DEBUGF(("")); 1278 1279 for (i = 0; i < 2; i++) { 1280 mac_control = cpsw_read_4(sc, CPSW_SL_MACCONTROL(i)); 1281 mac_control &= ~(1 << 15 | 1 << 7); 1282 1283 switch(IFM_SUBTYPE(sc->mii->mii_media_active)) { 1284 case IFM_1000_SX: 1285 case IFM_1000_LX: 1286 case IFM_1000_CX: 1287 case IFM_1000_T: 1288 mac_control |= 1 << 7; 1289 break; 1290 1291 default: 1292 mac_control |= 1 << 15; 1293 break; 1294 } 1295 1296 cpsw_write_4(sc, CPSW_SL_MACCONTROL(i), mac_control); 1297 } 1298 } 1299 1300 /* 1301 * 1302 * Transmit/Receive Packets. 1303 * 1304 */ 1305 1306 1307 static void 1308 cpsw_intr_rx(void *arg) 1309 { 1310 struct cpsw_softc *sc = arg; 1311 struct mbuf *received, *next; 1312 1313 CPSW_RX_LOCK(sc); 1314 received = cpsw_rx_dequeue(sc); 1315 cpsw_rx_enqueue(sc); 1316 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1); 1317 CPSW_RX_UNLOCK(sc); 1318 1319 while (received != NULL) { 1320 next = received->m_nextpkt; 1321 received->m_nextpkt = NULL; 1322 (*sc->ifp->if_input)(sc->ifp, received); 1323 received = next; 1324 } 1325 } 1326 1327 static struct mbuf * 1328 cpsw_rx_dequeue(struct cpsw_softc *sc) 1329 { 1330 struct cpsw_cpdma_bd bd; 1331 struct cpsw_slot *slot; 1332 struct ifnet *ifp; 1333 struct mbuf *mb_head, *mb_tail; 1334 int removed = 0; 1335 1336 ifp = sc->ifp; 1337 mb_head = mb_tail = NULL; 1338 1339 /* Pull completed packets off hardware RX queue. */ 1340 while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) { 1341 cpsw_cpdma_read_bd(sc, slot, &bd); 1342 if (bd.flags & CPDMA_BD_OWNER) 1343 break; /* Still in use by hardware */ 1344 1345 CPSW_DEBUGF(("Removing received packet from RX queue")); 1346 ++removed; 1347 STAILQ_REMOVE_HEAD(&sc->rx.active, next); 1348 STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next); 1349 1350 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD); 1351 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1352 1353 if (bd.flags & CPDMA_BD_TDOWNCMPLT) { 1354 CPSW_DEBUGF(("RX teardown in progress")); 1355 m_freem(slot->mbuf); 1356 slot->mbuf = NULL; 1357 cpsw_write_cp(sc, &sc->rx, 0xfffffffc); 1358 sc->rx.running = 0; 1359 break; 1360 } 1361 1362 cpsw_write_cp_slot(sc, &sc->rx, slot); 1363 1364 /* Set up mbuf */ 1365 /* TODO: track SOP/EOP bits to assemble a full mbuf 1366 out of received fragments. */ 1367 slot->mbuf->m_data += bd.bufoff; 1368 slot->mbuf->m_len = bd.pktlen - 4; 1369 slot->mbuf->m_pkthdr.len = bd.pktlen - 4; 1370 slot->mbuf->m_flags |= M_PKTHDR; 1371 slot->mbuf->m_pkthdr.rcvif = ifp; 1372 slot->mbuf->m_nextpkt = NULL; 1373 1374 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 1375 /* check for valid CRC by looking into pkt_err[5:4] */ 1376 if ((bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0) { 1377 slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1378 slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1379 slot->mbuf->m_pkthdr.csum_data = 0xffff; 1380 } 1381 } 1382 1383 /* Add mbuf to packet list to be returned. */ 1384 if (mb_tail) { 1385 mb_tail->m_nextpkt = slot->mbuf; 1386 } else { 1387 mb_head = slot->mbuf; 1388 } 1389 mb_tail = slot->mbuf; 1390 slot->mbuf = NULL; 1391 } 1392 1393 if (removed != 0) { 1394 sc->rx.queue_removes += removed; 1395 sc->rx.active_queue_len -= removed; 1396 sc->rx.avail_queue_len += removed; 1397 if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len) 1398 sc->rx.max_avail_queue_len = sc->rx.avail_queue_len; 1399 } 1400 return (mb_head); 1401 } 1402 1403 static void 1404 cpsw_rx_enqueue(struct cpsw_softc *sc) 1405 { 1406 bus_dma_segment_t seg[1]; 1407 struct cpsw_cpdma_bd bd; 1408 struct ifnet *ifp = sc->ifp; 1409 struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue); 1410 struct cpsw_slot *slot, *prev_slot = NULL; 1411 struct cpsw_slot *last_old_slot, *first_new_slot; 1412 int error, nsegs, added = 0; 1413 1414 /* Register new mbufs with hardware. */ 1415 while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) { 1416 if (slot->mbuf == NULL) { 1417 slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1418 if (slot->mbuf == NULL) { 1419 if_printf(sc->ifp, "Unable to fill RX queue\n"); 1420 break; 1421 } 1422 slot->mbuf->m_len = 1423 slot->mbuf->m_pkthdr.len = 1424 slot->mbuf->m_ext.ext_size; 1425 } 1426 1427 error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap, 1428 slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT); 1429 1430 KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs)); 1431 KASSERT(error == 0, ("DMA error (error=%d)", error)); 1432 if (error != 0 || nsegs != 1) { 1433 if_printf(ifp, 1434 "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n", 1435 __func__, nsegs, error); 1436 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1437 m_freem(slot->mbuf); 1438 slot->mbuf = NULL; 1439 break; 1440 } 1441 1442 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD); 1443 1444 /* Create and submit new rx descriptor*/ 1445 bd.next = 0; 1446 bd.bufptr = seg->ds_addr; 1447 bd.bufoff = 0; 1448 bd.buflen = MCLBYTES - 1; 1449 bd.pktlen = bd.buflen; 1450 bd.flags = CPDMA_BD_OWNER; 1451 cpsw_cpdma_write_bd(sc, slot, &bd); 1452 ++added; 1453 1454 if (prev_slot != NULL) 1455 cpsw_cpdma_write_bd_next(sc, prev_slot, slot); 1456 prev_slot = slot; 1457 STAILQ_REMOVE_HEAD(&sc->rx.avail, next); 1458 sc->rx.avail_queue_len--; 1459 STAILQ_INSERT_TAIL(&tmpqueue, slot, next); 1460 } 1461 1462 if (added == 0) 1463 return; 1464 1465 CPSW_DEBUGF(("Adding %d buffers to RX queue", added)); 1466 1467 /* Link new entries to hardware RX queue. */ 1468 last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next); 1469 first_new_slot = STAILQ_FIRST(&tmpqueue); 1470 STAILQ_CONCAT(&sc->rx.active, &tmpqueue); 1471 if (first_new_slot == NULL) { 1472 return; 1473 } else if (last_old_slot == NULL) { 1474 /* Start a fresh queue. */ 1475 cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); 1476 } else { 1477 /* Add buffers to end of current queue. */ 1478 cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot); 1479 /* If underrun, restart queue. */ 1480 if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) { 1481 cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); 1482 } 1483 } 1484 sc->rx.queue_adds += added; 1485 sc->rx.active_queue_len += added; 1486 if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) { 1487 sc->rx.max_active_queue_len = sc->rx.active_queue_len; 1488 } 1489 } 1490 1491 static void 1492 cpsw_start(struct ifnet *ifp) 1493 { 1494 struct cpsw_softc *sc = ifp->if_softc; 1495 1496 CPSW_TX_LOCK(sc); 1497 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && sc->tx.running) { 1498 cpsw_tx_enqueue(sc); 1499 cpsw_tx_dequeue(sc); 1500 } 1501 CPSW_TX_UNLOCK(sc); 1502 } 1503 1504 static void 1505 cpsw_tx_enqueue(struct cpsw_softc *sc) 1506 { 1507 bus_dma_segment_t segs[CPSW_TXFRAGS]; 1508 struct cpsw_cpdma_bd bd; 1509 struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue); 1510 struct cpsw_slot *slot, *prev_slot = NULL; 1511 struct cpsw_slot *last_old_slot, *first_new_slot; 1512 struct mbuf *m0; 1513 int error, nsegs, seg, added = 0, padlen; 1514 1515 /* Pull pending packets from IF queue and prep them for DMA. */ 1516 while ((slot = STAILQ_FIRST(&sc->tx.avail)) != NULL) { 1517 IF_DEQUEUE(&sc->ifp->if_snd, m0); 1518 if (m0 == NULL) 1519 break; 1520 1521 slot->mbuf = m0; 1522 padlen = ETHER_MIN_LEN - slot->mbuf->m_pkthdr.len; 1523 if (padlen < 0) 1524 padlen = 0; 1525 1526 /* Create mapping in DMA memory */ 1527 error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap, 1528 slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); 1529 /* If the packet is too fragmented, try to simplify. */ 1530 if (error == EFBIG || 1531 (error == 0 && 1532 nsegs + (padlen > 0 ? 1 : 0) > sc->tx.avail_queue_len)) { 1533 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1534 if (padlen > 0) /* May as well add padding. */ 1535 m_append(slot->mbuf, padlen, 1536 sc->null_mbuf->m_data); 1537 m0 = m_defrag(slot->mbuf, M_NOWAIT); 1538 if (m0 == NULL) { 1539 if_printf(sc->ifp, 1540 "Can't defragment packet; dropping\n"); 1541 m_freem(slot->mbuf); 1542 } else { 1543 CPSW_DEBUGF(("Requeueing defragmented packet")); 1544 IF_PREPEND(&sc->ifp->if_snd, m0); 1545 } 1546 slot->mbuf = NULL; 1547 continue; 1548 } 1549 if (error != 0) { 1550 if_printf(sc->ifp, 1551 "%s: Can't setup DMA (error=%d), dropping packet\n", 1552 __func__, error); 1553 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1554 m_freem(slot->mbuf); 1555 slot->mbuf = NULL; 1556 break; 1557 } 1558 1559 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, 1560 BUS_DMASYNC_PREWRITE); 1561 1562 1563 CPSW_DEBUGF(("Queueing TX packet: %d segments + %d pad bytes", 1564 nsegs, padlen)); 1565 1566 /* If there is only one segment, the for() loop 1567 * gets skipped and the single buffer gets set up 1568 * as both SOP and EOP. */ 1569 /* Start by setting up the first buffer */ 1570 bd.next = 0; 1571 bd.bufptr = segs[0].ds_addr; 1572 bd.bufoff = 0; 1573 bd.buflen = segs[0].ds_len; 1574 bd.pktlen = m_length(slot->mbuf, NULL) + padlen; 1575 bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER; 1576 for (seg = 1; seg < nsegs; ++seg) { 1577 /* Save the previous buffer (which isn't EOP) */ 1578 cpsw_cpdma_write_bd(sc, slot, &bd); 1579 if (prev_slot != NULL) 1580 cpsw_cpdma_write_bd_next(sc, prev_slot, slot); 1581 prev_slot = slot; 1582 STAILQ_REMOVE_HEAD(&sc->tx.avail, next); 1583 sc->tx.avail_queue_len--; 1584 STAILQ_INSERT_TAIL(&tmpqueue, slot, next); 1585 ++added; 1586 slot = STAILQ_FIRST(&sc->tx.avail); 1587 1588 /* Setup next buffer (which isn't SOP) */ 1589 bd.next = 0; 1590 bd.bufptr = segs[seg].ds_addr; 1591 bd.bufoff = 0; 1592 bd.buflen = segs[seg].ds_len; 1593 bd.pktlen = 0; 1594 bd.flags = CPDMA_BD_OWNER; 1595 } 1596 /* Save the final buffer. */ 1597 if (padlen <= 0) 1598 bd.flags |= CPDMA_BD_EOP; 1599 cpsw_cpdma_write_bd(sc, slot, &bd); 1600 if (prev_slot != NULL) 1601 cpsw_cpdma_write_bd_next(sc, prev_slot, slot); 1602 prev_slot = slot; 1603 STAILQ_REMOVE_HEAD(&sc->tx.avail, next); 1604 sc->tx.avail_queue_len--; 1605 STAILQ_INSERT_TAIL(&tmpqueue, slot, next); 1606 ++added; 1607 1608 if (padlen > 0) { 1609 slot = STAILQ_FIRST(&sc->tx.avail); 1610 STAILQ_REMOVE_HEAD(&sc->tx.avail, next); 1611 sc->tx.avail_queue_len--; 1612 STAILQ_INSERT_TAIL(&tmpqueue, slot, next); 1613 ++added; 1614 1615 /* Setup buffer of null pad bytes (definitely EOP) */ 1616 cpsw_cpdma_write_bd_next(sc, prev_slot, slot); 1617 prev_slot = slot; 1618 bd.next = 0; 1619 bd.bufptr = sc->null_mbuf_paddr; 1620 bd.bufoff = 0; 1621 bd.buflen = padlen; 1622 bd.pktlen = 0; 1623 bd.flags = CPDMA_BD_EOP | CPDMA_BD_OWNER; 1624 cpsw_cpdma_write_bd(sc, slot, &bd); 1625 ++nsegs; 1626 } 1627 1628 if (nsegs > sc->tx.longest_chain) 1629 sc->tx.longest_chain = nsegs; 1630 1631 // TODO: Should we defer the BPF tap until 1632 // after all packets are queued? 1633 BPF_MTAP(sc->ifp, m0); 1634 } 1635 1636 /* Attach the list of new buffers to the hardware TX queue. */ 1637 last_old_slot = STAILQ_LAST(&sc->tx.active, cpsw_slot, next); 1638 first_new_slot = STAILQ_FIRST(&tmpqueue); 1639 STAILQ_CONCAT(&sc->tx.active, &tmpqueue); 1640 if (first_new_slot == NULL) { 1641 return; 1642 } else if (last_old_slot == NULL) { 1643 /* Start a fresh queue. */ 1644 cpsw_write_hdp_slot(sc, &sc->tx, first_new_slot); 1645 } else { 1646 /* Add buffers to end of current queue. */ 1647 cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot); 1648 /* If underrun, restart queue. */ 1649 if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) { 1650 cpsw_write_hdp_slot(sc, &sc->tx, first_new_slot); 1651 } 1652 } 1653 sc->tx.queue_adds += added; 1654 sc->tx.active_queue_len += added; 1655 if (sc->tx.active_queue_len > sc->tx.max_active_queue_len) { 1656 sc->tx.max_active_queue_len = sc->tx.active_queue_len; 1657 } 1658 } 1659 1660 static int 1661 cpsw_tx_dequeue(struct cpsw_softc *sc) 1662 { 1663 struct cpsw_slot *slot, *last_removed_slot = NULL; 1664 uint32_t flags, removed = 0; 1665 1666 slot = STAILQ_FIRST(&sc->tx.active); 1667 if (slot == NULL && cpsw_read_cp(sc, &sc->tx) == 0xfffffffc) { 1668 CPSW_DEBUGF(("TX teardown of an empty queue")); 1669 cpsw_write_cp(sc, &sc->tx, 0xfffffffc); 1670 sc->tx.running = 0; 1671 return (0); 1672 } 1673 1674 /* Pull completed buffers off the hardware TX queue. */ 1675 while (slot != NULL) { 1676 flags = cpsw_cpdma_read_bd_flags(sc, slot); 1677 if (flags & CPDMA_BD_OWNER) 1678 break; /* Hardware is still using this packet. */ 1679 1680 CPSW_DEBUGF(("TX removing completed packet")); 1681 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE); 1682 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1683 m_freem(slot->mbuf); 1684 slot->mbuf = NULL; 1685 1686 /* Dequeue any additional buffers used by this packet. */ 1687 while (slot != NULL && slot->mbuf == NULL) { 1688 STAILQ_REMOVE_HEAD(&sc->tx.active, next); 1689 STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next); 1690 ++removed; 1691 last_removed_slot = slot; 1692 slot = STAILQ_FIRST(&sc->tx.active); 1693 } 1694 1695 /* TearDown complete is only marked on the SOP for the packet. */ 1696 if (flags & CPDMA_BD_TDOWNCMPLT) { 1697 CPSW_DEBUGF(("TX teardown in progress")); 1698 cpsw_write_cp(sc, &sc->tx, 0xfffffffc); 1699 // TODO: Increment a count of dropped TX packets 1700 sc->tx.running = 0; 1701 break; 1702 } 1703 } 1704 1705 if (removed != 0) { 1706 cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot); 1707 sc->tx.queue_removes += removed; 1708 sc->tx.active_queue_len -= removed; 1709 sc->tx.avail_queue_len += removed; 1710 if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len) 1711 sc->tx.max_avail_queue_len = sc->tx.avail_queue_len; 1712 } 1713 return (removed); 1714 } 1715 1716 /* 1717 * 1718 * Miscellaneous interrupts. 1719 * 1720 */ 1721 1722 static void 1723 cpsw_intr_rx_thresh(void *arg) 1724 { 1725 struct cpsw_softc *sc = arg; 1726 uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_RX_THRESH_STAT(0)); 1727 1728 CPSW_DEBUGF(("stat=%x", stat)); 1729 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0); 1730 } 1731 1732 static void 1733 cpsw_intr_misc_host_error(struct cpsw_softc *sc) 1734 { 1735 uint32_t intstat; 1736 uint32_t dmastat; 1737 int txerr, rxerr, txchan, rxchan; 1738 1739 printf("\n\n"); 1740 device_printf(sc->dev, 1741 "HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n"); 1742 printf("\n\n"); 1743 intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED); 1744 device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat); 1745 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS); 1746 device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat); 1747 1748 txerr = (dmastat >> 20) & 15; 1749 txchan = (dmastat >> 16) & 7; 1750 rxerr = (dmastat >> 12) & 15; 1751 rxchan = (dmastat >> 8) & 7; 1752 1753 switch (txerr) { 1754 case 0: break; 1755 case 1: printf("SOP error on TX channel %d\n", txchan); 1756 break; 1757 case 2: printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan); 1758 break; 1759 case 3: printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan); 1760 break; 1761 case 4: printf("Zero Buffer Pointer on TX channel %d\n", txchan); 1762 break; 1763 case 5: printf("Zero Buffer Length on TX channel %d\n", txchan); 1764 break; 1765 case 6: printf("Packet length error on TX channel %d\n", txchan); 1766 break; 1767 default: printf("Unknown error on TX channel %d\n", txchan); 1768 break; 1769 } 1770 1771 if (txerr != 0) { 1772 printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 1773 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan))); 1774 printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 1775 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan))); 1776 cpsw_dump_queue(sc, &sc->tx.active); 1777 } 1778 1779 switch (rxerr) { 1780 case 0: break; 1781 case 2: printf("Ownership bit not set on RX channel %d\n", rxchan); 1782 break; 1783 case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan); 1784 break; 1785 case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan); 1786 break; 1787 case 6: printf("Buffer offset too big on RX channel %d\n", rxchan); 1788 break; 1789 default: printf("Unknown RX error on RX channel %d\n", rxchan); 1790 break; 1791 } 1792 1793 if (rxerr != 0) { 1794 printf("CPSW_CPDMA_RX%d_HDP=0x%x\n", 1795 rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan))); 1796 printf("CPSW_CPDMA_RX%d_CP=0x%x\n", 1797 rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan))); 1798 cpsw_dump_queue(sc, &sc->rx.active); 1799 } 1800 1801 printf("\nALE Table\n"); 1802 cpsw_ale_dump_table(sc); 1803 1804 // XXX do something useful here?? 1805 panic("CPSW HOST ERROR INTERRUPT"); 1806 1807 // Suppress this interrupt in the future. 1808 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat); 1809 printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n"); 1810 // The watchdog will probably reset the controller 1811 // in a little while. It will probably fail again. 1812 } 1813 1814 static void 1815 cpsw_intr_misc(void *arg) 1816 { 1817 struct cpsw_softc *sc = arg; 1818 uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0)); 1819 1820 if (stat & 16) 1821 CPSW_DEBUGF(("Time sync event interrupt unimplemented")); 1822 if (stat & 8) 1823 cpsw_stats_collect(sc); 1824 if (stat & 4) 1825 cpsw_intr_misc_host_error(sc); 1826 if (stat & 2) 1827 CPSW_DEBUGF(("MDIO link change interrupt unimplemented")); 1828 if (stat & 1) 1829 CPSW_DEBUGF(("MDIO operation completed interrupt unimplemented")); 1830 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3); 1831 } 1832 1833 /* 1834 * 1835 * Periodic Checks and Watchdog. 1836 * 1837 */ 1838 1839 static void 1840 cpsw_tick(void *msc) 1841 { 1842 struct cpsw_softc *sc = msc; 1843 1844 /* Check for TX timeout */ 1845 cpsw_tx_watchdog(sc); 1846 1847 /* Check for media type change */ 1848 mii_tick(sc->mii); 1849 if(sc->cpsw_media_status != sc->mii->mii_media.ifm_media) { 1850 printf("%s: media type changed (ifm_media=%x)\n", __func__, 1851 sc->mii->mii_media.ifm_media); 1852 cpsw_ifmedia_upd(sc->ifp); 1853 } 1854 1855 /* Schedule another timeout one second from now */ 1856 callout_reset(&sc->watchdog.callout, hz, cpsw_tick, sc); 1857 } 1858 1859 static void 1860 cpsw_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1861 { 1862 struct cpsw_softc *sc = ifp->if_softc; 1863 struct mii_data *mii; 1864 1865 CPSW_DEBUGF(("")); 1866 CPSW_TX_LOCK(sc); 1867 1868 mii = sc->mii; 1869 mii_pollstat(mii); 1870 1871 ifmr->ifm_active = mii->mii_media_active; 1872 ifmr->ifm_status = mii->mii_media_status; 1873 1874 CPSW_TX_UNLOCK(sc); 1875 } 1876 1877 static int 1878 cpsw_ifmedia_upd(struct ifnet *ifp) 1879 { 1880 struct cpsw_softc *sc = ifp->if_softc; 1881 1882 CPSW_DEBUGF(("")); 1883 if (ifp->if_flags & IFF_UP) { 1884 CPSW_GLOBAL_LOCK(sc); 1885 sc->cpsw_media_status = sc->mii->mii_media.ifm_media; 1886 mii_mediachg(sc->mii); 1887 cpsw_init_locked(sc); 1888 CPSW_GLOBAL_UNLOCK(sc); 1889 } 1890 1891 return (0); 1892 } 1893 1894 static void 1895 cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc) 1896 { 1897 cpsw_debugf_head("CPSW watchdog"); 1898 if_printf(sc->ifp, "watchdog timeout\n"); 1899 cpsw_shutdown_locked(sc); 1900 cpsw_init_locked(sc); 1901 } 1902 1903 static void 1904 cpsw_tx_watchdog(struct cpsw_softc *sc) 1905 { 1906 struct ifnet *ifp = sc->ifp; 1907 1908 CPSW_GLOBAL_LOCK(sc); 1909 if (sc->tx.active_queue_len == 0 || (ifp->if_flags & IFF_UP) == 0 || 1910 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !sc->tx.running) { 1911 sc->watchdog.timer = 0; /* Nothing to do. */ 1912 } else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) { 1913 sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */ 1914 } else if (cpsw_tx_dequeue(sc) > 0) { 1915 sc->watchdog.timer = 0; /* We just did something. */ 1916 } else { 1917 /* There was something to do but it didn't get done. */ 1918 ++sc->watchdog.timer; 1919 if (sc->watchdog.timer > 2) { 1920 sc->watchdog.timer = 0; 1921 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1922 ++sc->watchdog.resets; 1923 cpsw_tx_watchdog_full_reset(sc); 1924 } 1925 } 1926 sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes; 1927 CPSW_GLOBAL_UNLOCK(sc); 1928 } 1929 1930 /* 1931 * 1932 * ALE support routines. 1933 * 1934 */ 1935 1936 static void 1937 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) 1938 { 1939 cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023); 1940 ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0); 1941 ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1); 1942 ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2); 1943 } 1944 1945 static void 1946 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) 1947 { 1948 cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]); 1949 cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]); 1950 cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]); 1951 cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023)); 1952 } 1953 1954 static int 1955 cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc) 1956 { 1957 int i; 1958 uint32_t ale_entry[3]; 1959 1960 /* First two entries are link address and broadcast. */ 1961 for (i = 2; i < CPSW_MAX_ALE_ENTRIES; i++) { 1962 cpsw_ale_read_entry(sc, i, ale_entry); 1963 if (((ale_entry[1] >> 28) & 3) == 1 && /* Address entry */ 1964 ((ale_entry[1] >> 8) & 1) == 1) { /* MCast link addr */ 1965 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; 1966 cpsw_ale_write_entry(sc, i, ale_entry); 1967 } 1968 } 1969 return CPSW_MAX_ALE_ENTRIES; 1970 } 1971 1972 static int 1973 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, uint8_t *mac) 1974 { 1975 int free_index = -1, matching_index = -1, i; 1976 uint32_t ale_entry[3]; 1977 1978 /* Find a matching entry or a free entry. */ 1979 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { 1980 cpsw_ale_read_entry(sc, i, ale_entry); 1981 1982 /* Entry Type[61:60] is 0 for free entry */ 1983 if (free_index < 0 && ((ale_entry[1] >> 28) & 3) == 0) { 1984 free_index = i; 1985 } 1986 1987 if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) && 1988 (((ale_entry[1] >> 0) & 0xFF) == mac[1]) && 1989 (((ale_entry[0] >>24) & 0xFF) == mac[2]) && 1990 (((ale_entry[0] >>16) & 0xFF) == mac[3]) && 1991 (((ale_entry[0] >> 8) & 0xFF) == mac[4]) && 1992 (((ale_entry[0] >> 0) & 0xFF) == mac[5])) { 1993 matching_index = i; 1994 break; 1995 } 1996 } 1997 1998 if (matching_index < 0) { 1999 if (free_index < 0) 2000 return (ENOMEM); 2001 i = free_index; 2002 } 2003 2004 /* Set MAC address */ 2005 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 2006 ale_entry[1] = mac[0] << 8 | mac[1]; 2007 2008 /* Entry type[61:60] is addr entry(1), Mcast fwd state[63:62] is fw(3)*/ 2009 ale_entry[1] |= 0xd0 << 24; 2010 2011 /* Set portmask [68:66] */ 2012 ale_entry[2] = (portmap & 7) << 2; 2013 2014 cpsw_ale_write_entry(sc, i, ale_entry); 2015 2016 return 0; 2017 } 2018 2019 static void 2020 cpsw_ale_dump_table(struct cpsw_softc *sc) { 2021 int i; 2022 uint32_t ale_entry[3]; 2023 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { 2024 cpsw_ale_read_entry(sc, i, ale_entry); 2025 if (ale_entry[0] || ale_entry[1] || ale_entry[2]) { 2026 printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[0], 2027 ale_entry[1], ale_entry[2]); 2028 printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ", 2029 (ale_entry[1] >> 8) & 0xFF, 2030 (ale_entry[1] >> 0) & 0xFF, 2031 (ale_entry[0] >>24) & 0xFF, 2032 (ale_entry[0] >>16) & 0xFF, 2033 (ale_entry[0] >> 8) & 0xFF, 2034 (ale_entry[0] >> 0) & 0xFF); 2035 printf(((ale_entry[1] >> 8) & 1) ? "mcast " : "ucast "); 2036 printf("type: %u ", (ale_entry[1] >> 28) & 3); 2037 printf("port: %u ", (ale_entry[2] >> 2) & 7); 2038 printf("\n"); 2039 } 2040 } 2041 printf("\n"); 2042 } 2043 2044 static int 2045 cpsw_ale_update_addresses(struct cpsw_softc *sc, int purge) 2046 { 2047 uint8_t *mac; 2048 uint32_t ale_entry[3]; 2049 struct ifnet *ifp = sc->ifp; 2050 struct ifmultiaddr *ifma; 2051 int i; 2052 2053 /* Route incoming packets for our MAC address to Port 0 (host). */ 2054 /* For simplicity, keep this entry at table index 0 in the ALE. */ 2055 if_addr_rlock(ifp); 2056 mac = LLADDR((struct sockaddr_dl *)ifp->if_addr->ifa_addr); 2057 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 2058 ale_entry[1] = 0x10 << 24 | mac[0] << 8 | mac[1]; /* addr entry + mac */ 2059 ale_entry[2] = 0; /* port = 0 */ 2060 cpsw_ale_write_entry(sc, 0, ale_entry); 2061 2062 /* Set outgoing MAC Address for Ports 1 and 2. */ 2063 for (i = 1; i < 3; ++i) { 2064 cpsw_write_4(sc, CPSW_PORT_P_SA_HI(i), 2065 mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]); 2066 cpsw_write_4(sc, CPSW_PORT_P_SA_LO(i), 2067 mac[5] << 8 | mac[4]); 2068 } 2069 if_addr_runlock(ifp); 2070 2071 /* Keep the broadcast address at table entry 1. */ 2072 ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */ 2073 ale_entry[1] = 0xd000ffff; /* FW (3 << 30), Addr entry (1 << 24), upper 16 bits of Mac */ 2074 ale_entry[2] = 0x0000001c; /* Forward to all ports */ 2075 cpsw_ale_write_entry(sc, 1, ale_entry); 2076 2077 /* SIOCDELMULTI doesn't specify the particular address 2078 being removed, so we have to remove all and rebuild. */ 2079 if (purge) 2080 cpsw_ale_remove_all_mc_entries(sc); 2081 2082 /* Set other multicast addrs desired. */ 2083 if_maddr_rlock(ifp); 2084 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2085 if (ifma->ifma_addr->sa_family != AF_LINK) 2086 continue; 2087 cpsw_ale_mc_entry_set(sc, 7, 2088 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 2089 } 2090 if_maddr_runlock(ifp); 2091 2092 return (0); 2093 } 2094 2095 /* 2096 * 2097 * Statistics and Sysctls. 2098 * 2099 */ 2100 2101 #if 0 2102 static void 2103 cpsw_stats_dump(struct cpsw_softc *sc) 2104 { 2105 int i; 2106 uint32_t r; 2107 2108 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2109 r = cpsw_read_4(sc, CPSW_STATS_OFFSET + 2110 cpsw_stat_sysctls[i].reg); 2111 CPSW_DEBUGF(("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid, 2112 (intmax_t)sc->shadow_stats[i], r, 2113 (intmax_t)sc->shadow_stats[i] + r)); 2114 } 2115 } 2116 #endif 2117 2118 static void 2119 cpsw_stats_collect(struct cpsw_softc *sc) 2120 { 2121 int i; 2122 uint32_t r; 2123 2124 CPSW_DEBUGF(("Controller shadow statistics updated.")); 2125 2126 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2127 r = cpsw_read_4(sc, CPSW_STATS_OFFSET + 2128 cpsw_stat_sysctls[i].reg); 2129 sc->shadow_stats[i] += r; 2130 cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg, r); 2131 } 2132 } 2133 2134 static int 2135 cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS) 2136 { 2137 struct cpsw_softc *sc; 2138 struct cpsw_stat *stat; 2139 uint64_t result; 2140 2141 sc = (struct cpsw_softc *)arg1; 2142 stat = &cpsw_stat_sysctls[oidp->oid_number]; 2143 result = sc->shadow_stats[oidp->oid_number]; 2144 result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg); 2145 return (sysctl_handle_64(oidp, &result, 0, req)); 2146 } 2147 2148 static int 2149 cpsw_stat_attached(SYSCTL_HANDLER_ARGS) 2150 { 2151 struct cpsw_softc *sc; 2152 struct bintime t; 2153 unsigned result; 2154 2155 sc = (struct cpsw_softc *)arg1; 2156 getbinuptime(&t); 2157 bintime_sub(&t, &sc->attach_uptime); 2158 result = t.sec; 2159 return (sysctl_handle_int(oidp, &result, 0, req)); 2160 } 2161 2162 static int 2163 cpsw_stat_uptime(SYSCTL_HANDLER_ARGS) 2164 { 2165 struct cpsw_softc *sc; 2166 struct bintime t; 2167 unsigned result; 2168 2169 sc = (struct cpsw_softc *)arg1; 2170 if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) { 2171 getbinuptime(&t); 2172 bintime_sub(&t, &sc->init_uptime); 2173 result = t.sec; 2174 } else 2175 result = 0; 2176 return (sysctl_handle_int(oidp, &result, 0, req)); 2177 } 2178 2179 static void 2180 cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, struct cpsw_queue *queue) 2181 { 2182 struct sysctl_oid_list *parent; 2183 2184 parent = SYSCTL_CHILDREN(node); 2185 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers", 2186 CTLFLAG_RD, &queue->queue_slots, 0, 2187 "Total buffers currently assigned to this queue"); 2188 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers", 2189 CTLFLAG_RD, &queue->active_queue_len, 0, 2190 "Buffers currently registered with hardware controller"); 2191 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers", 2192 CTLFLAG_RD, &queue->max_active_queue_len, 0, 2193 "Max value of activeBuffers since last driver reset"); 2194 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers", 2195 CTLFLAG_RD, &queue->avail_queue_len, 0, 2196 "Buffers allocated to this queue but not currently " 2197 "registered with hardware controller"); 2198 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers", 2199 CTLFLAG_RD, &queue->max_avail_queue_len, 0, 2200 "Max value of availBuffers since last driver reset"); 2201 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued", 2202 CTLFLAG_RD, &queue->queue_adds, 0, 2203 "Total buffers added to queue"); 2204 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued", 2205 CTLFLAG_RD, &queue->queue_removes, 0, 2206 "Total buffers removed from queue"); 2207 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain", 2208 CTLFLAG_RD, &queue->longest_chain, 0, 2209 "Max buffers used for a single packet"); 2210 } 2211 2212 static void 2213 cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, struct cpsw_softc *sc) 2214 { 2215 struct sysctl_oid_list *parent; 2216 2217 parent = SYSCTL_CHILDREN(node); 2218 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets", 2219 CTLFLAG_RD, &sc->watchdog.resets, 0, 2220 "Total number of watchdog resets"); 2221 } 2222 2223 static void 2224 cpsw_add_sysctls(struct cpsw_softc *sc) 2225 { 2226 struct sysctl_ctx_list *ctx; 2227 struct sysctl_oid *stats_node, *queue_node, *node; 2228 struct sysctl_oid_list *parent, *stats_parent, *queue_parent; 2229 int i; 2230 2231 ctx = device_get_sysctl_ctx(sc->dev); 2232 parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 2233 2234 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs", 2235 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU", 2236 "Time since driver attach"); 2237 2238 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "uptime", 2239 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_uptime, "IU", 2240 "Seconds since driver init"); 2241 2242 stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", 2243 CTLFLAG_RD, NULL, "CPSW Statistics"); 2244 stats_parent = SYSCTL_CHILDREN(stats_node); 2245 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2246 SYSCTL_ADD_PROC(ctx, stats_parent, i, 2247 cpsw_stat_sysctls[i].oid, 2248 CTLTYPE_U64 | CTLFLAG_RD, sc, 0, 2249 cpsw_stats_sysctl, "IU", 2250 cpsw_stat_sysctls[i].oid); 2251 } 2252 2253 queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue", 2254 CTLFLAG_RD, NULL, "CPSW Queue Statistics"); 2255 queue_parent = SYSCTL_CHILDREN(queue_node); 2256 2257 node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx", 2258 CTLFLAG_RD, NULL, "TX Queue Statistics"); 2259 cpsw_add_queue_sysctls(ctx, node, &sc->tx); 2260 2261 node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx", 2262 CTLFLAG_RD, NULL, "RX Queue Statistics"); 2263 cpsw_add_queue_sysctls(ctx, node, &sc->rx); 2264 2265 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog", 2266 CTLFLAG_RD, NULL, "Watchdog Statistics"); 2267 cpsw_add_watchdog_sysctls(ctx, node, sc); 2268 } 2269 2270