1 /*- 2 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org> 3 * Copyright (c) 2016 Rubicon Communications, LLC (Netgate) 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 /* 29 * TI Common Platform Ethernet Switch (CPSW) Driver 30 * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs. 31 * 32 * This controller is documented in the AM335x Technical Reference 33 * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM 34 * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM. 35 * 36 * It is basically a single Ethernet port (port 0) wired internally to 37 * a 3-port store-and-forward switch connected to two independent 38 * "sliver" controllers (port 1 and port 2). You can operate the 39 * controller in a variety of different ways by suitably configuring 40 * the slivers and the Address Lookup Engine (ALE) that routes packets 41 * between the ports. 42 * 43 * This code was developed and tested on a BeagleBone with 44 * an AM335x SoC. 45 */ 46 47 #include <sys/cdefs.h> 48 __FBSDID("$FreeBSD$"); 49 50 #include <sys/param.h> 51 #include <sys/systm.h> 52 #include <sys/endian.h> 53 #include <sys/mbuf.h> 54 #include <sys/lock.h> 55 #include <sys/mutex.h> 56 #include <sys/kernel.h> 57 #include <sys/module.h> 58 #include <sys/socket.h> 59 #include <sys/sysctl.h> 60 61 #include <net/ethernet.h> 62 #include <net/bpf.h> 63 #include <net/if.h> 64 #include <net/if_arp.h> 65 #include <net/if_dl.h> 66 #include <net/if_media.h> 67 #include <net/if_types.h> 68 #include <net/if_var.h> 69 #include <net/if_vlan_var.h> 70 71 #include <netinet/in_systm.h> 72 #include <netinet/in.h> 73 #include <netinet/ip.h> 74 75 #include <sys/sockio.h> 76 #include <sys/bus.h> 77 #include <machine/bus.h> 78 #include <sys/rman.h> 79 #include <machine/resource.h> 80 81 #include <dev/mii/mii.h> 82 #include <dev/mii/miivar.h> 83 84 #include <dev/fdt/fdt_common.h> 85 #include <dev/ofw/ofw_bus.h> 86 #include <dev/ofw/ofw_bus_subr.h> 87 88 #include "if_cpswreg.h" 89 #include "if_cpswvar.h" 90 91 #include <arm/ti/ti_scm.h> 92 93 #include "miibus_if.h" 94 95 /* Device probe/attach/detach. */ 96 static int cpsw_probe(device_t); 97 static int cpsw_attach(device_t); 98 static int cpsw_detach(device_t); 99 static int cpswp_probe(device_t); 100 static int cpswp_attach(device_t); 101 static int cpswp_detach(device_t); 102 103 static phandle_t cpsw_get_node(device_t, device_t); 104 105 /* Device Init/shutdown. */ 106 static int cpsw_shutdown(device_t); 107 static void cpswp_init(void *); 108 static void cpswp_init_locked(void *); 109 static void cpswp_stop_locked(struct cpswp_softc *); 110 111 /* Device Suspend/Resume. */ 112 static int cpsw_suspend(device_t); 113 static int cpsw_resume(device_t); 114 115 /* Ioctl. */ 116 static int cpswp_ioctl(struct ifnet *, u_long command, caddr_t data); 117 118 static int cpswp_miibus_readreg(device_t, int phy, int reg); 119 static int cpswp_miibus_writereg(device_t, int phy, int reg, int value); 120 static void cpswp_miibus_statchg(device_t); 121 122 /* Send/Receive packets. */ 123 static void cpsw_intr_rx(void *arg); 124 static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *); 125 static void cpsw_rx_enqueue(struct cpsw_softc *); 126 static void cpswp_start(struct ifnet *); 127 static void cpswp_tx_enqueue(struct cpswp_softc *); 128 static int cpsw_tx_dequeue(struct cpsw_softc *); 129 130 /* Misc interrupts and watchdog. */ 131 static void cpsw_intr_rx_thresh(void *); 132 static void cpsw_intr_misc(void *); 133 static void cpswp_tick(void *); 134 static void cpswp_ifmedia_sts(struct ifnet *, struct ifmediareq *); 135 static int cpswp_ifmedia_upd(struct ifnet *); 136 static void cpsw_tx_watchdog(void *); 137 138 /* ALE support */ 139 static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *); 140 static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *); 141 static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *); 142 static void cpsw_ale_dump_table(struct cpsw_softc *); 143 static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int, 144 int); 145 static int cpswp_ale_update_addresses(struct cpswp_softc *, int); 146 147 /* Statistics and sysctls. */ 148 static void cpsw_add_sysctls(struct cpsw_softc *); 149 static void cpsw_stats_collect(struct cpsw_softc *); 150 static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS); 151 152 /* 153 * Arbitrary limit on number of segments in an mbuf to be transmitted. 154 * Packets with more segments than this will be defragmented before 155 * they are queued. 156 */ 157 #define CPSW_TXFRAGS 16 158 159 /* Shared resources. */ 160 static device_method_t cpsw_methods[] = { 161 /* Device interface */ 162 DEVMETHOD(device_probe, cpsw_probe), 163 DEVMETHOD(device_attach, cpsw_attach), 164 DEVMETHOD(device_detach, cpsw_detach), 165 DEVMETHOD(device_shutdown, cpsw_shutdown), 166 DEVMETHOD(device_suspend, cpsw_suspend), 167 DEVMETHOD(device_resume, cpsw_resume), 168 /* OFW methods */ 169 DEVMETHOD(ofw_bus_get_node, cpsw_get_node), 170 DEVMETHOD_END 171 }; 172 173 static driver_t cpsw_driver = { 174 "cpswss", 175 cpsw_methods, 176 sizeof(struct cpsw_softc), 177 }; 178 179 static devclass_t cpsw_devclass; 180 181 DRIVER_MODULE(cpswss, simplebus, cpsw_driver, cpsw_devclass, 0, 0); 182 183 /* Port/Slave resources. */ 184 static device_method_t cpswp_methods[] = { 185 /* Device interface */ 186 DEVMETHOD(device_probe, cpswp_probe), 187 DEVMETHOD(device_attach, cpswp_attach), 188 DEVMETHOD(device_detach, cpswp_detach), 189 /* MII interface */ 190 DEVMETHOD(miibus_readreg, cpswp_miibus_readreg), 191 DEVMETHOD(miibus_writereg, cpswp_miibus_writereg), 192 DEVMETHOD(miibus_statchg, cpswp_miibus_statchg), 193 DEVMETHOD_END 194 }; 195 196 static driver_t cpswp_driver = { 197 "cpsw", 198 cpswp_methods, 199 sizeof(struct cpswp_softc), 200 }; 201 202 static devclass_t cpswp_devclass; 203 204 DRIVER_MODULE(cpsw, cpswss, cpswp_driver, cpswp_devclass, 0, 0); 205 DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0); 206 MODULE_DEPEND(cpsw, ether, 1, 1, 1); 207 MODULE_DEPEND(cpsw, miibus, 1, 1, 1); 208 209 static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 }; 210 211 static struct resource_spec irq_res_spec[] = { 212 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 213 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE }, 214 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE }, 215 { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE }, 216 { -1, 0 } 217 }; 218 219 /* Number of entries here must match size of stats 220 * array in struct cpswp_softc. */ 221 static struct cpsw_stat { 222 int reg; 223 char *oid; 224 } cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = { 225 {0x00, "GoodRxFrames"}, 226 {0x04, "BroadcastRxFrames"}, 227 {0x08, "MulticastRxFrames"}, 228 {0x0C, "PauseRxFrames"}, 229 {0x10, "RxCrcErrors"}, 230 {0x14, "RxAlignErrors"}, 231 {0x18, "OversizeRxFrames"}, 232 {0x1c, "RxJabbers"}, 233 {0x20, "ShortRxFrames"}, 234 {0x24, "RxFragments"}, 235 {0x30, "RxOctets"}, 236 {0x34, "GoodTxFrames"}, 237 {0x38, "BroadcastTxFrames"}, 238 {0x3c, "MulticastTxFrames"}, 239 {0x40, "PauseTxFrames"}, 240 {0x44, "DeferredTxFrames"}, 241 {0x48, "CollisionsTxFrames"}, 242 {0x4c, "SingleCollisionTxFrames"}, 243 {0x50, "MultipleCollisionTxFrames"}, 244 {0x54, "ExcessiveCollisions"}, 245 {0x58, "LateCollisions"}, 246 {0x5c, "TxUnderrun"}, 247 {0x60, "CarrierSenseErrors"}, 248 {0x64, "TxOctets"}, 249 {0x68, "RxTx64OctetFrames"}, 250 {0x6c, "RxTx65to127OctetFrames"}, 251 {0x70, "RxTx128to255OctetFrames"}, 252 {0x74, "RxTx256to511OctetFrames"}, 253 {0x78, "RxTx512to1024OctetFrames"}, 254 {0x7c, "RxTx1024upOctetFrames"}, 255 {0x80, "NetOctets"}, 256 {0x84, "RxStartOfFrameOverruns"}, 257 {0x88, "RxMiddleOfFrameOverruns"}, 258 {0x8c, "RxDmaOverruns"} 259 }; 260 261 /* 262 * Basic debug support. 263 */ 264 265 #define IF_DEBUG(_sc) if ((_sc)->if_flags & IFF_DEBUG) 266 267 static void 268 cpsw_debugf_head(const char *funcname) 269 { 270 int t = (int)(time_second % (24 * 60 * 60)); 271 272 printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname); 273 } 274 275 #include <machine/stdarg.h> 276 static void 277 cpsw_debugf(const char *fmt, ...) 278 { 279 va_list ap; 280 281 va_start(ap, fmt); 282 vprintf(fmt, ap); 283 va_end(ap); 284 printf("\n"); 285 286 } 287 288 #define CPSW_DEBUGF(_sc, a) do { \ 289 if (sc->debug) { \ 290 cpsw_debugf_head(__func__); \ 291 cpsw_debugf a; \ 292 } \ 293 } while (0) 294 295 #define CPSWP_DEBUGF(_sc, a) do { \ 296 IF_DEBUG((_sc)) { \ 297 cpsw_debugf_head(__func__); \ 298 cpsw_debugf a; \ 299 } \ 300 } while (0) 301 302 303 /* 304 * Locking macros 305 */ 306 #define CPSW_TX_LOCK(sc) do { \ 307 mtx_assert(&(sc)->rx.lock, MA_NOTOWNED); \ 308 mtx_lock(&(sc)->tx.lock); \ 309 } while (0) 310 311 #define CPSW_TX_UNLOCK(sc) mtx_unlock(&(sc)->tx.lock) 312 #define CPSW_TX_LOCK_ASSERT(sc) mtx_assert(&(sc)->tx.lock, MA_OWNED) 313 314 #define CPSW_RX_LOCK(sc) do { \ 315 mtx_assert(&(sc)->tx.lock, MA_NOTOWNED); \ 316 mtx_lock(&(sc)->rx.lock); \ 317 } while (0) 318 319 #define CPSW_RX_UNLOCK(sc) mtx_unlock(&(sc)->rx.lock) 320 #define CPSW_RX_LOCK_ASSERT(sc) mtx_assert(&(sc)->rx.lock, MA_OWNED) 321 322 #define CPSW_GLOBAL_LOCK(sc) do { \ 323 if ((mtx_owned(&(sc)->tx.lock) ? 1 : 0) != \ 324 (mtx_owned(&(sc)->rx.lock) ? 1 : 0)) { \ 325 panic("cpsw deadlock possibility detection!"); \ 326 } \ 327 mtx_lock(&(sc)->tx.lock); \ 328 mtx_lock(&(sc)->rx.lock); \ 329 } while (0) 330 331 #define CPSW_GLOBAL_UNLOCK(sc) do { \ 332 CPSW_RX_UNLOCK(sc); \ 333 CPSW_TX_UNLOCK(sc); \ 334 } while (0) 335 336 #define CPSW_GLOBAL_LOCK_ASSERT(sc) do { \ 337 CPSW_TX_LOCK_ASSERT(sc); \ 338 CPSW_RX_LOCK_ASSERT(sc); \ 339 } while (0) 340 341 #define CPSW_PORT_LOCK(_sc) do { \ 342 mtx_assert(&(_sc)->lock, MA_NOTOWNED); \ 343 mtx_lock(&(_sc)->lock); \ 344 } while (0) 345 346 #define CPSW_PORT_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) 347 #define CPSW_PORT_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->lock, MA_OWNED) 348 349 /* 350 * Read/Write macros 351 */ 352 #define cpsw_read_4(_sc, _reg) bus_read_4((_sc)->mem_res, (_reg)) 353 #define cpsw_write_4(_sc, _reg, _val) \ 354 bus_write_4((_sc)->mem_res, (_reg), (_val)) 355 356 #define cpsw_cpdma_bd_offset(i) (CPSW_CPPI_RAM_OFFSET + ((i)*16)) 357 358 #define cpsw_cpdma_bd_paddr(sc, slot) \ 359 BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset) 360 #define cpsw_cpdma_read_bd(sc, slot, val) \ 361 bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) 362 #define cpsw_cpdma_write_bd(sc, slot, val) \ 363 bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) 364 #define cpsw_cpdma_write_bd_next(sc, slot, next_slot) \ 365 cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot)) 366 #define cpsw_cpdma_read_bd_flags(sc, slot) \ 367 bus_read_2(sc->mem_res, slot->bd_offset + 14) 368 #define cpsw_write_hdp_slot(sc, queue, slot) \ 369 cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot)) 370 #define CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0)) 371 #define cpsw_read_cp(sc, queue) \ 372 cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET) 373 #define cpsw_write_cp(sc, queue, val) \ 374 cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val)) 375 #define cpsw_write_cp_slot(sc, queue, slot) \ 376 cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot)) 377 378 #if 0 379 /* XXX temporary function versions for debugging. */ 380 static void 381 cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) 382 { 383 uint32_t reg = queue->hdp_offset; 384 uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); 385 CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg))); 386 cpsw_write_4(sc, reg, v); 387 } 388 389 static void 390 cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) 391 { 392 uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); 393 CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue))); 394 cpsw_write_cp(sc, queue, v); 395 } 396 #endif 397 398 /* 399 * Expanded dump routines for verbose debugging. 400 */ 401 static void 402 cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) 403 { 404 static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ", 405 "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun", 406 "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1", 407 "Port0"}; 408 struct cpsw_cpdma_bd bd; 409 const char *sep; 410 int i; 411 412 cpsw_cpdma_read_bd(sc, slot, &bd); 413 printf("BD Addr: 0x%08x Next: 0x%08x\n", cpsw_cpdma_bd_paddr(sc, slot), bd.next); 414 printf(" BufPtr: 0x%08x BufLen: 0x%08x\n", bd.bufptr, bd.buflen); 415 printf(" BufOff: 0x%08x PktLen: 0x%08x\n", bd.bufoff, bd.pktlen); 416 printf(" Flags: "); 417 sep = ""; 418 for (i = 0; i < 16; ++i) { 419 if (bd.flags & (1 << (15 - i))) { 420 printf("%s%s", sep, flags[i]); 421 sep = ","; 422 } 423 } 424 printf("\n"); 425 if (slot->mbuf) { 426 printf(" Ether: %14D\n", 427 (char *)(slot->mbuf->m_data), " "); 428 printf(" Packet: %16D\n", 429 (char *)(slot->mbuf->m_data) + 14, " "); 430 } 431 } 432 433 #define CPSW_DUMP_SLOT(cs, slot) do { \ 434 IF_DEBUG(sc) { \ 435 cpsw_dump_slot(sc, slot); \ 436 } \ 437 } while (0) 438 439 static void 440 cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q) 441 { 442 struct cpsw_slot *slot; 443 int i = 0; 444 int others = 0; 445 446 STAILQ_FOREACH(slot, q, next) { 447 if (i > 4) 448 ++others; 449 else 450 cpsw_dump_slot(sc, slot); 451 ++i; 452 } 453 if (others) 454 printf(" ... and %d more.\n", others); 455 printf("\n"); 456 } 457 458 #define CPSW_DUMP_QUEUE(sc, q) do { \ 459 IF_DEBUG(sc) { \ 460 cpsw_dump_queue(sc, q); \ 461 } \ 462 } while (0) 463 464 static void 465 cpsw_init_slots(struct cpsw_softc *sc) 466 { 467 struct cpsw_slot *slot; 468 int i; 469 470 STAILQ_INIT(&sc->avail); 471 472 /* Put the slot descriptors onto the global avail list. */ 473 for (i = 0; i < nitems(sc->_slots); i++) { 474 slot = &sc->_slots[i]; 475 slot->bd_offset = cpsw_cpdma_bd_offset(i); 476 STAILQ_INSERT_TAIL(&sc->avail, slot, next); 477 } 478 } 479 480 static int 481 cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested) 482 { 483 const int max_slots = nitems(sc->_slots); 484 struct cpsw_slot *slot; 485 int i; 486 487 if (requested < 0) 488 requested = max_slots; 489 490 for (i = 0; i < requested; ++i) { 491 slot = STAILQ_FIRST(&sc->avail); 492 if (slot == NULL) 493 return (0); 494 if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) { 495 device_printf(sc->dev, "failed to create dmamap\n"); 496 return (ENOMEM); 497 } 498 STAILQ_REMOVE_HEAD(&sc->avail, next); 499 STAILQ_INSERT_TAIL(&queue->avail, slot, next); 500 ++queue->avail_queue_len; 501 ++queue->queue_slots; 502 } 503 return (0); 504 } 505 506 static void 507 cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) 508 { 509 int error; 510 511 if (slot->dmamap) { 512 if (slot->mbuf) 513 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 514 error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap); 515 KASSERT(error == 0, ("Mapping still active")); 516 slot->dmamap = NULL; 517 } 518 if (slot->mbuf) { 519 m_freem(slot->mbuf); 520 slot->mbuf = NULL; 521 } 522 } 523 524 static void 525 cpsw_reset(struct cpsw_softc *sc) 526 { 527 int i; 528 529 callout_stop(&sc->watchdog.callout); 530 531 /* Reset RMII/RGMII wrapper. */ 532 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1); 533 while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1) 534 ; 535 536 /* Disable TX and RX interrupts for all cores. */ 537 for (i = 0; i < 3; ++i) { 538 cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00); 539 cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00); 540 cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00); 541 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00); 542 } 543 544 /* Reset CPSW subsystem. */ 545 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1); 546 while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1) 547 ; 548 549 /* Reset Sliver port 1 and 2 */ 550 for (i = 0; i < 2; i++) { 551 /* Reset */ 552 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1); 553 while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1) 554 ; 555 } 556 557 /* Reset DMA controller. */ 558 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1); 559 while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1) 560 ; 561 562 /* Disable TX & RX DMA */ 563 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0); 564 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0); 565 566 /* Clear all queues. */ 567 for (i = 0; i < 8; i++) { 568 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0); 569 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0); 570 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0); 571 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0); 572 } 573 574 /* Clear all interrupt Masks */ 575 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF); 576 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF); 577 } 578 579 static void 580 cpsw_init(struct cpsw_softc *sc) 581 { 582 struct cpsw_slot *slot; 583 uint32_t reg; 584 585 /* Clear ALE */ 586 cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL); 587 588 /* Enable ALE */ 589 reg = CPSW_ALE_CTL_ENABLE; 590 if (sc->dualemac) 591 reg |= CPSW_ALE_CTL_VLAN_AWARE; 592 cpsw_write_4(sc, CPSW_ALE_CONTROL, reg); 593 594 /* Set Host Port Mapping. */ 595 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210); 596 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0); 597 598 /* Initialize ALE: set host port to forwarding(3). */ 599 cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 3); 600 601 cpsw_write_4(sc, CPSW_SS_PTYPE, 0); 602 603 /* Enable statistics for ports 0, 1 and 2 */ 604 cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7); 605 606 /* Experiment: Turn off flow control */ 607 /* This seems to fix the watchdog resets that have plagued 608 earlier versions of this driver; I'm not yet sure if there 609 are negative effects yet. */ 610 cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0); 611 612 /* Make IP hdr aligned with 4 */ 613 cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2); 614 615 /* Initialize RX Buffer Descriptors */ 616 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0); 617 618 /* Enable TX & RX DMA */ 619 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1); 620 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1); 621 622 /* Enable Interrupts for core 0 */ 623 cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF); 624 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF); 625 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F); 626 627 /* Enable host Error Interrupt */ 628 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3); 629 630 /* Enable interrupts for RX Channel 0 */ 631 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1); 632 633 /* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */ 634 /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */ 635 cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff); 636 637 /* Select MII in GMII_SEL, Internal Delay mode */ 638 //ti_scm_reg_write_4(0x650, 0); 639 640 /* Initialize active queues. */ 641 slot = STAILQ_FIRST(&sc->tx.active); 642 if (slot != NULL) 643 cpsw_write_hdp_slot(sc, &sc->tx, slot); 644 slot = STAILQ_FIRST(&sc->rx.active); 645 if (slot != NULL) 646 cpsw_write_hdp_slot(sc, &sc->rx, slot); 647 cpsw_rx_enqueue(sc); 648 649 /* Activate network interface. */ 650 sc->rx.running = 1; 651 sc->tx.running = 1; 652 sc->watchdog.timer = 0; 653 callout_init(&sc->watchdog.callout, 0); 654 callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); 655 } 656 657 /* 658 * 659 * Device Probe, Attach, Detach. 660 * 661 */ 662 663 static int 664 cpsw_probe(device_t dev) 665 { 666 667 if (!ofw_bus_status_okay(dev)) 668 return (ENXIO); 669 670 if (!ofw_bus_is_compatible(dev, "ti,cpsw")) 671 return (ENXIO); 672 673 device_set_desc(dev, "3-port Switch Ethernet Subsystem"); 674 return (BUS_PROBE_DEFAULT); 675 } 676 677 static int 678 cpsw_intr_attach(struct cpsw_softc *sc) 679 { 680 681 /* Note: We don't use sc->irq_res[2] (TX interrupt) */ 682 if (bus_setup_intr(sc->dev, sc->irq_res[0], 683 INTR_TYPE_NET | INTR_MPSAFE, NULL, cpsw_intr_rx_thresh, 684 sc, &sc->ih_cookie[0]) != 0) { 685 return (-1); 686 } 687 if (bus_setup_intr(sc->dev, sc->irq_res[1], 688 INTR_TYPE_NET | INTR_MPSAFE, NULL, cpsw_intr_rx, 689 sc, &sc->ih_cookie[1]) != 0) { 690 return (-1); 691 } 692 if (bus_setup_intr(sc->dev, sc->irq_res[3], 693 INTR_TYPE_NET | INTR_MPSAFE, NULL, cpsw_intr_misc, 694 sc, &sc->ih_cookie[3]) != 0) { 695 return (-1); 696 } 697 698 return (0); 699 } 700 701 static void 702 cpsw_intr_detach(struct cpsw_softc *sc) 703 { 704 int i; 705 706 for (i = 0; i < CPSW_INTR_COUNT; i++) { 707 if (sc->ih_cookie[i]) { 708 bus_teardown_intr(sc->dev, sc->irq_res[i], 709 sc->ih_cookie[i]); 710 } 711 } 712 } 713 714 static int 715 cpsw_get_fdt_data(struct cpsw_softc *sc, int port) 716 { 717 char *name; 718 int len, phy, vlan; 719 pcell_t phy_id[3], vlan_id; 720 phandle_t child; 721 unsigned long mdio_child_addr; 722 723 /* Find any slave with phy_id */ 724 phy = -1; 725 vlan = -1; 726 for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) { 727 if (OF_getprop_alloc(child, "name", 1, (void **)&name) < 0) 728 continue; 729 if (sscanf(name, "slave@%x", &mdio_child_addr) != 1) { 730 OF_prop_free(name); 731 continue; 732 } 733 OF_prop_free(name); 734 if (mdio_child_addr != slave_mdio_addr[port]) 735 continue; 736 737 len = OF_getproplen(child, "phy_id"); 738 if (len / sizeof(pcell_t) == 2) { 739 /* Get phy address from fdt */ 740 if (OF_getencprop(child, "phy_id", phy_id, len) > 0) 741 phy = phy_id[1]; 742 } 743 744 len = OF_getproplen(child, "dual_emac_res_vlan"); 745 if (len / sizeof(pcell_t) == 1) { 746 /* Get phy address from fdt */ 747 if (OF_getencprop(child, "dual_emac_res_vlan", 748 &vlan_id, len) > 0) { 749 vlan = vlan_id; 750 } 751 } 752 753 break; 754 } 755 if (phy == -1) 756 return (ENXIO); 757 sc->port[port].phy = phy; 758 sc->port[port].vlan = vlan; 759 760 return (0); 761 } 762 763 static int 764 cpsw_attach(device_t dev) 765 { 766 bus_dma_segment_t segs[1]; 767 int error, i, nsegs; 768 struct cpsw_softc *sc; 769 uint32_t reg; 770 771 sc = device_get_softc(dev); 772 sc->dev = dev; 773 sc->node = ofw_bus_get_node(dev); 774 getbinuptime(&sc->attach_uptime); 775 776 if (OF_getencprop(sc->node, "active_slave", &sc->active_slave, 777 sizeof(sc->active_slave)) <= 0) { 778 sc->active_slave = 0; 779 } 780 if (sc->active_slave > 1) 781 sc->active_slave = 1; 782 783 if (OF_hasprop(sc->node, "dual_emac")) 784 sc->dualemac = 1; 785 786 for (i = 0; i < CPSW_PORTS; i++) { 787 if (!sc->dualemac && i != sc->active_slave) 788 continue; 789 if (cpsw_get_fdt_data(sc, i) != 0) { 790 device_printf(dev, 791 "failed to get PHY address from FDT\n"); 792 return (ENXIO); 793 } 794 } 795 796 /* Initialize mutexes */ 797 mtx_init(&sc->tx.lock, device_get_nameunit(dev), 798 "cpsw TX lock", MTX_DEF); 799 mtx_init(&sc->rx.lock, device_get_nameunit(dev), 800 "cpsw RX lock", MTX_DEF); 801 802 /* Allocate IRQ resources */ 803 error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res); 804 if (error) { 805 device_printf(dev, "could not allocate IRQ resources\n"); 806 cpsw_detach(dev); 807 return (ENXIO); 808 } 809 810 sc->mem_rid = 0; 811 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 812 &sc->mem_rid, RF_ACTIVE); 813 if (sc->mem_res == NULL) { 814 device_printf(sc->dev, "failed to allocate memory resource\n"); 815 cpsw_detach(dev); 816 return (ENXIO); 817 } 818 819 reg = cpsw_read_4(sc, CPSW_SS_IDVER); 820 device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7), 821 reg & 0xFF, (reg >> 11) & 0x1F); 822 823 cpsw_add_sysctls(sc); 824 825 /* Allocate a busdma tag and DMA safe memory for mbufs. */ 826 error = bus_dma_tag_create( 827 bus_get_dma_tag(sc->dev), /* parent */ 828 1, 0, /* alignment, boundary */ 829 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 830 BUS_SPACE_MAXADDR, /* highaddr */ 831 NULL, NULL, /* filtfunc, filtfuncarg */ 832 MCLBYTES, CPSW_TXFRAGS, /* maxsize, nsegments */ 833 MCLBYTES, 0, /* maxsegsz, flags */ 834 NULL, NULL, /* lockfunc, lockfuncarg */ 835 &sc->mbuf_dtag); /* dmatag */ 836 if (error) { 837 device_printf(dev, "bus_dma_tag_create failed\n"); 838 cpsw_detach(dev); 839 return (error); 840 } 841 842 /* Allocate the null mbuf and pre-sync it. */ 843 sc->null_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 844 memset(sc->null_mbuf->m_data, 0, sc->null_mbuf->m_ext.ext_size); 845 bus_dmamap_create(sc->mbuf_dtag, 0, &sc->null_mbuf_dmamap); 846 bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->null_mbuf_dmamap, 847 sc->null_mbuf, segs, &nsegs, BUS_DMA_NOWAIT); 848 bus_dmamap_sync(sc->mbuf_dtag, sc->null_mbuf_dmamap, 849 BUS_DMASYNC_PREWRITE); 850 sc->null_mbuf_paddr = segs[0].ds_addr; 851 852 cpsw_init_slots(sc); 853 854 /* Allocate slots to TX and RX queues. */ 855 STAILQ_INIT(&sc->rx.avail); 856 STAILQ_INIT(&sc->rx.active); 857 STAILQ_INIT(&sc->tx.avail); 858 STAILQ_INIT(&sc->tx.active); 859 // For now: 128 slots to TX, rest to RX. 860 // XXX TODO: start with 32/64 and grow dynamically based on demand. 861 if (cpsw_add_slots(sc, &sc->tx, 128) || 862 cpsw_add_slots(sc, &sc->rx, -1)) { 863 device_printf(dev, "failed to allocate dmamaps\n"); 864 cpsw_detach(dev); 865 return (ENOMEM); 866 } 867 device_printf(dev, "Initial queue size TX=%d RX=%d\n", 868 sc->tx.queue_slots, sc->rx.queue_slots); 869 870 sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0); 871 sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0); 872 873 if (cpsw_intr_attach(sc) == -1) { 874 device_printf(dev, "failed to setup interrupts\n"); 875 cpsw_detach(dev); 876 return (ENXIO); 877 } 878 879 /* Reset the controller. */ 880 cpsw_reset(sc); 881 cpsw_init(sc); 882 883 for (i = 0; i < CPSW_PORTS; i++) { 884 if (!sc->dualemac && i != sc->active_slave) 885 continue; 886 sc->port[i].dev = device_add_child(dev, "cpsw", i); 887 if (sc->port[i].dev == NULL) { 888 cpsw_detach(dev); 889 return (ENXIO); 890 } 891 } 892 bus_generic_attach(dev); 893 894 return (0); 895 } 896 897 static int 898 cpsw_detach(device_t dev) 899 { 900 struct cpsw_softc *sc; 901 int error, i; 902 903 bus_generic_detach(dev); 904 sc = device_get_softc(dev); 905 906 for (i = 0; i < CPSW_PORTS; i++) { 907 if (sc->port[i].dev) 908 device_delete_child(dev, sc->port[i].dev); 909 } 910 911 if (device_is_attached(dev)) { 912 callout_stop(&sc->watchdog.callout); 913 callout_drain(&sc->watchdog.callout); 914 } 915 916 /* Stop and release all interrupts */ 917 cpsw_intr_detach(sc); 918 919 /* Free dmamaps and mbufs */ 920 for (i = 0; i < nitems(sc->_slots); ++i) 921 cpsw_free_slot(sc, &sc->_slots[i]); 922 923 /* Free null mbuf. */ 924 if (sc->null_mbuf_dmamap) { 925 bus_dmamap_unload(sc->mbuf_dtag, sc->null_mbuf_dmamap); 926 error = bus_dmamap_destroy(sc->mbuf_dtag, sc->null_mbuf_dmamap); 927 KASSERT(error == 0, ("Mapping still active")); 928 m_freem(sc->null_mbuf); 929 } 930 931 /* Free DMA tag */ 932 if (sc->mbuf_dtag) { 933 error = bus_dma_tag_destroy(sc->mbuf_dtag); 934 KASSERT(error == 0, ("Unable to destroy DMA tag")); 935 } 936 937 /* Free IO memory handler */ 938 if (sc->mem_res != NULL) 939 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); 940 bus_release_resources(dev, irq_res_spec, sc->irq_res); 941 942 /* Destroy mutexes */ 943 mtx_destroy(&sc->rx.lock); 944 mtx_destroy(&sc->tx.lock); 945 946 return (0); 947 } 948 949 static phandle_t 950 cpsw_get_node(device_t bus, device_t dev) 951 { 952 953 /* Share controller node with port device. */ 954 return (ofw_bus_get_node(bus)); 955 } 956 957 static int 958 cpswp_probe(device_t dev) 959 { 960 961 if (device_get_unit(dev) > 1) { 962 device_printf(dev, "Only two ports are supported.\n"); 963 return (ENXIO); 964 } 965 device_set_desc(dev, "Ethernet Switch Port"); 966 967 return (BUS_PROBE_DEFAULT); 968 } 969 970 static int 971 cpswp_attach(device_t dev) 972 { 973 int error; 974 struct ifnet *ifp; 975 struct cpswp_softc *sc; 976 uint32_t reg; 977 uint8_t mac_addr[ETHER_ADDR_LEN]; 978 979 sc = device_get_softc(dev); 980 sc->dev = dev; 981 sc->pdev = device_get_parent(dev); 982 sc->swsc = device_get_softc(sc->pdev); 983 sc->unit = device_get_unit(dev); 984 sc->phy = sc->swsc->port[sc->unit].phy; 985 sc->vlan = sc->swsc->port[sc->unit].vlan; 986 if (sc->swsc->dualemac && sc->vlan == -1) 987 sc->vlan = sc->unit + 1; 988 989 if (sc->unit == 0) { 990 sc->physel = MDIOUSERPHYSEL0; 991 sc->phyaccess = MDIOUSERACCESS0; 992 } else { 993 sc->physel = MDIOUSERPHYSEL1; 994 sc->phyaccess = MDIOUSERACCESS1; 995 } 996 997 mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock", 998 MTX_DEF); 999 1000 /* Allocate network interface */ 1001 ifp = sc->ifp = if_alloc(IFT_ETHER); 1002 if (ifp == NULL) { 1003 cpswp_detach(dev); 1004 return (ENXIO); 1005 } 1006 1007 if_initname(ifp, device_get_name(sc->dev), sc->unit); 1008 ifp->if_softc = sc; 1009 ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST; 1010 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN? 1011 ifp->if_capenable = ifp->if_capabilities; 1012 1013 ifp->if_init = cpswp_init; 1014 ifp->if_start = cpswp_start; 1015 ifp->if_ioctl = cpswp_ioctl; 1016 1017 ifp->if_snd.ifq_drv_maxlen = sc->swsc->tx.queue_slots; 1018 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 1019 IFQ_SET_READY(&ifp->if_snd); 1020 1021 /* Get high part of MAC address from control module (mac_id[0|1]_hi) */ 1022 ti_scm_reg_read_4(0x634 + sc->unit * 8, ®); 1023 mac_addr[0] = reg & 0xFF; 1024 mac_addr[1] = (reg >> 8) & 0xFF; 1025 mac_addr[2] = (reg >> 16) & 0xFF; 1026 mac_addr[3] = (reg >> 24) & 0xFF; 1027 1028 /* Get low part of MAC address from control module (mac_id[0|1]_lo) */ 1029 ti_scm_reg_read_4(0x630 + sc->unit * 8, ®); 1030 mac_addr[4] = reg & 0xFF; 1031 mac_addr[5] = (reg >> 8) & 0xFF; 1032 1033 error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd, 1034 cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0); 1035 if (error) { 1036 device_printf(dev, "attaching PHYs failed\n"); 1037 cpswp_detach(dev); 1038 return (error); 1039 } 1040 sc->mii = device_get_softc(sc->miibus); 1041 1042 /* Select PHY and enable interrupts */ 1043 cpsw_write_4(sc->swsc, sc->physel, 1044 MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F)); 1045 1046 ether_ifattach(sc->ifp, mac_addr); 1047 callout_init(&sc->mii_callout, 0); 1048 1049 return (0); 1050 } 1051 1052 static int 1053 cpswp_detach(device_t dev) 1054 { 1055 struct cpswp_softc *sc; 1056 1057 sc = device_get_softc(dev); 1058 CPSWP_DEBUGF(sc, ("")); 1059 if (device_is_attached(dev)) { 1060 ether_ifdetach(sc->ifp); 1061 CPSW_PORT_LOCK(sc); 1062 cpswp_stop_locked(sc); 1063 CPSW_PORT_UNLOCK(sc); 1064 callout_drain(&sc->mii_callout); 1065 } 1066 1067 bus_generic_detach(dev); 1068 1069 if_free(sc->ifp); 1070 mtx_destroy(&sc->lock); 1071 1072 return (0); 1073 } 1074 1075 /* 1076 * 1077 * Init/Shutdown. 1078 * 1079 */ 1080 1081 static int 1082 cpsw_ports_down(struct cpsw_softc *sc) 1083 { 1084 struct cpswp_softc *psc; 1085 struct ifnet *ifp1, *ifp2; 1086 1087 if (!sc->dualemac) 1088 return (1); 1089 psc = device_get_softc(sc->port[0].dev); 1090 ifp1 = psc->ifp; 1091 psc = device_get_softc(sc->port[1].dev); 1092 ifp2 = psc->ifp; 1093 if ((ifp1->if_flags & IFF_UP) == 0 && (ifp2->if_flags & IFF_UP) == 0) 1094 return (1); 1095 1096 return (0); 1097 } 1098 1099 static void 1100 cpswp_init(void *arg) 1101 { 1102 struct cpswp_softc *sc = arg; 1103 1104 CPSWP_DEBUGF(sc, ("")); 1105 CPSW_PORT_LOCK(sc); 1106 cpswp_init_locked(arg); 1107 CPSW_PORT_UNLOCK(sc); 1108 } 1109 1110 static void 1111 cpswp_init_locked(void *arg) 1112 { 1113 struct cpswp_softc *sc = arg; 1114 struct ifnet *ifp; 1115 uint32_t reg; 1116 1117 CPSWP_DEBUGF(sc, ("")); 1118 CPSW_PORT_LOCK_ASSERT(sc); 1119 ifp = sc->ifp; 1120 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1121 return; 1122 1123 getbinuptime(&sc->init_uptime); 1124 1125 if (!sc->swsc->rx.running && !sc->swsc->tx.running) { 1126 /* Reset the controller. */ 1127 cpsw_reset(sc->swsc); 1128 cpsw_init(sc->swsc); 1129 } 1130 1131 /* Set Slave Mapping. */ 1132 cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210); 1133 cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1), 1134 0x33221100); 1135 cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2); 1136 /* Enable MAC RX/TX modules. */ 1137 /* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */ 1138 /* Huh? Docs call bit 0 "Loopback" some places, "FullDuplex" others. */ 1139 reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); 1140 reg |= CPSW_SL_MACTL_GMII_ENABLE; 1141 cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); 1142 1143 /* Initialize ALE: set port to forwarding(3), initialize addrs */ 1144 cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1), 3); 1145 cpswp_ale_update_addresses(sc, 1); 1146 1147 if (sc->swsc->dualemac) { 1148 /* Set Port VID. */ 1149 cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1), 1150 sc->vlan & 0xfff); 1151 cpsw_ale_update_vlan_table(sc->swsc, sc->vlan, 1152 (1 << (sc->unit + 1)) | (1 << 0), /* Member list */ 1153 (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */ 1154 (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */ 1155 } 1156 1157 mii_mediachg(sc->mii); 1158 callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); 1159 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1160 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1161 } 1162 1163 static int 1164 cpsw_shutdown(device_t dev) 1165 { 1166 struct cpsw_softc *sc; 1167 struct cpswp_softc *psc; 1168 int i; 1169 1170 sc = device_get_softc(dev); 1171 CPSW_DEBUGF(sc, ("")); 1172 for (i = 0; i < CPSW_PORTS; i++) { 1173 if (!sc->dualemac && i != sc->active_slave) 1174 continue; 1175 psc = device_get_softc(sc->port[i].dev); 1176 CPSW_PORT_LOCK(psc); 1177 cpswp_stop_locked(psc); 1178 CPSW_PORT_UNLOCK(psc); 1179 } 1180 1181 return (0); 1182 } 1183 1184 static void 1185 cpsw_rx_teardown_locked(struct cpsw_softc *sc) 1186 { 1187 struct ifnet *ifp; 1188 struct mbuf *received, *next; 1189 int i = 0; 1190 1191 CPSW_DEBUGF(sc, ("starting RX teardown")); 1192 cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0); 1193 for (;;) { 1194 received = cpsw_rx_dequeue(sc); 1195 CPSW_GLOBAL_UNLOCK(sc); 1196 while (received != NULL) { 1197 next = received->m_nextpkt; 1198 received->m_nextpkt = NULL; 1199 ifp = received->m_pkthdr.rcvif; 1200 (*ifp->if_input)(ifp, received); 1201 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1202 received = next; 1203 } 1204 CPSW_GLOBAL_LOCK(sc); 1205 if (!sc->rx.running) { 1206 CPSW_DEBUGF(sc, 1207 ("finished RX teardown (%d retries)", i)); 1208 return; 1209 } 1210 if (++i > 10) { 1211 device_printf(sc->dev, 1212 "Unable to cleanly shutdown receiver\n"); 1213 return; 1214 } 1215 DELAY(10); 1216 } 1217 } 1218 1219 static void 1220 cpsw_tx_teardown_locked(struct cpsw_softc *sc) 1221 { 1222 int i = 0; 1223 1224 CPSW_DEBUGF(sc, ("starting TX teardown")); 1225 cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0); 1226 cpsw_tx_dequeue(sc); 1227 while (sc->tx.running && ++i < 10) { 1228 DELAY(10); 1229 cpsw_tx_dequeue(sc); 1230 } 1231 if (sc->tx.running) { 1232 device_printf(sc->dev, 1233 "Unable to cleanly shutdown transmitter\n"); 1234 } 1235 CPSW_DEBUGF(sc, ("finished TX teardown (%d retries, %d idle buffers)", 1236 i, sc->tx.active_queue_len)); 1237 } 1238 1239 static void 1240 cpswp_stop_locked(struct cpswp_softc *sc) 1241 { 1242 struct ifnet *ifp; 1243 uint32_t reg; 1244 1245 ifp = sc->ifp; 1246 CPSWP_DEBUGF(sc, ("")); 1247 CPSW_PORT_LOCK_ASSERT(sc); 1248 1249 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1250 return; 1251 1252 /* Disable interface */ 1253 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1254 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1255 1256 /* Stop ticker */ 1257 callout_stop(&sc->mii_callout); 1258 1259 /* Tear down the RX/TX queues. */ 1260 if (cpsw_ports_down(sc->swsc)) { 1261 CPSW_GLOBAL_LOCK(sc->swsc); 1262 cpsw_rx_teardown_locked(sc->swsc); 1263 cpsw_tx_teardown_locked(sc->swsc); 1264 CPSW_GLOBAL_UNLOCK(sc->swsc); 1265 } 1266 1267 /* Stop MAC RX/TX modules. */ 1268 reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); 1269 reg &= ~CPSW_SL_MACTL_GMII_ENABLE; 1270 cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); 1271 1272 if (cpsw_ports_down(sc->swsc)) { 1273 /* Capture stats before we reset controller. */ 1274 cpsw_stats_collect(sc->swsc); 1275 1276 cpsw_reset(sc->swsc); 1277 cpsw_init(sc->swsc); 1278 } 1279 } 1280 1281 /* 1282 * Suspend/Resume. 1283 */ 1284 1285 static int 1286 cpsw_suspend(device_t dev) 1287 { 1288 struct cpsw_softc *sc; 1289 struct cpswp_softc *psc; 1290 int i; 1291 1292 sc = device_get_softc(dev); 1293 CPSW_DEBUGF(sc, ("")); 1294 for (i = 0; i < CPSW_PORTS; i++) { 1295 if (!sc->dualemac && i != sc->active_slave) 1296 continue; 1297 psc = device_get_softc(sc->port[i].dev); 1298 CPSW_PORT_LOCK(psc); 1299 cpswp_stop_locked(psc); 1300 CPSW_PORT_UNLOCK(psc); 1301 } 1302 1303 return (0); 1304 } 1305 1306 static int 1307 cpsw_resume(device_t dev) 1308 { 1309 struct cpsw_softc *sc; 1310 1311 sc = device_get_softc(dev); 1312 CPSW_DEBUGF(sc, ("UNIMPLEMENTED")); 1313 1314 return (0); 1315 } 1316 1317 /* 1318 * 1319 * IOCTL 1320 * 1321 */ 1322 1323 static void 1324 cpsw_set_promisc(struct cpswp_softc *sc, int set) 1325 { 1326 uint32_t reg; 1327 1328 /* 1329 * Enabling promiscuous mode requires ALE_BYPASS to be enabled. 1330 * That disables the ALE forwarding logic and causes every 1331 * packet to be sent only to the host port. In bypass mode, 1332 * the ALE processes host port transmit packets the same as in 1333 * normal mode. 1334 */ 1335 reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL); 1336 reg &= ~CPSW_ALE_CTL_BYPASS; 1337 if (set) 1338 reg |= CPSW_ALE_CTL_BYPASS; 1339 cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg); 1340 } 1341 1342 static void 1343 cpsw_set_allmulti(struct cpswp_softc *sc, int set) 1344 { 1345 if (set) { 1346 printf("All-multicast mode unimplemented\n"); 1347 } 1348 } 1349 1350 static int 1351 cpswp_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1352 { 1353 struct cpswp_softc *sc; 1354 struct ifreq *ifr; 1355 int error; 1356 uint32_t changed; 1357 1358 error = 0; 1359 sc = ifp->if_softc; 1360 ifr = (struct ifreq *)data; 1361 1362 switch (command) { 1363 case SIOCSIFFLAGS: 1364 CPSW_PORT_LOCK(sc); 1365 if (ifp->if_flags & IFF_UP) { 1366 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1367 changed = ifp->if_flags ^ sc->if_flags; 1368 CPSWP_DEBUGF(sc, 1369 ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", 1370 changed)); 1371 if (changed & IFF_PROMISC) 1372 cpsw_set_promisc(sc, 1373 ifp->if_flags & IFF_PROMISC); 1374 if (changed & IFF_ALLMULTI) 1375 cpsw_set_allmulti(sc, 1376 ifp->if_flags & IFF_ALLMULTI); 1377 } else { 1378 CPSWP_DEBUGF(sc, 1379 ("SIOCSIFFLAGS: UP but not RUNNING; starting up")); 1380 cpswp_init_locked(sc); 1381 } 1382 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1383 CPSWP_DEBUGF(sc, 1384 ("SIOCSIFFLAGS: not UP but RUNNING; shutting down")); 1385 cpswp_stop_locked(sc); 1386 } 1387 1388 sc->if_flags = ifp->if_flags; 1389 CPSW_PORT_UNLOCK(sc); 1390 break; 1391 case SIOCADDMULTI: 1392 cpswp_ale_update_addresses(sc, 0); 1393 break; 1394 case SIOCDELMULTI: 1395 /* Ugh. DELMULTI doesn't provide the specific address 1396 being removed, so the best we can do is remove 1397 everything and rebuild it all. */ 1398 cpswp_ale_update_addresses(sc, 1); 1399 break; 1400 case SIOCGIFMEDIA: 1401 case SIOCSIFMEDIA: 1402 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command); 1403 break; 1404 default: 1405 error = ether_ioctl(ifp, command, data); 1406 } 1407 return (error); 1408 } 1409 1410 /* 1411 * 1412 * MIIBUS 1413 * 1414 */ 1415 static int 1416 cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg) 1417 { 1418 uint32_t r, retries = CPSW_MIIBUS_RETRIES; 1419 1420 while (--retries) { 1421 r = cpsw_read_4(sc, reg); 1422 if ((r & MDIO_PHYACCESS_GO) == 0) 1423 return (1); 1424 DELAY(CPSW_MIIBUS_DELAY); 1425 } 1426 1427 return (0); 1428 } 1429 1430 static int 1431 cpswp_miibus_readreg(device_t dev, int phy, int reg) 1432 { 1433 struct cpswp_softc *sc; 1434 uint32_t cmd, r; 1435 1436 sc = device_get_softc(dev); 1437 if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { 1438 device_printf(dev, "MDIO not ready to read\n"); 1439 return (0); 1440 } 1441 1442 /* Set GO, reg, phy */ 1443 cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16; 1444 cpsw_write_4(sc->swsc, sc->phyaccess, cmd); 1445 1446 if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { 1447 device_printf(dev, "MDIO timed out during read\n"); 1448 return (0); 1449 } 1450 1451 r = cpsw_read_4(sc->swsc, sc->phyaccess); 1452 if ((r & MDIO_PHYACCESS_ACK) == 0) { 1453 device_printf(dev, "Failed to read from PHY.\n"); 1454 r = 0; 1455 } 1456 return (r & 0xFFFF); 1457 } 1458 1459 static int 1460 cpswp_miibus_writereg(device_t dev, int phy, int reg, int value) 1461 { 1462 struct cpswp_softc *sc; 1463 uint32_t cmd; 1464 1465 sc = device_get_softc(dev); 1466 if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { 1467 device_printf(dev, "MDIO not ready to write\n"); 1468 return (0); 1469 } 1470 1471 /* Set GO, WRITE, reg, phy, and value */ 1472 cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE | 1473 (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF); 1474 cpsw_write_4(sc->swsc, sc->phyaccess, cmd); 1475 1476 if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { 1477 device_printf(dev, "MDIO timed out during write\n"); 1478 return (0); 1479 } 1480 1481 if ((cpsw_read_4(sc->swsc, sc->phyaccess) & MDIO_PHYACCESS_ACK) == 0) 1482 device_printf(dev, "Failed to write to PHY.\n"); 1483 1484 return (0); 1485 } 1486 1487 static void 1488 cpswp_miibus_statchg(device_t dev) 1489 { 1490 struct cpswp_softc *sc; 1491 uint32_t mac_control, reg; 1492 1493 sc = device_get_softc(dev); 1494 CPSWP_DEBUGF(sc, ("")); 1495 1496 reg = CPSW_SL_MACCONTROL(sc->unit); 1497 mac_control = cpsw_read_4(sc->swsc, reg); 1498 mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A | 1499 CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX); 1500 1501 switch(IFM_SUBTYPE(sc->mii->mii_media_active)) { 1502 case IFM_1000_SX: 1503 case IFM_1000_LX: 1504 case IFM_1000_CX: 1505 case IFM_1000_T: 1506 mac_control |= CPSW_SL_MACTL_GIG; 1507 break; 1508 1509 case IFM_100_TX: 1510 mac_control |= CPSW_SL_MACTL_IFCTL_A; 1511 break; 1512 } 1513 if (sc->mii->mii_media_active & IFM_FDX) 1514 mac_control |= CPSW_SL_MACTL_FULLDUPLEX; 1515 1516 cpsw_write_4(sc->swsc, reg, mac_control); 1517 } 1518 1519 /* 1520 * 1521 * Transmit/Receive Packets. 1522 * 1523 */ 1524 static void 1525 cpsw_intr_rx(void *arg) 1526 { 1527 struct cpsw_softc *sc = arg; 1528 struct ifnet *ifp; 1529 struct mbuf *received, *next; 1530 1531 CPSW_RX_LOCK(sc); 1532 received = cpsw_rx_dequeue(sc); 1533 cpsw_rx_enqueue(sc); 1534 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1); 1535 CPSW_RX_UNLOCK(sc); 1536 1537 while (received != NULL) { 1538 next = received->m_nextpkt; 1539 received->m_nextpkt = NULL; 1540 ifp = received->m_pkthdr.rcvif; 1541 (*ifp->if_input)(ifp, received); 1542 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1543 received = next; 1544 } 1545 } 1546 1547 static struct mbuf * 1548 cpsw_rx_dequeue(struct cpsw_softc *sc) 1549 { 1550 struct cpsw_cpdma_bd bd; 1551 struct cpsw_slot *slot; 1552 struct cpswp_softc *psc; 1553 struct mbuf *mb_head, *mb_tail; 1554 int port, removed = 0; 1555 1556 mb_head = mb_tail = NULL; 1557 1558 /* Pull completed packets off hardware RX queue. */ 1559 while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) { 1560 cpsw_cpdma_read_bd(sc, slot, &bd); 1561 if (bd.flags & CPDMA_BD_OWNER) 1562 break; /* Still in use by hardware */ 1563 1564 CPSW_DEBUGF(sc, ("Removing received packet from RX queue")); 1565 ++removed; 1566 STAILQ_REMOVE_HEAD(&sc->rx.active, next); 1567 STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next); 1568 1569 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD); 1570 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1571 1572 if (bd.flags & CPDMA_BD_TDOWNCMPLT) { 1573 CPSW_DEBUGF(sc, ("RX teardown in progress")); 1574 m_freem(slot->mbuf); 1575 slot->mbuf = NULL; 1576 cpsw_write_cp(sc, &sc->rx, 0xfffffffc); 1577 sc->rx.running = 0; 1578 break; 1579 } 1580 1581 cpsw_write_cp_slot(sc, &sc->rx, slot); 1582 1583 port = (bd.flags & CPDMA_BD_PORT_MASK) - 1; 1584 KASSERT(port >= 0 && port <= 1, 1585 ("patcket received with invalid port: %d", port)); 1586 psc = device_get_softc(sc->port[port].dev); 1587 1588 /* Set up mbuf */ 1589 /* TODO: track SOP/EOP bits to assemble a full mbuf 1590 out of received fragments. */ 1591 slot->mbuf->m_data += bd.bufoff; 1592 slot->mbuf->m_len = bd.pktlen - 4; 1593 slot->mbuf->m_pkthdr.len = bd.pktlen - 4; 1594 slot->mbuf->m_flags |= M_PKTHDR; 1595 slot->mbuf->m_pkthdr.rcvif = psc->ifp; 1596 slot->mbuf->m_nextpkt = NULL; 1597 1598 if ((psc->ifp->if_capenable & IFCAP_RXCSUM) != 0) { 1599 /* check for valid CRC by looking into pkt_err[5:4] */ 1600 if ((bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0) { 1601 slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1602 slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1603 slot->mbuf->m_pkthdr.csum_data = 0xffff; 1604 } 1605 } 1606 1607 /* Add mbuf to packet list to be returned. */ 1608 if (mb_tail) { 1609 mb_tail->m_nextpkt = slot->mbuf; 1610 } else { 1611 mb_head = slot->mbuf; 1612 } 1613 mb_tail = slot->mbuf; 1614 slot->mbuf = NULL; 1615 } 1616 1617 if (removed != 0) { 1618 sc->rx.queue_removes += removed; 1619 sc->rx.active_queue_len -= removed; 1620 sc->rx.avail_queue_len += removed; 1621 if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len) 1622 sc->rx.max_avail_queue_len = sc->rx.avail_queue_len; 1623 } 1624 return (mb_head); 1625 } 1626 1627 static void 1628 cpsw_rx_enqueue(struct cpsw_softc *sc) 1629 { 1630 bus_dma_segment_t seg[1]; 1631 struct cpsw_cpdma_bd bd; 1632 struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue); 1633 struct cpsw_slot *slot, *prev_slot = NULL; 1634 struct cpsw_slot *last_old_slot, *first_new_slot; 1635 int error, nsegs, added = 0; 1636 1637 /* Register new mbufs with hardware. */ 1638 while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) { 1639 if (slot->mbuf == NULL) { 1640 slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1641 if (slot->mbuf == NULL) { 1642 device_printf(sc->dev, 1643 "Unable to fill RX queue\n"); 1644 break; 1645 } 1646 slot->mbuf->m_len = 1647 slot->mbuf->m_pkthdr.len = 1648 slot->mbuf->m_ext.ext_size; 1649 } 1650 1651 error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap, 1652 slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT); 1653 1654 KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs)); 1655 KASSERT(error == 0, ("DMA error (error=%d)", error)); 1656 if (error != 0 || nsegs != 1) { 1657 device_printf(sc->dev, 1658 "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n", 1659 __func__, nsegs, error); 1660 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1661 m_freem(slot->mbuf); 1662 slot->mbuf = NULL; 1663 break; 1664 } 1665 1666 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD); 1667 1668 /* Create and submit new rx descriptor*/ 1669 bd.next = 0; 1670 bd.bufptr = seg->ds_addr; 1671 bd.bufoff = 0; 1672 bd.buflen = MCLBYTES - 1; 1673 bd.pktlen = bd.buflen; 1674 bd.flags = CPDMA_BD_OWNER; 1675 cpsw_cpdma_write_bd(sc, slot, &bd); 1676 ++added; 1677 1678 if (prev_slot != NULL) 1679 cpsw_cpdma_write_bd_next(sc, prev_slot, slot); 1680 prev_slot = slot; 1681 STAILQ_REMOVE_HEAD(&sc->rx.avail, next); 1682 sc->rx.avail_queue_len--; 1683 STAILQ_INSERT_TAIL(&tmpqueue, slot, next); 1684 } 1685 1686 if (added == 0) 1687 return; 1688 1689 CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added)); 1690 1691 /* Link new entries to hardware RX queue. */ 1692 last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next); 1693 first_new_slot = STAILQ_FIRST(&tmpqueue); 1694 STAILQ_CONCAT(&sc->rx.active, &tmpqueue); 1695 if (first_new_slot == NULL) { 1696 return; 1697 } else if (last_old_slot == NULL) { 1698 /* Start a fresh queue. */ 1699 cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); 1700 } else { 1701 /* Add buffers to end of current queue. */ 1702 cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot); 1703 /* If underrun, restart queue. */ 1704 if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) { 1705 cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); 1706 } 1707 } 1708 sc->rx.queue_adds += added; 1709 sc->rx.active_queue_len += added; 1710 if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) { 1711 sc->rx.max_active_queue_len = sc->rx.active_queue_len; 1712 } 1713 } 1714 1715 static void 1716 cpswp_start(struct ifnet *ifp) 1717 { 1718 struct cpswp_softc *sc = ifp->if_softc; 1719 1720 CPSW_TX_LOCK(sc->swsc); 1721 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && sc->swsc->tx.running) { 1722 cpswp_tx_enqueue(sc); 1723 cpsw_tx_dequeue(sc->swsc); 1724 } 1725 CPSW_TX_UNLOCK(sc->swsc); 1726 } 1727 1728 static void 1729 cpswp_tx_enqueue(struct cpswp_softc *sc) 1730 { 1731 bus_dma_segment_t segs[CPSW_TXFRAGS]; 1732 struct cpsw_cpdma_bd bd; 1733 struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue); 1734 struct cpsw_slot *slot, *prev_slot = NULL; 1735 struct cpsw_slot *last_old_slot, *first_new_slot; 1736 struct mbuf *m0; 1737 int error, flags, nsegs, seg, added = 0, padlen; 1738 1739 flags = 0; 1740 if (sc->swsc->dualemac) { 1741 flags = CPDMA_BD_TO_PORT | 1742 ((sc->unit + 1) & CPDMA_BD_PORT_MASK); 1743 } 1744 /* Pull pending packets from IF queue and prep them for DMA. */ 1745 while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) { 1746 IF_DEQUEUE(&sc->ifp->if_snd, m0); 1747 if (m0 == NULL) 1748 break; 1749 1750 slot->mbuf = m0; 1751 padlen = ETHER_MIN_LEN - slot->mbuf->m_pkthdr.len; 1752 if (padlen < 0) 1753 padlen = 0; 1754 1755 /* Create mapping in DMA memory */ 1756 error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag, 1757 slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); 1758 /* If the packet is too fragmented, try to simplify. */ 1759 if (error == EFBIG || 1760 (error == 0 && 1761 nsegs + (padlen > 0 ? 1 : 0) > sc->swsc->tx.avail_queue_len)) { 1762 bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); 1763 if (padlen > 0) /* May as well add padding. */ 1764 m_append(slot->mbuf, padlen, 1765 sc->swsc->null_mbuf->m_data); 1766 m0 = m_defrag(slot->mbuf, M_NOWAIT); 1767 if (m0 == NULL) { 1768 device_printf(sc->dev, 1769 "Can't defragment packet; dropping\n"); 1770 m_freem(slot->mbuf); 1771 } else { 1772 CPSWP_DEBUGF(sc, 1773 ("Requeueing defragmented packet")); 1774 IF_PREPEND(&sc->ifp->if_snd, m0); 1775 } 1776 slot->mbuf = NULL; 1777 continue; 1778 } 1779 if (error != 0) { 1780 device_printf(sc->dev, 1781 "%s: Can't setup DMA (error=%d), dropping packet\n", 1782 __func__, error); 1783 bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); 1784 m_freem(slot->mbuf); 1785 slot->mbuf = NULL; 1786 break; 1787 } 1788 1789 bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap, 1790 BUS_DMASYNC_PREWRITE); 1791 1792 CPSWP_DEBUGF(sc, 1793 ("Queueing TX packet: %d segments + %d pad bytes", 1794 nsegs, padlen)); 1795 1796 slot->ifp = sc->ifp; 1797 /* If there is only one segment, the for() loop 1798 * gets skipped and the single buffer gets set up 1799 * as both SOP and EOP. */ 1800 /* Start by setting up the first buffer */ 1801 bd.next = 0; 1802 bd.bufptr = segs[0].ds_addr; 1803 bd.bufoff = 0; 1804 bd.buflen = segs[0].ds_len; 1805 bd.pktlen = m_length(slot->mbuf, NULL) + padlen; 1806 bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER | flags; 1807 for (seg = 1; seg < nsegs; ++seg) { 1808 /* Save the previous buffer (which isn't EOP) */ 1809 cpsw_cpdma_write_bd(sc->swsc, slot, &bd); 1810 if (prev_slot != NULL) { 1811 cpsw_cpdma_write_bd_next(sc->swsc, prev_slot, 1812 slot); 1813 } 1814 prev_slot = slot; 1815 STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); 1816 sc->swsc->tx.avail_queue_len--; 1817 STAILQ_INSERT_TAIL(&tmpqueue, slot, next); 1818 ++added; 1819 slot = STAILQ_FIRST(&sc->swsc->tx.avail); 1820 1821 /* Setup next buffer (which isn't SOP) */ 1822 bd.next = 0; 1823 bd.bufptr = segs[seg].ds_addr; 1824 bd.bufoff = 0; 1825 bd.buflen = segs[seg].ds_len; 1826 bd.pktlen = 0; 1827 bd.flags = CPDMA_BD_OWNER | flags; 1828 } 1829 /* Save the final buffer. */ 1830 if (padlen <= 0) 1831 bd.flags |= CPDMA_BD_EOP; 1832 cpsw_cpdma_write_bd(sc->swsc, slot, &bd); 1833 if (prev_slot != NULL) 1834 cpsw_cpdma_write_bd_next(sc->swsc, prev_slot, slot); 1835 prev_slot = slot; 1836 STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); 1837 sc->swsc->tx.avail_queue_len--; 1838 STAILQ_INSERT_TAIL(&tmpqueue, slot, next); 1839 ++added; 1840 1841 if (padlen > 0) { 1842 slot = STAILQ_FIRST(&sc->swsc->tx.avail); 1843 STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); 1844 sc->swsc->tx.avail_queue_len--; 1845 STAILQ_INSERT_TAIL(&tmpqueue, slot, next); 1846 ++added; 1847 1848 /* Setup buffer of null pad bytes (definitely EOP) */ 1849 cpsw_cpdma_write_bd_next(sc->swsc, prev_slot, slot); 1850 prev_slot = slot; 1851 bd.next = 0; 1852 bd.bufptr = sc->swsc->null_mbuf_paddr; 1853 bd.bufoff = 0; 1854 bd.buflen = padlen; 1855 bd.pktlen = 0; 1856 bd.flags = CPDMA_BD_EOP | CPDMA_BD_OWNER | flags; 1857 cpsw_cpdma_write_bd(sc->swsc, slot, &bd); 1858 ++nsegs; 1859 } 1860 1861 if (nsegs > sc->swsc->tx.longest_chain) 1862 sc->swsc->tx.longest_chain = nsegs; 1863 1864 // TODO: Should we defer the BPF tap until 1865 // after all packets are queued? 1866 BPF_MTAP(sc->ifp, m0); 1867 } 1868 1869 /* Attach the list of new buffers to the hardware TX queue. */ 1870 last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next); 1871 first_new_slot = STAILQ_FIRST(&tmpqueue); 1872 STAILQ_CONCAT(&sc->swsc->tx.active, &tmpqueue); 1873 if (first_new_slot == NULL) { 1874 return; 1875 } else if (last_old_slot == NULL) { 1876 /* Start a fresh queue. */ 1877 sc->swsc->last_hdp = cpsw_cpdma_bd_paddr(sc->swsc, first_new_slot); 1878 cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot); 1879 } else { 1880 /* Add buffers to end of current queue. */ 1881 cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot, 1882 first_new_slot); 1883 /* If underrun, restart queue. */ 1884 if (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) & 1885 CPDMA_BD_EOQ) { 1886 sc->swsc->last_hdp = cpsw_cpdma_bd_paddr(sc->swsc, first_new_slot); 1887 cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, 1888 first_new_slot); 1889 } 1890 } 1891 sc->swsc->tx.queue_adds += added; 1892 sc->swsc->tx.active_queue_len += added; 1893 if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) { 1894 sc->swsc->tx.max_active_queue_len = sc->swsc->tx.active_queue_len; 1895 } 1896 } 1897 1898 static int 1899 cpsw_tx_dequeue(struct cpsw_softc *sc) 1900 { 1901 struct cpsw_slot *slot, *last_removed_slot = NULL; 1902 struct cpsw_cpdma_bd bd; 1903 uint32_t flags, removed = 0; 1904 1905 slot = STAILQ_FIRST(&sc->tx.active); 1906 if (slot == NULL && cpsw_read_cp(sc, &sc->tx) == 0xfffffffc) { 1907 CPSW_DEBUGF(sc, ("TX teardown of an empty queue")); 1908 cpsw_write_cp(sc, &sc->tx, 0xfffffffc); 1909 sc->tx.running = 0; 1910 return (0); 1911 } 1912 1913 /* Pull completed buffers off the hardware TX queue. */ 1914 while (slot != NULL) { 1915 flags = cpsw_cpdma_read_bd_flags(sc, slot); 1916 if (flags & CPDMA_BD_OWNER) 1917 break; /* Hardware is still using this packet. */ 1918 1919 CPSW_DEBUGF(sc, ("TX removing completed packet")); 1920 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE); 1921 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1922 m_freem(slot->mbuf); 1923 slot->mbuf = NULL; 1924 if (slot->ifp) 1925 if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1); 1926 1927 /* Dequeue any additional buffers used by this packet. */ 1928 while (slot != NULL && slot->mbuf == NULL) { 1929 STAILQ_REMOVE_HEAD(&sc->tx.active, next); 1930 STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next); 1931 ++removed; 1932 last_removed_slot = slot; 1933 slot = STAILQ_FIRST(&sc->tx.active); 1934 } 1935 1936 /* TearDown complete is only marked on the SOP for the packet. */ 1937 if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) == 1938 (CPDMA_BD_EOP | CPDMA_BD_TDOWNCMPLT)) { 1939 CPSW_DEBUGF(sc, ("TX teardown in progress")); 1940 cpsw_write_cp(sc, &sc->tx, 0xfffffffc); 1941 // TODO: Increment a count of dropped TX packets 1942 sc->tx.running = 0; 1943 break; 1944 } 1945 1946 if ((flags & CPDMA_BD_EOP) == 0) 1947 flags = cpsw_cpdma_read_bd_flags(sc, last_removed_slot); 1948 if ((flags & (CPDMA_BD_EOP | CPDMA_BD_EOQ)) == 1949 (CPDMA_BD_EOP | CPDMA_BD_EOQ)) { 1950 cpsw_cpdma_read_bd(sc, last_removed_slot, &bd); 1951 if (bd.next != 0 && bd.next != sc->last_hdp) { 1952 /* Restart the queue. */ 1953 sc->last_hdp = bd.next; 1954 cpsw_write_4(sc, sc->tx.hdp_offset, bd.next); 1955 } 1956 } 1957 } 1958 1959 if (removed != 0) { 1960 cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot); 1961 sc->tx.queue_removes += removed; 1962 sc->tx.active_queue_len -= removed; 1963 sc->tx.avail_queue_len += removed; 1964 if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len) 1965 sc->tx.max_avail_queue_len = sc->tx.avail_queue_len; 1966 } 1967 return (removed); 1968 } 1969 1970 /* 1971 * 1972 * Miscellaneous interrupts. 1973 * 1974 */ 1975 1976 static void 1977 cpsw_intr_rx_thresh(void *arg) 1978 { 1979 struct cpsw_softc *sc = arg; 1980 uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_RX_THRESH_STAT(0)); 1981 1982 CPSW_DEBUGF(sc, ("stat=%x", stat)); 1983 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0); 1984 } 1985 1986 static void 1987 cpsw_intr_misc_host_error(struct cpsw_softc *sc) 1988 { 1989 uint32_t intstat; 1990 uint32_t dmastat; 1991 int txerr, rxerr, txchan, rxchan; 1992 1993 printf("\n\n"); 1994 device_printf(sc->dev, 1995 "HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n"); 1996 printf("\n\n"); 1997 intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED); 1998 device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat); 1999 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS); 2000 device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat); 2001 2002 txerr = (dmastat >> 20) & 15; 2003 txchan = (dmastat >> 16) & 7; 2004 rxerr = (dmastat >> 12) & 15; 2005 rxchan = (dmastat >> 8) & 7; 2006 2007 switch (txerr) { 2008 case 0: break; 2009 case 1: printf("SOP error on TX channel %d\n", txchan); 2010 break; 2011 case 2: printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan); 2012 break; 2013 case 3: printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan); 2014 break; 2015 case 4: printf("Zero Buffer Pointer on TX channel %d\n", txchan); 2016 break; 2017 case 5: printf("Zero Buffer Length on TX channel %d\n", txchan); 2018 break; 2019 case 6: printf("Packet length error on TX channel %d\n", txchan); 2020 break; 2021 default: printf("Unknown error on TX channel %d\n", txchan); 2022 break; 2023 } 2024 2025 if (txerr != 0) { 2026 printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 2027 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan))); 2028 printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 2029 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan))); 2030 cpsw_dump_queue(sc, &sc->tx.active); 2031 } 2032 2033 switch (rxerr) { 2034 case 0: break; 2035 case 2: printf("Ownership bit not set on RX channel %d\n", rxchan); 2036 break; 2037 case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan); 2038 break; 2039 case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan); 2040 break; 2041 case 6: printf("Buffer offset too big on RX channel %d\n", rxchan); 2042 break; 2043 default: printf("Unknown RX error on RX channel %d\n", rxchan); 2044 break; 2045 } 2046 2047 if (rxerr != 0) { 2048 printf("CPSW_CPDMA_RX%d_HDP=0x%x\n", 2049 rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan))); 2050 printf("CPSW_CPDMA_RX%d_CP=0x%x\n", 2051 rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan))); 2052 cpsw_dump_queue(sc, &sc->rx.active); 2053 } 2054 2055 printf("\nALE Table\n"); 2056 cpsw_ale_dump_table(sc); 2057 2058 // XXX do something useful here?? 2059 panic("CPSW HOST ERROR INTERRUPT"); 2060 2061 // Suppress this interrupt in the future. 2062 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat); 2063 printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n"); 2064 // The watchdog will probably reset the controller 2065 // in a little while. It will probably fail again. 2066 } 2067 2068 static void 2069 cpsw_intr_misc(void *arg) 2070 { 2071 struct cpsw_softc *sc = arg; 2072 uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0)); 2073 2074 if (stat & CPSW_WR_C_MISC_EVNT_PEND) 2075 CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented")); 2076 if (stat & CPSW_WR_C_MISC_STAT_PEND) 2077 cpsw_stats_collect(sc); 2078 if (stat & CPSW_WR_C_MISC_HOST_PEND) 2079 cpsw_intr_misc_host_error(sc); 2080 if (stat & CPSW_WR_C_MISC_MDIOLINK) { 2081 cpsw_write_4(sc, MDIOLINKINTMASKED, 2082 cpsw_read_4(sc, MDIOLINKINTMASKED)); 2083 } 2084 if (stat & CPSW_WR_C_MISC_MDIOUSER) { 2085 CPSW_DEBUGF(sc, 2086 ("MDIO operation completed interrupt unimplemented")); 2087 } 2088 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3); 2089 } 2090 2091 /* 2092 * 2093 * Periodic Checks and Watchdog. 2094 * 2095 */ 2096 2097 static void 2098 cpswp_tick(void *msc) 2099 { 2100 struct cpswp_softc *sc = msc; 2101 2102 /* Check for media type change */ 2103 mii_tick(sc->mii); 2104 if (sc->media_status != sc->mii->mii_media.ifm_media) { 2105 printf("%s: media type changed (ifm_media=%x)\n", __func__, 2106 sc->mii->mii_media.ifm_media); 2107 cpswp_ifmedia_upd(sc->ifp); 2108 } 2109 2110 /* Schedule another timeout one second from now */ 2111 callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); 2112 } 2113 2114 static void 2115 cpswp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2116 { 2117 struct cpswp_softc *sc; 2118 struct mii_data *mii; 2119 2120 sc = ifp->if_softc; 2121 CPSWP_DEBUGF(sc, ("")); 2122 CPSW_PORT_LOCK(sc); 2123 2124 mii = sc->mii; 2125 mii_pollstat(mii); 2126 2127 ifmr->ifm_active = mii->mii_media_active; 2128 ifmr->ifm_status = mii->mii_media_status; 2129 CPSW_PORT_UNLOCK(sc); 2130 } 2131 2132 static int 2133 cpswp_ifmedia_upd(struct ifnet *ifp) 2134 { 2135 struct cpswp_softc *sc; 2136 2137 sc = ifp->if_softc; 2138 CPSWP_DEBUGF(sc, ("")); 2139 CPSW_PORT_LOCK(sc); 2140 mii_mediachg(sc->mii); 2141 sc->media_status = sc->mii->mii_media.ifm_media; 2142 CPSW_PORT_UNLOCK(sc); 2143 2144 return (0); 2145 } 2146 2147 static void 2148 cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc) 2149 { 2150 struct cpswp_softc *psc; 2151 int i; 2152 2153 cpsw_debugf_head("CPSW watchdog"); 2154 device_printf(sc->dev, "watchdog timeout\n"); 2155 for (i = 0; i < CPSW_PORTS; i++) { 2156 if (!sc->dualemac && i != sc->active_slave) 2157 continue; 2158 psc = device_get_softc(sc->port[i].dev); 2159 CPSW_PORT_LOCK(psc); 2160 cpswp_stop_locked(psc); 2161 CPSW_PORT_UNLOCK(psc); 2162 } 2163 } 2164 2165 static void 2166 cpsw_tx_watchdog(void *msc) 2167 { 2168 struct cpsw_softc *sc; 2169 2170 sc = msc; 2171 CPSW_GLOBAL_LOCK(sc); 2172 if (sc->tx.active_queue_len == 0 || !sc->tx.running) { 2173 sc->watchdog.timer = 0; /* Nothing to do. */ 2174 } else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) { 2175 sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */ 2176 } else if (cpsw_tx_dequeue(sc) > 0) { 2177 sc->watchdog.timer = 0; /* We just did something. */ 2178 } else { 2179 /* There was something to do but it didn't get done. */ 2180 ++sc->watchdog.timer; 2181 if (sc->watchdog.timer > 5) { 2182 sc->watchdog.timer = 0; 2183 ++sc->watchdog.resets; 2184 cpsw_tx_watchdog_full_reset(sc); 2185 } 2186 } 2187 sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes; 2188 CPSW_GLOBAL_UNLOCK(sc); 2189 2190 /* Schedule another timeout one second from now */ 2191 callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); 2192 } 2193 2194 /* 2195 * 2196 * ALE support routines. 2197 * 2198 */ 2199 2200 static void 2201 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) 2202 { 2203 cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023); 2204 ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0); 2205 ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1); 2206 ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2); 2207 } 2208 2209 static void 2210 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) 2211 { 2212 cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]); 2213 cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]); 2214 cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]); 2215 cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023)); 2216 } 2217 2218 static void 2219 cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc) 2220 { 2221 int i; 2222 uint32_t ale_entry[3]; 2223 2224 /* First four entries are link address and broadcast. */ 2225 for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { 2226 cpsw_ale_read_entry(sc, i, ale_entry); 2227 if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR || 2228 ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) && 2229 ALE_MCAST(ale_entry) == 1) { /* MCast link addr */ 2230 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; 2231 cpsw_ale_write_entry(sc, i, ale_entry); 2232 } 2233 } 2234 } 2235 2236 static int 2237 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan, 2238 uint8_t *mac) 2239 { 2240 int free_index = -1, matching_index = -1, i; 2241 uint32_t ale_entry[3], ale_type; 2242 2243 /* Find a matching entry or a free entry. */ 2244 for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { 2245 cpsw_ale_read_entry(sc, i, ale_entry); 2246 2247 /* Entry Type[61:60] is 0 for free entry */ 2248 if (free_index < 0 && ALE_TYPE(ale_entry) == 0) 2249 free_index = i; 2250 2251 if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) && 2252 (((ale_entry[1] >> 0) & 0xFF) == mac[1]) && 2253 (((ale_entry[0] >>24) & 0xFF) == mac[2]) && 2254 (((ale_entry[0] >>16) & 0xFF) == mac[3]) && 2255 (((ale_entry[0] >> 8) & 0xFF) == mac[4]) && 2256 (((ale_entry[0] >> 0) & 0xFF) == mac[5])) { 2257 matching_index = i; 2258 break; 2259 } 2260 } 2261 2262 if (matching_index < 0) { 2263 if (free_index < 0) 2264 return (ENOMEM); 2265 i = free_index; 2266 } 2267 2268 if (vlan != -1) 2269 ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16; 2270 else 2271 ale_type = ALE_TYPE_ADDR << 28; 2272 2273 /* Set MAC address */ 2274 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 2275 ale_entry[1] = mac[0] << 8 | mac[1]; 2276 2277 /* Entry type[61:60] and Mcast fwd state[63:62] is fw(3). */ 2278 ale_entry[1] |= ALE_MCAST_FWD | ale_type; 2279 2280 /* Set portmask [68:66] */ 2281 ale_entry[2] = (portmap & 7) << 2; 2282 2283 cpsw_ale_write_entry(sc, i, ale_entry); 2284 2285 return 0; 2286 } 2287 2288 static void 2289 cpsw_ale_dump_table(struct cpsw_softc *sc) { 2290 int i; 2291 uint32_t ale_entry[3]; 2292 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { 2293 cpsw_ale_read_entry(sc, i, ale_entry); 2294 switch (ALE_TYPE(ale_entry)) { 2295 case ALE_TYPE_VLAN: 2296 printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], 2297 ale_entry[1], ale_entry[0]); 2298 printf("type: %u ", ALE_TYPE(ale_entry)); 2299 printf("vlan: %u ", ALE_VLAN(ale_entry)); 2300 printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry)); 2301 printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry)); 2302 printf("unreg flood: %u ", ALE_VLAN_UNREGFLOOD(ale_entry)); 2303 printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry)); 2304 printf("\n"); 2305 break; 2306 case ALE_TYPE_ADDR: 2307 case ALE_TYPE_VLAN_ADDR: 2308 printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], 2309 ale_entry[1], ale_entry[0]); 2310 printf("type: %u ", ALE_TYPE(ale_entry)); 2311 printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ", 2312 (ale_entry[1] >> 8) & 0xFF, 2313 (ale_entry[1] >> 0) & 0xFF, 2314 (ale_entry[0] >>24) & 0xFF, 2315 (ale_entry[0] >>16) & 0xFF, 2316 (ale_entry[0] >> 8) & 0xFF, 2317 (ale_entry[0] >> 0) & 0xFF); 2318 printf(ALE_MCAST(ale_entry) ? "mcast " : "ucast "); 2319 if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) 2320 printf("vlan: %u ", ALE_VLAN(ale_entry)); 2321 printf("port: %u ", ALE_PORTS(ale_entry)); 2322 printf("\n"); 2323 break; 2324 } 2325 } 2326 printf("\n"); 2327 } 2328 2329 static int 2330 cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge) 2331 { 2332 uint8_t *mac; 2333 uint32_t ale_entry[3], ale_type, portmask; 2334 struct ifmultiaddr *ifma; 2335 2336 if (sc->swsc->dualemac) { 2337 ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16; 2338 portmask = 1 << (sc->unit + 1) | 1 << 0; 2339 } else { 2340 ale_type = ALE_TYPE_ADDR << 28; 2341 portmask = 7; 2342 } 2343 2344 /* 2345 * Route incoming packets for our MAC address to Port 0 (host). 2346 * For simplicity, keep this entry at table index 0 for port 1 and 2347 * at index 2 for port 2 in the ALE. 2348 */ 2349 if_addr_rlock(sc->ifp); 2350 mac = LLADDR((struct sockaddr_dl *)sc->ifp->if_addr->ifa_addr); 2351 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 2352 ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */ 2353 ale_entry[2] = 0; /* port = 0 */ 2354 cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry); 2355 2356 /* Set outgoing MAC Address for slave port. */ 2357 cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1), 2358 mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]); 2359 cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1), 2360 mac[5] << 8 | mac[4]); 2361 if_addr_runlock(sc->ifp); 2362 2363 /* Keep the broadcast address at table entry 1 (or 3). */ 2364 ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */ 2365 /* ALE_MCAST_FWD, Addr type, upper 16 bits of Mac */ 2366 ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff; 2367 ale_entry[2] = portmask << 2; 2368 cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry); 2369 2370 /* SIOCDELMULTI doesn't specify the particular address 2371 being removed, so we have to remove all and rebuild. */ 2372 if (purge) 2373 cpsw_ale_remove_all_mc_entries(sc->swsc); 2374 2375 /* Set other multicast addrs desired. */ 2376 if_maddr_rlock(sc->ifp); 2377 TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) { 2378 if (ifma->ifma_addr->sa_family != AF_LINK) 2379 continue; 2380 cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan, 2381 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 2382 } 2383 if_maddr_runlock(sc->ifp); 2384 2385 return (0); 2386 } 2387 2388 static int 2389 cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports, 2390 int untag, int mcregflood, int mcunregflood) 2391 { 2392 int free_index, i, matching_index; 2393 uint32_t ale_entry[3]; 2394 2395 free_index = matching_index = -1; 2396 /* Find a matching entry or a free entry. */ 2397 for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) { 2398 cpsw_ale_read_entry(sc, i, ale_entry); 2399 2400 /* Entry Type[61:60] is 0 for free entry */ 2401 if (free_index < 0 && ALE_TYPE(ale_entry) == 0) 2402 free_index = i; 2403 2404 if (ALE_VLAN(ale_entry) == vlan) { 2405 matching_index = i; 2406 break; 2407 } 2408 } 2409 2410 if (matching_index < 0) { 2411 if (free_index < 0) 2412 return (-1); 2413 i = free_index; 2414 } 2415 2416 ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 | 2417 (mcunregflood & 7) << 8 | (ports & 7); 2418 ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16; 2419 ale_entry[2] = 0; 2420 cpsw_ale_write_entry(sc, i, ale_entry); 2421 2422 return (0); 2423 } 2424 2425 /* 2426 * 2427 * Statistics and Sysctls. 2428 * 2429 */ 2430 2431 #if 0 2432 static void 2433 cpsw_stats_dump(struct cpsw_softc *sc) 2434 { 2435 int i; 2436 uint32_t r; 2437 2438 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2439 r = cpsw_read_4(sc, CPSW_STATS_OFFSET + 2440 cpsw_stat_sysctls[i].reg); 2441 CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid, 2442 (intmax_t)sc->shadow_stats[i], r, 2443 (intmax_t)sc->shadow_stats[i] + r)); 2444 } 2445 } 2446 #endif 2447 2448 static void 2449 cpsw_stats_collect(struct cpsw_softc *sc) 2450 { 2451 int i; 2452 uint32_t r; 2453 2454 CPSW_DEBUGF(sc, ("Controller shadow statistics updated.")); 2455 2456 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2457 r = cpsw_read_4(sc, CPSW_STATS_OFFSET + 2458 cpsw_stat_sysctls[i].reg); 2459 sc->shadow_stats[i] += r; 2460 cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg, 2461 r); 2462 } 2463 } 2464 2465 static int 2466 cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS) 2467 { 2468 struct cpsw_softc *sc; 2469 struct cpsw_stat *stat; 2470 uint64_t result; 2471 2472 sc = (struct cpsw_softc *)arg1; 2473 stat = &cpsw_stat_sysctls[oidp->oid_number]; 2474 result = sc->shadow_stats[oidp->oid_number]; 2475 result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg); 2476 return (sysctl_handle_64(oidp, &result, 0, req)); 2477 } 2478 2479 static int 2480 cpsw_stat_attached(SYSCTL_HANDLER_ARGS) 2481 { 2482 struct cpsw_softc *sc; 2483 struct bintime t; 2484 unsigned result; 2485 2486 sc = (struct cpsw_softc *)arg1; 2487 getbinuptime(&t); 2488 bintime_sub(&t, &sc->attach_uptime); 2489 result = t.sec; 2490 return (sysctl_handle_int(oidp, &result, 0, req)); 2491 } 2492 2493 static int 2494 cpsw_stat_uptime(SYSCTL_HANDLER_ARGS) 2495 { 2496 struct cpsw_softc *swsc; 2497 struct cpswp_softc *sc; 2498 struct bintime t; 2499 unsigned result; 2500 2501 swsc = arg1; 2502 sc = device_get_softc(swsc->port[arg2].dev); 2503 if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) { 2504 getbinuptime(&t); 2505 bintime_sub(&t, &sc->init_uptime); 2506 result = t.sec; 2507 } else 2508 result = 0; 2509 return (sysctl_handle_int(oidp, &result, 0, req)); 2510 } 2511 2512 static void 2513 cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, 2514 struct cpsw_queue *queue) 2515 { 2516 struct sysctl_oid_list *parent; 2517 2518 parent = SYSCTL_CHILDREN(node); 2519 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers", 2520 CTLFLAG_RD, &queue->queue_slots, 0, 2521 "Total buffers currently assigned to this queue"); 2522 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers", 2523 CTLFLAG_RD, &queue->active_queue_len, 0, 2524 "Buffers currently registered with hardware controller"); 2525 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers", 2526 CTLFLAG_RD, &queue->max_active_queue_len, 0, 2527 "Max value of activeBuffers since last driver reset"); 2528 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers", 2529 CTLFLAG_RD, &queue->avail_queue_len, 0, 2530 "Buffers allocated to this queue but not currently " 2531 "registered with hardware controller"); 2532 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers", 2533 CTLFLAG_RD, &queue->max_avail_queue_len, 0, 2534 "Max value of availBuffers since last driver reset"); 2535 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued", 2536 CTLFLAG_RD, &queue->queue_adds, 0, 2537 "Total buffers added to queue"); 2538 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued", 2539 CTLFLAG_RD, &queue->queue_removes, 0, 2540 "Total buffers removed from queue"); 2541 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain", 2542 CTLFLAG_RD, &queue->longest_chain, 0, 2543 "Max buffers used for a single packet"); 2544 } 2545 2546 static void 2547 cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, 2548 struct cpsw_softc *sc) 2549 { 2550 struct sysctl_oid_list *parent; 2551 2552 parent = SYSCTL_CHILDREN(node); 2553 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets", 2554 CTLFLAG_RD, &sc->watchdog.resets, 0, 2555 "Total number of watchdog resets"); 2556 } 2557 2558 static void 2559 cpsw_add_sysctls(struct cpsw_softc *sc) 2560 { 2561 struct sysctl_ctx_list *ctx; 2562 struct sysctl_oid *stats_node, *queue_node, *node; 2563 struct sysctl_oid_list *parent, *stats_parent, *queue_parent; 2564 struct sysctl_oid_list *ports_parent, *port_parent; 2565 char port[16]; 2566 int i; 2567 2568 ctx = device_get_sysctl_ctx(sc->dev); 2569 parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 2570 2571 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug", 2572 CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages"); 2573 2574 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs", 2575 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU", 2576 "Time since driver attach"); 2577 2578 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports", 2579 CTLFLAG_RD, NULL, "CPSW Ports Statistics"); 2580 ports_parent = SYSCTL_CHILDREN(node); 2581 for (i = 0; i < CPSW_PORTS; i++) { 2582 if (!sc->dualemac && i != sc->active_slave) 2583 continue; 2584 port[0] = '0' + i; 2585 port[1] = '\0'; 2586 node = SYSCTL_ADD_NODE(ctx, ports_parent, OID_AUTO, 2587 port, CTLFLAG_RD, NULL, "CPSW Port Statistics"); 2588 port_parent = SYSCTL_CHILDREN(node); 2589 SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime", 2590 CTLTYPE_UINT | CTLFLAG_RD, sc, i, 2591 cpsw_stat_uptime, "IU", "Seconds since driver init"); 2592 } 2593 2594 stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", 2595 CTLFLAG_RD, NULL, "CPSW Statistics"); 2596 stats_parent = SYSCTL_CHILDREN(stats_node); 2597 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2598 SYSCTL_ADD_PROC(ctx, stats_parent, i, 2599 cpsw_stat_sysctls[i].oid, 2600 CTLTYPE_U64 | CTLFLAG_RD, sc, 0, 2601 cpsw_stats_sysctl, "IU", 2602 cpsw_stat_sysctls[i].oid); 2603 } 2604 2605 queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue", 2606 CTLFLAG_RD, NULL, "CPSW Queue Statistics"); 2607 queue_parent = SYSCTL_CHILDREN(queue_node); 2608 2609 node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx", 2610 CTLFLAG_RD, NULL, "TX Queue Statistics"); 2611 cpsw_add_queue_sysctls(ctx, node, &sc->tx); 2612 2613 node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx", 2614 CTLFLAG_RD, NULL, "RX Queue Statistics"); 2615 cpsw_add_queue_sysctls(ctx, node, &sc->rx); 2616 2617 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog", 2618 CTLFLAG_RD, NULL, "Watchdog Statistics"); 2619 cpsw_add_watchdog_sysctls(ctx, node, sc); 2620 } 2621