/*-
 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
 * Copyright (c) 2016 Rubicon Communications, LLC (Netgate)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * TI Common Platform Ethernet Switch (CPSW) Driver
 * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs.
 *
 * This controller is documented in the AM335x Technical Reference
 * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM
 * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM.
 *
 * It is basically a single Ethernet port (port 0) wired internally to
 * a 3-port store-and-forward switch connected to two independent
 * "sliver" controllers (port 1 and port 2).  You can operate the
 * controller in a variety of different ways by suitably configuring
 * the slivers and the Address Lookup Engine (ALE) that routes packets
 * between the ports.
 *
 * This code was developed and tested on a BeagleBone with
 * an AM335x SoC.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <arm/ti/ti_scm.h>
#include <arm/ti/am335x/am335x_scm.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include "if_cpswreg.h"
#include "if_cpswvar.h"

#include "miibus_if.h"
/* Device probe/attach/detach. */
static int cpsw_probe(device_t);
static int cpsw_attach(device_t);
static int cpsw_detach(device_t);
static int cpswp_probe(device_t);
static int cpswp_attach(device_t);
static int cpswp_detach(device_t);

static phandle_t cpsw_get_node(device_t, device_t);

/* Device Init/shutdown. */
static int cpsw_shutdown(device_t);
static void cpswp_init(void *);
static void cpswp_init_locked(void *);
static void cpswp_stop_locked(struct cpswp_softc *);

/* Device Suspend/Resume. */
static int cpsw_suspend(device_t);
static int cpsw_resume(device_t);

/* Ioctl. */
static int cpswp_ioctl(struct ifnet *, u_long command, caddr_t data);

static int cpswp_miibus_readreg(device_t, int phy, int reg);
static int cpswp_miibus_writereg(device_t, int phy, int reg, int value);
static void cpswp_miibus_statchg(device_t);

/* Send/Receive packets. */
static void cpsw_intr_rx(void *arg);
static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *);
static void cpsw_rx_enqueue(struct cpsw_softc *);
static void cpswp_start(struct ifnet *);
static void cpswp_tx_enqueue(struct cpswp_softc *);
static int cpsw_tx_dequeue(struct cpsw_softc *);

/* Misc interrupts and watchdog. */
static void cpsw_intr_rx_thresh(void *);
static void cpsw_intr_misc(void *);
static void cpswp_tick(void *);
static void cpswp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int cpswp_ifmedia_upd(struct ifnet *);
static void cpsw_tx_watchdog(void *);

/* ALE support */
static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *);
static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *);
static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *);
static void cpsw_ale_dump_table(struct cpsw_softc *);
static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int,
	int);
static int cpswp_ale_update_addresses(struct cpswp_softc *, int);

/* Statistics and sysctls. */
static void cpsw_add_sysctls(struct cpsw_softc *);
static void cpsw_stats_collect(struct cpsw_softc *);
static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS);

/*
 * Arbitrary limit on number of segments in an mbuf to be transmitted.
 * Packets with more segments than this will be defragmented before
 * they are queued.
 */
#define	CPSW_TXFRAGS	16

/* Shared resources. */
static device_method_t cpsw_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cpsw_probe),
	DEVMETHOD(device_attach,	cpsw_attach),
	DEVMETHOD(device_detach,	cpsw_detach),
	DEVMETHOD(device_shutdown,	cpsw_shutdown),
	DEVMETHOD(device_suspend,	cpsw_suspend),
	DEVMETHOD(device_resume,	cpsw_resume),
	/* OFW methods */
	DEVMETHOD(ofw_bus_get_node,	cpsw_get_node),
	DEVMETHOD_END
};

static driver_t cpsw_driver = {
	"cpswss",
	cpsw_methods,
	sizeof(struct cpsw_softc),
};

static devclass_t cpsw_devclass;

DRIVER_MODULE(cpswss, simplebus, cpsw_driver, cpsw_devclass, 0, 0);
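/*
 * The hardware is exposed as two newbus layers: the "cpswss" device
 * above owns the shared switch core (CPDMA queues, ALE, statistics,
 * interrupts), while each external sliver attaches below it as a
 * "cpsw" port device that carries its own ifnet and miibus.
 */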
/* Port/Slave resources. */
static device_method_t cpswp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cpswp_probe),
	DEVMETHOD(device_attach,	cpswp_attach),
	DEVMETHOD(device_detach,	cpswp_detach),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	cpswp_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cpswp_miibus_writereg),
	DEVMETHOD(miibus_statchg,	cpswp_miibus_statchg),
	DEVMETHOD_END
};

static driver_t cpswp_driver = {
	"cpsw",
	cpswp_methods,
	sizeof(struct cpswp_softc),
};

static devclass_t cpswp_devclass;

DRIVER_MODULE(cpsw, cpswss, cpswp_driver, cpswp_devclass, 0, 0);
DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(cpsw, ether, 1, 1, 1);
MODULE_DEPEND(cpsw, miibus, 1, 1, 1);

static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 };

static struct resource_spec irq_res_spec[] = {
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

/* Number of entries here must match size of stats
 * array in struct cpswp_softc. */
static struct cpsw_stat {
	int	reg;
	char *oid;
} cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = {
	{0x00, "GoodRxFrames"},
	{0x04, "BroadcastRxFrames"},
	{0x08, "MulticastRxFrames"},
	{0x0C, "PauseRxFrames"},
	{0x10, "RxCrcErrors"},
	{0x14, "RxAlignErrors"},
	{0x18, "OversizeRxFrames"},
	{0x1c, "RxJabbers"},
	{0x20, "ShortRxFrames"},
	{0x24, "RxFragments"},
	{0x30, "RxOctets"},
	{0x34, "GoodTxFrames"},
	{0x38, "BroadcastTxFrames"},
	{0x3c, "MulticastTxFrames"},
	{0x40, "PauseTxFrames"},
	{0x44, "DeferredTxFrames"},
	{0x48, "CollisionsTxFrames"},
	{0x4c, "SingleCollisionTxFrames"},
	{0x50, "MultipleCollisionTxFrames"},
	{0x54, "ExcessiveCollisions"},
	{0x58, "LateCollisions"},
	{0x5c, "TxUnderrun"},
	{0x60, "CarrierSenseErrors"},
	{0x64, "TxOctets"},
	{0x68, "RxTx64OctetFrames"},
	{0x6c, "RxTx65to127OctetFrames"},
	{0x70, "RxTx128to255OctetFrames"},
	{0x74, "RxTx256to511OctetFrames"},
	{0x78, "RxTx512to1024OctetFrames"},
	{0x7c, "RxTx1024upOctetFrames"},
	{0x80, "NetOctets"},
	{0x84, "RxStartOfFrameOverruns"},
	{0x88, "RxMiddleOfFrameOverruns"},
	{0x8c, "RxDmaOverruns"}
};
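/*
 * Each offset above indexes a counter in the hardware statistics
 * block; the oid strings become sysctl leaves published by
 * cpsw_add_sysctls(), and cpsw_stats_collect() harvests the counters
 * into the stats array in struct cpswp_softc (hence the size
 * requirement noted above).
 */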
/*
 * Basic debug support.
 */

static void
cpsw_debugf_head(const char *funcname)
{
	int t = (int)(time_second % (24 * 60 * 60));

	printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname);
}

static void
cpsw_debugf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

#define	CPSW_DEBUGF(_sc, a) do {					\
	if ((_sc)->debug) {						\
		cpsw_debugf_head(__func__);				\
		cpsw_debugf a;						\
	}								\
} while (0)

/*
 * Locking macros
 */
#define	CPSW_TX_LOCK(sc) do {						\
		mtx_assert(&(sc)->rx.lock, MA_NOTOWNED);		\
		mtx_lock(&(sc)->tx.lock);				\
} while (0)

#define	CPSW_TX_UNLOCK(sc)	mtx_unlock(&(sc)->tx.lock)
#define	CPSW_TX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->tx.lock, MA_OWNED)

#define	CPSW_RX_LOCK(sc) do {						\
		mtx_assert(&(sc)->tx.lock, MA_NOTOWNED);		\
		mtx_lock(&(sc)->rx.lock);				\
} while (0)

#define	CPSW_RX_UNLOCK(sc)	mtx_unlock(&(sc)->rx.lock)
#define	CPSW_RX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->rx.lock, MA_OWNED)

#define	CPSW_PORT_LOCK(_sc) do {					\
		mtx_assert(&(_sc)->lock, MA_NOTOWNED);			\
		mtx_lock(&(_sc)->lock);					\
} while (0)

#define	CPSW_PORT_UNLOCK(_sc)	mtx_unlock(&(_sc)->lock)
#define	CPSW_PORT_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->lock, MA_OWNED)

/*
 * Read/Write macros
 */
#define	cpsw_read_4(_sc, _reg)		bus_read_4((_sc)->mem_res, (_reg))
#define	cpsw_write_4(_sc, _reg, _val)					\
	bus_write_4((_sc)->mem_res, (_reg), (_val))

#define	cpsw_cpdma_bd_offset(i)	(CPSW_CPPI_RAM_OFFSET + ((i)*16))

#define	cpsw_cpdma_bd_paddr(sc, slot)					\
	BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset)
#define	cpsw_cpdma_read_bd(sc, slot, val)				\
	bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
#define	cpsw_cpdma_write_bd(sc, slot, val)				\
	bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
#define	cpsw_cpdma_write_bd_next(sc, slot, next_slot)			\
	cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot))
#define	cpsw_cpdma_read_bd_flags(sc, slot)				\
	bus_read_2(sc->mem_res, slot->bd_offset + 14)
#define	cpsw_write_hdp_slot(sc, queue, slot)				\
	cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot))
#define	CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0))
#define	cpsw_read_cp(sc, queue)						\
	cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET)
#define	cpsw_write_cp(sc, queue, val)					\
	cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val))
#define	cpsw_write_cp_slot(sc, queue, slot)				\
	cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot))

#if 0
/* XXX temporary function versions for debugging. */
static void
cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
	uint32_t reg = queue->hdp_offset;
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg)));
	cpsw_write_4(sc, reg, v);
}

static void
cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
{
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue)));
	cpsw_write_cp(sc, queue, v);
}
#endif
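/*
 * Layout notes for the descriptor macros above: each CPDMA buffer
 * descriptor occupies 16 bytes of CPPI RAM (hence the i*16 stride)
 * and consists of four 32-bit words: next descriptor pointer, buffer
 * pointer, buffer offset/length, and flags/packet length.  The flags
 * live in the upper half of the last word, which is why
 * cpsw_cpdma_read_bd_flags() does a 2-byte read at offset 14; this
 * assumes the little-endian layout of struct cpsw_cpdma_bd in
 * if_cpswvar.h.
 */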
/*
 * Expanded dump routines for verbose debugging.
 */
static void
cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ",
	    "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun",
	    "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1",
	    "Port0"};
	struct cpsw_cpdma_bd bd;
	const char *sep;
	int i;

	cpsw_cpdma_read_bd(sc, slot, &bd);
	printf("BD Addr: 0x%08x   Next: 0x%08x\n", cpsw_cpdma_bd_paddr(sc, slot), bd.next);
	printf("  BufPtr: 0x%08x   BufLen: 0x%08x\n", bd.bufptr, bd.buflen);
	printf("  BufOff: 0x%08x   PktLen: 0x%08x\n", bd.bufoff, bd.pktlen);
	printf("  Flags: ");
	sep = "";
	for (i = 0; i < 16; ++i) {
		if (bd.flags & (1 << (15 - i))) {
			printf("%s%s", sep, flags[i]);
			sep = ",";
		}
	}
	printf("\n");
	if (slot->mbuf) {
		printf("  Ether:  %14D\n",
		    (char *)(slot->mbuf->m_data), " ");
		printf("  Packet: %16D\n",
		    (char *)(slot->mbuf->m_data) + 14, " ");
	}
}

#define	CPSW_DUMP_SLOT(cs, slot) do {					\
	IF_DEBUG(sc) {							\
		cpsw_dump_slot(sc, slot);				\
	}								\
} while (0)

static void
cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q)
{
	struct cpsw_slot *slot;
	int i = 0;
	int others = 0;

	STAILQ_FOREACH(slot, q, next) {
		if (i > 4)
			++others;
		else
			cpsw_dump_slot(sc, slot);
		++i;
	}
	if (others)
		printf(" ... and %d more.\n", others);
	printf("\n");
}

#define	CPSW_DUMP_QUEUE(sc, q) do {					\
	IF_DEBUG(sc) {							\
		cpsw_dump_queue(sc, q);					\
	}								\
} while (0)

static void
cpsw_init_slots(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot;
	int i;

	STAILQ_INIT(&sc->avail);

	/* Put the slot descriptors onto the global avail list. */
	for (i = 0; i < nitems(sc->_slots); i++) {
		slot = &sc->_slots[i];
		slot->bd_offset = cpsw_cpdma_bd_offset(i);
		STAILQ_INSERT_TAIL(&sc->avail, slot, next);
	}
}

static int
cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested)
{
	const int max_slots = nitems(sc->_slots);
	struct cpsw_slot *slot;
	int i;

	if (requested < 0)
		requested = max_slots;

	for (i = 0; i < requested; ++i) {
		slot = STAILQ_FIRST(&sc->avail);
		if (slot == NULL)
			return (0);
		if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
			device_printf(sc->dev, "failed to create dmamap\n");
			return (ENOMEM);
		}
		STAILQ_REMOVE_HEAD(&sc->avail, next);
		STAILQ_INSERT_TAIL(&queue->avail, slot, next);
		++queue->avail_queue_len;
		++queue->queue_slots;
	}
	return (0);
}

static void
cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	int error;

	if (slot->dmamap) {
		if (slot->mbuf)
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
		error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap);
		KASSERT(error == 0, ("Mapping still active"));
		slot->dmamap = NULL;
	}
	if (slot->mbuf) {
		m_freem(slot->mbuf);
		slot->mbuf = NULL;
	}
}

static void
cpsw_reset(struct cpsw_softc *sc)
{
	int i;

	callout_stop(&sc->watchdog.callout);

	/* Reset RMII/RGMII wrapper. */
	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
		;
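	/*
	 * Each SOFT_RESET register in this function is self-clearing:
	 * writing 1 starts the reset and the bit reads back as 1 until
	 * the block has finished resetting, so we simply spin on it.
	 */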
	/* Disable TX and RX interrupts for all cores. */
	for (i = 0; i < 3; ++i) {
		cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00);
	}

	/* Reset CPSW subsystem. */
	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
		;

	/* Reset Sliver port 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Reset */
		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
			;
	}

	/* Reset DMA controller. */
	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
		;

	/* Disable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0);

	/* Clear all queues. */
	for (i = 0; i < 8; i++) {
		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
	}

	/* Clear all interrupt masks */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
}

static void
cpsw_init(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot;
	uint32_t reg;

	/* Disable the interrupt pacing. */
	reg = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
	reg &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
	cpsw_write_4(sc, CPSW_WR_INT_CONTROL, reg);

	/* Clear ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL);

	/* Enable ALE */
	reg = CPSW_ALE_CTL_ENABLE;
	if (sc->dualemac)
		reg |= CPSW_ALE_CTL_VLAN_AWARE;
	cpsw_write_4(sc, CPSW_ALE_CONTROL, reg);

	/* Set Host Port Mapping. */
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);

	/* Initialize ALE: set host port to forwarding(3). */
	cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 3);

	cpsw_write_4(sc, CPSW_SS_PTYPE, 0);

	/* Enable statistics for ports 0, 1 and 2 */
	cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);

	/* Experiment:  Turn off flow control */
	/* This seems to fix the watchdog resets that have plagued
	   earlier versions of this driver; I'm not yet sure whether
	   there are negative effects. */
	cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);

	/* Offset RX buffers by 2 bytes so the IP header is 4-byte aligned. */
	cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2);

	/* Initialize RX Buffer Descriptors */
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);

	/* Enable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);

	/* Enable Interrupts for core 0 */
	cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);

	/* Enable host Error Interrupt */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3);

	/* Enable interrupts for RX Channel 0 */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff);
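	/*
	 * Note on the TODO above: with CLKDIV=0xff, MDCLK is the input
	 * clock divided by 256, which should keep it well under the
	 * 2.5 MHz MDC limit from the MII spec for the clock rates seen
	 * on these SoCs; computing an exact divisor would be an
	 * optimization, not a correctness fix.
	 */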
	/* Select MII in GMII_SEL, Internal Delay mode */
	//ti_scm_reg_write_4(0x650, 0);

	/* Initialize active queues. */
	slot = STAILQ_FIRST(&sc->tx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->tx, slot);
	slot = STAILQ_FIRST(&sc->rx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->rx, slot);
	cpsw_rx_enqueue(sc);

	/* Activate network interface. */
	sc->rx.running = 1;
	sc->tx.running = 1;
	sc->watchdog.timer = 0;
	callout_init(&sc->watchdog.callout, 0);
	callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
}

/*
 *
 * Device Probe, Attach, Detach.
 *
 */

static int
cpsw_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
		return (ENXIO);

	device_set_desc(dev, "3-port Switch Ethernet Subsystem");
	return (BUS_PROBE_DEFAULT);
}

static int
cpsw_intr_attach(struct cpsw_softc *sc)
{

	/* Note: We don't use sc->irq_res[2] (TX interrupt) */
	if (bus_setup_intr(sc->dev, sc->irq_res[0],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, cpsw_intr_rx_thresh,
	    sc, &sc->ih_cookie[0]) != 0) {
		return (-1);
	}
	if (bus_setup_intr(sc->dev, sc->irq_res[1],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, cpsw_intr_rx,
	    sc, &sc->ih_cookie[1]) != 0) {
		return (-1);
	}
	if (bus_setup_intr(sc->dev, sc->irq_res[3],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, cpsw_intr_misc,
	    sc, &sc->ih_cookie[3]) != 0) {
		return (-1);
	}

	return (0);
}

static void
cpsw_intr_detach(struct cpsw_softc *sc)
{
	int i;

	for (i = 0; i < CPSW_INTR_COUNT; i++) {
		if (sc->ih_cookie[i]) {
			bus_teardown_intr(sc->dev, sc->irq_res[i],
			    sc->ih_cookie[i]);
		}
	}
}

static int
cpsw_get_fdt_data(struct cpsw_softc *sc, int port)
{
	char *name;
	int len, phy, vlan;
	pcell_t phy_id[3], vlan_id;
	phandle_t child;
	unsigned long mdio_child_addr;

	/* Find any slave with phy_id */
	phy = -1;
	vlan = -1;
	for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) {
		if (OF_getprop_alloc(child, "name", 1, (void **)&name) < 0)
			continue;
		if (sscanf(name, "slave@%lx", &mdio_child_addr) != 1) {
			OF_prop_free(name);
			continue;
		}
		OF_prop_free(name);
		if (mdio_child_addr != slave_mdio_addr[port])
			continue;

		len = OF_getproplen(child, "phy_id");
		if (len / sizeof(pcell_t) == 2) {
			/* Get phy address from fdt */
			if (OF_getencprop(child, "phy_id", phy_id, len) > 0)
				phy = phy_id[1];
		}

		len = OF_getproplen(child, "dual_emac_res_vlan");
		if (len / sizeof(pcell_t) == 1) {
			/* Get the reserved VLAN id from fdt */
			if (OF_getencprop(child, "dual_emac_res_vlan",
			    &vlan_id, len) > 0) {
				vlan = vlan_id;
			}
		}

		break;
	}
	if (phy == -1)
		return (ENXIO);
	sc->port[port].phy = phy;
	sc->port[port].vlan = vlan;

	return (0);
}

static int
cpsw_attach(device_t dev)
{
	bus_dma_segment_t segs[1];
	int error, i, nsegs;
	struct cpsw_softc *sc;
	uint32_t reg;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);
	getbinuptime(&sc->attach_uptime);

	if (OF_getencprop(sc->node, "active_slave", &sc->active_slave,
	    sizeof(sc->active_slave)) <= 0) {
		sc->active_slave = 0;
	}
	if (sc->active_slave > 1)
		sc->active_slave = 1;

	if (OF_hasprop(sc->node, "dual_emac"))
		sc->dualemac = 1;

	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		if (cpsw_get_fdt_data(sc, i) != 0) {
			device_printf(dev,
			    "failed to get PHY address from FDT\n");
			return (ENXIO);
		}
	}

	/* Initialize mutexes */
	mtx_init(&sc->tx.lock, device_get_nameunit(dev),
	    "cpsw TX lock", MTX_DEF);
	mtx_init(&sc->rx.lock, device_get_nameunit(dev),
	    "cpsw RX lock", MTX_DEF);

	/* Allocate IRQ resources */
	error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res);
	if (error) {
		device_printf(dev, "could not allocate IRQ resources\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

	sc->mem_rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->mem_rid, RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(sc->dev, "failed to allocate memory resource\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

	reg = cpsw_read_4(sc, CPSW_SS_IDVER);
	device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7),
	    reg & 0xFF, (reg >> 11) & 0x1F);

	cpsw_add_sysctls(sc);

	/* Allocate a busdma tag and DMA-safe memory for mbufs. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES, CPSW_TXFRAGS,	/* maxsize, nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->mbuf_dtag);		/* dmatag */
	if (error) {
		device_printf(dev, "bus_dma_tag_create failed\n");
		cpsw_detach(dev);
		return (error);
	}

	/* Allocate the null mbuf and pre-sync it. */
	sc->null_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	memset(sc->null_mbuf->m_data, 0, sc->null_mbuf->m_ext.ext_size);
	bus_dmamap_create(sc->mbuf_dtag, 0, &sc->null_mbuf_dmamap);
	bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->null_mbuf_dmamap,
	    sc->null_mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
	bus_dmamap_sync(sc->mbuf_dtag, sc->null_mbuf_dmamap,
	    BUS_DMASYNC_PREWRITE);
	sc->null_mbuf_paddr = segs[0].ds_addr;

	cpsw_init_slots(sc);

	/* Allocate slots to TX and RX queues. */
	STAILQ_INIT(&sc->rx.avail);
	STAILQ_INIT(&sc->rx.active);
	STAILQ_INIT(&sc->tx.avail);
	STAILQ_INIT(&sc->tx.active);
	// For now:  128 slots to TX, rest to RX.
	// XXX TODO: start with 32/64 and grow dynamically based on demand.
	if (cpsw_add_slots(sc, &sc->tx, 128) ||
	    cpsw_add_slots(sc, &sc->rx, -1)) {
		device_printf(dev, "failed to allocate dmamaps\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}
	device_printf(dev, "Initial queue size TX=%d RX=%d\n",
	    sc->tx.queue_slots, sc->rx.queue_slots);

	sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0);
	sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0);

	if (cpsw_intr_attach(sc) == -1) {
		device_printf(dev, "failed to setup interrupts\n");
		cpsw_detach(dev);
		return (ENXIO);
	}
	/* Reset the controller. */
	cpsw_reset(sc);
	cpsw_init(sc);

	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		sc->port[i].dev = device_add_child(dev, "cpsw", i);
		if (sc->port[i].dev == NULL) {
			cpsw_detach(dev);
			return (ENXIO);
		}
	}
	bus_generic_attach(dev);

	return (0);
}

static int
cpsw_detach(device_t dev)
{
	struct cpsw_softc *sc;
	int error, i;

	bus_generic_detach(dev);
	sc = device_get_softc(dev);

	for (i = 0; i < CPSW_PORTS; i++) {
		if (sc->port[i].dev)
			device_delete_child(dev, sc->port[i].dev);
	}

	if (device_is_attached(dev)) {
		callout_stop(&sc->watchdog.callout);
		callout_drain(&sc->watchdog.callout);
	}

	/* Stop and release all interrupts */
	cpsw_intr_detach(sc);

	/* Free dmamaps and mbufs */
	for (i = 0; i < nitems(sc->_slots); ++i)
		cpsw_free_slot(sc, &sc->_slots[i]);

	/* Free null mbuf. */
	if (sc->null_mbuf_dmamap) {
		bus_dmamap_unload(sc->mbuf_dtag, sc->null_mbuf_dmamap);
		error = bus_dmamap_destroy(sc->mbuf_dtag, sc->null_mbuf_dmamap);
		KASSERT(error == 0, ("Mapping still active"));
		m_freem(sc->null_mbuf);
	}

	/* Free DMA tag */
	if (sc->mbuf_dtag) {
		error = bus_dma_tag_destroy(sc->mbuf_dtag);
		KASSERT(error == 0, ("Unable to destroy DMA tag"));
	}

	/* Free IO memory handle */
	if (sc->mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res);
	bus_release_resources(dev, irq_res_spec, sc->irq_res);

	/* Destroy mutexes */
	mtx_destroy(&sc->rx.lock);
	mtx_destroy(&sc->tx.lock);

	return (0);
}

static phandle_t
cpsw_get_node(device_t bus, device_t dev)
{

	/* Share controller node with port device. */
	return (ofw_bus_get_node(bus));
}

static int
cpswp_probe(device_t dev)
{

	if (device_get_unit(dev) > 1) {
		device_printf(dev, "Only two ports are supported.\n");
		return (ENXIO);
	}
	device_set_desc(dev, "Ethernet Switch Port");

	return (BUS_PROBE_DEFAULT);
}

static int
cpswp_attach(device_t dev)
{
	int error;
	struct ifnet *ifp;
	struct cpswp_softc *sc;
	uint32_t reg;
	uint8_t mac_addr[ETHER_ADDR_LEN];

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->pdev = device_get_parent(dev);
	sc->swsc = device_get_softc(sc->pdev);
	sc->unit = device_get_unit(dev);
	sc->phy = sc->swsc->port[sc->unit].phy;
	sc->vlan = sc->swsc->port[sc->unit].vlan;
	if (sc->swsc->dualemac && sc->vlan == -1)
		sc->vlan = sc->unit + 1;

	if (sc->unit == 0) {
		sc->physel = MDIOUSERPHYSEL0;
		sc->phyaccess = MDIOUSERACCESS0;
	} else {
		sc->physel = MDIOUSERPHYSEL1;
		sc->phyaccess = MDIOUSERACCESS1;
	}

	mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock",
	    MTX_DEF);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		cpswp_detach(dev);
		return (ENXIO);
	}

	if_initname(ifp, device_get_name(sc->dev), sc->unit);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN?
	ifp->if_capenable = ifp->if_capabilities;
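	/*
	 * Note that the only checksum work the RX path actually does
	 * is mark CSUM_IP_CHECKED/CSUM_IP_VALID on frames whose
	 * pkt_err bits are clear (see cpsw_rx_dequeue()); TX checksums
	 * are left to the stack since if_hwassist is never set.
	 */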
	ifp->if_init = cpswp_init;
	ifp->if_start = cpswp_start;
	ifp->if_ioctl = cpswp_ioctl;

	ifp->if_snd.ifq_drv_maxlen = sc->swsc->tx.queue_slots;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	/* Get high part of MAC address from control module (mac_id[0|1]_hi) */
	ti_scm_reg_read_4(SCM_MAC_ID0_HI + sc->unit * 8, &reg);
	mac_addr[0] = reg & 0xFF;
	mac_addr[1] = (reg >>  8) & 0xFF;
	mac_addr[2] = (reg >> 16) & 0xFF;
	mac_addr[3] = (reg >> 24) & 0xFF;

	/* Get low part of MAC address from control module (mac_id[0|1]_lo) */
	ti_scm_reg_read_4(SCM_MAC_ID0_LO + sc->unit * 8, &reg);
	mac_addr[4] = reg & 0xFF;
	mac_addr[5] = (reg >> 8) & 0xFF;

	error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd,
	    cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		cpswp_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Select PHY and enable interrupts */
	cpsw_write_4(sc->swsc, sc->physel,
	    MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F));

	ether_ifattach(sc->ifp, mac_addr);
	callout_init(&sc->mii_callout, 0);

	return (0);
}

static int
cpswp_detach(device_t dev)
{
	struct cpswp_softc *sc;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc->swsc, (""));
	if (device_is_attached(dev)) {
		ether_ifdetach(sc->ifp);
		CPSW_PORT_LOCK(sc);
		cpswp_stop_locked(sc);
		CPSW_PORT_UNLOCK(sc);
		callout_drain(&sc->mii_callout);
	}

	bus_generic_detach(dev);

	if_free(sc->ifp);
	mtx_destroy(&sc->lock);

	return (0);
}

/*
 *
 * Init/Shutdown.
 *
 */

static int
cpsw_ports_down(struct cpsw_softc *sc)
{
	struct cpswp_softc *psc;
	struct ifnet *ifp1, *ifp2;

	if (!sc->dualemac)
		return (1);
	psc = device_get_softc(sc->port[0].dev);
	ifp1 = psc->ifp;
	psc = device_get_softc(sc->port[1].dev);
	ifp2 = psc->ifp;
	if ((ifp1->if_flags & IFF_UP) == 0 && (ifp2->if_flags & IFF_UP) == 0)
		return (1);

	return (0);
}

static void
cpswp_init(void *arg)
{
	struct cpswp_softc *sc = arg;

	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK(sc);
	cpswp_init_locked(arg);
	CPSW_PORT_UNLOCK(sc);
}

static void
cpswp_init_locked(void *arg)
{
	struct cpswp_softc *sc = arg;
	struct ifnet *ifp;
	uint32_t reg;

	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK_ASSERT(sc);
	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	getbinuptime(&sc->init_uptime);

	if (!sc->swsc->rx.running && !sc->swsc->tx.running) {
		/* Reset the controller. */
		cpsw_reset(sc->swsc);
		cpsw_init(sc->swsc);
	}

	/* Set Slave Mapping. */
	cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210);
	cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1),
	    0x33221100);
	cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2);
	/* Enable MAC RX/TX modules. */
	/* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */
	/* Huh?  Docs call bit 0 "Loopback" some places, "FullDuplex" others. */
	reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
	reg |= CPSW_SL_MACTL_GMII_ENABLE;
	cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);

	/* Initialize ALE: set port to forwarding(3), initialize addrs */
	cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1), 3);
	cpswp_ale_update_addresses(sc, 1);

	if (sc->swsc->dualemac) {
		/* Set Port VID. */
		cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1),
		    sc->vlan & 0xfff);
		cpsw_ale_update_vlan_table(sc->swsc, sc->vlan,
		    (1 << (sc->unit + 1)) | (1 << 0), /* Member list */
		    (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */
		    (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */
	}

	mii_mediachg(sc->mii);
	callout_reset(&sc->mii_callout, hz, cpswp_tick, sc);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static int
cpsw_shutdown(device_t dev)
{
	struct cpsw_softc *sc;
	struct cpswp_softc *psc;
	int i;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc, (""));
	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		psc = device_get_softc(sc->port[i].dev);
		CPSW_PORT_LOCK(psc);
		cpswp_stop_locked(psc);
		CPSW_PORT_UNLOCK(psc);
	}

	return (0);
}

static void
cpsw_rx_teardown_locked(struct cpsw_softc *sc)
{
	struct ifnet *ifp;
	struct mbuf *received, *next;
	int i = 0;

	CPSW_DEBUGF(sc, ("starting RX teardown"));
	cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
	for (;;) {
		received = cpsw_rx_dequeue(sc);
		CPSW_RX_UNLOCK(sc);
		while (received != NULL) {
			next = received->m_nextpkt;
			received->m_nextpkt = NULL;
			ifp = received->m_pkthdr.rcvif;
			(*ifp->if_input)(ifp, received);
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
			received = next;
		}
		CPSW_RX_LOCK(sc);
		if (!sc->rx.running) {
			CPSW_DEBUGF(sc,
			    ("finished RX teardown (%d retries)", i));
			return;
		}
		if (++i > 10) {
			device_printf(sc->dev,
			    "Unable to cleanly shut down receiver\n");
			return;
		}
		DELAY(10);
	}
}

static void
cpsw_tx_teardown_locked(struct cpsw_softc *sc)
{
	int i = 0;

	CPSW_DEBUGF(sc, ("starting TX teardown"));
	cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
	cpsw_tx_dequeue(sc);
	while (sc->tx.running && ++i < 10) {
		DELAY(10);
		cpsw_tx_dequeue(sc);
	}
	if (sc->tx.running) {
		device_printf(sc->dev,
		    "Unable to cleanly shut down transmitter\n");
	}
	CPSW_DEBUGF(sc, ("finished TX teardown (%d retries, %d idle buffers)",
	    i, sc->tx.active_queue_len));
}

static void
cpswp_stop_locked(struct cpswp_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;

	ifp = sc->ifp;
	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Disable interface */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	/* Stop ticker */
	callout_stop(&sc->mii_callout);
	/* Tear down the RX/TX queues. */
	if (cpsw_ports_down(sc->swsc)) {
		CPSW_RX_LOCK(sc->swsc);
		cpsw_rx_teardown_locked(sc->swsc);
		CPSW_RX_UNLOCK(sc->swsc);
		CPSW_TX_LOCK(sc->swsc);
		cpsw_tx_teardown_locked(sc->swsc);
		CPSW_TX_UNLOCK(sc->swsc);
	}

	/* Stop MAC RX/TX modules. */
	reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
	reg &= ~CPSW_SL_MACTL_GMII_ENABLE;
	cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);

	if (cpsw_ports_down(sc->swsc)) {
		/* Capture stats before we reset the controller. */
		cpsw_stats_collect(sc->swsc);

		cpsw_reset(sc->swsc);
		cpsw_init(sc->swsc);
	}
}

/*
 * Suspend/Resume.
 */

static int
cpsw_suspend(device_t dev)
{
	struct cpsw_softc *sc;
	struct cpswp_softc *psc;
	int i;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc, (""));
	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		psc = device_get_softc(sc->port[i].dev);
		CPSW_PORT_LOCK(psc);
		cpswp_stop_locked(psc);
		CPSW_PORT_UNLOCK(psc);
	}

	return (0);
}

static int
cpsw_resume(device_t dev)
{
	struct cpsw_softc *sc;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc, ("UNIMPLEMENTED"));

	return (0);
}

/*
 *
 * IOCTL
 *
 */

static void
cpsw_set_promisc(struct cpswp_softc *sc, int set)
{
	uint32_t reg;

	/*
	 * Enabling promiscuous mode requires ALE_BYPASS to be enabled.
	 * That disables the ALE forwarding logic and causes every
	 * packet to be sent only to the host port.  In bypass mode,
	 * the ALE processes host port transmit packets the same as in
	 * normal mode.
	 */
	reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL);
	reg &= ~CPSW_ALE_CTL_BYPASS;
	if (set)
		reg |= CPSW_ALE_CTL_BYPASS;
	cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg);
}

static void
cpsw_set_allmulti(struct cpswp_softc *sc, int set)
{
	if (set) {
		printf("All-multicast mode unimplemented\n");
	}
}

static int
cpswp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct cpswp_softc *sc;
	struct ifreq *ifr;
	int error;
	uint32_t changed;

	error = 0;
	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFFLAGS:
		CPSW_PORT_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				changed = ifp->if_flags ^ sc->if_flags;
				CPSW_DEBUGF(sc->swsc,
				    ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)",
				    changed));
				if (changed & IFF_PROMISC)
					cpsw_set_promisc(sc,
					    ifp->if_flags & IFF_PROMISC);
				if (changed & IFF_ALLMULTI)
					cpsw_set_allmulti(sc,
					    ifp->if_flags & IFF_ALLMULTI);
			} else {
				CPSW_DEBUGF(sc->swsc,
				    ("SIOCSIFFLAGS: UP but not RUNNING; starting up"));
				cpswp_init_locked(sc);
			}
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			CPSW_DEBUGF(sc->swsc,
			    ("SIOCSIFFLAGS: not UP but RUNNING; shutting down"));
			cpswp_stop_locked(sc);
		}

		sc->if_flags = ifp->if_flags;
		CPSW_PORT_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
		cpswp_ale_update_addresses(sc, 0);
		break;
	case SIOCDELMULTI:
		/* Ugh.  DELMULTI doesn't provide the specific address
		   being removed, so the best we can do is remove
		   everything and rebuild it all. */
		cpswp_ale_update_addresses(sc, 1);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

/*
 *
 * MIIBUS
 *
 */
static int
cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg)
{
	uint32_t r, retries = CPSW_MIIBUS_RETRIES;

	while (--retries) {
		r = cpsw_read_4(sc, reg);
		if ((r & MDIO_PHYACCESS_GO) == 0)
			return (1);
		DELAY(CPSW_MIIBUS_DELAY);
	}

	return (0);
}

static int
cpswp_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cpswp_softc *sc;
	uint32_t cmd, r;

	sc = device_get_softc(dev);
	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
		device_printf(dev, "MDIO not ready to read\n");
		return (0);
	}

	/* Set GO, reg, phy */
	cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
	cpsw_write_4(sc->swsc, sc->phyaccess, cmd);

	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
		device_printf(dev, "MDIO timed out during read\n");
		return (0);
	}

	r = cpsw_read_4(sc->swsc, sc->phyaccess);
	if ((r & MDIO_PHYACCESS_ACK) == 0) {
		device_printf(dev, "Failed to read from PHY.\n");
		r = 0;
	}
	return (r & 0xFFFF);
}

static int
cpswp_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct cpswp_softc *sc;
	uint32_t cmd;

	sc = device_get_softc(dev);
	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
		device_printf(dev, "MDIO not ready to write\n");
		return (0);
	}

	/* Set GO, WRITE, reg, phy, and value */
	cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE |
	    (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF);
	cpsw_write_4(sc->swsc, sc->phyaccess, cmd);

	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
		device_printf(dev, "MDIO timed out during write\n");
		return (0);
	}

	if ((cpsw_read_4(sc->swsc, sc->phyaccess) & MDIO_PHYACCESS_ACK) == 0)
		device_printf(dev, "Failed to write to PHY.\n");

	return (0);
}

static void
cpswp_miibus_statchg(device_t dev)
{
	struct cpswp_softc *sc;
	uint32_t mac_control, reg;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc->swsc, (""));

	reg = CPSW_SL_MACCONTROL(sc->unit);
	mac_control = cpsw_read_4(sc->swsc, reg);
	mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A |
	    CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX);

	switch(IFM_SUBTYPE(sc->mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		mac_control |= CPSW_SL_MACTL_GIG;
		break;

	case IFM_100_TX:
		mac_control |= CPSW_SL_MACTL_IFCTL_A;
		break;
	}
	if (sc->mii->mii_media_active & IFM_FDX)
		mac_control |= CPSW_SL_MACTL_FULLDUPLEX;

	cpsw_write_4(sc->swsc, reg, mac_control);
}
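/*
 * A note on the MDIO protocol used above: each USERACCESS transaction
 * is started by setting the GO bit, which stays set until the frame
 * completes on the wire; cpswp_miibus_ready() polls for it to clear,
 * and the ACK bit then indicates whether the PHY actually responded.
 * statchg translates the negotiated media into MACCONTROL bits: GIG
 * for gigabit media, IFCTL_A (interpreted here as the 10/100 speed
 * select, see the IFCTL TODO above) for 100 Mbit, and FULLDUPLEX for
 * full-duplex links.
 */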
/*
 *
 * Transmit/Receive Packets.
 *
 */
static void
cpsw_intr_rx(void *arg)
{
	struct cpsw_softc *sc = arg;
	struct ifnet *ifp;
	struct mbuf *received, *next;

	CPSW_RX_LOCK(sc);
	received = cpsw_rx_dequeue(sc);
	cpsw_rx_enqueue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
	CPSW_RX_UNLOCK(sc);

	while (received != NULL) {
		next = received->m_nextpkt;
		received->m_nextpkt = NULL;
		ifp = received->m_pkthdr.rcvif;
		(*ifp->if_input)(ifp, received);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		received = next;
	}
}

static struct mbuf *
cpsw_rx_dequeue(struct cpsw_softc *sc)
{
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *slot;
	struct cpswp_softc *psc;
	struct mbuf *mb_head, *mb_tail;
	int port, removed = 0;

	mb_head = mb_tail = NULL;

	/* Pull completed packets off hardware RX queue. */
	while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
		cpsw_cpdma_read_bd(sc, slot, &bd);
		if (bd.flags & CPDMA_BD_OWNER)
			break; /* Still in use by hardware */

		CPSW_DEBUGF(sc, ("Removing received packet from RX queue"));
		++removed;
		STAILQ_REMOVE_HEAD(&sc->rx.active, next);
		STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next);

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);

		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			CPSW_DEBUGF(sc, ("RX teardown in progress"));
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			cpsw_write_cp(sc, &sc->rx, 0xfffffffc);
			sc->rx.running = 0;
			break;
		}

		cpsw_write_cp_slot(sc, &sc->rx, slot);

		port = (bd.flags & CPDMA_BD_PORT_MASK) - 1;
		KASSERT(port >= 0 && port <= 1,
		    ("packet received with invalid port: %d", port));
		psc = device_get_softc(sc->port[port].dev);

		/* Set up mbuf */
		/* TODO: track SOP/EOP bits to assemble a full mbuf
		   out of received fragments. */
		slot->mbuf->m_data += bd.bufoff;
		slot->mbuf->m_len = bd.pktlen - 4;
		slot->mbuf->m_pkthdr.len = bd.pktlen - 4;
		slot->mbuf->m_flags |= M_PKTHDR;
		slot->mbuf->m_pkthdr.rcvif = psc->ifp;
		slot->mbuf->m_nextpkt = NULL;

		if ((psc->ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* check for valid CRC by looking into pkt_err[5:4] */
			if ((bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0) {
				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				slot->mbuf->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Add mbuf to packet list to be returned. */
		if (mb_tail) {
			mb_tail->m_nextpkt = slot->mbuf;
		} else {
			mb_head = slot->mbuf;
		}
		mb_tail = slot->mbuf;
		slot->mbuf = NULL;
	}

	if (removed != 0) {
		sc->rx.queue_removes += removed;
		sc->rx.active_queue_len -= removed;
		sc->rx.avail_queue_len += removed;
		if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len)
			sc->rx.max_avail_queue_len = sc->rx.avail_queue_len;
	}
	return (mb_head);
}

static void
cpsw_rx_enqueue(struct cpsw_softc *sc)
{
	bus_dma_segment_t seg[1];
	struct cpsw_cpdma_bd bd;
	struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
	struct cpsw_slot *slot, *prev_slot = NULL;
	struct cpsw_slot *last_old_slot, *first_new_slot;
	int error, nsegs, added = 0;
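	/*
	 * Strategy: build a chain of freshly loaded descriptors on
	 * tmpqueue, then splice the whole chain onto the tail of the
	 * live hardware queue in one step.  If the hardware has
	 * already reached end-of-queue (EOQ set on the old tail), the
	 * queue is restarted by writing the first new descriptor to
	 * the head descriptor pointer (HDP).
	 */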
	/* Register new mbufs with hardware. */
	while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) {
		if (slot->mbuf == NULL) {
			slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
			if (slot->mbuf == NULL) {
				device_printf(sc->dev,
				    "Unable to fill RX queue\n");
				break;
			}
			slot->mbuf->m_len =
			    slot->mbuf->m_pkthdr.len =
			    slot->mbuf->m_ext.ext_size;
		}

		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
		    slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);

		KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
		KASSERT(error == 0, ("DMA error (error=%d)", error));
		if (error != 0 || nsegs != 1) {
			device_printf(sc->dev,
			    "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
			    __func__, nsegs, error);
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD);

		/* Create and submit new rx descriptor. */
		bd.next = 0;
		bd.bufptr = seg->ds_addr;
		bd.bufoff = 0;
		bd.buflen = MCLBYTES - 1;
		bd.pktlen = bd.buflen;
		bd.flags = CPDMA_BD_OWNER;
		cpsw_cpdma_write_bd(sc, slot, &bd);
		++added;

		if (prev_slot != NULL)
			cpsw_cpdma_write_bd_next(sc, prev_slot, slot);
		prev_slot = slot;
		STAILQ_REMOVE_HEAD(&sc->rx.avail, next);
		sc->rx.avail_queue_len--;
		STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
	}

	if (added == 0)
		return;

	CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added));

	/* Link new entries to hardware RX queue. */
	last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next);
	first_new_slot = STAILQ_FIRST(&tmpqueue);
	STAILQ_CONCAT(&sc->rx.active, &tmpqueue);
	if (first_new_slot == NULL) {
		return;
	} else if (last_old_slot == NULL) {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
	} else {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
		/* If underrun, restart queue. */
		if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) {
			cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
		}
	}
	sc->rx.queue_adds += added;
	sc->rx.active_queue_len += added;
	if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) {
		sc->rx.max_active_queue_len = sc->rx.active_queue_len;
	}
}

static void
cpswp_start(struct ifnet *ifp)
{
	struct cpswp_softc *sc = ifp->if_softc;

	CPSW_TX_LOCK(sc->swsc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && sc->swsc->tx.running) {
		cpswp_tx_enqueue(sc);
		cpsw_tx_dequeue(sc->swsc);
	}
	CPSW_TX_UNLOCK(sc->swsc);
}

static void
cpswp_tx_enqueue(struct cpswp_softc *sc)
{
	bus_dma_segment_t segs[CPSW_TXFRAGS];
	struct cpsw_cpdma_bd bd;
	struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
	struct cpsw_slot *slot, *prev_slot = NULL;
	struct cpsw_slot *last_old_slot, *first_new_slot;
	struct mbuf *m0;
	int error, flags, nsegs, seg, added = 0, padlen;

	flags = 0;
	if (sc->swsc->dualemac) {
		flags = CPDMA_BD_TO_PORT |
		    ((sc->unit + 1) & CPDMA_BD_PORT_MASK);
	}
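	/*
	 * In dual EMAC mode every outgoing descriptor carries a
	 * TO_PORT directive, which forces the frame out through this
	 * port's sliver instead of letting the ALE choose the egress
	 * port.
	 */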
	/* Pull pending packets from IF queue and prep them for DMA. */
	while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) {
		IF_DEQUEUE(&sc->ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		slot->mbuf = m0;
		padlen = ETHER_MIN_LEN - slot->mbuf->m_pkthdr.len;
		if (padlen < 0)
			padlen = 0;

		/* Create mapping in DMA memory */
		error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag,
		    slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
		/* If the packet is too fragmented, try to simplify. */
		if (error == EFBIG ||
		    (error == 0 &&
		    nsegs + (padlen > 0 ? 1 : 0) > sc->swsc->tx.avail_queue_len)) {
			bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
			if (padlen > 0) /* May as well add padding. */
				m_append(slot->mbuf, padlen,
				    sc->swsc->null_mbuf->m_data);
			m0 = m_defrag(slot->mbuf, M_NOWAIT);
			if (m0 == NULL) {
				device_printf(sc->dev,
				    "Can't defragment packet; dropping\n");
				m_freem(slot->mbuf);
			} else {
				CPSW_DEBUGF(sc->swsc,
				    ("Requeueing defragmented packet"));
				IF_PREPEND(&sc->ifp->if_snd, m0);
			}
			slot->mbuf = NULL;
			continue;
		}
		if (error != 0) {
			device_printf(sc->dev,
			    "%s: Can't setup DMA (error=%d), dropping packet\n",
			    __func__, error);
			bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap,
		    BUS_DMASYNC_PREWRITE);

		CPSW_DEBUGF(sc->swsc,
		    ("Queueing TX packet: %d segments + %d pad bytes",
		    nsegs, padlen));

		slot->ifp = sc->ifp;
		/* If there is only one segment, the for() loop
		 * gets skipped and the single buffer gets set up
		 * as both SOP and EOP. */
		/* Start by setting up the first buffer */
		bd.next = 0;
		bd.bufptr = segs[0].ds_addr;
		bd.bufoff = 0;
		bd.buflen = segs[0].ds_len;
		bd.pktlen = m_length(slot->mbuf, NULL) + padlen;
		bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER | flags;
		for (seg = 1; seg < nsegs; ++seg) {
			/* Save the previous buffer (which isn't EOP) */
			cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
			if (prev_slot != NULL) {
				cpsw_cpdma_write_bd_next(sc->swsc, prev_slot,
				    slot);
			}
			prev_slot = slot;
			STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
			sc->swsc->tx.avail_queue_len--;
			STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
			++added;
			slot = STAILQ_FIRST(&sc->swsc->tx.avail);

			/* Setup next buffer (which isn't SOP) */
			bd.next = 0;
			bd.bufptr = segs[seg].ds_addr;
			bd.bufoff = 0;
			bd.buflen = segs[seg].ds_len;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_OWNER | flags;
		}
		/* Save the final buffer. */
		if (padlen <= 0)
			bd.flags |= CPDMA_BD_EOP;
		cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
		if (prev_slot != NULL)
			cpsw_cpdma_write_bd_next(sc->swsc, prev_slot, slot);
		prev_slot = slot;
		STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
		sc->swsc->tx.avail_queue_len--;
		STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
		++added;

		if (padlen > 0) {
			slot = STAILQ_FIRST(&sc->swsc->tx.avail);
			STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
			sc->swsc->tx.avail_queue_len--;
			STAILQ_INSERT_TAIL(&tmpqueue, slot, next);
			++added;

			/* Setup buffer of null pad bytes (definitely EOP) */
			cpsw_cpdma_write_bd_next(sc->swsc, prev_slot, slot);
			prev_slot = slot;
			bd.next = 0;
			bd.bufptr = sc->swsc->null_mbuf_paddr;
			bd.bufoff = 0;
			bd.buflen = padlen;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_EOP | CPDMA_BD_OWNER | flags;
			cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
			++nsegs;
		}

		if (nsegs > sc->swsc->tx.longest_chain)
			sc->swsc->tx.longest_chain = nsegs;

		// TODO: Should we defer the BPF tap until
		// after all packets are queued?
		BPF_MTAP(sc->ifp, m0);
	}

	/* Attach the list of new buffers to the hardware TX queue. */
	last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next);
	first_new_slot = STAILQ_FIRST(&tmpqueue);
	STAILQ_CONCAT(&sc->swsc->tx.active, &tmpqueue);
	if (first_new_slot == NULL) {
		return;
	} else if (last_old_slot == NULL) {
		/* Start a fresh queue. */
		sc->swsc->last_hdp = cpsw_cpdma_bd_paddr(sc->swsc, first_new_slot);
		cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot);
	} else {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot,
		    first_new_slot);
		/* If underrun, restart queue. */
		if (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) &
		    CPDMA_BD_EOQ) {
			sc->swsc->last_hdp = cpsw_cpdma_bd_paddr(sc->swsc, first_new_slot);
			cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx,
			    first_new_slot);
		}
	}
	sc->swsc->tx.queue_adds += added;
	sc->swsc->tx.active_queue_len += added;
	if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) {
		sc->swsc->tx.max_active_queue_len = sc->swsc->tx.active_queue_len;
	}
}

static int
cpsw_tx_dequeue(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot, *last_removed_slot = NULL;
	struct cpsw_cpdma_bd bd;
	uint32_t flags, removed = 0;

	slot = STAILQ_FIRST(&sc->tx.active);
	if (slot == NULL && cpsw_read_cp(sc, &sc->tx) == 0xfffffffc) {
		CPSW_DEBUGF(sc, ("TX teardown of an empty queue"));
		cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
		sc->tx.running = 0;
		return (0);
	}

	/* Pull completed buffers off the hardware TX queue. */
	while (slot != NULL) {
		flags = cpsw_cpdma_read_bd_flags(sc, slot);
		if (flags & CPDMA_BD_OWNER)
			break; /* Hardware is still using this packet. */

		CPSW_DEBUGF(sc, ("TX removing completed packet"));
		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
		m_freem(slot->mbuf);
		slot->mbuf = NULL;
		if (slot->ifp)
			if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1);
		/* Dequeue any additional buffers used by this packet. */
		while (slot != NULL && slot->mbuf == NULL) {
			STAILQ_REMOVE_HEAD(&sc->tx.active, next);
			STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next);
			++removed;
			last_removed_slot = slot;
			slot = STAILQ_FIRST(&sc->tx.active);
		}

		/* TearDown complete is only marked on the SOP for the packet. */
		if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) ==
		    (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) {
			CPSW_DEBUGF(sc, ("TX teardown in progress"));
			cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
			// TODO: Increment a count of dropped TX packets
			sc->tx.running = 0;
			break;
		}

		if ((flags & CPDMA_BD_EOP) == 0)
			flags = cpsw_cpdma_read_bd_flags(sc, last_removed_slot);
		if ((flags & (CPDMA_BD_EOP | CPDMA_BD_EOQ)) ==
		    (CPDMA_BD_EOP | CPDMA_BD_EOQ)) {
			cpsw_cpdma_read_bd(sc, last_removed_slot, &bd);
			if (bd.next != 0 && bd.next != sc->last_hdp) {
				/* Restart the queue. */
				sc->last_hdp = bd.next;
				cpsw_write_4(sc, sc->tx.hdp_offset, bd.next);
			}
		}
	}

	if (removed != 0) {
		cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot);
		sc->tx.queue_removes += removed;
		sc->tx.active_queue_len -= removed;
		sc->tx.avail_queue_len += removed;
		if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len)
			sc->tx.max_avail_queue_len = sc->tx.avail_queue_len;
	}
	return (removed);
}

/*
 *
 * Miscellaneous interrupts.
 *
 */

static void
cpsw_intr_rx_thresh(void *arg)
{
	struct cpsw_softc *sc = arg;
	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_RX_THRESH_STAT(0));

	CPSW_DEBUGF(sc, ("stat=%x", stat));
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
}

static void
cpsw_intr_misc_host_error(struct cpsw_softc *sc)
{
	uint32_t intstat;
	uint32_t dmastat;
	int txerr, rxerr, txchan, rxchan;

	printf("\n\n");
	device_printf(sc->dev,
	    "HOST ERROR:  PROGRAMMING ERROR DETECTED BY HARDWARE\n");
	printf("\n\n");
	intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
	device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat);
	dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
	device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat);

	txerr = (dmastat >> 20) & 15;
	txchan = (dmastat >> 16) & 7;
	rxerr = (dmastat >> 12) & 15;
	rxchan = (dmastat >> 8) & 7;

	switch (txerr) {
	case 0: break;
	case 1:	printf("SOP error on TX channel %d\n", txchan);
		break;
	case 2:	printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan);
		break;
	case 3:	printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan);
		break;
	case 4:	printf("Zero Buffer Pointer on TX channel %d\n", txchan);
		break;
	case 5:	printf("Zero Buffer Length on TX channel %d\n", txchan);
		break;
	case 6:	printf("Packet length error on TX channel %d\n", txchan);
		break;
	default: printf("Unknown error on TX channel %d\n", txchan);
		break;
	}

	if (txerr != 0) {
		printf("CPSW_CPDMA_TX%d_HDP=0x%x\n",
		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan)));
		printf("CPSW_CPDMA_TX%d_CP=0x%x\n",
		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan)));
		cpsw_dump_queue(sc, &sc->tx.active);
	}

	switch (rxerr) {
	case 0: break;
	case 2:	printf("Ownership bit not set on RX channel %d\n", rxchan);
		break;
	case 4:	printf("Zero Buffer Pointer on RX channel %d\n", rxchan);
		break;

/*
 *
 * Miscellaneous interrupts.
 *
 */

static void
cpsw_intr_rx_thresh(void *arg)
{
	struct cpsw_softc *sc = arg;
	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_RX_THRESH_STAT(0));

	CPSW_DEBUGF(sc, ("stat=%x", stat));
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
}

static void
cpsw_intr_misc_host_error(struct cpsw_softc *sc)
{
	uint32_t intstat;
	uint32_t dmastat;
	int txerr, rxerr, txchan, rxchan;

	printf("\n\n");
	device_printf(sc->dev,
	    "HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n");
	printf("\n\n");
	intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
	device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat);
	dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
	device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat);

	txerr = (dmastat >> 20) & 15;
	txchan = (dmastat >> 16) & 7;
	rxerr = (dmastat >> 12) & 15;
	rxchan = (dmastat >> 8) & 7;

	switch (txerr) {
	case 0: break;
	case 1: printf("SOP error on TX channel %d\n", txchan);
		break;
	case 2: printf("Ownership bit not set on SOP buffer on TX channel %d\n",
		    txchan);
		break;
	case 3: printf("Zero Next Buffer but not EOP on TX channel %d\n",
		    txchan);
		break;
	case 4: printf("Zero Buffer Pointer on TX channel %d\n", txchan);
		break;
	case 5: printf("Zero Buffer Length on TX channel %d\n", txchan);
		break;
	case 6: printf("Packet length error on TX channel %d\n", txchan);
		break;
	default: printf("Unknown error on TX channel %d\n", txchan);
		break;
	}

	if (txerr != 0) {
		printf("CPSW_CPDMA_TX%d_HDP=0x%x\n",
		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan)));
		printf("CPSW_CPDMA_TX%d_CP=0x%x\n",
		    txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan)));
		cpsw_dump_queue(sc, &sc->tx.active);
	}

	switch (rxerr) {
	case 0: break;
	case 2: printf("Ownership bit not set on RX channel %d\n", rxchan);
		break;
	case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan);
		break;
	case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan);
		break;
	case 6: printf("Buffer offset too big on RX channel %d\n", rxchan);
		break;
	default: printf("Unknown RX error on RX channel %d\n", rxchan);
		break;
	}

	if (rxerr != 0) {
		printf("CPSW_CPDMA_RX%d_HDP=0x%x\n",
		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_HDP(rxchan)));
		printf("CPSW_CPDMA_RX%d_CP=0x%x\n",
		    rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan)));
		cpsw_dump_queue(sc, &sc->rx.active);
	}

	printf("\nALE Table\n");
	cpsw_ale_dump_table(sc);

	/* XXX Do something more useful here? */
	panic("CPSW HOST ERROR INTERRUPT");

	/*
	 * NB: The panic above makes the rest of this function unreachable;
	 * it is kept for anyone debugging with the panic removed.
	 * Suppress this interrupt in the future.
	 */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat);
	printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n");
	/*
	 * The watchdog will probably reset the controller in a little
	 * while.  It will probably fail again.
	 */
}

static void
cpsw_intr_misc(void *arg)
{
	struct cpsw_softc *sc = arg;
	uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));

	if (stat & CPSW_WR_C_MISC_EVNT_PEND)
		CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented"));
	if (stat & CPSW_WR_C_MISC_STAT_PEND)
		cpsw_stats_collect(sc);
	if (stat & CPSW_WR_C_MISC_HOST_PEND)
		cpsw_intr_misc_host_error(sc);
	if (stat & CPSW_WR_C_MISC_MDIOLINK) {
		cpsw_write_4(sc, MDIOLINKINTMASKED,
		    cpsw_read_4(sc, MDIOLINKINTMASKED));
	}
	if (stat & CPSW_WR_C_MISC_MDIOUSER) {
		CPSW_DEBUGF(sc,
		    ("MDIO operation completed interrupt unimplemented"));
	}
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3);
}
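
/*
 * Editor's note: the EOI writes (0 in cpsw_intr_rx_thresh() above, 3
 * here; the RX and TX handlers elsewhere in this file write 1 and 2)
 * are the CPDMA end-of-interrupt key codes from the TRM.  Hypothetical
 * named equivalents, not defined in if_cpswreg.h, for reference:
 *
 *	#define CPSW_EOI_RX_THRESH	0	(C0_RX_THRESH pulse)
 *	#define CPSW_EOI_RX		1	(C0_RX pulse)
 *	#define CPSW_EOI_TX		2	(C0_TX pulse)
 *	#define CPSW_EOI_MISC		3	(C0_MISC pulse)
 */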

/*
 *
 * Periodic Checks and Watchdog.
 *
 */

static void
cpswp_tick(void *msc)
{
	struct cpswp_softc *sc = msc;

	/* Check for media type change. */
	mii_tick(sc->mii);
	if (sc->media_status != sc->mii->mii_media.ifm_media) {
		printf("%s: media type changed (ifm_media=%x)\n", __func__,
		    sc->mii->mii_media.ifm_media);
		cpswp_ifmedia_upd(sc->ifp);
	}

	/* Schedule another timeout one second from now. */
	callout_reset(&sc->mii_callout, hz, cpswp_tick, sc);
}

static void
cpswp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cpswp_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK(sc);

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	CPSW_PORT_UNLOCK(sc);
}

static int
cpswp_ifmedia_upd(struct ifnet *ifp)
{
	struct cpswp_softc *sc;

	sc = ifp->if_softc;
	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK(sc);
	mii_mediachg(sc->mii);
	sc->media_status = sc->mii->mii_media.ifm_media;
	CPSW_PORT_UNLOCK(sc);

	return (0);
}

static void
cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc)
{
	struct cpswp_softc *psc;
	int i;

	cpsw_debugf_head("CPSW watchdog");
	device_printf(sc->dev, "watchdog timeout\n");
	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		psc = device_get_softc(sc->port[i].dev);
		CPSW_PORT_LOCK(psc);
		cpswp_stop_locked(psc);
		CPSW_PORT_UNLOCK(psc);
	}
}

static void
cpsw_tx_watchdog(void *msc)
{
	struct cpsw_softc *sc;

	sc = msc;
	CPSW_TX_LOCK(sc);
	if (sc->tx.active_queue_len == 0 || !sc->tx.running) {
		sc->watchdog.timer = 0; /* Nothing to do. */
	} else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) {
		sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */
	} else if (cpsw_tx_dequeue(sc) > 0) {
		sc->watchdog.timer = 0; /* We just did something. */
	} else {
		/* There was something to do but it didn't get done. */
		++sc->watchdog.timer;
		if (sc->watchdog.timer > 5) {
			sc->watchdog.timer = 0;
			++sc->watchdog.resets;
			cpsw_tx_watchdog_full_reset(sc);
		}
	}
	sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes;
	CPSW_TX_UNLOCK(sc);

	/* Schedule another timeout one second from now. */
	callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
}
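
/*
 * Editor's note: cpsw_tx_watchdog() reschedules itself every second
 * (callout_reset(..., hz, ...)), so the "timer > 5" test above amounts
 * to roughly six consecutive seconds with TX work queued but no
 * completions before the ports are reset; each trip is counted in the
 * "resets" sysctl created in cpsw_add_watchdog_sysctls() below.
 */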

/*
 *
 * ALE support routines.
 *
 */

static void
cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
	cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
	ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
	ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
	ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
}

static void
cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
	cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
	cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
	cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
	cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023));
}
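
/*
 * Editor's note: an illustrative helper (unused, hence "#if 0" like
 * cpsw_stats_dump() below) showing where the MAC address bytes sit in
 * an ALE address entry; it is the inverse of the packing performed in
 * cpsw_ale_mc_entry_set().
 */
#if 0
static void
cpsw_ale_entry_get_mac(uint32_t *ale_entry, uint8_t *mac)
{

	mac[0] = (ale_entry[1] >> 8) & 0xFF;
	mac[1] = (ale_entry[1] >> 0) & 0xFF;
	mac[2] = (ale_entry[0] >> 24) & 0xFF;
	mac[3] = (ale_entry[0] >> 16) & 0xFF;
	mac[4] = (ale_entry[0] >> 8) & 0xFF;
	mac[5] = (ale_entry[0] >> 0) & 0xFF;
}
#endif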

static void
cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
{
	int i;
	uint32_t ale_entry[3];

	/* Skip the first entries, which hold the port and broadcast addresses. */
	for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);
		if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR ||
		    ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) &&
		    ALE_MCAST(ale_entry) == 1) { /* MCast link addr */
			ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
			cpsw_ale_write_entry(sc, i, ale_entry);
		}
	}
}

static int
cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan,
    uint8_t *mac)
{
	int free_index = -1, matching_index = -1, i;
	uint32_t ale_entry[3], ale_type;

	/* Find a matching entry or a free entry. */
	for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);

		/* Entry Type[61:60] is 0 for a free entry. */
		if (free_index < 0 && ALE_TYPE(ale_entry) == 0)
			free_index = i;

		if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) &&
		    (((ale_entry[1] >> 0) & 0xFF) == mac[1]) &&
		    (((ale_entry[0] >> 24) & 0xFF) == mac[2]) &&
		    (((ale_entry[0] >> 16) & 0xFF) == mac[3]) &&
		    (((ale_entry[0] >> 8) & 0xFF) == mac[4]) &&
		    (((ale_entry[0] >> 0) & 0xFF) == mac[5])) {
			matching_index = i;
			break;
		}
	}

	if (matching_index < 0) {
		if (free_index < 0)
			return (ENOMEM);
		i = free_index;
	}

	if (vlan != -1)
		ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16;
	else
		ale_type = ALE_TYPE_ADDR << 28;

	/* Set the MAC address. */
	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
	ale_entry[1] = mac[0] << 8 | mac[1];

	/*
	 * Set the entry type[61:60] and the multicast forward state[63:62]
	 * to forward (3).
	 */
	ale_entry[1] |= ALE_MCAST_FWD | ale_type;

	/* Set the port mask [68:66]. */
	ale_entry[2] = (portmap & 7) << 2;

	cpsw_ale_write_entry(sc, i, ale_entry);

	return (0);
}
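
/*
 * Editor's note: a worked example of the packing above, assuming the
 * usual encodings (ALE_TYPE_ADDR = 1, ALE_MCAST_FWD = 3 << 30; see
 * if_cpswreg.h for the authoritative values).  For the all-hosts group
 * 01:00:5e:00:00:01 with portmap 7 and no VLAN:
 *
 *	ale_entry[0] = 0x5e000001	(mac[2..5])
 *	ale_entry[1] = 0xd0000100	(fwd state | type | mac[0..1])
 *	ale_entry[2] = 0x0000001c	(port mask 7 << 2)
 */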

static void
cpsw_ale_dump_table(struct cpsw_softc *sc)
{
	int i;
	uint32_t ale_entry[3];

	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);
		switch (ALE_TYPE(ale_entry)) {
		case ALE_TYPE_VLAN:
			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
			    ale_entry[1], ale_entry[0]);
			printf("type: %u ", ALE_TYPE(ale_entry));
			printf("vlan: %u ", ALE_VLAN(ale_entry));
			printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry));
			printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry));
			printf("unreg flood: %u ",
			    ALE_VLAN_UNREGFLOOD(ale_entry));
			printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry));
			printf("\n");
			break;
		case ALE_TYPE_ADDR:
		case ALE_TYPE_VLAN_ADDR:
			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
			    ale_entry[1], ale_entry[0]);
			printf("type: %u ", ALE_TYPE(ale_entry));
			printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ",
			    (ale_entry[1] >> 8) & 0xFF,
			    (ale_entry[1] >> 0) & 0xFF,
			    (ale_entry[0] >> 24) & 0xFF,
			    (ale_entry[0] >> 16) & 0xFF,
			    (ale_entry[0] >> 8) & 0xFF,
			    (ale_entry[0] >> 0) & 0xFF);
			printf(ALE_MCAST(ale_entry) ? "mcast " : "ucast ");
			if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR)
				printf("vlan: %u ", ALE_VLAN(ale_entry));
			printf("port: %u ", ALE_PORTS(ale_entry));
			printf("\n");
			break;
		}
	}
	printf("\n");
}

static int
cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge)
{
	uint8_t *mac;
	uint32_t ale_entry[3], ale_type, portmask;
	struct ifmultiaddr *ifma;

	if (sc->swsc->dualemac) {
		ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16;
		portmask = 1 << (sc->unit + 1) | 1 << 0;
	} else {
		ale_type = ALE_TYPE_ADDR << 28;
		portmask = 7;
	}

	/*
	 * Route incoming packets for our MAC address to Port 0 (host).
	 * For simplicity, keep this entry at table index 0 for port 1 and
	 * at index 2 for port 2 in the ALE.
	 */
	if_addr_rlock(sc->ifp);
	mac = LLADDR((struct sockaddr_dl *)sc->ifp->if_addr->ifa_addr);
	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
	ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */
	ale_entry[2] = 0; /* port = 0 */
	cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry);

	/* Set the outgoing MAC address for the slave port. */
	cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1),
	    mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
	cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1),
	    mac[5] << 8 | mac[4]);
	if_addr_runlock(sc->ifp);

	/* Keep the broadcast address at table entry 1 (or 3). */
	ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */
	/* ALE_MCAST_FWD, addr type, upper 16 bits of MAC */
	ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff;
	ale_entry[2] = portmask << 2;
	cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry);

	/*
	 * SIOCDELMULTI doesn't specify the particular address being
	 * removed, so we have to remove all and rebuild.
	 */
	if (purge)
		cpsw_ale_remove_all_mc_entries(sc->swsc);

	/* Add the remaining multicast addresses. */
	if_maddr_rlock(sc->ifp);
	TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
	}
	if_maddr_runlock(sc->ifp);

	return (0);
}

static int
cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports,
    int untag, int mcregflood, int mcunregflood)
{
	int free_index, i, matching_index;
	uint32_t ale_entry[3];

	free_index = matching_index = -1;
	/* Find a matching entry or a free entry. */
	for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);

		/* Entry Type[61:60] is 0 for a free entry. */
		if (free_index < 0 && ALE_TYPE(ale_entry) == 0)
			free_index = i;

		if (ALE_VLAN(ale_entry) == vlan) {
			matching_index = i;
			break;
		}
	}

	if (matching_index < 0) {
		if (free_index < 0)
			return (-1);
		i = free_index;
	}

	ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 |
	    (mcunregflood & 7) << 8 | (ports & 7);
	ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16;
	ale_entry[2] = 0;
	cpsw_ale_write_entry(sc, i, ale_entry);

	return (0);
}
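
/*
 * Editor's note: a worked example of the VLAN entry layout above,
 * assuming ALE_TYPE_VLAN = 2 (see if_cpswreg.h).  For vlan = 100 with
 * member ports 7, no forced untagging, and both flood masks 7:
 *
 *	ale_entry[0] = 0 << 24 | 7 << 16 | 7 << 8 | 7 = 0x00070707
 *	ale_entry[1] = 2 << 28 | 100 << 16           = 0x20640000
 *	ale_entry[2] = 0
 */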

/*
 *
 * Statistics and Sysctls.
 *
 */

#if 0
static void
cpsw_stats_dump(struct cpsw_softc *sc)
{
	int i;
	uint32_t r;

	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
		    cpsw_stat_sysctls[i].reg);
		CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid,
		    (intmax_t)sc->shadow_stats[i], r,
		    (intmax_t)sc->shadow_stats[i] + r));
	}
}
#endif

static void
cpsw_stats_collect(struct cpsw_softc *sc)
{
	int i;
	uint32_t r;

	CPSW_DEBUGF(sc, ("Controller shadow statistics updated."));

	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
		    cpsw_stat_sysctls[i].reg);
		sc->shadow_stats[i] += r;
		cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg,
		    r);
	}
}

static int
cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct cpsw_stat *stat;
	uint64_t result;

	sc = (struct cpsw_softc *)arg1;
	stat = &cpsw_stat_sysctls[oidp->oid_number];
	result = sc->shadow_stats[oidp->oid_number];
	result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg);
	return (sysctl_handle_64(oidp, &result, 0, req));
}

static int
cpsw_stat_attached(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct bintime t;
	unsigned result;

	sc = (struct cpsw_softc *)arg1;
	getbinuptime(&t);
	bintime_sub(&t, &sc->attach_uptime);
	result = t.sec;
	return (sysctl_handle_int(oidp, &result, 0, req));
}

static int
cpsw_intr_coalesce(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct cpsw_softc *sc;
	uint32_t ctrl, intr_per_ms;

	sc = (struct cpsw_softc *)arg1;
	error = sysctl_handle_int(oidp, &sc->coal_us, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	ctrl = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
	ctrl &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
	if (sc->coal_us == 0) {
		/* Disable the interrupt pace hardware. */
		cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);
		cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), 0);
		cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), 0);
		return (0);
	}

	if (sc->coal_us > CPSW_WR_C_IMAX_US_MAX)
		sc->coal_us = CPSW_WR_C_IMAX_US_MAX;
	if (sc->coal_us < CPSW_WR_C_IMAX_US_MIN)
		sc->coal_us = CPSW_WR_C_IMAX_US_MIN;
	intr_per_ms = 1000 / sc->coal_us;
	/* Just to make sure... */
	if (intr_per_ms > CPSW_WR_C_IMAX_MAX)
		intr_per_ms = CPSW_WR_C_IMAX_MAX;
	if (intr_per_ms < CPSW_WR_C_IMAX_MIN)
		intr_per_ms = CPSW_WR_C_IMAX_MIN;

	/* Set the prescale to produce 4us pulses from the 125 MHz clock. */
	ctrl |= (125 * 4) & CPSW_WR_INT_PRESCALE_MASK;

	/* Enable the interrupt pace hardware. */
	cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), intr_per_ms);
	cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), intr_per_ms);
	ctrl |= CPSW_WR_INT_C0_RX_PULSE | CPSW_WR_INT_C0_TX_PULSE;
	cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);

	return (0);
}
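
/*
 * Editor's note: a worked example of the pacing math above.  Setting
 *
 *	sysctl dev.cpswss.0.intr_coalesce_us=500
 *
 * gives intr_per_ms = 1000 / 500 = 2, i.e. at most two RX and two TX
 * interrupts per millisecond, while the prescaler value 125 * 4 = 500
 * counts of the 125 MHz reference clock yields one pulse every 4 us,
 * the time base the IMAX limits are specified in.
 */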

static int
cpsw_stat_uptime(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *swsc;
	struct cpswp_softc *sc;
	struct bintime t;
	unsigned result;

	swsc = arg1;
	sc = device_get_softc(swsc->port[arg2].dev);
	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		getbinuptime(&t);
		bintime_sub(&t, &sc->init_uptime);
		result = t.sec;
	} else
		result = 0;
	return (sysctl_handle_int(oidp, &result, 0, req));
}

static void
cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
    struct cpsw_queue *queue)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_CHILDREN(node);
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers",
	    CTLFLAG_RD, &queue->queue_slots, 0,
	    "Total buffers currently assigned to this queue");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers",
	    CTLFLAG_RD, &queue->active_queue_len, 0,
	    "Buffers currently registered with hardware controller");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers",
	    CTLFLAG_RD, &queue->max_active_queue_len, 0,
	    "Max value of activeBuffers since last driver reset");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers",
	    CTLFLAG_RD, &queue->avail_queue_len, 0,
	    "Buffers allocated to this queue but not currently "
	    "registered with hardware controller");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers",
	    CTLFLAG_RD, &queue->max_avail_queue_len, 0,
	    "Max value of availBuffers since last driver reset");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued",
	    CTLFLAG_RD, &queue->queue_adds, 0,
	    "Total buffers added to queue");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued",
	    CTLFLAG_RD, &queue->queue_removes, 0,
	    "Total buffers removed from queue");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain",
	    CTLFLAG_RD, &queue->longest_chain, 0,
	    "Max buffers used for a single packet");
}

static void
cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
    struct cpsw_softc *sc)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_CHILDREN(node);
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets",
	    CTLFLAG_RD, &sc->watchdog.resets, 0,
	    "Total number of watchdog resets");
}

static void
cpsw_add_sysctls(struct cpsw_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *stats_node, *queue_node, *node;
	struct sysctl_oid_list *parent, *stats_parent, *queue_parent;
	struct sysctl_oid_list *ports_parent, *port_parent;
	char port[16];
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs",
	    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU",
	    "Time since driver attach");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "intr_coalesce_us",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, 0, cpsw_intr_coalesce, "IU",
	    "Minimum time between interrupts");

	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports",
	    CTLFLAG_RD, NULL, "CPSW Ports Statistics");
	ports_parent = SYSCTL_CHILDREN(node);
	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		port[0] = '0' + i;
		port[1] = '\0';
		node = SYSCTL_ADD_NODE(ctx, ports_parent, OID_AUTO,
		    port, CTLFLAG_RD, NULL, "CPSW Port Statistics");
		port_parent = SYSCTL_CHILDREN(node);
		SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime",
		    CTLTYPE_UINT | CTLFLAG_RD, sc, i,
		    cpsw_stat_uptime, "IU", "Seconds since driver init");
	}

	stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
	    CTLFLAG_RD, NULL, "CPSW Statistics");
	stats_parent = SYSCTL_CHILDREN(stats_node);
	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		SYSCTL_ADD_PROC(ctx, stats_parent, i,
		    cpsw_stat_sysctls[i].oid,
		    CTLTYPE_U64 | CTLFLAG_RD, sc, 0,
		    cpsw_stats_sysctl, "QU",
		    cpsw_stat_sysctls[i].oid);
	}

	queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue",
	    CTLFLAG_RD, NULL, "CPSW Queue Statistics");
	queue_parent = SYSCTL_CHILDREN(queue_node);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx",
	    CTLFLAG_RD, NULL, "TX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->tx);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx",
	    CTLFLAG_RD, NULL, "RX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->rx);

	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog",
	    CTLFLAG_RD, NULL, "Watchdog Statistics");
	cpsw_add_watchdog_sysctls(ctx, node, sc);
}
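
/*
 * Editor's note: the nodes built above land under the switch device's
 * sysctl tree; assuming the switch attaches as cpswss0, for example:
 *
 *	sysctl dev.cpswss.0.debug=1
 *	sysctl dev.cpswss.0.queue.tx.activeBuffers
 *	sysctl dev.cpswss.0.ports.0.uptime
 *	sysctl dev.cpswss.0.watchdog.resets
 */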