1 /*- 2 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org> 3 * Copyright (c) 2016 Rubicon Communications, LLC (Netgate) 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 /* 29 * TI Common Platform Ethernet Switch (CPSW) Driver 30 * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs. 31 * 32 * This controller is documented in the AM335x Technical Reference 33 * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM 34 * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM. 35 * 36 * It is basically a single Ethernet port (port 0) wired internally to 37 * a 3-port store-and-forward switch connected to two independent 38 * "sliver" controllers (port 1 and port 2). You can operate the 39 * controller in a variety of different ways by suitably configuring 40 * the slivers and the Address Lookup Engine (ALE) that routes packets 41 * between the ports. 42 * 43 * This code was developed and tested on a BeagleBone with 44 * an AM335x SoC. 45 */ 46 47 #include <sys/cdefs.h> 48 __FBSDID("$FreeBSD$"); 49 50 #include <sys/param.h> 51 #include <sys/bus.h> 52 #include <sys/kernel.h> 53 #include <sys/lock.h> 54 #include <sys/mbuf.h> 55 #include <sys/module.h> 56 #include <sys/mutex.h> 57 #include <sys/rman.h> 58 #include <sys/socket.h> 59 #include <sys/sockio.h> 60 #include <sys/sysctl.h> 61 62 #include <machine/bus.h> 63 #include <machine/resource.h> 64 #include <machine/stdarg.h> 65 66 #include <net/ethernet.h> 67 #include <net/bpf.h> 68 #include <net/if.h> 69 #include <net/if_dl.h> 70 #include <net/if_media.h> 71 #include <net/if_types.h> 72 73 #include <arm/ti/ti_scm.h> 74 #include <arm/ti/am335x/am335x_scm.h> 75 76 #include <dev/mii/mii.h> 77 #include <dev/mii/miivar.h> 78 79 #include <dev/fdt/fdt_common.h> 80 #include <dev/ofw/ofw_bus.h> 81 #include <dev/ofw/ofw_bus_subr.h> 82 83 #include "if_cpswreg.h" 84 #include "if_cpswvar.h" 85 86 #include "miibus_if.h" 87 88 /* Device probe/attach/detach. */ 89 static int cpsw_probe(device_t); 90 static int cpsw_attach(device_t); 91 static int cpsw_detach(device_t); 92 static int cpswp_probe(device_t); 93 static int cpswp_attach(device_t); 94 static int cpswp_detach(device_t); 95 96 static phandle_t cpsw_get_node(device_t, device_t); 97 98 /* Device Init/shutdown. */ 99 static int cpsw_shutdown(device_t); 100 static void cpswp_init(void *); 101 static void cpswp_init_locked(void *); 102 static void cpswp_stop_locked(struct cpswp_softc *); 103 104 /* Device Suspend/Resume. */ 105 static int cpsw_suspend(device_t); 106 static int cpsw_resume(device_t); 107 108 /* Ioctl. */ 109 static int cpswp_ioctl(struct ifnet *, u_long command, caddr_t data); 110 111 static int cpswp_miibus_readreg(device_t, int phy, int reg); 112 static int cpswp_miibus_writereg(device_t, int phy, int reg, int value); 113 static void cpswp_miibus_statchg(device_t); 114 115 /* Send/Receive packets. */ 116 static void cpsw_intr_rx(void *arg); 117 static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *); 118 static void cpsw_rx_enqueue(struct cpsw_softc *); 119 static void cpswp_start(struct ifnet *); 120 static void cpsw_intr_tx(void *); 121 static void cpswp_tx_enqueue(struct cpswp_softc *); 122 static int cpsw_tx_dequeue(struct cpsw_softc *); 123 124 /* Misc interrupts and watchdog. */ 125 static void cpsw_intr_rx_thresh(void *); 126 static void cpsw_intr_misc(void *); 127 static void cpswp_tick(void *); 128 static void cpswp_ifmedia_sts(struct ifnet *, struct ifmediareq *); 129 static int cpswp_ifmedia_upd(struct ifnet *); 130 static void cpsw_tx_watchdog(void *); 131 132 /* ALE support */ 133 static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *); 134 static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *); 135 static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *); 136 static void cpsw_ale_dump_table(struct cpsw_softc *); 137 static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int, 138 int); 139 static int cpswp_ale_update_addresses(struct cpswp_softc *, int); 140 141 /* Statistics and sysctls. */ 142 static void cpsw_add_sysctls(struct cpsw_softc *); 143 static void cpsw_stats_collect(struct cpsw_softc *); 144 static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS); 145 146 /* 147 * Arbitrary limit on number of segments in an mbuf to be transmitted. 148 * Packets with more segments than this will be defragmented before 149 * they are queued. 150 */ 151 #define CPSW_TXFRAGS 16 152 153 /* Shared resources. */ 154 static device_method_t cpsw_methods[] = { 155 /* Device interface */ 156 DEVMETHOD(device_probe, cpsw_probe), 157 DEVMETHOD(device_attach, cpsw_attach), 158 DEVMETHOD(device_detach, cpsw_detach), 159 DEVMETHOD(device_shutdown, cpsw_shutdown), 160 DEVMETHOD(device_suspend, cpsw_suspend), 161 DEVMETHOD(device_resume, cpsw_resume), 162 /* OFW methods */ 163 DEVMETHOD(ofw_bus_get_node, cpsw_get_node), 164 DEVMETHOD_END 165 }; 166 167 static driver_t cpsw_driver = { 168 "cpswss", 169 cpsw_methods, 170 sizeof(struct cpsw_softc), 171 }; 172 173 static devclass_t cpsw_devclass; 174 175 DRIVER_MODULE(cpswss, simplebus, cpsw_driver, cpsw_devclass, 0, 0); 176 177 /* Port/Slave resources. */ 178 static device_method_t cpswp_methods[] = { 179 /* Device interface */ 180 DEVMETHOD(device_probe, cpswp_probe), 181 DEVMETHOD(device_attach, cpswp_attach), 182 DEVMETHOD(device_detach, cpswp_detach), 183 /* MII interface */ 184 DEVMETHOD(miibus_readreg, cpswp_miibus_readreg), 185 DEVMETHOD(miibus_writereg, cpswp_miibus_writereg), 186 DEVMETHOD(miibus_statchg, cpswp_miibus_statchg), 187 DEVMETHOD_END 188 }; 189 190 static driver_t cpswp_driver = { 191 "cpsw", 192 cpswp_methods, 193 sizeof(struct cpswp_softc), 194 }; 195 196 static devclass_t cpswp_devclass; 197 198 DRIVER_MODULE(cpsw, cpswss, cpswp_driver, cpswp_devclass, 0, 0); 199 DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0); 200 MODULE_DEPEND(cpsw, ether, 1, 1, 1); 201 MODULE_DEPEND(cpsw, miibus, 1, 1, 1); 202 203 static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 }; 204 205 static struct resource_spec irq_res_spec[] = { 206 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 207 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE }, 208 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE }, 209 { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE }, 210 { -1, 0 } 211 }; 212 213 static struct { 214 void (*cb)(void *); 215 } cpsw_intr_cb[] = { 216 { cpsw_intr_rx_thresh }, 217 { cpsw_intr_rx }, 218 { cpsw_intr_tx }, 219 { cpsw_intr_misc }, 220 }; 221 222 /* Number of entries here must match size of stats 223 * array in struct cpswp_softc. */ 224 static struct cpsw_stat { 225 int reg; 226 char *oid; 227 } cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = { 228 {0x00, "GoodRxFrames"}, 229 {0x04, "BroadcastRxFrames"}, 230 {0x08, "MulticastRxFrames"}, 231 {0x0C, "PauseRxFrames"}, 232 {0x10, "RxCrcErrors"}, 233 {0x14, "RxAlignErrors"}, 234 {0x18, "OversizeRxFrames"}, 235 {0x1c, "RxJabbers"}, 236 {0x20, "ShortRxFrames"}, 237 {0x24, "RxFragments"}, 238 {0x30, "RxOctets"}, 239 {0x34, "GoodTxFrames"}, 240 {0x38, "BroadcastTxFrames"}, 241 {0x3c, "MulticastTxFrames"}, 242 {0x40, "PauseTxFrames"}, 243 {0x44, "DeferredTxFrames"}, 244 {0x48, "CollisionsTxFrames"}, 245 {0x4c, "SingleCollisionTxFrames"}, 246 {0x50, "MultipleCollisionTxFrames"}, 247 {0x54, "ExcessiveCollisions"}, 248 {0x58, "LateCollisions"}, 249 {0x5c, "TxUnderrun"}, 250 {0x60, "CarrierSenseErrors"}, 251 {0x64, "TxOctets"}, 252 {0x68, "RxTx64OctetFrames"}, 253 {0x6c, "RxTx65to127OctetFrames"}, 254 {0x70, "RxTx128to255OctetFrames"}, 255 {0x74, "RxTx256to511OctetFrames"}, 256 {0x78, "RxTx512to1024OctetFrames"}, 257 {0x7c, "RxTx1024upOctetFrames"}, 258 {0x80, "NetOctets"}, 259 {0x84, "RxStartOfFrameOverruns"}, 260 {0x88, "RxMiddleOfFrameOverruns"}, 261 {0x8c, "RxDmaOverruns"} 262 }; 263 264 /* 265 * Basic debug support. 266 */ 267 268 static void 269 cpsw_debugf_head(const char *funcname) 270 { 271 int t = (int)(time_second % (24 * 60 * 60)); 272 273 printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname); 274 } 275 276 static void 277 cpsw_debugf(const char *fmt, ...) 278 { 279 va_list ap; 280 281 va_start(ap, fmt); 282 vprintf(fmt, ap); 283 va_end(ap); 284 printf("\n"); 285 286 } 287 288 #define CPSW_DEBUGF(_sc, a) do { \ 289 if ((_sc)->debug) { \ 290 cpsw_debugf_head(__func__); \ 291 cpsw_debugf a; \ 292 } \ 293 } while (0) 294 295 /* 296 * Locking macros 297 */ 298 #define CPSW_TX_LOCK(sc) do { \ 299 mtx_assert(&(sc)->rx.lock, MA_NOTOWNED); \ 300 mtx_lock(&(sc)->tx.lock); \ 301 } while (0) 302 303 #define CPSW_TX_UNLOCK(sc) mtx_unlock(&(sc)->tx.lock) 304 #define CPSW_TX_LOCK_ASSERT(sc) mtx_assert(&(sc)->tx.lock, MA_OWNED) 305 306 #define CPSW_RX_LOCK(sc) do { \ 307 mtx_assert(&(sc)->tx.lock, MA_NOTOWNED); \ 308 mtx_lock(&(sc)->rx.lock); \ 309 } while (0) 310 311 #define CPSW_RX_UNLOCK(sc) mtx_unlock(&(sc)->rx.lock) 312 #define CPSW_RX_LOCK_ASSERT(sc) mtx_assert(&(sc)->rx.lock, MA_OWNED) 313 314 #define CPSW_PORT_LOCK(_sc) do { \ 315 mtx_assert(&(_sc)->lock, MA_NOTOWNED); \ 316 mtx_lock(&(_sc)->lock); \ 317 } while (0) 318 319 #define CPSW_PORT_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) 320 #define CPSW_PORT_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->lock, MA_OWNED) 321 322 /* 323 * Read/Write macros 324 */ 325 #define cpsw_read_4(_sc, _reg) bus_read_4((_sc)->mem_res, (_reg)) 326 #define cpsw_write_4(_sc, _reg, _val) \ 327 bus_write_4((_sc)->mem_res, (_reg), (_val)) 328 329 #define cpsw_cpdma_bd_offset(i) (CPSW_CPPI_RAM_OFFSET + ((i)*16)) 330 331 #define cpsw_cpdma_bd_paddr(sc, slot) \ 332 BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset) 333 #define cpsw_cpdma_read_bd(sc, slot, val) \ 334 bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) 335 #define cpsw_cpdma_write_bd(sc, slot, val) \ 336 bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) 337 #define cpsw_cpdma_write_bd_next(sc, slot, next_slot) \ 338 cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot)) 339 #define cpsw_cpdma_write_bd_flags(sc, slot, val) \ 340 bus_write_2(sc->mem_res, slot->bd_offset + 14, val) 341 #define cpsw_cpdma_read_bd_flags(sc, slot) \ 342 bus_read_2(sc->mem_res, slot->bd_offset + 14) 343 #define cpsw_write_hdp_slot(sc, queue, slot) \ 344 cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot)) 345 #define CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0)) 346 #define cpsw_read_cp(sc, queue) \ 347 cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET) 348 #define cpsw_write_cp(sc, queue, val) \ 349 cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val)) 350 #define cpsw_write_cp_slot(sc, queue, slot) \ 351 cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot)) 352 353 #if 0 354 /* XXX temporary function versions for debugging. */ 355 static void 356 cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) 357 { 358 uint32_t reg = queue->hdp_offset; 359 uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); 360 CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg))); 361 cpsw_write_4(sc, reg, v); 362 } 363 364 static void 365 cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) 366 { 367 uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); 368 CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue))); 369 cpsw_write_cp(sc, queue, v); 370 } 371 #endif 372 373 /* 374 * Expanded dump routines for verbose debugging. 375 */ 376 static void 377 cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) 378 { 379 static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ", 380 "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun", 381 "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1", 382 "Port0"}; 383 struct cpsw_cpdma_bd bd; 384 const char *sep; 385 int i; 386 387 cpsw_cpdma_read_bd(sc, slot, &bd); 388 printf("BD Addr : 0x%08x Next : 0x%08x\n", 389 cpsw_cpdma_bd_paddr(sc, slot), bd.next); 390 printf(" BufPtr: 0x%08x BufLen: 0x%08x\n", bd.bufptr, bd.buflen); 391 printf(" BufOff: 0x%08x PktLen: 0x%08x\n", bd.bufoff, bd.pktlen); 392 printf(" Flags: "); 393 sep = ""; 394 for (i = 0; i < 16; ++i) { 395 if (bd.flags & (1 << (15 - i))) { 396 printf("%s%s", sep, flags[i]); 397 sep = ","; 398 } 399 } 400 printf("\n"); 401 if (slot->mbuf) { 402 printf(" Ether: %14D\n", 403 (char *)(slot->mbuf->m_data), " "); 404 printf(" Packet: %16D\n", 405 (char *)(slot->mbuf->m_data) + 14, " "); 406 } 407 } 408 409 #define CPSW_DUMP_SLOT(cs, slot) do { \ 410 IF_DEBUG(sc) { \ 411 cpsw_dump_slot(sc, slot); \ 412 } \ 413 } while (0) 414 415 static void 416 cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q) 417 { 418 struct cpsw_slot *slot; 419 int i = 0; 420 int others = 0; 421 422 STAILQ_FOREACH(slot, q, next) { 423 if (i > CPSW_TXFRAGS) 424 ++others; 425 else 426 cpsw_dump_slot(sc, slot); 427 ++i; 428 } 429 if (others) 430 printf(" ... and %d more.\n", others); 431 printf("\n"); 432 } 433 434 #define CPSW_DUMP_QUEUE(sc, q) do { \ 435 IF_DEBUG(sc) { \ 436 cpsw_dump_queue(sc, q); \ 437 } \ 438 } while (0) 439 440 static void 441 cpsw_init_slots(struct cpsw_softc *sc) 442 { 443 struct cpsw_slot *slot; 444 int i; 445 446 STAILQ_INIT(&sc->avail); 447 448 /* Put the slot descriptors onto the global avail list. */ 449 for (i = 0; i < nitems(sc->_slots); i++) { 450 slot = &sc->_slots[i]; 451 slot->bd_offset = cpsw_cpdma_bd_offset(i); 452 STAILQ_INSERT_TAIL(&sc->avail, slot, next); 453 } 454 } 455 456 static int 457 cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested) 458 { 459 const int max_slots = nitems(sc->_slots); 460 struct cpsw_slot *slot; 461 int i; 462 463 if (requested < 0) 464 requested = max_slots; 465 466 for (i = 0; i < requested; ++i) { 467 slot = STAILQ_FIRST(&sc->avail); 468 if (slot == NULL) 469 return (0); 470 if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) { 471 device_printf(sc->dev, "failed to create dmamap\n"); 472 return (ENOMEM); 473 } 474 STAILQ_REMOVE_HEAD(&sc->avail, next); 475 STAILQ_INSERT_TAIL(&queue->avail, slot, next); 476 ++queue->avail_queue_len; 477 ++queue->queue_slots; 478 } 479 return (0); 480 } 481 482 static void 483 cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) 484 { 485 int error; 486 487 if (slot->dmamap) { 488 if (slot->mbuf) 489 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 490 error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap); 491 KASSERT(error == 0, ("Mapping still active")); 492 slot->dmamap = NULL; 493 } 494 if (slot->mbuf) { 495 m_freem(slot->mbuf); 496 slot->mbuf = NULL; 497 } 498 } 499 500 static void 501 cpsw_reset(struct cpsw_softc *sc) 502 { 503 int i; 504 505 callout_stop(&sc->watchdog.callout); 506 507 /* Reset RMII/RGMII wrapper. */ 508 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1); 509 while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1) 510 ; 511 512 /* Disable TX and RX interrupts for all cores. */ 513 for (i = 0; i < 3; ++i) { 514 cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00); 515 cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00); 516 cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00); 517 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00); 518 } 519 520 /* Reset CPSW subsystem. */ 521 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1); 522 while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1) 523 ; 524 525 /* Reset Sliver port 1 and 2 */ 526 for (i = 0; i < 2; i++) { 527 /* Reset */ 528 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1); 529 while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1) 530 ; 531 } 532 533 /* Reset DMA controller. */ 534 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1); 535 while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1) 536 ; 537 538 /* Disable TX & RX DMA */ 539 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0); 540 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0); 541 542 /* Clear all queues. */ 543 for (i = 0; i < 8; i++) { 544 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0); 545 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0); 546 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0); 547 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0); 548 } 549 550 /* Clear all interrupt Masks */ 551 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF); 552 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF); 553 } 554 555 static void 556 cpsw_init(struct cpsw_softc *sc) 557 { 558 struct cpsw_slot *slot; 559 uint32_t reg; 560 561 /* Disable the interrupt pacing. */ 562 reg = cpsw_read_4(sc, CPSW_WR_INT_CONTROL); 563 reg &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK); 564 cpsw_write_4(sc, CPSW_WR_INT_CONTROL, reg); 565 566 /* Clear ALE */ 567 cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL); 568 569 /* Enable ALE */ 570 reg = CPSW_ALE_CTL_ENABLE; 571 if (sc->dualemac) 572 reg |= CPSW_ALE_CTL_VLAN_AWARE; 573 cpsw_write_4(sc, CPSW_ALE_CONTROL, reg); 574 575 /* Set Host Port Mapping. */ 576 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210); 577 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0); 578 579 /* Initialize ALE: set host port to forwarding(3). */ 580 cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 3); 581 582 cpsw_write_4(sc, CPSW_SS_PTYPE, 0); 583 584 /* Enable statistics for ports 0, 1 and 2 */ 585 cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7); 586 587 /* Turn off flow control. */ 588 cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0); 589 590 /* Make IP hdr aligned with 4 */ 591 cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2); 592 593 /* Initialize RX Buffer Descriptors */ 594 cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), 0); 595 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0); 596 597 /* Enable TX & RX DMA */ 598 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1); 599 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1); 600 601 /* Enable Interrupts for core 0 */ 602 cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF); 603 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF); 604 cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0xFF); 605 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F); 606 607 /* Enable host Error Interrupt */ 608 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3); 609 610 /* Enable interrupts for RX and TX on Channel 0 */ 611 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 612 CPSW_CPDMA_RX_INT(0) | CPSW_CPDMA_RX_INT_THRESH(0)); 613 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1); 614 615 /* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */ 616 /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */ 617 cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff); 618 619 /* Select MII in GMII_SEL, Internal Delay mode */ 620 //ti_scm_reg_write_4(0x650, 0); 621 622 /* Initialize active queues. */ 623 slot = STAILQ_FIRST(&sc->tx.active); 624 if (slot != NULL) 625 cpsw_write_hdp_slot(sc, &sc->tx, slot); 626 slot = STAILQ_FIRST(&sc->rx.active); 627 if (slot != NULL) 628 cpsw_write_hdp_slot(sc, &sc->rx, slot); 629 cpsw_rx_enqueue(sc); 630 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), sc->rx.active_queue_len); 631 cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), CPSW_TXFRAGS); 632 633 /* Activate network interface. */ 634 sc->rx.running = 1; 635 sc->tx.running = 1; 636 sc->watchdog.timer = 0; 637 callout_init(&sc->watchdog.callout, 0); 638 callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); 639 } 640 641 /* 642 * 643 * Device Probe, Attach, Detach. 644 * 645 */ 646 647 static int 648 cpsw_probe(device_t dev) 649 { 650 651 if (!ofw_bus_status_okay(dev)) 652 return (ENXIO); 653 654 if (!ofw_bus_is_compatible(dev, "ti,cpsw")) 655 return (ENXIO); 656 657 device_set_desc(dev, "3-port Switch Ethernet Subsystem"); 658 return (BUS_PROBE_DEFAULT); 659 } 660 661 static int 662 cpsw_intr_attach(struct cpsw_softc *sc) 663 { 664 int i; 665 666 for (i = 0; i < CPSW_INTR_COUNT; i++) { 667 if (bus_setup_intr(sc->dev, sc->irq_res[i], 668 INTR_TYPE_NET | INTR_MPSAFE, NULL, 669 cpsw_intr_cb[i].cb, sc, &sc->ih_cookie[i]) != 0) { 670 return (-1); 671 } 672 } 673 674 return (0); 675 } 676 677 static void 678 cpsw_intr_detach(struct cpsw_softc *sc) 679 { 680 int i; 681 682 for (i = 0; i < CPSW_INTR_COUNT; i++) { 683 if (sc->ih_cookie[i]) { 684 bus_teardown_intr(sc->dev, sc->irq_res[i], 685 sc->ih_cookie[i]); 686 } 687 } 688 } 689 690 static int 691 cpsw_get_fdt_data(struct cpsw_softc *sc, int port) 692 { 693 char *name; 694 int len, phy, vlan; 695 pcell_t phy_id[3], vlan_id; 696 phandle_t child; 697 unsigned long mdio_child_addr; 698 699 /* Find any slave with phy_id */ 700 phy = -1; 701 vlan = -1; 702 for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) { 703 if (OF_getprop_alloc(child, "name", 1, (void **)&name) < 0) 704 continue; 705 if (sscanf(name, "slave@%x", &mdio_child_addr) != 1) { 706 OF_prop_free(name); 707 continue; 708 } 709 OF_prop_free(name); 710 if (mdio_child_addr != slave_mdio_addr[port]) 711 continue; 712 713 len = OF_getproplen(child, "phy_id"); 714 if (len / sizeof(pcell_t) == 2) { 715 /* Get phy address from fdt */ 716 if (OF_getencprop(child, "phy_id", phy_id, len) > 0) 717 phy = phy_id[1]; 718 } 719 720 len = OF_getproplen(child, "dual_emac_res_vlan"); 721 if (len / sizeof(pcell_t) == 1) { 722 /* Get phy address from fdt */ 723 if (OF_getencprop(child, "dual_emac_res_vlan", 724 &vlan_id, len) > 0) { 725 vlan = vlan_id; 726 } 727 } 728 729 break; 730 } 731 if (phy == -1) 732 return (ENXIO); 733 sc->port[port].phy = phy; 734 sc->port[port].vlan = vlan; 735 736 return (0); 737 } 738 739 static int 740 cpsw_attach(device_t dev) 741 { 742 bus_dma_segment_t segs[1]; 743 int error, i, nsegs; 744 struct cpsw_softc *sc; 745 uint32_t reg; 746 747 sc = device_get_softc(dev); 748 sc->dev = dev; 749 sc->node = ofw_bus_get_node(dev); 750 getbinuptime(&sc->attach_uptime); 751 752 if (OF_getencprop(sc->node, "active_slave", &sc->active_slave, 753 sizeof(sc->active_slave)) <= 0) { 754 sc->active_slave = 0; 755 } 756 if (sc->active_slave > 1) 757 sc->active_slave = 1; 758 759 if (OF_hasprop(sc->node, "dual_emac")) 760 sc->dualemac = 1; 761 762 for (i = 0; i < CPSW_PORTS; i++) { 763 if (!sc->dualemac && i != sc->active_slave) 764 continue; 765 if (cpsw_get_fdt_data(sc, i) != 0) { 766 device_printf(dev, 767 "failed to get PHY address from FDT\n"); 768 return (ENXIO); 769 } 770 } 771 772 /* Initialize mutexes */ 773 mtx_init(&sc->tx.lock, device_get_nameunit(dev), 774 "cpsw TX lock", MTX_DEF); 775 mtx_init(&sc->rx.lock, device_get_nameunit(dev), 776 "cpsw RX lock", MTX_DEF); 777 778 /* Allocate IRQ resources */ 779 error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res); 780 if (error) { 781 device_printf(dev, "could not allocate IRQ resources\n"); 782 cpsw_detach(dev); 783 return (ENXIO); 784 } 785 786 sc->mem_rid = 0; 787 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 788 &sc->mem_rid, RF_ACTIVE); 789 if (sc->mem_res == NULL) { 790 device_printf(sc->dev, "failed to allocate memory resource\n"); 791 cpsw_detach(dev); 792 return (ENXIO); 793 } 794 795 reg = cpsw_read_4(sc, CPSW_SS_IDVER); 796 device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7), 797 reg & 0xFF, (reg >> 11) & 0x1F); 798 799 cpsw_add_sysctls(sc); 800 801 /* Allocate a busdma tag and DMA safe memory for mbufs. */ 802 error = bus_dma_tag_create( 803 bus_get_dma_tag(sc->dev), /* parent */ 804 1, 0, /* alignment, boundary */ 805 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 806 BUS_SPACE_MAXADDR, /* highaddr */ 807 NULL, NULL, /* filtfunc, filtfuncarg */ 808 MCLBYTES, CPSW_TXFRAGS, /* maxsize, nsegments */ 809 MCLBYTES, 0, /* maxsegsz, flags */ 810 NULL, NULL, /* lockfunc, lockfuncarg */ 811 &sc->mbuf_dtag); /* dmatag */ 812 if (error) { 813 device_printf(dev, "bus_dma_tag_create failed\n"); 814 cpsw_detach(dev); 815 return (error); 816 } 817 818 /* Allocate the null mbuf and pre-sync it. */ 819 sc->null_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 820 memset(sc->null_mbuf->m_data, 0, sc->null_mbuf->m_ext.ext_size); 821 bus_dmamap_create(sc->mbuf_dtag, 0, &sc->null_mbuf_dmamap); 822 bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->null_mbuf_dmamap, 823 sc->null_mbuf, segs, &nsegs, BUS_DMA_NOWAIT); 824 bus_dmamap_sync(sc->mbuf_dtag, sc->null_mbuf_dmamap, 825 BUS_DMASYNC_PREWRITE); 826 sc->null_mbuf_paddr = segs[0].ds_addr; 827 828 cpsw_init_slots(sc); 829 830 /* Allocate slots to TX and RX queues. */ 831 STAILQ_INIT(&sc->rx.avail); 832 STAILQ_INIT(&sc->rx.active); 833 STAILQ_INIT(&sc->tx.avail); 834 STAILQ_INIT(&sc->tx.active); 835 // For now: 128 slots to TX, rest to RX. 836 // XXX TODO: start with 32/64 and grow dynamically based on demand. 837 if (cpsw_add_slots(sc, &sc->tx, 128) || 838 cpsw_add_slots(sc, &sc->rx, -1)) { 839 device_printf(dev, "failed to allocate dmamaps\n"); 840 cpsw_detach(dev); 841 return (ENOMEM); 842 } 843 device_printf(dev, "Initial queue size TX=%d RX=%d\n", 844 sc->tx.queue_slots, sc->rx.queue_slots); 845 846 sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0); 847 sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0); 848 849 if (cpsw_intr_attach(sc) == -1) { 850 device_printf(dev, "failed to setup interrupts\n"); 851 cpsw_detach(dev); 852 return (ENXIO); 853 } 854 855 /* Reset the controller. */ 856 cpsw_reset(sc); 857 cpsw_init(sc); 858 859 for (i = 0; i < CPSW_PORTS; i++) { 860 if (!sc->dualemac && i != sc->active_slave) 861 continue; 862 sc->port[i].dev = device_add_child(dev, "cpsw", i); 863 if (sc->port[i].dev == NULL) { 864 cpsw_detach(dev); 865 return (ENXIO); 866 } 867 } 868 bus_generic_attach(dev); 869 870 return (0); 871 } 872 873 static int 874 cpsw_detach(device_t dev) 875 { 876 struct cpsw_softc *sc; 877 int error, i; 878 879 bus_generic_detach(dev); 880 sc = device_get_softc(dev); 881 882 for (i = 0; i < CPSW_PORTS; i++) { 883 if (sc->port[i].dev) 884 device_delete_child(dev, sc->port[i].dev); 885 } 886 887 if (device_is_attached(dev)) { 888 callout_stop(&sc->watchdog.callout); 889 callout_drain(&sc->watchdog.callout); 890 } 891 892 /* Stop and release all interrupts */ 893 cpsw_intr_detach(sc); 894 895 /* Free dmamaps and mbufs */ 896 for (i = 0; i < nitems(sc->_slots); ++i) 897 cpsw_free_slot(sc, &sc->_slots[i]); 898 899 /* Free null mbuf. */ 900 if (sc->null_mbuf_dmamap) { 901 bus_dmamap_unload(sc->mbuf_dtag, sc->null_mbuf_dmamap); 902 error = bus_dmamap_destroy(sc->mbuf_dtag, sc->null_mbuf_dmamap); 903 KASSERT(error == 0, ("Mapping still active")); 904 m_freem(sc->null_mbuf); 905 } 906 907 /* Free DMA tag */ 908 if (sc->mbuf_dtag) { 909 error = bus_dma_tag_destroy(sc->mbuf_dtag); 910 KASSERT(error == 0, ("Unable to destroy DMA tag")); 911 } 912 913 /* Free IO memory handler */ 914 if (sc->mem_res != NULL) 915 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); 916 bus_release_resources(dev, irq_res_spec, sc->irq_res); 917 918 /* Destroy mutexes */ 919 mtx_destroy(&sc->rx.lock); 920 mtx_destroy(&sc->tx.lock); 921 922 return (0); 923 } 924 925 static phandle_t 926 cpsw_get_node(device_t bus, device_t dev) 927 { 928 929 /* Share controller node with port device. */ 930 return (ofw_bus_get_node(bus)); 931 } 932 933 static int 934 cpswp_probe(device_t dev) 935 { 936 937 if (device_get_unit(dev) > 1) { 938 device_printf(dev, "Only two ports are supported.\n"); 939 return (ENXIO); 940 } 941 device_set_desc(dev, "Ethernet Switch Port"); 942 943 return (BUS_PROBE_DEFAULT); 944 } 945 946 static int 947 cpswp_attach(device_t dev) 948 { 949 int error; 950 struct ifnet *ifp; 951 struct cpswp_softc *sc; 952 uint32_t reg; 953 uint8_t mac_addr[ETHER_ADDR_LEN]; 954 955 sc = device_get_softc(dev); 956 sc->dev = dev; 957 sc->pdev = device_get_parent(dev); 958 sc->swsc = device_get_softc(sc->pdev); 959 sc->unit = device_get_unit(dev); 960 sc->phy = sc->swsc->port[sc->unit].phy; 961 sc->vlan = sc->swsc->port[sc->unit].vlan; 962 if (sc->swsc->dualemac && sc->vlan == -1) 963 sc->vlan = sc->unit + 1; 964 965 if (sc->unit == 0) { 966 sc->physel = MDIOUSERPHYSEL0; 967 sc->phyaccess = MDIOUSERACCESS0; 968 } else { 969 sc->physel = MDIOUSERPHYSEL1; 970 sc->phyaccess = MDIOUSERACCESS1; 971 } 972 973 mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock", 974 MTX_DEF); 975 976 /* Allocate network interface */ 977 ifp = sc->ifp = if_alloc(IFT_ETHER); 978 if (ifp == NULL) { 979 cpswp_detach(dev); 980 return (ENXIO); 981 } 982 983 if_initname(ifp, device_get_name(sc->dev), sc->unit); 984 ifp->if_softc = sc; 985 ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST; 986 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN? 987 ifp->if_capenable = ifp->if_capabilities; 988 989 ifp->if_init = cpswp_init; 990 ifp->if_start = cpswp_start; 991 ifp->if_ioctl = cpswp_ioctl; 992 993 ifp->if_snd.ifq_drv_maxlen = sc->swsc->tx.queue_slots; 994 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 995 IFQ_SET_READY(&ifp->if_snd); 996 997 /* Get high part of MAC address from control module (mac_id[0|1]_hi) */ 998 ti_scm_reg_read_4(SCM_MAC_ID0_HI + sc->unit * 8, ®); 999 mac_addr[0] = reg & 0xFF; 1000 mac_addr[1] = (reg >> 8) & 0xFF; 1001 mac_addr[2] = (reg >> 16) & 0xFF; 1002 mac_addr[3] = (reg >> 24) & 0xFF; 1003 1004 /* Get low part of MAC address from control module (mac_id[0|1]_lo) */ 1005 ti_scm_reg_read_4(SCM_MAC_ID0_LO + sc->unit * 8, ®); 1006 mac_addr[4] = reg & 0xFF; 1007 mac_addr[5] = (reg >> 8) & 0xFF; 1008 1009 error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd, 1010 cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0); 1011 if (error) { 1012 device_printf(dev, "attaching PHYs failed\n"); 1013 cpswp_detach(dev); 1014 return (error); 1015 } 1016 sc->mii = device_get_softc(sc->miibus); 1017 1018 /* Select PHY and enable interrupts */ 1019 cpsw_write_4(sc->swsc, sc->physel, 1020 MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F)); 1021 1022 ether_ifattach(sc->ifp, mac_addr); 1023 callout_init(&sc->mii_callout, 0); 1024 1025 return (0); 1026 } 1027 1028 static int 1029 cpswp_detach(device_t dev) 1030 { 1031 struct cpswp_softc *sc; 1032 1033 sc = device_get_softc(dev); 1034 CPSW_DEBUGF(sc->swsc, ("")); 1035 if (device_is_attached(dev)) { 1036 ether_ifdetach(sc->ifp); 1037 CPSW_PORT_LOCK(sc); 1038 cpswp_stop_locked(sc); 1039 CPSW_PORT_UNLOCK(sc); 1040 callout_drain(&sc->mii_callout); 1041 } 1042 1043 bus_generic_detach(dev); 1044 1045 if_free(sc->ifp); 1046 mtx_destroy(&sc->lock); 1047 1048 return (0); 1049 } 1050 1051 /* 1052 * 1053 * Init/Shutdown. 1054 * 1055 */ 1056 1057 static int 1058 cpsw_ports_down(struct cpsw_softc *sc) 1059 { 1060 struct cpswp_softc *psc; 1061 struct ifnet *ifp1, *ifp2; 1062 1063 if (!sc->dualemac) 1064 return (1); 1065 psc = device_get_softc(sc->port[0].dev); 1066 ifp1 = psc->ifp; 1067 psc = device_get_softc(sc->port[1].dev); 1068 ifp2 = psc->ifp; 1069 if ((ifp1->if_flags & IFF_UP) == 0 && (ifp2->if_flags & IFF_UP) == 0) 1070 return (1); 1071 1072 return (0); 1073 } 1074 1075 static void 1076 cpswp_init(void *arg) 1077 { 1078 struct cpswp_softc *sc = arg; 1079 1080 CPSW_DEBUGF(sc->swsc, ("")); 1081 CPSW_PORT_LOCK(sc); 1082 cpswp_init_locked(arg); 1083 CPSW_PORT_UNLOCK(sc); 1084 } 1085 1086 static void 1087 cpswp_init_locked(void *arg) 1088 { 1089 struct cpswp_softc *sc = arg; 1090 struct ifnet *ifp; 1091 uint32_t reg; 1092 1093 CPSW_DEBUGF(sc->swsc, ("")); 1094 CPSW_PORT_LOCK_ASSERT(sc); 1095 ifp = sc->ifp; 1096 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1097 return; 1098 1099 getbinuptime(&sc->init_uptime); 1100 1101 if (!sc->swsc->rx.running && !sc->swsc->tx.running) { 1102 /* Reset the controller. */ 1103 cpsw_reset(sc->swsc); 1104 cpsw_init(sc->swsc); 1105 } 1106 1107 /* Set Slave Mapping. */ 1108 cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210); 1109 cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1), 1110 0x33221100); 1111 cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2); 1112 /* Enable MAC RX/TX modules. */ 1113 /* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */ 1114 /* Huh? Docs call bit 0 "Loopback" some places, "FullDuplex" others. */ 1115 reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); 1116 reg |= CPSW_SL_MACTL_GMII_ENABLE; 1117 cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); 1118 1119 /* Initialize ALE: set port to forwarding(3), initialize addrs */ 1120 cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1), 3); 1121 cpswp_ale_update_addresses(sc, 1); 1122 1123 if (sc->swsc->dualemac) { 1124 /* Set Port VID. */ 1125 cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1), 1126 sc->vlan & 0xfff); 1127 cpsw_ale_update_vlan_table(sc->swsc, sc->vlan, 1128 (1 << (sc->unit + 1)) | (1 << 0), /* Member list */ 1129 (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */ 1130 (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */ 1131 } 1132 1133 mii_mediachg(sc->mii); 1134 callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); 1135 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1136 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1137 } 1138 1139 static int 1140 cpsw_shutdown(device_t dev) 1141 { 1142 struct cpsw_softc *sc; 1143 struct cpswp_softc *psc; 1144 int i; 1145 1146 sc = device_get_softc(dev); 1147 CPSW_DEBUGF(sc, ("")); 1148 for (i = 0; i < CPSW_PORTS; i++) { 1149 if (!sc->dualemac && i != sc->active_slave) 1150 continue; 1151 psc = device_get_softc(sc->port[i].dev); 1152 CPSW_PORT_LOCK(psc); 1153 cpswp_stop_locked(psc); 1154 CPSW_PORT_UNLOCK(psc); 1155 } 1156 1157 return (0); 1158 } 1159 1160 static void 1161 cpsw_rx_teardown(struct cpsw_softc *sc) 1162 { 1163 int i = 0; 1164 1165 CPSW_RX_LOCK(sc); 1166 CPSW_DEBUGF(sc, ("starting RX teardown")); 1167 sc->rx.teardown = 1; 1168 cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0); 1169 CPSW_RX_UNLOCK(sc); 1170 while (sc->rx.running) { 1171 if (++i > 10) { 1172 device_printf(sc->dev, 1173 "Unable to cleanly shutdown receiver\n"); 1174 return; 1175 } 1176 DELAY(200); 1177 } 1178 if (!sc->rx.running) 1179 CPSW_DEBUGF(sc, ("finished RX teardown (%d retries)", i)); 1180 } 1181 1182 static void 1183 cpsw_tx_teardown(struct cpsw_softc *sc) 1184 { 1185 int i = 0; 1186 1187 CPSW_TX_LOCK(sc); 1188 CPSW_DEBUGF(sc, ("starting TX teardown")); 1189 /* Start the TX queue teardown if queue is not empty. */ 1190 if (STAILQ_FIRST(&sc->tx.active) != NULL) 1191 cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0); 1192 else 1193 sc->tx.teardown = 1; 1194 cpsw_tx_dequeue(sc); 1195 while (sc->tx.running && ++i < 10) { 1196 DELAY(200); 1197 cpsw_tx_dequeue(sc); 1198 } 1199 if (sc->tx.running) { 1200 device_printf(sc->dev, 1201 "Unable to cleanly shutdown transmitter\n"); 1202 } 1203 CPSW_DEBUGF(sc, 1204 ("finished TX teardown (%d retries, %d idle buffers)", i, 1205 sc->tx.active_queue_len)); 1206 CPSW_TX_UNLOCK(sc); 1207 } 1208 1209 static void 1210 cpswp_stop_locked(struct cpswp_softc *sc) 1211 { 1212 struct ifnet *ifp; 1213 uint32_t reg; 1214 1215 ifp = sc->ifp; 1216 CPSW_DEBUGF(sc->swsc, ("")); 1217 CPSW_PORT_LOCK_ASSERT(sc); 1218 1219 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1220 return; 1221 1222 /* Disable interface */ 1223 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1224 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1225 1226 /* Stop ticker */ 1227 callout_stop(&sc->mii_callout); 1228 1229 /* Tear down the RX/TX queues. */ 1230 if (cpsw_ports_down(sc->swsc)) { 1231 cpsw_rx_teardown(sc->swsc); 1232 cpsw_tx_teardown(sc->swsc); 1233 } 1234 1235 /* Stop MAC RX/TX modules. */ 1236 reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); 1237 reg &= ~CPSW_SL_MACTL_GMII_ENABLE; 1238 cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); 1239 1240 if (cpsw_ports_down(sc->swsc)) { 1241 /* Capture stats before we reset controller. */ 1242 cpsw_stats_collect(sc->swsc); 1243 1244 cpsw_reset(sc->swsc); 1245 cpsw_init(sc->swsc); 1246 } 1247 } 1248 1249 /* 1250 * Suspend/Resume. 1251 */ 1252 1253 static int 1254 cpsw_suspend(device_t dev) 1255 { 1256 struct cpsw_softc *sc; 1257 struct cpswp_softc *psc; 1258 int i; 1259 1260 sc = device_get_softc(dev); 1261 CPSW_DEBUGF(sc, ("")); 1262 for (i = 0; i < CPSW_PORTS; i++) { 1263 if (!sc->dualemac && i != sc->active_slave) 1264 continue; 1265 psc = device_get_softc(sc->port[i].dev); 1266 CPSW_PORT_LOCK(psc); 1267 cpswp_stop_locked(psc); 1268 CPSW_PORT_UNLOCK(psc); 1269 } 1270 1271 return (0); 1272 } 1273 1274 static int 1275 cpsw_resume(device_t dev) 1276 { 1277 struct cpsw_softc *sc; 1278 1279 sc = device_get_softc(dev); 1280 CPSW_DEBUGF(sc, ("UNIMPLEMENTED")); 1281 1282 return (0); 1283 } 1284 1285 /* 1286 * 1287 * IOCTL 1288 * 1289 */ 1290 1291 static void 1292 cpsw_set_promisc(struct cpswp_softc *sc, int set) 1293 { 1294 uint32_t reg; 1295 1296 /* 1297 * Enabling promiscuous mode requires ALE_BYPASS to be enabled. 1298 * That disables the ALE forwarding logic and causes every 1299 * packet to be sent only to the host port. In bypass mode, 1300 * the ALE processes host port transmit packets the same as in 1301 * normal mode. 1302 */ 1303 reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL); 1304 reg &= ~CPSW_ALE_CTL_BYPASS; 1305 if (set) 1306 reg |= CPSW_ALE_CTL_BYPASS; 1307 cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg); 1308 } 1309 1310 static void 1311 cpsw_set_allmulti(struct cpswp_softc *sc, int set) 1312 { 1313 if (set) { 1314 printf("All-multicast mode unimplemented\n"); 1315 } 1316 } 1317 1318 static int 1319 cpswp_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1320 { 1321 struct cpswp_softc *sc; 1322 struct ifreq *ifr; 1323 int error; 1324 uint32_t changed; 1325 1326 error = 0; 1327 sc = ifp->if_softc; 1328 ifr = (struct ifreq *)data; 1329 1330 switch (command) { 1331 case SIOCSIFFLAGS: 1332 CPSW_PORT_LOCK(sc); 1333 if (ifp->if_flags & IFF_UP) { 1334 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1335 changed = ifp->if_flags ^ sc->if_flags; 1336 CPSW_DEBUGF(sc->swsc, 1337 ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", 1338 changed)); 1339 if (changed & IFF_PROMISC) 1340 cpsw_set_promisc(sc, 1341 ifp->if_flags & IFF_PROMISC); 1342 if (changed & IFF_ALLMULTI) 1343 cpsw_set_allmulti(sc, 1344 ifp->if_flags & IFF_ALLMULTI); 1345 } else { 1346 CPSW_DEBUGF(sc->swsc, 1347 ("SIOCSIFFLAGS: starting up")); 1348 cpswp_init_locked(sc); 1349 } 1350 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1351 CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: shutting down")); 1352 cpswp_stop_locked(sc); 1353 } 1354 1355 sc->if_flags = ifp->if_flags; 1356 CPSW_PORT_UNLOCK(sc); 1357 break; 1358 case SIOCADDMULTI: 1359 cpswp_ale_update_addresses(sc, 0); 1360 break; 1361 case SIOCDELMULTI: 1362 /* Ugh. DELMULTI doesn't provide the specific address 1363 being removed, so the best we can do is remove 1364 everything and rebuild it all. */ 1365 cpswp_ale_update_addresses(sc, 1); 1366 break; 1367 case SIOCGIFMEDIA: 1368 case SIOCSIFMEDIA: 1369 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command); 1370 break; 1371 default: 1372 error = ether_ioctl(ifp, command, data); 1373 } 1374 return (error); 1375 } 1376 1377 /* 1378 * 1379 * MIIBUS 1380 * 1381 */ 1382 static int 1383 cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg) 1384 { 1385 uint32_t r, retries = CPSW_MIIBUS_RETRIES; 1386 1387 while (--retries) { 1388 r = cpsw_read_4(sc, reg); 1389 if ((r & MDIO_PHYACCESS_GO) == 0) 1390 return (1); 1391 DELAY(CPSW_MIIBUS_DELAY); 1392 } 1393 1394 return (0); 1395 } 1396 1397 static int 1398 cpswp_miibus_readreg(device_t dev, int phy, int reg) 1399 { 1400 struct cpswp_softc *sc; 1401 uint32_t cmd, r; 1402 1403 sc = device_get_softc(dev); 1404 if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { 1405 device_printf(dev, "MDIO not ready to read\n"); 1406 return (0); 1407 } 1408 1409 /* Set GO, reg, phy */ 1410 cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16; 1411 cpsw_write_4(sc->swsc, sc->phyaccess, cmd); 1412 1413 if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { 1414 device_printf(dev, "MDIO timed out during read\n"); 1415 return (0); 1416 } 1417 1418 r = cpsw_read_4(sc->swsc, sc->phyaccess); 1419 if ((r & MDIO_PHYACCESS_ACK) == 0) { 1420 device_printf(dev, "Failed to read from PHY.\n"); 1421 r = 0; 1422 } 1423 return (r & 0xFFFF); 1424 } 1425 1426 static int 1427 cpswp_miibus_writereg(device_t dev, int phy, int reg, int value) 1428 { 1429 struct cpswp_softc *sc; 1430 uint32_t cmd; 1431 1432 sc = device_get_softc(dev); 1433 if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { 1434 device_printf(dev, "MDIO not ready to write\n"); 1435 return (0); 1436 } 1437 1438 /* Set GO, WRITE, reg, phy, and value */ 1439 cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE | 1440 (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF); 1441 cpsw_write_4(sc->swsc, sc->phyaccess, cmd); 1442 1443 if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { 1444 device_printf(dev, "MDIO timed out during write\n"); 1445 return (0); 1446 } 1447 1448 if ((cpsw_read_4(sc->swsc, sc->phyaccess) & MDIO_PHYACCESS_ACK) == 0) 1449 device_printf(dev, "Failed to write to PHY.\n"); 1450 1451 return (0); 1452 } 1453 1454 static void 1455 cpswp_miibus_statchg(device_t dev) 1456 { 1457 struct cpswp_softc *sc; 1458 uint32_t mac_control, reg; 1459 1460 sc = device_get_softc(dev); 1461 CPSW_DEBUGF(sc->swsc, ("")); 1462 1463 reg = CPSW_SL_MACCONTROL(sc->unit); 1464 mac_control = cpsw_read_4(sc->swsc, reg); 1465 mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A | 1466 CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX); 1467 1468 switch(IFM_SUBTYPE(sc->mii->mii_media_active)) { 1469 case IFM_1000_SX: 1470 case IFM_1000_LX: 1471 case IFM_1000_CX: 1472 case IFM_1000_T: 1473 mac_control |= CPSW_SL_MACTL_GIG; 1474 break; 1475 1476 case IFM_100_TX: 1477 mac_control |= CPSW_SL_MACTL_IFCTL_A; 1478 break; 1479 } 1480 if (sc->mii->mii_media_active & IFM_FDX) 1481 mac_control |= CPSW_SL_MACTL_FULLDUPLEX; 1482 1483 cpsw_write_4(sc->swsc, reg, mac_control); 1484 } 1485 1486 /* 1487 * 1488 * Transmit/Receive Packets. 1489 * 1490 */ 1491 static void 1492 cpsw_intr_rx(void *arg) 1493 { 1494 struct cpsw_softc *sc; 1495 struct ifnet *ifp; 1496 struct mbuf *received, *next; 1497 1498 sc = (struct cpsw_softc *)arg; 1499 CPSW_RX_LOCK(sc); 1500 if (sc->rx.teardown) { 1501 sc->rx.running = 0; 1502 sc->rx.teardown = 0; 1503 cpsw_write_cp(sc, &sc->rx, 0xfffffffc); 1504 } 1505 received = cpsw_rx_dequeue(sc); 1506 cpsw_rx_enqueue(sc); 1507 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1); 1508 CPSW_RX_UNLOCK(sc); 1509 1510 while (received != NULL) { 1511 next = received->m_nextpkt; 1512 received->m_nextpkt = NULL; 1513 ifp = received->m_pkthdr.rcvif; 1514 (*ifp->if_input)(ifp, received); 1515 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1516 received = next; 1517 } 1518 } 1519 1520 static struct mbuf * 1521 cpsw_rx_dequeue(struct cpsw_softc *sc) 1522 { 1523 struct cpsw_cpdma_bd bd; 1524 struct cpsw_slot *last, *slot; 1525 struct cpswp_softc *psc; 1526 struct mbuf *mb_head, *mb_tail; 1527 int port, removed = 0; 1528 1529 last = NULL; 1530 mb_head = mb_tail = NULL; 1531 1532 /* Pull completed packets off hardware RX queue. */ 1533 while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) { 1534 cpsw_cpdma_read_bd(sc, slot, &bd); 1535 1536 /* 1537 * Stop on packets still in use by hardware, but do not stop 1538 * on packets with the teardown complete flag, they will be 1539 * discarded later. 1540 */ 1541 if ((bd.flags & (CPDMA_BD_OWNER | CPDMA_BD_TDOWNCMPLT)) == 1542 CPDMA_BD_OWNER) 1543 break; 1544 1545 last = slot; 1546 ++removed; 1547 STAILQ_REMOVE_HEAD(&sc->rx.active, next); 1548 STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next); 1549 1550 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD); 1551 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1552 1553 if (bd.flags & CPDMA_BD_TDOWNCMPLT) { 1554 CPSW_DEBUGF(sc, ("RX teardown is complete")); 1555 m_freem(slot->mbuf); 1556 slot->mbuf = NULL; 1557 sc->rx.running = 0; 1558 sc->rx.teardown = 0; 1559 break; 1560 } 1561 1562 port = (bd.flags & CPDMA_BD_PORT_MASK) - 1; 1563 KASSERT(port >= 0 && port <= 1, 1564 ("patcket received with invalid port: %d", port)); 1565 psc = device_get_softc(sc->port[port].dev); 1566 1567 /* Set up mbuf */ 1568 /* TODO: track SOP/EOP bits to assemble a full mbuf 1569 out of received fragments. */ 1570 slot->mbuf->m_data += bd.bufoff; 1571 slot->mbuf->m_len = bd.pktlen - 4; 1572 slot->mbuf->m_pkthdr.len = bd.pktlen - 4; 1573 slot->mbuf->m_flags |= M_PKTHDR; 1574 slot->mbuf->m_pkthdr.rcvif = psc->ifp; 1575 slot->mbuf->m_nextpkt = NULL; 1576 1577 if ((psc->ifp->if_capenable & IFCAP_RXCSUM) != 0) { 1578 /* check for valid CRC by looking into pkt_err[5:4] */ 1579 if ((bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0) { 1580 slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1581 slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1582 slot->mbuf->m_pkthdr.csum_data = 0xffff; 1583 } 1584 } 1585 1586 /* Add mbuf to packet list to be returned. */ 1587 if (mb_tail) { 1588 mb_tail->m_nextpkt = slot->mbuf; 1589 } else { 1590 mb_head = slot->mbuf; 1591 } 1592 mb_tail = slot->mbuf; 1593 slot->mbuf = NULL; 1594 if (sc->rx_batch > 0 && sc->rx_batch == removed) 1595 break; 1596 } 1597 1598 if (removed != 0) { 1599 cpsw_write_cp_slot(sc, &sc->rx, last); 1600 sc->rx.queue_removes += removed; 1601 sc->rx.avail_queue_len += removed; 1602 sc->rx.active_queue_len -= removed; 1603 if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len) 1604 sc->rx.max_avail_queue_len = sc->rx.avail_queue_len; 1605 CPSW_DEBUGF(sc, ("Removed %d received packet(s) from RX queue", removed)); 1606 } 1607 1608 return (mb_head); 1609 } 1610 1611 static void 1612 cpsw_rx_enqueue(struct cpsw_softc *sc) 1613 { 1614 bus_dma_segment_t seg[1]; 1615 struct cpsw_cpdma_bd bd; 1616 struct cpsw_slot *first_new_slot, *last_old_slot, *next, *slot; 1617 int error, nsegs, added = 0; 1618 uint32_t flags; 1619 1620 /* Register new mbufs with hardware. */ 1621 first_new_slot = NULL; 1622 last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next); 1623 while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) { 1624 if (first_new_slot == NULL) 1625 first_new_slot = slot; 1626 if (slot->mbuf == NULL) { 1627 slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1628 if (slot->mbuf == NULL) { 1629 device_printf(sc->dev, 1630 "Unable to fill RX queue\n"); 1631 break; 1632 } 1633 slot->mbuf->m_len = 1634 slot->mbuf->m_pkthdr.len = 1635 slot->mbuf->m_ext.ext_size; 1636 } 1637 1638 error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap, 1639 slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT); 1640 1641 KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs)); 1642 KASSERT(error == 0, ("DMA error (error=%d)", error)); 1643 if (error != 0 || nsegs != 1) { 1644 device_printf(sc->dev, 1645 "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n", 1646 __func__, nsegs, error); 1647 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1648 m_freem(slot->mbuf); 1649 slot->mbuf = NULL; 1650 break; 1651 } 1652 1653 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD); 1654 1655 /* Create and submit new rx descriptor. */ 1656 if ((next = STAILQ_NEXT(slot, next)) != NULL) 1657 bd.next = cpsw_cpdma_bd_paddr(sc, next); 1658 else 1659 bd.next = 0; 1660 bd.bufptr = seg->ds_addr; 1661 bd.bufoff = 0; 1662 bd.buflen = MCLBYTES - 1; 1663 bd.pktlen = bd.buflen; 1664 bd.flags = CPDMA_BD_OWNER; 1665 cpsw_cpdma_write_bd(sc, slot, &bd); 1666 ++added; 1667 1668 STAILQ_REMOVE_HEAD(&sc->rx.avail, next); 1669 STAILQ_INSERT_TAIL(&sc->rx.active, slot, next); 1670 } 1671 1672 if (added == 0 || first_new_slot == NULL) 1673 return; 1674 1675 CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added)); 1676 1677 /* Link new entries to hardware RX queue. */ 1678 if (last_old_slot == NULL) { 1679 /* Start a fresh queue. */ 1680 cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); 1681 } else { 1682 /* Add buffers to end of current queue. */ 1683 cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot); 1684 /* If underrun, restart queue. */ 1685 if ((flags = cpsw_cpdma_read_bd_flags(sc, last_old_slot)) & 1686 CPDMA_BD_EOQ) { 1687 flags &= ~CPDMA_BD_EOQ; 1688 cpsw_cpdma_write_bd_flags(sc, last_old_slot, flags); 1689 cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); 1690 sc->rx.queue_restart++; 1691 } 1692 } 1693 sc->rx.queue_adds += added; 1694 sc->rx.avail_queue_len -= added; 1695 sc->rx.active_queue_len += added; 1696 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), sc->rx.active_queue_len); 1697 if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) { 1698 sc->rx.max_active_queue_len = sc->rx.active_queue_len; 1699 } 1700 } 1701 1702 static void 1703 cpswp_start(struct ifnet *ifp) 1704 { 1705 struct cpswp_softc *sc; 1706 1707 sc = ifp->if_softc; 1708 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 1709 sc->swsc->tx.running == 0) { 1710 return; 1711 } 1712 CPSW_TX_LOCK(sc->swsc); 1713 cpswp_tx_enqueue(sc); 1714 cpsw_tx_dequeue(sc->swsc); 1715 CPSW_TX_UNLOCK(sc->swsc); 1716 } 1717 1718 static void 1719 cpsw_intr_tx(void *arg) 1720 { 1721 struct cpsw_softc *sc; 1722 1723 sc = (struct cpsw_softc *)arg; 1724 CPSW_TX_LOCK(sc); 1725 if (cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)) == 0xfffffffc) 1726 cpsw_write_cp(sc, &sc->tx, 0xfffffffc); 1727 cpsw_tx_dequeue(sc); 1728 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 2); 1729 CPSW_TX_UNLOCK(sc); 1730 } 1731 1732 static void 1733 cpswp_tx_enqueue(struct cpswp_softc *sc) 1734 { 1735 bus_dma_segment_t segs[CPSW_TXFRAGS]; 1736 struct cpsw_cpdma_bd bd; 1737 struct cpsw_slot *first_new_slot, *last, *last_old_slot, *next, *slot; 1738 struct mbuf *m0; 1739 int error, flags, nsegs, seg, added = 0, padlen; 1740 1741 flags = 0; 1742 if (sc->swsc->dualemac) { 1743 flags = CPDMA_BD_TO_PORT | 1744 ((sc->unit + 1) & CPDMA_BD_PORT_MASK); 1745 } 1746 /* Pull pending packets from IF queue and prep them for DMA. */ 1747 last = NULL; 1748 first_new_slot = NULL; 1749 last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next); 1750 while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) { 1751 IF_DEQUEUE(&sc->ifp->if_snd, m0); 1752 if (m0 == NULL) 1753 break; 1754 1755 slot->mbuf = m0; 1756 padlen = ETHER_MIN_LEN - slot->mbuf->m_pkthdr.len; 1757 if (padlen < 0) 1758 padlen = 0; 1759 1760 /* Create mapping in DMA memory */ 1761 error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag, 1762 slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); 1763 /* If the packet is too fragmented, try to simplify. */ 1764 if (error == EFBIG || 1765 (error == 0 && 1766 nsegs + (padlen > 0 ? 1 : 0) > sc->swsc->tx.avail_queue_len)) { 1767 bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); 1768 if (padlen > 0) /* May as well add padding. */ 1769 m_append(slot->mbuf, padlen, 1770 sc->swsc->null_mbuf->m_data); 1771 m0 = m_defrag(slot->mbuf, M_NOWAIT); 1772 if (m0 == NULL) { 1773 device_printf(sc->dev, 1774 "Can't defragment packet; dropping\n"); 1775 m_freem(slot->mbuf); 1776 } else { 1777 CPSW_DEBUGF(sc->swsc, 1778 ("Requeueing defragmented packet")); 1779 IF_PREPEND(&sc->ifp->if_snd, m0); 1780 } 1781 slot->mbuf = NULL; 1782 continue; 1783 } 1784 if (error != 0) { 1785 device_printf(sc->dev, 1786 "%s: Can't setup DMA (error=%d), dropping packet\n", 1787 __func__, error); 1788 bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); 1789 m_freem(slot->mbuf); 1790 slot->mbuf = NULL; 1791 break; 1792 } 1793 1794 bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap, 1795 BUS_DMASYNC_PREWRITE); 1796 1797 CPSW_DEBUGF(sc->swsc, 1798 ("Queueing TX packet: %d segments + %d pad bytes", 1799 nsegs, padlen)); 1800 1801 if (first_new_slot == NULL) 1802 first_new_slot = slot; 1803 1804 /* Link from the previous descriptor. */ 1805 if (last != NULL) 1806 cpsw_cpdma_write_bd_next(sc->swsc, last, slot); 1807 1808 slot->ifp = sc->ifp; 1809 1810 /* If there is only one segment, the for() loop 1811 * gets skipped and the single buffer gets set up 1812 * as both SOP and EOP. */ 1813 if (nsegs > 1) { 1814 next = STAILQ_NEXT(slot, next); 1815 bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); 1816 } else 1817 bd.next = 0; 1818 /* Start by setting up the first buffer. */ 1819 bd.bufptr = segs[0].ds_addr; 1820 bd.bufoff = 0; 1821 bd.buflen = segs[0].ds_len; 1822 bd.pktlen = m_length(slot->mbuf, NULL) + padlen; 1823 bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER | flags; 1824 for (seg = 1; seg < nsegs; ++seg) { 1825 /* Save the previous buffer (which isn't EOP) */ 1826 cpsw_cpdma_write_bd(sc->swsc, slot, &bd); 1827 STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); 1828 STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); 1829 slot = STAILQ_FIRST(&sc->swsc->tx.avail); 1830 1831 /* Setup next buffer (which isn't SOP) */ 1832 if (nsegs > seg + 1) { 1833 next = STAILQ_NEXT(slot, next); 1834 bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); 1835 } else 1836 bd.next = 0; 1837 bd.bufptr = segs[seg].ds_addr; 1838 bd.bufoff = 0; 1839 bd.buflen = segs[seg].ds_len; 1840 bd.pktlen = 0; 1841 bd.flags = CPDMA_BD_OWNER | flags; 1842 } 1843 /* Save the final buffer. */ 1844 if (padlen <= 0) 1845 bd.flags |= CPDMA_BD_EOP; 1846 else { 1847 next = STAILQ_NEXT(slot, next); 1848 bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); 1849 } 1850 cpsw_cpdma_write_bd(sc->swsc, slot, &bd); 1851 STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); 1852 STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); 1853 1854 if (padlen > 0) { 1855 slot = STAILQ_FIRST(&sc->swsc->tx.avail); 1856 1857 /* Setup buffer of null pad bytes (definitely EOP). */ 1858 bd.next = 0; 1859 bd.bufptr = sc->swsc->null_mbuf_paddr; 1860 bd.bufoff = 0; 1861 bd.buflen = padlen; 1862 bd.pktlen = 0; 1863 bd.flags = CPDMA_BD_EOP | CPDMA_BD_OWNER | flags; 1864 cpsw_cpdma_write_bd(sc->swsc, slot, &bd); 1865 ++nsegs; 1866 1867 STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); 1868 STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); 1869 } 1870 1871 last = slot; 1872 1873 added += nsegs; 1874 if (nsegs > sc->swsc->tx.longest_chain) 1875 sc->swsc->tx.longest_chain = nsegs; 1876 1877 // TODO: Should we defer the BPF tap until 1878 // after all packets are queued? 1879 BPF_MTAP(sc->ifp, m0); 1880 } 1881 1882 if (first_new_slot == NULL) 1883 return; 1884 1885 /* Attach the list of new buffers to the hardware TX queue. */ 1886 if (last_old_slot != NULL && 1887 (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) & 1888 CPDMA_BD_EOQ) == 0) { 1889 /* Add buffers to end of current queue. */ 1890 cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot, 1891 first_new_slot); 1892 } else { 1893 /* Start a fresh queue. */ 1894 cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot); 1895 } 1896 sc->swsc->tx.queue_adds += added; 1897 sc->swsc->tx.avail_queue_len -= added; 1898 sc->swsc->tx.active_queue_len += added; 1899 if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) { 1900 sc->swsc->tx.max_active_queue_len = sc->swsc->tx.active_queue_len; 1901 } 1902 CPSW_DEBUGF(sc->swsc, ("Queued %d TX packet(s)", added)); 1903 } 1904 1905 static int 1906 cpsw_tx_dequeue(struct cpsw_softc *sc) 1907 { 1908 struct cpsw_slot *slot, *last_removed_slot = NULL; 1909 struct cpsw_cpdma_bd bd; 1910 uint32_t flags, removed = 0; 1911 1912 /* Pull completed buffers off the hardware TX queue. */ 1913 slot = STAILQ_FIRST(&sc->tx.active); 1914 while (slot != NULL) { 1915 flags = cpsw_cpdma_read_bd_flags(sc, slot); 1916 1917 /* TearDown complete is only marked on the SOP for the packet. */ 1918 if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) == 1919 (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) { 1920 sc->tx.teardown = 1; 1921 } 1922 1923 if ((flags & CPDMA_BD_OWNER) != 0 && sc->tx.teardown == 0) 1924 break; /* Hardware is still using this packet. */ 1925 1926 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE); 1927 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1928 m_freem(slot->mbuf); 1929 slot->mbuf = NULL; 1930 1931 if (slot->ifp) { 1932 if (sc->tx.teardown == 0) 1933 if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1); 1934 else 1935 if_inc_counter(slot->ifp, IFCOUNTER_OQDROPS, 1); 1936 } 1937 1938 /* Dequeue any additional buffers used by this packet. */ 1939 while (slot != NULL && slot->mbuf == NULL) { 1940 STAILQ_REMOVE_HEAD(&sc->tx.active, next); 1941 STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next); 1942 ++removed; 1943 last_removed_slot = slot; 1944 slot = STAILQ_FIRST(&sc->tx.active); 1945 } 1946 1947 cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot); 1948 1949 /* Restart the TX queue if necessary. */ 1950 cpsw_cpdma_read_bd(sc, last_removed_slot, &bd); 1951 if (slot != NULL && bd.next != 0 && (bd.flags & 1952 (CPDMA_BD_EOP | CPDMA_BD_OWNER | CPDMA_BD_EOQ)) == 1953 (CPDMA_BD_EOP | CPDMA_BD_EOQ)) { 1954 cpsw_write_hdp_slot(sc, &sc->tx, slot); 1955 sc->tx.queue_restart++; 1956 break; 1957 } 1958 } 1959 1960 if (removed != 0) { 1961 sc->tx.queue_removes += removed; 1962 sc->tx.active_queue_len -= removed; 1963 sc->tx.avail_queue_len += removed; 1964 if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len) 1965 sc->tx.max_avail_queue_len = sc->tx.avail_queue_len; 1966 CPSW_DEBUGF(sc, ("TX removed %d completed packet(s)", removed)); 1967 } 1968 1969 if (sc->tx.teardown && STAILQ_EMPTY(&sc->tx.active)) { 1970 CPSW_DEBUGF(sc, ("TX teardown is complete")); 1971 sc->tx.teardown = 0; 1972 sc->tx.running = 0; 1973 } 1974 1975 return (removed); 1976 } 1977 1978 /* 1979 * 1980 * Miscellaneous interrupts. 1981 * 1982 */ 1983 1984 static void 1985 cpsw_intr_rx_thresh(void *arg) 1986 { 1987 struct cpsw_softc *sc; 1988 struct ifnet *ifp; 1989 struct mbuf *received, *next; 1990 1991 sc = (struct cpsw_softc *)arg; 1992 CPSW_RX_LOCK(sc); 1993 received = cpsw_rx_dequeue(sc); 1994 cpsw_rx_enqueue(sc); 1995 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0); 1996 CPSW_RX_UNLOCK(sc); 1997 1998 while (received != NULL) { 1999 next = received->m_nextpkt; 2000 received->m_nextpkt = NULL; 2001 ifp = received->m_pkthdr.rcvif; 2002 (*ifp->if_input)(ifp, received); 2003 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 2004 received = next; 2005 } 2006 } 2007 2008 static void 2009 cpsw_intr_misc_host_error(struct cpsw_softc *sc) 2010 { 2011 uint32_t intstat; 2012 uint32_t dmastat; 2013 int txerr, rxerr, txchan, rxchan; 2014 2015 printf("\n\n"); 2016 device_printf(sc->dev, 2017 "HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n"); 2018 printf("\n\n"); 2019 intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED); 2020 device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat); 2021 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS); 2022 device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat); 2023 2024 txerr = (dmastat >> 20) & 15; 2025 txchan = (dmastat >> 16) & 7; 2026 rxerr = (dmastat >> 12) & 15; 2027 rxchan = (dmastat >> 8) & 7; 2028 2029 switch (txerr) { 2030 case 0: break; 2031 case 1: printf("SOP error on TX channel %d\n", txchan); 2032 break; 2033 case 2: printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan); 2034 break; 2035 case 3: printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan); 2036 break; 2037 case 4: printf("Zero Buffer Pointer on TX channel %d\n", txchan); 2038 break; 2039 case 5: printf("Zero Buffer Length on TX channel %d\n", txchan); 2040 break; 2041 case 6: printf("Packet length error on TX channel %d\n", txchan); 2042 break; 2043 default: printf("Unknown error on TX channel %d\n", txchan); 2044 break; 2045 } 2046 2047 if (txerr != 0) { 2048 printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 2049 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan))); 2050 printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 2051 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan))); 2052 cpsw_dump_queue(sc, &sc->tx.active); 2053 } 2054 2055 switch (rxerr) { 2056 case 0: break; 2057 case 2: printf("Ownership bit not set on RX channel %d\n", rxchan); 2058 break; 2059 case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan); 2060 break; 2061 case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan); 2062 break; 2063 case 6: printf("Buffer offset too big on RX channel %d\n", rxchan); 2064 break; 2065 default: printf("Unknown RX error on RX channel %d\n", rxchan); 2066 break; 2067 } 2068 2069 if (rxerr != 0) { 2070 printf("CPSW_CPDMA_RX%d_HDP=0x%x\n", 2071 rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan))); 2072 printf("CPSW_CPDMA_RX%d_CP=0x%x\n", 2073 rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan))); 2074 cpsw_dump_queue(sc, &sc->rx.active); 2075 } 2076 2077 printf("\nALE Table\n"); 2078 cpsw_ale_dump_table(sc); 2079 2080 // XXX do something useful here?? 2081 panic("CPSW HOST ERROR INTERRUPT"); 2082 2083 // Suppress this interrupt in the future. 2084 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat); 2085 printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n"); 2086 // The watchdog will probably reset the controller 2087 // in a little while. It will probably fail again. 2088 } 2089 2090 static void 2091 cpsw_intr_misc(void *arg) 2092 { 2093 struct cpsw_softc *sc = arg; 2094 uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0)); 2095 2096 if (stat & CPSW_WR_C_MISC_EVNT_PEND) 2097 CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented")); 2098 if (stat & CPSW_WR_C_MISC_STAT_PEND) 2099 cpsw_stats_collect(sc); 2100 if (stat & CPSW_WR_C_MISC_HOST_PEND) 2101 cpsw_intr_misc_host_error(sc); 2102 if (stat & CPSW_WR_C_MISC_MDIOLINK) { 2103 cpsw_write_4(sc, MDIOLINKINTMASKED, 2104 cpsw_read_4(sc, MDIOLINKINTMASKED)); 2105 } 2106 if (stat & CPSW_WR_C_MISC_MDIOUSER) { 2107 CPSW_DEBUGF(sc, 2108 ("MDIO operation completed interrupt unimplemented")); 2109 } 2110 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3); 2111 } 2112 2113 /* 2114 * 2115 * Periodic Checks and Watchdog. 2116 * 2117 */ 2118 2119 static void 2120 cpswp_tick(void *msc) 2121 { 2122 struct cpswp_softc *sc = msc; 2123 2124 /* Check for media type change */ 2125 mii_tick(sc->mii); 2126 if (sc->media_status != sc->mii->mii_media.ifm_media) { 2127 printf("%s: media type changed (ifm_media=%x)\n", __func__, 2128 sc->mii->mii_media.ifm_media); 2129 cpswp_ifmedia_upd(sc->ifp); 2130 } 2131 2132 /* Schedule another timeout one second from now */ 2133 callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); 2134 } 2135 2136 static void 2137 cpswp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2138 { 2139 struct cpswp_softc *sc; 2140 struct mii_data *mii; 2141 2142 sc = ifp->if_softc; 2143 CPSW_DEBUGF(sc->swsc, ("")); 2144 CPSW_PORT_LOCK(sc); 2145 2146 mii = sc->mii; 2147 mii_pollstat(mii); 2148 2149 ifmr->ifm_active = mii->mii_media_active; 2150 ifmr->ifm_status = mii->mii_media_status; 2151 CPSW_PORT_UNLOCK(sc); 2152 } 2153 2154 static int 2155 cpswp_ifmedia_upd(struct ifnet *ifp) 2156 { 2157 struct cpswp_softc *sc; 2158 2159 sc = ifp->if_softc; 2160 CPSW_DEBUGF(sc->swsc, ("")); 2161 CPSW_PORT_LOCK(sc); 2162 mii_mediachg(sc->mii); 2163 sc->media_status = sc->mii->mii_media.ifm_media; 2164 CPSW_PORT_UNLOCK(sc); 2165 2166 return (0); 2167 } 2168 2169 static void 2170 cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc) 2171 { 2172 struct cpswp_softc *psc; 2173 int i; 2174 2175 cpsw_debugf_head("CPSW watchdog"); 2176 device_printf(sc->dev, "watchdog timeout\n"); 2177 printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 0, 2178 cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0))); 2179 printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 0, 2180 cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0))); 2181 cpsw_dump_queue(sc, &sc->tx.active); 2182 for (i = 0; i < CPSW_PORTS; i++) { 2183 if (!sc->dualemac && i != sc->active_slave) 2184 continue; 2185 psc = device_get_softc(sc->port[i].dev); 2186 CPSW_PORT_LOCK(psc); 2187 cpswp_stop_locked(psc); 2188 CPSW_PORT_UNLOCK(psc); 2189 } 2190 } 2191 2192 static void 2193 cpsw_tx_watchdog(void *msc) 2194 { 2195 struct cpsw_softc *sc; 2196 2197 sc = msc; 2198 CPSW_TX_LOCK(sc); 2199 if (sc->tx.active_queue_len == 0 || !sc->tx.running) { 2200 sc->watchdog.timer = 0; /* Nothing to do. */ 2201 } else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) { 2202 sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */ 2203 } else if (cpsw_tx_dequeue(sc) > 0) { 2204 sc->watchdog.timer = 0; /* We just did something. */ 2205 } else { 2206 /* There was something to do but it didn't get done. */ 2207 ++sc->watchdog.timer; 2208 if (sc->watchdog.timer > 5) { 2209 sc->watchdog.timer = 0; 2210 ++sc->watchdog.resets; 2211 cpsw_tx_watchdog_full_reset(sc); 2212 } 2213 } 2214 sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes; 2215 CPSW_TX_UNLOCK(sc); 2216 2217 /* Schedule another timeout one second from now */ 2218 callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); 2219 } 2220 2221 /* 2222 * 2223 * ALE support routines. 2224 * 2225 */ 2226 2227 static void 2228 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) 2229 { 2230 cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023); 2231 ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0); 2232 ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1); 2233 ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2); 2234 } 2235 2236 static void 2237 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) 2238 { 2239 cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]); 2240 cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]); 2241 cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]); 2242 cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023)); 2243 } 2244 2245 static void 2246 cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc) 2247 { 2248 int i; 2249 uint32_t ale_entry[3]; 2250 2251 /* First four entries are link address and broadcast. */ 2252 for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { 2253 cpsw_ale_read_entry(sc, i, ale_entry); 2254 if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR || 2255 ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) && 2256 ALE_MCAST(ale_entry) == 1) { /* MCast link addr */ 2257 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; 2258 cpsw_ale_write_entry(sc, i, ale_entry); 2259 } 2260 } 2261 } 2262 2263 static int 2264 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan, 2265 uint8_t *mac) 2266 { 2267 int free_index = -1, matching_index = -1, i; 2268 uint32_t ale_entry[3], ale_type; 2269 2270 /* Find a matching entry or a free entry. */ 2271 for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { 2272 cpsw_ale_read_entry(sc, i, ale_entry); 2273 2274 /* Entry Type[61:60] is 0 for free entry */ 2275 if (free_index < 0 && ALE_TYPE(ale_entry) == 0) 2276 free_index = i; 2277 2278 if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) && 2279 (((ale_entry[1] >> 0) & 0xFF) == mac[1]) && 2280 (((ale_entry[0] >>24) & 0xFF) == mac[2]) && 2281 (((ale_entry[0] >>16) & 0xFF) == mac[3]) && 2282 (((ale_entry[0] >> 8) & 0xFF) == mac[4]) && 2283 (((ale_entry[0] >> 0) & 0xFF) == mac[5])) { 2284 matching_index = i; 2285 break; 2286 } 2287 } 2288 2289 if (matching_index < 0) { 2290 if (free_index < 0) 2291 return (ENOMEM); 2292 i = free_index; 2293 } 2294 2295 if (vlan != -1) 2296 ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16; 2297 else 2298 ale_type = ALE_TYPE_ADDR << 28; 2299 2300 /* Set MAC address */ 2301 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 2302 ale_entry[1] = mac[0] << 8 | mac[1]; 2303 2304 /* Entry type[61:60] and Mcast fwd state[63:62] is fw(3). */ 2305 ale_entry[1] |= ALE_MCAST_FWD | ale_type; 2306 2307 /* Set portmask [68:66] */ 2308 ale_entry[2] = (portmap & 7) << 2; 2309 2310 cpsw_ale_write_entry(sc, i, ale_entry); 2311 2312 return 0; 2313 } 2314 2315 static void 2316 cpsw_ale_dump_table(struct cpsw_softc *sc) { 2317 int i; 2318 uint32_t ale_entry[3]; 2319 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { 2320 cpsw_ale_read_entry(sc, i, ale_entry); 2321 switch (ALE_TYPE(ale_entry)) { 2322 case ALE_TYPE_VLAN: 2323 printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], 2324 ale_entry[1], ale_entry[0]); 2325 printf("type: %u ", ALE_TYPE(ale_entry)); 2326 printf("vlan: %u ", ALE_VLAN(ale_entry)); 2327 printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry)); 2328 printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry)); 2329 printf("unreg flood: %u ", ALE_VLAN_UNREGFLOOD(ale_entry)); 2330 printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry)); 2331 printf("\n"); 2332 break; 2333 case ALE_TYPE_ADDR: 2334 case ALE_TYPE_VLAN_ADDR: 2335 printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], 2336 ale_entry[1], ale_entry[0]); 2337 printf("type: %u ", ALE_TYPE(ale_entry)); 2338 printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ", 2339 (ale_entry[1] >> 8) & 0xFF, 2340 (ale_entry[1] >> 0) & 0xFF, 2341 (ale_entry[0] >>24) & 0xFF, 2342 (ale_entry[0] >>16) & 0xFF, 2343 (ale_entry[0] >> 8) & 0xFF, 2344 (ale_entry[0] >> 0) & 0xFF); 2345 printf(ALE_MCAST(ale_entry) ? "mcast " : "ucast "); 2346 if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) 2347 printf("vlan: %u ", ALE_VLAN(ale_entry)); 2348 printf("port: %u ", ALE_PORTS(ale_entry)); 2349 printf("\n"); 2350 break; 2351 } 2352 } 2353 printf("\n"); 2354 } 2355 2356 static int 2357 cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge) 2358 { 2359 uint8_t *mac; 2360 uint32_t ale_entry[3], ale_type, portmask; 2361 struct ifmultiaddr *ifma; 2362 2363 if (sc->swsc->dualemac) { 2364 ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16; 2365 portmask = 1 << (sc->unit + 1) | 1 << 0; 2366 } else { 2367 ale_type = ALE_TYPE_ADDR << 28; 2368 portmask = 7; 2369 } 2370 2371 /* 2372 * Route incoming packets for our MAC address to Port 0 (host). 2373 * For simplicity, keep this entry at table index 0 for port 1 and 2374 * at index 2 for port 2 in the ALE. 2375 */ 2376 if_addr_rlock(sc->ifp); 2377 mac = LLADDR((struct sockaddr_dl *)sc->ifp->if_addr->ifa_addr); 2378 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 2379 ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */ 2380 ale_entry[2] = 0; /* port = 0 */ 2381 cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry); 2382 2383 /* Set outgoing MAC Address for slave port. */ 2384 cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1), 2385 mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]); 2386 cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1), 2387 mac[5] << 8 | mac[4]); 2388 if_addr_runlock(sc->ifp); 2389 2390 /* Keep the broadcast address at table entry 1 (or 3). */ 2391 ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */ 2392 /* ALE_MCAST_FWD, Addr type, upper 16 bits of Mac */ 2393 ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff; 2394 ale_entry[2] = portmask << 2; 2395 cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry); 2396 2397 /* SIOCDELMULTI doesn't specify the particular address 2398 being removed, so we have to remove all and rebuild. */ 2399 if (purge) 2400 cpsw_ale_remove_all_mc_entries(sc->swsc); 2401 2402 /* Set other multicast addrs desired. */ 2403 if_maddr_rlock(sc->ifp); 2404 TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) { 2405 if (ifma->ifma_addr->sa_family != AF_LINK) 2406 continue; 2407 cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan, 2408 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 2409 } 2410 if_maddr_runlock(sc->ifp); 2411 2412 return (0); 2413 } 2414 2415 static int 2416 cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports, 2417 int untag, int mcregflood, int mcunregflood) 2418 { 2419 int free_index, i, matching_index; 2420 uint32_t ale_entry[3]; 2421 2422 free_index = matching_index = -1; 2423 /* Find a matching entry or a free entry. */ 2424 for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) { 2425 cpsw_ale_read_entry(sc, i, ale_entry); 2426 2427 /* Entry Type[61:60] is 0 for free entry */ 2428 if (free_index < 0 && ALE_TYPE(ale_entry) == 0) 2429 free_index = i; 2430 2431 if (ALE_VLAN(ale_entry) == vlan) { 2432 matching_index = i; 2433 break; 2434 } 2435 } 2436 2437 if (matching_index < 0) { 2438 if (free_index < 0) 2439 return (-1); 2440 i = free_index; 2441 } 2442 2443 ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 | 2444 (mcunregflood & 7) << 8 | (ports & 7); 2445 ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16; 2446 ale_entry[2] = 0; 2447 cpsw_ale_write_entry(sc, i, ale_entry); 2448 2449 return (0); 2450 } 2451 2452 /* 2453 * 2454 * Statistics and Sysctls. 2455 * 2456 */ 2457 2458 #if 0 2459 static void 2460 cpsw_stats_dump(struct cpsw_softc *sc) 2461 { 2462 int i; 2463 uint32_t r; 2464 2465 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2466 r = cpsw_read_4(sc, CPSW_STATS_OFFSET + 2467 cpsw_stat_sysctls[i].reg); 2468 CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid, 2469 (intmax_t)sc->shadow_stats[i], r, 2470 (intmax_t)sc->shadow_stats[i] + r)); 2471 } 2472 } 2473 #endif 2474 2475 static void 2476 cpsw_stats_collect(struct cpsw_softc *sc) 2477 { 2478 int i; 2479 uint32_t r; 2480 2481 CPSW_DEBUGF(sc, ("Controller shadow statistics updated.")); 2482 2483 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2484 r = cpsw_read_4(sc, CPSW_STATS_OFFSET + 2485 cpsw_stat_sysctls[i].reg); 2486 sc->shadow_stats[i] += r; 2487 cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg, 2488 r); 2489 } 2490 } 2491 2492 static int 2493 cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS) 2494 { 2495 struct cpsw_softc *sc; 2496 struct cpsw_stat *stat; 2497 uint64_t result; 2498 2499 sc = (struct cpsw_softc *)arg1; 2500 stat = &cpsw_stat_sysctls[oidp->oid_number]; 2501 result = sc->shadow_stats[oidp->oid_number]; 2502 result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg); 2503 return (sysctl_handle_64(oidp, &result, 0, req)); 2504 } 2505 2506 static int 2507 cpsw_stat_attached(SYSCTL_HANDLER_ARGS) 2508 { 2509 struct cpsw_softc *sc; 2510 struct bintime t; 2511 unsigned result; 2512 2513 sc = (struct cpsw_softc *)arg1; 2514 getbinuptime(&t); 2515 bintime_sub(&t, &sc->attach_uptime); 2516 result = t.sec; 2517 return (sysctl_handle_int(oidp, &result, 0, req)); 2518 } 2519 2520 static int 2521 cpsw_intr_coalesce(SYSCTL_HANDLER_ARGS) 2522 { 2523 int error; 2524 struct cpsw_softc *sc; 2525 uint32_t ctrl, intr_per_ms; 2526 2527 sc = (struct cpsw_softc *)arg1; 2528 error = sysctl_handle_int(oidp, &sc->coal_us, 0, req); 2529 if (error != 0 || req->newptr == NULL) 2530 return (error); 2531 2532 ctrl = cpsw_read_4(sc, CPSW_WR_INT_CONTROL); 2533 ctrl &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK); 2534 if (sc->coal_us == 0) { 2535 /* Disable the interrupt pace hardware. */ 2536 cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl); 2537 cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), 0); 2538 cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), 0); 2539 return (0); 2540 } 2541 2542 if (sc->coal_us > CPSW_WR_C_IMAX_US_MAX) 2543 sc->coal_us = CPSW_WR_C_IMAX_US_MAX; 2544 if (sc->coal_us < CPSW_WR_C_IMAX_US_MIN) 2545 sc->coal_us = CPSW_WR_C_IMAX_US_MIN; 2546 intr_per_ms = 1000 / sc->coal_us; 2547 /* Just to make sure... */ 2548 if (intr_per_ms > CPSW_WR_C_IMAX_MAX) 2549 intr_per_ms = CPSW_WR_C_IMAX_MAX; 2550 if (intr_per_ms < CPSW_WR_C_IMAX_MIN) 2551 intr_per_ms = CPSW_WR_C_IMAX_MIN; 2552 2553 /* Set the prescale to produce 4us pulses from the 125 Mhz clock. */ 2554 ctrl |= (125 * 4) & CPSW_WR_INT_PRESCALE_MASK; 2555 2556 /* Enable the interrupt pace hardware. */ 2557 cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), intr_per_ms); 2558 cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), intr_per_ms); 2559 ctrl |= CPSW_WR_INT_C0_RX_PULSE | CPSW_WR_INT_C0_TX_PULSE; 2560 cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl); 2561 2562 return (0); 2563 } 2564 2565 static int 2566 cpsw_stat_uptime(SYSCTL_HANDLER_ARGS) 2567 { 2568 struct cpsw_softc *swsc; 2569 struct cpswp_softc *sc; 2570 struct bintime t; 2571 unsigned result; 2572 2573 swsc = arg1; 2574 sc = device_get_softc(swsc->port[arg2].dev); 2575 if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) { 2576 getbinuptime(&t); 2577 bintime_sub(&t, &sc->init_uptime); 2578 result = t.sec; 2579 } else 2580 result = 0; 2581 return (sysctl_handle_int(oidp, &result, 0, req)); 2582 } 2583 2584 static void 2585 cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, 2586 struct cpsw_queue *queue) 2587 { 2588 struct sysctl_oid_list *parent; 2589 2590 parent = SYSCTL_CHILDREN(node); 2591 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers", 2592 CTLFLAG_RD, &queue->queue_slots, 0, 2593 "Total buffers currently assigned to this queue"); 2594 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers", 2595 CTLFLAG_RD, &queue->active_queue_len, 0, 2596 "Buffers currently registered with hardware controller"); 2597 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers", 2598 CTLFLAG_RD, &queue->max_active_queue_len, 0, 2599 "Max value of activeBuffers since last driver reset"); 2600 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers", 2601 CTLFLAG_RD, &queue->avail_queue_len, 0, 2602 "Buffers allocated to this queue but not currently " 2603 "registered with hardware controller"); 2604 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers", 2605 CTLFLAG_RD, &queue->max_avail_queue_len, 0, 2606 "Max value of availBuffers since last driver reset"); 2607 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued", 2608 CTLFLAG_RD, &queue->queue_adds, 0, 2609 "Total buffers added to queue"); 2610 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued", 2611 CTLFLAG_RD, &queue->queue_removes, 0, 2612 "Total buffers removed from queue"); 2613 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "queueRestart", 2614 CTLFLAG_RD, &queue->queue_restart, 0, 2615 "Total times the queue has been restarted"); 2616 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain", 2617 CTLFLAG_RD, &queue->longest_chain, 0, 2618 "Max buffers used for a single packet"); 2619 } 2620 2621 static void 2622 cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, 2623 struct cpsw_softc *sc) 2624 { 2625 struct sysctl_oid_list *parent; 2626 2627 parent = SYSCTL_CHILDREN(node); 2628 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets", 2629 CTLFLAG_RD, &sc->watchdog.resets, 0, 2630 "Total number of watchdog resets"); 2631 } 2632 2633 static void 2634 cpsw_add_sysctls(struct cpsw_softc *sc) 2635 { 2636 struct sysctl_ctx_list *ctx; 2637 struct sysctl_oid *stats_node, *queue_node, *node; 2638 struct sysctl_oid_list *parent, *stats_parent, *queue_parent; 2639 struct sysctl_oid_list *ports_parent, *port_parent; 2640 char port[16]; 2641 int i; 2642 2643 ctx = device_get_sysctl_ctx(sc->dev); 2644 parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 2645 2646 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug", 2647 CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages"); 2648 2649 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "rx_batch", 2650 CTLFLAG_RW, &sc->rx_batch, 0, "Set the rx batch size"); 2651 2652 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs", 2653 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU", 2654 "Time since driver attach"); 2655 2656 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "intr_coalesce_us", 2657 CTLTYPE_UINT | CTLFLAG_RW, sc, 0, cpsw_intr_coalesce, "IU", 2658 "minimum time between interrupts"); 2659 2660 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports", 2661 CTLFLAG_RD, NULL, "CPSW Ports Statistics"); 2662 ports_parent = SYSCTL_CHILDREN(node); 2663 for (i = 0; i < CPSW_PORTS; i++) { 2664 if (!sc->dualemac && i != sc->active_slave) 2665 continue; 2666 port[0] = '0' + i; 2667 port[1] = '\0'; 2668 node = SYSCTL_ADD_NODE(ctx, ports_parent, OID_AUTO, 2669 port, CTLFLAG_RD, NULL, "CPSW Port Statistics"); 2670 port_parent = SYSCTL_CHILDREN(node); 2671 SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime", 2672 CTLTYPE_UINT | CTLFLAG_RD, sc, i, 2673 cpsw_stat_uptime, "IU", "Seconds since driver init"); 2674 } 2675 2676 stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", 2677 CTLFLAG_RD, NULL, "CPSW Statistics"); 2678 stats_parent = SYSCTL_CHILDREN(stats_node); 2679 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2680 SYSCTL_ADD_PROC(ctx, stats_parent, i, 2681 cpsw_stat_sysctls[i].oid, 2682 CTLTYPE_U64 | CTLFLAG_RD, sc, 0, 2683 cpsw_stats_sysctl, "IU", 2684 cpsw_stat_sysctls[i].oid); 2685 } 2686 2687 queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue", 2688 CTLFLAG_RD, NULL, "CPSW Queue Statistics"); 2689 queue_parent = SYSCTL_CHILDREN(queue_node); 2690 2691 node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx", 2692 CTLFLAG_RD, NULL, "TX Queue Statistics"); 2693 cpsw_add_queue_sysctls(ctx, node, &sc->tx); 2694 2695 node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx", 2696 CTLFLAG_RD, NULL, "RX Queue Statistics"); 2697 cpsw_add_queue_sysctls(ctx, node, &sc->rx); 2698 2699 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog", 2700 CTLFLAG_RD, NULL, "Watchdog Statistics"); 2701 cpsw_add_watchdog_sysctls(ctx, node, sc); 2702 } 2703