1 /*- 2 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org> 3 * Copyright (c) 2016 Rubicon Communications, LLC (Netgate) 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 /* 29 * TI Common Platform Ethernet Switch (CPSW) Driver 30 * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs. 31 * 32 * This controller is documented in the AM335x Technical Reference 33 * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM 34 * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM. 35 * 36 * It is basically a single Ethernet port (port 0) wired internally to 37 * a 3-port store-and-forward switch connected to two independent 38 * "sliver" controllers (port 1 and port 2). You can operate the 39 * controller in a variety of different ways by suitably configuring 40 * the slivers and the Address Lookup Engine (ALE) that routes packets 41 * between the ports. 42 * 43 * This code was developed and tested on a BeagleBone with 44 * an AM335x SoC. 45 */ 46 47 #include <sys/cdefs.h> 48 __FBSDID("$FreeBSD$"); 49 50 #include <sys/param.h> 51 #include <sys/bus.h> 52 #include <sys/kernel.h> 53 #include <sys/lock.h> 54 #include <sys/mbuf.h> 55 #include <sys/module.h> 56 #include <sys/mutex.h> 57 #include <sys/rman.h> 58 #include <sys/socket.h> 59 #include <sys/sockio.h> 60 #include <sys/sysctl.h> 61 62 #include <machine/bus.h> 63 #include <machine/resource.h> 64 #include <machine/stdarg.h> 65 66 #include <net/ethernet.h> 67 #include <net/bpf.h> 68 #include <net/if.h> 69 #include <net/if_dl.h> 70 #include <net/if_media.h> 71 #include <net/if_types.h> 72 73 #include <arm/ti/ti_scm.h> 74 #include <arm/ti/am335x/am335x_scm.h> 75 76 #include <dev/mii/mii.h> 77 #include <dev/mii/miivar.h> 78 79 #include <dev/ofw/ofw_bus.h> 80 #include <dev/ofw/ofw_bus_subr.h> 81 82 #include "if_cpswreg.h" 83 #include "if_cpswvar.h" 84 85 #include "miibus_if.h" 86 87 /* Device probe/attach/detach. */ 88 static int cpsw_probe(device_t); 89 static int cpsw_attach(device_t); 90 static int cpsw_detach(device_t); 91 static int cpswp_probe(device_t); 92 static int cpswp_attach(device_t); 93 static int cpswp_detach(device_t); 94 95 static phandle_t cpsw_get_node(device_t, device_t); 96 97 /* Device Init/shutdown. */ 98 static int cpsw_shutdown(device_t); 99 static void cpswp_init(void *); 100 static void cpswp_init_locked(void *); 101 static void cpswp_stop_locked(struct cpswp_softc *); 102 103 /* Device Suspend/Resume. */ 104 static int cpsw_suspend(device_t); 105 static int cpsw_resume(device_t); 106 107 /* Ioctl. */ 108 static int cpswp_ioctl(struct ifnet *, u_long command, caddr_t data); 109 110 static int cpswp_miibus_readreg(device_t, int phy, int reg); 111 static int cpswp_miibus_writereg(device_t, int phy, int reg, int value); 112 static void cpswp_miibus_statchg(device_t); 113 114 /* Send/Receive packets. */ 115 static void cpsw_intr_rx(void *arg); 116 static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *); 117 static void cpsw_rx_enqueue(struct cpsw_softc *); 118 static void cpswp_start(struct ifnet *); 119 static void cpsw_intr_tx(void *); 120 static void cpswp_tx_enqueue(struct cpswp_softc *); 121 static int cpsw_tx_dequeue(struct cpsw_softc *); 122 123 /* Misc interrupts and watchdog. */ 124 static void cpsw_intr_rx_thresh(void *); 125 static void cpsw_intr_misc(void *); 126 static void cpswp_tick(void *); 127 static void cpswp_ifmedia_sts(struct ifnet *, struct ifmediareq *); 128 static int cpswp_ifmedia_upd(struct ifnet *); 129 static void cpsw_tx_watchdog(void *); 130 131 /* ALE support */ 132 static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *); 133 static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *); 134 static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *); 135 static void cpsw_ale_dump_table(struct cpsw_softc *); 136 static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int, 137 int); 138 static int cpswp_ale_update_addresses(struct cpswp_softc *, int); 139 140 /* Statistics and sysctls. */ 141 static void cpsw_add_sysctls(struct cpsw_softc *); 142 static void cpsw_stats_collect(struct cpsw_softc *); 143 static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS); 144 145 /* 146 * Arbitrary limit on number of segments in an mbuf to be transmitted. 147 * Packets with more segments than this will be defragmented before 148 * they are queued. 149 */ 150 #define CPSW_TXFRAGS 16 151 152 /* Shared resources. */ 153 static device_method_t cpsw_methods[] = { 154 /* Device interface */ 155 DEVMETHOD(device_probe, cpsw_probe), 156 DEVMETHOD(device_attach, cpsw_attach), 157 DEVMETHOD(device_detach, cpsw_detach), 158 DEVMETHOD(device_shutdown, cpsw_shutdown), 159 DEVMETHOD(device_suspend, cpsw_suspend), 160 DEVMETHOD(device_resume, cpsw_resume), 161 /* OFW methods */ 162 DEVMETHOD(ofw_bus_get_node, cpsw_get_node), 163 DEVMETHOD_END 164 }; 165 166 static driver_t cpsw_driver = { 167 "cpswss", 168 cpsw_methods, 169 sizeof(struct cpsw_softc), 170 }; 171 172 static devclass_t cpsw_devclass; 173 174 DRIVER_MODULE(cpswss, simplebus, cpsw_driver, cpsw_devclass, 0, 0); 175 176 /* Port/Slave resources. */ 177 static device_method_t cpswp_methods[] = { 178 /* Device interface */ 179 DEVMETHOD(device_probe, cpswp_probe), 180 DEVMETHOD(device_attach, cpswp_attach), 181 DEVMETHOD(device_detach, cpswp_detach), 182 /* MII interface */ 183 DEVMETHOD(miibus_readreg, cpswp_miibus_readreg), 184 DEVMETHOD(miibus_writereg, cpswp_miibus_writereg), 185 DEVMETHOD(miibus_statchg, cpswp_miibus_statchg), 186 DEVMETHOD_END 187 }; 188 189 static driver_t cpswp_driver = { 190 "cpsw", 191 cpswp_methods, 192 sizeof(struct cpswp_softc), 193 }; 194 195 static devclass_t cpswp_devclass; 196 197 DRIVER_MODULE(cpsw, cpswss, cpswp_driver, cpswp_devclass, 0, 0); 198 DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0); 199 MODULE_DEPEND(cpsw, ether, 1, 1, 1); 200 MODULE_DEPEND(cpsw, miibus, 1, 1, 1); 201 202 static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 }; 203 204 static struct resource_spec irq_res_spec[] = { 205 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 206 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE }, 207 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE }, 208 { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE }, 209 { -1, 0 } 210 }; 211 212 static struct { 213 void (*cb)(void *); 214 } cpsw_intr_cb[] = { 215 { cpsw_intr_rx_thresh }, 216 { cpsw_intr_rx }, 217 { cpsw_intr_tx }, 218 { cpsw_intr_misc }, 219 }; 220 221 /* Number of entries here must match size of stats 222 * array in struct cpswp_softc. */ 223 static struct cpsw_stat { 224 int reg; 225 char *oid; 226 } cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = { 227 {0x00, "GoodRxFrames"}, 228 {0x04, "BroadcastRxFrames"}, 229 {0x08, "MulticastRxFrames"}, 230 {0x0C, "PauseRxFrames"}, 231 {0x10, "RxCrcErrors"}, 232 {0x14, "RxAlignErrors"}, 233 {0x18, "OversizeRxFrames"}, 234 {0x1c, "RxJabbers"}, 235 {0x20, "ShortRxFrames"}, 236 {0x24, "RxFragments"}, 237 {0x30, "RxOctets"}, 238 {0x34, "GoodTxFrames"}, 239 {0x38, "BroadcastTxFrames"}, 240 {0x3c, "MulticastTxFrames"}, 241 {0x40, "PauseTxFrames"}, 242 {0x44, "DeferredTxFrames"}, 243 {0x48, "CollisionsTxFrames"}, 244 {0x4c, "SingleCollisionTxFrames"}, 245 {0x50, "MultipleCollisionTxFrames"}, 246 {0x54, "ExcessiveCollisions"}, 247 {0x58, "LateCollisions"}, 248 {0x5c, "TxUnderrun"}, 249 {0x60, "CarrierSenseErrors"}, 250 {0x64, "TxOctets"}, 251 {0x68, "RxTx64OctetFrames"}, 252 {0x6c, "RxTx65to127OctetFrames"}, 253 {0x70, "RxTx128to255OctetFrames"}, 254 {0x74, "RxTx256to511OctetFrames"}, 255 {0x78, "RxTx512to1024OctetFrames"}, 256 {0x7c, "RxTx1024upOctetFrames"}, 257 {0x80, "NetOctets"}, 258 {0x84, "RxStartOfFrameOverruns"}, 259 {0x88, "RxMiddleOfFrameOverruns"}, 260 {0x8c, "RxDmaOverruns"} 261 }; 262 263 /* 264 * Basic debug support. 265 */ 266 267 static void 268 cpsw_debugf_head(const char *funcname) 269 { 270 int t = (int)(time_second % (24 * 60 * 60)); 271 272 printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname); 273 } 274 275 static void 276 cpsw_debugf(const char *fmt, ...) 277 { 278 va_list ap; 279 280 va_start(ap, fmt); 281 vprintf(fmt, ap); 282 va_end(ap); 283 printf("\n"); 284 285 } 286 287 #define CPSW_DEBUGF(_sc, a) do { \ 288 if ((_sc)->debug) { \ 289 cpsw_debugf_head(__func__); \ 290 cpsw_debugf a; \ 291 } \ 292 } while (0) 293 294 /* 295 * Locking macros 296 */ 297 #define CPSW_TX_LOCK(sc) do { \ 298 mtx_assert(&(sc)->rx.lock, MA_NOTOWNED); \ 299 mtx_lock(&(sc)->tx.lock); \ 300 } while (0) 301 302 #define CPSW_TX_UNLOCK(sc) mtx_unlock(&(sc)->tx.lock) 303 #define CPSW_TX_LOCK_ASSERT(sc) mtx_assert(&(sc)->tx.lock, MA_OWNED) 304 305 #define CPSW_RX_LOCK(sc) do { \ 306 mtx_assert(&(sc)->tx.lock, MA_NOTOWNED); \ 307 mtx_lock(&(sc)->rx.lock); \ 308 } while (0) 309 310 #define CPSW_RX_UNLOCK(sc) mtx_unlock(&(sc)->rx.lock) 311 #define CPSW_RX_LOCK_ASSERT(sc) mtx_assert(&(sc)->rx.lock, MA_OWNED) 312 313 #define CPSW_PORT_LOCK(_sc) do { \ 314 mtx_assert(&(_sc)->lock, MA_NOTOWNED); \ 315 mtx_lock(&(_sc)->lock); \ 316 } while (0) 317 318 #define CPSW_PORT_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) 319 #define CPSW_PORT_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->lock, MA_OWNED) 320 321 /* 322 * Read/Write macros 323 */ 324 #define cpsw_read_4(_sc, _reg) bus_read_4((_sc)->mem_res, (_reg)) 325 #define cpsw_write_4(_sc, _reg, _val) \ 326 bus_write_4((_sc)->mem_res, (_reg), (_val)) 327 328 #define cpsw_cpdma_bd_offset(i) (CPSW_CPPI_RAM_OFFSET + ((i)*16)) 329 330 #define cpsw_cpdma_bd_paddr(sc, slot) \ 331 BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset) 332 #define cpsw_cpdma_read_bd(sc, slot, val) \ 333 bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) 334 #define cpsw_cpdma_write_bd(sc, slot, val) \ 335 bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) 336 #define cpsw_cpdma_write_bd_next(sc, slot, next_slot) \ 337 cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot)) 338 #define cpsw_cpdma_write_bd_flags(sc, slot, val) \ 339 bus_write_2(sc->mem_res, slot->bd_offset + 14, val) 340 #define cpsw_cpdma_read_bd_flags(sc, slot) \ 341 bus_read_2(sc->mem_res, slot->bd_offset + 14) 342 #define cpsw_write_hdp_slot(sc, queue, slot) \ 343 cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot)) 344 #define CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0)) 345 #define cpsw_read_cp(sc, queue) \ 346 cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET) 347 #define cpsw_write_cp(sc, queue, val) \ 348 cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val)) 349 #define cpsw_write_cp_slot(sc, queue, slot) \ 350 cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot)) 351 352 #if 0 353 /* XXX temporary function versions for debugging. */ 354 static void 355 cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) 356 { 357 uint32_t reg = queue->hdp_offset; 358 uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); 359 CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg))); 360 cpsw_write_4(sc, reg, v); 361 } 362 363 static void 364 cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) 365 { 366 uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); 367 CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue))); 368 cpsw_write_cp(sc, queue, v); 369 } 370 #endif 371 372 /* 373 * Expanded dump routines for verbose debugging. 374 */ 375 static void 376 cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) 377 { 378 static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ", 379 "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun", 380 "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1", 381 "Port0"}; 382 struct cpsw_cpdma_bd bd; 383 const char *sep; 384 int i; 385 386 cpsw_cpdma_read_bd(sc, slot, &bd); 387 printf("BD Addr : 0x%08x Next : 0x%08x\n", 388 cpsw_cpdma_bd_paddr(sc, slot), bd.next); 389 printf(" BufPtr: 0x%08x BufLen: 0x%08x\n", bd.bufptr, bd.buflen); 390 printf(" BufOff: 0x%08x PktLen: 0x%08x\n", bd.bufoff, bd.pktlen); 391 printf(" Flags: "); 392 sep = ""; 393 for (i = 0; i < 16; ++i) { 394 if (bd.flags & (1 << (15 - i))) { 395 printf("%s%s", sep, flags[i]); 396 sep = ","; 397 } 398 } 399 printf("\n"); 400 if (slot->mbuf) { 401 printf(" Ether: %14D\n", 402 (char *)(slot->mbuf->m_data), " "); 403 printf(" Packet: %16D\n", 404 (char *)(slot->mbuf->m_data) + 14, " "); 405 } 406 } 407 408 #define CPSW_DUMP_SLOT(cs, slot) do { \ 409 IF_DEBUG(sc) { \ 410 cpsw_dump_slot(sc, slot); \ 411 } \ 412 } while (0) 413 414 static void 415 cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q) 416 { 417 struct cpsw_slot *slot; 418 int i = 0; 419 int others = 0; 420 421 STAILQ_FOREACH(slot, q, next) { 422 if (i > CPSW_TXFRAGS) 423 ++others; 424 else 425 cpsw_dump_slot(sc, slot); 426 ++i; 427 } 428 if (others) 429 printf(" ... and %d more.\n", others); 430 printf("\n"); 431 } 432 433 #define CPSW_DUMP_QUEUE(sc, q) do { \ 434 IF_DEBUG(sc) { \ 435 cpsw_dump_queue(sc, q); \ 436 } \ 437 } while (0) 438 439 static void 440 cpsw_init_slots(struct cpsw_softc *sc) 441 { 442 struct cpsw_slot *slot; 443 int i; 444 445 STAILQ_INIT(&sc->avail); 446 447 /* Put the slot descriptors onto the global avail list. */ 448 for (i = 0; i < nitems(sc->_slots); i++) { 449 slot = &sc->_slots[i]; 450 slot->bd_offset = cpsw_cpdma_bd_offset(i); 451 STAILQ_INSERT_TAIL(&sc->avail, slot, next); 452 } 453 } 454 455 static int 456 cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested) 457 { 458 const int max_slots = nitems(sc->_slots); 459 struct cpsw_slot *slot; 460 int i; 461 462 if (requested < 0) 463 requested = max_slots; 464 465 for (i = 0; i < requested; ++i) { 466 slot = STAILQ_FIRST(&sc->avail); 467 if (slot == NULL) 468 return (0); 469 if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) { 470 device_printf(sc->dev, "failed to create dmamap\n"); 471 return (ENOMEM); 472 } 473 STAILQ_REMOVE_HEAD(&sc->avail, next); 474 STAILQ_INSERT_TAIL(&queue->avail, slot, next); 475 ++queue->avail_queue_len; 476 ++queue->queue_slots; 477 } 478 return (0); 479 } 480 481 static void 482 cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) 483 { 484 int error; 485 486 if (slot->dmamap) { 487 if (slot->mbuf) 488 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 489 error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap); 490 KASSERT(error == 0, ("Mapping still active")); 491 slot->dmamap = NULL; 492 } 493 if (slot->mbuf) { 494 m_freem(slot->mbuf); 495 slot->mbuf = NULL; 496 } 497 } 498 499 static void 500 cpsw_reset(struct cpsw_softc *sc) 501 { 502 int i; 503 504 callout_stop(&sc->watchdog.callout); 505 506 /* Reset RMII/RGMII wrapper. */ 507 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1); 508 while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1) 509 ; 510 511 /* Disable TX and RX interrupts for all cores. */ 512 for (i = 0; i < 3; ++i) { 513 cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00); 514 cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00); 515 cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00); 516 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00); 517 } 518 519 /* Reset CPSW subsystem. */ 520 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1); 521 while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1) 522 ; 523 524 /* Reset Sliver port 1 and 2 */ 525 for (i = 0; i < 2; i++) { 526 /* Reset */ 527 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1); 528 while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1) 529 ; 530 } 531 532 /* Reset DMA controller. */ 533 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1); 534 while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1) 535 ; 536 537 /* Disable TX & RX DMA */ 538 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0); 539 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0); 540 541 /* Clear all queues. */ 542 for (i = 0; i < 8; i++) { 543 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0); 544 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0); 545 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0); 546 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0); 547 } 548 549 /* Clear all interrupt Masks */ 550 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF); 551 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF); 552 } 553 554 static void 555 cpsw_init(struct cpsw_softc *sc) 556 { 557 struct cpsw_slot *slot; 558 uint32_t reg; 559 560 /* Disable the interrupt pacing. */ 561 reg = cpsw_read_4(sc, CPSW_WR_INT_CONTROL); 562 reg &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK); 563 cpsw_write_4(sc, CPSW_WR_INT_CONTROL, reg); 564 565 /* Clear ALE */ 566 cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL); 567 568 /* Enable ALE */ 569 reg = CPSW_ALE_CTL_ENABLE; 570 if (sc->dualemac) 571 reg |= CPSW_ALE_CTL_VLAN_AWARE; 572 cpsw_write_4(sc, CPSW_ALE_CONTROL, reg); 573 574 /* Set Host Port Mapping. */ 575 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210); 576 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0); 577 578 /* Initialize ALE: set host port to forwarding(3). */ 579 cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 3); 580 581 cpsw_write_4(sc, CPSW_SS_PTYPE, 0); 582 583 /* Enable statistics for ports 0, 1 and 2 */ 584 cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7); 585 586 /* Turn off flow control. */ 587 cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0); 588 589 /* Make IP hdr aligned with 4 */ 590 cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2); 591 592 /* Initialize RX Buffer Descriptors */ 593 cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), 0); 594 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0); 595 596 /* Enable TX & RX DMA */ 597 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1); 598 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1); 599 600 /* Enable Interrupts for core 0 */ 601 cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF); 602 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF); 603 cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0xFF); 604 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F); 605 606 /* Enable host Error Interrupt */ 607 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3); 608 609 /* Enable interrupts for RX and TX on Channel 0 */ 610 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 611 CPSW_CPDMA_RX_INT(0) | CPSW_CPDMA_RX_INT_THRESH(0)); 612 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1); 613 614 /* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */ 615 /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */ 616 cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff); 617 618 /* Select MII in GMII_SEL, Internal Delay mode */ 619 //ti_scm_reg_write_4(0x650, 0); 620 621 /* Initialize active queues. */ 622 slot = STAILQ_FIRST(&sc->tx.active); 623 if (slot != NULL) 624 cpsw_write_hdp_slot(sc, &sc->tx, slot); 625 slot = STAILQ_FIRST(&sc->rx.active); 626 if (slot != NULL) 627 cpsw_write_hdp_slot(sc, &sc->rx, slot); 628 cpsw_rx_enqueue(sc); 629 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), sc->rx.active_queue_len); 630 cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), CPSW_TXFRAGS); 631 632 /* Activate network interface. */ 633 sc->rx.running = 1; 634 sc->tx.running = 1; 635 sc->watchdog.timer = 0; 636 callout_init(&sc->watchdog.callout, 0); 637 callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); 638 } 639 640 /* 641 * 642 * Device Probe, Attach, Detach. 643 * 644 */ 645 646 static int 647 cpsw_probe(device_t dev) 648 { 649 650 if (!ofw_bus_status_okay(dev)) 651 return (ENXIO); 652 653 if (!ofw_bus_is_compatible(dev, "ti,cpsw")) 654 return (ENXIO); 655 656 device_set_desc(dev, "3-port Switch Ethernet Subsystem"); 657 return (BUS_PROBE_DEFAULT); 658 } 659 660 static int 661 cpsw_intr_attach(struct cpsw_softc *sc) 662 { 663 int i; 664 665 for (i = 0; i < CPSW_INTR_COUNT; i++) { 666 if (bus_setup_intr(sc->dev, sc->irq_res[i], 667 INTR_TYPE_NET | INTR_MPSAFE, NULL, 668 cpsw_intr_cb[i].cb, sc, &sc->ih_cookie[i]) != 0) { 669 return (-1); 670 } 671 } 672 673 return (0); 674 } 675 676 static void 677 cpsw_intr_detach(struct cpsw_softc *sc) 678 { 679 int i; 680 681 for (i = 0; i < CPSW_INTR_COUNT; i++) { 682 if (sc->ih_cookie[i]) { 683 bus_teardown_intr(sc->dev, sc->irq_res[i], 684 sc->ih_cookie[i]); 685 } 686 } 687 } 688 689 static int 690 cpsw_get_fdt_data(struct cpsw_softc *sc, int port) 691 { 692 char *name; 693 int len, phy, vlan; 694 pcell_t phy_id[3], vlan_id; 695 phandle_t child; 696 unsigned long mdio_child_addr; 697 698 /* Find any slave with phy_id */ 699 phy = -1; 700 vlan = -1; 701 for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) { 702 if (OF_getprop_alloc(child, "name", 1, (void **)&name) < 0) 703 continue; 704 if (sscanf(name, "slave@%x", &mdio_child_addr) != 1) { 705 OF_prop_free(name); 706 continue; 707 } 708 OF_prop_free(name); 709 if (mdio_child_addr != slave_mdio_addr[port]) 710 continue; 711 712 len = OF_getproplen(child, "phy_id"); 713 if (len / sizeof(pcell_t) == 2) { 714 /* Get phy address from fdt */ 715 if (OF_getencprop(child, "phy_id", phy_id, len) > 0) 716 phy = phy_id[1]; 717 } 718 719 len = OF_getproplen(child, "dual_emac_res_vlan"); 720 if (len / sizeof(pcell_t) == 1) { 721 /* Get phy address from fdt */ 722 if (OF_getencprop(child, "dual_emac_res_vlan", 723 &vlan_id, len) > 0) { 724 vlan = vlan_id; 725 } 726 } 727 728 break; 729 } 730 if (phy == -1) 731 return (ENXIO); 732 sc->port[port].phy = phy; 733 sc->port[port].vlan = vlan; 734 735 return (0); 736 } 737 738 static int 739 cpsw_attach(device_t dev) 740 { 741 bus_dma_segment_t segs[1]; 742 int error, i, nsegs; 743 struct cpsw_softc *sc; 744 uint32_t reg; 745 746 sc = device_get_softc(dev); 747 sc->dev = dev; 748 sc->node = ofw_bus_get_node(dev); 749 getbinuptime(&sc->attach_uptime); 750 751 if (OF_getencprop(sc->node, "active_slave", &sc->active_slave, 752 sizeof(sc->active_slave)) <= 0) { 753 sc->active_slave = 0; 754 } 755 if (sc->active_slave > 1) 756 sc->active_slave = 1; 757 758 if (OF_hasprop(sc->node, "dual_emac")) 759 sc->dualemac = 1; 760 761 for (i = 0; i < CPSW_PORTS; i++) { 762 if (!sc->dualemac && i != sc->active_slave) 763 continue; 764 if (cpsw_get_fdt_data(sc, i) != 0) { 765 device_printf(dev, 766 "failed to get PHY address from FDT\n"); 767 return (ENXIO); 768 } 769 } 770 771 /* Initialize mutexes */ 772 mtx_init(&sc->tx.lock, device_get_nameunit(dev), 773 "cpsw TX lock", MTX_DEF); 774 mtx_init(&sc->rx.lock, device_get_nameunit(dev), 775 "cpsw RX lock", MTX_DEF); 776 777 /* Allocate IRQ resources */ 778 error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res); 779 if (error) { 780 device_printf(dev, "could not allocate IRQ resources\n"); 781 cpsw_detach(dev); 782 return (ENXIO); 783 } 784 785 sc->mem_rid = 0; 786 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 787 &sc->mem_rid, RF_ACTIVE); 788 if (sc->mem_res == NULL) { 789 device_printf(sc->dev, "failed to allocate memory resource\n"); 790 cpsw_detach(dev); 791 return (ENXIO); 792 } 793 794 reg = cpsw_read_4(sc, CPSW_SS_IDVER); 795 device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7), 796 reg & 0xFF, (reg >> 11) & 0x1F); 797 798 cpsw_add_sysctls(sc); 799 800 /* Allocate a busdma tag and DMA safe memory for mbufs. */ 801 error = bus_dma_tag_create( 802 bus_get_dma_tag(sc->dev), /* parent */ 803 1, 0, /* alignment, boundary */ 804 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 805 BUS_SPACE_MAXADDR, /* highaddr */ 806 NULL, NULL, /* filtfunc, filtfuncarg */ 807 MCLBYTES, CPSW_TXFRAGS, /* maxsize, nsegments */ 808 MCLBYTES, 0, /* maxsegsz, flags */ 809 NULL, NULL, /* lockfunc, lockfuncarg */ 810 &sc->mbuf_dtag); /* dmatag */ 811 if (error) { 812 device_printf(dev, "bus_dma_tag_create failed\n"); 813 cpsw_detach(dev); 814 return (error); 815 } 816 817 /* Allocate the null mbuf and pre-sync it. */ 818 sc->null_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 819 memset(sc->null_mbuf->m_data, 0, sc->null_mbuf->m_ext.ext_size); 820 bus_dmamap_create(sc->mbuf_dtag, 0, &sc->null_mbuf_dmamap); 821 bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->null_mbuf_dmamap, 822 sc->null_mbuf, segs, &nsegs, BUS_DMA_NOWAIT); 823 bus_dmamap_sync(sc->mbuf_dtag, sc->null_mbuf_dmamap, 824 BUS_DMASYNC_PREWRITE); 825 sc->null_mbuf_paddr = segs[0].ds_addr; 826 827 cpsw_init_slots(sc); 828 829 /* Allocate slots to TX and RX queues. */ 830 STAILQ_INIT(&sc->rx.avail); 831 STAILQ_INIT(&sc->rx.active); 832 STAILQ_INIT(&sc->tx.avail); 833 STAILQ_INIT(&sc->tx.active); 834 // For now: 128 slots to TX, rest to RX. 835 // XXX TODO: start with 32/64 and grow dynamically based on demand. 836 if (cpsw_add_slots(sc, &sc->tx, 128) || 837 cpsw_add_slots(sc, &sc->rx, -1)) { 838 device_printf(dev, "failed to allocate dmamaps\n"); 839 cpsw_detach(dev); 840 return (ENOMEM); 841 } 842 device_printf(dev, "Initial queue size TX=%d RX=%d\n", 843 sc->tx.queue_slots, sc->rx.queue_slots); 844 845 sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0); 846 sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0); 847 848 if (cpsw_intr_attach(sc) == -1) { 849 device_printf(dev, "failed to setup interrupts\n"); 850 cpsw_detach(dev); 851 return (ENXIO); 852 } 853 854 /* Reset the controller. */ 855 cpsw_reset(sc); 856 cpsw_init(sc); 857 858 for (i = 0; i < CPSW_PORTS; i++) { 859 if (!sc->dualemac && i != sc->active_slave) 860 continue; 861 sc->port[i].dev = device_add_child(dev, "cpsw", i); 862 if (sc->port[i].dev == NULL) { 863 cpsw_detach(dev); 864 return (ENXIO); 865 } 866 } 867 bus_generic_attach(dev); 868 869 return (0); 870 } 871 872 static int 873 cpsw_detach(device_t dev) 874 { 875 struct cpsw_softc *sc; 876 int error, i; 877 878 bus_generic_detach(dev); 879 sc = device_get_softc(dev); 880 881 for (i = 0; i < CPSW_PORTS; i++) { 882 if (sc->port[i].dev) 883 device_delete_child(dev, sc->port[i].dev); 884 } 885 886 if (device_is_attached(dev)) { 887 callout_stop(&sc->watchdog.callout); 888 callout_drain(&sc->watchdog.callout); 889 } 890 891 /* Stop and release all interrupts */ 892 cpsw_intr_detach(sc); 893 894 /* Free dmamaps and mbufs */ 895 for (i = 0; i < nitems(sc->_slots); ++i) 896 cpsw_free_slot(sc, &sc->_slots[i]); 897 898 /* Free null mbuf. */ 899 if (sc->null_mbuf_dmamap) { 900 bus_dmamap_unload(sc->mbuf_dtag, sc->null_mbuf_dmamap); 901 error = bus_dmamap_destroy(sc->mbuf_dtag, sc->null_mbuf_dmamap); 902 KASSERT(error == 0, ("Mapping still active")); 903 m_freem(sc->null_mbuf); 904 } 905 906 /* Free DMA tag */ 907 if (sc->mbuf_dtag) { 908 error = bus_dma_tag_destroy(sc->mbuf_dtag); 909 KASSERT(error == 0, ("Unable to destroy DMA tag")); 910 } 911 912 /* Free IO memory handler */ 913 if (sc->mem_res != NULL) 914 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); 915 bus_release_resources(dev, irq_res_spec, sc->irq_res); 916 917 /* Destroy mutexes */ 918 mtx_destroy(&sc->rx.lock); 919 mtx_destroy(&sc->tx.lock); 920 921 return (0); 922 } 923 924 static phandle_t 925 cpsw_get_node(device_t bus, device_t dev) 926 { 927 928 /* Share controller node with port device. */ 929 return (ofw_bus_get_node(bus)); 930 } 931 932 static int 933 cpswp_probe(device_t dev) 934 { 935 936 if (device_get_unit(dev) > 1) { 937 device_printf(dev, "Only two ports are supported.\n"); 938 return (ENXIO); 939 } 940 device_set_desc(dev, "Ethernet Switch Port"); 941 942 return (BUS_PROBE_DEFAULT); 943 } 944 945 static int 946 cpswp_attach(device_t dev) 947 { 948 int error; 949 struct ifnet *ifp; 950 struct cpswp_softc *sc; 951 uint32_t reg; 952 uint8_t mac_addr[ETHER_ADDR_LEN]; 953 954 sc = device_get_softc(dev); 955 sc->dev = dev; 956 sc->pdev = device_get_parent(dev); 957 sc->swsc = device_get_softc(sc->pdev); 958 sc->unit = device_get_unit(dev); 959 sc->phy = sc->swsc->port[sc->unit].phy; 960 sc->vlan = sc->swsc->port[sc->unit].vlan; 961 if (sc->swsc->dualemac && sc->vlan == -1) 962 sc->vlan = sc->unit + 1; 963 964 if (sc->unit == 0) { 965 sc->physel = MDIOUSERPHYSEL0; 966 sc->phyaccess = MDIOUSERACCESS0; 967 } else { 968 sc->physel = MDIOUSERPHYSEL1; 969 sc->phyaccess = MDIOUSERACCESS1; 970 } 971 972 mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock", 973 MTX_DEF); 974 975 /* Allocate network interface */ 976 ifp = sc->ifp = if_alloc(IFT_ETHER); 977 if (ifp == NULL) { 978 cpswp_detach(dev); 979 return (ENXIO); 980 } 981 982 if_initname(ifp, device_get_name(sc->dev), sc->unit); 983 ifp->if_softc = sc; 984 ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST; 985 ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN? 986 ifp->if_capenable = ifp->if_capabilities; 987 988 ifp->if_init = cpswp_init; 989 ifp->if_start = cpswp_start; 990 ifp->if_ioctl = cpswp_ioctl; 991 992 ifp->if_snd.ifq_drv_maxlen = sc->swsc->tx.queue_slots; 993 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 994 IFQ_SET_READY(&ifp->if_snd); 995 996 /* Get high part of MAC address from control module (mac_id[0|1]_hi) */ 997 ti_scm_reg_read_4(SCM_MAC_ID0_HI + sc->unit * 8, ®); 998 mac_addr[0] = reg & 0xFF; 999 mac_addr[1] = (reg >> 8) & 0xFF; 1000 mac_addr[2] = (reg >> 16) & 0xFF; 1001 mac_addr[3] = (reg >> 24) & 0xFF; 1002 1003 /* Get low part of MAC address from control module (mac_id[0|1]_lo) */ 1004 ti_scm_reg_read_4(SCM_MAC_ID0_LO + sc->unit * 8, ®); 1005 mac_addr[4] = reg & 0xFF; 1006 mac_addr[5] = (reg >> 8) & 0xFF; 1007 1008 error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd, 1009 cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0); 1010 if (error) { 1011 device_printf(dev, "attaching PHYs failed\n"); 1012 cpswp_detach(dev); 1013 return (error); 1014 } 1015 sc->mii = device_get_softc(sc->miibus); 1016 1017 /* Select PHY and enable interrupts */ 1018 cpsw_write_4(sc->swsc, sc->physel, 1019 MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F)); 1020 1021 ether_ifattach(sc->ifp, mac_addr); 1022 callout_init(&sc->mii_callout, 0); 1023 1024 return (0); 1025 } 1026 1027 static int 1028 cpswp_detach(device_t dev) 1029 { 1030 struct cpswp_softc *sc; 1031 1032 sc = device_get_softc(dev); 1033 CPSW_DEBUGF(sc->swsc, ("")); 1034 if (device_is_attached(dev)) { 1035 ether_ifdetach(sc->ifp); 1036 CPSW_PORT_LOCK(sc); 1037 cpswp_stop_locked(sc); 1038 CPSW_PORT_UNLOCK(sc); 1039 callout_drain(&sc->mii_callout); 1040 } 1041 1042 bus_generic_detach(dev); 1043 1044 if_free(sc->ifp); 1045 mtx_destroy(&sc->lock); 1046 1047 return (0); 1048 } 1049 1050 /* 1051 * 1052 * Init/Shutdown. 1053 * 1054 */ 1055 1056 static int 1057 cpsw_ports_down(struct cpsw_softc *sc) 1058 { 1059 struct cpswp_softc *psc; 1060 struct ifnet *ifp1, *ifp2; 1061 1062 if (!sc->dualemac) 1063 return (1); 1064 psc = device_get_softc(sc->port[0].dev); 1065 ifp1 = psc->ifp; 1066 psc = device_get_softc(sc->port[1].dev); 1067 ifp2 = psc->ifp; 1068 if ((ifp1->if_flags & IFF_UP) == 0 && (ifp2->if_flags & IFF_UP) == 0) 1069 return (1); 1070 1071 return (0); 1072 } 1073 1074 static void 1075 cpswp_init(void *arg) 1076 { 1077 struct cpswp_softc *sc = arg; 1078 1079 CPSW_DEBUGF(sc->swsc, ("")); 1080 CPSW_PORT_LOCK(sc); 1081 cpswp_init_locked(arg); 1082 CPSW_PORT_UNLOCK(sc); 1083 } 1084 1085 static void 1086 cpswp_init_locked(void *arg) 1087 { 1088 struct cpswp_softc *sc = arg; 1089 struct ifnet *ifp; 1090 uint32_t reg; 1091 1092 CPSW_DEBUGF(sc->swsc, ("")); 1093 CPSW_PORT_LOCK_ASSERT(sc); 1094 ifp = sc->ifp; 1095 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1096 return; 1097 1098 getbinuptime(&sc->init_uptime); 1099 1100 if (!sc->swsc->rx.running && !sc->swsc->tx.running) { 1101 /* Reset the controller. */ 1102 cpsw_reset(sc->swsc); 1103 cpsw_init(sc->swsc); 1104 } 1105 1106 /* Set Slave Mapping. */ 1107 cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210); 1108 cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1), 1109 0x33221100); 1110 cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2); 1111 /* Enable MAC RX/TX modules. */ 1112 /* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */ 1113 /* Huh? Docs call bit 0 "Loopback" some places, "FullDuplex" others. */ 1114 reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); 1115 reg |= CPSW_SL_MACTL_GMII_ENABLE; 1116 cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); 1117 1118 /* Initialize ALE: set port to forwarding(3), initialize addrs */ 1119 cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1), 3); 1120 cpswp_ale_update_addresses(sc, 1); 1121 1122 if (sc->swsc->dualemac) { 1123 /* Set Port VID. */ 1124 cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1), 1125 sc->vlan & 0xfff); 1126 cpsw_ale_update_vlan_table(sc->swsc, sc->vlan, 1127 (1 << (sc->unit + 1)) | (1 << 0), /* Member list */ 1128 (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */ 1129 (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */ 1130 } 1131 1132 mii_mediachg(sc->mii); 1133 callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); 1134 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1135 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1136 } 1137 1138 static int 1139 cpsw_shutdown(device_t dev) 1140 { 1141 struct cpsw_softc *sc; 1142 struct cpswp_softc *psc; 1143 int i; 1144 1145 sc = device_get_softc(dev); 1146 CPSW_DEBUGF(sc, ("")); 1147 for (i = 0; i < CPSW_PORTS; i++) { 1148 if (!sc->dualemac && i != sc->active_slave) 1149 continue; 1150 psc = device_get_softc(sc->port[i].dev); 1151 CPSW_PORT_LOCK(psc); 1152 cpswp_stop_locked(psc); 1153 CPSW_PORT_UNLOCK(psc); 1154 } 1155 1156 return (0); 1157 } 1158 1159 static void 1160 cpsw_rx_teardown(struct cpsw_softc *sc) 1161 { 1162 int i = 0; 1163 1164 CPSW_RX_LOCK(sc); 1165 CPSW_DEBUGF(sc, ("starting RX teardown")); 1166 sc->rx.teardown = 1; 1167 cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0); 1168 CPSW_RX_UNLOCK(sc); 1169 while (sc->rx.running) { 1170 if (++i > 10) { 1171 device_printf(sc->dev, 1172 "Unable to cleanly shutdown receiver\n"); 1173 return; 1174 } 1175 DELAY(200); 1176 } 1177 if (!sc->rx.running) 1178 CPSW_DEBUGF(sc, ("finished RX teardown (%d retries)", i)); 1179 } 1180 1181 static void 1182 cpsw_tx_teardown(struct cpsw_softc *sc) 1183 { 1184 int i = 0; 1185 1186 CPSW_TX_LOCK(sc); 1187 CPSW_DEBUGF(sc, ("starting TX teardown")); 1188 /* Start the TX queue teardown if queue is not empty. */ 1189 if (STAILQ_FIRST(&sc->tx.active) != NULL) 1190 cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0); 1191 else 1192 sc->tx.teardown = 1; 1193 cpsw_tx_dequeue(sc); 1194 while (sc->tx.running && ++i < 10) { 1195 DELAY(200); 1196 cpsw_tx_dequeue(sc); 1197 } 1198 if (sc->tx.running) { 1199 device_printf(sc->dev, 1200 "Unable to cleanly shutdown transmitter\n"); 1201 } 1202 CPSW_DEBUGF(sc, 1203 ("finished TX teardown (%d retries, %d idle buffers)", i, 1204 sc->tx.active_queue_len)); 1205 CPSW_TX_UNLOCK(sc); 1206 } 1207 1208 static void 1209 cpswp_stop_locked(struct cpswp_softc *sc) 1210 { 1211 struct ifnet *ifp; 1212 uint32_t reg; 1213 1214 ifp = sc->ifp; 1215 CPSW_DEBUGF(sc->swsc, ("")); 1216 CPSW_PORT_LOCK_ASSERT(sc); 1217 1218 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1219 return; 1220 1221 /* Disable interface */ 1222 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1223 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1224 1225 /* Stop ticker */ 1226 callout_stop(&sc->mii_callout); 1227 1228 /* Tear down the RX/TX queues. */ 1229 if (cpsw_ports_down(sc->swsc)) { 1230 cpsw_rx_teardown(sc->swsc); 1231 cpsw_tx_teardown(sc->swsc); 1232 } 1233 1234 /* Stop MAC RX/TX modules. */ 1235 reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); 1236 reg &= ~CPSW_SL_MACTL_GMII_ENABLE; 1237 cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); 1238 1239 if (cpsw_ports_down(sc->swsc)) { 1240 /* Capture stats before we reset controller. */ 1241 cpsw_stats_collect(sc->swsc); 1242 1243 cpsw_reset(sc->swsc); 1244 cpsw_init(sc->swsc); 1245 } 1246 } 1247 1248 /* 1249 * Suspend/Resume. 1250 */ 1251 1252 static int 1253 cpsw_suspend(device_t dev) 1254 { 1255 struct cpsw_softc *sc; 1256 struct cpswp_softc *psc; 1257 int i; 1258 1259 sc = device_get_softc(dev); 1260 CPSW_DEBUGF(sc, ("")); 1261 for (i = 0; i < CPSW_PORTS; i++) { 1262 if (!sc->dualemac && i != sc->active_slave) 1263 continue; 1264 psc = device_get_softc(sc->port[i].dev); 1265 CPSW_PORT_LOCK(psc); 1266 cpswp_stop_locked(psc); 1267 CPSW_PORT_UNLOCK(psc); 1268 } 1269 1270 return (0); 1271 } 1272 1273 static int 1274 cpsw_resume(device_t dev) 1275 { 1276 struct cpsw_softc *sc; 1277 1278 sc = device_get_softc(dev); 1279 CPSW_DEBUGF(sc, ("UNIMPLEMENTED")); 1280 1281 return (0); 1282 } 1283 1284 /* 1285 * 1286 * IOCTL 1287 * 1288 */ 1289 1290 static void 1291 cpsw_set_promisc(struct cpswp_softc *sc, int set) 1292 { 1293 uint32_t reg; 1294 1295 /* 1296 * Enabling promiscuous mode requires ALE_BYPASS to be enabled. 1297 * That disables the ALE forwarding logic and causes every 1298 * packet to be sent only to the host port. In bypass mode, 1299 * the ALE processes host port transmit packets the same as in 1300 * normal mode. 1301 */ 1302 reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL); 1303 reg &= ~CPSW_ALE_CTL_BYPASS; 1304 if (set) 1305 reg |= CPSW_ALE_CTL_BYPASS; 1306 cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg); 1307 } 1308 1309 static void 1310 cpsw_set_allmulti(struct cpswp_softc *sc, int set) 1311 { 1312 if (set) { 1313 printf("All-multicast mode unimplemented\n"); 1314 } 1315 } 1316 1317 static int 1318 cpswp_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1319 { 1320 struct cpswp_softc *sc; 1321 struct ifreq *ifr; 1322 int error; 1323 uint32_t changed; 1324 1325 error = 0; 1326 sc = ifp->if_softc; 1327 ifr = (struct ifreq *)data; 1328 1329 switch (command) { 1330 case SIOCSIFFLAGS: 1331 CPSW_PORT_LOCK(sc); 1332 if (ifp->if_flags & IFF_UP) { 1333 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1334 changed = ifp->if_flags ^ sc->if_flags; 1335 CPSW_DEBUGF(sc->swsc, 1336 ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", 1337 changed)); 1338 if (changed & IFF_PROMISC) 1339 cpsw_set_promisc(sc, 1340 ifp->if_flags & IFF_PROMISC); 1341 if (changed & IFF_ALLMULTI) 1342 cpsw_set_allmulti(sc, 1343 ifp->if_flags & IFF_ALLMULTI); 1344 } else { 1345 CPSW_DEBUGF(sc->swsc, 1346 ("SIOCSIFFLAGS: starting up")); 1347 cpswp_init_locked(sc); 1348 } 1349 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1350 CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: shutting down")); 1351 cpswp_stop_locked(sc); 1352 } 1353 1354 sc->if_flags = ifp->if_flags; 1355 CPSW_PORT_UNLOCK(sc); 1356 break; 1357 case SIOCADDMULTI: 1358 cpswp_ale_update_addresses(sc, 0); 1359 break; 1360 case SIOCDELMULTI: 1361 /* Ugh. DELMULTI doesn't provide the specific address 1362 being removed, so the best we can do is remove 1363 everything and rebuild it all. */ 1364 cpswp_ale_update_addresses(sc, 1); 1365 break; 1366 case SIOCGIFMEDIA: 1367 case SIOCSIFMEDIA: 1368 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command); 1369 break; 1370 default: 1371 error = ether_ioctl(ifp, command, data); 1372 } 1373 return (error); 1374 } 1375 1376 /* 1377 * 1378 * MIIBUS 1379 * 1380 */ 1381 static int 1382 cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg) 1383 { 1384 uint32_t r, retries = CPSW_MIIBUS_RETRIES; 1385 1386 while (--retries) { 1387 r = cpsw_read_4(sc, reg); 1388 if ((r & MDIO_PHYACCESS_GO) == 0) 1389 return (1); 1390 DELAY(CPSW_MIIBUS_DELAY); 1391 } 1392 1393 return (0); 1394 } 1395 1396 static int 1397 cpswp_miibus_readreg(device_t dev, int phy, int reg) 1398 { 1399 struct cpswp_softc *sc; 1400 uint32_t cmd, r; 1401 1402 sc = device_get_softc(dev); 1403 if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { 1404 device_printf(dev, "MDIO not ready to read\n"); 1405 return (0); 1406 } 1407 1408 /* Set GO, reg, phy */ 1409 cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16; 1410 cpsw_write_4(sc->swsc, sc->phyaccess, cmd); 1411 1412 if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { 1413 device_printf(dev, "MDIO timed out during read\n"); 1414 return (0); 1415 } 1416 1417 r = cpsw_read_4(sc->swsc, sc->phyaccess); 1418 if ((r & MDIO_PHYACCESS_ACK) == 0) { 1419 device_printf(dev, "Failed to read from PHY.\n"); 1420 r = 0; 1421 } 1422 return (r & 0xFFFF); 1423 } 1424 1425 static int 1426 cpswp_miibus_writereg(device_t dev, int phy, int reg, int value) 1427 { 1428 struct cpswp_softc *sc; 1429 uint32_t cmd; 1430 1431 sc = device_get_softc(dev); 1432 if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { 1433 device_printf(dev, "MDIO not ready to write\n"); 1434 return (0); 1435 } 1436 1437 /* Set GO, WRITE, reg, phy, and value */ 1438 cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE | 1439 (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF); 1440 cpsw_write_4(sc->swsc, sc->phyaccess, cmd); 1441 1442 if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { 1443 device_printf(dev, "MDIO timed out during write\n"); 1444 return (0); 1445 } 1446 1447 if ((cpsw_read_4(sc->swsc, sc->phyaccess) & MDIO_PHYACCESS_ACK) == 0) 1448 device_printf(dev, "Failed to write to PHY.\n"); 1449 1450 return (0); 1451 } 1452 1453 static void 1454 cpswp_miibus_statchg(device_t dev) 1455 { 1456 struct cpswp_softc *sc; 1457 uint32_t mac_control, reg; 1458 1459 sc = device_get_softc(dev); 1460 CPSW_DEBUGF(sc->swsc, ("")); 1461 1462 reg = CPSW_SL_MACCONTROL(sc->unit); 1463 mac_control = cpsw_read_4(sc->swsc, reg); 1464 mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A | 1465 CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX); 1466 1467 switch(IFM_SUBTYPE(sc->mii->mii_media_active)) { 1468 case IFM_1000_SX: 1469 case IFM_1000_LX: 1470 case IFM_1000_CX: 1471 case IFM_1000_T: 1472 mac_control |= CPSW_SL_MACTL_GIG; 1473 break; 1474 1475 case IFM_100_TX: 1476 mac_control |= CPSW_SL_MACTL_IFCTL_A; 1477 break; 1478 } 1479 if (sc->mii->mii_media_active & IFM_FDX) 1480 mac_control |= CPSW_SL_MACTL_FULLDUPLEX; 1481 1482 cpsw_write_4(sc->swsc, reg, mac_control); 1483 } 1484 1485 /* 1486 * 1487 * Transmit/Receive Packets. 1488 * 1489 */ 1490 static void 1491 cpsw_intr_rx(void *arg) 1492 { 1493 struct cpsw_softc *sc; 1494 struct ifnet *ifp; 1495 struct mbuf *received, *next; 1496 1497 sc = (struct cpsw_softc *)arg; 1498 CPSW_RX_LOCK(sc); 1499 if (sc->rx.teardown) { 1500 sc->rx.running = 0; 1501 sc->rx.teardown = 0; 1502 cpsw_write_cp(sc, &sc->rx, 0xfffffffc); 1503 } 1504 received = cpsw_rx_dequeue(sc); 1505 cpsw_rx_enqueue(sc); 1506 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1); 1507 CPSW_RX_UNLOCK(sc); 1508 1509 while (received != NULL) { 1510 next = received->m_nextpkt; 1511 received->m_nextpkt = NULL; 1512 ifp = received->m_pkthdr.rcvif; 1513 (*ifp->if_input)(ifp, received); 1514 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1515 received = next; 1516 } 1517 } 1518 1519 static struct mbuf * 1520 cpsw_rx_dequeue(struct cpsw_softc *sc) 1521 { 1522 struct cpsw_cpdma_bd bd; 1523 struct cpsw_slot *last, *slot; 1524 struct cpswp_softc *psc; 1525 struct mbuf *mb_head, *mb_tail; 1526 int port, removed = 0; 1527 1528 last = NULL; 1529 mb_head = mb_tail = NULL; 1530 1531 /* Pull completed packets off hardware RX queue. */ 1532 while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) { 1533 cpsw_cpdma_read_bd(sc, slot, &bd); 1534 1535 /* 1536 * Stop on packets still in use by hardware, but do not stop 1537 * on packets with the teardown complete flag, they will be 1538 * discarded later. 1539 */ 1540 if ((bd.flags & (CPDMA_BD_OWNER | CPDMA_BD_TDOWNCMPLT)) == 1541 CPDMA_BD_OWNER) 1542 break; 1543 1544 last = slot; 1545 ++removed; 1546 STAILQ_REMOVE_HEAD(&sc->rx.active, next); 1547 STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next); 1548 1549 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTREAD); 1550 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1551 1552 if (bd.flags & CPDMA_BD_TDOWNCMPLT) { 1553 CPSW_DEBUGF(sc, ("RX teardown is complete")); 1554 m_freem(slot->mbuf); 1555 slot->mbuf = NULL; 1556 sc->rx.running = 0; 1557 sc->rx.teardown = 0; 1558 break; 1559 } 1560 1561 port = (bd.flags & CPDMA_BD_PORT_MASK) - 1; 1562 KASSERT(port >= 0 && port <= 1, 1563 ("patcket received with invalid port: %d", port)); 1564 psc = device_get_softc(sc->port[port].dev); 1565 1566 /* Set up mbuf */ 1567 /* TODO: track SOP/EOP bits to assemble a full mbuf 1568 out of received fragments. */ 1569 slot->mbuf->m_data += bd.bufoff; 1570 slot->mbuf->m_len = bd.pktlen - 4; 1571 slot->mbuf->m_pkthdr.len = bd.pktlen - 4; 1572 slot->mbuf->m_flags |= M_PKTHDR; 1573 slot->mbuf->m_pkthdr.rcvif = psc->ifp; 1574 slot->mbuf->m_nextpkt = NULL; 1575 1576 if ((psc->ifp->if_capenable & IFCAP_RXCSUM) != 0) { 1577 /* check for valid CRC by looking into pkt_err[5:4] */ 1578 if ((bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0) { 1579 slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1580 slot->mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1581 slot->mbuf->m_pkthdr.csum_data = 0xffff; 1582 } 1583 } 1584 1585 /* Add mbuf to packet list to be returned. */ 1586 if (mb_tail) { 1587 mb_tail->m_nextpkt = slot->mbuf; 1588 } else { 1589 mb_head = slot->mbuf; 1590 } 1591 mb_tail = slot->mbuf; 1592 slot->mbuf = NULL; 1593 if (sc->rx_batch > 0 && sc->rx_batch == removed) 1594 break; 1595 } 1596 1597 if (removed != 0) { 1598 cpsw_write_cp_slot(sc, &sc->rx, last); 1599 sc->rx.queue_removes += removed; 1600 sc->rx.avail_queue_len += removed; 1601 sc->rx.active_queue_len -= removed; 1602 if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len) 1603 sc->rx.max_avail_queue_len = sc->rx.avail_queue_len; 1604 CPSW_DEBUGF(sc, ("Removed %d received packet(s) from RX queue", removed)); 1605 } 1606 1607 return (mb_head); 1608 } 1609 1610 static void 1611 cpsw_rx_enqueue(struct cpsw_softc *sc) 1612 { 1613 bus_dma_segment_t seg[1]; 1614 struct cpsw_cpdma_bd bd; 1615 struct cpsw_slot *first_new_slot, *last_old_slot, *next, *slot; 1616 int error, nsegs, added = 0; 1617 uint32_t flags; 1618 1619 /* Register new mbufs with hardware. */ 1620 first_new_slot = NULL; 1621 last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next); 1622 while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) { 1623 if (first_new_slot == NULL) 1624 first_new_slot = slot; 1625 if (slot->mbuf == NULL) { 1626 slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1627 if (slot->mbuf == NULL) { 1628 device_printf(sc->dev, 1629 "Unable to fill RX queue\n"); 1630 break; 1631 } 1632 slot->mbuf->m_len = 1633 slot->mbuf->m_pkthdr.len = 1634 slot->mbuf->m_ext.ext_size; 1635 } 1636 1637 error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap, 1638 slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT); 1639 1640 KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs)); 1641 KASSERT(error == 0, ("DMA error (error=%d)", error)); 1642 if (error != 0 || nsegs != 1) { 1643 device_printf(sc->dev, 1644 "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n", 1645 __func__, nsegs, error); 1646 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1647 m_freem(slot->mbuf); 1648 slot->mbuf = NULL; 1649 break; 1650 } 1651 1652 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD); 1653 1654 /* Create and submit new rx descriptor. */ 1655 if ((next = STAILQ_NEXT(slot, next)) != NULL) 1656 bd.next = cpsw_cpdma_bd_paddr(sc, next); 1657 else 1658 bd.next = 0; 1659 bd.bufptr = seg->ds_addr; 1660 bd.bufoff = 0; 1661 bd.buflen = MCLBYTES - 1; 1662 bd.pktlen = bd.buflen; 1663 bd.flags = CPDMA_BD_OWNER; 1664 cpsw_cpdma_write_bd(sc, slot, &bd); 1665 ++added; 1666 1667 STAILQ_REMOVE_HEAD(&sc->rx.avail, next); 1668 STAILQ_INSERT_TAIL(&sc->rx.active, slot, next); 1669 } 1670 1671 if (added == 0 || first_new_slot == NULL) 1672 return; 1673 1674 CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added)); 1675 1676 /* Link new entries to hardware RX queue. */ 1677 if (last_old_slot == NULL) { 1678 /* Start a fresh queue. */ 1679 cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); 1680 } else { 1681 /* Add buffers to end of current queue. */ 1682 cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot); 1683 /* If underrun, restart queue. */ 1684 if ((flags = cpsw_cpdma_read_bd_flags(sc, last_old_slot)) & 1685 CPDMA_BD_EOQ) { 1686 flags &= ~CPDMA_BD_EOQ; 1687 cpsw_cpdma_write_bd_flags(sc, last_old_slot, flags); 1688 cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); 1689 sc->rx.queue_restart++; 1690 } 1691 } 1692 sc->rx.queue_adds += added; 1693 sc->rx.avail_queue_len -= added; 1694 sc->rx.active_queue_len += added; 1695 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), sc->rx.active_queue_len); 1696 if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) { 1697 sc->rx.max_active_queue_len = sc->rx.active_queue_len; 1698 } 1699 } 1700 1701 static void 1702 cpswp_start(struct ifnet *ifp) 1703 { 1704 struct cpswp_softc *sc; 1705 1706 sc = ifp->if_softc; 1707 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 1708 sc->swsc->tx.running == 0) { 1709 return; 1710 } 1711 CPSW_TX_LOCK(sc->swsc); 1712 cpswp_tx_enqueue(sc); 1713 cpsw_tx_dequeue(sc->swsc); 1714 CPSW_TX_UNLOCK(sc->swsc); 1715 } 1716 1717 static void 1718 cpsw_intr_tx(void *arg) 1719 { 1720 struct cpsw_softc *sc; 1721 1722 sc = (struct cpsw_softc *)arg; 1723 CPSW_TX_LOCK(sc); 1724 if (cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)) == 0xfffffffc) 1725 cpsw_write_cp(sc, &sc->tx, 0xfffffffc); 1726 cpsw_tx_dequeue(sc); 1727 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 2); 1728 CPSW_TX_UNLOCK(sc); 1729 } 1730 1731 static void 1732 cpswp_tx_enqueue(struct cpswp_softc *sc) 1733 { 1734 bus_dma_segment_t segs[CPSW_TXFRAGS]; 1735 struct cpsw_cpdma_bd bd; 1736 struct cpsw_slot *first_new_slot, *last, *last_old_slot, *next, *slot; 1737 struct mbuf *m0; 1738 int error, flags, nsegs, seg, added = 0, padlen; 1739 1740 flags = 0; 1741 if (sc->swsc->dualemac) { 1742 flags = CPDMA_BD_TO_PORT | 1743 ((sc->unit + 1) & CPDMA_BD_PORT_MASK); 1744 } 1745 /* Pull pending packets from IF queue and prep them for DMA. */ 1746 last = NULL; 1747 first_new_slot = NULL; 1748 last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next); 1749 while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) { 1750 IF_DEQUEUE(&sc->ifp->if_snd, m0); 1751 if (m0 == NULL) 1752 break; 1753 1754 slot->mbuf = m0; 1755 padlen = ETHER_MIN_LEN - slot->mbuf->m_pkthdr.len; 1756 if (padlen < 0) 1757 padlen = 0; 1758 1759 /* Create mapping in DMA memory */ 1760 error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag, 1761 slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); 1762 /* If the packet is too fragmented, try to simplify. */ 1763 if (error == EFBIG || 1764 (error == 0 && 1765 nsegs + (padlen > 0 ? 1 : 0) > sc->swsc->tx.avail_queue_len)) { 1766 bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); 1767 if (padlen > 0) /* May as well add padding. */ 1768 m_append(slot->mbuf, padlen, 1769 sc->swsc->null_mbuf->m_data); 1770 m0 = m_defrag(slot->mbuf, M_NOWAIT); 1771 if (m0 == NULL) { 1772 device_printf(sc->dev, 1773 "Can't defragment packet; dropping\n"); 1774 m_freem(slot->mbuf); 1775 } else { 1776 CPSW_DEBUGF(sc->swsc, 1777 ("Requeueing defragmented packet")); 1778 IF_PREPEND(&sc->ifp->if_snd, m0); 1779 } 1780 slot->mbuf = NULL; 1781 continue; 1782 } 1783 if (error != 0) { 1784 device_printf(sc->dev, 1785 "%s: Can't setup DMA (error=%d), dropping packet\n", 1786 __func__, error); 1787 bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); 1788 m_freem(slot->mbuf); 1789 slot->mbuf = NULL; 1790 break; 1791 } 1792 1793 bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap, 1794 BUS_DMASYNC_PREWRITE); 1795 1796 CPSW_DEBUGF(sc->swsc, 1797 ("Queueing TX packet: %d segments + %d pad bytes", 1798 nsegs, padlen)); 1799 1800 if (first_new_slot == NULL) 1801 first_new_slot = slot; 1802 1803 /* Link from the previous descriptor. */ 1804 if (last != NULL) 1805 cpsw_cpdma_write_bd_next(sc->swsc, last, slot); 1806 1807 slot->ifp = sc->ifp; 1808 1809 /* If there is only one segment, the for() loop 1810 * gets skipped and the single buffer gets set up 1811 * as both SOP and EOP. */ 1812 if (nsegs > 1) { 1813 next = STAILQ_NEXT(slot, next); 1814 bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); 1815 } else 1816 bd.next = 0; 1817 /* Start by setting up the first buffer. */ 1818 bd.bufptr = segs[0].ds_addr; 1819 bd.bufoff = 0; 1820 bd.buflen = segs[0].ds_len; 1821 bd.pktlen = m_length(slot->mbuf, NULL) + padlen; 1822 bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER | flags; 1823 for (seg = 1; seg < nsegs; ++seg) { 1824 /* Save the previous buffer (which isn't EOP) */ 1825 cpsw_cpdma_write_bd(sc->swsc, slot, &bd); 1826 STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); 1827 STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); 1828 slot = STAILQ_FIRST(&sc->swsc->tx.avail); 1829 1830 /* Setup next buffer (which isn't SOP) */ 1831 if (nsegs > seg + 1) { 1832 next = STAILQ_NEXT(slot, next); 1833 bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); 1834 } else 1835 bd.next = 0; 1836 bd.bufptr = segs[seg].ds_addr; 1837 bd.bufoff = 0; 1838 bd.buflen = segs[seg].ds_len; 1839 bd.pktlen = 0; 1840 bd.flags = CPDMA_BD_OWNER | flags; 1841 } 1842 /* Save the final buffer. */ 1843 if (padlen <= 0) 1844 bd.flags |= CPDMA_BD_EOP; 1845 else { 1846 next = STAILQ_NEXT(slot, next); 1847 bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next); 1848 } 1849 cpsw_cpdma_write_bd(sc->swsc, slot, &bd); 1850 STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); 1851 STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); 1852 1853 if (padlen > 0) { 1854 slot = STAILQ_FIRST(&sc->swsc->tx.avail); 1855 1856 /* Setup buffer of null pad bytes (definitely EOP). */ 1857 bd.next = 0; 1858 bd.bufptr = sc->swsc->null_mbuf_paddr; 1859 bd.bufoff = 0; 1860 bd.buflen = padlen; 1861 bd.pktlen = 0; 1862 bd.flags = CPDMA_BD_EOP | CPDMA_BD_OWNER | flags; 1863 cpsw_cpdma_write_bd(sc->swsc, slot, &bd); 1864 ++nsegs; 1865 1866 STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); 1867 STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next); 1868 } 1869 1870 last = slot; 1871 1872 added += nsegs; 1873 if (nsegs > sc->swsc->tx.longest_chain) 1874 sc->swsc->tx.longest_chain = nsegs; 1875 1876 // TODO: Should we defer the BPF tap until 1877 // after all packets are queued? 1878 BPF_MTAP(sc->ifp, m0); 1879 } 1880 1881 if (first_new_slot == NULL) 1882 return; 1883 1884 /* Attach the list of new buffers to the hardware TX queue. */ 1885 if (last_old_slot != NULL && 1886 (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) & 1887 CPDMA_BD_EOQ) == 0) { 1888 /* Add buffers to end of current queue. */ 1889 cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot, 1890 first_new_slot); 1891 } else { 1892 /* Start a fresh queue. */ 1893 cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot); 1894 } 1895 sc->swsc->tx.queue_adds += added; 1896 sc->swsc->tx.avail_queue_len -= added; 1897 sc->swsc->tx.active_queue_len += added; 1898 if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) { 1899 sc->swsc->tx.max_active_queue_len = sc->swsc->tx.active_queue_len; 1900 } 1901 CPSW_DEBUGF(sc->swsc, ("Queued %d TX packet(s)", added)); 1902 } 1903 1904 static int 1905 cpsw_tx_dequeue(struct cpsw_softc *sc) 1906 { 1907 struct cpsw_slot *slot, *last_removed_slot = NULL; 1908 struct cpsw_cpdma_bd bd; 1909 uint32_t flags, removed = 0; 1910 1911 /* Pull completed buffers off the hardware TX queue. */ 1912 slot = STAILQ_FIRST(&sc->tx.active); 1913 while (slot != NULL) { 1914 flags = cpsw_cpdma_read_bd_flags(sc, slot); 1915 1916 /* TearDown complete is only marked on the SOP for the packet. */ 1917 if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) == 1918 (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) { 1919 sc->tx.teardown = 1; 1920 } 1921 1922 if ((flags & CPDMA_BD_OWNER) != 0 && sc->tx.teardown == 0) 1923 break; /* Hardware is still using this packet. */ 1924 1925 bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE); 1926 bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); 1927 m_freem(slot->mbuf); 1928 slot->mbuf = NULL; 1929 1930 if (slot->ifp) { 1931 if (sc->tx.teardown == 0) 1932 if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1); 1933 else 1934 if_inc_counter(slot->ifp, IFCOUNTER_OQDROPS, 1); 1935 } 1936 1937 /* Dequeue any additional buffers used by this packet. */ 1938 while (slot != NULL && slot->mbuf == NULL) { 1939 STAILQ_REMOVE_HEAD(&sc->tx.active, next); 1940 STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next); 1941 ++removed; 1942 last_removed_slot = slot; 1943 slot = STAILQ_FIRST(&sc->tx.active); 1944 } 1945 1946 cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot); 1947 1948 /* Restart the TX queue if necessary. */ 1949 cpsw_cpdma_read_bd(sc, last_removed_slot, &bd); 1950 if (slot != NULL && bd.next != 0 && (bd.flags & 1951 (CPDMA_BD_EOP | CPDMA_BD_OWNER | CPDMA_BD_EOQ)) == 1952 (CPDMA_BD_EOP | CPDMA_BD_EOQ)) { 1953 cpsw_write_hdp_slot(sc, &sc->tx, slot); 1954 sc->tx.queue_restart++; 1955 break; 1956 } 1957 } 1958 1959 if (removed != 0) { 1960 sc->tx.queue_removes += removed; 1961 sc->tx.active_queue_len -= removed; 1962 sc->tx.avail_queue_len += removed; 1963 if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len) 1964 sc->tx.max_avail_queue_len = sc->tx.avail_queue_len; 1965 CPSW_DEBUGF(sc, ("TX removed %d completed packet(s)", removed)); 1966 } 1967 1968 if (sc->tx.teardown && STAILQ_EMPTY(&sc->tx.active)) { 1969 CPSW_DEBUGF(sc, ("TX teardown is complete")); 1970 sc->tx.teardown = 0; 1971 sc->tx.running = 0; 1972 } 1973 1974 return (removed); 1975 } 1976 1977 /* 1978 * 1979 * Miscellaneous interrupts. 1980 * 1981 */ 1982 1983 static void 1984 cpsw_intr_rx_thresh(void *arg) 1985 { 1986 struct cpsw_softc *sc; 1987 struct ifnet *ifp; 1988 struct mbuf *received, *next; 1989 1990 sc = (struct cpsw_softc *)arg; 1991 CPSW_RX_LOCK(sc); 1992 received = cpsw_rx_dequeue(sc); 1993 cpsw_rx_enqueue(sc); 1994 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0); 1995 CPSW_RX_UNLOCK(sc); 1996 1997 while (received != NULL) { 1998 next = received->m_nextpkt; 1999 received->m_nextpkt = NULL; 2000 ifp = received->m_pkthdr.rcvif; 2001 (*ifp->if_input)(ifp, received); 2002 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 2003 received = next; 2004 } 2005 } 2006 2007 static void 2008 cpsw_intr_misc_host_error(struct cpsw_softc *sc) 2009 { 2010 uint32_t intstat; 2011 uint32_t dmastat; 2012 int txerr, rxerr, txchan, rxchan; 2013 2014 printf("\n\n"); 2015 device_printf(sc->dev, 2016 "HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n"); 2017 printf("\n\n"); 2018 intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED); 2019 device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat); 2020 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS); 2021 device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat); 2022 2023 txerr = (dmastat >> 20) & 15; 2024 txchan = (dmastat >> 16) & 7; 2025 rxerr = (dmastat >> 12) & 15; 2026 rxchan = (dmastat >> 8) & 7; 2027 2028 switch (txerr) { 2029 case 0: break; 2030 case 1: printf("SOP error on TX channel %d\n", txchan); 2031 break; 2032 case 2: printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan); 2033 break; 2034 case 3: printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan); 2035 break; 2036 case 4: printf("Zero Buffer Pointer on TX channel %d\n", txchan); 2037 break; 2038 case 5: printf("Zero Buffer Length on TX channel %d\n", txchan); 2039 break; 2040 case 6: printf("Packet length error on TX channel %d\n", txchan); 2041 break; 2042 default: printf("Unknown error on TX channel %d\n", txchan); 2043 break; 2044 } 2045 2046 if (txerr != 0) { 2047 printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 2048 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan))); 2049 printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 2050 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan))); 2051 cpsw_dump_queue(sc, &sc->tx.active); 2052 } 2053 2054 switch (rxerr) { 2055 case 0: break; 2056 case 2: printf("Ownership bit not set on RX channel %d\n", rxchan); 2057 break; 2058 case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan); 2059 break; 2060 case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan); 2061 break; 2062 case 6: printf("Buffer offset too big on RX channel %d\n", rxchan); 2063 break; 2064 default: printf("Unknown RX error on RX channel %d\n", rxchan); 2065 break; 2066 } 2067 2068 if (rxerr != 0) { 2069 printf("CPSW_CPDMA_RX%d_HDP=0x%x\n", 2070 rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan))); 2071 printf("CPSW_CPDMA_RX%d_CP=0x%x\n", 2072 rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan))); 2073 cpsw_dump_queue(sc, &sc->rx.active); 2074 } 2075 2076 printf("\nALE Table\n"); 2077 cpsw_ale_dump_table(sc); 2078 2079 // XXX do something useful here?? 2080 panic("CPSW HOST ERROR INTERRUPT"); 2081 2082 // Suppress this interrupt in the future. 2083 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat); 2084 printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n"); 2085 // The watchdog will probably reset the controller 2086 // in a little while. It will probably fail again. 2087 } 2088 2089 static void 2090 cpsw_intr_misc(void *arg) 2091 { 2092 struct cpsw_softc *sc = arg; 2093 uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0)); 2094 2095 if (stat & CPSW_WR_C_MISC_EVNT_PEND) 2096 CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented")); 2097 if (stat & CPSW_WR_C_MISC_STAT_PEND) 2098 cpsw_stats_collect(sc); 2099 if (stat & CPSW_WR_C_MISC_HOST_PEND) 2100 cpsw_intr_misc_host_error(sc); 2101 if (stat & CPSW_WR_C_MISC_MDIOLINK) { 2102 cpsw_write_4(sc, MDIOLINKINTMASKED, 2103 cpsw_read_4(sc, MDIOLINKINTMASKED)); 2104 } 2105 if (stat & CPSW_WR_C_MISC_MDIOUSER) { 2106 CPSW_DEBUGF(sc, 2107 ("MDIO operation completed interrupt unimplemented")); 2108 } 2109 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3); 2110 } 2111 2112 /* 2113 * 2114 * Periodic Checks and Watchdog. 2115 * 2116 */ 2117 2118 static void 2119 cpswp_tick(void *msc) 2120 { 2121 struct cpswp_softc *sc = msc; 2122 2123 /* Check for media type change */ 2124 mii_tick(sc->mii); 2125 if (sc->media_status != sc->mii->mii_media.ifm_media) { 2126 printf("%s: media type changed (ifm_media=%x)\n", __func__, 2127 sc->mii->mii_media.ifm_media); 2128 cpswp_ifmedia_upd(sc->ifp); 2129 } 2130 2131 /* Schedule another timeout one second from now */ 2132 callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); 2133 } 2134 2135 static void 2136 cpswp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2137 { 2138 struct cpswp_softc *sc; 2139 struct mii_data *mii; 2140 2141 sc = ifp->if_softc; 2142 CPSW_DEBUGF(sc->swsc, ("")); 2143 CPSW_PORT_LOCK(sc); 2144 2145 mii = sc->mii; 2146 mii_pollstat(mii); 2147 2148 ifmr->ifm_active = mii->mii_media_active; 2149 ifmr->ifm_status = mii->mii_media_status; 2150 CPSW_PORT_UNLOCK(sc); 2151 } 2152 2153 static int 2154 cpswp_ifmedia_upd(struct ifnet *ifp) 2155 { 2156 struct cpswp_softc *sc; 2157 2158 sc = ifp->if_softc; 2159 CPSW_DEBUGF(sc->swsc, ("")); 2160 CPSW_PORT_LOCK(sc); 2161 mii_mediachg(sc->mii); 2162 sc->media_status = sc->mii->mii_media.ifm_media; 2163 CPSW_PORT_UNLOCK(sc); 2164 2165 return (0); 2166 } 2167 2168 static void 2169 cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc) 2170 { 2171 struct cpswp_softc *psc; 2172 int i; 2173 2174 cpsw_debugf_head("CPSW watchdog"); 2175 device_printf(sc->dev, "watchdog timeout\n"); 2176 printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 0, 2177 cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0))); 2178 printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 0, 2179 cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0))); 2180 cpsw_dump_queue(sc, &sc->tx.active); 2181 for (i = 0; i < CPSW_PORTS; i++) { 2182 if (!sc->dualemac && i != sc->active_slave) 2183 continue; 2184 psc = device_get_softc(sc->port[i].dev); 2185 CPSW_PORT_LOCK(psc); 2186 cpswp_stop_locked(psc); 2187 CPSW_PORT_UNLOCK(psc); 2188 } 2189 } 2190 2191 static void 2192 cpsw_tx_watchdog(void *msc) 2193 { 2194 struct cpsw_softc *sc; 2195 2196 sc = msc; 2197 CPSW_TX_LOCK(sc); 2198 if (sc->tx.active_queue_len == 0 || !sc->tx.running) { 2199 sc->watchdog.timer = 0; /* Nothing to do. */ 2200 } else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) { 2201 sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */ 2202 } else if (cpsw_tx_dequeue(sc) > 0) { 2203 sc->watchdog.timer = 0; /* We just did something. */ 2204 } else { 2205 /* There was something to do but it didn't get done. */ 2206 ++sc->watchdog.timer; 2207 if (sc->watchdog.timer > 5) { 2208 sc->watchdog.timer = 0; 2209 ++sc->watchdog.resets; 2210 cpsw_tx_watchdog_full_reset(sc); 2211 } 2212 } 2213 sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes; 2214 CPSW_TX_UNLOCK(sc); 2215 2216 /* Schedule another timeout one second from now */ 2217 callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); 2218 } 2219 2220 /* 2221 * 2222 * ALE support routines. 2223 * 2224 */ 2225 2226 static void 2227 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) 2228 { 2229 cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023); 2230 ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0); 2231 ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1); 2232 ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2); 2233 } 2234 2235 static void 2236 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) 2237 { 2238 cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]); 2239 cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]); 2240 cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]); 2241 cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023)); 2242 } 2243 2244 static void 2245 cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc) 2246 { 2247 int i; 2248 uint32_t ale_entry[3]; 2249 2250 /* First four entries are link address and broadcast. */ 2251 for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { 2252 cpsw_ale_read_entry(sc, i, ale_entry); 2253 if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR || 2254 ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) && 2255 ALE_MCAST(ale_entry) == 1) { /* MCast link addr */ 2256 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; 2257 cpsw_ale_write_entry(sc, i, ale_entry); 2258 } 2259 } 2260 } 2261 2262 static int 2263 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan, 2264 uint8_t *mac) 2265 { 2266 int free_index = -1, matching_index = -1, i; 2267 uint32_t ale_entry[3], ale_type; 2268 2269 /* Find a matching entry or a free entry. */ 2270 for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { 2271 cpsw_ale_read_entry(sc, i, ale_entry); 2272 2273 /* Entry Type[61:60] is 0 for free entry */ 2274 if (free_index < 0 && ALE_TYPE(ale_entry) == 0) 2275 free_index = i; 2276 2277 if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) && 2278 (((ale_entry[1] >> 0) & 0xFF) == mac[1]) && 2279 (((ale_entry[0] >>24) & 0xFF) == mac[2]) && 2280 (((ale_entry[0] >>16) & 0xFF) == mac[3]) && 2281 (((ale_entry[0] >> 8) & 0xFF) == mac[4]) && 2282 (((ale_entry[0] >> 0) & 0xFF) == mac[5])) { 2283 matching_index = i; 2284 break; 2285 } 2286 } 2287 2288 if (matching_index < 0) { 2289 if (free_index < 0) 2290 return (ENOMEM); 2291 i = free_index; 2292 } 2293 2294 if (vlan != -1) 2295 ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16; 2296 else 2297 ale_type = ALE_TYPE_ADDR << 28; 2298 2299 /* Set MAC address */ 2300 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 2301 ale_entry[1] = mac[0] << 8 | mac[1]; 2302 2303 /* Entry type[61:60] and Mcast fwd state[63:62] is fw(3). */ 2304 ale_entry[1] |= ALE_MCAST_FWD | ale_type; 2305 2306 /* Set portmask [68:66] */ 2307 ale_entry[2] = (portmap & 7) << 2; 2308 2309 cpsw_ale_write_entry(sc, i, ale_entry); 2310 2311 return 0; 2312 } 2313 2314 static void 2315 cpsw_ale_dump_table(struct cpsw_softc *sc) { 2316 int i; 2317 uint32_t ale_entry[3]; 2318 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { 2319 cpsw_ale_read_entry(sc, i, ale_entry); 2320 switch (ALE_TYPE(ale_entry)) { 2321 case ALE_TYPE_VLAN: 2322 printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], 2323 ale_entry[1], ale_entry[0]); 2324 printf("type: %u ", ALE_TYPE(ale_entry)); 2325 printf("vlan: %u ", ALE_VLAN(ale_entry)); 2326 printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry)); 2327 printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry)); 2328 printf("unreg flood: %u ", ALE_VLAN_UNREGFLOOD(ale_entry)); 2329 printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry)); 2330 printf("\n"); 2331 break; 2332 case ALE_TYPE_ADDR: 2333 case ALE_TYPE_VLAN_ADDR: 2334 printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], 2335 ale_entry[1], ale_entry[0]); 2336 printf("type: %u ", ALE_TYPE(ale_entry)); 2337 printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ", 2338 (ale_entry[1] >> 8) & 0xFF, 2339 (ale_entry[1] >> 0) & 0xFF, 2340 (ale_entry[0] >>24) & 0xFF, 2341 (ale_entry[0] >>16) & 0xFF, 2342 (ale_entry[0] >> 8) & 0xFF, 2343 (ale_entry[0] >> 0) & 0xFF); 2344 printf(ALE_MCAST(ale_entry) ? "mcast " : "ucast "); 2345 if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) 2346 printf("vlan: %u ", ALE_VLAN(ale_entry)); 2347 printf("port: %u ", ALE_PORTS(ale_entry)); 2348 printf("\n"); 2349 break; 2350 } 2351 } 2352 printf("\n"); 2353 } 2354 2355 static int 2356 cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge) 2357 { 2358 uint8_t *mac; 2359 uint32_t ale_entry[3], ale_type, portmask; 2360 struct ifmultiaddr *ifma; 2361 2362 if (sc->swsc->dualemac) { 2363 ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16; 2364 portmask = 1 << (sc->unit + 1) | 1 << 0; 2365 } else { 2366 ale_type = ALE_TYPE_ADDR << 28; 2367 portmask = 7; 2368 } 2369 2370 /* 2371 * Route incoming packets for our MAC address to Port 0 (host). 2372 * For simplicity, keep this entry at table index 0 for port 1 and 2373 * at index 2 for port 2 in the ALE. 2374 */ 2375 if_addr_rlock(sc->ifp); 2376 mac = LLADDR((struct sockaddr_dl *)sc->ifp->if_addr->ifa_addr); 2377 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 2378 ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */ 2379 ale_entry[2] = 0; /* port = 0 */ 2380 cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry); 2381 2382 /* Set outgoing MAC Address for slave port. */ 2383 cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1), 2384 mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]); 2385 cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1), 2386 mac[5] << 8 | mac[4]); 2387 if_addr_runlock(sc->ifp); 2388 2389 /* Keep the broadcast address at table entry 1 (or 3). */ 2390 ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */ 2391 /* ALE_MCAST_FWD, Addr type, upper 16 bits of Mac */ 2392 ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff; 2393 ale_entry[2] = portmask << 2; 2394 cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry); 2395 2396 /* SIOCDELMULTI doesn't specify the particular address 2397 being removed, so we have to remove all and rebuild. */ 2398 if (purge) 2399 cpsw_ale_remove_all_mc_entries(sc->swsc); 2400 2401 /* Set other multicast addrs desired. */ 2402 if_maddr_rlock(sc->ifp); 2403 TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) { 2404 if (ifma->ifma_addr->sa_family != AF_LINK) 2405 continue; 2406 cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan, 2407 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 2408 } 2409 if_maddr_runlock(sc->ifp); 2410 2411 return (0); 2412 } 2413 2414 static int 2415 cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports, 2416 int untag, int mcregflood, int mcunregflood) 2417 { 2418 int free_index, i, matching_index; 2419 uint32_t ale_entry[3]; 2420 2421 free_index = matching_index = -1; 2422 /* Find a matching entry or a free entry. */ 2423 for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) { 2424 cpsw_ale_read_entry(sc, i, ale_entry); 2425 2426 /* Entry Type[61:60] is 0 for free entry */ 2427 if (free_index < 0 && ALE_TYPE(ale_entry) == 0) 2428 free_index = i; 2429 2430 if (ALE_VLAN(ale_entry) == vlan) { 2431 matching_index = i; 2432 break; 2433 } 2434 } 2435 2436 if (matching_index < 0) { 2437 if (free_index < 0) 2438 return (-1); 2439 i = free_index; 2440 } 2441 2442 ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 | 2443 (mcunregflood & 7) << 8 | (ports & 7); 2444 ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16; 2445 ale_entry[2] = 0; 2446 cpsw_ale_write_entry(sc, i, ale_entry); 2447 2448 return (0); 2449 } 2450 2451 /* 2452 * 2453 * Statistics and Sysctls. 2454 * 2455 */ 2456 2457 #if 0 2458 static void 2459 cpsw_stats_dump(struct cpsw_softc *sc) 2460 { 2461 int i; 2462 uint32_t r; 2463 2464 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2465 r = cpsw_read_4(sc, CPSW_STATS_OFFSET + 2466 cpsw_stat_sysctls[i].reg); 2467 CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid, 2468 (intmax_t)sc->shadow_stats[i], r, 2469 (intmax_t)sc->shadow_stats[i] + r)); 2470 } 2471 } 2472 #endif 2473 2474 static void 2475 cpsw_stats_collect(struct cpsw_softc *sc) 2476 { 2477 int i; 2478 uint32_t r; 2479 2480 CPSW_DEBUGF(sc, ("Controller shadow statistics updated.")); 2481 2482 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2483 r = cpsw_read_4(sc, CPSW_STATS_OFFSET + 2484 cpsw_stat_sysctls[i].reg); 2485 sc->shadow_stats[i] += r; 2486 cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg, 2487 r); 2488 } 2489 } 2490 2491 static int 2492 cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS) 2493 { 2494 struct cpsw_softc *sc; 2495 struct cpsw_stat *stat; 2496 uint64_t result; 2497 2498 sc = (struct cpsw_softc *)arg1; 2499 stat = &cpsw_stat_sysctls[oidp->oid_number]; 2500 result = sc->shadow_stats[oidp->oid_number]; 2501 result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg); 2502 return (sysctl_handle_64(oidp, &result, 0, req)); 2503 } 2504 2505 static int 2506 cpsw_stat_attached(SYSCTL_HANDLER_ARGS) 2507 { 2508 struct cpsw_softc *sc; 2509 struct bintime t; 2510 unsigned result; 2511 2512 sc = (struct cpsw_softc *)arg1; 2513 getbinuptime(&t); 2514 bintime_sub(&t, &sc->attach_uptime); 2515 result = t.sec; 2516 return (sysctl_handle_int(oidp, &result, 0, req)); 2517 } 2518 2519 static int 2520 cpsw_intr_coalesce(SYSCTL_HANDLER_ARGS) 2521 { 2522 int error; 2523 struct cpsw_softc *sc; 2524 uint32_t ctrl, intr_per_ms; 2525 2526 sc = (struct cpsw_softc *)arg1; 2527 error = sysctl_handle_int(oidp, &sc->coal_us, 0, req); 2528 if (error != 0 || req->newptr == NULL) 2529 return (error); 2530 2531 ctrl = cpsw_read_4(sc, CPSW_WR_INT_CONTROL); 2532 ctrl &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK); 2533 if (sc->coal_us == 0) { 2534 /* Disable the interrupt pace hardware. */ 2535 cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl); 2536 cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), 0); 2537 cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), 0); 2538 return (0); 2539 } 2540 2541 if (sc->coal_us > CPSW_WR_C_IMAX_US_MAX) 2542 sc->coal_us = CPSW_WR_C_IMAX_US_MAX; 2543 if (sc->coal_us < CPSW_WR_C_IMAX_US_MIN) 2544 sc->coal_us = CPSW_WR_C_IMAX_US_MIN; 2545 intr_per_ms = 1000 / sc->coal_us; 2546 /* Just to make sure... */ 2547 if (intr_per_ms > CPSW_WR_C_IMAX_MAX) 2548 intr_per_ms = CPSW_WR_C_IMAX_MAX; 2549 if (intr_per_ms < CPSW_WR_C_IMAX_MIN) 2550 intr_per_ms = CPSW_WR_C_IMAX_MIN; 2551 2552 /* Set the prescale to produce 4us pulses from the 125 Mhz clock. */ 2553 ctrl |= (125 * 4) & CPSW_WR_INT_PRESCALE_MASK; 2554 2555 /* Enable the interrupt pace hardware. */ 2556 cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), intr_per_ms); 2557 cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), intr_per_ms); 2558 ctrl |= CPSW_WR_INT_C0_RX_PULSE | CPSW_WR_INT_C0_TX_PULSE; 2559 cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl); 2560 2561 return (0); 2562 } 2563 2564 static int 2565 cpsw_stat_uptime(SYSCTL_HANDLER_ARGS) 2566 { 2567 struct cpsw_softc *swsc; 2568 struct cpswp_softc *sc; 2569 struct bintime t; 2570 unsigned result; 2571 2572 swsc = arg1; 2573 sc = device_get_softc(swsc->port[arg2].dev); 2574 if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) { 2575 getbinuptime(&t); 2576 bintime_sub(&t, &sc->init_uptime); 2577 result = t.sec; 2578 } else 2579 result = 0; 2580 return (sysctl_handle_int(oidp, &result, 0, req)); 2581 } 2582 2583 static void 2584 cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, 2585 struct cpsw_queue *queue) 2586 { 2587 struct sysctl_oid_list *parent; 2588 2589 parent = SYSCTL_CHILDREN(node); 2590 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers", 2591 CTLFLAG_RD, &queue->queue_slots, 0, 2592 "Total buffers currently assigned to this queue"); 2593 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers", 2594 CTLFLAG_RD, &queue->active_queue_len, 0, 2595 "Buffers currently registered with hardware controller"); 2596 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers", 2597 CTLFLAG_RD, &queue->max_active_queue_len, 0, 2598 "Max value of activeBuffers since last driver reset"); 2599 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers", 2600 CTLFLAG_RD, &queue->avail_queue_len, 0, 2601 "Buffers allocated to this queue but not currently " 2602 "registered with hardware controller"); 2603 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers", 2604 CTLFLAG_RD, &queue->max_avail_queue_len, 0, 2605 "Max value of availBuffers since last driver reset"); 2606 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued", 2607 CTLFLAG_RD, &queue->queue_adds, 0, 2608 "Total buffers added to queue"); 2609 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued", 2610 CTLFLAG_RD, &queue->queue_removes, 0, 2611 "Total buffers removed from queue"); 2612 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "queueRestart", 2613 CTLFLAG_RD, &queue->queue_restart, 0, 2614 "Total times the queue has been restarted"); 2615 SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain", 2616 CTLFLAG_RD, &queue->longest_chain, 0, 2617 "Max buffers used for a single packet"); 2618 } 2619 2620 static void 2621 cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, 2622 struct cpsw_softc *sc) 2623 { 2624 struct sysctl_oid_list *parent; 2625 2626 parent = SYSCTL_CHILDREN(node); 2627 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets", 2628 CTLFLAG_RD, &sc->watchdog.resets, 0, 2629 "Total number of watchdog resets"); 2630 } 2631 2632 static void 2633 cpsw_add_sysctls(struct cpsw_softc *sc) 2634 { 2635 struct sysctl_ctx_list *ctx; 2636 struct sysctl_oid *stats_node, *queue_node, *node; 2637 struct sysctl_oid_list *parent, *stats_parent, *queue_parent; 2638 struct sysctl_oid_list *ports_parent, *port_parent; 2639 char port[16]; 2640 int i; 2641 2642 ctx = device_get_sysctl_ctx(sc->dev); 2643 parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 2644 2645 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug", 2646 CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages"); 2647 2648 SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "rx_batch", 2649 CTLFLAG_RW, &sc->rx_batch, 0, "Set the rx batch size"); 2650 2651 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs", 2652 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU", 2653 "Time since driver attach"); 2654 2655 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "intr_coalesce_us", 2656 CTLTYPE_UINT | CTLFLAG_RW, sc, 0, cpsw_intr_coalesce, "IU", 2657 "minimum time between interrupts"); 2658 2659 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports", 2660 CTLFLAG_RD, NULL, "CPSW Ports Statistics"); 2661 ports_parent = SYSCTL_CHILDREN(node); 2662 for (i = 0; i < CPSW_PORTS; i++) { 2663 if (!sc->dualemac && i != sc->active_slave) 2664 continue; 2665 port[0] = '0' + i; 2666 port[1] = '\0'; 2667 node = SYSCTL_ADD_NODE(ctx, ports_parent, OID_AUTO, 2668 port, CTLFLAG_RD, NULL, "CPSW Port Statistics"); 2669 port_parent = SYSCTL_CHILDREN(node); 2670 SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime", 2671 CTLTYPE_UINT | CTLFLAG_RD, sc, i, 2672 cpsw_stat_uptime, "IU", "Seconds since driver init"); 2673 } 2674 2675 stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", 2676 CTLFLAG_RD, NULL, "CPSW Statistics"); 2677 stats_parent = SYSCTL_CHILDREN(stats_node); 2678 for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { 2679 SYSCTL_ADD_PROC(ctx, stats_parent, i, 2680 cpsw_stat_sysctls[i].oid, 2681 CTLTYPE_U64 | CTLFLAG_RD, sc, 0, 2682 cpsw_stats_sysctl, "IU", 2683 cpsw_stat_sysctls[i].oid); 2684 } 2685 2686 queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue", 2687 CTLFLAG_RD, NULL, "CPSW Queue Statistics"); 2688 queue_parent = SYSCTL_CHILDREN(queue_node); 2689 2690 node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx", 2691 CTLFLAG_RD, NULL, "TX Queue Statistics"); 2692 cpsw_add_queue_sysctls(ctx, node, &sc->tx); 2693 2694 node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx", 2695 CTLFLAG_RD, NULL, "RX Queue Statistics"); 2696 cpsw_add_queue_sysctls(ctx, node, &sc->rx); 2697 2698 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog", 2699 CTLFLAG_RD, NULL, "Watchdog Statistics"); 2700 cpsw_add_watchdog_sysctls(ctx, node, sc); 2701 } 2702