/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
 * Copyright (c) 2016 Rubicon Communications, LLC (Netgate)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * TI Common Platform Ethernet Switch (CPSW) Driver
 * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs.
 *
 * This controller is documented in the AM335x Technical Reference
 * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM
 * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM.
 *
 * It is basically a single Ethernet port (port 0) wired internally to
 * a 3-port store-and-forward switch connected to two independent
 * "sliver" controllers (port 1 and port 2).  You can operate the
 * controller in a variety of different ways by suitably configuring
 * the slivers and the Address Lookup Engine (ALE) that routes packets
 * between the ports.
 *
 * This code was developed and tested on a BeagleBone with
 * an AM335x SoC.
 */
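/*
 * Rough sketch of the datapath described above (assumed from the TRM
 * description; see the TRMs for the authoritative block diagram):
 *
 *	CPDMA (host) --- port 0 --+
 *	                          +-- 3-port switch + ALE
 *	sliver 1 ------- port 1 --+
 *	sliver 2 ------- port 2 --+
 */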
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cpsw.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <dev/extres/syscon/syscon.h>
#include "syscon_if.h"
#include <arm/ti/am335x/am335x_scm.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/fdt/fdt_common.h>

#ifdef CPSW_ETHERSWITCH
#include <dev/etherswitch/etherswitch.h>
#include "etherswitch_if.h"
#endif

#include "if_cpswreg.h"
#include "if_cpswvar.h"

#include "miibus_if.h"

/* Device probe/attach/detach. */
static int cpsw_probe(device_t);
static int cpsw_attach(device_t);
static int cpsw_detach(device_t);
static int cpswp_probe(device_t);
static int cpswp_attach(device_t);
static int cpswp_detach(device_t);

static phandle_t cpsw_get_node(device_t, device_t);

/* Device Init/shutdown. */
static int cpsw_shutdown(device_t);
static void cpswp_init(void *);
static void cpswp_init_locked(void *);
static void cpswp_stop_locked(struct cpswp_softc *);

/* Device Suspend/Resume. */
static int cpsw_suspend(device_t);
static int cpsw_resume(device_t);

/* Ioctl. */
static int cpswp_ioctl(struct ifnet *, u_long command, caddr_t data);

static int cpswp_miibus_readreg(device_t, int phy, int reg);
static int cpswp_miibus_writereg(device_t, int phy, int reg, int value);
static void cpswp_miibus_statchg(device_t);

/* Send/Receive packets. */
static void cpsw_intr_rx(void *arg);
static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *);
static void cpsw_rx_enqueue(struct cpsw_softc *);
static void cpswp_start(struct ifnet *);
static void cpsw_intr_tx(void *);
static void cpswp_tx_enqueue(struct cpswp_softc *);
static int cpsw_tx_dequeue(struct cpsw_softc *);

/* Misc interrupts and watchdog. */
static void cpsw_intr_rx_thresh(void *);
static void cpsw_intr_misc(void *);
static void cpswp_tick(void *);
static void cpswp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int cpswp_ifmedia_upd(struct ifnet *);
static void cpsw_tx_watchdog(void *);

/* ALE support */
static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *);
static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *);
static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *);
static void cpsw_ale_dump_table(struct cpsw_softc *);
static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int,
	int);
static int cpswp_ale_update_addresses(struct cpswp_softc *, int);

/* Statistics and sysctls. */
static void cpsw_add_sysctls(struct cpsw_softc *);
static void cpsw_stats_collect(struct cpsw_softc *);
static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS);

#ifdef CPSW_ETHERSWITCH
static etherswitch_info_t *cpsw_getinfo(device_t);
static int cpsw_getport(device_t, etherswitch_port_t *);
static int cpsw_setport(device_t, etherswitch_port_t *);
static int cpsw_getconf(device_t, etherswitch_conf_t *);
static int cpsw_getvgroup(device_t, etherswitch_vlangroup_t *);
static int cpsw_setvgroup(device_t, etherswitch_vlangroup_t *);
static int cpsw_readreg(device_t, int);
static int cpsw_writereg(device_t, int, int);
static int cpsw_readphy(device_t, int, int);
static int cpsw_writephy(device_t, int, int, int);
#endif

/*
 * Arbitrary limit on number of segments in an mbuf to be transmitted.
 * Packets with more segments than this will be defragmented before
 * they are queued.
 */
#define	CPSW_TXFRAGS		16
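/*
 * See cpswp_tx_enqueue(): a packet whose DMA mapping would need more
 * than CPSW_TXFRAGS segments fails to load (the mbuf DMA tag is created
 * with CPSW_TXFRAGS as nsegments), gets collapsed with m_defrag() and
 * is requeued.
 */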
/* Shared resources. */
static device_method_t cpsw_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cpsw_probe),
	DEVMETHOD(device_attach,	cpsw_attach),
	DEVMETHOD(device_detach,	cpsw_detach),
	DEVMETHOD(device_shutdown,	cpsw_shutdown),
	DEVMETHOD(device_suspend,	cpsw_suspend),
	DEVMETHOD(device_resume,	cpsw_resume),
	/* Bus interface */
	DEVMETHOD(bus_add_child,	device_add_child_ordered),
	/* OFW methods */
	DEVMETHOD(ofw_bus_get_node,	cpsw_get_node),
#ifdef CPSW_ETHERSWITCH
	/* etherswitch interface */
	DEVMETHOD(etherswitch_getinfo,	cpsw_getinfo),
	DEVMETHOD(etherswitch_readreg,	cpsw_readreg),
	DEVMETHOD(etherswitch_writereg,	cpsw_writereg),
	DEVMETHOD(etherswitch_readphyreg,	cpsw_readphy),
	DEVMETHOD(etherswitch_writephyreg,	cpsw_writephy),
	DEVMETHOD(etherswitch_getport,	cpsw_getport),
	DEVMETHOD(etherswitch_setport,	cpsw_setport),
	DEVMETHOD(etherswitch_getvgroup,	cpsw_getvgroup),
	DEVMETHOD(etherswitch_setvgroup,	cpsw_setvgroup),
	DEVMETHOD(etherswitch_getconf,	cpsw_getconf),
#endif
	DEVMETHOD_END
};

static driver_t cpsw_driver = {
	"cpswss",
	cpsw_methods,
	sizeof(struct cpsw_softc),
};

DRIVER_MODULE(cpswss, simplebus, cpsw_driver, 0, 0);

/* Port/Slave resources. */
static device_method_t cpswp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cpswp_probe),
	DEVMETHOD(device_attach,	cpswp_attach),
	DEVMETHOD(device_detach,	cpswp_detach),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	cpswp_miibus_readreg),
	DEVMETHOD(miibus_writereg,	cpswp_miibus_writereg),
	DEVMETHOD(miibus_statchg,	cpswp_miibus_statchg),
	DEVMETHOD_END
};

static driver_t cpswp_driver = {
	"cpsw",
	cpswp_methods,
	sizeof(struct cpswp_softc),
};

#ifdef CPSW_ETHERSWITCH
DRIVER_MODULE(etherswitch, cpswss, etherswitch_driver, 0, 0);
MODULE_DEPEND(cpswss, etherswitch, 1, 1, 1);
#endif

DRIVER_MODULE(cpsw, cpswss, cpswp_driver, 0, 0);
DRIVER_MODULE(miibus, cpsw, miibus_driver, 0, 0);
MODULE_DEPEND(cpsw, ether, 1, 1, 1);
MODULE_DEPEND(cpsw, miibus, 1, 1, 1);

#ifdef CPSW_ETHERSWITCH
static struct cpsw_vlangroups cpsw_vgroups[CPSW_VLANS];
#endif

static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 };

static struct resource_spec irq_res_spec[] = {
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};
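/*
 * Interrupt handlers, paired by index with the irq_res_spec entries
 * above in cpsw_intr_attach(); keep the two tables in the same order.
 */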
static struct {
	void (*cb)(void *);
} cpsw_intr_cb[] = {
	{ cpsw_intr_rx_thresh },
	{ cpsw_intr_rx },
	{ cpsw_intr_tx },
	{ cpsw_intr_misc },
};

/*
 * The number of entries here must match the size of the stats array in
 * struct cpswp_softc.
 */
static struct cpsw_stat {
	int	reg;
	char	*oid;
} cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = {
	{0x00, "GoodRxFrames"},
	{0x04, "BroadcastRxFrames"},
	{0x08, "MulticastRxFrames"},
	{0x0C, "PauseRxFrames"},
	{0x10, "RxCrcErrors"},
	{0x14, "RxAlignErrors"},
	{0x18, "OversizeRxFrames"},
	{0x1c, "RxJabbers"},
	{0x20, "ShortRxFrames"},
	{0x24, "RxFragments"},
	{0x30, "RxOctets"},
	{0x34, "GoodTxFrames"},
	{0x38, "BroadcastTxFrames"},
	{0x3c, "MulticastTxFrames"},
	{0x40, "PauseTxFrames"},
	{0x44, "DeferredTxFrames"},
	{0x48, "CollisionsTxFrames"},
	{0x4c, "SingleCollisionTxFrames"},
	{0x50, "MultipleCollisionTxFrames"},
	{0x54, "ExcessiveCollisions"},
	{0x58, "LateCollisions"},
	{0x5c, "TxUnderrun"},
	{0x60, "CarrierSenseErrors"},
	{0x64, "TxOctets"},
	{0x68, "RxTx64OctetFrames"},
	{0x6c, "RxTx65to127OctetFrames"},
	{0x70, "RxTx128to255OctetFrames"},
	{0x74, "RxTx256to511OctetFrames"},
	{0x78, "RxTx512to1024OctetFrames"},
	{0x7c, "RxTx1024upOctetFrames"},
	{0x80, "NetOctets"},
	{0x84, "RxStartOfFrameOverruns"},
	{0x88, "RxMiddleOfFrameOverruns"},
	{0x8c, "RxDmaOverruns"}
};

/*
 * Basic debug support.
 */

static void
cpsw_debugf_head(const char *funcname)
{
	int t = (int)(time_second % (24 * 60 * 60));

	printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60,
	    funcname);
}

static void
cpsw_debugf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

#define	CPSW_DEBUGF(_sc, a) do {					\
	if ((_sc)->debug) {						\
		cpsw_debugf_head(__func__);				\
		cpsw_debugf a;						\
	}								\
} while (0)

/*
 * Locking macros
 */
#define	CPSW_TX_LOCK(sc) do {						\
	mtx_assert(&(sc)->rx.lock, MA_NOTOWNED);			\
	mtx_lock(&(sc)->tx.lock);					\
} while (0)

#define	CPSW_TX_UNLOCK(sc)	mtx_unlock(&(sc)->tx.lock)
#define	CPSW_TX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->tx.lock, MA_OWNED)

#define	CPSW_RX_LOCK(sc) do {						\
	mtx_assert(&(sc)->tx.lock, MA_NOTOWNED);			\
	mtx_lock(&(sc)->rx.lock);					\
} while (0)

#define	CPSW_RX_UNLOCK(sc)	mtx_unlock(&(sc)->rx.lock)
#define	CPSW_RX_LOCK_ASSERT(sc)	mtx_assert(&(sc)->rx.lock, MA_OWNED)

#define	CPSW_PORT_LOCK(_sc) do {					\
	mtx_assert(&(_sc)->lock, MA_NOTOWNED);				\
	mtx_lock(&(_sc)->lock);						\
} while (0)

#define	CPSW_PORT_UNLOCK(_sc)	mtx_unlock(&(_sc)->lock)
#define	CPSW_PORT_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->lock, MA_OWNED)

/*
 * Read/Write macros
 */
#define	cpsw_read_4(_sc, _reg)		bus_read_4((_sc)->mem_res, (_reg))
#define	cpsw_write_4(_sc, _reg, _val)					\
	bus_write_4((_sc)->mem_res, (_reg), (_val))

#define	cpsw_cpdma_bd_offset(i)	(CPSW_CPPI_RAM_OFFSET + ((i)*16))

#define	cpsw_cpdma_bd_paddr(sc, slot)					\
	BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset)
#define	cpsw_cpdma_read_bd(sc, slot, val)				\
	bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
#define	cpsw_cpdma_write_bd(sc, slot, val)				\
	bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4)
#define	cpsw_cpdma_write_bd_next(sc, slot, next_slot)			\
	cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot))
#define	cpsw_cpdma_write_bd_flags(sc, slot, val)			\
	bus_write_2(sc->mem_res, slot->bd_offset + 14, val)
#define	cpsw_cpdma_read_bd_flags(sc, slot)				\
	bus_read_2(sc->mem_res, slot->bd_offset + 14)
#define	cpsw_write_hdp_slot(sc, queue, slot)				\
	cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot))
#define	CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0))
#define	cpsw_read_cp(sc, queue)						\
	cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET)
#define	cpsw_write_cp(sc, queue, val)					\
	cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val))
#define	cpsw_write_cp_slot(sc, queue, slot)				\
	cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot))
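/*
 * Descriptor layout assumed by the accessors above: each buffer
 * descriptor occupies 16 bytes of CPPI RAM.  Word 0 is the
 * next-descriptor pointer, word 1 the buffer pointer, word 2 holds the
 * buffer length and offset, and word 3 the packet length and flags,
 * which is why the flags accessors use a 16-bit access at byte offset
 * 14.  CP_OFFSET works because the completion-pointer registers sit at
 * a fixed distance from the matching head-pointer registers for both
 * the TX and RX channels.
 */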
#if 0
/* XXX temporary function versions for debugging. */
static void
cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue,
    struct cpsw_slot *slot)
{
	uint32_t reg = queue->hdp_offset;
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg)));
	cpsw_write_4(sc, reg, v);
}

static void
cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue,
    struct cpsw_slot *slot)
{
	uint32_t v = cpsw_cpdma_bd_paddr(sc, slot);
	CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v,
	    cpsw_read_cp(sc, queue)));
	cpsw_write_cp(sc, queue, v);
}
#endif

/*
 * Expanded dump routines for verbose debugging.
 */
static void
cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ",
	    "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun",
	    "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1",
	    "Port0"};
	struct cpsw_cpdma_bd bd;
	const char *sep;
	int i;

	cpsw_cpdma_read_bd(sc, slot, &bd);
	printf("BD Addr : 0x%08x   Next  : 0x%08x\n",
	    cpsw_cpdma_bd_paddr(sc, slot), bd.next);
	printf("  BufPtr: 0x%08x   BufLen: 0x%08x\n", bd.bufptr, bd.buflen);
	printf("  BufOff: 0x%08x   PktLen: 0x%08x\n", bd.bufoff, bd.pktlen);
	printf("  Flags: ");
	sep = "";
	for (i = 0; i < 16; ++i) {
		if (bd.flags & (1 << (15 - i))) {
			printf("%s%s", sep, flags[i]);
			sep = ",";
		}
	}
	printf("\n");
	if (slot->mbuf) {
		printf("  Ether:  %14D\n",
		    (char *)(slot->mbuf->m_data), " ");
		printf("  Packet: %16D\n",
		    (char *)(slot->mbuf->m_data) + 14, " ");
	}
}

/* The macro parameter was misnamed "cs" while the body used "sc". */
#define	CPSW_DUMP_SLOT(sc, slot) do {				\
	IF_DEBUG(sc) {						\
		cpsw_dump_slot(sc, slot);			\
	}							\
} while (0)

static void
cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q)
{
	struct cpsw_slot *slot;
	int i = 0;
	int others = 0;

	STAILQ_FOREACH(slot, q, next) {
		if (i > CPSW_TXFRAGS)
			++others;
		else
			cpsw_dump_slot(sc, slot);
		++i;
	}
	if (others)
		printf(" ... and %d more.\n", others);
	printf("\n");
}

#define	CPSW_DUMP_QUEUE(sc, q) do {				\
	IF_DEBUG(sc) {						\
		cpsw_dump_queue(sc, q);				\
	}							\
} while (0)
static void
cpsw_init_slots(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot;
	int i;

	STAILQ_INIT(&sc->avail);

	/* Put the slot descriptors onto the global avail list. */
	for (i = 0; i < nitems(sc->_slots); i++) {
		slot = &sc->_slots[i];
		slot->bd_offset = cpsw_cpdma_bd_offset(i);
		STAILQ_INSERT_TAIL(&sc->avail, slot, next);
	}
}

static int
cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested)
{
	const int max_slots = nitems(sc->_slots);
	struct cpsw_slot *slot;
	int i;

	if (requested < 0)
		requested = max_slots;

	for (i = 0; i < requested; ++i) {
		slot = STAILQ_FIRST(&sc->avail);
		if (slot == NULL)
			return (0);
		if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) {
			device_printf(sc->dev, "failed to create dmamap\n");
			return (ENOMEM);
		}
		STAILQ_REMOVE_HEAD(&sc->avail, next);
		STAILQ_INSERT_TAIL(&queue->avail, slot, next);
		++queue->avail_queue_len;
		++queue->queue_slots;
	}
	return (0);
}

static void
cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot)
{
	int error __diagused;

	if (slot->dmamap) {
		if (slot->mbuf)
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
		error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap);
		KASSERT(error == 0, ("Mapping still active"));
		slot->dmamap = NULL;
	}
	if (slot->mbuf) {
		m_freem(slot->mbuf);
		slot->mbuf = NULL;
	}
}

static void
cpsw_reset(struct cpsw_softc *sc)
{
	int i;

	callout_stop(&sc->watchdog.callout);

	/* Reset RMII/RGMII wrapper. */
	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
		;

	/* Disable TX and RX interrupts for all cores. */
	for (i = 0; i < 3; ++i) {
		cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00);
		cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00);
	}

	/* Reset CPSW subsystem. */
	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
		;

	/* Reset Sliver port 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Reset */
		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
			;
	}

	/* Reset DMA controller. */
	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
		;

	/* Disable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0);

	/* Clear all queues. */
	for (i = 0; i < 8; i++) {
		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
	}

	/* Clear all interrupt Masks */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
}
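/*
 * The soft-reset loops in cpsw_reset() spin forever if a block never
 * clears its reset bit.  A bounded variant might look like this sketch
 * (cpsw_wait_soft_reset() is hypothetical and the microsecond budget an
 * arbitrary choice, not taken from the TRM):
 */
#if 0
static int
cpsw_wait_soft_reset(struct cpsw_softc *sc, uint32_t reg, int timo_us)
{

	while (timo_us-- > 0) {
		/* Bit 0 of each SOFT_RESET register self-clears. */
		if ((cpsw_read_4(sc, reg) & 1) == 0)
			return (0);
		DELAY(1);
	}
	return (ETIMEDOUT);
}
#endif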
static void
cpsw_init(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot;
	uint32_t reg;

	/* Disable the interrupt pacing. */
	reg = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
	reg &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
	cpsw_write_4(sc, CPSW_WR_INT_CONTROL, reg);

	/* Clear ALE */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL);

	/* Enable ALE */
	reg = CPSW_ALE_CTL_ENABLE;
	if (sc->dualemac)
		reg |= CPSW_ALE_CTL_VLAN_AWARE;
	cpsw_write_4(sc, CPSW_ALE_CONTROL, reg);

	/* Set Host Port Mapping. */
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);

	/* Initialize ALE: set host port to forwarding(3). */
	cpsw_write_4(sc, CPSW_ALE_PORTCTL(0),
	    ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD);

	cpsw_write_4(sc, CPSW_SS_PTYPE, 0);

	/* Enable statistics for ports 0, 1 and 2 */
	cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);

	/* Turn off flow control. */
	cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);

	/* Align the IP header on a 4-byte boundary (2-byte buffer offset). */
	cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2);

	/* Initialize RX Buffer Descriptors */
	cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);

	/* Enable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);

	/* Enable Interrupts for core 0 */
	cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0xFF);
	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);

	/* Enable host Error Interrupt */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3);

	/* Enable interrupts for RX and TX on Channel 0 */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET,
	    CPSW_CPDMA_RX_INT(0) | CPSW_CPDMA_RX_INT_THRESH(0));
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1);

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff);

	/* Select MII in GMII_SEL, Internal Delay mode */
	//ti_scm_reg_write_4(0x650, 0);

	/* Initialize active queues. */
	slot = STAILQ_FIRST(&sc->tx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->tx, slot);
	slot = STAILQ_FIRST(&sc->rx.active);
	if (slot != NULL)
		cpsw_write_hdp_slot(sc, &sc->rx, slot);
	cpsw_rx_enqueue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), sc->rx.active_queue_len);
	cpsw_write_4(sc, CPSW_CPDMA_RX_PENDTHRESH(0), CPSW_TXFRAGS);

	/* Activate network interface. */
	sc->rx.running = 1;
	sc->tx.running = 1;
	sc->watchdog.timer = 0;
	callout_init(&sc->watchdog.callout, 0);
	callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
}
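/*
 * Sketch for the CLKDIV TODO above: MDCLK = CLK / (CLKDIV + 1), so the
 * divider could be derived from the reference clock instead of the
 * hard-coded 0xff.  Assuming the usual 125 MHz CPSW reference clock,
 * CLKDIV=0xff yields an MDCLK of about 488 kHz, well under the 2.5 MHz
 * MDIO ceiling.  cpsw_mdio_clkdiv() is a hypothetical helper:
 */
#if 0
static uint32_t
cpsw_mdio_clkdiv(uint32_t refclk_hz)
{

	/* Round up so that MDCLK never exceeds 2.5 MHz. */
	return (howmany(refclk_hz, 2500000) - 1);
}
#endif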
/*
 *
 * Device Probe, Attach, Detach.
 *
 */

static int
cpsw_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "ti,cpsw"))
		return (ENXIO);

	device_set_desc(dev, "3-port Switch Ethernet Subsystem");
	return (BUS_PROBE_DEFAULT);
}

static int
cpsw_intr_attach(struct cpsw_softc *sc)
{
	int i;

	for (i = 0; i < CPSW_INTR_COUNT; i++) {
		if (bus_setup_intr(sc->dev, sc->irq_res[i],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
		    cpsw_intr_cb[i].cb, sc, &sc->ih_cookie[i]) != 0) {
			return (-1);
		}
	}

	return (0);
}

static void
cpsw_intr_detach(struct cpsw_softc *sc)
{
	int i;

	for (i = 0; i < CPSW_INTR_COUNT; i++) {
		if (sc->ih_cookie[i]) {
			bus_teardown_intr(sc->dev, sc->irq_res[i],
			    sc->ih_cookie[i]);
		}
	}
}

static int
cpsw_get_fdt_data(struct cpsw_softc *sc, int port)
{
	char *name;
	int len, phy, vlan;
	pcell_t phy_id[3], vlan_id;
	phandle_t child;
	unsigned long mdio_child_addr;

	/* Find any slave with phy-handle/phy_id */
	phy = -1;
	vlan = -1;
	for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) {
		if (OF_getprop_alloc(child, "name", (void **)&name) < 0)
			continue;
		if (sscanf(name, "slave@%lx", &mdio_child_addr) != 1) {
			OF_prop_free(name);
			continue;
		}
		OF_prop_free(name);

		if (mdio_child_addr != slave_mdio_addr[port] &&
		    mdio_child_addr != (slave_mdio_addr[port] & 0xFFF))
			continue;

		if (fdt_get_phyaddr(child, NULL, &phy, NULL) != 0) {
			/* Users with an old DTB will have phy_id instead. */
			phy = -1;
			len = OF_getproplen(child, "phy_id");
			if (len / sizeof(pcell_t) == 2) {
				/* Get the PHY address from the FDT. */
				if (OF_getencprop(child, "phy_id", phy_id,
				    len) > 0)
					phy = phy_id[1];
			}
		}

		len = OF_getproplen(child, "dual_emac_res_vlan");
		if (len / sizeof(pcell_t) == 1) {
			/* Get the reserved VLAN ID from the FDT. */
			if (OF_getencprop(child, "dual_emac_res_vlan",
			    &vlan_id, len) > 0) {
				vlan = vlan_id;
			}
		}

		break;
	}
	if (phy == -1)
		return (ENXIO);
	sc->port[port].phy = phy;
	sc->port[port].vlan = vlan;

	return (0);
}
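/*
 * cpsw_get_fdt_data() above expects slave nodes shaped roughly like
 * this sketch (not a verbatim DTS; the unit addresses follow
 * slave_mdio_addr, and &ethphy0 is a placeholder label):
 *
 *	slave@4a100200 {
 *		phy-handle = <&ethphy0>;	// or legacy "phy_id"
 *		dual_emac_res_vlan = <1>;
 *	};
 */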
static int
cpsw_attach(device_t dev)
{
	int error, i;
	struct cpsw_softc *sc;
	uint32_t reg;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);
	getbinuptime(&sc->attach_uptime);

	if (OF_getencprop(sc->node, "active_slave", &sc->active_slave,
	    sizeof(sc->active_slave)) <= 0) {
		sc->active_slave = 0;
	}
	if (sc->active_slave > 1)
		sc->active_slave = 1;

	if (OF_hasprop(sc->node, "dual_emac"))
		sc->dualemac = 1;

	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		if (cpsw_get_fdt_data(sc, i) != 0) {
			device_printf(dev,
			    "failed to get PHY address from FDT\n");
			return (ENXIO);
		}
	}

	/* Initialize mutexes */
	mtx_init(&sc->tx.lock, device_get_nameunit(dev),
	    "cpsw TX lock", MTX_DEF);
	mtx_init(&sc->rx.lock, device_get_nameunit(dev),
	    "cpsw RX lock", MTX_DEF);

	/* Allocate IRQ resources */
	error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res);
	if (error) {
		device_printf(dev, "could not allocate IRQ resources\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

	sc->mem_rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->mem_rid, RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(sc->dev, "failed to allocate memory resource\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

	reg = cpsw_read_4(sc, CPSW_SS_IDVER);
	device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7),
	    reg & 0xFF, (reg >> 11) & 0x1F);

	cpsw_add_sysctls(sc);

	/* Allocate a busdma tag and DMA safe memory for mbufs. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    MCLBYTES, CPSW_TXFRAGS,	/* maxsize, nsegments */
	    MCLBYTES, 0,		/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->mbuf_dtag);		/* dmatag */
	if (error) {
		device_printf(dev, "bus_dma_tag_create failed\n");
		cpsw_detach(dev);
		return (error);
	}

	/* Allocate a NULL buffer for padding. */
	sc->nullpad = malloc(ETHER_MIN_LEN, M_DEVBUF, M_WAITOK | M_ZERO);

	cpsw_init_slots(sc);

	/* Allocate slots to TX and RX queues. */
	STAILQ_INIT(&sc->rx.avail);
	STAILQ_INIT(&sc->rx.active);
	STAILQ_INIT(&sc->tx.avail);
	STAILQ_INIT(&sc->tx.active);
	// For now: 128 slots to TX, rest to RX.
	// XXX TODO: start with 32/64 and grow dynamically based on demand.
	if (cpsw_add_slots(sc, &sc->tx, 128) ||
	    cpsw_add_slots(sc, &sc->rx, -1)) {
		device_printf(dev, "failed to allocate dmamaps\n");
		cpsw_detach(dev);
		return (ENOMEM);
	}
	device_printf(dev, "Initial queue size TX=%d RX=%d\n",
	    sc->tx.queue_slots, sc->rx.queue_slots);

	sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0);
	sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0);

	if (cpsw_intr_attach(sc) == -1) {
		device_printf(dev, "failed to setup interrupts\n");
		cpsw_detach(dev);
		return (ENXIO);
	}

#ifdef CPSW_ETHERSWITCH
	for (i = 0; i < CPSW_VLANS; i++)
		cpsw_vgroups[i].vid = -1;
#endif

	/* Reset the controller. */
	cpsw_reset(sc);
	cpsw_init(sc);

	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		sc->port[i].dev = device_add_child(dev, "cpsw", i);
		if (sc->port[i].dev == NULL) {
			cpsw_detach(dev);
			return (ENXIO);
		}
	}
	bus_generic_probe(dev);
	bus_generic_attach(dev);

	return (0);
}

static int
cpsw_detach(device_t dev)
{
	struct cpsw_softc *sc;
	int error, i;

	bus_generic_detach(dev);
	sc = device_get_softc(dev);

	for (i = 0; i < CPSW_PORTS; i++) {
		if (sc->port[i].dev)
			device_delete_child(dev, sc->port[i].dev);
	}

	if (device_is_attached(dev)) {
		callout_stop(&sc->watchdog.callout);
		callout_drain(&sc->watchdog.callout);
	}

	/* Stop and release all interrupts */
	cpsw_intr_detach(sc);

	/* Free dmamaps and mbufs */
	for (i = 0; i < nitems(sc->_slots); ++i)
		cpsw_free_slot(sc, &sc->_slots[i]);
	/* Free null padding buffer. */
	if (sc->nullpad)
		free(sc->nullpad, M_DEVBUF);

	/* Free DMA tag */
	if (sc->mbuf_dtag) {
		error = bus_dma_tag_destroy(sc->mbuf_dtag);
		KASSERT(error == 0, ("Unable to destroy DMA tag"));
	}

	/* Free IO memory handler */
	if (sc->mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem_res);
	bus_release_resources(dev, irq_res_spec, sc->irq_res);

	/* Destroy mutexes */
	mtx_destroy(&sc->rx.lock);
	mtx_destroy(&sc->tx.lock);

	/* Detach the switch device, if present. */
	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	return (device_delete_children(dev));
}

static phandle_t
cpsw_get_node(device_t bus, device_t dev)
{

	/* Share controller node with port device. */
	return (ofw_bus_get_node(bus));
}

static int
cpswp_probe(device_t dev)
{

	if (device_get_unit(dev) > 1) {
		device_printf(dev, "Only two ports are supported.\n");
		return (ENXIO);
	}
	device_set_desc(dev, "Ethernet Switch Port");

	return (BUS_PROBE_DEFAULT);
}

static int
cpswp_attach(device_t dev)
{
	int error;
	struct ifnet *ifp;
	struct cpswp_softc *sc;
	uint32_t reg;
	uint8_t mac_addr[ETHER_ADDR_LEN];
	phandle_t opp_table;
	struct syscon *syscon;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->pdev = device_get_parent(dev);
	sc->swsc = device_get_softc(sc->pdev);
	sc->unit = device_get_unit(dev);
	sc->phy = sc->swsc->port[sc->unit].phy;
	sc->vlan = sc->swsc->port[sc->unit].vlan;
	if (sc->swsc->dualemac && sc->vlan == -1)
		sc->vlan = sc->unit + 1;

	if (sc->unit == 0) {
		sc->physel = MDIOUSERPHYSEL0;
		sc->phyaccess = MDIOUSERACCESS0;
	} else {
		sc->physel = MDIOUSERPHYSEL1;
		sc->phyaccess = MDIOUSERACCESS1;
	}

	mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock",
	    MTX_DEF);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		cpswp_detach(dev);
		return (ENXIO);
	}

	if_initname(ifp, device_get_name(sc->dev), sc->unit);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN?
	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_init = cpswp_init;
	ifp->if_start = cpswp_start;
	ifp->if_ioctl = cpswp_ioctl;

	ifp->if_snd.ifq_drv_maxlen = sc->swsc->tx.queue_slots;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	/* FIXME: For now, go and kidnap the syscon from /opp-table. */
	/* ti,cpsw actually has an optional syscon reference, but only for am33xx?? */
	opp_table = OF_finddevice("/opp-table");
	if (opp_table == -1) {
		device_printf(dev, "Can't find /opp-table\n");
		cpswp_detach(dev);
		return (ENXIO);
	}
	if (!OF_hasprop(opp_table, "syscon")) {
		device_printf(dev,
		    "/opp-table doesn't have required syscon property\n");
		cpswp_detach(dev);
		return (ENXIO);
	}
	if (syscon_get_by_ofw_property(dev, opp_table, "syscon", &syscon) != 0) {
		device_printf(dev, "Failed to get syscon\n");
		cpswp_detach(dev);
		return (ENXIO);
	}

	/* Get high part of MAC address from control module (mac_id[0|1]_hi) */
	reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_HI + sc->unit * 8);
	mac_addr[0] = reg & 0xFF;
	mac_addr[1] = (reg >>  8) & 0xFF;
	mac_addr[2] = (reg >> 16) & 0xFF;
	mac_addr[3] = (reg >> 24) & 0xFF;

	/* Get low part of MAC address from control module (mac_id[0|1]_lo) */
	reg = SYSCON_READ_4(syscon, SCM_MAC_ID0_LO + sc->unit * 8);
	mac_addr[4] = reg & 0xFF;
	mac_addr[5] = (reg >>  8) & 0xFF;

	error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd,
	    cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		cpswp_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Select PHY and enable interrupts */
	cpsw_write_4(sc->swsc, sc->physel,
	    MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F));

	ether_ifattach(sc->ifp, mac_addr);
	callout_init(&sc->mii_callout, 0);

	return (0);
}

static int
cpswp_detach(device_t dev)
{
	struct cpswp_softc *sc;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc->swsc, (""));
	if (device_is_attached(dev)) {
		ether_ifdetach(sc->ifp);
		CPSW_PORT_LOCK(sc);
		cpswp_stop_locked(sc);
		CPSW_PORT_UNLOCK(sc);
		callout_drain(&sc->mii_callout);
	}

	bus_generic_detach(dev);

	if_free(sc->ifp);
	mtx_destroy(&sc->lock);

	return (0);
}

/*
 *
 * Init/Shutdown.
 *
 */

static int
cpsw_ports_down(struct cpsw_softc *sc)
{
	struct cpswp_softc *psc;
	struct ifnet *ifp1, *ifp2;

	if (!sc->dualemac)
		return (1);
	psc = device_get_softc(sc->port[0].dev);
	ifp1 = psc->ifp;
	psc = device_get_softc(sc->port[1].dev);
	ifp2 = psc->ifp;
	if ((ifp1->if_flags & IFF_UP) == 0 && (ifp2->if_flags & IFF_UP) == 0)
		return (1);

	return (0);
}

static void
cpswp_init(void *arg)
{
	struct cpswp_softc *sc = arg;

	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK(sc);
	cpswp_init_locked(arg);
	CPSW_PORT_UNLOCK(sc);
}
static void
cpswp_init_locked(void *arg)
{
#ifdef CPSW_ETHERSWITCH
	int i;
#endif
	struct cpswp_softc *sc = arg;
	struct ifnet *ifp;
	uint32_t reg;

	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK_ASSERT(sc);
	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	getbinuptime(&sc->init_uptime);

	if (!sc->swsc->rx.running && !sc->swsc->tx.running) {
		/* Reset the controller. */
		cpsw_reset(sc->swsc);
		cpsw_init(sc->swsc);
	}

	/* Set Slave Mapping. */
	cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210);
	cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1),
	    0x33221100);
	cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2);
	/* Enable MAC RX/TX modules. */
	/* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */
	/* Huh?  Docs call bit 0 "Loopback" some places, "FullDuplex" others. */
	reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
	reg |= CPSW_SL_MACTL_GMII_ENABLE;
	cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);

	/* Initialize ALE: set port to forwarding, initialize addrs */
	cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1),
	    ALE_PORTCTL_INGRESS | ALE_PORTCTL_FORWARD);
	cpswp_ale_update_addresses(sc, 1);

	if (sc->swsc->dualemac) {
		/* Set Port VID. */
		cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1),
		    sc->vlan & 0xfff);
		cpsw_ale_update_vlan_table(sc->swsc, sc->vlan,
		    (1 << (sc->unit + 1)) | (1 << 0), /* Member list */
		    (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */
		    (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */
#ifdef CPSW_ETHERSWITCH
		for (i = 0; i < CPSW_VLANS; i++) {
			if (cpsw_vgroups[i].vid != -1)
				continue;
			cpsw_vgroups[i].vid = sc->vlan;
			break;
		}
#endif
	}

	mii_mediachg(sc->mii);
	callout_reset(&sc->mii_callout, hz, cpswp_tick, sc);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static int
cpsw_shutdown(device_t dev)
{
	struct cpsw_softc *sc;
	struct cpswp_softc *psc;
	int i;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc, (""));
	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		psc = device_get_softc(sc->port[i].dev);
		CPSW_PORT_LOCK(psc);
		cpswp_stop_locked(psc);
		CPSW_PORT_UNLOCK(psc);
	}

	return (0);
}

static void
cpsw_rx_teardown(struct cpsw_softc *sc)
{
	int i = 0;

	CPSW_RX_LOCK(sc);
	CPSW_DEBUGF(sc, ("starting RX teardown"));
	sc->rx.teardown = 1;
	cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
	CPSW_RX_UNLOCK(sc);
	while (sc->rx.running) {
		if (++i > 10) {
			device_printf(sc->dev,
			    "Unable to cleanly shutdown receiver\n");
			return;
		}
		DELAY(200);
	}
	if (!sc->rx.running)
		CPSW_DEBUGF(sc, ("finished RX teardown (%d retries)", i));
}
static void
cpsw_tx_teardown(struct cpsw_softc *sc)
{
	int i = 0;

	CPSW_TX_LOCK(sc);
	CPSW_DEBUGF(sc, ("starting TX teardown"));
	/* Start the TX queue teardown if the queue is not empty. */
	if (STAILQ_FIRST(&sc->tx.active) != NULL)
		cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
	else
		sc->tx.teardown = 1;
	cpsw_tx_dequeue(sc);
	while (sc->tx.running && ++i < 10) {
		DELAY(200);
		cpsw_tx_dequeue(sc);
	}
	if (sc->tx.running) {
		device_printf(sc->dev,
		    "Unable to cleanly shutdown transmitter\n");
	}
	CPSW_DEBUGF(sc,
	    ("finished TX teardown (%d retries, %d idle buffers)", i,
	    sc->tx.active_queue_len));
	CPSW_TX_UNLOCK(sc);
}

static void
cpswp_stop_locked(struct cpswp_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;

	ifp = sc->ifp;
	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Disable interface */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	/* Stop ticker */
	callout_stop(&sc->mii_callout);

	/* Tear down the RX/TX queues. */
	if (cpsw_ports_down(sc->swsc)) {
		cpsw_rx_teardown(sc->swsc);
		cpsw_tx_teardown(sc->swsc);
	}

	/* Stop MAC RX/TX modules. */
	reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit));
	reg &= ~CPSW_SL_MACTL_GMII_ENABLE;
	cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg);

	if (cpsw_ports_down(sc->swsc)) {
		/* Capture stats before we reset controller. */
		cpsw_stats_collect(sc->swsc);

		cpsw_reset(sc->swsc);
		cpsw_init(sc->swsc);
	}
}

/*
 * Suspend/Resume.
 */

static int
cpsw_suspend(device_t dev)
{
	struct cpsw_softc *sc;
	struct cpswp_softc *psc;
	int i;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc, (""));
	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		psc = device_get_softc(sc->port[i].dev);
		CPSW_PORT_LOCK(psc);
		cpswp_stop_locked(psc);
		CPSW_PORT_UNLOCK(psc);
	}

	return (0);
}

static int
cpsw_resume(device_t dev)
{
	struct cpsw_softc *sc;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc, ("UNIMPLEMENTED"));

	return (0);
}

/*
 *
 * IOCTL
 *
 */
static void
cpsw_set_promisc(struct cpswp_softc *sc, int set)
{
	uint32_t reg;

	/*
	 * Enabling promiscuous mode requires ALE_BYPASS to be enabled.
	 * That disables the ALE forwarding logic and causes every
	 * packet to be sent only to the host port.  In bypass mode,
	 * the ALE processes host port transmit packets the same as in
	 * normal mode.
	 */
	reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL);
	reg &= ~CPSW_ALE_CTL_BYPASS;
	if (set)
		reg |= CPSW_ALE_CTL_BYPASS;
	cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg);
}

static void
cpsw_set_allmulti(struct cpswp_softc *sc, int set)
{

	if (set) {
		printf("All-multicast mode unimplemented\n");
	}
}
static int
cpswp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct cpswp_softc *sc;
	struct ifreq *ifr;
	int error;
	uint32_t changed;

	error = 0;
	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFCAP:
		changed = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (changed & IFCAP_HWCSUM) {
			if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM)
				ifp->if_capenable |= IFCAP_HWCSUM;
			else
				ifp->if_capenable &= ~IFCAP_HWCSUM;
		}
		error = 0;
		break;
	case SIOCSIFFLAGS:
		CPSW_PORT_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				changed = ifp->if_flags ^ sc->if_flags;
				CPSW_DEBUGF(sc->swsc,
				    ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)",
				    changed));
				if (changed & IFF_PROMISC)
					cpsw_set_promisc(sc,
					    ifp->if_flags & IFF_PROMISC);
				if (changed & IFF_ALLMULTI)
					cpsw_set_allmulti(sc,
					    ifp->if_flags & IFF_ALLMULTI);
			} else {
				CPSW_DEBUGF(sc->swsc,
				    ("SIOCSIFFLAGS: starting up"));
				cpswp_init_locked(sc);
			}
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			CPSW_DEBUGF(sc->swsc, ("SIOCSIFFLAGS: shutting down"));
			cpswp_stop_locked(sc);
		}

		sc->if_flags = ifp->if_flags;
		CPSW_PORT_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
		cpswp_ale_update_addresses(sc, 0);
		break;
	case SIOCDELMULTI:
		/*
		 * Ugh.  DELMULTI doesn't provide the specific address
		 * being removed, so the best we can do is remove
		 * everything and rebuild it all.
		 */
		cpswp_ale_update_addresses(sc, 1);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

/*
 *
 * MIIBUS
 *
 */
static int
cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg)
{
	uint32_t r, retries = CPSW_MIIBUS_RETRIES;

	while (--retries) {
		r = cpsw_read_4(sc, reg);
		if ((r & MDIO_PHYACCESS_GO) == 0)
			return (1);
		DELAY(CPSW_MIIBUS_DELAY);
	}

	return (0);
}
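/*
 * MDIOUSERACCESS word layout, as implied by the shifts and masks in the
 * accessors below (the AM335x TRM is authoritative): GO is the busy
 * bit polled by cpswp_miibus_ready(), WRITE selects the direction, the
 * register number sits at bits 25:21, the PHY address at bits 20:16,
 * ACK comes back in read results, and the 16-bit data occupies the low
 * half of the word.
 */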
static int
cpswp_miibus_readreg(device_t dev, int phy, int reg)
{
	struct cpswp_softc *sc;
	uint32_t cmd, r;

	sc = device_get_softc(dev);
	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
		device_printf(dev, "MDIO not ready to read\n");
		return (0);
	}

	/* Set GO, reg, phy */
	cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16;
	cpsw_write_4(sc->swsc, sc->phyaccess, cmd);

	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
		device_printf(dev, "MDIO timed out during read\n");
		return (0);
	}

	r = cpsw_read_4(sc->swsc, sc->phyaccess);
	if ((r & MDIO_PHYACCESS_ACK) == 0) {
		device_printf(dev, "Failed to read from PHY.\n");
		r = 0;
	}
	return (r & 0xFFFF);
}

static int
cpswp_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct cpswp_softc *sc;
	uint32_t cmd;

	sc = device_get_softc(dev);
	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
		device_printf(dev, "MDIO not ready to write\n");
		return (0);
	}

	/* Set GO, WRITE, reg, phy, and value */
	cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE |
	    (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF);
	cpsw_write_4(sc->swsc, sc->phyaccess, cmd);

	if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) {
		device_printf(dev, "MDIO timed out during write\n");
		return (0);
	}

	return (0);
}

static void
cpswp_miibus_statchg(device_t dev)
{
	struct cpswp_softc *sc;
	uint32_t mac_control, reg;

	sc = device_get_softc(dev);
	CPSW_DEBUGF(sc->swsc, (""));

	reg = CPSW_SL_MACCONTROL(sc->unit);
	mac_control = cpsw_read_4(sc->swsc, reg);
	mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A |
	    CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX);

	switch (IFM_SUBTYPE(sc->mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		mac_control |= CPSW_SL_MACTL_GIG;
		break;

	case IFM_100_TX:
		mac_control |= CPSW_SL_MACTL_IFCTL_A;
		break;
	}
	if (sc->mii->mii_media_active & IFM_FDX)
		mac_control |= CPSW_SL_MACTL_FULLDUPLEX;

	cpsw_write_4(sc->swsc, reg, mac_control);
}

/*
 *
 * Transmit/Receive Packets.
 *
 */
static void
cpsw_intr_rx(void *arg)
{
	struct cpsw_softc *sc;
	struct ifnet *ifp;
	struct mbuf *received, *next;

	sc = (struct cpsw_softc *)arg;
	CPSW_RX_LOCK(sc);
	if (sc->rx.teardown) {
		sc->rx.running = 0;
		sc->rx.teardown = 0;
		cpsw_write_cp(sc, &sc->rx, 0xfffffffc);
	}
	received = cpsw_rx_dequeue(sc);
	cpsw_rx_enqueue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1);
	CPSW_RX_UNLOCK(sc);

	while (received != NULL) {
		next = received->m_nextpkt;
		received->m_nextpkt = NULL;
		ifp = received->m_pkthdr.rcvif;
		(*ifp->if_input)(ifp, received);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		received = next;
	}
}
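/*
 * Note: 0xfffffffc is the CPDMA teardown-complete acknowledge value;
 * both the RX path above and cpsw_intr_tx() below write it back to the
 * completion-pointer register to acknowledge a finished teardown.
 */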
static struct mbuf *
cpsw_rx_dequeue(struct cpsw_softc *sc)
{
	int nsegs, port, removed;
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *last, *slot;
	struct cpswp_softc *psc;
	struct mbuf *m, *m0, *mb_head, *mb_tail;
	uint16_t m0_flags;

	nsegs = 0;
	m0 = NULL;
	last = NULL;
	mb_head = NULL;
	mb_tail = NULL;
	removed = 0;

	/* Pull completed packets off hardware RX queue. */
	while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
		cpsw_cpdma_read_bd(sc, slot, &bd);

		/*
		 * Stop on packets still in use by hardware, but do not stop
		 * on packets with the teardown complete flag, they will be
		 * discarded later.
		 */
		if ((bd.flags & (CPDMA_BD_OWNER | CPDMA_BD_TDOWNCMPLT)) ==
		    CPDMA_BD_OWNER)
			break;

		last = slot;
		++removed;
		STAILQ_REMOVE_HEAD(&sc->rx.active, next);
		STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next);

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);

		m = slot->mbuf;
		slot->mbuf = NULL;

		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			CPSW_DEBUGF(sc, ("RX teardown is complete"));
			m_freem(m);
			sc->rx.running = 0;
			sc->rx.teardown = 0;
			break;
		}

		port = (bd.flags & CPDMA_BD_PORT_MASK) - 1;
		KASSERT(port >= 0 && port <= 1,
		    ("packet received with invalid port: %d", port));
		psc = device_get_softc(sc->port[port].dev);

		/* Set up mbuf */
		m->m_data += bd.bufoff;
		m->m_len = bd.buflen;
		if (bd.flags & CPDMA_BD_SOP) {
			m->m_pkthdr.len = bd.pktlen;
			m->m_pkthdr.rcvif = psc->ifp;
			m->m_flags |= M_PKTHDR;
			m0_flags = bd.flags;
			m0 = m;
		}
		nsegs++;
		m->m_next = NULL;
		m->m_nextpkt = NULL;
		if (bd.flags & CPDMA_BD_EOP && m0 != NULL) {
			if (m0_flags & CPDMA_BD_PASS_CRC)
				m_adj(m0, -ETHER_CRC_LEN);
			m0_flags = 0;
			m0 = NULL;
			if (nsegs > sc->rx.longest_chain)
				sc->rx.longest_chain = nsegs;
			nsegs = 0;
		}

		if ((psc->ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* check for valid CRC by looking into pkt_err[5:4] */
			if ((bd.flags &
			    (CPDMA_BD_SOP | CPDMA_BD_PKT_ERR_MASK)) ==
			    CPDMA_BD_SOP) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		if (STAILQ_FIRST(&sc->rx.active) != NULL &&
		    (bd.flags & (CPDMA_BD_EOP | CPDMA_BD_EOQ)) ==
		    (CPDMA_BD_EOP | CPDMA_BD_EOQ)) {
			cpsw_write_hdp_slot(sc, &sc->rx,
			    STAILQ_FIRST(&sc->rx.active));
			sc->rx.queue_restart++;
		}

		/* Add mbuf to packet list to be returned. */
		if (mb_tail != NULL && (bd.flags & CPDMA_BD_SOP)) {
			mb_tail->m_nextpkt = m;
		} else if (mb_tail != NULL) {
			mb_tail->m_next = m;
		} else if (mb_tail == NULL && (bd.flags & CPDMA_BD_SOP) == 0) {
			if (bootverbose)
				printf(
				    "%s: %s: discarding fragment packet w/o header\n",
				    __func__, psc->ifp->if_xname);
			m_freem(m);
			continue;
		} else {
			mb_head = m;
		}
		mb_tail = m;
	}

	if (removed != 0) {
		cpsw_write_cp_slot(sc, &sc->rx, last);
		sc->rx.queue_removes += removed;
		sc->rx.avail_queue_len += removed;
		sc->rx.active_queue_len -= removed;
		if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len)
			sc->rx.max_avail_queue_len = sc->rx.avail_queue_len;
		CPSW_DEBUGF(sc,
		    ("Removed %d received packet(s) from RX queue", removed));
	}

	return (mb_head);
}
static void
cpsw_rx_enqueue(struct cpsw_softc *sc)
{
	bus_dma_segment_t seg[1];
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *first_new_slot, *last_old_slot, *next, *slot;
	int error, nsegs, added = 0;

	/* Register new mbufs with hardware. */
	first_new_slot = NULL;
	last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next);
	while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) {
		if (first_new_slot == NULL)
			first_new_slot = slot;
		if (slot->mbuf == NULL) {
			slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
			if (slot->mbuf == NULL) {
				device_printf(sc->dev,
				    "Unable to fill RX queue\n");
				break;
			}
			slot->mbuf->m_len =
			    slot->mbuf->m_pkthdr.len =
			    slot->mbuf->m_ext.ext_size;
		}

		error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap,
		    slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT);

		KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs));
		KASSERT(error == 0, ("DMA error (error=%d)", error));
		if (error != 0 || nsegs != 1) {
			device_printf(sc->dev,
			    "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n",
			    __func__, nsegs, error);
			bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap,
		    BUS_DMASYNC_PREREAD);

		/* Create and submit new rx descriptor. */
		if ((next = STAILQ_NEXT(slot, next)) != NULL)
			bd.next = cpsw_cpdma_bd_paddr(sc, next);
		else
			bd.next = 0;
		bd.bufptr = seg->ds_addr;
		bd.bufoff = 0;
		bd.buflen = MCLBYTES - 1;
		bd.pktlen = bd.buflen;
		bd.flags = CPDMA_BD_OWNER;
		cpsw_cpdma_write_bd(sc, slot, &bd);
		++added;

		STAILQ_REMOVE_HEAD(&sc->rx.avail, next);
		STAILQ_INSERT_TAIL(&sc->rx.active, slot, next);
	}

	if (added == 0 || first_new_slot == NULL)
		return;

	CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added));

	/* Link new entries to hardware RX queue. */
	if (last_old_slot == NULL) {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot);
	} else {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot);
	}
	sc->rx.queue_adds += added;
	sc->rx.avail_queue_len -= added;
	sc->rx.active_queue_len += added;
	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), added);
	if (sc->rx.active_queue_len > sc->rx.max_active_queue_len)
		sc->rx.max_active_queue_len = sc->rx.active_queue_len;
}

static void
cpswp_start(struct ifnet *ifp)
{
	struct cpswp_softc *sc;

	sc = ifp->if_softc;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    sc->swsc->tx.running == 0) {
		return;
	}
	CPSW_TX_LOCK(sc->swsc);
	cpswp_tx_enqueue(sc);
	cpsw_tx_dequeue(sc->swsc);
	CPSW_TX_UNLOCK(sc->swsc);
}

static void
cpsw_intr_tx(void *arg)
{
	struct cpsw_softc *sc;

	sc = (struct cpsw_softc *)arg;
	CPSW_TX_LOCK(sc);
	if (cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)) == 0xfffffffc)
		cpsw_write_cp(sc, &sc->tx, 0xfffffffc);
	cpsw_tx_dequeue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 2);
	CPSW_TX_UNLOCK(sc);
}
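/*
 * Note on queue chaining in cpswp_tx_enqueue() below: the CPDMA engine
 * halts after consuming a descriptor with EOQ set.  If the old tail
 * already carries EOQ, patching its next pointer would go unnoticed by
 * the hardware, so the queue must be restarted by writing the head
 * pointer instead.
 */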
static void
cpswp_tx_enqueue(struct cpswp_softc *sc)
{
	bus_dma_segment_t segs[CPSW_TXFRAGS];
	struct cpsw_cpdma_bd bd;
	struct cpsw_slot *first_new_slot, *last, *last_old_slot, *next, *slot;
	struct mbuf *m0;
	int error, nsegs, seg, added = 0, padlen;

	/* Pull pending packets from IF queue and prep them for DMA. */
	last = NULL;
	first_new_slot = NULL;
	last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next);
	while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) {
		IF_DEQUEUE(&sc->ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		slot->mbuf = m0;
		padlen = ETHER_MIN_LEN - ETHER_CRC_LEN - m0->m_pkthdr.len;
		if (padlen < 0)
			padlen = 0;
		else if (padlen > 0)
			m_append(slot->mbuf, padlen, sc->swsc->nullpad);

		/* Create mapping in DMA memory */
		error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag,
		    slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
		/* If the packet is too fragmented, try to simplify. */
		if (error == EFBIG ||
		    (error == 0 && nsegs > sc->swsc->tx.avail_queue_len)) {
			bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
			m0 = m_defrag(slot->mbuf, M_NOWAIT);
			if (m0 == NULL) {
				device_printf(sc->dev,
				    "Can't defragment packet; dropping\n");
				m_freem(slot->mbuf);
			} else {
				CPSW_DEBUGF(sc->swsc,
				    ("Requeueing defragmented packet"));
				IF_PREPEND(&sc->ifp->if_snd, m0);
			}
			slot->mbuf = NULL;
			continue;
		}
		if (error != 0) {
			device_printf(sc->dev,
			    "%s: Can't setup DMA (error=%d), dropping packet\n",
			    __func__, error);
			bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap);
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			break;
		}

		bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap,
		    BUS_DMASYNC_PREWRITE);

		CPSW_DEBUGF(sc->swsc,
		    ("Queueing TX packet: %d segments + %d pad bytes",
		    nsegs, padlen));

		if (first_new_slot == NULL)
			first_new_slot = slot;

		/* Link from the previous descriptor. */
		if (last != NULL)
			cpsw_cpdma_write_bd_next(sc->swsc, last, slot);

		slot->ifp = sc->ifp;

		/*
		 * If there is only one segment, the for() loop
		 * gets skipped and the single buffer gets set up
		 * as both SOP and EOP.
		 */
		if (nsegs > 1) {
			next = STAILQ_NEXT(slot, next);
			bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next);
		} else
			bd.next = 0;
		/* Start by setting up the first buffer. */
		bd.bufptr = segs[0].ds_addr;
		bd.bufoff = 0;
		bd.buflen = segs[0].ds_len;
		bd.pktlen = m_length(slot->mbuf, NULL);
		bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER;
		if (sc->swsc->dualemac) {
			bd.flags |= CPDMA_BD_TO_PORT;
			bd.flags |= ((sc->unit + 1) & CPDMA_BD_PORT_MASK);
		}
		for (seg = 1; seg < nsegs; ++seg) {
			/* Save the previous buffer (which isn't EOP) */
			cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
			STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
			STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next);
			slot = STAILQ_FIRST(&sc->swsc->tx.avail);

			/* Setup next buffer (which isn't SOP) */
			if (nsegs > seg + 1) {
				next = STAILQ_NEXT(slot, next);
				bd.next = cpsw_cpdma_bd_paddr(sc->swsc, next);
			} else
				bd.next = 0;
			bd.bufptr = segs[seg].ds_addr;
			bd.bufoff = 0;
			bd.buflen = segs[seg].ds_len;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_OWNER;
		}

		/* Save the final buffer. */
		bd.flags |= CPDMA_BD_EOP;
		cpsw_cpdma_write_bd(sc->swsc, slot, &bd);
		STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next);
		STAILQ_INSERT_TAIL(&sc->swsc->tx.active, slot, next);

		last = slot;
		added += nsegs;
		if (nsegs > sc->swsc->tx.longest_chain)
			sc->swsc->tx.longest_chain = nsegs;

		BPF_MTAP(sc->ifp, m0);
	}

	if (first_new_slot == NULL)
		return;

	/* Attach the list of new buffers to the hardware TX queue. */
	if (last_old_slot != NULL &&
	    (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) &
	    CPDMA_BD_EOQ) == 0) {
		/* Add buffers to end of current queue. */
		cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot,
		    first_new_slot);
	} else {
		/* Start a fresh queue. */
		cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot);
	}
	sc->swsc->tx.queue_adds += added;
	sc->swsc->tx.avail_queue_len -= added;
	sc->swsc->tx.active_queue_len += added;
	if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) {
		sc->swsc->tx.max_active_queue_len =
		    sc->swsc->tx.active_queue_len;
	}
	CPSW_DEBUGF(sc->swsc, ("Queued %d TX packet(s)", added));
}
static int
cpsw_tx_dequeue(struct cpsw_softc *sc)
{
	struct cpsw_slot *slot, *last_removed_slot = NULL;
	struct cpsw_cpdma_bd bd;
	uint32_t flags, removed = 0;

	/* Pull completed buffers off the hardware TX queue. */
	slot = STAILQ_FIRST(&sc->tx.active);
	while (slot != NULL) {
		flags = cpsw_cpdma_read_bd_flags(sc, slot);

		/* TearDown complete is only marked on the SOP for the packet. */
		if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) ==
		    (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) {
			sc->tx.teardown = 1;
		}

		if ((flags & (CPDMA_BD_SOP | CPDMA_BD_OWNER)) ==
		    (CPDMA_BD_SOP | CPDMA_BD_OWNER) && sc->tx.teardown == 0)
			break; /* Hardware is still using this packet. */

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);
		m_freem(slot->mbuf);
		slot->mbuf = NULL;

		if (slot->ifp) {
			if (sc->tx.teardown == 0)
				if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1);
			else
				if_inc_counter(slot->ifp, IFCOUNTER_OQDROPS, 1);
		}

		/* Dequeue any additional buffers used by this packet. */
		while (slot != NULL && slot->mbuf == NULL) {
			STAILQ_REMOVE_HEAD(&sc->tx.active, next);
			STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next);
			++removed;
			last_removed_slot = slot;
			slot = STAILQ_FIRST(&sc->tx.active);
		}

		cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot);

		/* Restart the TX queue if necessary. */
		cpsw_cpdma_read_bd(sc, last_removed_slot, &bd);
		if (slot != NULL && bd.next != 0 && (bd.flags &
		    (CPDMA_BD_EOP | CPDMA_BD_OWNER | CPDMA_BD_EOQ)) ==
		    (CPDMA_BD_EOP | CPDMA_BD_EOQ)) {
			cpsw_write_hdp_slot(sc, &sc->tx, slot);
			sc->tx.queue_restart++;
			break;
		}
	}

	if (removed != 0) {
		sc->tx.queue_removes += removed;
		sc->tx.active_queue_len -= removed;
		sc->tx.avail_queue_len += removed;
		if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len)
			sc->tx.max_avail_queue_len = sc->tx.avail_queue_len;
		CPSW_DEBUGF(sc, ("TX removed %d completed packet(s)", removed));
	}

	if (sc->tx.teardown && STAILQ_EMPTY(&sc->tx.active)) {
		CPSW_DEBUGF(sc, ("TX teardown is complete"));
		sc->tx.teardown = 0;
		sc->tx.running = 0;
	}

	return (removed);
}

/*
 *
 * Miscellaneous interrupts.
 *
 */

static void
cpsw_intr_rx_thresh(void *arg)
{
	struct cpsw_softc *sc;
	struct ifnet *ifp;
	struct mbuf *received, *next;

	sc = (struct cpsw_softc *)arg;
	CPSW_RX_LOCK(sc);
	received = cpsw_rx_dequeue(sc);
	cpsw_rx_enqueue(sc);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0);
	CPSW_RX_UNLOCK(sc);

	while (received != NULL) {
		next = received->m_nextpkt;
		received->m_nextpkt = NULL;
		ifp = received->m_pkthdr.rcvif;
		(*ifp->if_input)(ifp, received);
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		received = next;
	}
}

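/*
 * The decoder below unpacks CPSW_CPDMA_DMASTATUS using the same field
 * positions as the code: TX error code/channel in bits [23:20]/[18:16],
 * RX error code/channel in bits [15:12]/[10:8].  Worked example
 * (illustrative): DMASTATUS == 0x00210000 decodes as
 *
 *	txerr  = (0x00210000 >> 20) & 15 = 2	(ownership bit not set on SOP)
 *	txchan = (0x00210000 >> 16) & 7  = 1	(TX channel 1)
 *	rxerr  = (0x00210000 >> 12) & 15 = 0	(no RX error)
 */
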
printf("Packet length error on TX channel %d\n", txchan); 2132 break; 2133 default: printf("Unknown error on TX channel %d\n", txchan); 2134 break; 2135 } 2136 2137 if (txerr != 0) { 2138 printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 2139 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan))); 2140 printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 2141 txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan))); 2142 cpsw_dump_queue(sc, &sc->tx.active); 2143 } 2144 2145 switch (rxerr) { 2146 case 0: break; 2147 case 2: printf("Ownership bit not set on RX channel %d\n", rxchan); 2148 break; 2149 case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan); 2150 break; 2151 case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan); 2152 break; 2153 case 6: printf("Buffer offset too big on RX channel %d\n", rxchan); 2154 break; 2155 default: printf("Unknown RX error on RX channel %d\n", rxchan); 2156 break; 2157 } 2158 2159 if (rxerr != 0) { 2160 printf("CPSW_CPDMA_RX%d_HDP=0x%x\n", 2161 rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan))); 2162 printf("CPSW_CPDMA_RX%d_CP=0x%x\n", 2163 rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan))); 2164 cpsw_dump_queue(sc, &sc->rx.active); 2165 } 2166 2167 printf("\nALE Table\n"); 2168 cpsw_ale_dump_table(sc); 2169 2170 // XXX do something useful here?? 2171 panic("CPSW HOST ERROR INTERRUPT"); 2172 2173 // Suppress this interrupt in the future. 2174 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat); 2175 printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n"); 2176 // The watchdog will probably reset the controller 2177 // in a little while. It will probably fail again. 2178 } 2179 2180 static void 2181 cpsw_intr_misc(void *arg) 2182 { 2183 struct cpsw_softc *sc = arg; 2184 uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0)); 2185 2186 if (stat & CPSW_WR_C_MISC_EVNT_PEND) 2187 CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented")); 2188 if (stat & CPSW_WR_C_MISC_STAT_PEND) 2189 cpsw_stats_collect(sc); 2190 if (stat & CPSW_WR_C_MISC_HOST_PEND) 2191 cpsw_intr_misc_host_error(sc); 2192 if (stat & CPSW_WR_C_MISC_MDIOLINK) { 2193 cpsw_write_4(sc, MDIOLINKINTMASKED, 2194 cpsw_read_4(sc, MDIOLINKINTMASKED)); 2195 } 2196 if (stat & CPSW_WR_C_MISC_MDIOUSER) { 2197 CPSW_DEBUGF(sc, 2198 ("MDIO operation completed interrupt unimplemented")); 2199 } 2200 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3); 2201 } 2202 2203 /* 2204 * 2205 * Periodic Checks and Watchdog. 
/*
 *
 * Periodic Checks and Watchdog.
 *
 */

static void
cpswp_tick(void *msc)
{
	struct cpswp_softc *sc = msc;

	/* Check for media type change. */
	mii_tick(sc->mii);
	if (sc->media_status != sc->mii->mii_media.ifm_media) {
		printf("%s: media type changed (ifm_media=%x)\n", __func__,
		    sc->mii->mii_media.ifm_media);
		cpswp_ifmedia_upd(sc->ifp);
	}

	/* Schedule another timeout one second from now. */
	callout_reset(&sc->mii_callout, hz, cpswp_tick, sc);
}

static void
cpswp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cpswp_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK(sc);

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	CPSW_PORT_UNLOCK(sc);
}

static int
cpswp_ifmedia_upd(struct ifnet *ifp)
{
	struct cpswp_softc *sc;

	sc = ifp->if_softc;
	CPSW_DEBUGF(sc->swsc, (""));
	CPSW_PORT_LOCK(sc);
	mii_mediachg(sc->mii);
	sc->media_status = sc->mii->mii_media.ifm_media;
	CPSW_PORT_UNLOCK(sc);

	return (0);
}

static void
cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc)
{
	struct cpswp_softc *psc;
	int i;

	cpsw_debugf_head("CPSW watchdog");
	device_printf(sc->dev, "watchdog timeout\n");
	printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", 0,
	    cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)));
	printf("CPSW_CPDMA_TX%d_CP=0x%x\n", 0,
	    cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)));
	cpsw_dump_queue(sc, &sc->tx.active);
	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		psc = device_get_softc(sc->port[i].dev);
		CPSW_PORT_LOCK(psc);
		cpswp_stop_locked(psc);
		CPSW_PORT_UNLOCK(psc);
	}
}

static void
cpsw_tx_watchdog(void *msc)
{
	struct cpsw_softc *sc;

	sc = msc;
	CPSW_TX_LOCK(sc);
	if (sc->tx.active_queue_len == 0 || !sc->tx.running) {
		sc->watchdog.timer = 0; /* Nothing to do. */
	} else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) {
		sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */
	} else if (cpsw_tx_dequeue(sc) > 0) {
		sc->watchdog.timer = 0; /* We just did something. */
	} else {
		/* There was something to do but it didn't get done. */
		++sc->watchdog.timer;
		if (sc->watchdog.timer > 5) {
			sc->watchdog.timer = 0;
			++sc->watchdog.resets;
			cpsw_tx_watchdog_full_reset(sc);
		}
	}
	sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes;
	CPSW_TX_UNLOCK(sc);

	/* Schedule another timeout one second from now. */
	callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc);
}

/*
 *
 * ALE support routines.
 *
 */

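/*
 * ALE table access pattern used by the two helpers below: an entry spans
 * three 32-bit words exposed through the TBLW0..TBLW2 windows, selected
 * by writing an index (0..1023) to TBLCTL; setting bit 31 of TBLCTL turns
 * the access into a write.  For the address entries built in this file
 * the packing is, roughly:
 *
 *	word0 [31:0]  = last four octets of the MAC address
 *	word1 [15:0]  = first two octets of the MAC address
 *	word1 [27:16] = VLAN ID (ALE_TYPE_VLAN_ADDR entries only)
 *	word1 [29:28] = entry type, word1 [31:30] = mcast forward state
 *	word2 [4:2]   = port mask
 *
 * Bit positions are inferred from the shifts used below; see the AM335x
 * TRM for the authoritative layout.
 */
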
static void
cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
	cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
	ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
	ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
	ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
}

static void
cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
{
	cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
	cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
	cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
	cpsw_write_4(sc, CPSW_ALE_TBLCTL, (1U << 31) | (idx & 1023));
}

static void
cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
{
	int i;
	uint32_t ale_entry[3];

	/*
	 * The first ten entries are reserved for the per-port unicast and
	 * broadcast addresses and the VLAN entries set up by
	 * cpswp_ale_update_addresses() and cpsw_ale_update_vlan_table();
	 * skip them.
	 */
	for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);
		if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR ||
		    ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) &&
		    ALE_MCAST(ale_entry) == 1) { /* MCast link addr */
			ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
			cpsw_ale_write_entry(sc, i, ale_entry);
		}
	}
}

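/*
 * Worked example of the MAC packing used below (illustrative): for the
 * IPv4 mDNS group address 01:00:5e:00:00:fb,
 *
 *	ale_entry[0] = 0x5e0000fb	(mac[2..5])
 *	ale_entry[1] = 0x00000100	(mac[0] << 8 | mac[1])
 *
 * and the matching-entry comparison in the search loop unpacks the same
 * six octets in the same order.
 */
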
static int
cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan,
    uint8_t *mac)
{
	int free_index = -1, matching_index = -1, i;
	uint32_t ale_entry[3], ale_type;

	/* Find a matching entry or a free entry. */
	for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);

		/* Entry Type[61:60] is 0 for free entry */
		if (free_index < 0 && ALE_TYPE(ale_entry) == 0)
			free_index = i;

		if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) &&
		    (((ale_entry[1] >> 0) & 0xFF) == mac[1]) &&
		    (((ale_entry[0] >> 24) & 0xFF) == mac[2]) &&
		    (((ale_entry[0] >> 16) & 0xFF) == mac[3]) &&
		    (((ale_entry[0] >> 8) & 0xFF) == mac[4]) &&
		    (((ale_entry[0] >> 0) & 0xFF) == mac[5])) {
			matching_index = i;
			break;
		}
	}

	if (matching_index < 0) {
		if (free_index < 0)
			return (ENOMEM);
		i = free_index;
	}

	if (vlan != -1)
		ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16;
	else
		ale_type = ALE_TYPE_ADDR << 28;

	/* Set MAC address */
	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
	ale_entry[1] = mac[0] << 8 | mac[1];

	/* Entry type[61:60] and Mcast fwd state[63:62] is fw(3). */
	ale_entry[1] |= ALE_MCAST_FWD | ale_type;

	/* Set portmask [68:66] */
	ale_entry[2] = (portmap & 7) << 2;

	cpsw_ale_write_entry(sc, i, ale_entry);

	return (0);
}

static void
cpsw_ale_dump_table(struct cpsw_softc *sc)
{
	int i;
	uint32_t ale_entry[3];

	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);
		switch (ALE_TYPE(ale_entry)) {
		case ALE_TYPE_VLAN:
			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
			    ale_entry[1], ale_entry[0]);
			printf("type: %u ", ALE_TYPE(ale_entry));
			printf("vlan: %u ", ALE_VLAN(ale_entry));
			printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry));
			printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry));
			printf("unreg flood: %u ",
			    ALE_VLAN_UNREGFLOOD(ale_entry));
			printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry));
			printf("\n");
			break;
		case ALE_TYPE_ADDR:
		case ALE_TYPE_VLAN_ADDR:
			printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2],
			    ale_entry[1], ale_entry[0]);
			printf("type: %u ", ALE_TYPE(ale_entry));
			printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ",
			    (ale_entry[1] >> 8) & 0xFF,
			    (ale_entry[1] >> 0) & 0xFF,
			    (ale_entry[0] >> 24) & 0xFF,
			    (ale_entry[0] >> 16) & 0xFF,
			    (ale_entry[0] >> 8) & 0xFF,
			    (ale_entry[0] >> 0) & 0xFF);
			printf(ALE_MCAST(ale_entry) ? "mcast " : "ucast ");
			if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR)
				printf("vlan: %u ", ALE_VLAN(ale_entry));
			printf("port: %u ", ALE_PORTS(ale_entry));
			printf("\n");
			break;
		}
	}
	printf("\n");
}

static u_int
cpswp_set_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct cpswp_softc *sc = arg;
	uint32_t portmask;

	if (sc->swsc->dualemac)
		portmask = 1 << (sc->unit + 1) | 1 << 0;
	else
		portmask = 7;

	cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan, LLADDR(sdl));

	return (1);
}

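/*
 * Port mask convention used above and below: bit 0 is the host port and
 * bits 1-2 are the two slave ports.  In dual-EMAC mode a multicast entry
 * for port unit 0 therefore gets portmask 0x3 (host + port 1), while in
 * switch mode all three ports (mask 7) receive the traffic.
 */
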
static int
cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge)
{
	uint8_t *mac;
	uint32_t ale_entry[3], ale_type, portmask;

	if (sc->swsc->dualemac) {
		ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16;
		portmask = 1 << (sc->unit + 1) | 1 << 0;
	} else {
		ale_type = ALE_TYPE_ADDR << 28;
		portmask = 7;
	}

	/*
	 * Route incoming packets for our MAC address to Port 0 (host).
	 * For simplicity, keep this entry at table index 0 for port 1 and
	 * at index 2 for port 2 in the ALE.
	 */
	mac = LLADDR((struct sockaddr_dl *)sc->ifp->if_addr->ifa_addr);
	ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
	ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */
	ale_entry[2] = 0; /* port = 0 */
	cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry);

	/* Set outgoing MAC Address for slave port. */
	cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1),
	    mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
	cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1),
	    mac[5] << 8 | mac[4]);

	/* Keep the broadcast address at table entry 1 (or 3). */
	ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */
	/* ALE_MCAST_FWD, Addr type, upper 16 bits of MAC */
	ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff;
	ale_entry[2] = portmask << 2;
	cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry);

	/*
	 * SIOCDELMULTI doesn't specify the particular address
	 * being removed, so we have to remove all and rebuild.
	 */
	if (purge)
		cpsw_ale_remove_all_mc_entries(sc->swsc);

	/* Set other multicast addrs desired. */
	if_foreach_llmaddr(sc->ifp, cpswp_set_maddr, sc);

	return (0);
}

static int
cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports,
    int untag, int mcregflood, int mcunregflood)
{
	int free_index, i, matching_index;
	uint32_t ale_entry[3];

	free_index = matching_index = -1;
	/* Find a matching entry or a free entry. */
	for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);

		/* Entry Type[61:60] is 0 for free entry */
		if (free_index < 0 && ALE_TYPE(ale_entry) == 0)
			free_index = i;

		if (ALE_VLAN(ale_entry) == vlan) {
			matching_index = i;
			break;
		}
	}

	if (matching_index < 0) {
		if (free_index < 0)
			return (-1);
		i = free_index;
	}

	ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 |
	    (mcunregflood & 7) << 8 | (ports & 7);
	ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16;
	ale_entry[2] = 0;
	cpsw_ale_write_entry(sc, i, ale_entry);

	return (0);
}

/*
 *
 * Statistics and Sysctls.
 *
 */

#if 0
static void
cpsw_stats_dump(struct cpsw_softc *sc)
{
	int i;
	uint32_t r;

	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
		    cpsw_stat_sysctls[i].reg);
		CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid,
		    (intmax_t)sc->shadow_stats[i], r,
		    (intmax_t)sc->shadow_stats[i] + r));
	}
}
#endif

static void
cpsw_stats_collect(struct cpsw_softc *sc)
{
	int i;
	uint32_t r;

	CPSW_DEBUGF(sc, ("Controller shadow statistics updated."));

	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		r = cpsw_read_4(sc, CPSW_STATS_OFFSET +
		    cpsw_stat_sysctls[i].reg);
		sc->shadow_stats[i] += r;
		cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg,
		    r);
	}
}

static int
cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct cpsw_stat *stat;
	uint64_t result;

	sc = (struct cpsw_softc *)arg1;
	stat = &cpsw_stat_sysctls[oidp->oid_number];
	result = sc->shadow_stats[oidp->oid_number];
	result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg);
	return (sysctl_handle_64(oidp, &result, 0, req));
}

static int
cpsw_stat_attached(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *sc;
	struct bintime t;
	unsigned result;

	sc = (struct cpsw_softc *)arg1;
	getbinuptime(&t);
	bintime_sub(&t, &sc->attach_uptime);
	result = t.sec;
	return (sysctl_handle_int(oidp, &result, 0, req));
}

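/*
 * The pacing math in the handler below: the sysctl takes a minimum
 * interval between interrupts in microseconds, while the hardware IMAX
 * registers take a ceiling in interrupts per millisecond.  Worked example
 * (illustrative): setting intr_coalesce_us=200 gives intr_per_ms =
 * 1000 / 200 = 5, i.e. at most five interrupts per millisecond per
 * direction.  The prescaler count of 125 * 4 = 500 derives the 4 us
 * pacing pulse from the 125 MHz reference clock.
 */
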
static int
cpsw_intr_coalesce(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct cpsw_softc *sc;
	uint32_t ctrl, intr_per_ms;

	sc = (struct cpsw_softc *)arg1;
	error = sysctl_handle_int(oidp, &sc->coal_us, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	ctrl = cpsw_read_4(sc, CPSW_WR_INT_CONTROL);
	ctrl &= ~(CPSW_WR_INT_PACE_EN | CPSW_WR_INT_PRESCALE_MASK);
	if (sc->coal_us == 0) {
		/* Disable the interrupt pace hardware. */
		cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);
		cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), 0);
		cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), 0);
		return (0);
	}

	if (sc->coal_us > CPSW_WR_C_IMAX_US_MAX)
		sc->coal_us = CPSW_WR_C_IMAX_US_MAX;
	if (sc->coal_us < CPSW_WR_C_IMAX_US_MIN)
		sc->coal_us = CPSW_WR_C_IMAX_US_MIN;
	intr_per_ms = 1000 / sc->coal_us;
	/* Just to make sure... */
	if (intr_per_ms > CPSW_WR_C_IMAX_MAX)
		intr_per_ms = CPSW_WR_C_IMAX_MAX;
	if (intr_per_ms < CPSW_WR_C_IMAX_MIN)
		intr_per_ms = CPSW_WR_C_IMAX_MIN;

	/* Set the prescale to produce 4us pulses from the 125 MHz clock. */
	ctrl |= (125 * 4) & CPSW_WR_INT_PRESCALE_MASK;

	/* Enable the interrupt pace hardware. */
	cpsw_write_4(sc, CPSW_WR_C_RX_IMAX(0), intr_per_ms);
	cpsw_write_4(sc, CPSW_WR_C_TX_IMAX(0), intr_per_ms);
	ctrl |= CPSW_WR_INT_C0_RX_PULSE | CPSW_WR_INT_C0_TX_PULSE;
	cpsw_write_4(sc, CPSW_WR_INT_CONTROL, ctrl);

	return (0);
}

static int
cpsw_stat_uptime(SYSCTL_HANDLER_ARGS)
{
	struct cpsw_softc *swsc;
	struct cpswp_softc *sc;
	struct bintime t;
	unsigned result;

	swsc = arg1;
	sc = device_get_softc(swsc->port[arg2].dev);
	if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) {
		getbinuptime(&t);
		bintime_sub(&t, &sc->init_uptime);
		result = t.sec;
	} else
		result = 0;
	return (sysctl_handle_int(oidp, &result, 0, req));
}

static void
cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
    struct cpsw_queue *queue)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_CHILDREN(node);
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers",
	    CTLFLAG_RD, &queue->queue_slots, 0,
	    "Total buffers currently assigned to this queue");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers",
	    CTLFLAG_RD, &queue->active_queue_len, 0,
	    "Buffers currently registered with hardware controller");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers",
	    CTLFLAG_RD, &queue->max_active_queue_len, 0,
	    "Max value of activeBuffers since last driver reset");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers",
	    CTLFLAG_RD, &queue->avail_queue_len, 0,
	    "Buffers allocated to this queue but not currently "
	    "registered with hardware controller");
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers",
	    CTLFLAG_RD, &queue->max_avail_queue_len, 0,
	    "Max value of availBuffers since last driver reset");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued",
	    CTLFLAG_RD, &queue->queue_adds, 0,
	    "Total buffers added to queue");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued",
	    CTLFLAG_RD, &queue->queue_removes, 0,
	    "Total buffers removed from queue");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "queueRestart",
	    CTLFLAG_RD, &queue->queue_restart, 0,
	    "Total times the queue has been restarted");
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain",
	    CTLFLAG_RD, &queue->longest_chain, 0,
	    "Max buffers used for a single packet");
}

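/*
 * The helpers above and cpsw_add_sysctls() below hang everything off the
 * device's sysctl tree.  Assuming the controller attaches as cpsw0, some
 * typical paths (illustrative) would be:
 *
 *	sysctl dev.cpsw.0.queue.tx.activeBuffers
 *	sysctl dev.cpsw.0.ports.1.uptime
 *	sysctl dev.cpsw.0.intr_coalesce_us=200
 */
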
static void
cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node,
    struct cpsw_softc *sc)
{
	struct sysctl_oid_list *parent;

	parent = SYSCTL_CHILDREN(node);
	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets",
	    CTLFLAG_RD, &sc->watchdog.resets, 0,
	    "Total number of watchdog resets");
}

static void
cpsw_add_sysctls(struct cpsw_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *stats_node, *queue_node, *node;
	struct sysctl_oid_list *parent, *stats_parent, *queue_parent;
	struct sysctl_oid_list *ports_parent, *port_parent;
	char port[16];
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs",
	    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    sc, 0, cpsw_stat_attached, "IU",
	    "Time since driver attach");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "intr_coalesce_us",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, cpsw_intr_coalesce, "IU",
	    "minimum time between interrupts");

	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Ports Statistics");
	ports_parent = SYSCTL_CHILDREN(node);
	for (i = 0; i < CPSW_PORTS; i++) {
		if (!sc->dualemac && i != sc->active_slave)
			continue;
		port[0] = '0' + i;
		port[1] = '\0';
		node = SYSCTL_ADD_NODE(ctx, ports_parent, OID_AUTO,
		    port, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "CPSW Port Statistics");
		port_parent = SYSCTL_CHILDREN(node);
		SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime",
		    CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, i,
		    cpsw_stat_uptime, "IU", "Seconds since driver init");
	}

	stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Statistics");
	stats_parent = SYSCTL_CHILDREN(stats_node);
	for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) {
		SYSCTL_ADD_PROC(ctx, stats_parent, i,
		    cpsw_stat_sysctls[i].oid,
		    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
		    sc, 0, cpsw_stats_sysctl, "IU",
		    cpsw_stat_sysctls[i].oid);
	}

	queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CPSW Queue Statistics");
	queue_parent = SYSCTL_CHILDREN(queue_node);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->tx);

	node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Statistics");
	cpsw_add_queue_sysctls(ctx, node, &sc->rx);

	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Watchdog Statistics");
	cpsw_add_watchdog_sysctls(ctx, node, sc);
}

#ifdef CPSW_ETHERSWITCH
static etherswitch_info_t etherswitch_info = {
	.es_nports = CPSW_PORTS + 1,
	.es_nvlangroups = CPSW_VLANS,
	.es_name = "TI Common Platform Ethernet Switch (CPSW)",
	.es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q,
};

static etherswitch_info_t *
cpsw_getinfo(device_t dev)
{
	return (&etherswitch_info);
}

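/*
 * etherswitch(4) port numbering note: es_port 0 is the CPU/host port
 * (CPSW_CPU_PORT in this driver) and es_port 1..CPSW_PORTS map onto
 * sc->port[es_port - 1], which is why the handlers below index the
 * per-port softc with es_port - 1.  The CPU port reports a fixed
 * 1000baseT full-duplex link, since it is an internal connection with
 * no PHY attached.
 */
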
static int
cpsw_getport(device_t dev, etherswitch_port_t *p)
{
	int err;
	struct cpsw_softc *sc;
	struct cpswp_softc *psc;
	struct ifmediareq *ifmr;
	uint32_t reg;

	if (p->es_port < 0 || p->es_port > CPSW_PORTS)
		return (ENXIO);

	err = 0;
	sc = device_get_softc(dev);
	if (p->es_port == CPSW_CPU_PORT) {
		p->es_flags |= ETHERSWITCH_PORT_CPU;
		ifmr = &p->es_ifmr;
		ifmr->ifm_current = ifmr->ifm_active =
		    IFM_ETHER | IFM_1000_T | IFM_FDX;
		ifmr->ifm_mask = 0;
		ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
		ifmr->ifm_count = 0;
	} else {
		psc = device_get_softc(sc->port[p->es_port - 1].dev);
		err = ifmedia_ioctl(psc->ifp, &p->es_ifr,
		    &psc->mii->mii_media, SIOCGIFMEDIA);
	}
	reg = cpsw_read_4(sc, CPSW_PORT_P_VLAN(p->es_port));
	p->es_pvid = reg & ETHERSWITCH_VID_MASK;

	reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port));
	if (reg & ALE_PORTCTL_DROP_UNTAGGED)
		p->es_flags |= ETHERSWITCH_PORT_DROPUNTAGGED;
	if (reg & ALE_PORTCTL_INGRESS)
		p->es_flags |= ETHERSWITCH_PORT_INGRESS;

	return (err);
}

static int
cpsw_setport(device_t dev, etherswitch_port_t *p)
{
	struct cpsw_softc *sc;
	struct cpswp_softc *psc;
	struct ifmedia *ifm;
	uint32_t reg;

	if (p->es_port < 0 || p->es_port > CPSW_PORTS)
		return (ENXIO);

	sc = device_get_softc(dev);
	if (p->es_pvid != 0) {
		cpsw_write_4(sc, CPSW_PORT_P_VLAN(p->es_port),
		    p->es_pvid & ETHERSWITCH_VID_MASK);
	}

	reg = cpsw_read_4(sc, CPSW_ALE_PORTCTL(p->es_port));
	if (p->es_flags & ETHERSWITCH_PORT_DROPUNTAGGED)
		reg |= ALE_PORTCTL_DROP_UNTAGGED;
	else
		reg &= ~ALE_PORTCTL_DROP_UNTAGGED;
	if (p->es_flags & ETHERSWITCH_PORT_INGRESS)
		reg |= ALE_PORTCTL_INGRESS;
	else
		reg &= ~ALE_PORTCTL_INGRESS;
	cpsw_write_4(sc, CPSW_ALE_PORTCTL(p->es_port), reg);

	/* CPU port does not allow media settings. */
	if (p->es_port == CPSW_CPU_PORT)
		return (0);

	psc = device_get_softc(sc->port[p->es_port - 1].dev);
	ifm = &psc->mii->mii_media;

	return (ifmedia_ioctl(psc->ifp, &p->es_ifr, ifm, SIOCSIFMEDIA));
}

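/*
 * These handlers back the etherswitch(4) ioctl interface, so the switch
 * can be inspected and configured from userland with etherswitchcfg(8).
 * Illustrative session (device path and exact arguments assumed):
 *
 *	# etherswitchcfg -f /dev/etherswitch0 info
 *	# etherswitchcfg -f /dev/etherswitch0 port1 pvid 2
 *	# etherswitchcfg -f /dev/etherswitch0 vlangroup1 vlan 2 members 0,1
 */
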
static int
cpsw_getconf(device_t dev, etherswitch_conf_t *conf)
{

	/* Return the VLAN mode. */
	conf->cmd = ETHERSWITCH_CONF_VLAN_MODE;
	conf->vlan_mode = ETHERSWITCH_VLAN_DOT1Q;

	return (0);
}

static int
cpsw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
	int i, vid;
	uint32_t ale_entry[3];
	struct cpsw_softc *sc;

	sc = device_get_softc(dev);

	if (vg->es_vlangroup >= CPSW_VLANS)
		return (EINVAL);

	vg->es_vid = 0;
	vid = cpsw_vgroups[vg->es_vlangroup].vid;
	if (vid == -1)
		return (0);

	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);
		if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN)
			continue;
		if (vid != ALE_VLAN(ale_entry))
			continue;

		vg->es_fid = 0;
		vg->es_vid = ALE_VLAN(ale_entry) | ETHERSWITCH_VID_VALID;
		vg->es_member_ports = ALE_VLAN_MEMBERS(ale_entry);
		vg->es_untagged_ports = ALE_VLAN_UNTAG(ale_entry);
	}

	return (0);
}

static void
cpsw_remove_vlan(struct cpsw_softc *sc, int vlan)
{
	int i;
	uint32_t ale_entry[3];

	for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
		cpsw_ale_read_entry(sc, i, ale_entry);
		if (ALE_TYPE(ale_entry) != ALE_TYPE_VLAN)
			continue;
		if (vlan != ALE_VLAN(ale_entry))
			continue;
		ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
		cpsw_ale_write_entry(sc, i, ale_entry);
		break;
	}
}

static int
cpsw_setvgroup(device_t dev, etherswitch_vlangroup_t *vg)
{
	int i;
	struct cpsw_softc *sc;

	sc = device_get_softc(dev);

	for (i = 0; i < CPSW_VLANS; i++) {
		/* Is this VLAN ID in use by another vlangroup? */
		if (vg->es_vlangroup != i && cpsw_vgroups[i].vid == vg->es_vid)
			return (EINVAL);
	}

	if (vg->es_vid == 0) {
		if (cpsw_vgroups[vg->es_vlangroup].vid == -1)
			return (0);
		cpsw_remove_vlan(sc, cpsw_vgroups[vg->es_vlangroup].vid);
		cpsw_vgroups[vg->es_vlangroup].vid = -1;
		vg->es_untagged_ports = 0;
		vg->es_member_ports = 0;
		vg->es_vid = 0;
		return (0);
	}

	vg->es_vid &= ETHERSWITCH_VID_MASK;
	vg->es_member_ports &= CPSW_PORTS_MASK;
	vg->es_untagged_ports &= CPSW_PORTS_MASK;

	if (cpsw_vgroups[vg->es_vlangroup].vid != -1 &&
	    cpsw_vgroups[vg->es_vlangroup].vid != vg->es_vid)
		return (EINVAL);

	cpsw_vgroups[vg->es_vlangroup].vid = vg->es_vid;
	cpsw_ale_update_vlan_table(sc, vg->es_vid, vg->es_member_ports,
	    vg->es_untagged_ports, vg->es_member_ports, 0);

	return (0);
}

static int
cpsw_readreg(device_t dev, int addr)
{

	/* Not supported. */
	return (0);
}

static int
cpsw_writereg(device_t dev, int addr, int value)
{

	/* Not supported. */
	return (0);
}

static int
cpsw_readphy(device_t dev, int phy, int reg)
{

	/* Not supported. */
	return (0);
}

static int
cpsw_writephy(device_t dev, int phy, int reg, int data)
{

	/* Not supported. */
	return (0);
}
#endif